| hexsha (stringlengths 40 to 40) | size (int64, 6 to 1.04M) | ext (stringclasses, 10 values) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 4 to 247) | max_stars_repo_name (stringlengths 4 to 130) | max_stars_repo_head_hexsha (stringlengths 40 to 78) | max_stars_repo_licenses (listlengths 1 to 10) | max_stars_count (int64, 1 to 368k, ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24 to 24, ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24 to 24, ⌀) | max_issues_repo_path (stringlengths 4 to 247) | max_issues_repo_name (stringlengths 4 to 130) | max_issues_repo_head_hexsha (stringlengths 40 to 78) | max_issues_repo_licenses (listlengths 1 to 10) | max_issues_count (int64, 1 to 116k, ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24 to 24, ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24 to 24, ⌀) | max_forks_repo_path (stringlengths 4 to 247) | max_forks_repo_name (stringlengths 4 to 130) | max_forks_repo_head_hexsha (stringlengths 40 to 78) | max_forks_repo_licenses (listlengths 1 to 10) | max_forks_count (int64, 1 to 105k, ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24 to 24, ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24 to 24, ⌀) | content (stringlengths 1 to 1.04M) | avg_line_length (float64, 1.53 to 618k) | max_line_length (int64, 1 to 1.02M) | alphanum_fraction (float64, 0 to 1) | original_content (stringlengths 6 to 1.04M) | filtered:remove_non_ascii (int64, 0 to 538k) | filtered:remove_decorators (int64, 0 to 917k) | filtered:remove_async (int64, 0 to 722k) | filtered:remove_classes (int64, -45 to 1M) | filtered:remove_generators (int64, 0 to 814k) | filtered:remove_function_no_docstring (int64, -102 to 850k) | filtered:remove_class_no_docstring (int64, -3 to 5.46k) | filtered:remove_unused_imports (int64, -1,350 to 52.4k) | filtered:remove_delete_markers (int64, 0 to 59.6k) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f4d42441b96321040a54f16e2b431bb5d1c4b18d
| 3,334
|
py
|
Python
|
include/server/bw/tools/__init__.py
|
spacebeam/bw
|
8f975a2925f309b0038c876f1234595df9798c98
|
[
"Apache-2.0"
] | 2
|
2019-10-30T04:26:21.000Z
|
2019-10-31T17:26:59.000Z
|
include/server/bw/tools/__init__.py
|
spacebeam/bw
|
8f975a2925f309b0038c876f1234595df9798c98
|
[
"Apache-2.0"
] | 22
|
2019-08-21T17:13:45.000Z
|
2020-08-06T00:38:56.000Z
|
include/server/bw/tools/__init__.py
|
spacebeam/bw
|
8f975a2925f309b0038c876f1234595df9798c98
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# This file is part of bw.
# Distributed under the terms of the last AGPL License.
__author__ = 'Jean Chassoul'
import uuid
def validate_uuid4(uuid_string):
'''
Validate that a UUID string is in
fact a valid uuid4.
Happily, the uuid module does the actual
checking for us.
'''
try:
val = uuid.UUID(uuid_string, version=4)
except ValueError:
# If it's a value error, then the string
# is not a valid hex code for a UUID.
return False
return str(val) == uuid_string
def get_average(total, marks):
'''
Get average from signals
'''
return float(total) / len(marks)
def get_percentage(part, whole):
'''
Get percentage of part and whole.
'''
return "{0:.0f}%".format(float(part)/whole * 100)
def clean_message(struct):
'''
clean message
'''
struct = struct.to_native()
struct = {
key: struct[key] for key in struct if struct[key] is not None
}
return struct
def clean_structure(struct):
'''
clean structure
'''
struct = struct.to_primitive()
struct = {
key: struct[key] for key in struct if struct[key] is not None
}
return struct
def clean_results(results):
'''
clean results
'''
results = results.to_primitive()
results = results.get('results')
results = [
{
key: dic[key] for key in dic if dic[key] is not None
} for dic in results
]
return {'results': results}
def str2bool(boo):
'''
String to boolean
'''
return boo.lower() in ('yes', 'true', 't', '1')
| 21.934211
| 69
| 0.588482
|
# -*- coding: utf-8 -*-
# This file is part of bw.
# Distributed under the terms of the last AGPL License.
__author__ = 'Jean Chassoul'
import arrow
import ujson as json
import logging
import uuid
from tornado import gen
def validate_uuid4(uuid_string):
'''
Validate that a UUID string is in
fact a valid uuid4.
Happily, the uuid module does the actual
checking for us.
'''
try:
val = uuid.UUID(uuid_string, version=4)
except ValueError:
# If it's a value error, then the string
# is not a valid hex code for a UUID.
return False
return str(val) == uuid_string
def get_average(total, marks):
'''
Get average from signals
'''
return float(total) / len(marks)
def get_percentage(part, whole):
'''
Get percentage of part and whole.
'''
return "{0:.0f}%".format(float(part)/whole * 100)
@gen.coroutine
def check_json(struct):
'''
Check for malformed JSON
'''
try:
message = json.loads(struct)
except Exception as error:
message = json.dumps({'error': 400})
raise error
return message
@gen.coroutine
def check_times(start, end):
'''
Check times
'''
try:
start = (arrow.get(start) if start
else arrow.get(arrow.utcnow().date()))
end = (arrow.get(end) if end else start.replace(days=+1))
message = {'start': start.timestamp, 'end': end.timestamp}
except Exception as error:
logging.exception(error)
raise error
return message
@gen.coroutine
def check_times_get_timestamp(start, end):
'''
Check times get timestamp
'''
try:
start = (arrow.get(start) if start
else arrow.get(arrow.utcnow().date()))
end = (arrow.get(end) if end else start.replace(days=+1))
message = {'start': start.timestamp, 'end': end.timestamp}
except Exception as error:
logging.exception(error)
raise error
return message
@gen.coroutine
def check_times_get_datetime(start, end):
'''
Check times get datetime
'''
try:
start = (arrow.get(start) if start
else arrow.get(arrow.utcnow().date()))
end = (arrow.get(end) if end else start.replace(days=+1))
message = {'start': start.naive, 'end': end.naive}
except Exception as error:
logging.exception(error)
raise error
return message
def clean_message(struct):
'''
clean message
'''
struct = struct.to_native()
struct = {
key: struct[key] for key in struct if struct[key] is not None
}
return struct
def clean_structure(struct):
'''
clean structure
'''
struct = struct.to_primitive()
struct = {
key: struct[key] for key in struct if struct[key] is not None
}
return struct
def clean_results(results):
'''
clean results
'''
results = results.to_primitive()
results = results.get('results')
results = [
{
key: dic[key] for key in dic if dic[key] is not None
} for dic in results
]
return {'results': results}
def str2bool(boo):
'''
String to boolean
'''
return boo.lower() in ('yes', 'true', 't', '1')
| 0
| 1,479
| 0
| 0
| 0
| 0
| 0
| -15
| 181
|
356caf4ee17cfa856f9452e5b81fc78885d6f859
| 122
|
py
|
Python
|
utils/poweroff_restart.py
|
ndkjing/usv
|
132e021432a0344a22914aaf68da7d7955d7331f
|
[
"MIT"
] | null | null | null |
utils/poweroff_restart.py
|
ndkjing/usv
|
132e021432a0344a22914aaf68da7d7955d7331f
|
[
"MIT"
] | null | null | null |
utils/poweroff_restart.py
|
ndkjing/usv
|
132e021432a0344a22914aaf68da7d7955d7331f
|
[
"MIT"
] | 1
|
2021-09-04T10:27:30.000Z
|
2021-09-04T10:27:30.000Z
|
#
| 11.090909
| 31
| 0.655738
|
from os import system
# restart the computer
def restart():
system('sudo reboot')
def poweroff():
system('sudo shutdown now')
| 12
| 0
| 0
| 0
| 0
| 45
| 0
| 0
| 67
|
33b53b231a99d94ff5cc752b86bbc0159b2e1fb3
| 17,472
|
py
|
Python
|
pyctrl/flask/server.py
|
ComplexArts/pyctrl-core
|
a72bd53924410c2e7f1e71c8188a0391550febdd
|
[
"Apache-2.0"
] | 12
|
2017-06-20T13:20:40.000Z
|
2021-01-18T00:12:10.000Z
|
pyctrl/flask/server.py
|
mcdeoliveira/beaglebone
|
6c6062c6d1e9902178500abcd10be6ac0bcf043d
|
[
"Apache-2.0"
] | 2
|
2017-06-12T15:17:24.000Z
|
2018-01-30T18:22:19.000Z
|
pyctrl/flask/server.py
|
mcdeoliveira/beaglebone
|
6c6062c6d1e9902178500abcd10be6ac0bcf043d
|
[
"Apache-2.0"
] | 4
|
2017-09-25T12:19:19.000Z
|
2019-01-31T21:46:24.000Z
|
import sys
from pyctrl.flask import JSONEncoder, JSONDecoder
encoder = JSONEncoder(sort_keys = True, indent = 4)
decoder = JSONDecoder()
# decorators
# decode
# decode_kwargs_aux
# decode_kwargs
# json_response
# Server class
if __name__ == "__main__":
try:
import os
os.environ['RCPY_NO_HANDLERS'] = 't'
from pyctrl.rc import Controller
debug = False
RCPY = True
except:
from pyctrl.timer import Controller
debug = True
RCPY = False
try:
app = Server(__name__)
app.config['SECRET_KEY'] = 'secret!'
# initialize controller
app.set_controller(controller = Controller(period = .01))
# run app
app.run(host='0.0.0.0',
debug = debug)
except:
pass
finally:
sys.exit(0)
| 33.926214
| 129
| 0.54802
|
from flask import Flask, request, render_template, jsonify, make_response, redirect, flash, url_for
from functools import wraps
import re
import pyctrl
from pyctrl.block import Logger
import warnings
import importlib
import traceback, sys, io
from pyctrl.flask import JSONEncoder, JSONDecoder
encoder = JSONEncoder(sort_keys = True, indent = 4)
decoder = JSONDecoder()
# decorators
# decode
def decode_value(f):
@wraps(f)
def wrapper(label, value, *args, **kwargs):
return f(label, decoder.decode(value), *args, **kwargs)
return wrapper
# decode_kwargs_aux
def decode_kwargs_aux(e):
if len(e) == 1:
return decoder.decode(e[0])
elif len(e) > 1:
return [decoder.decode(v) for v in e]
else:
return None
# decode_kwargs
def decode_kwargs(f):
@wraps(f)
def wrapper(*args, **kwargs):
try:
kwargs.update({k: decode_kwargs_aux(request.args.getlist(k))
for k in request.args.keys()})
except:
raise Exception("Arguments '{}' are not json compatible".format(request.args))
#print('>>> kwargs = {}'.format(kwargs))
return f(*args, **kwargs)
return wrapper
# json_response
def json_response(f):
@wraps(f)
def wrapper(*args, **kwargs):
try:
retval = f(*args, **kwargs)
if retval is None:
retval = { 'status': 'success' }
except Exception as e:
message = io.StringIO()
traceback.print_exc(file=message)
retval = { 'status': 'error',
'message': message.getvalue() }
next = request.args.get('next', None)
if next:
if 'status' in retval and retval['status'] == 'error':
flash(retval['message'])
return redirect(url_for(next))
else:
return jsonify(retval)
return wrapper
# Server class
class Server(Flask):
def __init__(self, *args, **kwargs):
self.controller = None
self.base_url = ''
# call super
super().__init__(*args, **kwargs)
# change json_encoder
self.json_encoder = JSONEncoder
# set api entry points
# index, info and scope
self.add_url_rule(self.base_url + '/',
view_func = self.index)
self.add_url_rule(self.base_url + '/info',
view_func = self.info)
self.add_url_rule(self.base_url + '/scope/<path:label>',
view_func = self.scope)
# download controller
self.add_url_rule(self.base_url + '/download',
view_func = self.download)
# upload controller
self.add_url_rule(self.base_url + '/upload',
methods=['GET', 'POST'],
view_func = self.upload)
# reset
self.add_url_rule(self.base_url + '/reset',
view_func = self.reset)
# set controller
self.add_url_rule(self.base_url + '/set/controller/<module>/<pyctrl_class>',
view_func = self.reset_controller)
# start and stop
self.add_url_rule(self.base_url + '/start',
view_func = self.start)
self.add_url_rule(self.base_url + '/stop',
view_func = self.stop)
# signals
self.add_url_rule(self.base_url + '/add/signal/<path:label>',
view_func = self.add_signal)
self.add_url_rule(self.base_url + '/remove/signal/<path:label>',
view_func = self.remove_signal)
self.add_url_rule(self.base_url + '/get/signal/<path:label>',
view_func = self.get_signal)
self.add_url_rule(self.base_url + '/set/signal/<path:label>/<value>',
view_func = self.set_signal)
self.add_url_rule(self.base_url + '/list/signals',
view_func = self.list_signals)
# sources
self.add_url_rule(self.base_url + '/add/source/<path:label>/<module_name>/<class_name>',
view_func = self.add_source)
self.add_url_rule(self.base_url + '/remove/source/<path:label>',
view_func = self.remove_source)
self.add_url_rule(self.base_url + '/get/source/<path:label>',
view_func = self.get_source)
self.add_url_rule(self.base_url + '/set/source/<path:label>',
view_func = self.set_source)
self.add_url_rule(self.base_url + '/html/source/<path:label>',
view_func = self.html_source)
# filters
self.add_url_rule(self.base_url + '/add/filter/<path:label>/<module_name>/<class_name>',
view_func = self.add_filter)
self.add_url_rule(self.base_url + '/remove/filter/<path:label>',
view_func = self.remove_filter)
self.add_url_rule(self.base_url + '/get/filter/<path:label>',
view_func = self.get_filter)
self.add_url_rule(self.base_url + '/set/filter/<path:label>',
view_func = self.set_filter)
self.add_url_rule(self.base_url + '/html/filter/<path:label>',
view_func = self.html_filter)
# sinks
self.add_url_rule(self.base_url + '/add/sink/<path:label>/<module_name>/<class_name>',
view_func = self.add_sink)
self.add_url_rule(self.base_url + '/remove/sink/<path:label>',
view_func = self.remove_sink)
self.add_url_rule(self.base_url + '/get/sink/<path:label>',
view_func = self.get_sink)
self.add_url_rule(self.base_url + '/set/sink/<path:label>',
view_func = self.set_sink)
self.add_url_rule(self.base_url + '/html/sink/<path:label>',
view_func = self.html_sink)
# timers
self.add_url_rule(self.base_url + '/add/timer/<path:label>/<module_name>/<class_name>',
view_func = self.add_timer)
self.add_url_rule(self.base_url + '/remove/timer/<path:label>',
view_func = self.remove_timer)
self.add_url_rule(self.base_url + '/get/timer/<path:label>',
view_func = self.get_timer)
self.add_url_rule(self.base_url + '/set/timer/<path:label>',
view_func = self.set_timer)
self.add_url_rule(self.base_url + '/html/timer/<path:label>',
view_func = self.html_timer)
def set_controller(self, **kwargs):
# Create new controller?
if 'module' in kwargs or 'pyctrl_class' in kwargs:
module = kwargs.pop('module', 'pyctrl')
pyctrl_class = kwargs.pop('pyctrl_class', 'Controller')
ckwargs = kwargs.pop('kwargs', {})
if len(kwargs) > 0:
raise Exception("webserver.reset():: Unknown parameter(s) '{}'".format(', '.join(str(k) for k in kwargs.keys())))
try:
if True:
warnings.warn("> Installing new instance of '{}.{}({})' as controller".format(module, pyctrl_class, ckwargs))
obj_class = getattr(importlib.import_module(module),
pyctrl_class)
controller = obj_class(**ckwargs)
# print('obj_class = {}'.format(obj_class))
# print('_controller = {}'.format(_controller))
# Make sure it is an instance of pyctrl.Controller
if not isinstance(controller, pyctrl.Controller):
raise Exception("Object '{}.{}' is not and instance of pyctrl.Controller".format(module, pyctrl_class))
self.controller = controller
except Exception as e:
raise Exception("Error resetting controller: {}".format(e))
elif 'controller' in kwargs:
controller = kwargs.pop('controller')
# Make sure it is an instance of pyctrl.Controller
if not isinstance(controller, pyctrl.Controller):
raise Exception("Object '{}.{}' is not and instance of pyctrl.Controller".format(module, pyctrl_class))
self.controller = controller
# auxiliary
def get_keys(self, method, type_name,
label, **kwargs):
# get keys
keys = kwargs.get('keys', '')
if keys and not isinstance(keys, (list,tuple)):
keys = [keys]
print('keys = {}'.format(keys))
# get container
(container,label) = self.controller.resolve_label(label)
if keys:
# return attributes
if len(keys) > 1:
return method(label, *keys)
else:
return {keys[0]: method(label, *keys)}
else:
# return container
return {label:
getattr(container, type_name)[label]['block']}
# handlers
def index(self):
sinks = [ {'label': k, 'is_logger': isinstance(v['block'], Logger)}
for (k,v) in self.controller.sinks.items() ]
return render_template('index.html',
baseurl = self.base_url,
class_name = self.controller.info('class'),
signals = sorted(self.controller.list_signals()),
sources = self.controller.list_sources(),
filters = self.controller.list_filters(),
sinks = sinks,
timers = self.controller.list_timers(),
is_running = self.controller.get_signal('is_running'))
def info(self):
return self.controller.html()
def scope(self, label, *args, **kwargs):
return render_template('scope.html',
baseurl = self.base_url,
logger = label)
def download(self):
response = make_response(jsonify(self.controller))
response.headers["Content-Disposition"] \
= "attachment; filename=controller.json"
return response
def upload(self, **kwargs):
# post?
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
flash("Form has no field 'part'")
else:
# has file
file = request.files['file']
# empty filename?
if not file or file.filename == '':
flash('No file selected')
else:
# there is a file
try:
controller = decoder.decode(file.read().decode('utf-8'))
# print('controller = {}'.format(controller))
self.set_controller(controller = controller)
flash('New controller successfully loaded.')
except Exception as e:
message = io.StringIO()
traceback.print_exc(file=message)
flash('Could not load controller.')
flash(message.getvalue())
return redirect(self.base_url + '/')
@json_response
@decode_kwargs
def reset(self, **kwargs):
return self.controller.reset(**kwargs)
@decode_kwargs
def reset_controller(self, **kwargs):
# set new controller
self.set_controller(**kwargs)
# redirect to base
return redirect(self.base_url + '/')
@json_response
def start(self):
return self.controller.start()
@json_response
def stop(self):
return self.controller.stop()
@json_response
def add_signal(self, *args, **kwargs):
return self.controller.add_signal(*args, **kwargs)
@json_response
def remove_signal(self, *args, **kwargs):
return self.controller.remove_signal(*args, **kwargs)
@json_response
def get_signal(self, label, *args, **kwargs):
return {label: self.controller.get_signal(label, *args, **kwargs)}
@json_response
@decode_value
def set_signal(self, *args, **kwargs):
return self.controller.set_signal(*args, **kwargs)
@json_response
def list_signals(self):
return self.controller.list_signals()
# sources
@json_response
@decode_kwargs
def add_source(self, label, module_name, class_name, **kwargs):
return self.controller.add_source(label, (module_name, class_name),
**kwargs)
@json_response
def remove_source(self, *args, **kwargs):
return self.controller.remove_source(*args, **kwargs)
@json_response
@decode_kwargs
def get_source(self, label, *args, **kwargs):
return self.get_keys(self.controller.get_source, 'sources',
label, *args, **kwargs)
@json_response
@decode_kwargs
def set_source(self, *args, **kwargs):
return self.controller.set_source(*args, **kwargs)
@decode_kwargs
def html_source(self, label, *args, **kwargs):
# get container
(container,label) = self.controller.resolve_label(label)
return self.controller.sources[label]['block'].html();
# filters
@json_response
@decode_kwargs
def add_filter(self, label, module_name, class_name, **kwargs):
return self.controller.add_filter(label, (module_name, class_name),
**kwargs)
@json_response
def remove_filter(self, *args, **kwargs):
return self.controller.remove_filter(*args, **kwargs)
@json_response
@decode_kwargs
def get_filter(self, label, *args, **kwargs):
return self.get_keys(self.controller.get_filter, 'filters',
label, *args, **kwargs)
@json_response
@decode_kwargs
def set_filter(self, *args, **kwargs):
return self.controller.set_filter(*args, **kwargs)
@decode_kwargs
def html_filter(self, label, *args, **kwargs):
# get container
(container,label) = self.controller.resolve_label(label)
return self.controller.filters[label]['block'].html();
# sinks
@json_response
@decode_kwargs
def add_sink(self, label, module_name, class_name, **kwargs):
return self.controller.add_sink(label, (module_name, class_name),
**kwargs)
@json_response
def remove_sink(self, *args, **kwargs):
return self.controller.remove_sink(*args, **kwargs)
@json_response
@decode_kwargs
def get_sink(self, label, *args, **kwargs):
return self.get_keys(self.controller.get_sink, 'sinks',
label, *args, **kwargs)
@json_response
@decode_kwargs
def set_sink(self, *args, **kwargs):
return self.controller.set_sink(*args, **kwargs)
@decode_kwargs
def html_sink(self, label, *args, **kwargs):
# get container
(container,label) = self.controller.resolve_label(label)
return self.controller.sinks[label]['block'].html();
# timers
@json_response
@decode_kwargs
def add_timer(self, label, module_name, class_name, **kwargs):
return self.controller.add_timer(label, (module_name, class_name),
**kwargs)
@json_response
def remove_timer(self, *args, **kwargs):
return self.controller.remove_timer(*args, **kwargs)
@json_response
@decode_kwargs
def get_timer(self, label, *args, **kwargs):
return self.get_keys(self.controller.get_timer, 'timers',
label, *args, **kwargs)
@json_response
@decode_kwargs
def set_timer(self, *args, **kwargs):
return self.controller.set_timer(*args, **kwargs)
@decode_kwargs
def html_timer(self, label, *args, **kwargs):
# get container
(container,label) = self.controller.resolve_label(label)
return self.controller.timers[label]['block'].html();
if __name__ == "__main__":
try:
import os
os.environ['RCPY_NO_HANDLERS'] = 't'
from pyctrl.rc import Controller
debug = False
RCPY = True
except:
from pyctrl.timer import Controller
debug = True
RCPY = False
try:
app = Server(__name__)
app.config['SECRET_KEY'] = 'secret!'
# initialize controller
app.set_controller(controller = Controller(period = .01))
# run app
app.run(host='0.0.0.0',
debug = debug)
except:
pass
finally:
sys.exit(0)
| 0
| 5,155
| 0
| 10,782
| 0
| 317
| 0
| 78
| 266
|
5f6900da0b97f7cb9568e67607b74fc2b4feca0c
| 3,732
|
py
|
Python
|
proxy-alpha.py
|
ARTRoyale/ZapRoyale
|
984d72ee942b29f18250eae130d083d29151bd68
|
[
"MIT"
] | null | null | null |
proxy-alpha.py
|
ARTRoyale/ZapRoyale
|
984d72ee942b29f18250eae130d083d29151bd68
|
[
"MIT"
] | null | null | null |
proxy-alpha.py
|
ARTRoyale/ZapRoyale
|
984d72ee942b29f18250eae130d083d29151bd68
|
[
"MIT"
] | null | null | null |
# by ARTRoyale (A. Lebedev) for ZapRoyale
#
global debugmode
debugmode = True
global gl_server_address
gl_server_address = ('***.***.*.**', 9339)
if __name__ == "__main__":
port_num = 9339
print('[INFO] Proksi podkluchaetsa k portu', port_num)
ThreadedServer('0.0.0.0',port_num).listen()
| 32.172414
| 111
| 0.549303
|
# by ARTRoyale (A. Lebedev) for ZapRoyale
import socket
import threading
import struct
import os
import uuid
import random
# start debugging
global debugmode
debugmode = True
def debug(debmessage):
if debmessage:
if debugmode:
print('[DEBUG]',debmessage)
else:
pass
else:
pass
def randomBytes(n):
return bytes(random.getrandbits(8) for i in range(n))
def mockrcv(mock):
rdata = mock.recv(10086)
if not rdata:
return "nulldata"
return rdata
def serverlisten(mock, client):
ndata=mockrcv(mock)
if(ndata=="nulldata"):
print('[WARNING] Net proksi!')
return False
else:
lmessage_id = int(str(struct.unpack('>H', ndata[:2]))[1:-2])
if (lmessage_id >= 30000 or lmessage_id < 10000):
lmessage_id = 'Neizvestniy messadzh'
elif len(str(lmessage_id)) != 5:
lmessage_id = 'Neizvestniy messadzh'
print('[OK] Servak => Proksi', lmessage_id)
response = ndata
try:
client.send(response)
except ConnectionAbortedError:
client.close()
debug('closed')
global gl_server_address
gl_server_address = ('***.***.*.**', 9339)
class ThreadedServer(object):
def __init__(self, host, port):
self.host = host
self.port = port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind((self.host, self.port))
print('[OK] Zapusk Proksi')
def listen(self):
self.sock.listen(5)
while True:
client, address = self.sock.accept()
print('[INFO] Klient => Proksi', address, 'podklucheno')
client.settimeout(60)  # timeout
mock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("[INFO] Proksi =><= Server pudkluchen k",gl_server_address[0],'on port',gl_server_address[1])
mock.connect(gl_server_address)
print('[INFO] Proksi =><= Server podkluchen')
threading.Thread(target = self.listenToClient,args = (client,address,mock)).start()
def listenToClient(self, client, address, mock):
while True:
try:
data = client.recv(4098)
except:
debug('closed')
mock.close()
try:
message_id = int(str(struct.unpack('>H', data[:2]))[1:-2])
except:
message_id = 'Neizvestniy messadzh'
try:
if (message_id >= 30000 or message_id < 10000):
message_id = 'Neizvestniy messadzh'
except:
message_id = 'Neizvestniy messadzh'
try:
if len(str(message_id)) != 5:
message_id = 'Neizvestniy messadzh'
except:
message_id = 'Neizvestniy messadzh'
print('[OK] Klient => Proksi', message_id)
fmessage = data
try:
mock.sendall(fmessage)
except:
debug('done closing?')
break
print('[OK] Proksi => Server', message_id)
while 1:
debug('Slushayu servak')
r = serverlisten(mock, client);
if r == False:
debug('Net infy ot servaka')
break
else:
debug('Danniye polucheny ot servaka')
break
if __name__ == "__main__":
port_num = 9339
print('[INFO] Proksi podkluchaetsa k portu', port_num)
ThreadedServer('0.0.0.0',port_num).listen()
| 40
| 0
| 0
| 2,302
| 0
| 898
| 0
| -51
| 245
|
a5087d2093117c0c8a944443b7263a2f96effcb6
| 213
|
py
|
Python
|
api/api_scheme.py
|
raywu60kg/tensorlfow-project-demo
|
acd1085788da289ec7ed21ec0d46c9599188e32c
|
[
"MIT"
] | null | null | null |
api/api_scheme.py
|
raywu60kg/tensorlfow-project-demo
|
acd1085788da289ec7ed21ec0d46c9599188e32c
|
[
"MIT"
] | null | null | null |
api/api_scheme.py
|
raywu60kg/tensorlfow-project-demo
|
acd1085788da289ec7ed21ec0d46c9599188e32c
|
[
"MIT"
] | null | null | null |
# class MetricsOutput(BaseModel):
# name: str
# metrics: dict
| 14.2
| 36
| 0.723005
|
from pydantic import BaseModel
class HealthCheckOutput(BaseModel):
health: bool
# class MetricsOutput(BaseModel):
# name: str
# metrics: dict
class RetrainModelOutput(BaseModel):
train: bool
| 0
| 0
| 0
| 62
| 0
| 0
| 0
| 9
| 68
|
de32d45449405b3ec00b4326a7b6348906ee8392
| 742
|
py
|
Python
|
api/blueprints/users/views/roles.py
|
mohamed040406/API
|
40ceb2b35271938d90e4309a6cdcf63ba0c17f0b
|
[
"MIT"
] | 1
|
2021-05-01T02:25:27.000Z
|
2021-05-01T02:25:27.000Z
|
api/blueprints/users/views/roles.py
|
mohamed040406/API
|
40ceb2b35271938d90e4309a6cdcf63ba0c17f0b
|
[
"MIT"
] | null | null | null |
api/blueprints/users/views/roles.py
|
mohamed040406/API
|
40ceb2b35271938d90e4309a6cdcf63ba0c17f0b
|
[
"MIT"
] | null | null | null |
from .. import bp
import utils
request: utils.Request
| 24.733333
| 66
| 0.617251
|
from quart import request, jsonify
import time
from api.models import User
from .. import bp
import utils
request: utils.Request
@bp.route("/<int:user_id>/roles", methods=["GET"])
@utils.auth_required
async def fetch_user_roles(user_id: int):
"""Fetch the specific users roles"""
query = """
SELECT json_agg(json_build_object(
'name', r.name,
'base', r.base,
'id', r.id::TEXT,
'color', r.color,
'position', r.position,
'permissions', r.permissions::TEXT
))
FROM roles r WHERE r.id IN (
SELECT ur.role_id FROM userroles ur WHERE ur.user_id = $1
)
"""
record = await User.pool.fetchval(query, user_id)
return jsonify(roles=record)
| 0
| 587
| 0
| 0
| 0
| 0
| 0
| 9
| 90
|
d262a3348286d2c2acf7e83331728949dbe00b99
| 2,328
|
py
|
Python
|
mkgta.py
|
shaun95/Tacotron2-PyTorch
|
b1761fd7660e56adf39f3c8d02852fbaec1da2c5
|
[
"MIT"
] | 1
|
2022-03-10T20:02:58.000Z
|
2022-03-10T20:02:58.000Z
|
mkgta.py
|
shaun95/Tacotron2-PyTorch
|
b1761fd7660e56adf39f3c8d02852fbaec1da2c5
|
[
"MIT"
] | null | null | null |
mkgta.py
|
shaun95/Tacotron2-PyTorch
|
b1761fd7660e56adf39f3c8d02852fbaec1da2c5
|
[
"MIT"
] | null | null | null |
import torch
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--ckpt_pth', type = str, default = '',
required = True, help = 'path to load checkpoints')
parser.add_argument('-n', '--npy_pth', type = str, default = 'dump',
help = 'path to save mels')
args = parser.parse_args()
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
model = load_model(args.ckpt_pth)
flist = files_to_list()
for x in flist:
ret = infer(x[0], x[1], model)
name = x[0].split('/')[-1].split('.wav')[0]
if args.npy_pth != '':
save_mel(ret, args.npy_pth, name)
| 31.04
| 75
| 0.627577
|
import os
import torch
import argparse
import numpy as np
import matplotlib.pylab as plt
from text import text_to_sequence
from model.model import Tacotron2
from hparams import hparams as hps
from utils.util import mode, to_var, to_arr
from utils.audio import load_wav, save_wav, melspectrogram
def files_to_list(fdir = 'data'):
f_list = []
with open(os.path.join(fdir, 'metadata.csv'), encoding = 'utf-8') as f:
for line in f:
parts = line.strip().split('|')
wav_path = os.path.join(fdir, 'wavs', '%s.wav' % parts[0])
f_list.append([wav_path, parts[1]])
return f_list
def load_model(ckpt_pth):
ckpt_dict = torch.load(ckpt_pth)
model = Tacotron2()
model.load_state_dict(ckpt_dict['model'])
model = mode(model, True).eval()
model.decoder.train()
model.postnet.train()
return model
def infer(wav_path, text, model):
sequence = text_to_sequence(text, hps.text_cleaners)
sequence = to_var(torch.IntTensor(sequence)[None, :]).long()
mel = melspectrogram(load_wav(wav_path))
mel_in = to_var(torch.Tensor([mel]))
r = mel_in.shape[2]%hps.n_frames_per_step
if r != 0:
mel_in = mel_in[:, :, :-r]
sequence = torch.cat([sequence, sequence], 0)
mel_in = torch.cat([mel_in, mel_in], 0)
_, mel_outputs_postnet, _, _ = model.teacher_infer(sequence, mel_in)
ret = mel
if r != 0:
ret[:, :-r] = to_arr(mel_outputs_postnet[0])
else:
ret = to_arr(mel_outputs_postnet[0])
return ret
def save_mel(res, pth, name):
out = os.path.join(pth, name)
np.save(out, res)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--ckpt_pth', type = str, default = '',
required = True, help = 'path to load checkpoints')
parser.add_argument('-n', '--npy_pth', type = str, default = 'dump',
help = 'path to save mels')
args = parser.parse_args()
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
model = load_model(args.ckpt_pth)
flist = files_to_list()
for x in flist:
ret = infer(x[0], x[1], model)
name = x[0].split('/')[-1].split('.wav')[0]
if args.npy_pth != '':
save_mel(ret, args.npy_pth, name)
| 0
| 0
| 0
| 0
| 0
| 1,224
| 0
| 90
| 268
|
005dfd3bd6b99b749c3643626e7c275bbe2acb28
| 1,251
|
py
|
Python
|
com/Leetcode/981.TimeBasedKey-ValueStore.py
|
samkitsheth95/InterviewPrep
|
6be68c19bcaab4e64a8f646cc64f651bade8ba86
|
[
"MIT"
] | null | null | null |
com/Leetcode/981.TimeBasedKey-ValueStore.py
|
samkitsheth95/InterviewPrep
|
6be68c19bcaab4e64a8f646cc64f651bade8ba86
|
[
"MIT"
] | null | null | null |
com/Leetcode/981.TimeBasedKey-ValueStore.py
|
samkitsheth95/InterviewPrep
|
6be68c19bcaab4e64a8f646cc64f651bade8ba86
|
[
"MIT"
] | null | null | null |
# Your TimeMap object will be instantiated and called as such:
# obj = TimeMap()
# obj.set(key,value,timestamp)
# param_2 = obj.get(key,timestamp)
| 25.02
| 64
| 0.513189
|
from collections import defaultdict
from bisect import bisect
class TimeMap:
def binarySearch(self, a, key):
if key < a[0][1]:
return ''
elif key >= a[-1][1]:
return a[-1][0]
low = 0
high = len(a) - 1
while low <= high:
mid = low + (high - low) // 2
if a[mid][1] == key:
return a[mid][0]
elif a[mid][1] > key:
high = mid - 1
else:
low = mid + 1
return a[high][0]
def __init__(self):
"""
Initialize your data structure here.
"""
self.d = defaultdict(list)
def set(self, key: str, value: str, timestamp: int) -> None:
self.d[key].append((value, timestamp))
def get(self, key: str, timestamp: int) -> str:
return self.binarySearch(self.d[key], timestamp)
def getBisect(self, key, timestamp):
# Alternative lookup: entries are appended as (value, timestamp) with
# increasing timestamps, so bisecting over the timestamps finds the
# latest entry whose timestamp is <= the query timestamp.
A = self.d.get(key)
if not A:
return ""
i = bisect([ts for _, ts in A], timestamp)
return A[i - 1][0] if i else ""
# Your TimeMap object will be instantiated and called as such:
# obj = TimeMap()
# obj.set(key,value,timestamp)
# param_2 = obj.get(key,timestamp)
| 0
| 0
| 0
| 1,016
| 0
| 0
| 0
| 18
| 67
|
5017ac97f2b5056a11800f28fde484ec4a35c1b3
| 8,797
|
py
|
Python
|
sstvis.py
|
mdjong1/sstvis
|
927590b1295491a062a77634008a9146e783c617
|
[
"MIT"
] | null | null | null |
sstvis.py
|
mdjong1/sstvis
|
927590b1295491a062a77634008a9146e783c617
|
[
"MIT"
] | null | null | null |
sstvis.py
|
mdjong1/sstvis
|
927590b1295491a062a77634008a9146e783c617
|
[
"MIT"
] | null | null | null |
import os
# prevent pygame from printing their welcome message
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
import pygame
# Define some basic colors for easy use
white = (255, 255, 255)
red = (255, 0, 0)
black = (0, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)
# Screen resolution to use
window_dimensions = (1200, 800)
# A higher value means fewer screen updates, a lower value means more (the display refreshes when count % UPDATE_FREQUENCY == 0)
UPDATE_FREQUENCY = 1000
# Only updates every nth triangle, increases clarity in high density datasets
# Can also put this to 1 and make the scaling factor larger
THINNING_FACTOR = 1
pygame.init()
screen = pygame.display.set_mode(window_dimensions)
screen.fill(white)
font = pygame.font.SysFont("Arial", 12)
# TODO: Split label and value for each statistics field
time_taken = font.render("time:", True, white, blue)
tt_rect = time_taken.get_rect(bottomright=(80, window_dimensions[1] - 65))
screen.blit(time_taken, tt_rect)
time_taken_val = font.render(" ", True, white, blue)
tt_rect2 = time_taken_val.get_rect(bottomleft=(80, window_dimensions[1] - 65))
screen.blit(time_taken_val, tt_rect2)
points_per_second = font.render("avg #pts/s:", True, white, blue)
pps_rect = points_per_second.get_rect(bottomright=(80, window_dimensions[1] - 45))
screen.blit(points_per_second, pps_rect)
points_per_second_val = font.render(" ", True, white, blue)
pps_rect2 = points_per_second_val.get_rect(bottomleft=(80, window_dimensions[1] - 45))
screen.blit(points_per_second_val, pps_rect2)
# points_last_minute = font.render(" # pts last minute:", True, white, blue)
# plm_rect = points_last_minute.get_rect(bottomright=(80, window_dimensions[1] - 95))
# screen.blit(points_last_minute, plm_rect)
# points_last_minute_val = font.render(" ", True, white, blue)
# plm_rect2 = points_last_minute_val.get_rect(bottomleft=(80, window_dimensions[1] - 95))
# screen.blit(points_last_minute_val, plm_rect2)
total_points = font.render("# pts:", True, white, blue)
tp_rect = total_points.get_rect(bottomright=(80, window_dimensions[1] - 25))
screen.blit(total_points, tp_rect)
total_points_val = font.render(" ", True, white, blue)
tp_rect2 = total_points_val.get_rect(bottomleft=(80, window_dimensions[1] - 25))
screen.blit(total_points_val, tp_rect2)
total_triangles = font.render("# triangles:", True, white, blue)
ttr_rect = total_triangles.get_rect(bottomright=(80, window_dimensions[1] - 5))
screen.blit(total_triangles, ttr_rect)
total_triangles_val = font.render(" ", True, white, blue)
ttr_rect2 = total_triangles_val.get_rect(bottomleft=(80, window_dimensions[1] - 5))
screen.blit(total_triangles_val, ttr_rect2)
pygame.display.set_caption('sstvis')
pygame.display.flip()
if __name__ == "__main__":
main()
| 35.615385
| 128
| 0.630329
|
import fileinput
import sys
import math
import time
import os
import click
# prevent pygame from printing their welcome message
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
import pygame
# Define some basic colors for easy use
white = (255, 255, 255)
red = (255, 0, 0)
black = (0, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)
# Screen resolution to use
window_dimensions = (1200, 800)
# A higher value means fewer screen updates, a lower value means more (the display refreshes when count % UPDATE_FREQUENCY == 0)
UPDATE_FREQUENCY = 1000
# Only updates every nth triangle, increases clarity in high density datasets
# Can also put this to 1 and make the scaling factor larger
THINNING_FACTOR = 1
pygame.init()
screen = pygame.display.set_mode(window_dimensions)
screen.fill(white)
font = pygame.font.SysFont("Arial", 12)
# TODO: Split label and value for each statistics field
time_taken = font.render("time:", True, white, blue)
tt_rect = time_taken.get_rect(bottomright=(80, window_dimensions[1] - 65))
screen.blit(time_taken, tt_rect)
time_taken_val = font.render(" ", True, white, blue)
tt_rect2 = time_taken_val.get_rect(bottomleft=(80, window_dimensions[1] - 65))
screen.blit(time_taken_val, tt_rect2)
points_per_second = font.render("avg #pts/s:", True, white, blue)
pps_rect = points_per_second.get_rect(bottomright=(80, window_dimensions[1] - 45))
screen.blit(points_per_second, pps_rect)
points_per_second_val = font.render(" ", True, white, blue)
pps_rect2 = points_per_second_val.get_rect(bottomleft=(80, window_dimensions[1] - 45))
screen.blit(points_per_second_val, pps_rect2)
# points_last_minute = font.render(" # pts last minute:", True, white, blue)
# plm_rect = points_last_minute.get_rect(bottomright=(80, window_dimensions[1] - 95))
# screen.blit(points_last_minute, plm_rect)
# points_last_minute_val = font.render(" ", True, white, blue)
# plm_rect2 = points_last_minute_val.get_rect(bottomleft=(80, window_dimensions[1] - 95))
# screen.blit(points_last_minute_val, plm_rect2)
total_points = font.render("# pts:", True, white, blue)
tp_rect = total_points.get_rect(bottomright=(80, window_dimensions[1] - 25))
screen.blit(total_points, tp_rect)
total_points_val = font.render(" ", True, white, blue)
tp_rect2 = total_points_val.get_rect(bottomleft=(80, window_dimensions[1] - 25))
screen.blit(total_points_val, tp_rect2)
total_triangles = font.render("# triangles:", True, white, blue)
ttr_rect = total_triangles.get_rect(bottomright=(80, window_dimensions[1] - 5))
screen.blit(total_triangles, ttr_rect)
total_triangles_val = font.render(" ", True, white, blue)
ttr_rect2 = total_triangles_val.get_rect(bottomleft=(80, window_dimensions[1] - 5))
screen.blit(total_triangles_val, ttr_rect2)
pygame.display.set_caption('sstvis')
pygame.display.flip()
class Vertex:
def __init__(self, x, y, z):
self.x = float(x)
self.y = float(y)
self.z = float(z)
class Processor:
def __init__(self):
self.bbox = []
self.vertices = {}
self.count = 0
self.vertex_count = 1
self.triangle_count = 0
self.scale = 1
self.start_time = time.time()
self.points_per_time = {}
def transform(self, x, y):
rex = (float(x) - self.bbox[0]) * self.scale + 5
rey = (float(y) - self.bbox[1]) * self.scale
return rex, rey
def increment_count(self):
self.count += 1
def update_statistics(self):
current_epoch = int(time.time())
time_taken_val = font.render(" " + str(round(current_epoch - self.start_time)) + "s ", True, black, white)
screen.blit(time_taken_val, tt_rect2)
points_in_past_minute = 0
for i in range(current_epoch - 60, current_epoch):
if i in self.points_per_time:
points_in_past_minute += self.points_per_time[i]
points_per_second_val = font.render(" " + str(round(points_in_past_minute / 60)) + " ", True, black, white)
screen.blit(points_per_second_val, pps_rect2)
# points_last_minute_val = font.render(" " + str(points_in_past_minute) + " ", True, black, white)
# screen.blit(points_last_minute_val, plm_rect2)
total_points_val = font.render(" " + str(self.vertex_count - 1) + " ", True, black, white)
screen.blit(total_points_val, tp_rect2)
total_triangles_val = font.render(" " + str(self.triangle_count) + " ", True, black, white)
screen.blit(total_triangles_val, ttr_rect2)
# Keep these on top for legibility
screen.blit(time_taken, tt_rect)
screen.blit(points_per_second, pps_rect)
# screen.blit(points_last_minute, plm_rect)
screen.blit(total_points, tp_rect)
screen.blit(total_triangles, ttr_rect)
def process_line(self, line):
pygame.event.get()
split_line = line.rstrip("\n").split(" ")
if split_line[0] == "#":
return
elif split_line[0] == "b":
self.bbox.append(float(split_line[1]))
self.bbox.append(float(split_line[2]))
self.bbox.append(float(split_line[3]))
self.bbox.append(float(split_line[4]))
delta_x = self.bbox[2] - self.bbox[0]
delta_y = self.bbox[3] - self.bbox[1]
largest_delta = delta_y if delta_y > delta_x else delta_x
self.scale = math.floor(window_dimensions[1] / largest_delta)
minx, miny = self.transform(self.bbox[0], self.bbox[1])
maxx, maxy = self.transform(self.bbox[2], self.bbox[3])
pygame.draw.lines(
surface=screen,
color=red,
closed=True,
points=(
(minx, window_dimensions[1] - miny - 5),
(maxx, window_dimensions[1] - miny - 5),
(maxx, window_dimensions[1] - maxy - 5),
(minx, window_dimensions[1] - maxy - 5)
),
width=3
)
pygame.display.update()
elif split_line[0] == "v":
# Add vertex count per unit
current_epoch = int(time.time())
if current_epoch not in self.points_per_time:
self.points_per_time[current_epoch] = 1
else:
self.points_per_time[current_epoch] += 1
# Transform x and y into current scale for visualization, then store that version in the Vertex
x, y = self.transform(split_line[1], split_line[2])
z = split_line[3]
self.vertices[self.vertex_count] = Vertex(x, y, z)
self.vertex_count += 1
elif split_line[0] == "f":
f1 = int(split_line[1])
f2 = int(split_line[2])
f3 = int(split_line[3])
if self.count % THINNING_FACTOR == 0:
pygame.draw.lines(
surface=screen,
color=black,
closed=True,
points=(
(self.vertices[f1].x, window_dimensions[1] - self.vertices[f1].y - 5),
(self.vertices[f2].x, window_dimensions[1] - self.vertices[f2].y - 5),
(self.vertices[f3].x, window_dimensions[1] - self.vertices[f3].y - 5)
),
width=1)
# pygame.draw.circle(screen, black, ((vertices[f1].x, vertices[f1].y)), 1)
self.update_statistics()
if self.count % UPDATE_FREQUENCY == 0:
pygame.display.update()
self.triangle_count += 1
@click.command()
@click.option('--thinning', default=THINNING_FACTOR, help='thinning factor (1 = no thinning)')
@click.option('--frequency', default=UPDATE_FREQUENCY, help='Higher frequency is less updates, lower frequency is more updates')
def main(thinning, frequency):
global THINNING_FACTOR
global UPDATE_FREQUENCY
THINNING_FACTOR = thinning
UPDATE_FREQUENCY = frequency
processor = Processor()
for stdin_line in sys.stdin:
if stdin_line == "":
continue
processor.process_line(stdin_line)
processor.increment_count()
sys.stdout.write(stdin_line)
# Last update of statistics to ensure uniformity
processor.update_statistics()
# Do a final update; because of update frequency a final update in processing loop is not guaranteed
pygame.display.update()
# Keep the pygame window running so you can view the final result
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):
running = False
if __name__ == "__main__":
main()
| 0
| 1,108
| 0
| 4,806
| 0
| 0
| 0
| -45
| 179
|
c566c62bfc91343391f87b835b9e079719e2045b
| 24,813
|
py
|
Python
|
neo4j/_async/io/_bolt.py
|
matilda-me/neo4j-python-driver
|
4fb25a266841bf2a861f00d5dcf257bd5ae5c686
|
[
"Apache-2.0"
] | null | null | null |
neo4j/_async/io/_bolt.py
|
matilda-me/neo4j-python-driver
|
4fb25a266841bf2a861f00d5dcf257bd5ae5c686
|
[
"Apache-2.0"
] | null | null | null |
neo4j/_async/io/_bolt.py
|
matilda-me/neo4j-python-driver
|
4fb25a266841bf2a861f00d5dcf257bd5ae5c686
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logging import getLogger
from ..._async_compat.network import AsyncBoltSocket
# Set up logger
log = getLogger("neo4j")
AsyncBoltSocket.Bolt = AsyncBolt
| 35.548711
| 95
| 0.614476
|
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import asyncio
from collections import deque
from logging import getLogger
from time import perf_counter
from ..._async_compat.network import AsyncBoltSocket
from ..._async_compat.util import AsyncUtil
from ..._exceptions import (
BoltError,
BoltHandshakeError,
SocketDeadlineExceeded,
)
from ...addressing import Address
from ...api import (
ServerInfo,
Version,
)
from ...conf import PoolConfig
from ...exceptions import (
AuthError,
DriverError,
IncompleteCommit,
ServiceUnavailable,
SessionExpired,
)
from ...meta import get_user_agent
from ...packstream import (
Packer,
Unpacker,
)
from ._common import (
AsyncInbox,
CommitResponse,
Outbox,
)
# Set up logger
log = getLogger("neo4j")
class AsyncBolt:
""" Server connection for Bolt protocol.
A :class:`.Bolt` should be constructed following a
successful .open()
Bolt handshake and takes the socket over which
the handshake was carried out.
"""
MAGIC_PREAMBLE = b"\x60\x60\xB0\x17"
PROTOCOL_VERSION = None
# flag if connection needs RESET to go back to READY state
is_reset = False
# The socket
in_use = False
# When the connection was last put back into the pool
idle_since = float("-inf")
# The socket
_closing = False
_closed = False
# The socket
_defunct = False
#: The pool of which this connection is a member
pool = None
# Store the id of the most recently run query to be able to reduce sent bits by
# using the default (-1) to refer to the most recent query when pulling
# results for it.
most_recent_qid = None
def __init__(self, unresolved_address, sock, max_connection_lifetime, *,
auth=None, user_agent=None, routing_context=None):
self.unresolved_address = unresolved_address
self.socket = sock
self.server_info = ServerInfo(Address(sock.getpeername()),
self.PROTOCOL_VERSION)
# so far `connection.recv_timeout_seconds` is the only available
# configuration hint that exists. Therefore, all hints can be stored at
# connection level. This might change in the future.
self.configuration_hints = {}
self.outbox = Outbox()
self.inbox = AsyncInbox(self.socket, on_error=self._set_defunct_read)
self.packer = Packer(self.outbox)
self.unpacker = Unpacker(self.inbox)
self.responses = deque()
self._max_connection_lifetime = max_connection_lifetime
self._creation_timestamp = perf_counter()
self.routing_context = routing_context
self.idle_since = perf_counter()
# Determine the user agent
if user_agent:
self.user_agent = user_agent
else:
self.user_agent = get_user_agent()
# Determine auth details
if not auth:
self.auth_dict = {}
elif isinstance(auth, tuple) and 2 <= len(auth) <= 3:
from neo4j import Auth
self.auth_dict = vars(Auth("basic", *auth))
else:
try:
self.auth_dict = vars(auth)
except (KeyError, TypeError):
raise AuthError("Cannot determine auth details from %r" % auth)
# Check for missing password
try:
credentials = self.auth_dict["credentials"]
except KeyError:
pass
else:
if credentials is None:
raise AuthError("Password cannot be None")
def __del__(self):
if not asyncio.iscoroutinefunction(self.close):
self.close()
@property
@abc.abstractmethod
def supports_multiple_results(self):
""" Boolean flag to indicate if the connection version supports multiple
queries to be buffered on the server side (True) or if all results need
to be eagerly pulled before sending the next RUN (False).
"""
pass
@property
@abc.abstractmethod
def supports_multiple_databases(self):
""" Boolean flag to indicate if the connection version supports multiple
databases.
"""
pass
@classmethod
def protocol_handlers(cls, protocol_version=None):
""" Return a dictionary of available Bolt protocol handlers,
keyed by version tuple. If an explicit protocol version is
provided, the dictionary will contain either zero or one items,
depending on whether that version is supported. If no protocol
version is provided, all available versions will be returned.
:param protocol_version: tuple identifying a specific protocol
version (e.g. (3, 5)) or None
:return: dictionary of version tuple to handler class for all
relevant and supported protocol versions
:raise TypeError: if protocol version is not passed in a tuple
"""
# Carry out Bolt subclass imports locally to avoid circular dependency issues.
from ._bolt3 import AsyncBolt3
from ._bolt4 import (
AsyncBolt4x1,
AsyncBolt4x2,
AsyncBolt4x3,
AsyncBolt4x4,
)
from ._bolt5 import AsyncBolt5x0
handlers = {
AsyncBolt3.PROTOCOL_VERSION: AsyncBolt3,
# 4.0 unsupported because no space left in the handshake
AsyncBolt4x1.PROTOCOL_VERSION: AsyncBolt4x1,
AsyncBolt4x2.PROTOCOL_VERSION: AsyncBolt4x2,
AsyncBolt4x3.PROTOCOL_VERSION: AsyncBolt4x3,
AsyncBolt4x4.PROTOCOL_VERSION: AsyncBolt4x4,
AsyncBolt5x0.PROTOCOL_VERSION: AsyncBolt5x0,
}
if protocol_version is None:
return handlers
if not isinstance(protocol_version, tuple):
raise TypeError("Protocol version must be specified as a tuple")
if protocol_version in handlers:
return {protocol_version: handlers[protocol_version]}
return {}
@classmethod
def version_list(cls, versions, limit=4):
""" Return a list of supported protocol versions in order of
preference. The number of protocol versions (or ranges)
returned is limited to four.
"""
# In fact, 4.3 is the first version to support ranges. However, the
# range support got backported to 4.2. But even if the server is too
# old to have the backport, negotiating BOLT 4.1 is no problem as it's
# equivalent to 4.2
first_with_range_support = Version(4, 2)
result = []
for version in versions:
if (result
and version >= first_with_range_support
and result[-1][0] == version[0]
and result[-1][1][1] == version[1] + 1):
# can use range to encompass this version
result[-1][1][1] = version[1]
continue
result.append(Version(version[0], [version[1], version[1]]))
if len(result) == 4:
break
return result
@classmethod
def get_handshake(cls):
""" Return the supported Bolt versions as bytes.
The length is 16 bytes as specified in the Bolt version negotiation.
:return: bytes
"""
supported_versions = sorted(cls.protocol_handlers().keys(), reverse=True)
offered_versions = cls.version_list(supported_versions)
return b"".join(version.to_bytes() for version in offered_versions).ljust(16, b"\x00")
@classmethod
async def ping(cls, address, *, timeout=None, **config):
""" Attempt to establish a Bolt connection, returning the
agreed Bolt protocol version if successful.
"""
config = PoolConfig.consume(config)
try:
s, protocol_version, handshake, data = \
await AsyncBoltSocket.connect(
address,
timeout=timeout,
custom_resolver=config.resolver,
ssl_context=config.get_ssl_context(),
keep_alive=config.keep_alive,
)
except (ServiceUnavailable, SessionExpired, BoltHandshakeError):
return None
else:
AsyncBoltSocket.close_socket(s)
return protocol_version
@classmethod
async def open(
cls, address, *, auth=None, timeout=None, routing_context=None,
**pool_config
):
"""Open a new Bolt connection to a given server address.
:param address:
:param auth:
:param timeout: the connection timeout in seconds
:param routing_context: dict containing routing context
:param pool_config:
:return: connected AsyncBolt instance
:raise BoltHandshakeError:
raised if the Bolt Protocol can not negotiate a protocol version.
:raise ServiceUnavailable: raised if there was a connection issue.
"""
def time_remaining():
if timeout is None:
return None
t = timeout - (perf_counter() - t0)
return t if t > 0 else 0
t0 = perf_counter()
pool_config = PoolConfig.consume(pool_config)
socket_connection_timeout = pool_config.connection_timeout
if socket_connection_timeout is None:
socket_connection_timeout = time_remaining()
elif timeout is not None:
socket_connection_timeout = min(pool_config.connection_timeout,
time_remaining())
s, pool_config.protocol_version, handshake, data = \
await AsyncBoltSocket.connect(
address,
timeout=socket_connection_timeout,
custom_resolver=pool_config.resolver,
ssl_context=pool_config.get_ssl_context(),
keep_alive=pool_config.keep_alive,
)
# Carry out Bolt subclass imports locally to avoid circular dependency
# issues.
if pool_config.protocol_version == (3, 0):
from ._bolt3 import AsyncBolt3
bolt_cls = AsyncBolt3
# Implementation for 4.0 exists, but there was no space left in the
# handshake to offer this version to the server. Hence, the server
# should never request us to speak bolt 4.0.
# elif pool_config.protocol_version == (4, 0):
# from ._bolt4 import AsyncBolt4x0
# bolt_cls = AsyncBolt4x0
elif pool_config.protocol_version == (4, 1):
from ._bolt4 import AsyncBolt4x1
bolt_cls = AsyncBolt4x1
elif pool_config.protocol_version == (4, 2):
from ._bolt4 import AsyncBolt4x2
bolt_cls = AsyncBolt4x2
elif pool_config.protocol_version == (4, 3):
from ._bolt4 import AsyncBolt4x3
bolt_cls = AsyncBolt4x3
elif pool_config.protocol_version == (4, 4):
from ._bolt4 import AsyncBolt4x4
bolt_cls = AsyncBolt4x4
elif pool_config.protocol_version == (5, 0):
from ._bolt5 import AsyncBolt5x0
bolt_cls = AsyncBolt5x0
else:
log.debug("[#%04X] S: <CLOSE>", s.getsockname()[1])
AsyncBoltSocket.close_socket(s)
supported_versions = cls.protocol_handlers().keys()
raise BoltHandshakeError(
"The Neo4J server does not support communication with this "
"driver. This driver has support for Bolt protocols "
"{}".format(tuple(map(str, supported_versions))),
address=address, request_data=handshake, response_data=data
)
connection = bolt_cls(
address, s, pool_config.max_connection_lifetime, auth=auth,
user_agent=pool_config.user_agent, routing_context=routing_context
)
try:
connection.socket.set_deadline(time_remaining())
try:
await connection.hello()
except SocketDeadlineExceeded as e:
# connection._defunct = True
raise ServiceUnavailable(
"Timeout during initial handshake occurred"
) from e
finally:
connection.socket.set_deadline(None)
except Exception:
await connection.close_non_blocking()
raise
return connection
@property
@abc.abstractmethod
def encrypted(self):
pass
@property
@abc.abstractmethod
def der_encoded_server_certificate(self):
pass
@property
@abc.abstractmethod
def local_port(self):
pass
@abc.abstractmethod
async def hello(self):
""" Appends a HELLO message to the outgoing queue, sends it and consumes
all remaining messages.
"""
pass
@abc.abstractmethod
async def route(self, database=None, imp_user=None, bookmarks=None):
""" Fetch a routing table from the server for the given
`database`. For Bolt 4.3 and above, this appends a ROUTE
message; for earlier versions, a procedure call is made via
the regular Cypher execution mechanism. In all cases, this is
sent to the network, and a response is fetched.
:param database: database for which to fetch a routing table
Requires Bolt 4.0+.
:param imp_user: the user to impersonate
Requires Bolt 4.4+.
:param bookmarks: iterable of bookmark values after which this
transaction should begin
:return: dictionary of raw routing data
"""
pass
@abc.abstractmethod
def run(self, query, parameters=None, mode=None, bookmarks=None,
metadata=None, timeout=None, db=None, imp_user=None, **handlers):
""" Appends a RUN message to the output queue.
:param query: Cypher query string
:param parameters: dictionary of Cypher parameters
:param mode: access mode for routing - "READ" or "WRITE" (default)
:param bookmarks: iterable of bookmark values after which this transaction should begin
:param metadata: custom metadata dictionary to attach to the transaction
:param timeout: timeout for transaction execution (seconds)
:param db: name of the database against which to begin the transaction
Requires Bolt 4.0+.
:param imp_user: the user to impersonate
Requires Bolt 4.4+.
:param handlers: handler functions passed into the returned Response object
:return: Response object
"""
pass
@abc.abstractmethod
def discard(self, n=-1, qid=-1, **handlers):
""" Appends a DISCARD message to the output queue.
:param n: number of records to discard, default = -1 (ALL)
:param qid: query ID to discard for, default = -1 (last query)
:param handlers: handler functions passed into the returned Response object
:return: Response object
"""
pass
@abc.abstractmethod
def pull(self, n=-1, qid=-1, **handlers):
""" Appends a PULL message to the output queue.
:param n: number of records to pull, default = -1 (ALL)
:param qid: query ID to pull for, default = -1 (last query)
:param handlers: handler functions passed into the returned Response object
:return: Response object
"""
pass
@abc.abstractmethod
def begin(self, mode=None, bookmarks=None, metadata=None, timeout=None,
db=None, imp_user=None, **handlers):
""" Appends a BEGIN message to the output queue.
:param mode: access mode for routing - "READ" or "WRITE" (default)
:param bookmarks: iterable of bookmark values after which this transaction should begin
:param metadata: custom metadata dictionary to attach to the transaction
:param timeout: timeout for transaction execution (seconds)
:param db: name of the database against which to begin the transaction
Requires Bolt 4.0+.
:param imp_user: the user to impersonate
Requires Bolt 4.4+
:param handlers: handler functions passed into the returned Response object
:return: Response object
"""
pass
@abc.abstractmethod
def commit(self, **handlers):
""" Appends a COMMIT message to the output queue."""
pass
@abc.abstractmethod
def rollback(self, **handlers):
""" Appends a ROLLBACK message to the output queue."""
pass
@abc.abstractmethod
async def reset(self):
""" Appends a RESET message to the outgoing queue, sends it and consumes
all remaining messages.
"""
pass
@abc.abstractmethod
def goodbye(self):
"""Append a GOODBYE message to the outgoing queued."""
pass
def _append(self, signature, fields=(), response=None):
""" Appends a message to the outgoing queue.
:param signature: the signature of the message
:param fields: the fields of the message as a tuple
:param response: a response object to handle callbacks
"""
with self.outbox.tmp_buffer():
self.packer.pack_struct(signature, fields)
self.outbox.wrap_message()
self.responses.append(response)
async def _send_all(self):
data = self.outbox.view()
if data:
try:
await self.socket.sendall(data)
except OSError as error:
await self._set_defunct_write(error)
self.outbox.clear()
self.idle_since = perf_counter()
async def send_all(self):
""" Send all queued messages to the server.
"""
if self.closed():
raise ServiceUnavailable(
"Failed to write to closed connection {!r} ({!r})".format(
self.unresolved_address, self.server_info.address
)
)
if self.defunct():
raise ServiceUnavailable(
"Failed to write to defunct connection {!r} ({!r})".format(
self.unresolved_address, self.server_info.address
)
)
await self._send_all()
@abc.abstractmethod
async def _process_message(self, details, summary_signature,
summary_metadata):
""" Receive at most one message from the server, if available.
:return: 2-tuple of number of detail messages and number of summary
messages fetched
"""
pass
async def fetch_message(self):
if self._closed:
raise ServiceUnavailable(
"Failed to read from closed connection {!r} ({!r})".format(
self.unresolved_address, self.server_info.address
)
)
if self._defunct:
raise ServiceUnavailable(
"Failed to read from defunct connection {!r} ({!r})".format(
self.unresolved_address, self.server_info.address
)
)
if not self.responses:
return 0, 0
# Receive exactly one message
details, summary_signature, summary_metadata = \
await AsyncUtil.next(self.inbox)
res = await self._process_message(
details, summary_signature, summary_metadata
)
self.idle_since = perf_counter()
return res
async def fetch_all(self):
""" Fetch all outstanding messages.
:return: 2-tuple of number of detail messages and number of summary
messages fetched
"""
detail_count = summary_count = 0
while self.responses:
response = self.responses[0]
while not response.complete:
detail_delta, summary_delta = await self.fetch_message()
detail_count += detail_delta
summary_count += summary_delta
return detail_count, summary_count
async def _set_defunct_read(self, error=None, silent=False):
message = "Failed to read from defunct connection {!r} ({!r})".format(
self.unresolved_address, self.server_info.address
)
await self._set_defunct(message, error=error, silent=silent)
async def _set_defunct_write(self, error=None, silent=False):
message = "Failed to write data to connection {!r} ({!r})".format(
self.unresolved_address, self.server_info.address
)
await self._set_defunct(message, error=error, silent=silent)
async def _set_defunct(self, message, error=None, silent=False):
from ._pool import AsyncBoltPool
direct_driver = isinstance(self.pool, AsyncBoltPool)
if error:
log.debug("[#%04X] %s", self.socket.getsockname()[1], error)
log.error(message)
# We were attempting to receive data but the connection
# has unexpectedly terminated. So, we need to close the
# connection from the client side, and remove the address
# from the connection pool.
self._defunct = True
if not self._closing:
# If we fail while closing the connection, there is no need to
# remove the connection from the pool, nor to try to close the
# connection again.
await self.close()
if self.pool:
await self.pool.deactivate(address=self.unresolved_address)
# Iterate through the outstanding responses, and if any correspond
# to COMMIT requests then raise an error to signal that we are
# unable to confirm that the COMMIT completed successfully.
if silent:
return
for response in self.responses:
if isinstance(response, CommitResponse):
if error:
raise IncompleteCommit(message) from error
else:
raise IncompleteCommit(message)
if direct_driver:
if error:
raise ServiceUnavailable(message) from error
else:
raise ServiceUnavailable(message)
else:
if error:
raise SessionExpired(message) from error
else:
raise SessionExpired(message)
def stale(self):
return (self._stale
or (0 <= self._max_connection_lifetime
<= perf_counter() - self._creation_timestamp))
_stale = False
def set_stale(self):
self._stale = True
async def close(self):
"""Close the connection."""
if self._closed or self._closing:
return
self._closing = True
if not self._defunct:
self.goodbye()
try:
await self._send_all()
except (OSError, BoltError, DriverError):
pass
log.debug("[#%04X] C: <CLOSE>", self.local_port)
try:
self.socket.close()
except OSError:
pass
finally:
self._closed = True
async def close_non_blocking(self):
"""Set the socket to non-blocking and close it.
This will try to send the `GOODBYE` message (given the socket is not
marked as defunct). However, should the write operation require
blocking (e.g., a full network buffer), then the socket will be closed
immediately (without `GOODBYE` message).
"""
if self._closed or self._closing:
return
self.socket.settimeout(0)
await self.close()
def closed(self):
return self._closed
def defunct(self):
return self._defunct
def is_idle_for(self, timeout):
"""Check if connection has been idle for at least the given timeout.
:param timeout: timeout in seconds
:type timeout: float
:rtype: bool
"""
return perf_counter() - self.idle_since > timeout
AsyncBoltSocket.Bolt = AsyncBolt
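# Illustrative usage sketch (not part of the driver source): the abstract
# methods above correspond to Bolt protocol messages (HELLO, RUN, PULL,
# BEGIN, COMMIT, ...). Application code never calls them directly; the public
# async API below is what ultimately queues RUN/PULL via _append() and
# flushes them with send_all()/fetch_all(). The URI and credentials are
# placeholders, and the session API is assumed to match the public neo4j
# async driver this module belongs to.
from neo4j import AsyncGraphDatabase
async def bolt_roundtrip_example():
    driver = AsyncGraphDatabase.driver("bolt://localhost:7687",
                                       auth=("neo4j", "secret"))
    try:
        async with driver.session() as session:
            result = await session.run("RETURN 1 AS answer")
            record = await result.single()
            return record["answer"]
    finally:
        await driver.close()
# import asyncio; asyncio.run(bolt_roundtrip_example())  # needs a live server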
| 0
| 13,086
| 5,480
| 4,783
| 0
| 0
| 0
| 351
| 310
|
9578398d67c4ab380e45b5e1357b9a225ddd1afc
| 1,633
|
py
|
Python
|
examples/ex09/process.py
|
oditorium/PageBuilder
|
74fa95285d41ed390f46f22129a45900c1d8b474
|
[
"MIT"
] | null | null | null |
examples/ex09/process.py
|
oditorium/PageBuilder
|
74fa95285d41ed390f46f22129a45900c1d8b474
|
[
"MIT"
] | null | null | null |
examples/ex09/process.py
|
oditorium/PageBuilder
|
74fa95285d41ed390f46f22129a45900c1d8b474
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
process data generated by PageBuilder
"""
import json
INFN = "document.json"
OUTFN = "_DATA.json"
print ("PROCESSING ===========================================================")
########################################################################
## READING THE INPUT FILE
print ("reading", INFN)
with open(INFN, "r") as f: document_json = f.read()
print ("parsing", INFN)
document_data = json.loads(document_json)
print ("analysing {} ({} records)".format(INFN, len(document_data)))
data = []
FIELDS = ["_filename", "data"]
for r in document_data:
data.append({ k: r.get(k, None) for k in FIELDS})
print ("extracted {} records".format(len(data)))
print ("EXTRACTED DATA:", data)
########################################################################
## PROCESSING
out_sums = {}
for i in range(len(data)):
d = data[i]
sdata = d['data'].split(",")
sdata = map(int, sdata)
out_sums[d["_filename"]] = {"sum": sum(sdata)}
########################################################################
## WRITING THE OUTPUT FILE
out = {
"_select": out_sums,
# the key `_select` is special; it MUST contain a dict where the
# dict keys are the filename (from the `_filename` field); when
# a specific file `filename` is processed, the content of
# out["_select"][filename] (which must be a dict) is added to
    # the environment, and can be used in the template
#"sums": 1,
}
with open(OUTFN, "w") as f: f.write(json.dumps(out))
print("OUT:", out)
print ("END PROCESSING =======================================================")
| 27.677966
| 80
| 0.509492
|
#!/usr/bin/env python3
"""
process data generated by PageBuilder
"""
import json
INFN = "document.json"
OUTFN = "_DATA.json"
print ("PROCESSING ===========================================================")
########################################################################
## READING THE INPUT FILE
print ("reading", INFN)
with open(INFN, "r") as f: document_json = f.read()
print ("parsing", INFN)
document_data = json.loads(document_json)
print ("analysing {} ({} records)".format(INFN, len(document_data)))
data = []
FIELDS = ["_filename", "data"]
for r in document_data:
data.append({ k: r.get(k, None) for k in FIELDS})
print ("extracted {} records".format(len(data)))
print ("EXTRACTED DATA:", data)
########################################################################
## PROCESSING
out_sums = {}
for i in range(len(data)):
d = data[i]
sdata = d['data'].split(",")
sdata = map(int, sdata)
out_sums[d["_filename"]] = {"sum": sum(sdata)}
########################################################################
## WRITING THE OUTPUT FILE
out = {
"_select": out_sums,
# the key `_select` is special; it MUST contain a dict where the
# dict keys are the filename (from the `_filename` field); when
# a specific file `filename` is processed, the content of
# out["_select"][filename] (which must be a dict) is added to
    # the environment, and can be used in the template
#"sums": 1,
}
with open(OUTFN, "w") as f: f.write(json.dumps(out))
print("OUT:", out)
print ("END PROCESSING =======================================================")
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
8a8cfc04c5c7ae8b231f967292f73edd9f04f568
| 141
|
py
|
Python
|
oogli/Texture.py
|
brianbruggeman/oogli
|
6a6f681468d609035924ede27d895afcc9d432b6
|
[
"Apache-2.0"
] | 3
|
2016-01-18T22:10:51.000Z
|
2016-06-10T16:02:55.000Z
|
oogli/Texture.py
|
brianbruggeman/oogli
|
6a6f681468d609035924ede27d895afcc9d432b6
|
[
"Apache-2.0"
] | null | null | null |
oogli/Texture.py
|
brianbruggeman/oogli
|
6a6f681468d609035924ede27d895afcc9d432b6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
| 15.666667
| 38
| 0.560284
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class Texture(object):
def __init__(self, *args, **kwds):
'''TODO: something'''
| 0
| 0
| 0
| 71
| 0
| 0
| 0
| 0
| 23
|
fc7058a10d7e658bef7595f63f5638b9966e1a4c
| 6,236
|
py
|
Python
|
neutron/common/config.py
|
plumgrid/plumgrid-quantum
|
dbd7e472ca28d22d694eeeba47e0738985583961
|
[
"Apache-2.0"
] | 1
|
2016-04-23T21:33:31.000Z
|
2016-04-23T21:33:31.000Z
|
neutron/common/config.py
|
plumgrid/plumgrid-quantum
|
dbd7e472ca28d22d694eeeba47e0738985583961
|
[
"Apache-2.0"
] | null | null | null |
neutron/common/config.py
|
plumgrid/plumgrid-quantum
|
dbd7e472ca28d22d694eeeba47e0738985583961
|
[
"Apache-2.0"
] | 4
|
2015-04-14T10:06:51.000Z
|
2019-10-02T01:28:34.000Z
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for configuring Neutron
"""
import os
from oslo.config import cfg
from paste import deploy
from neutron.common import utils
from neutron.openstack.common.db.sqlalchemy import session as db_session
from neutron.openstack.common import log as logging
from neutron.openstack.common import rpc
LOG = logging.getLogger(__name__)
core_opts = [
cfg.StrOpt('bind_host', default='0.0.0.0',
help=_("The host IP to bind to")),
cfg.IntOpt('bind_port', default=9696,
help=_("The port to bind to")),
cfg.StrOpt('api_paste_config', default="api-paste.ini",
help=_("The API paste config file to use")),
cfg.StrOpt('api_extensions_path', default="",
help=_("The path for API extensions")),
cfg.StrOpt('policy_file', default="policy.json",
help=_("The policy file to use")),
cfg.StrOpt('auth_strategy', default='keystone',
help=_("The type of authentication to use")),
cfg.StrOpt('core_plugin',
help=_("The core plugin Neutron will use")),
cfg.ListOpt('service_plugins', default=[],
help=_("The service plugins Neutron will use")),
cfg.StrOpt('base_mac', default="fa:16:3e:00:00:00",
help=_("The base MAC address Neutron will use for VIFs")),
cfg.IntOpt('mac_generation_retries', default=16,
help=_("How many times Neutron will retry MAC generation")),
cfg.BoolOpt('allow_bulk', default=True,
help=_("Allow the usage of the bulk API")),
cfg.BoolOpt('allow_pagination', default=False,
help=_("Allow the usage of the pagination")),
cfg.BoolOpt('allow_sorting', default=False,
help=_("Allow the usage of the sorting")),
cfg.StrOpt('pagination_max_limit', default="-1",
help=_("The maximum number of items returned in a single "
"response, value was 'infinite' or negative integer "
"means no limit")),
cfg.IntOpt('max_dns_nameservers', default=5,
help=_("Maximum number of DNS nameservers")),
cfg.IntOpt('max_subnet_host_routes', default=20,
help=_("Maximum number of host routes per subnet")),
cfg.IntOpt('max_fixed_ips_per_port', default=5,
help=_("Maximum number of fixed ips per port")),
cfg.IntOpt('dhcp_lease_duration', default=86400,
deprecated_name='dhcp_lease_time',
help=_("DHCP lease duration")),
cfg.BoolOpt('dhcp_agent_notification', default=True,
help=_("Allow sending resource operation"
" notification to DHCP agent")),
cfg.BoolOpt('allow_overlapping_ips', default=False,
help=_("Allow overlapping IP support in Neutron")),
cfg.StrOpt('host', default=utils.get_hostname(),
help=_("The hostname Neutron is running on")),
cfg.BoolOpt('force_gateway_on_subnet', default=False,
help=_("Ensure that configured gateway is on subnet")),
]
core_cli_opts = [
cfg.StrOpt('state_path',
default='/var/lib/neutron',
help=_("Where to store Neutron state files. "
"This directory must be writable by the agent.")),
]
# Register the configuration options
cfg.CONF.register_opts(core_opts)
cfg.CONF.register_cli_opts(core_cli_opts)
# Ensure that the control exchange is set correctly
rpc.set_defaults(control_exchange='neutron')
_SQL_CONNECTION_DEFAULT = 'sqlite://'
# Update the default QueuePool parameters. These can be tweaked by the
# configuration variables - max_pool_size, max_overflow and pool_timeout
db_session.set_defaults(sql_connection=_SQL_CONNECTION_DEFAULT,
sqlite_db='', max_pool_size=10,
max_overflow=20, pool_timeout=10)
def setup_logging(conf):
"""Sets up the logging options for a log with supplied name.
:param conf: a cfg.ConfOpts object
"""
product_name = "neutron"
logging.setup(product_name)
LOG.info(_("Logging enabled!"))
def load_paste_app(app_name):
"""Builds and returns a WSGI app from a paste config file.
:param app_name: Name of the application to load
:raises ConfigFilesNotFoundError when config file cannot be located
:raises RuntimeError when application cannot be loaded from config file
"""
config_path = cfg.CONF.find_file(cfg.CONF.api_paste_config)
if not config_path:
raise cfg.ConfigFilesNotFoundError(
config_files=[cfg.CONF.api_paste_config])
config_path = os.path.abspath(config_path)
LOG.info(_("Config paste file: %s"), config_path)
try:
app = deploy.loadapp("config:%s" % config_path, name=app_name)
except (LookupError, ImportError):
msg = (_("Unable to load %(app_name)s from "
"configuration file %(config_path)s.") %
{'app_name': app_name,
'config_path': config_path})
LOG.exception(msg)
raise RuntimeError(msg)
return app
| 39.974359
| 78
| 0.657473
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for configuring Neutron
"""
import os
from oslo.config import cfg
from paste import deploy
from neutron.api.v2 import attributes
from neutron.common import utils
from neutron.openstack.common.db.sqlalchemy import session as db_session
from neutron.openstack.common import log as logging
from neutron.openstack.common import rpc
from neutron.version import version_info as neutron_version
LOG = logging.getLogger(__name__)
core_opts = [
cfg.StrOpt('bind_host', default='0.0.0.0',
help=_("The host IP to bind to")),
cfg.IntOpt('bind_port', default=9696,
help=_("The port to bind to")),
cfg.StrOpt('api_paste_config', default="api-paste.ini",
help=_("The API paste config file to use")),
cfg.StrOpt('api_extensions_path', default="",
help=_("The path for API extensions")),
cfg.StrOpt('policy_file', default="policy.json",
help=_("The policy file to use")),
cfg.StrOpt('auth_strategy', default='keystone',
help=_("The type of authentication to use")),
cfg.StrOpt('core_plugin',
help=_("The core plugin Neutron will use")),
cfg.ListOpt('service_plugins', default=[],
help=_("The service plugins Neutron will use")),
cfg.StrOpt('base_mac', default="fa:16:3e:00:00:00",
help=_("The base MAC address Neutron will use for VIFs")),
cfg.IntOpt('mac_generation_retries', default=16,
help=_("How many times Neutron will retry MAC generation")),
cfg.BoolOpt('allow_bulk', default=True,
help=_("Allow the usage of the bulk API")),
cfg.BoolOpt('allow_pagination', default=False,
help=_("Allow the usage of the pagination")),
cfg.BoolOpt('allow_sorting', default=False,
help=_("Allow the usage of the sorting")),
cfg.StrOpt('pagination_max_limit', default="-1",
help=_("The maximum number of items returned in a single "
"response, value was 'infinite' or negative integer "
"means no limit")),
cfg.IntOpt('max_dns_nameservers', default=5,
help=_("Maximum number of DNS nameservers")),
cfg.IntOpt('max_subnet_host_routes', default=20,
help=_("Maximum number of host routes per subnet")),
cfg.IntOpt('max_fixed_ips_per_port', default=5,
help=_("Maximum number of fixed ips per port")),
cfg.IntOpt('dhcp_lease_duration', default=86400,
deprecated_name='dhcp_lease_time',
help=_("DHCP lease duration")),
cfg.BoolOpt('dhcp_agent_notification', default=True,
help=_("Allow sending resource operation"
" notification to DHCP agent")),
cfg.BoolOpt('allow_overlapping_ips', default=False,
help=_("Allow overlapping IP support in Neutron")),
cfg.StrOpt('host', default=utils.get_hostname(),
help=_("The hostname Neutron is running on")),
cfg.BoolOpt('force_gateway_on_subnet', default=False,
help=_("Ensure that configured gateway is on subnet")),
]
core_cli_opts = [
cfg.StrOpt('state_path',
default='/var/lib/neutron',
help=_("Where to store Neutron state files. "
"This directory must be writable by the agent.")),
]
# Register the configuration options
cfg.CONF.register_opts(core_opts)
cfg.CONF.register_cli_opts(core_cli_opts)
# Ensure that the control exchange is set correctly
rpc.set_defaults(control_exchange='neutron')
_SQL_CONNECTION_DEFAULT = 'sqlite://'
# Update the default QueuePool parameters. These can be tweaked by the
# configuration variables - max_pool_size, max_overflow and pool_timeout
db_session.set_defaults(sql_connection=_SQL_CONNECTION_DEFAULT,
sqlite_db='', max_pool_size=10,
max_overflow=20, pool_timeout=10)
def parse(args):
cfg.CONF(args=args, project='neutron',
version='%%prog %s' % neutron_version.release_string())
# Validate that the base_mac is of the correct format
msg = attributes._validate_regex(cfg.CONF.base_mac,
attributes.MAC_PATTERN)
if msg:
msg = _("Base MAC: %s") % msg
raise Exception(msg)
def setup_logging(conf):
"""Sets up the logging options for a log with supplied name.
:param conf: a cfg.ConfOpts object
"""
product_name = "neutron"
logging.setup(product_name)
LOG.info(_("Logging enabled!"))
def load_paste_app(app_name):
"""Builds and returns a WSGI app from a paste config file.
:param app_name: Name of the application to load
:raises ConfigFilesNotFoundError when config file cannot be located
:raises RuntimeError when application cannot be loaded from config file
"""
config_path = cfg.CONF.find_file(cfg.CONF.api_paste_config)
if not config_path:
raise cfg.ConfigFilesNotFoundError(
config_files=[cfg.CONF.api_paste_config])
config_path = os.path.abspath(config_path)
LOG.info(_("Config paste file: %s"), config_path)
try:
app = deploy.loadapp("config:%s" % config_path, name=app_name)
except (LookupError, ImportError):
msg = (_("Unable to load %(app_name)s from "
"configuration file %(config_path)s.") %
{'app_name': app_name,
'config_path': config_path})
LOG.exception(msg)
raise RuntimeError(msg)
return app
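# Illustrative usage sketch (an assumption, not part of the neutron source):
# a minimal entry point would evaluate the configuration, enable logging and
# then build the WSGI app from the paste config using the helpers above. The
# app name 'neutron' and the argv handling are placeholders; the real
# neutron-server wraps these helpers in its own service launcher.
def build_wsgi_app(argv):
    parse(argv)                      # populate and validate cfg.CONF
    setup_logging(cfg.CONF)
    return load_paste_app('neutron')
# import sys; app = build_wsgi_app(sys.argv[1:])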
| 0
| 0
| 0
| 0
| 0
| 362
| 0
| 54
| 68
|
b12bdd3b7613ac6f1ca82e2ac22d65ec1929d997
| 3,978
|
py
|
Python
|
code/tmp_rtrip/test/test_structmembers.py
|
emilyemorehouse/ast-and-me
|
3f58117512e125e1ecbe3c72f2f0d26adb80b7b3
|
[
"MIT"
] | 24
|
2018-01-23T05:28:40.000Z
|
2021-04-13T20:52:59.000Z
|
code/tmp_rtrip/test/test_structmembers.py
|
emilyemorehouse/ast-and-me
|
3f58117512e125e1ecbe3c72f2f0d26adb80b7b3
|
[
"MIT"
] | 17
|
2017-12-21T18:32:31.000Z
|
2018-12-18T17:09:50.000Z
|
code/tmp_rtrip/test/test_structmembers.py
|
emilyemorehouse/ast-and-me
|
3f58117512e125e1ecbe3c72f2f0d26adb80b7b3
|
[
"MIT"
] | null | null | null |
import unittest
from test import support
support.import_module('_testcapi')
from _testcapi import _test_structmembersType
ts = _test_structmembersType(False, 1, 2, 3, 4, 5, 6, 7, 8, 23, 9.99999,
10.101010101, 'hi')
if __name__ == '__main__':
unittest.main()
| 35.20354
| 232
| 0.65083
|
import unittest
from test import support
support.import_module('_testcapi')
from _testcapi import _test_structmembersType, CHAR_MAX, CHAR_MIN, UCHAR_MAX, SHRT_MAX, SHRT_MIN, USHRT_MAX, INT_MAX, INT_MIN, UINT_MAX, LONG_MAX, LONG_MIN, ULONG_MAX, LLONG_MAX, LLONG_MIN, ULLONG_MAX, PY_SSIZE_T_MAX, PY_SSIZE_T_MIN
ts = _test_structmembersType(False, 1, 2, 3, 4, 5, 6, 7, 8, 23, 9.99999,
10.101010101, 'hi')
class ReadWriteTests(unittest.TestCase):
def test_bool(self):
ts.T_BOOL = True
self.assertEqual(ts.T_BOOL, True)
ts.T_BOOL = False
self.assertEqual(ts.T_BOOL, False)
self.assertRaises(TypeError, setattr, ts, 'T_BOOL', 1)
def test_byte(self):
ts.T_BYTE = CHAR_MAX
self.assertEqual(ts.T_BYTE, CHAR_MAX)
ts.T_BYTE = CHAR_MIN
self.assertEqual(ts.T_BYTE, CHAR_MIN)
ts.T_UBYTE = UCHAR_MAX
self.assertEqual(ts.T_UBYTE, UCHAR_MAX)
def test_short(self):
ts.T_SHORT = SHRT_MAX
self.assertEqual(ts.T_SHORT, SHRT_MAX)
ts.T_SHORT = SHRT_MIN
self.assertEqual(ts.T_SHORT, SHRT_MIN)
ts.T_USHORT = USHRT_MAX
self.assertEqual(ts.T_USHORT, USHRT_MAX)
def test_int(self):
ts.T_INT = INT_MAX
self.assertEqual(ts.T_INT, INT_MAX)
ts.T_INT = INT_MIN
self.assertEqual(ts.T_INT, INT_MIN)
ts.T_UINT = UINT_MAX
self.assertEqual(ts.T_UINT, UINT_MAX)
def test_long(self):
ts.T_LONG = LONG_MAX
self.assertEqual(ts.T_LONG, LONG_MAX)
ts.T_LONG = LONG_MIN
self.assertEqual(ts.T_LONG, LONG_MIN)
ts.T_ULONG = ULONG_MAX
self.assertEqual(ts.T_ULONG, ULONG_MAX)
def test_py_ssize_t(self):
ts.T_PYSSIZET = PY_SSIZE_T_MAX
self.assertEqual(ts.T_PYSSIZET, PY_SSIZE_T_MAX)
ts.T_PYSSIZET = PY_SSIZE_T_MIN
self.assertEqual(ts.T_PYSSIZET, PY_SSIZE_T_MIN)
@unittest.skipUnless(hasattr(ts, 'T_LONGLONG'), 'long long not present')
def test_longlong(self):
ts.T_LONGLONG = LLONG_MAX
self.assertEqual(ts.T_LONGLONG, LLONG_MAX)
ts.T_LONGLONG = LLONG_MIN
self.assertEqual(ts.T_LONGLONG, LLONG_MIN)
ts.T_ULONGLONG = ULLONG_MAX
self.assertEqual(ts.T_ULONGLONG, ULLONG_MAX)
ts.T_LONGLONG = 3
self.assertEqual(ts.T_LONGLONG, 3)
ts.T_ULONGLONG = 4
self.assertEqual(ts.T_ULONGLONG, 4)
def test_bad_assignments(self):
integer_attributes = ['T_BOOL', 'T_BYTE', 'T_UBYTE', 'T_SHORT',
'T_USHORT', 'T_INT', 'T_UINT', 'T_LONG', 'T_ULONG', 'T_PYSSIZET']
if hasattr(ts, 'T_LONGLONG'):
integer_attributes.extend(['T_LONGLONG', 'T_ULONGLONG'])
for nonint in (None, 3.2j, 'full of eels', {}, []):
for attr in integer_attributes:
self.assertRaises(TypeError, setattr, ts, attr, nonint)
def test_inplace_string(self):
self.assertEqual(ts.T_STRING_INPLACE, 'hi')
self.assertRaises(TypeError, setattr, ts, 'T_STRING_INPLACE', 's')
self.assertRaises(TypeError, delattr, ts, 'T_STRING_INPLACE')
class TestWarnings(unittest.TestCase):
def test_byte_max(self):
with support.check_warnings(('', RuntimeWarning)):
ts.T_BYTE = CHAR_MAX + 1
def test_byte_min(self):
with support.check_warnings(('', RuntimeWarning)):
ts.T_BYTE = CHAR_MIN - 1
def test_ubyte_max(self):
with support.check_warnings(('', RuntimeWarning)):
ts.T_UBYTE = UCHAR_MAX + 1
def test_short_max(self):
with support.check_warnings(('', RuntimeWarning)):
ts.T_SHORT = SHRT_MAX + 1
def test_short_min(self):
with support.check_warnings(('', RuntimeWarning)):
ts.T_SHORT = SHRT_MIN - 1
def test_ushort_max(self):
with support.check_warnings(('', RuntimeWarning)):
ts.T_USHORT = USHRT_MAX + 1
if __name__ == '__main__':
unittest.main()
| 0
| 479
| 0
| 2,995
| 0
| 0
| 0
| 187
| 46
|
ace8a02a07baa1d3676ee33620ccb26e1bf748c5
| 5,705
|
py
|
Python
|
src/predict.py
|
jamesmcclain/algae-model
|
45e3e83544034022aba16ad1ed254f1445e4bb1b
|
[
"MIT"
] | null | null | null |
src/predict.py
|
jamesmcclain/algae-model
|
45e3e83544034022aba16ad1ed254f1445e4bb1b
|
[
"MIT"
] | null | null | null |
src/predict.py
|
jamesmcclain/algae-model
|
45e3e83544034022aba16ad1ed254f1445e4bb1b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import copy
import logging
import sys
import warnings
import numpy as np
import rasterio as rio
import torch
import torch.hub
import tqdm
from rasterio.windows import Window
BACKBONES = [
'vgg16', 'densenet161', 'shufflenet_v2_x1_0', 'mobilenet_v2',
'mobilenet_v3_large', 'mobilenet_v3_small', 'resnet18', 'resnet34',
'resnet50', 'resnet101', 'resnet152', 'efficientnet_b0', 'efficientnet_b1',
'efficientnet_b2', 'efficientnet_b3', 'efficientnet_b4', 'efficientnet_b5',
'efficientnet_b6', 'efficientnet_b7', 'fpn_resnet18', 'fpn_resnet34',
'fpn_resnet50'
]
def cli_parser():
    # Command-line options for the prediction script; defaults mirror the
    # values used elsewhere in this file.
    parser = argparse.ArgumentParser()
    parser.add_argument('--backbone', required=True, type=str, choices=BACKBONES)
    parser.add_argument('--chunksize', required=False, type=int, default=256)
    parser.add_argument('--device', required=False, type=str, default='cuda', choices=['cuda', 'cpu'])
    parser.add_argument('--infile', required=True, type=str, nargs='+')
    parser.add_argument('--outfile', required=False, default=None, type=str, nargs='+')
    parser.add_argument('--prescale', required=False, type=int, default=1)
    parser.add_argument('--pth-load', required=True, type=str)
    parser.add_argument('--stride', required=False, type=int, default=13)
    parser.add_argument('--window-size', required=False, type=int, default=32)
    parser.add_argument('--ndwi-mask', required=False, dest='ndwi_mask', action='store_true')
    parser.set_defaults(ndwi_mask=False)
    return parser
if __name__ == '__main__':
warnings.filterwarnings('ignore')
args = cli_parser().parse_args()
logging.basicConfig(stream=sys.stderr, level=logging.INFO, format='%(asctime)-15s %(message)s')
log = logging.getLogger()
n = args.window_size
device = torch.device(args.device)
model = torch.hub.load('jamesmcclain/algae-classifier:730726f5bccc679fa334da91fe4dc4cb71a35208',
'make_algae_model',
in_channels=[4, 12, 224],
prescale=args.prescale,
backbone_str=args.backbone,
pretrained=False)
model.load_state_dict(torch.load(args.pth_load))
model.to(device)
model.eval()
if args.outfile is None:
model_name = args.pth_load.split('/')[-1].split('.')[0]
        def transmute(filename):
            # Derive an output filename from the input path and model name.
            filename = filename.split('/')[-1]
            filename = f"./predict-{model_name}-{filename}"
            if not filename.endswith('.tiff'):
                filename = filename.replace('.tif', '.tiff')
            return filename
        args.outfile = [transmute(f) for f in args.infile]
for (infile, outfile) in zip(args.infile, args.outfile):
log.info(outfile)
with rio.open(infile, 'r') as infile_ds, torch.no_grad():
out_raw_profile = copy.deepcopy(infile_ds.profile)
out_raw_profile.update({
'compress': 'lzw',
'dtype': np.float32,
'count': 1,
'bigtiff': 'yes',
'sparse_ok': 'yes',
'tiled': 'yes',
})
width = infile_ds.width
height = infile_ds.height
bandcount = infile_ds.count
ar_out = torch.zeros((1, height, width), dtype=torch.float32).to(device)
pixel_hits = torch.zeros((1, height, width), dtype=torch.uint8).to(device)
if bandcount == 224:
indexes = list(range(1, 224 + 1))
elif bandcount in {12, 13}:
indexes = list(range(1, 12 + 1))
# NOTE: 13 bands does not indicate L1C support, this is
# for Franklin COGs that have an extra band.
bandcount = 12
elif bandcount == 4:
indexes = list(range(1, 4 + 1))
elif bandcount == 5:
indexes = [1, 2, 3, 5]
bandcount = 4
else:
raise Exception(f'bands={bandcount}')
# gather up batches
batches = []
for i in range(0, width - n, args.stride):
for j in range(0, height - n, args.stride):
batches.append((i, j))
batches = [batches[i:i + args.chunksize] for i in range(0, len(batches), args.chunksize)]
for batch in tqdm.tqdm(batches):
windows = [infile_ds.read(indexes, window=Window(i, j, n, n)) for (i, j) in batch]
windows = [w.astype(np.float32) for w in windows]
if args.ndwi_mask:
windows = [w * (((w[2] - w[7]) / (w[2] + w[7])) > 0.0) for w in windows]
try:
windows = np.stack(windows, axis=0)
                except ValueError:  # skip batches that cannot be stacked (empty or ragged)
continue
windows = torch.from_numpy(windows).to(dtype=torch.float32, device=device)
prob = model(windows)
for k, (i, j) in enumerate(batch):
if 'seg' in prob:
_prob = torch.sigmoid(prob.get('seg')[k, 1]) - torch.sigmoid(prob.get('seg')[k, 0])
ar_out[0, j:(j + n), i:(i + n)] += _prob
else:
ar_out[0, j:(j + n), i:(i + n)] += torch.sigmoid(prob.get('class')[k, 0])
pixel_hits[0, j:(j + n), i:(i + n)] += 1
# Bring results back to CPU
ar_out /= pixel_hits
ar_out = ar_out.cpu().numpy()
# Write results to file
with rio.open(outfile, 'w', **out_raw_profile) as outfile_raw_ds:
outfile_raw_ds.write(ar_out[0], indexes=1)
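# Illustrative sketch (invented numbers, not part of the script): the loop
# above accumulates per-window scores in ar_out and counts window coverage in
# pixel_hits, so dividing the two averages the predictions of all overlapping
# windows. The 1-D toy below reproduces that idea with window size 4 and
# stride 2.
toy_scores = np.zeros(8, dtype=np.float32)
toy_hits = np.zeros(8, dtype=np.uint8)
for start in range(0, 8 - 4 + 1, 2):      # windows [0:4], [2:6], [4:8]
    window_score = float(start)           # stand-in for a model output
    toy_scores[start:start + 4] += window_score
    toy_hits[start:start + 4] += 1
toy_average = toy_scores / toy_hits       # -> [0, 0, 1, 1, 3, 3, 4, 4]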
| 39.895105
| 107
| 0.564242
|
#!/usr/bin/env python3
import argparse
import copy
import logging
import sys
import warnings
import numpy as np
import rasterio as rio
import torch
import torch.hub
import tqdm
from rasterio.windows import Window
BACKBONES = [
'vgg16', 'densenet161', 'shufflenet_v2_x1_0', 'mobilenet_v2',
'mobilenet_v3_large', 'mobilenet_v3_small', 'resnet18', 'resnet34',
'resnet50', 'resnet101', 'resnet152', 'efficientnet_b0', 'efficientnet_b1',
'efficientnet_b2', 'efficientnet_b3', 'efficientnet_b4', 'efficientnet_b5',
'efficientnet_b6', 'efficientnet_b7', 'fpn_resnet18', 'fpn_resnet34',
'fpn_resnet50'
]
def cli_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--backbone', required=True, type=str, choices=BACKBONES)
parser.add_argument('--chunksize', required=False, type=int, default=256)
parser.add_argument('--device', required=False, type=str, default='cuda', choices=['cuda', 'cpu'])
parser.add_argument('--infile', required=True, type=str, nargs='+')
parser.add_argument('--outfile', required=False, default=None, type=str, nargs='+')
parser.add_argument('--prescale', required=False, type=int, default=1)
parser.add_argument('--pth-load', required=True, type=str)
parser.add_argument('--stride', required=False, type=int, default=13)
parser.add_argument('--window-size', required=False, type=int, default=32)
parser.add_argument('--ndwi-mask', required=False, dest='ndwi_mask', action='store_true')
parser.set_defaults(ndwi_mask=False)
return parser
if __name__ == '__main__':
warnings.filterwarnings('ignore')
args = cli_parser().parse_args()
logging.basicConfig(stream=sys.stderr, level=logging.INFO, format='%(asctime)-15s %(message)s')
log = logging.getLogger()
n = args.window_size
device = torch.device(args.device)
model = torch.hub.load('jamesmcclain/algae-classifier:730726f5bccc679fa334da91fe4dc4cb71a35208',
'make_algae_model',
in_channels=[4, 12, 224],
prescale=args.prescale,
backbone_str=args.backbone,
pretrained=False)
model.load_state_dict(torch.load(args.pth_load))
model.to(device)
model.eval()
if args.outfile is None:
model_name = args.pth_load.split('/')[-1].split('.')[0]
def transmute(filename):
filename = filename.split('/')[-1]
filename = f"./predict-{model_name}-{filename}"
if not filename.endswith('.tiff'):
filename = filename.replace('.tif', '.tiff')
return filename
args.outfile = [transmute(f) for f in args.infile]
for (infile, outfile) in zip(args.infile, args.outfile):
log.info(outfile)
with rio.open(infile, 'r') as infile_ds, torch.no_grad():
out_raw_profile = copy.deepcopy(infile_ds.profile)
out_raw_profile.update({
'compress': 'lzw',
'dtype': np.float32,
'count': 1,
'bigtiff': 'yes',
'sparse_ok': 'yes',
'tiled': 'yes',
})
width = infile_ds.width
height = infile_ds.height
bandcount = infile_ds.count
ar_out = torch.zeros((1, height, width), dtype=torch.float32).to(device)
pixel_hits = torch.zeros((1, height, width), dtype=torch.uint8).to(device)
if bandcount == 224:
indexes = list(range(1, 224 + 1))
elif bandcount in {12, 13}:
indexes = list(range(1, 12 + 1))
# NOTE: 13 bands does not indicate L1C support, this is
# for Franklin COGs that have an extra band.
bandcount = 12
elif bandcount == 4:
indexes = list(range(1, 4 + 1))
elif bandcount == 5:
indexes = [1, 2, 3, 5]
bandcount = 4
else:
raise Exception(f'bands={bandcount}')
# gather up batches
batches = []
for i in range(0, width - n, args.stride):
for j in range(0, height - n, args.stride):
batches.append((i, j))
batches = [batches[i:i + args.chunksize] for i in range(0, len(batches), args.chunksize)]
for batch in tqdm.tqdm(batches):
windows = [infile_ds.read(indexes, window=Window(i, j, n, n)) for (i, j) in batch]
windows = [w.astype(np.float32) for w in windows]
if args.ndwi_mask:
windows = [w * (((w[2] - w[7]) / (w[2] + w[7])) > 0.0) for w in windows]
try:
windows = np.stack(windows, axis=0)
                except ValueError:  # skip batches that cannot be stacked (empty or ragged)
continue
windows = torch.from_numpy(windows).to(dtype=torch.float32, device=device)
prob = model(windows)
for k, (i, j) in enumerate(batch):
if 'seg' in prob:
_prob = torch.sigmoid(prob.get('seg')[k, 1]) - torch.sigmoid(prob.get('seg')[k, 0])
ar_out[0, j:(j + n), i:(i + n)] += _prob
else:
ar_out[0, j:(j + n), i:(i + n)] += torch.sigmoid(prob.get('class')[k, 0])
pixel_hits[0, j:(j + n), i:(i + n)] += 1
# Bring results back to CPU
ar_out /= pixel_hits
ar_out = ar_out.cpu().numpy()
# Write results to file
with rio.open(outfile, 'w', **out_raw_profile) as outfile_raw_ds:
outfile_raw_ds.write(ar_out[0], indexes=1)
| 0
| 0
| 0
| 0
| 0
| 1,150
| 0
| -6
| 76
|
ed5cc620f755b91673991e6e44482f82fb01cfdf
| 669
|
py
|
Python
|
tz_detect/defaults.py
|
dkirkham/django-tz-detect
|
ec3c66a967e2518adf070bfd42a9076471f1bc2a
|
[
"MIT"
] | null | null | null |
tz_detect/defaults.py
|
dkirkham/django-tz-detect
|
ec3c66a967e2518adf070bfd42a9076471f1bc2a
|
[
"MIT"
] | null | null | null |
tz_detect/defaults.py
|
dkirkham/django-tz-detect
|
ec3c66a967e2518adf070bfd42a9076471f1bc2a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.conf import settings
# How often to check
TZ_DETECT_PERIOD = getattr(settings, 'TZ_DETECT_PERIOD', 3*3600)
# Version of moment and moment-timezone to load
TZ_DETECT_SCRIPTS = getattr(settings, 'TZ_DETECT_SCRIPTS', [
'<script src="https://cdnjs.cloudflare.com/ajax/libs/moment.js/2.24.0/moment.min.js" integrity="sha256-4iQZ6BVL4qNKlQ27TExEhBN1HFPvAvAMbFavKKosSWQ=" crossorigin="anonymous"></script>',
'<script src="https://cdnjs.cloudflare.com/ajax/libs/moment-timezone/0.5.28/moment-timezone-with-data-10-year-range.min.js" integrity="sha256-HS6OzSyhM0rDG0PhZGwf/FvptBzIJnv4MgL2pe87xgg=" crossorigin="anonymous"></script>'
])
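# Illustrative override sketch (assumption: these lines belong in a project's
# settings.py, not in this defaults module). Because both values are read via
# getattr() on django.conf.settings, a project can replace them; the period is
# in seconds and the script tags below are placeholders, not recommendations.
# TZ_DETECT_PERIOD = 6 * 3600  # re-check every 6 hours
# TZ_DETECT_SCRIPTS = [
#     '<script src="/static/js/moment.min.js"></script>',
#     '<script src="/static/js/moment-timezone-with-data.min.js"></script>',
# ]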
| 55.75
| 224
| 0.77429
|
# -*- coding: utf-8 -*-
from django.conf import settings
# How often to check
TZ_DETECT_PERIOD = getattr(settings, 'TZ_DETECT_PERIOD', 3*3600)
# Version of moment and moment-timezone to load
TZ_DETECT_SCRIPTS = getattr(settings, 'TZ_DETECT_SCRIPTS', [
'<script src="https://cdnjs.cloudflare.com/ajax/libs/moment.js/2.24.0/moment.min.js" integrity="sha256-4iQZ6BVL4qNKlQ27TExEhBN1HFPvAvAMbFavKKosSWQ=" crossorigin="anonymous"></script>',
'<script src="https://cdnjs.cloudflare.com/ajax/libs/moment-timezone/0.5.28/moment-timezone-with-data-10-year-range.min.js" integrity="sha256-HS6OzSyhM0rDG0PhZGwf/FvptBzIJnv4MgL2pe87xgg=" crossorigin="anonymous"></script>'
])
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0051e0bd2a9085c3ed7b3685be94b2da7bc22176
| 1,525
|
py
|
Python
|
cs/algorithms/graph/kargers.py
|
TylerYep/workshop
|
69b19afc81c1b84b7f60723077670fb789b55744
|
[
"MIT"
] | 1
|
2021-06-14T01:20:09.000Z
|
2021-06-14T01:20:09.000Z
|
cs/algorithms/graph/kargers.py
|
TylerYep/workshop
|
69b19afc81c1b84b7f60723077670fb789b55744
|
[
"MIT"
] | null | null | null |
cs/algorithms/graph/kargers.py
|
TylerYep/workshop
|
69b19afc81c1b84b7f60723077670fb789b55744
|
[
"MIT"
] | null | null | null |
import random
from cs.structures import Edge, Graph, Node, V
def kargers_min_cut(orig_graph: Graph[V]) -> set[Edge[V]]:
"""
Partitions a graph using Karger's Algorithm. Works on directed and undirected
graphs, but involves random choices, so it does not give consistent outputs.
Args:
        orig_graph: The graph to partition.
Returns:
The cutset of the cut found by Karger's Algorithm.
"""
graph: Graph[Node[tuple[V, ...]]] = Graph.from_graph(
orig_graph, node_fn=lambda x: Node((x,))
)
while len(graph) > 2:
edge = random.choice(tuple(graph.edges))
# Contract edge (u, v) to new node uv
uv = Node(edge.start.data + edge.end.data)
uv_neighbors = graph[edge.start] | graph[edge.end]
del uv_neighbors[edge.start]
del uv_neighbors[edge.end]
graph.add_node(uv)
for neighbor in uv_neighbors:
graph.add_edge(uv, neighbor)
if graph.is_directed:
graph.add_edge(neighbor, uv)
# Remove nodes u and v.
graph.remove_node(edge.start)
graph.remove_node(edge.end)
# Find cutset.
group1, group2 = graph.nodes
result_set = set()
for subnode in group1.data:
for subneighbor in group2.data:
if subneighbor in orig_graph[subnode] or subnode in orig_graph[subneighbor]:
result_set.add(orig_graph[subnode][subneighbor])
return result_set
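# Illustrative usage sketch (not part of the module): Karger's contraction is
# randomized, so a single run only finds a minimum cut with some probability.
# The usual remedy is to repeat the algorithm and keep the smallest cutset
# seen; only kargers_min_cut() defined above is assumed here, and building
# the Graph itself is left to the cs.structures API.
def repeated_kargers_min_cut(graph, trials=100):
    """Run Karger's algorithm `trials` times and return the smallest cutset."""
    best = None
    for _ in range(trials):
        cutset = kargers_min_cut(graph)
        if best is None or len(cutset) < len(best):
            best = cutset
    return best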
| 31.122449
| 88
| 0.633443
|
import random
from cs.structures import Edge, Graph, Node, V
def kargers_min_cut(orig_graph: Graph[V]) -> set[Edge[V]]:
"""
Partitions a graph using Karger's Algorithm. Works on directed and undirected
graphs, but involves random choices, so it does not give consistent outputs.
Args:
        orig_graph: The graph to partition.
Returns:
The cutset of the cut found by Karger's Algorithm.
"""
graph: Graph[Node[tuple[V, ...]]] = Graph.from_graph(
orig_graph, node_fn=lambda x: Node((x,))
)
while len(graph) > 2:
edge = random.choice(tuple(graph.edges))
# Contract edge (u, v) to new node uv
uv = Node(edge.start.data + edge.end.data)
uv_neighbors = graph[edge.start] | graph[edge.end]
del uv_neighbors[edge.start]
del uv_neighbors[edge.end]
graph.add_node(uv)
for neighbor in uv_neighbors:
graph.add_edge(uv, neighbor)
if graph.is_directed:
graph.add_edge(neighbor, uv)
# Remove nodes u and v.
graph.remove_node(edge.start)
graph.remove_node(edge.end)
# Find cutset.
group1, group2 = graph.nodes
result_set = set()
for subnode in group1.data:
for subneighbor in group2.data:
if subneighbor in orig_graph[subnode] or subnode in orig_graph[subneighbor]:
result_set.add(orig_graph[subnode][subneighbor])
return result_set
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
2a8f7460a21b7cad5dc74cfff3405c3af0fe2006
| 471
|
py
|
Python
|
Python/FindDigits.py
|
MuriloRoque/coding_challenges
|
dd1ca31bc1c9e77026ef625fbca7f8938d3e965e
|
[
"MIT"
] | 7
|
2020-06-03T19:19:07.000Z
|
2022-01-08T03:00:59.000Z
|
Python/FindDigits.py
|
MuriloRoque/coding-challenges
|
dd1ca31bc1c9e77026ef625fbca7f8938d3e965e
|
[
"MIT"
] | 4
|
2020-05-25T10:31:26.000Z
|
2022-02-26T08:03:55.000Z
|
Python/FindDigits.py
|
MuriloRoque/coding_challenges
|
dd1ca31bc1c9e77026ef625fbca7f8938d3e965e
|
[
"MIT"
] | null | null | null |
#!/bin/python3
import os
# Complete the findDigits function below.
def findDigits(n):
    # Count the digits of n that divide n evenly; zeros are skipped to avoid
    # division by zero.
    s = str(n)
    res = 0
    for i in s:
        if int(i) != 0:
            if n % int(i) == 0:
                res += 1
    return res
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input())
for t_itr in range(t):
n = int(input())
result = findDigits(n)
fptr.write(str(result) + '\n')
fptr.close()
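# Worked example (illustrative): findDigits counts the digits of n that
# divide n evenly, skipping zeros. For n = 124, all of 1, 2 and 4 divide 124,
# so the answer is 3; for n = 1012, the digits 1, 1 and 2 divide it while the
# 0 is skipped, so the answer is 3 as well.
assert findDigits(124) == 3
assert findDigits(1012) == 3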
| 15.193548
| 47
| 0.501062
|
#!/bin/python3
import os
# Complete the findDigits function below.
def findDigits(n):
s = str(n)
res = 0
for i in s:
if int(i) != 0:
if n % int(i) == 0:
res += 1
return res
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input())
for t_itr in range(t):
n = int(input())
result = findDigits(n)
fptr.write(str(result) + '\n')
fptr.close()
| 0
| 0
| 0
| 0
| 0
| 136
| 0
| 0
| 23
|
bc45dbd742de9463f2a3a8dabbfff37df96ff9fa
| 3,467
|
py
|
Python
|
dfirtrack_artifacts/urls.py
|
thomas-kropeit/dfirtrack
|
b1e0e659af7bc8085cfe2d269ddc651f9f4ba585
|
[
"Apache-2.0"
] | null | null | null |
dfirtrack_artifacts/urls.py
|
thomas-kropeit/dfirtrack
|
b1e0e659af7bc8085cfe2d269ddc651f9f4ba585
|
[
"Apache-2.0"
] | 6
|
2022-03-16T12:30:51.000Z
|
2022-03-28T01:34:45.000Z
|
dfirtrack_artifacts/urls.py
|
thomas-kropeit/dfirtrack
|
b1e0e659af7bc8085cfe2d269ddc651f9f4ba585
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import path
from dfirtrack_artifacts.creator import artifact_creator
from dfirtrack_artifacts.exporter.spreadsheet import xls
from dfirtrack_artifacts.views import (artifact_view, artifactpriority_view, artifactstatus_view, artifacttype_view)
urlpatterns = (
# urls for Artifact
path(
r'artifact/',
artifact_view.ArtifactListView.as_view(),
name='artifacts_artifact_list',
),
path(
r'artifact/closed/',
artifact_view.ArtifactClosedView.as_view(),
name='artifacts_artifact_closed',
),
path(
r'artifact/all/',
artifact_view.ArtifactAllView.as_view(),
name='artifacts_artifact_all',
),
path(
r'artifact/create/',
artifact_view.ArtifactCreateView.as_view(),
name='artifacts_artifact_create',
),
path(
r'artifact/detail/<int:pk>/',
artifact_view.ArtifactDetailView.as_view(),
name='artifacts_artifact_detail',
),
path(
r'artifact/update/<int:pk>/',
artifact_view.ArtifactUpdateView.as_view(),
name='artifacts_artifact_update',
),
path(
r'artifact/<int:pk>/set_user/',
artifact_view.ArtifactSetUser.as_view(),
name='artifact_set_user',
),
path(
r'artifact/<int:pk>/unset_user/',
artifact_view.ArtifactUnsetUser.as_view(),
name='artifact_unset_user',
),
path(
r'artifact/creator/', artifact_creator.artifact_creator, name='artifact_creator'
),
path(
r'artifact/exporter/spreadsheet/xls/artifact/',
xls.artifact,
name='artifact_exporter_spreadsheet_xls',
),
path(
r'artifact/exporter/spreadsheet/xls/artifact/cron/',
xls.artifact_create_cron,
name='artifact_exporter_spreadsheet_xls_cron',
),
)
urlpatterns += (
# urls for Artifactpriority
path(
r'artifactpriority/',
artifactpriority_view.ArtifactpriorityListView.as_view(),
name='artifacts_artifactpriority_list',
),
path(
r'artifactpriority/detail/<int:pk>/',
artifactpriority_view.ArtifactpriorityDetailView.as_view(),
name='artifacts_artifactpriority_detail',
),
)
urlpatterns += (
# urls for Artifactstatus
path(
r'artifactstatus/',
artifactstatus_view.ArtifactstatusListView.as_view(),
name='artifacts_artifactstatus_list',
),
path(
r'artifactstatus/detail/<int:pk>/',
artifactstatus_view.ArtifactstatusDetailView.as_view(),
name='artifacts_artifactstatus_detail',
),
)
urlpatterns += (
# urls for Artifacttype
path(
r'artifacttype/',
artifacttype_view.ArtifacttypeListView.as_view(),
name='artifacts_artifacttype_list',
),
path(
r'artifacttype/create/',
artifacttype_view.ArtifacttypeCreateView.as_view(),
name='artifacts_artifacttype_create',
),
path(
r'artifacttype/add_popup/',
artifacttype_view.ArtifacttypeCreatePopup.as_view(),
name='artifacttype_add_popup',
),
path(
r'artifacttype/detail/<int:pk>/',
artifacttype_view.ArtifacttypeDetailView.as_view(),
name='artifacts_artifacttype_detail',
),
path(
r'artifacttype/update/<int:pk>/',
artifacttype_view.ArtifacttypeUpdateView.as_view(),
name='artifacts_artifacttype_update',
),
)
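# Illustrative usage sketch (not part of the urlconf): because every pattern
# above is named, views and templates can resolve URLs by name instead of
# hard-coding paths. The pk value is a placeholder; the resolved path assumes
# this urlconf is included without an extra prefix.
from django.urls import reverse
def example_artifact_detail_url(pk=1):
    # e.g. '/artifact/detail/1/' when included at the project root
    return reverse('artifacts_artifact_detail', kwargs={'pk': pk})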
| 27.736
| 88
| 0.651284
|
from django.urls import path
from dfirtrack_artifacts.creator import artifact_creator
from dfirtrack_artifacts.exporter.spreadsheet import xls
from dfirtrack_artifacts.views import (
artifact_view,
artifactpriority_view,
artifactstatus_view,
artifacttype_view,
)
urlpatterns = (
# urls for Artifact
path(
r'artifact/',
artifact_view.ArtifactListView.as_view(),
name='artifacts_artifact_list',
),
path(
r'artifact/closed/',
artifact_view.ArtifactClosedView.as_view(),
name='artifacts_artifact_closed',
),
path(
r'artifact/all/',
artifact_view.ArtifactAllView.as_view(),
name='artifacts_artifact_all',
),
path(
r'artifact/create/',
artifact_view.ArtifactCreateView.as_view(),
name='artifacts_artifact_create',
),
path(
r'artifact/detail/<int:pk>/',
artifact_view.ArtifactDetailView.as_view(),
name='artifacts_artifact_detail',
),
path(
r'artifact/update/<int:pk>/',
artifact_view.ArtifactUpdateView.as_view(),
name='artifacts_artifact_update',
),
path(
r'artifact/<int:pk>/set_user/',
artifact_view.ArtifactSetUser.as_view(),
name='artifact_set_user',
),
path(
r'artifact/<int:pk>/unset_user/',
artifact_view.ArtifactUnsetUser.as_view(),
name='artifact_unset_user',
),
path(
r'artifact/creator/', artifact_creator.artifact_creator, name='artifact_creator'
),
path(
r'artifact/exporter/spreadsheet/xls/artifact/',
xls.artifact,
name='artifact_exporter_spreadsheet_xls',
),
path(
r'artifact/exporter/spreadsheet/xls/artifact/cron/',
xls.artifact_create_cron,
name='artifact_exporter_spreadsheet_xls_cron',
),
)
urlpatterns += (
# urls for Artifactpriority
path(
r'artifactpriority/',
artifactpriority_view.ArtifactpriorityListView.as_view(),
name='artifacts_artifactpriority_list',
),
path(
r'artifactpriority/detail/<int:pk>/',
artifactpriority_view.ArtifactpriorityDetailView.as_view(),
name='artifacts_artifactpriority_detail',
),
)
urlpatterns += (
# urls for Artifactstatus
path(
r'artifactstatus/',
artifactstatus_view.ArtifactstatusListView.as_view(),
name='artifacts_artifactstatus_list',
),
path(
r'artifactstatus/detail/<int:pk>/',
artifactstatus_view.ArtifactstatusDetailView.as_view(),
name='artifacts_artifactstatus_detail',
),
)
urlpatterns += (
# urls for Artifacttype
path(
r'artifacttype/',
artifacttype_view.ArtifacttypeListView.as_view(),
name='artifacts_artifacttype_list',
),
path(
r'artifacttype/create/',
artifacttype_view.ArtifacttypeCreateView.as_view(),
name='artifacts_artifacttype_create',
),
path(
r'artifacttype/add_popup/',
artifacttype_view.ArtifacttypeCreatePopup.as_view(),
name='artifacttype_add_popup',
),
path(
r'artifacttype/detail/<int:pk>/',
artifacttype_view.ArtifacttypeDetailView.as_view(),
name='artifacts_artifacttype_detail',
),
path(
r'artifacttype/update/<int:pk>/',
artifacttype_view.ArtifacttypeUpdateView.as_view(),
name='artifacts_artifacttype_update',
),
)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 19
| 0
|
3f7cd28b00b51df823099bd4153d8f5599444380
| 270
|
py
|
Python
|
mayan/apps/locales/icons.py
|
bonitobonita24/Mayan-EDMS
|
7845fe0e1e83c81f5d227a16116397a3d3883b85
|
[
"Apache-2.0"
] | 343
|
2015-01-05T14:19:35.000Z
|
2018-12-10T19:07:48.000Z
|
mayan/apps/locales/icons.py
|
bonitobonita24/Mayan-EDMS
|
7845fe0e1e83c81f5d227a16116397a3d3883b85
|
[
"Apache-2.0"
] | 191
|
2015-01-03T00:48:19.000Z
|
2018-11-30T09:10:25.000Z
|
mayan/apps/locales/icons.py
|
bonitobonita24/Mayan-EDMS
|
7845fe0e1e83c81f5d227a16116397a3d3883b85
|
[
"Apache-2.0"
] | 114
|
2015-01-08T20:21:05.000Z
|
2018-12-10T19:07:53.000Z
|
from mayan.apps.appearance.classes import Icon
icon_user_locale_profile_detail = Icon(
driver_name='fontawesome', symbol='globe'
)
icon_user_locale_profile_edit = Icon(
driver_name='fontawesome-dual', primary_symbol='globe',
secondary_symbol='pencil-alt'
)
| 27
| 59
| 0.781481
|
from mayan.apps.appearance.classes import Icon
icon_user_locale_profile_detail = Icon(
driver_name='fontawesome', symbol='globe'
)
icon_user_locale_profile_edit = Icon(
driver_name='fontawesome-dual', primary_symbol='globe',
secondary_symbol='pencil-alt'
)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
6ab583d13ec98e93752ce61e59742114fe5f4689
| 4,643
|
py
|
Python
|
result/arka/parse_result.py
|
MingzheWu418/plastering
|
322531e934c3acf2ecc8f520b37a6d255b9959c2
|
[
"MIT"
] | 29
|
2018-09-19T01:16:27.000Z
|
2022-03-29T14:35:36.000Z
|
result/arka/parse_result.py
|
MingzheWu418/plastering
|
322531e934c3acf2ecc8f520b37a6d255b9959c2
|
[
"MIT"
] | 14
|
2019-04-12T18:37:36.000Z
|
2022-02-10T00:27:55.000Z
|
result/arka/parse_result.py
|
MingzheWu418/plastering
|
322531e934c3acf2ecc8f520b37a6d255b9959c2
|
[
"MIT"
] | 14
|
2019-03-05T23:44:11.000Z
|
2022-03-18T07:29:31.000Z
|
import os
import sys
import re
from copy import deepcopy
import json
import pandas as pd
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, dir_path + '/../..')
from operator import itemgetter
from plastering.metadata_interface import *
from plastering.evaluator import *
target_building = 'sdh'
currfile = __file__
base_dir = os.path.dirname(currfile)
target_dir = base_dir + '/' + target_building
def get_number(s):
    # Extract the first integer embedded in a filename such as 'l-ex-3-out'.
    return int(re.findall('\\d+', s)[0])
def is_finished():
    # All clusters have been consumed once every example counter reaches the
    # end of its qualified-examples list.
    for cid, curr_eid in curr_eids.items():
        if curr_eid < len(qualified_examples_nums[cid]) - 1:
            return False
    return True
def select_next_cid():
    # Pick the largest remaining cluster that still has examples left.
    ordered_cids = [row[0] for row in
                    sorted(curr_cluster_sizes.items(),
                           key=itemgetter(1),
                           reverse=True)]
    for cid in ordered_cids:
        curr_eid = curr_eids[cid]
        if curr_eid < len(qualified_examples_nums[cid]) - 1:
            return cid
    raise Exception('cannot find cids without finishing the algorithm. A bug')
def get_srcid(name):
    return '_'.join(re.findall('[a-zA-Z0-9]+', name))
orig_cluster_sizes = {}
total_names = []
for filename in os.listdir(target_dir):
if not re.match('{0}-ORIGINAL-METADATA-\\d+$'.format(target_building.upper()),
filename):
continue
cid = get_number(filename)
with open(target_dir + '/' + filename, 'r') as fp:
names = fp.readlines()
orig_cluster_sizes[cid] = len(names)
total_names += names
total_names = list(set(total_names))
total_srcids = [get_srcid(name) for name in total_names]
curr_cluster_sizes = deepcopy(orig_cluster_sizes)
true_tagsets = {srcid: LabeledMetadata.objects(srcid=srcid).first().tagsets
for srcid in total_srcids}
true_points = {srcid: LabeledMetadata.objects(srcid=srcid).first().point_tagset
for srcid in total_srcids}
qualified_examples_nums = {}
for filename in os.listdir(target_dir):
if not re.match('l-ex-\\d+-out$', filename):
continue
cid = get_number(filename)
df = pd.read_csv(target_dir + '/' + filename)
df.columns = df.columns.str.strip()
coverages = df['Num Examples Thought to be fully qualified'].tolist()
qualified_examples_nums[cid] = coverages
inferred_points_dict = {i: {} for i in curr_cluster_sizes.keys()}
for filename in os.listdir(target_dir):
if not re.match('l-ex-\\d+-out-points-qualified$', filename):
continue
cid = get_number(filename)
with open(target_dir + '/' + filename, 'r') as fp:
lines = fp.readlines()
for line in lines:
ex_id = int(line.split(' ')[0])
if "'" not in line:
items = []
else:
items = line.split('[')[-1].split(']')[0][1:-1].split("', '")
inferred_points_dict[cid][ex_id] = items
pred = {}
curr_eids = {i: 0 for i in curr_cluster_sizes.keys()}
total_num = sum(orig_cluster_sizes.values())
pred_names = set()
cnt = 0
accs = []
f1s = []
mf1s = []
anymf1s = []
srcids = []
pred = {srcid: [] for srcid in total_srcids}
point_pred = {srcid: [] for srcid in total_srcids}
res = []
while not is_finished():
# select cluster
#max_cid = max(curr_cluster_sizes.items(), key=itemgetter(1))[0]
cnt += 1
max_cid = select_next_cid()
curr_eids[max_cid] += 1
curr_eid = curr_eids[max_cid]
found_names = set(inferred_points_dict[max_cid][curr_eid])
new_names = found_names - pred_names
new_srcids = [get_srcid(name) for name in new_names]
pred_names = pred_names.union(new_names)
curr_cluster_sizes[max_cid] = orig_cluster_sizes[max_cid] - len(found_names)
acc = len(pred_names) / total_num
print('{0}\tacc: {1}'.format(cnt, acc))
pred.update({srcid: LabeledMetadata.objects(srcid=srcid).first().tagsets
for srcid in new_srcids})
point_pred.update({
srcid: LabeledMetadata.objects(srcid=srcid).first().point_tagset
for srcid in new_srcids})
anymf1 = get_macro_f1(true_tagsets, pred)
mf1 = get_macro_f1(true_points, point_pred)
f1 = get_micro_f1(true_points, point_pred)
#mf1s.append(mf1)
#f1s.append(f1)
#anymf1s.append(anymf1)
#accs.append(acc)
#srcids.append(len(pred_names))
row = {
'metrics': {
'f1': f1,
'macrof1': mf1,
'accuracy': acc,
'macrof1-all': anymf1
},
'learning_srcids': cnt
}
res.append(row)
with open('result/pointonly_notransfer_arka_{0}_0.json'.format(target_building),
'w') as fp:
json.dump(res, fp)
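# Illustrative consumer sketch (not part of the script): each row appended to
# res pairs the running number of labeled examples with a metrics dict, so
# the dumped JSON can be read back as a simple learning curve. Only the json
# module and the output path used above are assumed.
def load_learning_curve(path):
    """Return (num_examples, accuracy) pairs from a dumped result file."""
    with open(path) as fp:
        rows = json.load(fp)
    return [(row['learning_srcids'], row['metrics']['accuracy'])
            for row in rows]
# curve = load_learning_curve(
#     'result/pointonly_notransfer_arka_{0}_0.json'.format(target_building))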
| 30.748344
| 82
| 0.645272
|
import os
import sys
import pdb
import re
from copy import deepcopy
from operator import itemgetter
import json
import pandas as pd
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, dir_path + '/../..')
from plastering.metadata_interface import *
from plastering.evaluator import *
target_building = 'sdh'
currfile = __file__
base_dir = os.path.dirname(currfile)
target_dir = base_dir + '/' + target_building
def get_number(s):
return int(re.findall('\\d+', s)[0])
def is_finished():
for cid, curr_eid in curr_eids.items():
if curr_eid < len(qualified_examples_nums[cid]) - 1:
return False
return True
def select_next_cid():
ordered_cids = [row[0] for row in
sorted(curr_cluster_sizes.items(),
key=itemgetter(1),
reverse=True)]
for cid in ordered_cids:
curr_eid = curr_eids[cid]
if curr_eid < len(qualified_examples_nums[cid]) - 1:
return cid
raise Exception('cannot find cids without finishing the algorithm. A bug')
def get_srcid(name):
return '_'.join(re.findall('[a-zA-Z0-9]+', name))
orig_cluster_sizes = {}
total_names = []
for filename in os.listdir(target_dir):
if not re.match('{0}-ORIGINAL-METADATA-\\d+$'.format(target_building.upper()),
filename):
continue
cid = get_number(filename)
with open(target_dir + '/' + filename, 'r') as fp:
names = fp.readlines()
orig_cluster_sizes[cid] = len(names)
total_names += names
total_names = list(set(total_names))
total_srcids = [get_srcid(name) for name in total_names]
curr_cluster_sizes = deepcopy(orig_cluster_sizes)
true_tagsets = {srcid: LabeledMetadata.objects(srcid=srcid).first().tagsets
for srcid in total_srcids}
true_points = {srcid: LabeledMetadata.objects(srcid=srcid).first().point_tagset
for srcid in total_srcids}
qualified_examples_nums = {}
for filename in os.listdir(target_dir):
if not re.match('l-ex-\\d+-out$', filename):
continue
cid = get_number(filename)
df = pd.read_csv(target_dir + '/' + filename)
df.columns = df.columns.str.strip()
coverages = df['Num Examples Thought to be fully qualified'].tolist()
qualified_examples_nums[cid] = coverages
inferred_points_dict = {i: {} for i in curr_cluster_sizes.keys()}
for filename in os.listdir(target_dir):
if not re.match('l-ex-\\d+-out-points-qualified$', filename):
continue
cid = get_number(filename)
with open(target_dir + '/' + filename, 'r') as fp:
lines = fp.readlines()
for line in lines:
ex_id = int(line.split(' ')[0])
if "'" not in line:
items = []
else:
items = line.split('[')[-1].split(']')[0][1:-1].split("', '")
inferred_points_dict[cid][ex_id] = items
pred = {}
curr_eids = {i: 0 for i in curr_cluster_sizes.keys()}
total_num = sum(orig_cluster_sizes.values())
pred_names = set()
cnt = 0
accs = []
f1s = []
mf1s = []
anymf1s = []
srcids = []
pred = {srcid: [] for srcid in total_srcids}
point_pred = {srcid: [] for srcid in total_srcids}
res = []
while not is_finished():
# select cluster
#max_cid = max(curr_cluster_sizes.items(), key=itemgetter(1))[0]
cnt += 1
max_cid = select_next_cid()
curr_eids[max_cid] += 1
curr_eid = curr_eids[max_cid]
found_names = set(inferred_points_dict[max_cid][curr_eid])
new_names = found_names - pred_names
new_srcids = [get_srcid(name) for name in new_names]
pred_names = pred_names.union(new_names)
curr_cluster_sizes[max_cid] = orig_cluster_sizes[max_cid] - len(found_names)
acc = len(pred_names) / total_num
print('{0}\tacc: {1}'.format(cnt, acc))
pred.update({srcid: LabeledMetadata.objects(srcid=srcid).first().tagsets
for srcid in new_srcids})
point_pred.update({
srcid: LabeledMetadata.objects(srcid=srcid).first().point_tagset
for srcid in new_srcids})
anymf1 = get_macro_f1(true_tagsets, pred)
mf1 = get_macro_f1(true_points, point_pred)
f1 = get_micro_f1(true_points, point_pred)
#mf1s.append(mf1)
#f1s.append(f1)
#anymf1s.append(anymf1)
#accs.append(acc)
#srcids.append(len(pred_names))
row = {
'metrics': {
'f1': f1,
'macrof1': mf1,
'accuracy': acc,
'macrof1-all': anymf1
},
'learning_srcids': cnt
}
res.append(row)
with open('result/pointonly_notransfer_arka_{0}_0.json'.format(target_building),
'w') as fp:
json.dump(res, fp)
| 0
| 0
| 0
| 0
| 0
| 642
| 0
| 34
| 180
|
580fe86ae0aa9c38a9e6907e1803cb156d5b2bdf
| 7,285
|
py
|
Python
|
learntools/ml_intermediate/ex3.py
|
roannav/learntools
|
355a5df6a66562de62254b723da1a9389b9acc49
|
[
"Apache-2.0"
] | null | null | null |
learntools/ml_intermediate/ex3.py
|
roannav/learntools
|
355a5df6a66562de62254b723da1a9389b9acc49
|
[
"Apache-2.0"
] | null | null | null |
learntools/ml_intermediate/ex3.py
|
roannav/learntools
|
355a5df6a66562de62254b723da1a9389b9acc49
|
[
"Apache-2.0"
] | null | null | null |
Label = MultipartProblem(LabelA, LabelB)
Cardinality = MultipartProblem(CardinalityA, CardinalityB)
qvars = bind_exercises(globals(), [
Drop,
Label,
Cardinality,
OneHot
],
var_format='step_{n}',
)
__all__ = list(qvars)
| 41.158192
| 104
| 0.68744
|
import pandas as pd
import warnings
from learntools.core import *
class Drop(CodingProblem):
_vars = ['drop_X_train', 'drop_X_valid']
_hint = ("Use the [`select_dtypes()`](https://pandas.pydata.org/pandas-"
"docs/stable/reference/api/pandas.DataFrame.select_dtypes.html) method "
"to drop all columns with the `object` dtype.")
_solution = CS(
"""# Drop columns in training and validation data
drop_X_train = X_train.select_dtypes(exclude=['object'])
drop_X_valid = X_valid.select_dtypes(exclude=['object'])
""")
def check(self, drop_X_train, drop_X_valid):
assert type(drop_X_train) == pd.core.frame.DataFrame, \
"`drop_X_train` is not a pandas DataFrame."
assert type(drop_X_valid) == pd.core.frame.DataFrame, \
"`drop_X_valid` is not a pandas DataFrame."
assert not(any((drop_X_train.dtypes == 'object').values)), \
"You still need to encode some of the categorical columns in your training data."
assert not(any((drop_X_valid.dtypes == 'object').values)), \
"You still need to encode some of the categorical columns in your validation data."
assert drop_X_train.shape[1] == 33, \
("`drop_X_train` should have 33 columns.")
assert drop_X_valid.shape[1] == 33, \
("`drop_X_valid` should have 33 columns.")
class LabelA(ThoughtExperiment):
_hint = ("Are there any values that appear in the validation data but not in the training data?")
_solution = ("Fitting a label encoder to a column in the training data creates a corresponding "
"integer-valued label for each unique value **that appears in the training data**. In "
"the case that the validation data contains values that don't also appear in the "
"training data, the encoder will throw an error, because these values won't have an "
"integer assigned to them. Notice that the `'Condition2'` "
"column in the validation data contains the values `'RRAn'` and `'RRNn'`, but these "
"don't appear in the training data -- thus, if we try to use a label encoder with "
"scikit-learn, the code will throw an error.")
class LabelB(CodingProblem):
_vars = ['label_X_train', 'label_X_valid']
_hint = ("Use the `LabelEncoder` class from scikit-learn. You should only encode the columns in "
"`good_label_cols`.")
_solution = CS(
"""# Drop categorical columns that will not be encoded
label_X_train = X_train.drop(bad_label_cols, axis=1)
label_X_valid = X_valid.drop(bad_label_cols, axis=1)
# Apply label encoder
label_encoder = LabelEncoder()
for col in set(good_label_cols):
label_X_train[col] = label_encoder.fit_transform(X_train[col])
label_X_valid[col] = label_encoder.transform(X_valid[col])
""")
def check(self, label_X_train, label_X_valid):
assert type(label_X_train) == pd.core.frame.DataFrame, \
"`label_X_train` is not a pandas DataFrame."
assert type(label_X_valid) == pd.core.frame.DataFrame, \
"`label_X_valid` is not a pandas DataFrame."
assert not(any((label_X_train.dtypes == 'object').values)), \
"You still need to encode some of the categorical columns in your training data."
assert not(any((label_X_valid.dtypes == 'object').values)), \
"You still need to encode some of the categorical columns in your validation data."
# remove 45 after nb update
assert label_X_train.shape[1] in [57, 45], \
"`label_X_train` does not have the correct number of columns."
# remove 45 after nb update
assert label_X_valid.shape[1] in [57, 45], \
"`label_X_valid` does not have the correct number of columns."
Label = MultipartProblem(LabelA, LabelB)
class CardinalityA(EqualityCheckProblem):
_vars = ['high_cardinality_numcols', 'num_cols_neighborhood']
_expected = [3, 25]
_hint = ("To one-hot encode a variable, we need one column for each unique entry.")
_solution = CS(
"""# How many categorical variables in the training data
# have cardinality greater than 10?
high_cardinality_numcols = 3
# How many columns are needed to one-hot encode the
# 'Neighborhood' variable in the training data?
num_cols_neighborhood = 25
""")
class CardinalityB(EqualityCheckProblem):
_vars = ['OH_entries_added', 'label_entries_added']
_expected = [990000, 0]
_hint = ("To calculate how many entries are added to the dataset through the one-hot encoding, "
"begin by calculating how many entries are needed to encode the categorical variable "
"(by multiplying the number of rows by the number of columns in the one-hot encoding). "
"Then, to obtain how many entries are **added** to the dataset, subtract the number "
"of entries in the original column.")
_solution = CS(
"""# How many entries are added to the dataset by
# replacing the column with a one-hot encoding?
OH_entries_added = 1e4*100 - 1e4
# How many entries are added to the dataset by
# replacing the column with a label encoding?
label_entries_added = 0
""")
Cardinality = MultipartProblem(CardinalityA, CardinalityB)
class OneHot(CodingProblem):
_vars = ['OH_X_train', 'OH_X_valid']
_hint = ("Begin by applying the one-hot encoder to the low cardinality columns in the "
"training and validation data in `X_train[low_cardinality_cols]` and "
"`X_valid[low_cardinality_cols]`, respectively.")
_solution = CS(
"""# Apply one-hot encoder to each column with categorical data
OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
OH_cols_train = pd.DataFrame(OH_encoder.fit_transform(X_train[low_cardinality_cols]))
OH_cols_valid = pd.DataFrame(OH_encoder.transform(X_valid[low_cardinality_cols]))
# One-hot encoding removed index; put it back
OH_cols_train.index = X_train.index
OH_cols_valid.index = X_valid.index
# Remove categorical columns (will replace with one-hot encoding)
num_X_train = X_train.drop(object_cols, axis=1)
num_X_valid = X_valid.drop(object_cols, axis=1)
# Add one-hot encoded columns to numerical features
OH_X_train = pd.concat([num_X_train, OH_cols_train], axis=1)
OH_X_valid = pd.concat([num_X_valid, OH_cols_valid], axis=1)
""")
def check(self, OH_X_train, OH_X_valid):
assert type(OH_X_train) == pd.core.frame.DataFrame, \
"`OH_X_train` is not a pandas DataFrame."
assert type(OH_X_valid) == pd.core.frame.DataFrame, \
"`OH_X_valid` is not a pandas DataFrame."
assert not(any((OH_X_train.dtypes == 'object').values)), \
"You still need to encode some of the categorical columns in your training data."
assert not(any((OH_X_valid.dtypes == 'object').values)), \
"You still need to encode some of the categorical columns in your validation data."
assert len(OH_X_train.columns) == 155, \
"`OH_X_train` should have 155 columns."
assert len(OH_X_valid.columns) == 155, \
"`OH_X_valid` should have 155 columns."
qvars = bind_exercises(globals(), [
Drop,
Label,
Cardinality,
OneHot
],
var_format='step_{n}',
)
__all__ = list(qvars)
| 0
| 0
| 0
| 6,829
| 0
| 0
| 0
| 0
| 205
|
adcc7eef90b09be43068eff5739a52723c4a565f
| 976
|
py
|
Python
|
src/vnc_me/controllers/connect.py
|
maizy/vnc-me
|
644cbe7c58d5077b2a2c41145e088430c97860ee
|
[
"MIT"
] | null | null | null |
src/vnc_me/controllers/connect.py
|
maizy/vnc-me
|
644cbe7c58d5077b2a2c41145e088430c97860ee
|
[
"MIT"
] | null | null | null |
src/vnc_me/controllers/connect.py
|
maizy/vnc-me
|
644cbe7c58d5077b2a2c41145e088430c97860ee
|
[
"MIT"
] | null | null | null |
# _*_ coding: utf-8 _*_
# Copyright (c) Nikita Kovaliov, maizy.ru, 2013
# See LICENSE.txt for details.
| 27.885714
| 68
| 0.604508
|
# _*_ coding: utf-8 _*_
# Copyright (c) Nikita Kovaliov, maizy.ru, 2013
# See LICENSE.txt for details.
from tornado.web import asynchronous
from vnc_me.controllers import HttpHandler
from vnc_me.vnc_client import VncClient
class Handler(HttpHandler):
@asynchronous
def post(self):
host = self.get_argument('host', None)
port = self.get_argument('port', None)
password = self.get_argument('password', '')
if not host or not port:
self.redirect('/?error=bad_params')
return
client = VncClient.get_client('main')
if client.running:
self.redirect('/?error=ever_running')
return
def _on_connect(success):
if success:
self.redirect('/?connected=true')
else:
self.redirect('/?error=unknown')
client.start(_on_connect, host, port,
password.encode('utf-8') if password else None)
| 0
| 694
| 0
| 6
| 0
| 0
| 0
| 54
| 118
|
3be12f5f6443ad94d4862814b4ddfa13ca970561
| 998
|
py
|
Python
|
pcloudpy/gui/graphics/QVTKWindow.py
|
mmolero/pcloudpy
|
c8e4b342f9180374db97af3d87d60ece683b7bc0
|
[
"BSD-3-Clause"
] | 39
|
2015-09-30T18:59:22.000Z
|
2020-10-28T01:52:41.000Z
|
pcloudpy/gui/graphics/QVTKWindow.py
|
mmolero/pcloudpy
|
c8e4b342f9180374db97af3d87d60ece683b7bc0
|
[
"BSD-3-Clause"
] | 3
|
2017-01-05T20:53:54.000Z
|
2017-11-30T06:57:13.000Z
|
pcloudpy/gui/graphics/QVTKWindow.py
|
mmolero/pcloudpy
|
c8e4b342f9180374db97af3d87d60ece683b7bc0
|
[
"BSD-3-Clause"
] | 19
|
2017-01-05T20:33:59.000Z
|
2021-09-25T09:19:28.000Z
|
#Author: Miguel Molero <[email protected]>
if __name__ == "__main__":
from vtk import vtkConeSource
from vtk import vtkPolyDataMapper, vtkActor
app = QApplication(['QVTKWindow'])
win = QVTKMainWindow()
cone = vtkConeSource()
cone.SetResolution(8)
coneMapper = vtkPolyDataMapper()
coneMapper.SetInput(cone.GetOutput())
coneActor = vtkActor()
coneActor.SetMapper(coneMapper)
win.vtkWidget.renderer.AddActor(coneActor)
# show the widget
win.show()
# start event processing
app.exec_()
| 26.972973
| 55
| 0.709419
|
#Author: Miguel Molero <[email protected]>
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from pcloudpy.gui.graphics.QVTKWidget import QVTKWidget
class QVTKMainWindow(QMainWindow):
def __init__(self, parent = None):
super(QVTKMainWindow, self).__init__(parent)
self.vtkWidget = QVTKWidget(self)
self.setCentralWidget(self.vtkWidget)
self.setWindowTitle("QVTKMainWindow")
self.setGeometry(50,50, 800,800)
if __name__ == "__main__":
from vtk import vtkConeSource
from vtk import vtkPolyDataMapper, vtkActor
app = QApplication(['QVTKWindow'])
win = QVTKMainWindow()
cone = vtkConeSource()
cone.SetResolution(8)
coneMapper = vtkPolyDataMapper()
coneMapper.SetInput(cone.GetOutput())
coneActor = vtkActor()
coneActor.SetMapper(coneMapper)
win.vtkWidget.renderer.AddActor(coneActor)
# show the widget
win.show()
# start event processing
app.exec_()
| 0
| 0
| 0
| 280
| 0
| 0
| 0
| 52
| 112
|
839732c105e90217381325571c730da38f09602e
| 3,377
|
py
|
Python
|
service/docs/source/conf.py
|
dannosliwcd/geopm
|
3ec0d223e700350ff37f6d10adde7b9bfbdba286
|
[
"BSD-3-Clause"
] | 2
|
2016-07-23T18:05:45.000Z
|
2020-07-24T17:55:24.000Z
|
service/docs/source/conf.py
|
dannosliwcd/geopm
|
3ec0d223e700350ff37f6d10adde7b9bfbdba286
|
[
"BSD-3-Clause"
] | null | null | null |
service/docs/source/conf.py
|
dannosliwcd/geopm
|
3ec0d223e700350ff37f6d10adde7b9bfbdba286
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2015 - 2021, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'GEOPM Service'
copyright = '2021, Intel (R) Corporation'
author = 'Intel (R) Corporation'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.napoleon',
'sphinx_rtd_theme',
]
napoleon_google_docstring = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_logo = 'https://geopm.github.io/images/geopm-logo-clear.png'
logo_only = True
| 39.267442
| 79
| 0.70151
|
# Copyright (c) 2015 - 2021, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY LOG OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import sphinx_rtd_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'GEOPM Service'
copyright = '2021, Intel (R) Corporation'
author = 'Intel (R) Corporation'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.napoleon',
'sphinx_rtd_theme',
]
napoleon_google_docstring = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_logo = 'https://geopm.github.io/images/geopm-logo-clear.png'
logo_only = True
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 22
|
7641bdb92e4dd1311b939ff97255a4f2bfe9d25c
| 1,295
|
py
|
Python
|
predict.py
|
don6105/OCR-Captcha-Recognition
|
f9d3088b4937218e2675ad19832cd6cdf333d683
|
[
"Apache-2.0"
] | null | null | null |
predict.py
|
don6105/OCR-Captcha-Recognition
|
f9d3088b4937218e2675ad19832cd6cdf333d683
|
[
"Apache-2.0"
] | null | null | null |
predict.py
|
don6105/OCR-Captcha-Recognition
|
f9d3088b4937218e2675ad19832cd6cdf333d683
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
import cv2
import numpy as np
import os
import process_img
import pickle
from shutil import copyfile
# model = cv2.ml.KNearest_create()
# model.load('model.xml')
model = cv2.ml.KNearest_load('model.xml')
img_area = 40 * 40
# Load the serialized content into memory
f = open('id_label_map.txt', 'rb')
try:
id_label_map = pickle.load(f)
except EOFError:
pass
f.close()
filenames = os.listdir('img')
for filename in filenames:
filelist = [ f for f in os.listdir('predict')]
for f in filelist:
os.remove(os.path.join('predict', f))
copyfile(os.path.join('img', filename), os.path.join('predict', filename))
img_captcha = cv2.imread(os.path.join('predict', filename))
process_img.run('predict', 'result', 1)
predict = sorted(os.listdir('result'))
r = []
for p in predict:
img = cv2.imread(os.path.join('result', p), cv2.IMREAD_GRAYSCALE)
sample = img.reshape((1, img_area)).astype(np.float32)
ret, results, neighbours, distances = model.findNearest(sample, k = 3)
label_id = int(results[0, 0])
label = id_label_map[label_id]
r.append(label)
print(' '.join(r))
cv2.imshow('image', img_captcha)
key = cv2.waitKey(0)
if key == 27:
exit()
else :
cv2.destroyAllWindows()
| 25.9
| 78
| 0.643243
|
#!/usr/bin/python3
import cv2
import numpy as np
import os
import process_img
import pickle
from shutil import copyfile
# model = cv2.ml.KNearest_create()
# model.load('model.xml')
model = cv2.ml.KNearest_load('model.xml')
img_area = 40 * 40
# Load the serialized content into memory
f = open('id_label_map.txt', 'rb')
try:
id_label_map = pickle.load(f)
except EOFError:
pass
f.close()
filenames = os.listdir('img')
for filename in filenames:
filelist = [ f for f in os.listdir('predict')]
for f in filelist:
os.remove(os.path.join('predict', f))
copyfile(os.path.join('img', filename), os.path.join('predict', filename))
img_captcha = cv2.imread(os.path.join('predict', filename))
process_img.run('predict', 'result', 1)
predict = sorted(os.listdir('result'))
r = []
for p in predict:
img = cv2.imread(os.path.join('result', p), cv2.IMREAD_GRAYSCALE)
sample = img.reshape((1, img_area)).astype(np.float32)
ret, results, neighbours, distances = model.findNearest(sample, k = 3)
label_id = int(results[0, 0])
label = id_label_map[label_id]
r.append(label)
print(' '.join(r))
cv2.imshow('image', img_captcha)
key = cv2.waitKey(0)
if key == 27:
exit()
else :
cv2.destroyAllWindows()
| 39
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
bd4cecd93bd9c57a578f054d6684869c8da3f50d
| 210
|
py
|
Python
|
AlgorithmTest/PROGRAMMERS_PYTHON/Lv1/Prog_12934.py
|
bluesky0960/AlgorithmTest
|
35e6c01b1c25bf13d4c034c047f3dd3b67f1578e
|
[
"MIT"
] | null | null | null |
AlgorithmTest/PROGRAMMERS_PYTHON/Lv1/Prog_12934.py
|
bluesky0960/AlgorithmTest
|
35e6c01b1c25bf13d4c034c047f3dd3b67f1578e
|
[
"MIT"
] | null | null | null |
AlgorithmTest/PROGRAMMERS_PYTHON/Lv1/Prog_12934.py
|
bluesky0960/AlgorithmTest
|
35e6c01b1c25bf13d4c034c047f3dd3b67f1578e
|
[
"MIT"
] | null | null | null |
#https://programmers.co.kr/learn/courses/30/lessons/12934
| 23.333333
| 57
| 0.533333
|
#https://programmers.co.kr/learn/courses/30/lessons/12934
def solution(n):
answer = 0
if int(n**0.5)**2 == n:
answer = (int(n**0.5)+1)**2
else:
answer = -1
return answer
| 0
| 0
| 0
| 0
| 0
| 131
| 0
| 0
| 22
|
be56fe3a0855a11c83234a8075eb53f6c7ee860e
| 12,531
|
py
|
Python
|
main.py
|
sem-onyalo/knowledge-graph-loader
|
7beadc3fe0f159e5386639d8fa9aeccffa23950c
|
[
"MIT"
] | null | null | null |
main.py
|
sem-onyalo/knowledge-graph-loader
|
7beadc3fe0f159e5386639d8fa9aeccffa23950c
|
[
"MIT"
] | null | null | null |
main.py
|
sem-onyalo/knowledge-graph-loader
|
7beadc3fe0f159e5386639d8fa9aeccffa23950c
|
[
"MIT"
] | null | null | null |
from pyopenie import OpenIE5
from queue import Queue
from spacy.lang.en import English
from typing import List
ENCODING = "utf-8"
DATA_DIRECTORY = "./data"
CACHE_DIRECTORY = "cache/"
CACHED_CONNECTIONS_FILE = "entity_connections.cache"
CACHED_FILTERED_CONNECTIONS_FILE = "entity_connections_filtered.cache"
QUEUE_WAIT_TIMEOUT = 5
CONNECTION_BUILDER_THREADS = 5
RELATIONSHIP_EXTRACTION_SERVICE_RETRIES = 5
RELATIONSHIP_EXTRACTION_SERVICE_TIMEOUT = 3
RELATIONSHIP_EXTRACTION_SERVICE_URL = 'http://localhost:8000'
NEO4J_URL = "bolt://localhost:7687"
NEO4J_CREDENTIALS_FILE = ".credentials"
GRAPH_LOADER_THREADS = 1
nlp:English = None
extractor:OpenIE5 = None
sentence_queue:Queue = None
connection_list:List[EntityConnection] = None
query_queue:Queue = None
loader:Loader = None
connection_cache_source:int = 0
if __name__ == "__main__":
main()
| 34.905292
| 168
| 0.677201
|
import csv
import logging
import neo4j
import os
import uuid
from concurrent.futures import ThreadPoolExecutor
from pyopenie import OpenIE5
from queue import Empty, Queue
from spacy.lang.en import English
from time import sleep
from typing import List
ENCODING = "utf-8"
DATA_DIRECTORY = "./data"
CACHE_DIRECTORY = "cache/"
CACHED_CONNECTIONS_FILE = "entity_connections.cache"
CACHED_FILTERED_CONNECTIONS_FILE = "entity_connections_filtered.cache"
QUEUE_WAIT_TIMEOUT = 5
CONNECTION_BUILDER_THREADS = 5
RELATIONSHIP_EXTRACTION_SERVICE_RETRIES = 5
RELATIONSHIP_EXTRACTION_SERVICE_TIMEOUT = 3
RELATIONSHIP_EXTRACTION_SERVICE_URL = 'http://localhost:8000'
NEO4J_URL = "bolt://localhost:7687"
NEO4J_CREDENTIALS_FILE = ".credentials"
GRAPH_LOADER_THREADS = 1
class Document:
file_name:str
sentences:list
def __init__(self, file_name, sentences) -> None:
self.file_name = file_name
self.sentences = sentences
class DocumentSentence:
document:Document
sentence:str
def __init__(self, document, sentence) -> None:
self.document = document
self.sentence = sentence
class EntityConnection:
from_entity:str
to_entity:str
relationship:str
confidence:float
file_name:str
def __str__(self) -> str:
return f"from_entity={self.from_entity}, to_entity={self.to_entity}, relationship={self.relationship}, confidence={self.confidence}, file_name={self.file_name}"
def __eq__(self, __o: object) -> bool:
if isinstance(__o, self.__class__):
other:EntityConnection = __o
return (self.from_entity == other.from_entity
and self.to_entity == other.to_entity
and self.relationship == other.relationship
and self.confidence == other.confidence
and self.file_name == other.file_name)
else:
return False
class Neo4jAuth:
url:str
username:str
password:str
def __init__(self) -> None:
self.url = NEO4J_URL
with open(NEO4J_CREDENTIALS_FILE, encoding=ENCODING) as fd:
self.username = fd.readline().strip()
self.password = fd.readline().strip()
class Loader:
auth:Neo4jAuth
def __init__(self, auth:Neo4jAuth) -> None:
self.auth = auth
self.driver = neo4j.GraphDatabase.driver(self.auth.url, auth=(self.auth.username, self.auth.password))
def load_queries(self, queries:Queue) -> None:
with ThreadPoolExecutor(max_workers=GRAPH_LOADER_THREADS) as executor:
args = ((uuid.uuid4(), queries) for _ in range(GRAPH_LOADER_THREADS))
futures = executor.map(lambda p: self.load_query(*p), args)
for future in futures:
logging.debug(f"Load query thread result {future}")
def load_query(self, threadId:str, queries:Queue) -> None:
logging.info(f"[{threadId}] Loader thread started")
queries_loaded = 0
while True:
try:
query:str = queries.get(timeout=QUEUE_WAIT_TIMEOUT)
except Empty:
logging.info(f"[{threadId}] Loader thread exiting, queue empty, processed {queries_loaded} queries")
return queries_loaded, threadId
with self.driver.session() as session:
session.write_transaction((lambda tx, query: tx.run(query)), query)
queries_loaded += 1
nlp:English = None
extractor:OpenIE5 = None
sentence_queue:Queue = None
connection_list:List[EntityConnection] = None
query_queue:Queue = None
loader:Loader = None
connection_cache_source:int = 0
def init_logger(level=logging.DEBUG):
logging.basicConfig(
format="[%(asctime)s]\t[%(levelname)s]\t[%(name)s]\t%(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=level,
)
def init_cache():
cache_dir = os.path.join(DATA_DIRECTORY, CACHE_DIRECTORY)
if not os.path.isdir(cache_dir):
os.mkdir(cache_dir)
def init_sentencizer() -> None:
global nlp
nlp = English()
nlp.add_pipe("sentencizer")
def init_sentence_queue() -> None:
global sentence_queue
sentence_queue = Queue()
def init_connection_list() -> None:
global connection_list
cache_connections = get_cache_connections()
connection_list = cache_connections if cache_connections != None else list()
def init_query_queue() -> None:
global query_queue
query_queue = Queue()
def init_relationship_extractor() -> None:
global extractor
extractor = OpenIE5(RELATIONSHIP_EXTRACTION_SERVICE_URL)
def init_loader() -> None:
global loader
auth = Neo4jAuth()
loader = Loader(auth)
def cache_data(file:str) -> None:
path = os.path.join(DATA_DIRECTORY, CACHE_DIRECTORY, file)
with open(path, mode="w", encoding=ENCODING) as fd:
writer = csv.writer(fd)
for c in connection_list:
row = [c.from_entity, c.to_entity, c.relationship, c.confidence, c.file_name]
# TODO: fix extra CRLF at end of line
writer.writerow(row)
def cache_connections() -> None:
cache_data(CACHED_CONNECTIONS_FILE)
def cache_filtered_connections() -> None:
cache_data(CACHED_FILTERED_CONNECTIONS_FILE)
def get_cache_connections() -> List[EntityConnection]:
FROM_ENTITY_IDX = 0
TO_ENTITY_IDX = 1
RELATIONSHIP_IDX = 2
CONFIDENCE_IDX = 3
FILE_NAME_IDX = 4
path = os.path.join(DATA_DIRECTORY, CACHE_DIRECTORY, CACHED_CONNECTIONS_FILE)
if os.path.isfile(path):
connections = list()
with open(path, mode="r", encoding=ENCODING) as fd:
reader = csv.reader(fd)
for row in reader:
if len(row) == 0:
continue
connection = EntityConnection()
connection.from_entity = row[FROM_ENTITY_IDX]
connection.to_entity = row[TO_ENTITY_IDX]
connection.relationship = row[RELATIONSHIP_IDX]
connection.confidence = float(row[CONFIDENCE_IDX])
connection.file_name = row[FILE_NAME_IDX]
connections.append(connection)
return connections
def extract_sentences_from_data(data) -> list:
document = nlp(data)
return [s.text for s in document.sents]
def extract_data_from_file(file_path) -> str:
with open(file_path, encoding=ENCODING) as fd:
data = fd.read()
return data
def build_documents_from_files(data_files) -> List[Document]:
documents = list()
for data_file in data_files:
data = extract_data_from_file(data_file)
sentences = extract_sentences_from_data(data)
documents.append(Document(data_file, sentences))
return documents
def build_connection_from_extraction(extraction:dict, document:Document) -> None:
if len(extraction["extraction"]["arg2s"]) > 0:
connection = EntityConnection()
connection.from_entity = extraction["extraction"]["arg1"]["text"]
# TODO: add logic for handling multiple arg2s
connection.to_entity = extraction["extraction"]["arg2s"][0]["text"]
connection.relationship = extraction["extraction"]["rel"]["text"]
connection.confidence = float(extraction["confidence"])
connection.file_name = os.path.basename(document.file_name.replace("\\", os.sep))
connection_list.append(connection)
def build_connections_from_document(threadId:str) -> None:
logging.info(f"[{threadId}] Connection builder thread started")
sentences_processed = 0
while True:
try:
docSentence:DocumentSentence = sentence_queue.get(timeout=QUEUE_WAIT_TIMEOUT)
except Empty:
logging.info(f"[{threadId}] Connection builder thread exiting, queue empty, processed {sentences_processed} sentences")
return sentences_processed, threadId
got_extractions = False
current_try = RELATIONSHIP_EXTRACTION_SERVICE_RETRIES
while current_try > 0:
try:
extractions = extractor.extract(docSentence.sentence)
got_extractions = True
sentences_processed += 1
break
except Exception as e:
logging.debug(f"[{threadId}] Connection builder thread service exception on try {current_try}: {e}")
sleep(RELATIONSHIP_EXTRACTION_SERVICE_TIMEOUT)
current_try -= 1
if not got_extractions:
logging.error(f"[{threadId}] Connection builder thread skipping item, could not process sentence: {docSentence.sentence}")
continue
for extraction in extractions:
build_connection_from_extraction(extraction, docSentence.document)
def build_connections_from_documents(documents:List[Document]) -> List[EntityConnection]:
if len(connection_list) > 0:
logging.info("Skipping build connections, list populated by cache")
return
sentences_count = 0
for document in documents:
for sentence in document.sentences:
sentence_queue.put(DocumentSentence(document, sentence))
sentences_count += 1
sentences_processed = 0
with ThreadPoolExecutor(max_workers=CONNECTION_BUILDER_THREADS) as executor:
threadIds = [uuid.uuid4() for _ in range(CONNECTION_BUILDER_THREADS)]
futures = executor.map(build_connections_from_document, threadIds)
for future in futures:
logging.debug(f"Thread result {future}")
sentences_processed += int(future[0])
logging.info(f"{sentences_processed} of {sentences_count} sentences processed")
cache_connections()
def filter_connections_stop_words(connections:List[EntityConnection]):
i = 0
items_removed = 0
current_length = len(connections)
while i < current_length:
connection = connections[i]
if connection.from_entity.lower() in nlp.Defaults.stop_words or connection.to_entity.lower() in nlp.Defaults.stop_words:
logging.debug(f"removing connection for stop word: {connection}")
connections.remove(connection)
items_removed += 1
current_length -= 1
else:
i += 1
logging.info(f"{items_removed} entity connections removed because of stop words")
def filter_connections_dups(connections:List[EntityConnection]):
i = 0
items_removed = 0
no_dup_list = list()
current_length = len(connections)
while i < current_length:
connection = connections[i]
if connection in no_dup_list:
logging.debug(f"removing connection for duplicate: {connection}")
connections.remove(connection)
items_removed += 1
current_length -= 1
else:
i += 1
no_dup_list.append(connection)
logging.info(f"{items_removed} entity connections removed because of duplicates")
def filter_connections(connections:List[EntityConnection]):
length_before = len(connections)
filter_connections_dups(connections)
filter_connections_stop_words(connections)
length_after = len(connections)
logging.info(f"New length after filters: {length_after}, {length_before - length_after} items removed")
cache_filtered_connections()
def build_queries_from_connections(connections:List[EntityConnection], queries:Queue) -> None:
for connection in connections:
from_entity = connection.from_entity.replace('"', '\\"')
to_entity = connection.to_entity.replace('"', '\\"')
relationship = connection.relationship.replace('"', '\\"')
query = ""
query += f'MERGE (f:Entity {{ name: "{from_entity}" }}) '
query += f'MERGE (t:Entity {{ name: "{to_entity}" }}) '
query += f'MERGE (f)-[:RELATION {{ name: "{relationship}", confidence: {connection.confidence} }}]->(t);'
queries.put(query)
logging.debug(f"Built query {query}")
def main():
init_logger()
init_cache()
init_sentencizer()
init_sentence_queue()
init_connection_list()
init_query_queue()
init_relationship_extractor()
init_loader()
data_files = [os.path.join(DATA_DIRECTORY, f) for f in os.listdir(DATA_DIRECTORY) if os.path.isfile(os.path.join(DATA_DIRECTORY, f))]
documents = build_documents_from_files(data_files)
build_connections_from_documents(documents)
filter_connections(connection_list)
build_queries_from_connections(connection_list, query_queue)
loader.load_queries(query_queue)
if __name__ == "__main__":
main()
| 0
| 0
| 0
| 2,538
| 0
| 8,360
| 0
| -13
| 798
|
9c2ab7ec270dc8209d5b75adadfdc279f77d4441
| 270
|
py
|
Python
|
polls/scraping_db/wadi_fashion.py
|
young-ha713/TeamProject
|
f98bbfbb7cab1b292f83f48a926dc6fd8b3eaf84
|
[
"Apache-2.0"
] | null | null | null |
polls/scraping_db/wadi_fashion.py
|
young-ha713/TeamProject
|
f98bbfbb7cab1b292f83f48a926dc6fd8b3eaf84
|
[
"Apache-2.0"
] | null | null | null |
polls/scraping_db/wadi_fashion.py
|
young-ha713/TeamProject
|
f98bbfbb7cab1b292f83f48a926dc6fd8b3eaf84
|
[
"Apache-2.0"
] | 2
|
2021-08-12T01:51:32.000Z
|
2021-08-17T05:16:37.000Z
|
import pandas as pd
df = pd.read_excel('C:/Users/gkdud/PycharmProjects/TeamProject/Scraping/files/fashion_scraping.xlsx')
import sqlite3
connect = sqlite3.connect('./wadizdb.sqlite3')
df.to_sql('table_fashion', connect, if_exists='append', index=False)
connect.close()
| 33.75
| 101
| 0.788889
|
import pandas as pd
df = pd.read_excel('C:/Users/gkdud/PycharmProjects/TeamProject/Scraping/files/fashion_scraping.xlsx')
import sqlite3
connect = sqlite3.connect('./wadizdb.sqlite3')
df.to_sql('table_fashion', connect, if_exists='append', index=False)
connect.close()
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
15506c9d3b917a6a1bc46dffb7f880578de51951
| 5,177
|
py
|
Python
|
static_compress/mixin.py
|
RentFreeMedia/django-static-compress
|
b56940b9246714401bdd0b24c2f9595419dc6671
|
[
"MIT"
] | 8
|
2017-10-23T07:32:43.000Z
|
2019-12-16T16:25:02.000Z
|
static_compress/mixin.py
|
RentFreeMedia/django-static-compress
|
b56940b9246714401bdd0b24c2f9595419dc6671
|
[
"MIT"
] | 90
|
2018-06-02T07:37:29.000Z
|
2022-03-31T13:01:24.000Z
|
static_compress/mixin.py
|
RentFreeMedia/django-static-compress
|
b56940b9246714401bdd0b24c2f9595419dc6671
|
[
"MIT"
] | 8
|
2018-07-25T13:56:40.000Z
|
2022-02-11T17:18:17.000Z
|
from . import compressors
__all__ = ["CompressMixin"]
DEFAULT_METHODS = ["gz", "br"]
METHOD_MAPPING = {
"gz": compressors.ZopfliCompressor,
"br": compressors.BrotliCompressor,
"gz+zlib": compressors.ZlibCompressor,
# gz+zlib and gz cannot be used at the same time, because they produce the same file extension.
}
| 40.131783
| 114
| 0.618119
|
import os
from os.path import getatime, getctime, getmtime
import errno
from django.core.exceptions import ImproperlyConfigured
from . import compressors
__all__ = ["CompressMixin"]
DEFAULT_METHODS = ["gz", "br"]
METHOD_MAPPING = {
"gz": compressors.ZopfliCompressor,
"br": compressors.BrotliCompressor,
"gz+zlib": compressors.ZlibCompressor,
# gz+zlib and gz cannot be used at the same time, because they produce the same file extension.
}
class CompressMixin:
allowed_extensions = []
compress_methods = []
keep_original = True
compressors = []
minimum_kb = 0
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# We access Django settings lately here, to allow our app to be imported without
# defining DJANGO_SETTINGS_MODULE.
from django.conf import settings
self.allowed_extensions = getattr(settings, "STATIC_COMPRESS_FILE_EXTS", ["js", "css", "svg"])
self.compress_methods = getattr(settings, "STATIC_COMPRESS_METHODS", DEFAULT_METHODS)
self.keep_original = getattr(settings, "STATIC_COMPRESS_KEEP_ORIGINAL", True)
self.minimum_kb = getattr(settings, "STATIC_COMPRESS_MIN_SIZE_KB", 30)
valid = [i for i in self.compress_methods if i in METHOD_MAPPING]
if not valid:
raise ImproperlyConfigured("No valid method is defined in STATIC_COMPRESS_METHODS setting.")
if "gz" in valid and "gz+zlib" in valid:
raise ImproperlyConfigured("STATIC_COMPRESS_METHODS: gz and gz+zlib cannot be used at the same time.")
self.compressors = [METHOD_MAPPING[k]() for k in valid]
def get_alternate_compressed_path(self, name):
for compressor in self.compressors:
ext = compressor.extension
if name.endswith(".{}".format(ext)):
path = self.path(name)
else:
path = self.path("{}.{}".format(name, ext))
if os.path.exists(path):
return path
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), path)
def get_accessed_time(self, name):
if self.keep_original:
return super().get_accessed_time(name)
return self._datetime_from_timestamp(getatime(self.get_alternate_compressed_path(name)))
def get_created_time(self, name):
if self.keep_original:
return super().get_created_time(name)
return self._datetime_from_timestamp(getctime(self.get_alternate_compressed_path(name)))
def get_modified_time(self, name):
if self.keep_original:
return super().get_modified_time(name)
alt = self.get_alternate_compressed_path(name)
return self._datetime_from_timestamp(getmtime(alt))
def post_process(self, paths, dry_run=False, **options):
if hasattr(super(), "post_process"):
yield from super().post_process(paths, dry_run, **options)
if dry_run:
return
for name in paths.keys():
if not self._is_file_allowed(name):
continue
source_storage, path = paths[name]
# Process if file is big enough
if os.path.getsize(self.path(path)) < self.minimum_kb * 1024:
continue
src_mtime = source_storage.get_modified_time(path)
dest_path = self._get_dest_path(path)
with self._open(dest_path) as file:
for compressor in self.compressors:
dest_compressor_path = "{}.{}".format(dest_path, compressor.extension)
# Check if the original file has been changed.
# If not, no need to compress again.
full_compressed_path = self.path(dest_compressor_path)
try:
dest_mtime = self._datetime_from_timestamp(getmtime(full_compressed_path))
file_is_unmodified = dest_mtime.replace(microsecond=0) >= src_mtime.replace(microsecond=0)
except FileNotFoundError:
file_is_unmodified = False
if file_is_unmodified:
continue
# Delete old gzip file, or Nginx will pick the old file to serve.
# Note: Django won't overwrite the file, so we have to delete it ourselves.
if self.exists(dest_compressor_path):
self.delete(dest_compressor_path)
out = compressor.compress(path, file)
if out:
self._save(dest_compressor_path, out)
if not self.keep_original:
self.delete(name)
yield dest_path, dest_compressor_path, True
file.seek(0)
def _get_dest_path(self, path):
if hasattr(self, "hashed_name"):
return self.hashed_name(path)
return path
def _is_file_allowed(self, file):
for extension in self.allowed_extensions:
if file.endswith("." + extension):
return True
return False
| 0
| 0
| 0
| 4,691
| 0
| 0
| 0
| 40
| 112
|
0069c9e2e22ac4791dcf0c3156a7d75e7be45e71
| 346
|
py
|
Python
|
old files/problem0009.py
|
kmarcini/Project-Euler-Python
|
d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3
|
[
"BSD-3-Clause"
] | null | null | null |
old files/problem0009.py
|
kmarcini/Project-Euler-Python
|
d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3
|
[
"BSD-3-Clause"
] | null | null | null |
old files/problem0009.py
|
kmarcini/Project-Euler-Python
|
d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3
|
[
"BSD-3-Clause"
] | null | null | null |
###########################
# Project Euler Problem 9
# Special Pythagorean triplet
#
# Code by Kevin Marciniak
###########################
total = 1000
product = 0
for c in range(1, 1000):
for b in range(1, c):
for a in range(1, b):
if (a + b + c) == 1000:
if ((a * a) + (b * b)) == (c * c):
product = a * b * c
print(product)
| 18.210526
| 38
| 0.471098
|
###########################
# Project Euler Problem 9
# Special Pythagorean triplet
#
# Code by Kevin Marciniak
###########################
total = 1000
product = 0
for c in range(1, 1000):
for b in range(1, c):
for a in range(1, b):
if (a + b + c) == 1000:
if ((a * a) + (b * b)) == (c * c):
product = a * b * c
print(product)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
a147e22d5aeaabe35ccc4c56ea5539f536e24407
| 3,685
|
py
|
Python
|
lbrynet/wallet/ledger.py
|
ttkopec/lbry
|
03415415ed397730e6f691f527f51b429a834ed5
|
[
"MIT"
] | null | null | null |
lbrynet/wallet/ledger.py
|
ttkopec/lbry
|
03415415ed397730e6f691f527f51b429a834ed5
|
[
"MIT"
] | 110
|
2018-11-26T05:41:35.000Z
|
2021-08-03T15:37:20.000Z
|
lbrynet/wallet/ledger.py
|
ttkopec/lbry
|
03415415ed397730e6f691f527f51b429a834ed5
|
[
"MIT"
] | 1
|
2018-09-20T22:15:59.000Z
|
2018-09-20T22:15:59.000Z
|
import logging
log = logging.getLogger(__name__)
| 34.12037
| 101
| 0.735414
|
import logging
from six import int2byte
from binascii import unhexlify
from twisted.internet import defer
from .resolve import Resolver
from lbryschema.error import URIParseError
from lbryschema.uri import parse_lbry_uri
from torba.baseledger import BaseLedger
from .account import Account
from .network import Network
from .database import WalletDatabase
from .transaction import Transaction
from .header import Headers, UnvalidatedHeaders
log = logging.getLogger(__name__)
class MainNetLedger(BaseLedger):
name = 'LBRY Credits'
symbol = 'LBC'
network_name = 'mainnet'
account_class = Account
database_class = WalletDatabase
headers_class = Headers
network_class = Network
transaction_class = Transaction
secret_prefix = int2byte(0x1c)
pubkey_address_prefix = int2byte(0x55)
script_address_prefix = int2byte(0x7a)
extended_public_key_prefix = unhexlify('0488b21e')
extended_private_key_prefix = unhexlify('0488ade4')
max_target = 0x0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
genesis_hash = '9c89283ba0f3227f6c03b70216b9f665f0118d5e0fa729cedf4fb34d6a34f463'
genesis_bits = 0x1f00ffff
target_timespan = 150
default_fee_per_byte = 50
default_fee_per_name_char = 200000
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fee_per_name_char = self.config.get('fee_per_name_char', self.default_fee_per_name_char)
@property
def resolver(self):
return Resolver(self.headers.claim_trie_root, self.headers.height, self.transaction_class,
hash160_to_address=self.hash160_to_address, network=self.network)
@defer.inlineCallbacks
def resolve(self, page, page_size, *uris):
for uri in uris:
try:
parse_lbry_uri(uri)
except URIParseError as err:
defer.returnValue({'error': err.message})
resolutions = yield self.network.get_values_for_uris(self.headers.hash().decode(), *uris)
return (yield self.resolver._handle_resolutions(resolutions, uris, page, page_size))
@defer.inlineCallbacks
def get_claim_by_claim_id(self, claim_id):
result = (yield self.network.get_claims_by_ids(claim_id)).pop(claim_id, {})
return (yield self.resolver.get_certificate_and_validate_result(result))
@defer.inlineCallbacks
def get_claim_by_outpoint(self, txid, nout):
claims = (yield self.network.get_claims_in_tx(txid)) or []
for claim in claims:
if claim['nout'] == nout:
return (yield self.resolver.get_certificate_and_validate_result(claim))
return 'claim not found'
@defer.inlineCallbacks
def start(self):
yield super().start()
yield defer.DeferredList([
a.maybe_migrate_certificates() for a in self.accounts
])
class TestNetLedger(MainNetLedger):
network_name = 'testnet'
pubkey_address_prefix = int2byte(111)
script_address_prefix = int2byte(196)
extended_public_key_prefix = unhexlify('043587cf')
extended_private_key_prefix = unhexlify('04358394')
class RegTestLedger(MainNetLedger):
network_name = 'regtest'
headers_class = UnvalidatedHeaders
pubkey_address_prefix = int2byte(111)
script_address_prefix = int2byte(196)
extended_public_key_prefix = unhexlify('043587cf')
extended_private_key_prefix = unhexlify('04358394')
max_target = 0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
genesis_hash = '6e3fcf1299d4ec5d79c3a4c91d624a4acf9e2e173d95a1a0504f677669687556'
genesis_bits = 0x207fffff
target_timespan = 1
| 0
| 1,299
| 0
| 1,833
| 0
| 0
| 0
| 162
| 337
|
226c32b59ca6bfd5663903c81b43098c8f2f31df
| 1,870
|
py
|
Python
|
emontranslator_v0.py
|
mkaiserpm/emonpython
|
f5e7d70b83f1c528fc485556464ce1b4f8553d9b
|
[
"MIT"
] | null | null | null |
emontranslator_v0.py
|
mkaiserpm/emonpython
|
f5e7d70b83f1c528fc485556464ce1b4f8553d9b
|
[
"MIT"
] | null | null | null |
emontranslator_v0.py
|
mkaiserpm/emonpython
|
f5e7d70b83f1c528fc485556464ce1b4f8553d9b
|
[
"MIT"
] | null | null | null |
'''
Created on 01.05.2017
@author: mario
Emontranslator
Receive messages from serial/uart
Generate JSON Emon Input Messages
Insert via EMON API / APIKEY to emoncms on locahost (running on pi)
'''
import serial
import httplib
import time
domain = "localhost"
emoncmspath = "emoncms"
apikey = "2eba96e51f6b41534f52110ad063b0c8"
nodeid = 10
conn = httplib.HTTPConnection(domain)
# Set this to the serial port of your emontx and baud rate, 9600 is standard emontx baud rate
ser = serial.Serial('/dev/ttyS0', 9600)
while 1:
try:
# Read in line of readings from serial / uart
linestr = ser.readline()
linestr = linestr.rstrip()
#print linestr
nodeid,temp,humid,voltage=parseLine(linestr)
if nodeid:
params = ("{temp:%.2f,humid:%.2f,voltage:%.2f}"%(temp,humid,voltage))
print params
print "nodeid:"+str(nodeid)
# Send to emoncms
conn.connect()
conn.request("GET", "/"+emoncmspath+"/input/post.json?&node="+str(nodeid)+"&json="+params+"&apikey="+apikey)
response = conn.getresponse()
print response.read()
except KeyboardInterrupt:
raise
except Exception as e:
print e.__doc__
print e.message
pass
time.sleep(1)
| 26.338028
| 120
| 0.578075
|
'''
Created on 01.05.2017
@author: mario
Emontranslator
Receive messages from serial/uart
Generate JSON Emon Input Messages
Insert via EMON API / APIKEY to emoncms on locahost (running on pi)
'''
import serial
import httplib
import time
domain = "localhost"
emoncmspath = "emoncms"
apikey = "2eba96e51f6b41534f52110ad063b0c8"
nodeid = 10
conn = httplib.HTTPConnection(domain)
# Set this to the serial port of your emontx and baud rate, 9600 is standard emontx baud rate
ser = serial.Serial('/dev/ttyS0', 9600)
def parseLine(linestr):
nodeid = None
temp = 0
humid = 0
voltage = 0
if "BAD-CRC" not in linestr:
if len(linestr) > 2:
data = linestr.split(" ")
print linestr
print data
nodeid = int(data[0])
temp = float(data[1])
temp = temp/ 100.
humid = float(data[2])
humid = humid / 100.
voltage = float(data[3])
voltage = voltage / 100.
return nodeid,temp,humid,voltage
while 1:
try:
# Read in line of readings from serial / uart
linestr = ser.readline()
linestr = linestr.rstrip()
#print linestr
nodeid,temp,humid,voltage=parseLine(linestr)
if nodeid:
params = ("{temp:%.2f,humid:%.2f,voltage:%.2f}"%(temp,humid,voltage))
print params
print "nodeid:"+str(nodeid)
# Send to emoncms
conn.connect()
conn.request("GET", "/"+emoncmspath+"/input/post.json?&node="+str(nodeid)+"&json="+params+"&apikey="+apikey)
response = conn.getresponse()
print response.read()
except KeyboardInterrupt:
raise
except Exception as e:
print e.__doc__
print e.message
pass
time.sleep(1)
| 0
| 0
| 0
| 0
| 0
| 529
| 0
| 0
| 23
|
515fea6b09cfd40afa2a167b2e7a719933d9dd52
| 3,970
|
py
|
Python
|
client.py
|
AvaCity/avacity-async
|
d600bf3914ab13c918d33a17b1c70df8d2af6913
|
[
"BSD-3-Clause"
] | 10
|
2020-08-14T03:41:13.000Z
|
2021-12-12T20:04:08.000Z
|
client.py
|
oopss1k/1
|
78fc1d2cdd001630d80a065a4243e1745f6ba876
|
[
"BSD-3-Clause"
] | 6
|
2020-08-28T17:27:55.000Z
|
2022-02-25T20:39:02.000Z
|
client.py
|
AvaCity/avacity-async
|
d600bf3914ab13c918d33a17b1c70df8d2af6913
|
[
"BSD-3-Clause"
] | 5
|
2020-08-13T20:40:16.000Z
|
2022-02-25T20:28:43.000Z
|
PUFFIN_SUB = ["107.178.32.0/20", "45.33.128.0/20", "101.127.206.0/23",
"101.127.208.0/23"]
| 32.276423
| 79
| 0.517884
|
import logging
import asyncio
import binascii
import time
import struct
from ipaddress import ip_network, ip_address
import protocol
import const
PUFFIN_SUB = ["107.178.32.0/20", "45.33.128.0/20", "101.127.206.0/23",
"101.127.208.0/23"]
def is_puffin(ip):
for net in PUFFIN_SUB:
net = ip_network(net)
if ip_address(ip) in net:
return True
return False
class Client():
def __init__(self, server):
self.server = server
self.user_data = {}
self.uid = None
self.drop = False
self.debug = False
self.encrypted = False
self.compressed = False
self.checksummed = False
self.room = ""
self.position = (0, 0)
self.dimension = 4
self.state = 0
self.action_tag = ""
self.canyon_lid = None
self.last_msg = time.time()
async def handle(self, reader, writer):
self.reader = reader
self.writer = writer
self.addr = writer.get_extra_info('peername')[0]
if not is_puffin(self.addr):
self.user_data["ip_address"] = self.addr
buffer = b""
while True:
await asyncio.sleep(0.2)
try:
data = await reader.read(1024)
except OSError:
break
if not data:
break
data = protocol.BytesWithPosition(buffer+data)
buffer = b""
if data.hex() == "3c706f6c6963792d66696c652d726571756573742f3e00":
writer.write(const.XML + b"\x00")
await writer.drain()
continue
while len(data) - data.pos > 4:
length = data.read_i32()
if len(data) - data.pos < length:
data.pos = 0
break
try:
final_data = protocol.processFrame(data.read(length), True)
except Exception:
print("Произошла ошибка у "+self.uid)
data.pos = len(data)
break
if final_data:
try:
await self.server.process_data(final_data, self)
except Exception as e:
logging.exception("Ошибка при обработке данных")
if len(data) - data.pos > 0:
buffer = data.read(len(data) - data.pos)
await self._close_connection()
async def send(self, msg, type_=34):
if self.drop:
return
data = struct.pack(">b", type_)
data += protocol.encodeArray(msg)
data = self._make_header(data) + data
try:
self.writer.write(data)
await self.writer.drain()
except (BrokenPipeError, ConnectionResetError, AssertionError,
TimeoutError, OSError, AttributeError):
self.writer.close()
def _make_header(self, msg):
header_length = 1
mask = 0
if self.encrypted:
mask |= (1 << 1)
if self.compressed:
mask |= (1 << 2)
if self.checksummed:
mask |= (1 << 3)
header_length += 4
buf = struct.pack(">i", len(msg)+header_length)
buf += struct.pack(">B", mask)
if self.checksummed:
buf += struct.pack(">I", binascii.crc32(msg))
return buf
async def _close_connection(self):
self.drop = True
self.writer.close()
if self.uid:
if self.uid in self.server.online:
del self.server.online[self.uid]
if self.room:
await self.server.modules["h"].leave_room(self)
if self.uid in self.server.inv:
self.server.inv[self.uid].expire = time.time()+30
await self.server.redis.set(f"uid:{self.uid}:lvt",
int(time.time()))
del self
| 80
| 0
| 2,484
| 1,017
| 0
| 129
| 0
| -30
| 222
|
6f9cf2d8cd0d99cb21323c1e981144539e4f1b93
| 255,228
|
py
|
Python
|
gbpservice/neutron/tests/unit/services/grouppolicy/test_apic_mapping.py
|
ashutosh-mishra/my-test
|
51c82af293f291b9182204392e7d21bda27786d1
|
[
"Apache-2.0"
] | null | null | null |
gbpservice/neutron/tests/unit/services/grouppolicy/test_apic_mapping.py
|
ashutosh-mishra/my-test
|
51c82af293f291b9182204392e7d21bda27786d1
|
[
"Apache-2.0"
] | null | null | null |
gbpservice/neutron/tests/unit/services/grouppolicy/test_apic_mapping.py
|
ashutosh-mishra/my-test
|
51c82af293f291b9182204392e7d21bda27786d1
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from opflexagent import constants as ocst
from gbpservice.neutron.plugins.ml2.drivers.grouppolicy.apic import driver
APIC_L2_POLICY = 'l2_policy'
APIC_L3_POLICY = 'l3_policy'
APIC_POLICY_RULE_SET = 'policy_rule_set'
APIC_POLICY_TARGET_GROUP = 'policy_target_group'
APIC_POLICY_RULE = 'policy_rule'
APIC_EXTERNAL_RID = '1.0.0.1'
APIC_EXTERNAL_EPG = 'ext-epg'
APIC_PRE_L3OUT_TENANT = 'common'
APIC_PRE_VRF_TENANT = APIC_PRE_L3OUT_TENANT
APIC_PRE_VRF = 'pre-vrf'
AGENT_TYPE = ocst.AGENT_TYPE_OPFLEX_OVS
AGENT_CONF = {'alive': True, 'binary': 'somebinary',
'topic': 'sometopic', 'agent_type': AGENT_TYPE,
'configurations': {'opflex_networks': None,
'bridge_mappings': {'physnet1': 'br-eth1'}}}
AGENT_TYPE_DVS = driver.AGENT_TYPE_DVS
AGENT_CONF_DVS = {'alive': True, 'binary': 'somebinary',
'topic': 'sometopic', 'agent_type': AGENT_TYPE_DVS,
'configurations': {'opflex_networks': None}}
BOOKED_PORT_VALUE = 'myBookedPort'
| 48.002257
| 79
| 0.584552
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import re
import sys
import mock
import netaddr
import webob.exc
from apic_ml2.neutron.db import port_ha_ipaddress_binding as ha_ip_db
from apic_ml2.neutron.tests.unit.ml2.drivers.cisco.apic import (
test_cisco_apic_common as mocked)
from apicapi import apic_mapper
from neutron.agent import securitygroups_rpc as sg_cfg
from neutron.common import rpc as n_rpc
from neutron import context
from neutron.db import api as db_api
from neutron.db import db_base_plugin_v2 as n_db
from neutron.db import model_base
from neutron.extensions import portbindings
from neutron import manager
from opflexagent import constants as ocst
from oslo_config import cfg
from oslo_serialization import jsonutils
from gbpservice.neutron.plugins.ml2.drivers.grouppolicy.apic import driver
from gbpservice.neutron.services.grouppolicy import (
group_policy_context as p_context)
from gbpservice.neutron.services.grouppolicy import config
from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import (
apic_mapping as amap)
from gbpservice.neutron.services.l3_router import l3_apic
from gbpservice.neutron.tests.unit.services.grouppolicy import (
test_resource_mapping as test_rmd)
APIC_L2_POLICY = 'l2_policy'
APIC_L3_POLICY = 'l3_policy'
APIC_POLICY_RULE_SET = 'policy_rule_set'
APIC_POLICY_TARGET_GROUP = 'policy_target_group'
APIC_POLICY_RULE = 'policy_rule'
APIC_EXTERNAL_RID = '1.0.0.1'
APIC_EXTERNAL_EPG = 'ext-epg'
APIC_PRE_L3OUT_TENANT = 'common'
APIC_PRE_VRF_TENANT = APIC_PRE_L3OUT_TENANT
APIC_PRE_VRF = 'pre-vrf'
AGENT_TYPE = ocst.AGENT_TYPE_OPFLEX_OVS
AGENT_CONF = {'alive': True, 'binary': 'somebinary',
'topic': 'sometopic', 'agent_type': AGENT_TYPE,
'configurations': {'opflex_networks': None,
'bridge_mappings': {'physnet1': 'br-eth1'}}}
AGENT_TYPE_DVS = driver.AGENT_TYPE_DVS
AGENT_CONF_DVS = {'alive': True, 'binary': 'somebinary',
'topic': 'sometopic', 'agent_type': AGENT_TYPE_DVS,
'configurations': {'opflex_networks': None}}
BOOKED_PORT_VALUE = 'myBookedPort'
def echo(context, string, prefix=''):
return prefix + string
class MockCallRecorder(mock.Mock):
recorded_call_set = set()
def __call__(self, *args, **kwargs):
self.recorded_call_set.add(self.generate_entry(*args, **kwargs))
return mock.Mock()
def call_happened_with(self, *args, **kwargs):
return self.generate_entry(*args, **kwargs) in self.recorded_call_set
def generate_entry(self, *args, **kwargs):
return args, tuple((x, kwargs[x]) for x in sorted(kwargs.keys()))
class ApicMappingTestCase(
test_rmd.ResourceMappingTestCase,
mocked.ControllerMixin, mocked.ConfigMixin):
def setUp(self, sc_plugin=None, nat_enabled=True,
pre_existing_l3out=False, default_agent_conf=True,
ml2_options=None):
self.saved_apicapi = sys.modules["apicapi"]
sys.modules["apicapi"] = mock.Mock()
if default_agent_conf:
self.agent_conf = AGENT_CONF
cfg.CONF.register_opts(sg_cfg.security_group_opts, 'SECURITYGROUP')
config.cfg.CONF.set_override('enable_security_group', False,
group='SECURITYGROUP')
n_rpc.create_connection = mock.Mock()
amap.ApicMappingDriver.get_apic_manager = mock.Mock(
return_value=mock.MagicMock(
name_mapper=mock.Mock(),
ext_net_dict={},
per_tenant_nat_epg=False))
self.set_up_mocks()
ml2_opts = ml2_options or {
'mechanism_drivers': ['apic_gbp'],
'type_drivers': ['opflex'],
'tenant_network_types': ['opflex']
}
mock.patch('gbpservice.neutron.services.grouppolicy.drivers.cisco.'
'apic.apic_mapping.ApicMappingDriver.'
'_setup_rpc_listeners').start()
nova_client = mock.patch(
'gbpservice.neutron.services.grouppolicy.drivers.cisco.'
'apic.nova_client.NovaClient.get_server').start()
vm = mock.Mock()
vm.name = 'someid'
nova_client.return_value = vm
super(ApicMappingTestCase, self).setUp(
policy_drivers=['implicit_policy', 'apic', 'chain_mapping'],
ml2_options=ml2_opts, sc_plugin=sc_plugin)
engine = db_api.get_engine()
model_base.BASEV2.metadata.create_all(engine)
plugin = manager.NeutronManager.get_plugin()
plugin.remove_networks_from_down_agents = mock.Mock()
plugin.is_agent_down = mock.Mock(return_value=False)
self.driver = manager.NeutronManager.get_service_plugins()[
'GROUP_POLICY'].policy_driver_manager.policy_drivers['apic'].obj
self.l3plugin = l3_apic.ApicGBPL3ServicePlugin()
amap.ApicMappingDriver.get_base_synchronizer = mock.Mock()
self.driver.name_mapper.name_mapper = mock.Mock()
self.driver.name_mapper.name_mapper.tenant = echo
self.driver.name_mapper.name_mapper.l2_policy = echo
self.driver.name_mapper.name_mapper.l3_policy = echo
self.driver.name_mapper.name_mapper.policy_rule_set = echo
self.driver.name_mapper.name_mapper.policy_rule = echo
self.driver.name_mapper.name_mapper.app_profile.return_value = (
mocked.APIC_AP)
self.driver.name_mapper.name_mapper.policy_target_group = echo
self.driver.name_mapper.name_mapper.external_policy = echo
self.driver.name_mapper.name_mapper.external_segment = echo
self.driver.name_mapper.name_mapper.pre_existing = echo
self.driver.apic_manager.apic.transaction = self.fake_transaction
self.driver.notifier = mock.Mock()
self.driver.apic_manager.ext_net_dict = {}
amap.apic_manager.TENANT_COMMON = 'common'
amap.apic_manager.CP_ENTRY = 'os-entry'
self.common_tenant = amap.apic_manager.TENANT_COMMON
self.nat_enabled = nat_enabled
self.driver.l3out_vlan_alloc = mock.Mock()
self.pre_l3out = pre_existing_l3out
self.non_apic_network = False
def echo2(string):
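            """Identity stub for the apicapi name helpers mocked below."""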
return string
if self.pre_l3out:
self.orig_query_l3out_info = self.driver._query_l3out_info
self.driver._query_l3out_info = mock.Mock()
self.driver._query_l3out_info.return_value = {
'l3out_tenant': apic_mapper.ApicName(APIC_PRE_L3OUT_TENANT),
'vrf_name': APIC_PRE_VRF,
'vrf_tenant': APIC_PRE_VRF_TENANT,
                # fake l3out response from APIC, for testing purposes only
'l3out': ([{u'l3extExtEncapAllocator': {}},
{u'l3extInstP': {}},
{u'l3extRtBDToOut': {}},
{u'l3extRsOutToBDPublicSubnetHolder': {}},
{u'l3extRsNdIfPol': {u'tDn': u'',
u'tnNdIfPolName': u''}},
{u'l3extRsDampeningPol':
{u'tDn': u'', u'tnRtctrlProfileName': u''}},
{u'ospfRsIfPol': {u'tDn': u'',
u'tnOspfIfPolName': u''}},
{u'l3extRsEngressQosDppPol':
{u'tDn': u'', u'tnQosDppPolName': u''}},
{u'bfdRsIfPol': {u'tDn': u'',
u'tnBfdIfPolName': u''}},
{u'bgpRsPeerPfxPol': {u'tDn': u'',
u'tnBgpPeerPfxPolName': u''}},
{u'eigrpRsIfPol': {u'tDn': u'',
u'tnEigrpIfPolName': u''}},
{u'l3extLNodeP': {u'attributes':
{u'dn': u'uni/tn-common/out-supported/lnodep-Leaf3-4_NP',
u'lcOwn': u'local', u'name': u'Leaf3-4_NP',
u'targetDscp': u'unspecified', u'configIssues': u'',
u'stateQual': u'', u'tCl': u'', u'tContextDn': u'',
u'tRn': u'', u'type': u'', u'rType': u'', u'state': u'',
u'forceResolve': u'', u'tag': u'yellow-green',
u'monPolDn': u'', u'modTs': u'', u'uid': u'15374',
u'encap': u'unknown', u'addr': u'0.0.0.0'},
u'children': [{u'l3extLIfP':
{u'children': [{u'l3extRsPathL3OutAtt':
{u'attributes':
{u'encap': u'vlan-3101',
u'ifInstT': u'sub-interface'
}}}]}}
]}},
{u'l3extRsEctx':
{u'attributes':
{u'dn': u'uni/tn-common/out-supported/rsectx',
u'tDn': u'', u'tnFvCtxName': u'default'}}}])}
self.trimmed_l3out = [{}, {}, {}, {},
{u'l3extRsNdIfPol':
{u'tnNdIfPolName': u''}},
{u'l3extRsDampeningPol':
{u'tnRtctrlProfileName': u''}},
{u'ospfRsIfPol': {u'tnOspfIfPolName': u''}},
{u'l3extRsEngressQosDppPol':
{u'tnQosDppPolName': u''}},
{u'bfdRsIfPol': {u'tnBfdIfPolName': u''}},
{u'bgpRsPeerPfxPol': {u'tnBgpPeerPfxPolName': u''}},
{u'eigrpRsIfPol': {u'tnEigrpIfPolName': u''}},
{u'l3extLNodeP':
{u'attributes':
{u'dn': u'uni/tn-test-tenant/out-Shd-Sub/lnodep-Leaf3-4_NP'},
u'children': [{u'l3extLIfP':
{u'children': [{u'l3extRsPathL3OutAtt':
{u'attributes':
{u'ifInstT':
u'sub-interface',
u'encap': 'vlan-999'
}}}]}}]}},
{u'l3extRsEctx':
{u'attributes':
{u'dn': u'uni/tn-test-tenant/out-Shd-Sub/rsectx',
u'tnFvCtxName': u'myl3p'}}}]
self.driver.apic_manager.apic.fvTenant.rn = echo2
self.driver.apic_manager.apic.l3extOut.rn = echo2
self.driver.l3out_vlan_alloc.reserve_vlan.return_value = 999
self.driver.apic_manager.apic.fvTenant.name = echo2
self.driver.apic_manager.apic.fvCtx.name = echo2
self._db_plugin = n_db.NeutronDbPluginV2()
def tearDown(self):
sys.modules["apicapi"] = self.saved_apicapi
super(ApicMappingTestCase, self).tearDown()
def _build_external_dict(self, name, cidr_exposed, is_edge_nat=False):
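        """Build one ext_net_dict entry for an external network.

        Marks the network as pre-existing when self.pre_l3out is set and
        adds the edge-NAT options when requested.
        """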
ext_info = {
'enable_nat': 'True' if self.nat_enabled else 'False'
}
if self.pre_l3out:
ext_info['preexisting'] = 'True'
ext_info['external_epg'] = APIC_EXTERNAL_EPG
else:
ext_info.update({
'switch': mocked.APIC_EXT_SWITCH,
'port': mocked.APIC_EXT_MODULE + '/' + mocked.APIC_EXT_PORT,
'encap': mocked.APIC_EXT_ENCAP,
'router_id': APIC_EXTERNAL_RID,
'gateway_ip': str(netaddr.IPNetwork(cidr_exposed)[1]),
'cidr_exposed': cidr_exposed})
if is_edge_nat:
ext_info['edge_nat'] = 'true'
ext_info['vlan_range'] = '2000:2010'
return {name: ext_info}
def _mock_external_dict(self, data, is_edge_nat=False):
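        """Install ext_net_dict entries on the mocked APIC manager from a
        list of (name, cidr_exposed) pairs.
        """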
self.driver.apic_manager.ext_net_dict = {}
for x in data:
self.driver.apic_manager.ext_net_dict.update(
self._build_external_dict(x[0], x[1], is_edge_nat=is_edge_nat))
def _create_simple_policy_rule(self, direction='bi', protocol='tcp',
port_range=80, shared=False,
action_type='allow', action_value=None):
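        """Create a classifier and an action, and return a policy rule that
        combines them.
        """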
cls = self.create_policy_classifier(
direction=direction, protocol=protocol,
port_range=port_range, shared=shared)['policy_classifier']
action = self.create_policy_action(
action_type=action_type, shared=shared,
action_value=action_value)['policy_action']
return self.create_policy_rule(
policy_classifier_id=cls['id'], policy_actions=[action['id']],
shared=shared)['policy_rule']
def _bind_port_to_host(self, port_id, host):
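        """Bind the port to the given host as a nova compute port."""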
data = {'port': {'binding:host_id': host,
'device_owner': 'compute:',
'device_id': 'someid'}}
return super(ApicMappingTestCase, self)._bind_port_to_host(
port_id, host, data=data)
def _bind_dhcp_port_to_host(self, port_id, host):
data = {'port': {'binding:host_id': host,
'device_owner': 'network:dhcp',
'device_id': 'someid'}}
return super(ApicMappingTestCase, self)._bind_port_to_host(
port_id, host, data=data)
class ApicMappingVlanTestCase(ApicMappingTestCase):
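    """Runs the mapping tests over VLAN (non-OpFlex) tenant networks, where
    the driver backs each PTG with a shadow network/subnet.
    """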
def setUp(self, **kwargs):
config.cfg.CONF.set_override(
'network_vlan_ranges', ['physnet1:100:200'], group='ml2_type_vlan')
kwargs['ml2_options'] = {
'mechanism_drivers': ['apic_gbp', 'openvswitch'],
'type_drivers': ['vlan'],
'tenant_network_types': ['vlan']
}
kwargs['default_agent_conf'] = False
super(ApicMappingVlanTestCase, self).setUp(**kwargs)
self.non_apic_network = True
def _get_ptg_shadow_net(self, ptg):
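        """Return the shadow network created for the PTG, if any."""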
net = self._list_resource('networks', self.api,
tenant_id=ptg['tenant_id'],
name=self.driver._get_ptg_shadow_network_name(ptg))
net = net['networks']
if net:
return net[0]
def _get_ptg_shadow_subnet(self, ptg):
shadow_net = self._get_ptg_shadow_net(ptg)
if shadow_net:
return shadow_net['subnets'][0]
class TestPolicyTarget(ApicMappingTestCase):
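    """Policy target tests: port lifecycle, get_gbp_details/RPC mappings,
    SNAT IP allocation and HA IP address ownership.
    """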
def test_policy_target_port_deleted_on_apic(self):
ptg = self.create_policy_target_group()['policy_target_group']
subnet = self._get_object('subnets',
self._get_ptg_shadow_subnet(ptg) if self.non_apic_network
else ptg['subnets'][0],
self.api)
with self.port(subnet=subnet) as port:
self._bind_port_to_host(port['port']['id'], 'h1')
pt = self.create_policy_target(
policy_target_group_id=ptg['id'], port_id=port['port']['id'])
self.delete_policy_target(pt['policy_target']['id'])
self.assertTrue(self.driver.notifier.port_update.called)
def test_policy_target_delete_no_port(self):
ptg = self.create_policy_target_group()['policy_target_group']
subnet = self._get_object('subnets',
self._get_ptg_shadow_subnet(ptg) if self.non_apic_network
else ptg['subnets'][0],
self.api)
with self.port(subnet=subnet) as port:
self._bind_port_to_host(port['port']['id'], 'h1')
pt = self.create_policy_target(
policy_target_group_id=ptg['id'], port_id=port['port']['id'])
res = self.new_delete_request('ports', port['port']['id'],
self.fmt).get_response(self.api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
self.delete_policy_target(pt['policy_target']['id'],
expected_res_status=404)
def test_delete_policy_target_notification_no_apic_network(self):
ptg = self.create_policy_target_group(
name="ptg1")['policy_target_group']
pt1 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self._bind_port_to_host(pt1['port_id'], 'h1')
# Implicit port will be deleted with the PT
self.delete_policy_target(pt1['id'], expected_res_status=204)
# No notification needed
self.assertFalse(self.driver.notifier.port_update.called)
self.driver.notifier.port_update.reset_mock()
subnet = self._get_object('subnets',
self._get_ptg_shadow_subnet(ptg) if self.non_apic_network
else ptg['subnets'][0],
self.api)
with self.port(subnet=subnet) as port:
# Create EP with bound port
port = self._bind_port_to_host(port['port']['id'], 'h1')
pt1 = self.create_policy_target(
policy_target_group_id=ptg['id'], port_id=port['port']['id'])
# Explicit port won't be deleted with PT
self.delete_policy_target(pt1['policy_target']['id'],
expected_res_status=204)
# Issue notification for the agent
self.assertTrue(self.driver.notifier.port_update.called)
def test_get_vrf_details(self):
l3p = self.create_l3_policy(name='myl3')['l3_policy']
details = self.driver.get_vrf_details(
context.get_admin_context(),
vrf_id=l3p['id'], host='h1')
self.assertEqual(l3p['id'], details['l3_policy_id'])
pool = set([l3p['ip_pool']])
if 'proxy_ip_pool' in l3p:
pool.add(l3p['proxy_ip_pool'])
self.assertEqual(pool, set(details['vrf_subnets']))
self.assertEqual(l3p['tenant_id'], details['vrf_tenant'])
self.assertEqual(l3p['id'], details['vrf_name'])
def _do_test_get_gbp_details(self):
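        """Shared body for the get_gbp_details tests.

        Verifies the port, subnet, floating IP, VRF and host SNAT details
        returned to the agent for a bound policy target.
        """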
self._mock_external_dict([('supported', '192.168.0.2/24')])
self.driver.apic_manager.ext_net_dict[
'supported']['host_pool_cidr'] = '192.168.200.1/24'
es = self.create_external_segment(name='supported',
cidr='192.168.0.2/24',
expected_res_status=201, shared=True)['external_segment']
self.create_nat_pool(external_segment_id=es['id'],
ip_pool='20.20.20.0/24')
l3p = self.create_l3_policy(name='myl3',
external_segments={es['id']: ['']})['l3_policy']
l2p = self.create_l2_policy(name='myl2',
l3_policy_id=l3p['id'])['l2_policy']
nsp = self.create_network_service_policy(
network_service_params=[
{"type": "ip_pool", "value": "nat_pool", "name": "test"}])[
'network_service_policy']
ptg = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p['id'],
network_service_policy_id=nsp['id'])['policy_target_group']
pt1 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self._bind_port_to_host(pt1['port_id'], 'h1')
mapping = self.driver.get_gbp_details(context.get_admin_context(),
device='tap%s' % pt1['port_id'], host='h1')
req_mapping = self.driver.request_endpoint_details(
context.get_admin_context(),
request={'device': 'tap%s' % pt1['port_id'], 'host': 'h1',
'timestamp': 0, 'request_id': 'request_id'})
self.assertEqual(mapping, req_mapping['gbp_details'])
self.assertEqual(pt1['port_id'], mapping['port_id'])
self.assertEqual(ptg['id'], mapping['endpoint_group_name'])
self.assertEqual('someid', mapping['vm-name'])
self.assertTrue(mapping['enable_dhcp_optimization'])
self.assertEqual(1, len(mapping['subnets']))
subnet = self._get_object('subnets', ptg['subnets'][0], self.api)
self.assertEqual(subnet['subnet']['cidr'],
mapping['subnets'][0]['cidr'])
self.assertEqual(1, len(mapping['floating_ip']))
fip = mapping['floating_ip'][0]
self.assertEqual(pt1['port_id'], fip['port_id'])
self.assertEqual("NAT-epg-%s" % es['id'], fip['nat_epg_name'])
self.assertEqual(
(es['tenant_id'] if self.driver.per_tenant_nat_epg
else self.common_tenant),
fip['nat_epg_tenant'])
self.assertEqual(l3p['tenant_id'], mapping['vrf_tenant'])
self.assertEqual(l3p['id'], mapping['vrf_name'])
if 'proxy_ip_pool' in l3p:
self.assertEqual([l3p['ip_pool'], l3p['proxy_ip_pool']],
mapping['vrf_subnets'])
else:
self.assertEqual([l3p['ip_pool']], mapping['vrf_subnets'])
self.assertEqual(1, len(mapping['host_snat_ips']))
self.assertEqual(es['name'],
mapping['host_snat_ips'][0]['external_segment_name'])
self.assertEqual("192.168.200.1",
mapping['host_snat_ips'][0]['gateway_ip'])
self.assertEqual("192.168.200.2",
mapping['host_snat_ips'][0]['host_snat_ip'])
self.assertEqual(24, mapping['host_snat_ips'][0]['prefixlen'])
# Verify Neutron details
self.assertEqual(pt1['port_id'],
req_mapping['neutron_details']['port_id'])
        # Simulate a get_gbp_details call from a second host (h2) to verify
        # that a SNAT port gets created for that host as well
pt2 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self._bind_port_to_host(pt2['port_id'], 'h1')
mapping = self.driver.get_gbp_details(context.get_admin_context(),
device='tap%s' % pt2['port_id'], host='h2')
self.assertEqual(pt2['port_id'], mapping['port_id'])
self.assertEqual(1, len(mapping['host_snat_ips']))
self.assertEqual(es['name'],
mapping['host_snat_ips'][0]['external_segment_name'])
self.assertEqual("192.168.200.1",
mapping['host_snat_ips'][0]['gateway_ip'])
self.assertEqual("192.168.200.3",
mapping['host_snat_ips'][0]['host_snat_ip'])
self.assertEqual(24, mapping['host_snat_ips'][0]['prefixlen'])
def test_get_gbp_details(self):
self._do_test_get_gbp_details()
def test_get_gbp_details_ptne(self):
self.driver.per_tenant_nat_epg = True
self._do_test_get_gbp_details()
def test_get_snat_ip_for_vrf(self):
TEST_VRF1 = 'testvrf1'
TEST_VRF2 = 'testvrf2'
self._mock_external_dict([('supported', '192.168.0.2/24')])
self.driver.apic_manager.ext_net_dict[
'supported']['host_pool_cidr'] = '192.168.200.1/24'
es = self.create_external_segment(name='supported',
cidr='192.168.0.2/24',
expected_res_status=201, shared=False)['external_segment']
self.create_nat_pool(external_segment_id=es['id'],
ip_pool='20.20.20.0/24')
l3p = self.create_l3_policy(name='myl3',
external_segments={es['id']: ['']})['l3_policy']
l2p = self.create_l2_policy(name='myl2',
l3_policy_id=l3p['id'])['l2_policy']
nsp = self.create_network_service_policy(
network_service_params=[
{"type": "ip_pool", "value": "nat_pool", "name": "test"}])[
'network_service_policy']
ptg = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p['id'],
network_service_policy_id=nsp['id'])['policy_target_group']
pt1 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self._bind_port_to_host(pt1['port_id'], 'h1')
subnet = self._db_plugin.get_subnet(context.get_admin_context(),
es['subnet_id'])
network = self._db_plugin.get_network(context.get_admin_context(),
subnet['network_id'])
details = self.driver.get_snat_ip_for_vrf(context.get_admin_context(),
TEST_VRF1, network, es_name=es['name'])
self.assertEqual(es['name'],
details['external_segment_name'])
self.assertEqual("192.168.200.1",
details['gateway_ip'])
self.assertEqual("192.168.200.2",
details['host_snat_ip'])
self.assertEqual(24, details['prefixlen'])
# Verify that the same VRF returns the same SNAT IP
details2 = self.driver.get_snat_ip_for_vrf(context.get_admin_context(),
TEST_VRF1, network, es_name=es['name'])
self.assertEqual(details, details2)
# Create event on a second VRF to verify that the SNAT
# port gets created for this second VRF
pt2 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self._bind_port_to_host(pt2['port_id'], 'h1')
details = self.driver.get_snat_ip_for_vrf(context.get_admin_context(),
            TEST_VRF2, network, es_name=es['name'])
self.assertEqual(es['name'],
details['external_segment_name'])
self.assertEqual("192.168.200.1",
details['gateway_ip'])
self.assertEqual("192.168.200.3",
details['host_snat_ip'])
self.assertEqual(24, details['prefixlen'])
def test_snat_pool_subnet_deletion(self):
self._mock_external_dict([('supported', '192.168.0.2/24')])
self.driver.apic_manager.ext_net_dict[
'supported']['host_pool_cidr'] = '192.168.200.1/24'
es = self.create_external_segment(name='supported',
cidr='192.168.0.2/24',
expected_res_status=201, shared=False)['external_segment']
admin_ctx = context.get_admin_context()
ext_net_id = self._db_plugin.get_subnet(
admin_ctx, es['subnet_id'])['network_id']
l3p = self.create_l3_policy(name='myl3',
external_segments={es['id']: ['']})['l3_policy']
l2p = self.create_l2_policy(name='myl2',
l3_policy_id=l3p['id'])['l2_policy']
ptg = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p['id'])['policy_target_group']
pt1 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self._bind_port_to_host(pt1['port_id'], 'h1')
mapping = self.driver.get_gbp_details(context.get_admin_context(),
device='tap%s' % pt1['port_id'], host='h1')
self.assertEqual(pt1['port_id'], mapping['port_id'])
self.assertEqual(1, len(mapping['host_snat_ips']))
self.assertEqual(es['name'],
mapping['host_snat_ips'][0]['external_segment_name'])
self.assertEqual("192.168.200.1",
mapping['host_snat_ips'][0]['gateway_ip'])
self.assertEqual("192.168.200.2",
mapping['host_snat_ips'][0]['host_snat_ip'])
self.assertEqual(24, mapping['host_snat_ips'][0]['prefixlen'])
self.update_l3_policy(l3p['id'], external_segments={},
expected_res_status=200)
subnet_filter = {'name': [amap.HOST_SNAT_POOL],
'network_id': [ext_net_id]}
internal_subnets = self._db_plugin.get_subnets(
admin_ctx, filters=subnet_filter)
self.assertEqual(1, len(internal_subnets))
self.delete_external_segment(es['id'],
expected_res_status=webob.exc.HTTPNoContent.code)
internal_subnets = self._db_plugin.get_subnets(
admin_ctx, filters=subnet_filter)
self.assertEqual(0, len(internal_subnets))
def test_snat_port_ip_loss(self):
self._mock_external_dict([('supported', '192.168.0.2/24')])
self.driver.apic_manager.ext_net_dict[
'supported']['host_pool_cidr'] = '192.168.200.1/24'
es = self.create_external_segment(name='supported',
cidr='192.168.0.2/24', shared=False)['external_segment']
admin_ctx = context.get_admin_context()
ext_net_id = self._db_plugin.get_subnet(
admin_ctx, es['subnet_id'])['network_id']
l3p = self.create_l3_policy(name='myl3',
external_segments={es['id']: ['']})['l3_policy']
l2p = self.create_l2_policy(name='myl2',
l3_policy_id=l3p['id'])['l2_policy']
ptg = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p['id'])['policy_target_group']
pt1 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self._bind_port_to_host(pt1['port_id'], 'h1')
mapping = self.driver.get_gbp_details(admin_ctx,
device='tap%s' % pt1['port_id'], host='h1')
self.assertEqual(1, len(mapping['host_snat_ips']))
snat_ports = self._db_plugin.get_ports(admin_ctx,
filters={'name': [amap.HOST_SNAT_POOL_PORT],
'network_id': [ext_net_id],
'device_id': ['h1']})
self._db_plugin.update_port(admin_ctx,
snat_ports[0]['id'], {'port': {'fixed_ips': []}})
mapping = self.driver.get_gbp_details(admin_ctx,
device='tap%s' % pt1['port_id'], host='h1')
self.assertEqual(0, len(mapping['host_snat_ips']))
def test_ip_address_owner_update(self):
l3p = self.create_l3_policy(name='myl3')['l3_policy']
l2p = self.create_l2_policy(name='myl2',
l3_policy_id=l3p['id'])['l2_policy']
ptg = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p['id'])['policy_target_group']
net_id = (self._get_ptg_shadow_net(ptg)['id']
if self.non_apic_network else l2p['network_id'])
pt1 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
pt2 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self._bind_port_to_host(pt1['port_id'], 'h1')
self._bind_port_to_host(pt2['port_id'], 'h2')
ip_owner_info = {'port': pt1['port_id'], 'ip_address_v4': '1.2.3.4'}
self.driver._notify_port_update = mock.Mock()
# set new owner
self.driver.ip_address_owner_update(context.get_admin_context(),
ip_owner_info=ip_owner_info, host='h1')
obj = self.driver.ha_ip_handler.get_port_for_ha_ipaddress(
'1.2.3.4', net_id)
self.assertEqual(pt1['port_id'], obj['port_id'])
self.driver._notify_port_update.assert_called_with(mock.ANY,
pt1['port_id'])
# update existing owner
self.driver._notify_port_update.reset_mock()
ip_owner_info['port'] = pt2['port_id']
self.driver.ip_address_owner_update(context.get_admin_context(),
ip_owner_info=ip_owner_info, host='h2')
obj = self.driver.ha_ip_handler.get_port_for_ha_ipaddress(
'1.2.3.4', net_id)
self.assertEqual(pt2['port_id'], obj['port_id'])
exp_calls = [
mock.call(mock.ANY, pt1['port_id']),
mock.call(mock.ANY, pt2['port_id'])]
self._check_call_list(exp_calls,
self.driver._notify_port_update.call_args_list)
def test_enhanced_subnet_options(self):
self.driver.enable_metadata_opt = False
l3p = self.create_l3_policy(name='myl3',
ip_pool='192.168.0.0/16')['l3_policy']
l2p = self.create_l2_policy(name='myl2',
l3_policy_id=l3p['id'])['l2_policy']
ptg = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p['id'])['policy_target_group']
pt1 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self._bind_port_to_host(pt1['port_id'], 'h1')
sub = self._get_object('subnets', ptg['subnets'][0],
self.api)
with self.port(subnet=sub, device_owner='network:dhcp',
tenant_id='onetenant') as dhcp:
if self.non_apic_network:
shadow_sub = self._get_object('subnets',
self._get_ptg_shadow_subnet(ptg), self.api)
with self.port(subnet=shadow_sub, tenant_id='onetenant',
device_owner='network:dhcp'):
pass
dhcp = dhcp['port']
details = self.driver.get_gbp_details(
context.get_admin_context(),
device='tap%s' % pt1['port_id'], host='h1')
self.assertEqual(1, len(details['subnets']))
# Verify that DNS nameservers are correctly set
self.assertEqual([dhcp['fixed_ips'][0]['ip_address']],
details['subnets'][0]['dns_nameservers'])
# Verify Default route via GW
self.assertTrue({'destination': '0.0.0.0/0',
'nexthop': '192.168.0.1'} in
details['subnets'][0]['host_routes'])
# Verify Metadata route via DHCP
self.assertTrue(
{'destination': '169.254.169.254/16',
'nexthop': dhcp['fixed_ips'][0]['ip_address']} in
details['subnets'][0]['host_routes'])
# Verify no extra routes are leaking inside
self.assertEqual(2, len(details['subnets'][0]['host_routes']))
self.assertEqual([dhcp['fixed_ips'][0]['ip_address']],
details['subnets'][0]['dhcp_server_ips'])
def test_update_l2p_inject_default_route_false(self):
self.driver.enable_metadata_opt = False
l3p = self.create_l3_policy(name='myl3',
ip_pool='192.168.0.0/16')['l3_policy']
l2p = self.create_l2_policy(name='myl2',
l3_policy_id=l3p['id'])['l2_policy']
ptg = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p['id'])['policy_target_group']
pt1 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self._bind_port_to_host(pt1['port_id'], 'h1')
sub = self._get_object('subnets', ptg['subnets'][0],
self.api)
# Add one more host_route to the subnet
more_host_routes = [{'destination': '172.16.0.0/24',
'nexthop': '10.0.2.2'}]
data = {'subnet': {'host_routes': more_host_routes}}
req = self.new_update_request('subnets', data, sub['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(sorted(res['subnet']['host_routes']),
sorted(more_host_routes))
with self.port(subnet=sub, device_owner='network:dhcp',
tenant_id='onetenant') as dhcp:
if self.non_apic_network:
shadow_sub = self._get_object('subnets',
self._get_ptg_shadow_subnet(ptg), self.api)
with self.port(subnet=shadow_sub, tenant_id='onetenant',
device_owner='network:dhcp'):
pass
dhcp = dhcp['port']
details = self.driver.get_gbp_details(
context.get_admin_context(),
device='tap%s' % pt1['port_id'], host='h1')
self.assertEqual(1, len(details['subnets']))
# Verify that DNS nameservers are correctly set
self.assertEqual([dhcp['fixed_ips'][0]['ip_address']],
details['subnets'][0]['dns_nameservers'])
# Verify Default route via GW
self.assertTrue({'destination': '0.0.0.0/0',
'nexthop': '192.168.0.1'} in
details['subnets'][0]['host_routes'])
# Verify Metadata route via DHCP
self.assertTrue(
{'destination': '169.254.169.254/16',
'nexthop': dhcp['fixed_ips'][0]['ip_address']} in
details['subnets'][0]['host_routes'])
# Verify additional host_routes are also added:
# GW + Metadata + 1 additional route = 3
self.assertEqual(3, len(details['subnets'][0]['host_routes']))
self.assertEqual([dhcp['fixed_ips'][0]['ip_address']],
details['subnets'][0]['dhcp_server_ips'])
# Verify gateway_ip is set
self.assertTrue('gateway_ip' in details['subnets'][0])
data = {'l2_policy': {'inject_default_route': False}}
res = self.new_update_request('l2_policies', data, l2p['id'],
self.fmt).get_response(self.ext_api)
pt2 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self._bind_port_to_host(pt2['port_id'], 'h1')
with self.port(subnet=sub, tenant_id='onetenant'):
details = self.driver.get_gbp_details(
context.get_admin_context(),
device='tap%s' % pt2['port_id'], host='h1')
self.assertEqual(1, len(details['subnets']))
# Verify Default route via GW is not present
self.assertFalse({'destination': '0.0.0.0/0',
'nexthop': '192.168.0.1'} in
details['subnets'][0]['host_routes'])
# Verify Metadata route via DHCP is not present
self.assertFalse(
{'destination': '169.254.169.254/16',
'nexthop': dhcp['fixed_ips'][0]['ip_address']} in
details['subnets'][0]['host_routes'])
# Verify only extra route is present
self.assertEqual(1, len(details['subnets'][0]['host_routes']))
self.assertTrue(
{'destination': '172.16.0.0/24',
'nexthop': '10.0.2.2'} in
details['subnets'][0]['host_routes'])
self.assertEqual([dhcp['fixed_ips'][0]['ip_address']],
details['subnets'][0]['dhcp_server_ips'])
# Verify gateway_ip is not set
self.assertFalse('gateway_ip' in details['subnets'][0])
def test_create_l2p_inject_default_route_false(self):
self.driver.enable_metadata_opt = False
l3p = self.create_l3_policy(name='myl3',
ip_pool='192.168.0.0/16')['l3_policy']
l2p = self.create_l2_policy(name='myl2',
l3_policy_id=l3p['id'],
inject_default_route=False)['l2_policy']
ptg = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p['id'])['policy_target_group']
pt1 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self._bind_port_to_host(pt1['port_id'], 'h1')
sub = self._get_object('subnets', ptg['subnets'][0],
self.api)
# Add one more host_route to the subnet
more_host_routes = [{'destination': '172.16.0.0/24',
'nexthop': '10.0.2.2'}]
data = {'subnet': {'host_routes': more_host_routes}}
req = self.new_update_request('subnets', data, sub['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(sorted(res['subnet']['host_routes']),
sorted(more_host_routes))
with self.port(subnet=sub, device_owner='network:dhcp',
tenant_id='onetenant') as dhcp:
if self.non_apic_network:
shadow_sub = self._get_object('subnets',
self._get_ptg_shadow_subnet(ptg), self.api)
with self.port(subnet=shadow_sub, tenant_id='onetenant',
device_owner='network:dhcp'):
pass
dhcp = dhcp['port']
details = self.driver.get_gbp_details(
context.get_admin_context(),
device='tap%s' % pt1['port_id'], host='h1')
self.assertEqual(1, len(details['subnets']))
# Verify that DNS nameservers are correctly set
self.assertEqual([dhcp['fixed_ips'][0]['ip_address']],
details['subnets'][0]['dns_nameservers'])
# Verify Default route via GW is not present
self.assertFalse({'destination': '0.0.0.0/0',
'nexthop': '192.168.0.1'} in
details['subnets'][0]['host_routes'])
# Verify Metadata route via DHCP is not present
self.assertFalse(
{'destination': '169.254.169.254/16',
'nexthop': dhcp['fixed_ips'][0]['ip_address']} in
details['subnets'][0]['host_routes'])
# Verify only extra route is present
self.assertEqual(1, len(details['subnets'][0]['host_routes']))
self.assertTrue(
{'destination': '172.16.0.0/24',
'nexthop': '10.0.2.2'} in
details['subnets'][0]['host_routes'])
self.assertEqual([dhcp['fixed_ips'][0]['ip_address']],
details['subnets'][0]['dhcp_server_ips'])
# Verify gateway_ip is not set
self.assertFalse('gateway_ip' in details['subnets'][0])
def test_get_gbp_details_error(self):
details = self.driver.get_gbp_details(
context.get_admin_context(), device='tap%s' % 'randomid',
host='h1')
req_details = self.driver.request_endpoint_details(
context.get_admin_context(),
request={'device': 'tap%s' % 'randomid', 'host': 'h1',
'timestamp': 0, 'request_id': 'request_id'})
# device was not found
self.assertTrue('port_id' not in details)
self.assertEqual(details, req_details['gbp_details'])
self.assertTrue('port_id' not in req_details['neutron_details'])
ptg = self.create_policy_target_group()['policy_target_group']
pt1 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self._bind_port_to_host(pt1['port_id'], 'h1')
self.driver._get_owned_addresses = mock.Mock(side_effect=Exception)
details = self.driver.get_gbp_details(
context.get_admin_context(), device='tap%s' % pt1['port_id'],
host='h1')
req_details = self.driver.request_endpoint_details(
context.get_admin_context(),
request={'device': 'tap%s' % pt1['port_id'], 'host': 'h1',
'timestamp': 0, 'request_id': 'request_id'})
# An exception occurred
self.assertEqual({'device': 'tap%s' % pt1['port_id']}, details)
self.assertIsNone(req_details)
def test_get_gbp_proxy_details(self):
l3p_fake = self.create_l3_policy(name='myl3')['l3_policy']
l2p_fake = self.create_l2_policy(
name='myl2', l3_policy_id=l3p_fake['id'])['l2_policy']
ptg_fake = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p_fake['id'])['policy_target_group']
        # The PT below will actually be bound for a VM
pt_bound = self.create_policy_target(
policy_target_group_id=ptg_fake['id'])['policy_target']
l3p_real = self.create_l3_policy(name='myl3')['l3_policy']
l2p_real = self.create_l2_policy(
name='myl2', l3_policy_id=l3p_real['id'])['l2_policy']
ptg_real = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p_real['id'])['policy_target_group']
# The PT below will never be bound
pt_unbound = self.create_policy_target(
policy_target_group_id=ptg_real['id'])['policy_target']
        # Change description to link the ports. The bound one will point
        # to the unbound one to get its info overridden
self.update_policy_target(
pt_bound['id'],
description=amap.PROXY_PORT_PREFIX + pt_unbound['port_id'])
port_unbound = self._get_object('ports', pt_unbound['port_id'],
self.api)['port']
# Bind the first port
self._bind_port_to_host(pt_bound['port_id'], 'h1')
# Get info on bound port
mapping = self.driver.get_gbp_details(context.get_admin_context(),
device='tap%s' % pt_bound['port_id'], host='h1')
# Bound port info
self.assertEqual(pt_bound['port_id'], mapping['port_id'])
self.assertEqual('tap%s' % pt_bound['port_id'], mapping['device'])
        # APIC info comes from the unbound port
self.assertEqual(ptg_real['id'], mapping['endpoint_group_name'])
self.assertEqual(l3p_real['tenant_id'], mapping['vrf_tenant'])
self.assertEqual(l3p_real['id'], mapping['vrf_name'])
self.assertEqual(port_unbound['fixed_ips'], mapping['fixed_ips'])
def test_get_gbp_details_shadow(self):
l2p = self.create_l2_policy()['l2_policy']
network = self._get_object('networks', l2p['network_id'], self.api)
with self.subnet(network=network) as sub:
with self.port(subnet=sub) as port:
self._bind_port_to_host(port['port']['id'], 'h1')
mapping = self.driver.get_gbp_details(
context.get_admin_context(),
device='tap%s' % port['port']['id'], host='h1')
self.assertEqual(port['port']['id'], mapping['port_id'])
self.assertEqual(amap.SHADOW_PREFIX + l2p['id'],
mapping['endpoint_group_name'])
def test_explicit_port(self):
with self.network() as net:
with self.subnet(network=net) as sub:
l2p = self.create_l2_policy(
network_id=net['network']['id'])['l2_policy']
ptg = self.create_policy_target_group(
l2_policy_id=l2p['id'])['policy_target_group']
if self.non_apic_network:
sub = self._get_object('subnets',
self._get_ptg_shadow_subnet(ptg), self.api)
with self.port(subnet=sub) as port:
self._bind_port_to_host(port['port']['id'], 'h1')
self.create_policy_target(
port_id=port['port']['id'],
policy_target_group_id=ptg['id'])
self.assertTrue(self.driver.notifier.port_update.called)
def test_port_update_changed_ptg(self):
ptg = self.create_policy_target_group()['policy_target_group']
ptg2 = self.create_policy_target_group(
l2_policy_id=ptg['l2_policy_id'])['policy_target_group']
pt = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self._bind_port_to_host(pt['port_id'], 'h1')
if not self.non_apic_network:
self.driver.notifier.port_update.reset_mock()
self.update_policy_target(pt['id'],
policy_target_group_id=ptg2['id'])
self.assertTrue(self.driver.notifier.port_update.called)
else:
res = self.update_policy_target(pt['id'],
policy_target_group_id=ptg2['id'],
expected_res_status=400)
self.assertEqual('PTGChangeDisallowedWithNonOpFlexNetwork',
res['NeutronError']['type'])
def test_update_ptg_failed(self):
ptg = self.create_policy_target_group()['policy_target_group']
ptg2 = self.create_policy_target_group()['policy_target_group']
pt = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
res = self.update_policy_target(
pt['id'], policy_target_group_id=ptg2['id'],
expected_res_status=400)
exp = ('PTGChangeDisallowedWithNonOpFlexNetwork'
if self.non_apic_network else 'InvalidPortForPTG')
self.assertEqual(exp, res['NeutronError']['type'])
def test_port_notified_on_subnet_change(self):
ptg = self.create_policy_target_group()['policy_target_group']
pt = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self._bind_port_to_host(pt['port_id'], 'h1')
subnet = self._get_object('subnets', ptg['subnets'][0], self.api)
subnet2 = copy.deepcopy(subnet)
subnet2['subnet']['gateway_ip'] = '10.0.0.254'
subnet2['subnet']['allocation_pools'] = [{
'start': '10.0.0.2', 'end': '10.0.0.250'}]
self.driver.apic_manager.reset_mock()
self.driver.notifier.port_update.reset_mock()
self.driver.process_subnet_changed(context.get_admin_context(),
subnet['subnet'], subnet2['subnet'])
self.assertTrue(self.driver.notifier.port_update.called)
def test_get_gbp_proxy_address_ownership(self):
l3p_fake = self.create_l3_policy(name='myl3')['l3_policy']
l2p_fake = self.create_l2_policy(
name='myl2', l3_policy_id=l3p_fake['id'])['l2_policy']
ptg_fake = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p_fake['id'])['policy_target_group']
        # The PTs below will actually be bound for VMs. They are all in the
        # same network
pt_bound_1 = self.create_policy_target(
policy_target_group_id=ptg_fake['id'])['policy_target']
pt_bound_2 = self.create_policy_target(
policy_target_group_id=ptg_fake['id'])['policy_target']
pt_bound_3 = self.create_policy_target(
policy_target_group_id=ptg_fake['id'])['policy_target']
l3p_real = self.create_l3_policy(name='myl3')['l3_policy']
# Build 2 L2Ps in order to get 2 networks.
l2p_real_1 = self.create_l2_policy(
name='myl2', l3_policy_id=l3p_real['id'])['l2_policy']
l2p_real_2 = self.create_l2_policy(
name='myl2', l3_policy_id=l3p_real['id'])['l2_policy']
ptg_real_1 = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p_real_1['id'])['policy_target_group']
ptg_real_2 = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p_real_2['id'])['policy_target_group']
# The PTs below will never be bound. They are on different networks
pt_unbound_1 = self.create_policy_target(
policy_target_group_id=ptg_real_1['id'])['policy_target']
pt_unbound_2 = self.create_policy_target(
policy_target_group_id=ptg_real_2['id'])['policy_target']
pt_unbound_2_1 = self.create_policy_target(
policy_target_group_id=ptg_real_2['id'])['policy_target']
# Change description to link the ports. The bound one will point
# to the unbound one to get its info overridden
self.update_policy_target(
pt_bound_1['id'],
description=amap.PROXY_PORT_PREFIX + pt_unbound_1['port_id'])
self.update_policy_target(
pt_bound_2['id'],
description=amap.PROXY_PORT_PREFIX + pt_unbound_2['port_id'])
self.update_policy_target(
pt_bound_3['id'],
description=amap.PROXY_PORT_PREFIX + pt_unbound_2_1['port_id'])
        # Set up address ownership on the bound ports, and verify that both
        # entries exist.
        # Set address ownership on the first port
self.driver.update_ip_owner({'port': pt_bound_1['port_id'],
'ip_address_v4': '1.1.1.1'})
# Same address owned by another port in a different subnet
self.driver.update_ip_owner({'port': pt_bound_2['port_id'],
'ip_address_v4': '1.1.1.1'})
# There are 2 ownership entries for the same address
entries = self.driver.ha_ip_handler.session.query(
ha_ip_db.HAIPAddressToPortAssocation).all()
self.assertEqual(2, len(entries))
self.assertEqual('1.1.1.1', entries[0].ha_ip_address)
self.assertEqual('1.1.1.1', entries[1].ha_ip_address)
self.driver.update_ip_owner({'port': pt_bound_3['port_id'],
'ip_address_v4': '1.1.1.1'})
entries = self.driver.ha_ip_handler.session.query(
ha_ip_db.HAIPAddressToPortAssocation).all()
self.assertEqual(2, len(entries))
self.assertEqual('1.1.1.1', entries[0].ha_ip_address)
self.assertEqual('1.1.1.1', entries[1].ha_ip_address)
class TestPolicyTargetVlanNetwork(ApicMappingVlanTestCase,
TestPolicyTarget):
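    """Re-runs the TestPolicyTarget suite on VLAN networks and adds
    shadow-port and static path-binding checks.
    """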
def test_shadow_port(self):
ptg1 = self.create_policy_target_group(
name="ptg1")['policy_target_group']
pt1 = self.create_policy_target(
policy_target_group_id=ptg1['id'])['policy_target']
shadow_port = self._get_object('ports', pt1['port_id'],
self.api)['port']
subnet = self._get_object('subnets', ptg1['subnets'][0], self.api)
ports = self._list_resource('ports',
self.api, network_id=subnet['subnet']['network_id'])['ports']
self.assertEqual(1, len(ports))
self.assertEqual(shadow_port['mac_address'], ports[0]['mac_address'])
self.assertEqual(len(shadow_port['fixed_ips']),
len(ports[0]['fixed_ips']))
self.assertEqual(shadow_port['fixed_ips'][0]['ip_address'],
ports[0]['fixed_ips'][0]['ip_address'])
self.delete_policy_target(pt1['id'])
self._get_object('ports', pt1['port_id'], self.api,
expected_res_status=404)
self._get_object('ports', ports[0]['id'], self.api,
expected_res_status=404)
def test_shadow_port_for_explicit_port(self):
ptg1 = self.create_policy_target_group()['policy_target_group']
shadow_subnet1 = self._get_object('subnets',
self._get_ptg_shadow_subnet(ptg1),
self.api)
subnet = self._get_object('subnets', ptg1['subnets'][0], self.api)
with self.port(subnet=shadow_subnet1) as p:
port1 = p['port']
pt1 = self.create_policy_target(policy_target_group_id=ptg1['id'],
port_id=port1['id'])['policy_target']
subnet = self._get_object('subnets', ptg1['subnets'][0], self.api)
ports = self._list_resource('ports',
self.api, network_id=subnet['subnet']['network_id'])['ports']
self.assertEqual(1, len(ports))
self.assertEqual(port1['mac_address'], ports[0]['mac_address'])
self.assertEqual(len(port1['fixed_ips']),
len(ports[0]['fixed_ips']))
self.assertEqual(port1['fixed_ips'][0]['ip_address'],
ports[0]['fixed_ips'][0]['ip_address'])
self.delete_policy_target(pt1['id'])
self._get_object('ports', pt1['port_id'], self.api,
expected_res_status=200)
self._get_object('ports', ports[0]['id'], self.api,
expected_res_status=404)
def test_explicit_port_wrong_network(self):
ptg1 = self.create_policy_target_group()['policy_target_group']
subnet = self._get_object('subnets', ptg1['subnets'][0], self.api)
with self.port(subnet=subnet) as port1:
res = self.create_policy_target(policy_target_group_id=ptg1['id'],
port_id=port1['port']['id'], expected_res_status=400)
self.assertEqual('ExplicitPortInWrongNetwork',
res['NeutronError']['type'])
def test_explicit_port_overlap_address(self):
ptg1 = self.create_policy_target_group(
name="ptg1")['policy_target_group']
subnet = self._get_object('subnets', ptg1['subnets'][0], self.api)
shadow_subnet1 = self._get_object('subnets',
self._get_ptg_shadow_subnet(ptg1),
self.api)
with self.port(subnet=shadow_subnet1) as p:
shadow_port1 = p
ips = shadow_port1['port']['fixed_ips']
ips[0].pop('subnet_id', None)
with self.port(subnet=subnet, fixed_ips=ips) as p:
res = self.create_policy_target(
policy_target_group_id=ptg1['id'],
port_id=shadow_port1['port']['id'], expected_res_status=400)
self.assertEqual('ExplicitPortOverlap',
res['NeutronError']['type'])
res = self.new_delete_request('ports', p['port']['id'],
self.fmt).get_response(self.api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
with self.port(subnet=subnet,
mac_address=shadow_port1['port']['mac_address']) as p:
res = self.create_policy_target(
policy_target_group_id=ptg1['id'],
port_id=shadow_port1['port']['id'], expected_res_status=400)
self.assertEqual('ExplicitPortOverlap',
res['NeutronError']['type'])
def test_path_static_binding_implicit_port(self):
mgr = self.driver.apic_manager
ptg1 = self.create_policy_target_group(
name="ptg1")['policy_target_group']
pt1 = self.create_policy_target(
policy_target_group_id=ptg1['id'])['policy_target']
self._bind_port_to_host(pt1['port_id'], 'h1')
port_ctx = self.driver._core_plugin.get_bound_port_context(
context.get_admin_context(), pt1['port_id'])
seg_id = port_ctx.bottom_bound_segment['segmentation_id']
mgr.ensure_path_created_for_port.assert_called_once_with(
ptg1['tenant_id'], ptg1['id'], 'h1', seg_id,
bd_name=ptg1['l2_policy_id'])
# move port to different host
mgr.ensure_path_created_for_port.reset_mock()
self._bind_port_to_host(pt1['port_id'], 'h2')
mgr.ensure_path_deleted_for_port.assert_called_once_with(
ptg1['tenant_id'], ptg1['id'], 'h1')
mgr.ensure_path_created_for_port.assert_called_once_with(
ptg1['tenant_id'], ptg1['id'], 'h2', seg_id,
bd_name=ptg1['l2_policy_id'])
# create another PT, bind to same host and then delete it
mgr.ensure_path_created_for_port.reset_mock()
mgr.ensure_path_deleted_for_port.reset_mock()
pt2 = self.create_policy_target(
policy_target_group_id=ptg1['id'])['policy_target']
self._bind_port_to_host(pt2['port_id'], 'h2')
mgr.ensure_path_created_for_port.assert_called_once_with(
ptg1['tenant_id'], ptg1['id'], 'h2', seg_id,
bd_name=ptg1['l2_policy_id'])
self.delete_policy_target(pt2['id'])
mgr.ensure_path_deleted_for_port.assert_not_called()
# delete PT
mgr.ensure_path_deleted_for_port.reset_mock()
self.delete_policy_target(pt1['id'])
mgr.ensure_path_deleted_for_port.assert_called_once_with(
ptg1['tenant_id'], ptg1['id'], 'h2')
def test_path_static_binding_explicit_port(self):
mgr = self.driver.apic_manager
ptg1 = self.create_policy_target_group(
name="ptg1")['policy_target_group']
shadow_subnet1 = self._get_object('subnets',
self._get_ptg_shadow_subnet(ptg1),
self.api)
with self.port(subnet=shadow_subnet1) as port:
port1 = port
port1 = self._bind_port_to_host(port1['port']['id'], 'h1')
port_ctx = self.driver._core_plugin.get_bound_port_context(
context.get_admin_context(), port1['port']['id'])
seg_id = port_ctx.bottom_bound_segment['segmentation_id']
mgr.ensure_path_created_for_port.assert_not_called()
# Assign port to a PT
pt1 = self.create_policy_target(
policy_target_group_id=ptg1['id'],
port_id=port1['port']['id'])['policy_target']
mgr.ensure_path_created_for_port.assert_called_once_with(
ptg1['tenant_id'], ptg1['id'], 'h1', seg_id,
bd_name=ptg1['l2_policy_id'])
# move port to different host
mgr.ensure_path_created_for_port.reset_mock()
self._bind_port_to_host(pt1['port_id'], 'h2')
mgr.ensure_path_deleted_for_port.assert_called_once_with(
ptg1['tenant_id'], ptg1['id'], 'h1')
mgr.ensure_path_created_for_port.assert_called_once_with(
ptg1['tenant_id'], ptg1['id'], 'h2', seg_id,
bd_name=ptg1['l2_policy_id'])
# create another port & PT, bind to same host and then delete port
mgr.ensure_path_created_for_port.reset_mock()
mgr.ensure_path_deleted_for_port.reset_mock()
with self.port(subnet=shadow_subnet1) as port:
port2 = port
pt2 = self.create_policy_target(
policy_target_group_id=ptg1['id'],
port_id=port2['port']['id'])['policy_target']
self._bind_port_to_host(pt2['port_id'], 'h2')
mgr.ensure_path_created_for_port.assert_called_once_with(
ptg1['tenant_id'], ptg1['id'], 'h2', seg_id,
bd_name=ptg1['l2_policy_id'])
res = self.new_delete_request('ports', port2['port']['id'],
self.fmt).get_response(self.api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
mgr.ensure_path_deleted_for_port.assert_not_called()
# Delete PT
mgr.ensure_path_deleted_for_port.reset_mock()
self.delete_policy_target(pt1['id'])
mgr.ensure_path_deleted_for_port.assert_called_once_with(
ptg1['tenant_id'], ptg1['id'], 'h2')
def test_path_static_binding_for_non_pt(self):
mgr = self.driver.apic_manager
ptg1 = self.create_policy_target_group(
name="ptg1")['policy_target_group']
subnet = self._get_object('subnets', ptg1['subnets'][0], self.api)
with self.port(subnet=subnet) as port:
port1 = port
with self.port(subnet=subnet) as port:
port2 = port
# bind first port
port1 = self._bind_port_to_host(port1['port']['id'], 'h1')
port_ctx = self.driver._core_plugin.get_bound_port_context(
context.get_admin_context(), port1['port']['id'])
seg_id = port_ctx.bottom_bound_segment['segmentation_id']
mgr.ensure_path_created_for_port.assert_called_once_with(
ptg1['tenant_id'], 'Shd-%s' % ptg1['l2_policy_id'], 'h1',
seg_id, bd_name=ptg1['l2_policy_id'])
# bind second port
mgr.ensure_path_created_for_port.reset_mock()
port2 = self._bind_port_to_host(port2['port']['id'], 'h1')
mgr.ensure_path_created_for_port.assert_called_once_with(
ptg1['tenant_id'], 'Shd-%s' % ptg1['l2_policy_id'], 'h1',
seg_id, bd_name=ptg1['l2_policy_id'])
# delete second port
res = self.new_delete_request('ports', port2['port']['id'],
self.fmt).get_response(self.api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
mgr.ensure_path_deleted_for_port.assert_not_called()
# delete first port
mgr.ensure_path_deleted_for_port.reset_mock()
res = self.new_delete_request('ports', port1['port']['id'],
self.fmt).get_response(self.api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
mgr.ensure_path_deleted_for_port.assert_called_once_with(
ptg1['tenant_id'], 'Shd-%s' % ptg1['l2_policy_id'], 'h1')
class FakeNetworkContext(object):
"""To generate network context for testing purposes only."""
def __init__(self, network, segments):
self._network = network
self._segments = segments
self._plugin_context = mock.Mock()
@property
def current(self):
return self._network
@property
def network_segments(self):
return self._segments
class FakePortContext(object):
"""To generate port context for testing purposes only."""
def __init__(self, port, network):
self._port = port
self._network = network
self._plugin = mock.Mock()
self._plugin_context = mock.Mock()
self._plugin.get_ports.return_value = []
if network.network_segments:
self._bound_segment = network.network_segments[0]
else:
self._bound_segment = None
self.current = self._port
self.original = self._port
self.network = self._network
self.top_bound_segment = self._bound_segment
self.bottom_bound_segment = self._bound_segment
self.host = self._port.get(portbindings.HOST_ID)
self.original_host = None
self._binding = mock.Mock()
self._binding.segment = self._bound_segment
def set_binding(self, segment_id, vif_type, cap_port_filter):
pass
class TestPolicyTargetDvs(ApicMappingTestCase):
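    """Port-binding tests for the DVS (VMware) agent type, checking the
    vif_details and the dvs_notifier calls issued by the mechanism driver.
    """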
def setUp(self):
super(TestPolicyTargetDvs, self).setUp()
self.driver.apic_manager.app_profile_name = mocked.APIC_AP
plugin = manager.NeutronManager.get_plugin()
self.ml2 = plugin.mechanism_manager.mech_drivers['apic_gbp'].obj
self.ml2._dvs_notifier = mock.MagicMock()
self.ml2.dvs_notifier.bind_port_call = mock.Mock(
return_value=BOOKED_PORT_VALUE)
mapper = self.driver.name_mapper
        mapper.name_mapper.policy_target_group.return_value = 'ptg1'
def _verify_dvs_notifier(self, notifier, port, host):
        # getattr works fine on mocks (attribute access auto-creates a child
        # mock), so there is no need for eval here
        dvs_mock = getattr(self.ml2.dvs_notifier, notifier, None)
        if dvs_mock is None:
            self.fail("The method " + notifier + " was not called")
self.assertTrue(dvs_mock.called)
a1, a2, a3, a4 = dvs_mock.call_args[0]
self.assertEqual(a1['id'], port['id'])
self.assertEqual(a2['id'], port['id'])
self.assertEqual(a4, host)
def _pg_name(self, project, profile, network):
return (str(project) + '|' + str(profile) + '|' + network)
def test_bind_port_dvs(self):
self.agent_conf = AGENT_CONF_DVS
l3p_fake = self.create_l3_policy(name='myl3')['l3_policy']
l2p_fake = self.create_l2_policy(
name='myl2', l3_policy_id=l3p_fake['id'])['l2_policy']
ptg = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p_fake['id'])['policy_target_group']
pt = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
newp1 = self._bind_port_to_host(pt['port_id'], 'h1')
vif_details = newp1['port']['binding:vif_details']
self.assertIsNotNone(vif_details.get('dvs_port_group_name'))
pg = self._pg_name(ptg['tenant_id'], mocked.APIC_AP, ptg['name'])
self.assertEqual(pg, vif_details.get('dvs_port_group_name'))
port_key = newp1['port']['binding:vif_details'].get('dvs_port_key')
self.assertIsNotNone(port_key)
self.assertEqual(port_key, BOOKED_PORT_VALUE)
self._verify_dvs_notifier('update_postcommit_port_call',
newp1['port'], 'h1')
net_ctx = FakeNetworkContext(mock.Mock(), [mock.Mock()])
port_ctx = FakePortContext(newp1['port'], net_ctx)
self.ml2.delete_port_postcommit(port_ctx)
self._verify_dvs_notifier('delete_port_call', newp1['port'], 'h1')
def test_bind_port_dvs_with_opflex_different_hosts(self):
l3p_fake = self.create_l3_policy(name='myl3')['l3_policy']
l2p_fake = self.create_l2_policy(
name='myl2', l3_policy_id=l3p_fake['id'])['l2_policy']
ptg = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p_fake['id'])['policy_target_group']
self.agent_conf = AGENT_CONF
pt2 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
newp2 = self._bind_port_to_host(pt2['port_id'], 'h2')
vif_details = newp2['port']['binding:vif_details']
self.assertIsNone(vif_details.get('dvs_port_group_name'))
pt1 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self.agent_conf = AGENT_CONF_DVS
self.ml2._dvs_notifier.reset_mock()
newp1 = self._bind_port_to_host(pt1['port_id'], 'h2')
port_key = newp1['port']['binding:vif_details'].get('dvs_port_key')
self.assertIsNotNone(port_key)
self.assertEqual(port_key, BOOKED_PORT_VALUE)
vif_details = newp1['port']['binding:vif_details']
self.assertIsNotNone(vif_details.get('dvs_port_group_name'))
pg = self._pg_name(ptg['tenant_id'], mocked.APIC_AP, ptg['name'])
self.assertEqual(pg, vif_details.get('dvs_port_group_name'))
self._verify_dvs_notifier('update_postcommit_port_call',
newp1['port'], 'h2')
net_ctx = FakeNetworkContext(mock.Mock(), [mock.Mock()])
port_ctx = FakePortContext(newp1['port'], net_ctx)
self.ml2.delete_port_postcommit(port_ctx)
self._verify_dvs_notifier('delete_port_call', newp1['port'], 'h2')
def test_bind_ports_opflex_same_host(self):
l3p_fake = self.create_l3_policy(name='myl3')['l3_policy']
l2p_fake = self.create_l2_policy(
name='myl2', l3_policy_id=l3p_fake['id'])['l2_policy']
ptg = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p_fake['id'])['policy_target_group']
pt1 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
newp1 = self._bind_port_to_host(pt1['port_id'], 'h1')
vif_details = newp1['port']['binding:vif_details']
self.assertIsNone(vif_details.get('dvs_port_group_name'))
port_key = newp1['port']['binding:vif_details'].get('dvs_port_key')
self.assertIsNone(port_key)
dvs_mock = self.ml2.dvs_notifier.update_postcommit_port_call
dvs_mock.assert_not_called()
net_ctx = FakeNetworkContext(mock.Mock(), [mock.Mock()])
port_ctx = FakePortContext(newp1['port'], net_ctx)
self.ml2.delete_port_postcommit(port_ctx)
dvs_mock = self.ml2.dvs_notifier.delete_port_call
dvs_mock.assert_not_called()
self.ml2.dvs_notifier.reset_mock()
pt2 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
newp2 = self._bind_port_to_host(pt2['port_id'], 'h1')
vif_details = newp2['port']['binding:vif_details']
self.assertIsNone(vif_details.get('dvs_port_group_name'))
port_key = newp2['port']['binding:vif_details'].get('dvs_port_key')
self.assertIsNone(port_key)
dvs_mock.assert_not_called()
net_ctx = FakeNetworkContext(mock.Mock(), [mock.Mock()])
port_ctx = FakePortContext(newp2['port'], net_ctx)
self.ml2.delete_port_postcommit(port_ctx)
dvs_mock = self.ml2.dvs_notifier.delete_port_call
dvs_mock.assert_not_called()
def test_bind_ports_dvs_with_opflex_same_host(self):
self.agent_conf = AGENT_CONF_DVS
l3p_fake = self.create_l3_policy(name='myl3')['l3_policy']
l2p_fake = self.create_l2_policy(
name='myl2', l3_policy_id=l3p_fake['id'])['l2_policy']
ptg = self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p_fake['id'])['policy_target_group']
pt1 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
newp1 = self._bind_port_to_host(pt1['port_id'], 'h1')
vif_details = newp1['port']['binding:vif_details']
self.assertIsNotNone(vif_details.get('dvs_port_group_name'))
port_key = newp1['port']['binding:vif_details'].get('dvs_port_key')
self.assertIsNotNone(port_key)
self.assertEqual(port_key, BOOKED_PORT_VALUE)
self._verify_dvs_notifier('update_postcommit_port_call',
newp1['port'], 'h1')
net_ctx = FakeNetworkContext(mock.Mock(), [mock.Mock()])
port_ctx = FakePortContext(newp1['port'], net_ctx)
self.ml2.delete_port_postcommit(port_ctx)
self._verify_dvs_notifier('delete_port_call', newp1['port'], 'h1')
self.ml2.dvs_notifier.reset_mock()
self.agent_conf = AGENT_CONF
pt2 = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
newp2 = self._bind_dhcp_port_to_host(pt2['port_id'], 'h1')
vif_details = newp2['port']['binding:vif_details']
self.assertIsNone(vif_details.get('dvs_port_group_name'))
port_key = newp2['port']['binding:vif_details'].get('dvs_port_key')
self.assertIsNone(port_key)
dvs_mock = self.ml2.dvs_notifier.update_postcommit_port_call
dvs_mock.assert_not_called()
net_ctx = FakeNetworkContext(mock.Mock(), [mock.Mock()])
port_ctx = FakePortContext(newp2['port'], net_ctx)
self.ml2.delete_port_postcommit(port_ctx)
dvs_mock = self.ml2.dvs_notifier.delete_port_call
dvs_mock.assert_not_called()
def test_bind_port_dvs_shared(self):
self.agent_conf = AGENT_CONF_DVS
ptg = self.create_policy_target_group(shared=True,
name="ptg1")['policy_target_group']
pt = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
newp1 = self._bind_port_to_host(pt['port_id'], 'h1')
vif_details = newp1['port']['binding:vif_details']
self.assertIsNotNone(vif_details.get('dvs_port_group_name'))
pg = self._pg_name(amap.apic_manager.TENANT_COMMON,
mocked.APIC_AP, ptg['name'])
self.assertEqual(pg, vif_details.get('dvs_port_group_name'))
port_key = newp1['port']['binding:vif_details'].get('dvs_port_key')
self.assertIsNotNone(port_key)
self.assertEqual(port_key, BOOKED_PORT_VALUE)
self._verify_dvs_notifier('update_postcommit_port_call',
newp1['port'], 'h1')
net_ctx = FakeNetworkContext(mock.Mock(), [mock.Mock()])
port_ctx = FakePortContext(newp1['port'], net_ctx)
self.ml2.delete_port_postcommit(port_ctx)
self._verify_dvs_notifier('delete_port_call', newp1['port'], 'h1')
class TestPolicyTargetGroup(ApicMappingTestCase):
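    """PTG lifecycle tests verifying the EPG, contract and subnet calls
    issued to the mocked APIC manager.
    """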
def _test_policy_target_group_created_on_apic(self, shared=False):
ptg = self.create_policy_target_group(
name="ptg1", shared=shared)['policy_target_group']
tenant = self.common_tenant if shared else ptg['tenant_id']
mgr = self.driver.apic_manager
expected_calls = [
mock.call(tenant, ptg['id'], bd_name=ptg['l2_policy_id'],
bd_owner=tenant),
mock.call(tenant, amap.SHADOW_PREFIX + ptg['l2_policy_id'],
bd_name=ptg['l2_policy_id'], bd_owner=tenant,
transaction=mock.ANY)]
self._check_call_list(
expected_calls, mgr.ensure_epg_created.call_args_list)
def test_policy_target_group_created_on_apic(self):
self._test_policy_target_group_created_on_apic()
def test_policy_target_group_created_on_apic_shared(self):
self._test_policy_target_group_created_on_apic(shared=True)
def _test_ptg_policy_rule_set_created(self, provider=True, shared=False):
cntr = self.create_policy_rule_set(name='c',
shared=shared)['policy_rule_set']
l2p = self.create_l2_policy()['l2_policy']
mgr = self.driver.apic_manager
mgr.set_contract_for_epg.reset_mock()
if provider:
ptg = self.create_policy_target_group(
l2_policy_id=l2p['id'],
provided_policy_rule_sets={cntr['id']: 'scope'})[
'policy_target_group']
else:
ptg = self.create_policy_target_group(
l2_policy_id=l2p['id'],
consumed_policy_rule_sets={cntr['id']: 'scope'})[
'policy_target_group']
# Verify that the apic call is issued
ct_owner = self.common_tenant if shared else cntr['tenant_id']
expected_calls = [
mock.call(
ptg['tenant_id'], ptg['id'], cntr['id'],
transaction=mock.ANY, contract_owner=ct_owner,
provider=provider),
mock.call(
ptg['tenant_id'], ptg['id'],
amap.SERVICE_PREFIX + ptg['l2_policy_id'],
transaction=mock.ANY, contract_owner=ptg['tenant_id'],
provider=False),
mock.call(
ptg['tenant_id'], ptg['id'],
amap.IMPLICIT_PREFIX + ptg['l2_policy_id'],
transaction=mock.ANY, contract_owner=ptg['tenant_id'],
provider=True),
mock.call(
ptg['tenant_id'], ptg['id'],
amap.IMPLICIT_PREFIX + ptg['l2_policy_id'],
transaction=mock.ANY, contract_owner=ptg['tenant_id'],
provider=False)]
self._check_call_list(expected_calls,
mgr.set_contract_for_epg.call_args_list)
def _test_ptg_policy_rule_set_updated(self, provider=True, shared=False):
p_or_c = {True: 'provided_policy_rule_sets',
False: 'consumed_policy_rule_sets'}
cntr = self.create_policy_rule_set(
name='c1', shared=shared)['policy_rule_set']
new_cntr = self.create_policy_rule_set(
name='c2', shared=shared)['policy_rule_set']
if provider:
ptg = self.create_policy_target_group(
provided_policy_rule_sets={cntr['id']: 'scope'})
else:
ptg = self.create_policy_target_group(
consumed_policy_rule_sets={cntr['id']: 'scope'})
data = {'policy_target_group': {p_or_c[provider]:
{new_cntr['id']: 'scope'}}}
req = self.new_update_request('policy_target_groups', data,
ptg['policy_target_group']['id'],
self.fmt)
ptg = self.deserialize(self.fmt, req.get_response(self.ext_api))
ptg = ptg['policy_target_group']
mgr = self.driver.apic_manager
ct_owner = self.common_tenant if shared else cntr['tenant_id']
mgr.set_contract_for_epg.assert_called_with(
ptg['tenant_id'], ptg['id'], new_cntr['id'],
contract_owner=ct_owner, transaction=mock.ANY,
provider=provider)
mgr.unset_contract_for_epg.assert_called_with(
ptg['tenant_id'], ptg['id'], cntr['id'],
contract_owner=ct_owner,
transaction=mock.ANY, provider=provider)
def test_ptg_policy_rule_set_provider_created(self):
self._test_ptg_policy_rule_set_created()
def test_ptg_policy_rule_set_provider_updated(self):
self._test_ptg_policy_rule_set_updated()
def test_ptg_policy_rule_set_consumer_created(self):
self._test_ptg_policy_rule_set_created(False)
def test_ptg_policy_rule_set_consumer_updated(self):
self._test_ptg_policy_rule_set_updated(False)
def test_ptg_policy_rule_set_provider_created_shared(self):
self._test_ptg_policy_rule_set_created(shared=True)
def test_ptg_policy_rule_set_provider_updated_shared(self):
self._test_ptg_policy_rule_set_updated(shared=True)
def test_ptg_policy_rule_set_consumer_created_shared(self):
self._test_ptg_policy_rule_set_created(False, shared=True)
def test_ptg_policy_rule_set_consumer_updated_shared(self):
self._test_ptg_policy_rule_set_updated(False, shared=True)
def _test_policy_target_group_deleted_on_apic(self, shared=False):
ptg = self.create_policy_target_group(
name="ptg1", shared=shared)['policy_target_group']
req = self.new_delete_request('policy_target_groups',
ptg['id'], self.fmt)
req.get_response(self.ext_api)
mgr = self.driver.apic_manager
tenant = self.common_tenant if shared else ptg['tenant_id']
expected_calls = [
mock.call(tenant, ptg['id']),
mock.call(tenant, amap.SHADOW_PREFIX + ptg['l2_policy_id'],
transaction=mock.ANY)]
self._check_call_list(expected_calls,
mgr.delete_epg_for_network.call_args_list)
def test_policy_target_group_deleted_on_apic(self):
self._test_policy_target_group_deleted_on_apic()
def test_policy_target_group_deleted_on_apic_shared(self):
self._test_policy_target_group_deleted_on_apic(shared=True)
def _test_policy_target_group_subnet_created_on_apic(self, shared=False):
ptg = self._create_explicit_subnet_ptg('10.0.0.0/24', shared=shared)
mgr = self.driver.apic_manager
tenant = self.common_tenant if shared else ptg['tenant_id']
mgr.ensure_subnet_created_on_apic.assert_called_once_with(
tenant, ptg['l2_policy_id'], '10.0.0.1/24',
transaction=mock.ANY)
def test_policy_target_group_subnet_created_on_apic(self):
self._test_policy_target_group_subnet_created_on_apic()
def test_policy_target_group_subnet_created_on_apic_shared(self):
self._test_policy_target_group_subnet_created_on_apic(shared=True)
def _test_policy_target_group_subnet_added(self, shared=False):
ptg = self._create_explicit_subnet_ptg('10.0.0.0/24', shared=shared)
l2p = self._get_object('l2_policies', ptg['l2_policy_id'],
self.ext_api)
network = self._get_object('networks', l2p['l2_policy']['network_id'],
self.api)
with self.subnet(network=network, cidr='10.0.1.0/24') as subnet:
data = {'policy_target_group':
{'subnets': ptg['subnets'] + [subnet['subnet']['id']]}}
mgr = self.driver.apic_manager
self.new_update_request('policy_target_groups', data, ptg['id'],
self.fmt).get_response(self.ext_api)
tenant = self.common_tenant if shared else ptg['tenant_id']
mgr.ensure_subnet_created_on_apic.assert_called_with(
tenant, ptg['l2_policy_id'], '10.0.1.1/24',
transaction=mock.ANY)
def test_policy_target_group_subnet_added(self):
self._test_policy_target_group_subnet_added()
def test_policy_target_group_subnet_added_shared(self):
self._test_policy_target_group_subnet_added(shared=True)
def _test_process_subnet_update(self, shared=False):
ptg = self._create_explicit_subnet_ptg('10.0.0.0/24', shared=shared)
subnet = self._get_object('subnets', ptg['subnets'][0], self.api)
subnet2 = copy.deepcopy(subnet)
subnet2['subnet']['gateway_ip'] = '10.0.0.254'
mgr = self.driver.apic_manager
mgr.reset_mock()
self.driver.process_subnet_changed(context.get_admin_context(),
subnet['subnet'], subnet2['subnet'])
tenant = self.common_tenant if shared else ptg['tenant_id']
mgr.ensure_subnet_created_on_apic.assert_called_once_with(
tenant, ptg['l2_policy_id'], '10.0.0.254/24',
transaction=mock.ANY)
mgr.ensure_subnet_deleted_on_apic.assert_called_with(
tenant, ptg['l2_policy_id'], '10.0.0.1/24',
transaction=mock.ANY)
def test_process_subnet_update(self):
self._test_process_subnet_update()
def test_process_subnet_update_shared(self):
self._test_process_subnet_update(shared=True)
def test_multiple_ptg_per_l2p(self):
l2p = self.create_l2_policy()['l2_policy']
# Create two PTGs on the same L2P
ptg1 = self.create_policy_target_group(
l2_policy_id=l2p['id'])['policy_target_group']
ptg2 = self.create_policy_target_group(
l2_policy_id=l2p['id'])['policy_target_group']
self.assertEqual(ptg1['subnets'], ptg2['subnets'])
def test_force_add_subnet(self):
l2p = self.create_l2_policy()['l2_policy']
# Create two PTGs sharing the same L2P
ptg1 = self.create_policy_target_group(
l2_policy_id=l2p['id'])['policy_target_group']
ptg2 = self.create_policy_target_group(
l2_policy_id=l2p['id'])['policy_target_group']
ctx = p_context.PolicyTargetGroupContext(
self.driver.gbp_plugin, context.get_admin_context(), ptg2)
# Emulate force add
self.driver._use_implicit_subnet(ctx, force_add=True)
# There is now a new subnet, and it is added to both PTGs
self.assertEqual(2, len(ctx.current['subnets']))
ptg1 = self.show_policy_target_group(ptg1['id'])['policy_target_group']
self.assertEqual(2, len(ptg1['subnets']))
ptg2 = self.show_policy_target_group(ptg2['id'])['policy_target_group']
self.assertEqual(2, len(ptg2['subnets']))
self.assertEqual(set(ptg1['subnets']), set(ptg2['subnets']))
self.assertNotEqual(ptg2['subnets'][0], ptg2['subnets'][1])
def test_subnets_unique_per_l3p(self):
l3p = self.create_l3_policy(shared=True, tenant_id='admin',
is_admin_context=True)['l3_policy']
l2p1 = self.create_l2_policy(
tenant_id='hr', l3_policy_id=l3p['id'])['l2_policy']
l2p2 = self.create_l2_policy(
tenant_id='eng', l3_policy_id=l3p['id'])['l2_policy']
ptg1 = self.create_policy_target_group(
tenant_id='hr', l2_policy_id=l2p1['id'])['policy_target_group']
ptg2 = self.create_policy_target_group(
tenant_id='eng', l2_policy_id=l2p2['id'])['policy_target_group']
sub_ptg_1 = set(self._get_object('subnets',
x, self.api)['subnet']['cidr']
for x in ptg1['subnets'])
sub_ptg_2 = set(self._get_object('subnets',
x, self.api)['subnet']['cidr']
for x in ptg2['subnets'])
self.assertNotEqual(sub_ptg_1, sub_ptg_2)
self.assertFalse(sub_ptg_1 & sub_ptg_2)
def test_preexisting_l2p_no_service_contracts(self):
# Circumvent name validation
self.driver.name_mapper.has_valid_name = (
self.driver.name_mapper._is_apic_reference)
self.driver.name_mapper.tenant = mock.Mock(
return_value=self._tenant_id)
self.driver.name_mapper.dn_manager.decompose_bridge_domain = mock.Mock(
return_value=['preexisting'])
self.driver._configure_epg_service_contract = mock.Mock()
self.driver._configure_epg_implicit_contract = mock.Mock()
l2p = self.create_l2_policy(name='apic:preexisting')['l2_policy']
self.create_policy_target_group(l2_policy_id=l2p['id'])
self.assertFalse(self.driver._configure_epg_service_contract.called)
self.assertFalse(self.driver._configure_epg_implicit_contract.called)
# Use non-preexisting L2P
self.create_policy_target_group()
self.assertTrue(self.driver._configure_epg_service_contract.called)
self.assertTrue(self.driver._configure_epg_implicit_contract.called)
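# Helper: create a PTG on a fresh L2P whose network gets an explicit
# subnet with the given CIDR (and optional allocation pool), instead
# of relying on implicit subnet allocation.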
def _create_explicit_subnet_ptg(self, cidr, shared=False, alloc_pool=None):
l2p = self.create_l2_policy(name="l2p", shared=shared)
l2p_id = l2p['l2_policy']['id']
network_id = l2p['l2_policy']['network_id']
network = self._get_object('networks', network_id, self.api)
pool = alloc_pool or [{'start': '10.0.0.2', 'end': '10.0.0.250'}]
with self.subnet(network=network, cidr=cidr,
allocation_pools=pool):
# The subnet creation in the proper network causes the subnet ID
# to be added to the PTG
return self.create_policy_target_group(
name="ptg1", l2_policy_id=l2p_id,
shared=shared)['policy_target_group']
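# Re-runs the PTG tests on a VLAN-backed setup and adds checks for the
# per-PTG shadow network and subnets maintained by the driver.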
class TestPolicyTargetGroupVlanNetwork(ApicMappingVlanTestCase,
TestPolicyTargetGroup):
def _test_shadow_network(self, shared):
ptg1 = self.create_policy_target_group(
name='ptg1', shared=shared)['policy_target_group']
l2p = self.show_l2_policy(ptg1['l2_policy_id'])['l2_policy']
net = self._get_object('networks', l2p['network_id'],
self.api)['network']
subnet1 = self._get_object('subnets', net['subnets'][0],
self.api)['subnet']
shadow_net1 = self._get_ptg_shadow_net(ptg1)
self.assertIsNotNone(shadow_net1)
self.assertEqual(ptg1['tenant_id'], shadow_net1['tenant_id'])
self.assertEqual(shared, shadow_net1['shared'])
self.assertEqual(1, len(shadow_net1['subnets']))
shadow_subnet1 = self._get_object('subnets',
shadow_net1['subnets'][0], self.api)['subnet']
self.assertEqual(subnet1['cidr'], shadow_subnet1['cidr'])
self.assertEqual(ptg1['tenant_id'], shadow_subnet1['tenant_id'])
self.delete_policy_target_group(ptg1['id'])
self._get_object('subnets', shadow_subnet1['id'], self.api,
expected_res_status=404)
self._get_object('networks', shadow_net1['id'], self.api,
expected_res_status=404)
def test_shadow_network(self):
self._test_shadow_network(False)
def test_shadow_network_shared(self):
self._test_shadow_network(True)
def _test_shadow_subnet(self, shared):
ptg1 = self.create_policy_target_group(
name='ptg1', shared=shared)['policy_target_group']
l2p = self.show_l2_policy(ptg1['l2_policy_id'])['l2_policy']
net = self._get_object('networks', l2p['network_id'],
self.api)['network']
subnet1 = self._get_object('subnets', net['subnets'][0],
self.api)['subnet']
shadow_net1 = self._get_ptg_shadow_net(ptg1)
with self.subnet(cidr='20.0.0.0/26',
network={'network': net}) as subnet2:
subnet2 = subnet2['subnet']
shadow_subnets = self._list_resource(
'subnets', self.api, network_id=shadow_net1['id'])['subnets']
shadow_subnets = sorted(shadow_subnets, key=lambda x: x['cidr'])
self.assertEqual(2, len(shadow_subnets))
self.assertEqual(subnet1['cidr'], shadow_subnets[0]['cidr'])
self.assertEqual(subnet2['cidr'], shadow_subnets[1]['cidr'])
self.assertTrue(shadow_subnets[0]['enable_dhcp'])
self.assertTrue(shadow_subnets[1]['enable_dhcp'])
subnet1 = self._update_resource(subnet1['id'], 'subnet',
expected_res_status=200, api=self.api,
enable_dhcp=False)['subnet']
self.assertFalse(subnet1['enable_dhcp'])
shadow_subnets = self._list_resource(
'subnets', self.api, network_id=shadow_net1['id'])['subnets']
shadow_subnets = sorted(shadow_subnets, key=lambda x: x['cidr'])
self.assertFalse(shadow_subnets[0]['enable_dhcp'])
self.delete_policy_target_group(ptg1['id'])
shadow_subnets = self._list_resource('subnets', self.api,
network_id=shadow_net1['id'], expected_res_status=200)['subnets']
self.assertEqual([], shadow_subnets)
def test_shadow_subnet(self):
self._test_shadow_subnet(False)
def test_shadow_subnet_shared(self):
self._test_shadow_subnet(True)
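# DHCP ports created on the shadow network must stay administratively
# down, even when an update tries to bring them up.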
def test_dhcp_port_disabled_in_shadow(self):
ptg1 = self.create_policy_target_group(
name='ptg1')['policy_target_group']
shadow_net1 = self._get_ptg_shadow_net(ptg1)
shadow_subnet1 = self._get_object('subnets',
shadow_net1['subnets'][0], self.api)
with self.port(subnet=shadow_subnet1,
device_owner='network:dhcp') as port:
port = self._get_object('ports', port['port']['id'], self.api)
self.assertFalse(port['port']['admin_state_up'])
self._update_resource(port['port']['id'], 'port',
expected_res_status=200, api=self.api,
admin_state_up=True)
port = self._get_object('ports', port['port']['id'], self.api)
self.assertFalse(port['port']['admin_state_up'])
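# Tests that L2 policy create/delete are mapped to BD and shadow EPG
# creation/deletion on APIC, plus the related subnet handling.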
class TestL2Policy(ApicMappingTestCase):
def _test_l2_policy_created_on_apic(self, shared=False):
l2p = self.create_l2_policy(name="l2p", shared=shared)['l2_policy']
tenant = self.common_tenant if shared else l2p['tenant_id']
mgr = self.driver.apic_manager
mgr.ensure_bd_created_on_apic.assert_called_once_with(
tenant, l2p['id'], ctx_owner=tenant, ctx_name=l2p['l3_policy_id'],
transaction=mock.ANY)
mgr.ensure_epg_created.assert_called_once_with(
tenant, amap.SHADOW_PREFIX + l2p['id'], bd_owner=tenant,
bd_name=l2p['id'], transaction=mock.ANY)
def test_l2_policy_created_on_apic(self):
self._test_l2_policy_created_on_apic()
def test_l2_policy_created_on_apic_shared(self):
self._test_l2_policy_created_on_apic(shared=True)
def _test_l2_policy_deleted_on_apic(self, shared=False):
l2p = self.create_l2_policy(name="l2p", shared=shared)['l2_policy']
req = self.new_delete_request('l2_policies', l2p['id'], self.fmt)
req.get_response(self.ext_api)
tenant = self.common_tenant if shared else l2p['tenant_id']
mgr = self.driver.apic_manager
mgr.delete_bd_on_apic.assert_called_once_with(
tenant, l2p['id'], transaction=mock.ANY)
mgr.delete_epg_for_network.assert_called_once_with(
tenant, amap.SHADOW_PREFIX + l2p['id'],
transaction=mock.ANY)
expected_calls = [
mock.call(amap.IMPLICIT_PREFIX + l2p['id'], owner=tenant,
transaction=mock.ANY),
mock.call(amap.SERVICE_PREFIX + l2p['id'], owner=tenant,
transaction=mock.ANY)]
self._check_call_list(expected_calls,
mgr.delete_contract.call_args_list)
def test_l2_policy_deleted_on_apic(self):
self._test_l2_policy_deleted_on_apic()
def test_l2_policy_deleted_on_apic_shared(self):
self._test_l2_policy_deleted_on_apic(shared=True)
def test_pre_existing_subnets_added(self):
with self.network() as net:
with self.subnet(network=net) as sub:
sub = sub['subnet']
l2p = self.create_l2_policy(
network_id=net['network']['id'])['l2_policy']
mgr = self.driver.apic_manager
mgr.ensure_subnet_created_on_apic.assert_called_with(
l2p['tenant_id'], l2p['id'],
sub['gateway_ip'] + '/' + sub['cidr'].split('/')[1],
transaction=mock.ANY)
ptg = self.create_policy_target_group(
l2_policy_id=l2p['id'])['policy_target_group']
self.assertEqual(ptg['subnets'], [sub['id']])
def test_reject_l3p_update(self):
l2p = self.create_l2_policy()['l2_policy']
new_l3p = self.create_l3_policy()['l3_policy']
res = self.update_l2_policy(l2p['id'], l3_policy_id=new_l3p['id'],
expected_res_status=400)
self.assertEqual('L3PolicyUpdateOfL2PolicyNotSupported',
res['NeutronError']['type'])
def test_subnet_deallocated(self):
l2p = self.create_l2_policy()['l2_policy']
ptg = self.create_policy_target_group(
l2_policy_id=l2p['id'])['policy_target_group']
subnet = netaddr.IPSet(
[self._show_subnet(x)['subnet']['cidr'] for x in ptg['subnets']])
self.delete_policy_target_group(ptg['id'])
l2p2 = self.create_l2_policy()['l2_policy']
ptg = self.create_policy_target_group(
l2_policy_id=l2p2['id'])['policy_target_group']
subnet2 = netaddr.IPSet(
[self._show_subnet(x)['subnet']['cidr'] for x in ptg['subnets']])
self.assertFalse(subnet & subnet2)
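# Tests L3 policy mapping to APIC contexts (VRFs) and its interaction
# with external segments: NAT and edge-NAT modes, pre-existing L3Outs
# and per-tenant NAT EPGs.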
class TestL3Policy(ApicMappingTestCase):
def _test_l3_policy_created_on_apic(self, shared=False):
l3p = self.create_l3_policy(name="l3p", shared=shared)['l3_policy']
tenant = self.common_tenant if shared else l3p['tenant_id']
mgr = self.driver.apic_manager
mgr.ensure_context_enforced.assert_called_once_with(
tenant, l3p['id'])
def test_l3_policy_created_on_apic(self):
self._test_l3_policy_created_on_apic()
def test_l3_policy_created_on_apic_shared(self):
self._test_l3_policy_created_on_apic(shared=True)
def _test_l3_policy_deleted_on_apic(self, shared=False):
l3p = self.create_l3_policy(name="l3p", shared=shared)['l3_policy']
req = self.new_delete_request('l3_policies', l3p['id'], self.fmt)
req.get_response(self.ext_api)
tenant = self.common_tenant if shared else l3p['tenant_id']
mgr = self.driver.apic_manager
mgr.ensure_context_deleted.assert_called_once_with(
tenant, l3p['id'])
def test_l3_policy_deleted_on_apic(self):
self._test_l3_policy_deleted_on_apic()
def test_l3_policy_deleted_on_apic_shared(self):
self._test_l3_policy_deleted_on_apic(shared=True)
def _test_multiple_l3_policy_per_es(self, shared_es=False):
# Verify that two L3Ps can be created on the same ES if NAT is enabled
self._mock_external_dict([('supported', '192.168.0.2/24')])
es = self.create_external_segment(name='supported',
cidr='192.168.0.0/24', shared=shared_es)['external_segment']
self.create_l3_policy(external_segments={es['id']: ['']},
expected_res_status=201)['l3_policy']
res = self.create_l3_policy(
external_segments={es['id']: ['']},
expected_res_status=201 if self.nat_enabled else 400)
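# With NAT enabled the same ES can serve multiple L3Ps; without NAT
# the second create must be rejected.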
if self.nat_enabled:
es = self.show_external_segment(es['id'])['external_segment']
self.assertEqual(2, len(es['l3_policies']))
else:
self.assertEqual('OnlyOneL3PolicyIsAllowedPerExternalSegment',
res['NeutronError']['type'])
# Verify that updating an existing L3P to use an already-used ES
# works when NAT is enabled
sneaky_l3p = self.create_l3_policy()['l3_policy']
res = self.update_l3_policy(
sneaky_l3p['id'],
expected_res_status=200 if self.nat_enabled else 400,
external_segments={es['id']: ['']})
if self.nat_enabled:
es = self.show_external_segment(es['id'])['external_segment']
self.assertEqual(3, len(es['l3_policies']))
else:
self.assertEqual('OnlyOneL3PolicyIsAllowedPerExternalSegment',
res['NeutronError']['type'])
def test_multiple_l3_policy_per_es(self):
self._test_multiple_l3_policy_per_es(shared_es=False)
def test_multiple_l3_policy_per_es_shared(self):
self._test_multiple_l3_policy_per_es(shared_es=True)
def test_one_l3_policy_ip_on_es(self):
# Verify that creating an L3P with more than one IP on an ES fails
es = self.create_external_segment(
cidr='192.168.0.0/24')['external_segment']
res = self.create_l3_policy(
external_segments={es['id']: ['192.168.0.2', '192.168.0.3']},
expected_res_status=400)
self.assertEqual('OnlyOneAddressIsAllowedPerExternalSegment',
res['NeutronError']['type'])
# Verify that updating an L3P to more than one IP on an ES fails
sneaky_l3p = self.create_l3_policy(
external_segments={es['id']: ['192.168.0.2']},
expected_res_status=201)['l3_policy']
res = self.update_l3_policy(
sneaky_l3p['id'], expected_res_status=400,
external_segments={es['id']: ['192.168.0.2', '192.168.0.3']})
self.assertEqual('OnlyOneAddressIsAllowedPerExternalSegment',
res['NeutronError']['type'])
def test_router_interface_no_gateway(self):
self._mock_external_dict([('supported', '192.168.0.2/24')])
es = self.create_external_segment(
name='supported', cidr='192.168.0.0/24')['external_segment']
l3p = self.create_l3_policy(
external_segments={es['id']: ['169.254.0.42']},
expected_res_status=201)['l3_policy']
l2p = self.create_l2_policy(l3_policy_id=l3p['id'])['l2_policy']
ptg = self.create_policy_target_group(
l2_policy_id=l2p['id'])['policy_target_group']
l3p = self.show_l3_policy(l3p['id'])['l3_policy']
self.assertEqual(1, len(l3p['routers']))
subnet = self._show_subnet(ptg['subnets'][0])['subnet']
router_ports = self._list(
'ports',
query_params='device_id=%s' % l3p['routers'][0])['ports']
self.assertEqual(2, len(router_ports))
for port in router_ports:
self.assertEqual(1, len(port['fixed_ips']))
self.assertNotEqual(subnet['gateway_ip'],
port['fixed_ips'][0]['ip_address'])
# Exactly one of the two router ports is on the PTG's subnet
self.assertNotEqual(router_ports[0]['fixed_ips'][0]['subnet_id'],
router_ports[1]['fixed_ips'][0]['subnet_id'])
self.assertTrue(
router_ports[0]['fixed_ips'][0]['subnet_id'] == subnet['id'] or
router_ports[1]['fixed_ips'][0]['subnet_id'] == subnet['id'])
def _wrap_up_l3out_request(self, l3out_str, l3p_id, es_id, l3p_owner):
# Simulate what the implementation does, for unit-test purposes
request = {}
request['children'] = self.trimmed_l3out
request['attributes'] = {'rn': u'Shd-Sub'}
final_req = {}
final_req['l3extOut'] = request
final_req = jsonutils.dumps(final_req)
final_req = re.sub('Shd-Sub',
l3out_str % (l3p_id, es_id), final_req)
final_req = re.sub('test-tenant', l3p_owner, final_req)
final_req = re.sub('{},*', '', final_req)
return final_req
def _test_l3p_plugged_to_es_at_creation(self, shared_es,
shared_l3p, is_edge_nat=False):
# Verify L3P is correctly plugged to ES on APIC during create
self._mock_external_dict([('supported', '192.168.0.2/24')],
is_edge_nat)
es = self.create_external_segment(
name='supported', cidr='192.168.0.0/24',
shared=shared_es,
external_routes=[{'destination': '0.0.0.0/0',
'nexthop': '192.168.0.254'},
{'destination': '128.0.0.0/16',
'nexthop': None}])['external_segment']
owner = self.common_tenant if shared_es else es['tenant_id']
mgr = self.driver.apic_manager
mgr.ensure_epg_created.reset_mock()
mgr.set_contract_for_epg.reset_mock()
l3p = self.create_l3_policy(
name='myl3p',
shared=shared_l3p,
tenant_id=es['tenant_id'] if not shared_es else 'another_tenant',
external_segments={es['id']: []},
expected_res_status=201)['l3_policy']
self.assertEqual(1, len(l3p['external_segments'][es['id']]))
self.assertEqual('169.254.0.2', l3p['external_segments'][es['id']][0])
expected_epg_calls = []
expected_contract_calls = []
expected_nat_epg_tenant = owner
if self.nat_enabled and shared_es and self.driver.per_tenant_nat_epg:
expected_epg_calls.append(
mock.call(l3p['tenant_id'], "NAT-epg-%s" % es['id'],
bd_name="NAT-bd-%s" % es['id'], bd_owner=owner,
transaction=mock.ANY))
expected_contract_calls.extend([
mock.call(l3p['tenant_id'], "NAT-epg-%s" % es['id'],
"NAT-allow-%s" % es['id'], transaction=mock.ANY),
mock.call(l3p['tenant_id'], "NAT-epg-%s" % es['id'],
"NAT-allow-%s" % es['id'], provider=True,
transaction=mock.ANY)])
expected_nat_epg_tenant = l3p['tenant_id']
self._check_call_list(expected_epg_calls,
mgr.ensure_epg_created.call_args_list)
self._check_call_list(expected_contract_calls,
mgr.set_contract_for_epg.call_args_list)
ctx = context.get_admin_context()
ctx._plugin_context = ctx
self.assertEqual((expected_nat_epg_tenant, "NAT-epg-%s" % es['id']),
self.driver._determine_nat_epg_for_es(ctx, es, l3p))
l2ps = [self.create_l2_policy(name='myl2p-%s' % x,
tenant_id=l3p['tenant_id'],
shared=shared_l3p,
l3_policy_id=l3p['id'])['l2_policy']
for x in range(0, 3)]
l3p_owner = self.common_tenant if shared_l3p else l3p['tenant_id']
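# Per-L3P shadow L3Outs are named 'Shd-<l3p_id>-<es_id>'; edge-NAT
# mode uses the 'Auto-' prefix instead.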
call_name = mgr.ensure_external_routed_network_created
l3out_str = "Shd-%s-%s"
if is_edge_nat:
l3out_str = "Auto-%s-%s"
if self.nat_enabled:
expected_l3out_calls = []
if not is_edge_nat or not self.pre_l3out:
expected_l3out_calls.append(
mock.call(l3out_str % (l3p['id'], es['id']),
owner=l3p_owner, context=l3p['id'],
transaction=mock.ANY))
if not self.pre_l3out:
expected_l3out_calls.append(
mock.call(es['id'], owner=owner,
context="NAT-vrf-%s" % es['id'],
transaction=mock.ANY))
elif not self.pre_l3out:
expected_l3out_calls = [
mock.call(es['id'], owner=owner, context=l3p['id'],
transaction=mock.ANY)]
else:
call_name = mgr.set_context_for_external_routed_network
expected_l3out_calls = [
mock.call(APIC_PRE_L3OUT_TENANT, es['name'], l3p['id'],
transaction=mock.ANY)]
self._check_call_list(expected_l3out_calls, call_name.call_args_list)
if is_edge_nat and self.nat_enabled:
(self.driver.l3out_vlan_alloc.
reserve_vlan.assert_called_once_with(
es['name'], l3p['id']))
if not self.pre_l3out:
expected_set_domain_calls = [
mock.call(es['id'], owner=owner, transaction=mock.ANY)]
expected_logic_node_calls = [
mock.call(es['id'], mocked.APIC_EXT_SWITCH,
mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT,
mocked.APIC_EXT_ENCAP, '192.168.0.2/24',
owner=owner, router_id=APIC_EXTERNAL_RID,
transaction=mock.ANY)]
expected_route_calls = [
mock.call(es['id'], mocked.APIC_EXT_SWITCH, '192.168.0.254',
owner=owner, subnet='0.0.0.0/0',
transaction=mock.ANY),
mock.call(es['id'], mocked.APIC_EXT_SWITCH, '192.168.0.1',
owner=owner, subnet='128.0.0.0/16',
transaction=mock.ANY)]
if is_edge_nat and self.nat_enabled:
expected_set_domain_calls.append(
mock.call(l3out_str % (l3p['id'], es['id']),
owner=l3p_owner, transaction=mock.ANY))
expected_logic_node_calls.append(
mock.call(l3out_str % (l3p['id'], es['id']),
mocked.APIC_EXT_SWITCH, mocked.APIC_EXT_MODULE,
mocked.APIC_EXT_PORT, mock.ANY, '192.168.0.2/24',
owner=l3p_owner, router_id=APIC_EXTERNAL_RID,
transaction=mock.ANY))
expected_route_calls.append(
mock.call(l3out_str % (l3p['id'], es['id']),
mocked.APIC_EXT_SWITCH, '192.168.0.254',
owner=l3p_owner, subnet='0.0.0.0/0',
transaction=mock.ANY))
expected_route_calls.append(
mock.call(l3out_str % (l3p['id'], es['id']),
mocked.APIC_EXT_SWITCH, '192.168.0.1',
owner=l3p_owner, subnet='128.0.0.0/16',
transaction=mock.ANY))
self._check_call_list(expected_set_domain_calls,
mgr.set_domain_for_external_routed_network.call_args_list)
self._check_call_list(expected_logic_node_calls,
mgr.ensure_logical_node_profile_created.call_args_list)
self._check_call_list(expected_route_calls,
mgr.ensure_static_route_created.call_args_list)
else:
if is_edge_nat and self.nat_enabled:
final_req = self._wrap_up_l3out_request(l3out_str,
l3p['id'], es['id'],
l3p_owner)
mgr.apic.post_body.assert_called_once_with(
mgr.apic.l3extOut.mo, final_req, l3p_owner,
l3out_str % (l3p['id'], es['id']))
self.assertFalse(mgr.set_domain_for_external_routed_network.called)
self.assertFalse(mgr.ensure_logical_node_profile_created.called)
self.assertFalse(mgr.ensure_static_route_created.called)
expected_set_l3out_for_bd_calls = []
if self.nat_enabled:
expected_set_l3out_for_bd_calls.append(
mock.call(owner, "NAT-bd-%s" % es['id'],
es['name' if self.pre_l3out else 'id'],
transaction=mock.ANY))
if is_edge_nat:
expected_set_l3out_for_bd_calls.extend([
mock.call(l3p_owner, l2p['id'],
l3out_str % (l3p['id'], es['id']),
transaction=mock.ANY) for l2p in l2ps])
else:
expected_set_l3out_for_bd_calls.extend([
mock.call(l3p_owner, l2p['id'],
es['name' if self.pre_l3out else 'id'],
transaction=mock.ANY) for l2p in l2ps])
self._check_call_list(expected_set_l3out_for_bd_calls,
mgr.set_l3out_for_bd.call_args_list)
# Although the naming convention used here is poor, the tests are
# kept separate so that the mocks get reset between runs.
def test_l3p_plugged_to_es_at_creation_1(self):
self._test_l3p_plugged_to_es_at_creation(shared_es=True,
shared_l3p=False)
def test_l3p_plugged_to_es_at_creation_2(self):
self._test_l3p_plugged_to_es_at_creation(shared_es=True,
shared_l3p=True)
def test_l3p_plugged_to_es_at_creation_3(self):
self._test_l3p_plugged_to_es_at_creation(shared_es=False,
shared_l3p=False)
def test_l3p_plugged_to_es_at_creation_edge_nat_mode_1(self):
self._test_l3p_plugged_to_es_at_creation(shared_es=True,
shared_l3p=False,
is_edge_nat=True)
def test_l3p_plugged_to_es_at_creation_edge_nat_mode_2(self):
self._test_l3p_plugged_to_es_at_creation(shared_es=True,
shared_l3p=True,
is_edge_nat=True)
def test_l3p_plugged_to_es_at_creation_edge_nat_mode_3(self):
self._test_l3p_plugged_to_es_at_creation(shared_es=False,
shared_l3p=False,
is_edge_nat=True)
def test_l3p_plugged_to_es_at_creation_ptne_1(self):
self.driver.per_tenant_nat_epg = True
self._test_l3p_plugged_to_es_at_creation(shared_es=True,
shared_l3p=False)
def test_l3p_plugged_to_es_at_creation_ptne_2(self):
self.driver.per_tenant_nat_epg = True
self._test_l3p_plugged_to_es_at_creation(shared_es=True,
shared_l3p=True)
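# Same checks as the creation case above, but the ES is attached to
# the L3P through an update instead of at create time.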
def _test_l3p_plugged_to_es_at_update(self, shared_es,
shared_l3p, is_edge_nat=False):
# Verify L3P is correctly plugged to ES on APIC during update
self._mock_external_dict([('supported', '192.168.0.2/24')],
is_edge_nat)
es = self.create_external_segment(
name='supported', cidr='192.168.0.0/24',
shared=shared_es,
external_routes=[{'destination': '0.0.0.0/0',
'nexthop': '192.168.0.254'},
{'destination': '128.0.0.0/16',
'nexthop': None}])['external_segment']
l3p = self.create_l3_policy(
name='myl3p',
expected_res_status=201,
tenant_id=es['tenant_id'] if not shared_es else 'another_tenant',
shared=shared_l3p)['l3_policy']
l2ps = [self.create_l2_policy(name='myl2p-%s' % x,
tenant_id=l3p['tenant_id'],
shared=shared_l3p,
l3_policy_id=l3p['id'])['l2_policy']
for x in range(0, 3)]
mgr = self.driver.apic_manager
mgr.ensure_epg_created.reset_mock()
mgr.set_contract_for_epg.reset_mock()
# Update the L3P to attach the ES
l3p = self.update_l3_policy(l3p['id'], tenant_id=l3p['tenant_id'],
external_segments={es['id']: []},
expected_res_status=200)['l3_policy']
self.assertEqual(1, len(l3p['external_segments'][es['id']]))
self.assertEqual('169.254.0.2', l3p['external_segments'][es['id']][0])
owner = self.common_tenant if shared_es else es['tenant_id']
l3p_owner = self.common_tenant if shared_l3p else l3p['tenant_id']
l3out_str = "Shd-%s-%s"
if is_edge_nat:
l3out_str = "Auto-%s-%s"
expected_l3out_calls = []
call_name = mgr.ensure_external_routed_network_created
if self.nat_enabled:
if not is_edge_nat or not self.pre_l3out:
expected_l3out_calls.append(
mock.call(l3out_str % (l3p['id'], es['id']),
owner=l3p_owner, context=l3p['id'],
transaction=mock.ANY))
if not self.pre_l3out:
expected_l3out_calls.append(
mock.call(es['id'], owner=owner,
context="NAT-vrf-%s" % es['id'],
transaction=mock.ANY))
elif not self.pre_l3out:
expected_l3out_calls = [
mock.call(es['id'], owner=owner, context=l3p['id'],
transaction=mock.ANY)]
else:
call_name = mgr.set_context_for_external_routed_network
expected_l3out_calls = [
mock.call(APIC_PRE_L3OUT_TENANT, es['name'], l3p['id'],
transaction=mock.ANY)]
self._check_call_list(expected_l3out_calls, call_name.call_args_list)
if is_edge_nat and self.nat_enabled:
(self.driver.l3out_vlan_alloc.
reserve_vlan.assert_called_once_with(
es['name'], l3p['id']))
if not self.pre_l3out:
expected_set_domain_calls = [
mock.call(es['id'], owner=owner, transaction=mock.ANY)]
expected_logic_node_calls = [
mock.call(es['id'], mocked.APIC_EXT_SWITCH,
mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT,
mocked.APIC_EXT_ENCAP, '192.168.0.2/24',
owner=owner, router_id=APIC_EXTERNAL_RID,
transaction=mock.ANY)]
expected_route_calls = [
mock.call(es['id'], mocked.APIC_EXT_SWITCH,
'192.168.0.254', owner=owner, subnet='0.0.0.0/0',
transaction=mock.ANY),
mock.call(es['id'], mocked.APIC_EXT_SWITCH, '192.168.0.1',
owner=owner, subnet='128.0.0.0/16',
transaction=mock.ANY)]
if is_edge_nat and self.nat_enabled:
expected_set_domain_calls.append(
mock.call(l3out_str % (l3p['id'], es['id']),
owner=l3p_owner, transaction=mock.ANY))
expected_logic_node_calls.append(
mock.call(l3out_str % (l3p['id'], es['id']),
mocked.APIC_EXT_SWITCH, mocked.APIC_EXT_MODULE,
mocked.APIC_EXT_PORT, mock.ANY, '192.168.0.2/24',
owner=l3p_owner, router_id=APIC_EXTERNAL_RID,
transaction=mock.ANY))
expected_route_calls.append(
mock.call(l3out_str % (l3p['id'], es['id']),
mocked.APIC_EXT_SWITCH, '192.168.0.254',
owner=l3p_owner, subnet='0.0.0.0/0',
transaction=mock.ANY))
expected_route_calls.append(
mock.call(l3out_str % (l3p['id'], es['id']),
mocked.APIC_EXT_SWITCH, '192.168.0.1',
owner=l3p_owner, subnet='128.0.0.0/16',
transaction=mock.ANY))
self._check_call_list(expected_set_domain_calls,
mgr.set_domain_for_external_routed_network.call_args_list)
self._check_call_list(expected_logic_node_calls,
mgr.ensure_logical_node_profile_created.call_args_list)
self._check_call_list(expected_route_calls,
mgr.ensure_static_route_created.call_args_list)
else:
if is_edge_nat and self.nat_enabled:
final_req = self._wrap_up_l3out_request(l3out_str,
l3p['id'], es['id'],
l3p_owner)
mgr.apic.post_body.assert_called_once_with(
mgr.apic.l3extOut.mo, final_req, l3p_owner,
l3out_str % (l3p['id'], es['id']))
self.assertFalse(mgr.set_domain_for_external_routed_network.called)
self.assertFalse(mgr.ensure_logical_node_profile_created.called)
self.assertFalse(mgr.ensure_static_route_created.called)
expected_set_l3out_for_bd_calls = []
if self.nat_enabled:
expected_set_l3out_for_bd_calls.append(
mock.call(owner, "NAT-bd-%s" % es['id'],
es['name' if self.pre_l3out else 'id'],
transaction=mock.ANY))
if is_edge_nat:
expected_set_l3out_for_bd_calls.extend([
mock.call(l3p_owner, l2p['id'],
l3out_str % (l3p['id'], es['id'])
) for l2p in l2ps])
else:
expected_set_l3out_for_bd_calls.extend([
mock.call(l3p_owner, l2p['id'],
es['name' if self.pre_l3out else 'id'])
for l2p in l2ps])
self._check_call_list(expected_set_l3out_for_bd_calls,
mgr.set_l3out_for_bd.call_args_list)
expected_epg_calls = []
expected_contract_calls = []
expected_nat_epg_tenant = owner
if self.nat_enabled and shared_es and self.driver.per_tenant_nat_epg:
expected_epg_calls.append(
mock.call(l3p['tenant_id'], "NAT-epg-%s" % es['id'],
bd_name="NAT-bd-%s" % es['id'], bd_owner=owner,
transaction=mock.ANY))
expected_contract_calls.extend([
mock.call(l3p['tenant_id'], "NAT-epg-%s" % es['id'],
"NAT-allow-%s" % es['id'], transaction=mock.ANY),
mock.call(l3p['tenant_id'], "NAT-epg-%s" % es['id'],
"NAT-allow-%s" % es['id'], provider=True,
transaction=mock.ANY)])
expected_nat_epg_tenant = l3p['tenant_id']
self._check_call_list(expected_epg_calls,
mgr.ensure_epg_created.call_args_list)
self._check_call_list(expected_contract_calls,
mgr.set_contract_for_epg.call_args_list)
ctx = context.get_admin_context()
ctx._plugin_context = ctx
self.assertEqual((expected_nat_epg_tenant, "NAT-epg-%s" % es['id']),
self.driver._determine_nat_epg_for_es(ctx, es, l3p))
# Although the naming convention used here is poor, the tests are
# kept separate so that the mocks get reset between runs.
def test_l3p_plugged_to_es_at_update_1(self):
self._test_l3p_plugged_to_es_at_update(shared_es=True,
shared_l3p=False)
def test_l3p_plugged_to_es_at_update_2(self):
self._test_l3p_plugged_to_es_at_update(shared_es=True,
shared_l3p=True)
def test_l3p_plugged_to_es_at_update_3(self):
self._test_l3p_plugged_to_es_at_update(shared_es=False,
shared_l3p=False)
def test_l3p_plugged_to_es_at_update_edge_nat_mode_1(self):
self._test_l3p_plugged_to_es_at_update(shared_es=True,
shared_l3p=False,
is_edge_nat=True)
def test_l3p_plugged_to_es_at_update_edge_nat_mode_2(self):
self._test_l3p_plugged_to_es_at_update(shared_es=True,
shared_l3p=True,
is_edge_nat=True)
def test_l3p_plugged_to_es_at_update_edge_nat_mode_3(self):
self._test_l3p_plugged_to_es_at_update(shared_es=False,
shared_l3p=False,
is_edge_nat=True)
def test_l3p_plugged_to_es_at_update_ptne_1(self):
self.driver.per_tenant_nat_epg = True
self._test_l3p_plugged_to_es_at_update(shared_es=True,
shared_l3p=False)
def test_l3p_plugged_to_es_at_update_ptne_2(self):
self.driver.per_tenant_nat_epg = True
self._test_l3p_plugged_to_es_at_update(shared_es=True,
shared_l3p=True)
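# Verify that deleting an L3P tears down the relevant L3Outs, detaches
# the NAT BDs and, in edge-NAT mode, releases the reserved VLANs.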
def _test_l3p_unplugged_from_es_on_delete(self, shared_es,
shared_l3p, is_edge_nat=False):
self._mock_external_dict([('supported1', '192.168.0.2/24'),
('supported2', '192.168.1.2/24')],
is_edge_nat)
es1 = self.create_external_segment(
name='supported1', cidr='192.168.0.0/24', shared=shared_es,
external_routes=[{'destination': '0.0.0.0/0',
'nexthop': '192.168.0.254'},
{'destination': '128.0.0.0/16',
'nexthop': None}])['external_segment']
es2 = self.create_external_segment(
shared=shared_es, name='supported2',
cidr='192.168.1.0/24')['external_segment']
l3p = self.create_l3_policy(shared=shared_l3p,
tenant_id=es1['tenant_id'] if not shared_es else 'another_tenant',
external_segments={es1['id']: ['169.254.0.3']},
expected_res_status=201)['l3_policy']
mgr = self.driver.apic_manager
mgr.set_context_for_external_routed_network.reset_mock()
req = self.new_delete_request('l3_policies', l3p['id'], self.fmt)
res = req.get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
owner = self.common_tenant if shared_es else es1['tenant_id']
l3p_owner = self.common_tenant if shared_l3p else l3p['tenant_id']
expected_delete_calls = []
if not self.pre_l3out:
expected_delete_calls.append(
mock.call(es1['id'], owner=owner, transaction=mock.ANY))
if self.nat_enabled:
l3out_str = "Shd-%s-%s"
if is_edge_nat:
l3out_str = "Auto-%s-%s"
expected_delete_calls.append(
mock.call(l3out_str % (l3p['id'], es1['id']),
owner=l3p_owner, transaction=mock.ANY))
self._check_call_list(
expected_delete_calls,
mgr.delete_external_routed_network.call_args_list)
if self.nat_enabled:
mgr.unset_l3out_for_bd.assert_called_once_with(owner,
"NAT-bd-%s" % es1['id'],
es1['name' if self.pre_l3out else 'id'], transaction=mock.ANY)
if self.pre_l3out and not self.nat_enabled:
call_name = mgr.set_context_for_external_routed_network
call_name.assert_called_once_with(APIC_PRE_L3OUT_TENANT,
es1['name'], None, transaction=mock.ANY)
if is_edge_nat and self.nat_enabled:
self.driver.l3out_vlan_alloc.release_vlan.assert_called_once_with(
es1['name'], l3p['id'])
mgr.delete_external_routed_network.reset_mock()
mgr.unset_l3out_for_bd.reset_mock()
self.driver.l3out_vlan_alloc.release_vlan.reset_mock()
expected_epg_calls = []
if self.nat_enabled and shared_es and self.driver.per_tenant_nat_epg:
expected_epg_calls.append(
mock.call(l3p['tenant_id'], "NAT-epg-%s" % es1['id'],
transaction=mock.ANY))
self._check_call_list(expected_epg_calls,
mgr.delete_epg_for_network.call_args_list)
ctx = context.get_admin_context()
ctx._plugin_context = ctx
self.assertEqual((owner, "NAT-epg-%s" % es1['id']),
self.driver._determine_nat_epg_for_es(ctx, es1, l3p))
# Verify correct deletion for 2 ESs
l3p = self.create_l3_policy(
shared=shared_l3p,
tenant_id=es1['tenant_id'] if not shared_es else 'another_tenant',
external_segments={es1['id']: ['169.254.0.3'],
es2['id']: ['169.254.0.3']},
expected_res_status=201)['l3_policy']
mgr.set_context_for_external_routed_network.reset_mock()
mgr.delete_epg_for_network.reset_mock()
req = self.new_delete_request('l3_policies', l3p['id'], self.fmt)
res = req.get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
expected_delete_calls = []
if not self.pre_l3out:
expected_delete_calls.extend([
mock.call(es1['id'], owner=owner, transaction=mock.ANY),
mock.call(es2['id'], owner=owner, transaction=mock.ANY)])
if self.nat_enabled:
l3out_str = "Shd-%s-%s"
if is_edge_nat:
l3out_str = "Auto-%s-%s"
expected_delete_calls.extend([
mock.call(l3out_str % (l3p['id'], es1['id']),
owner=l3p_owner, transaction=mock.ANY),
mock.call(l3out_str % (l3p['id'], es2['id']),
owner=l3p_owner, transaction=mock.ANY)])
self._check_call_list(
expected_delete_calls,
mgr.delete_external_routed_network.call_args_list)
if self.nat_enabled:
expected_unset_calls = [
mock.call(owner, "NAT-bd-%s" % es1['id'],
es1['name' if self.pre_l3out else 'id'],
transaction=mock.ANY),
mock.call(owner, "NAT-bd-%s" % es2['id'],
es2['name' if self.pre_l3out else 'id'],
transaction=mock.ANY)]
self._check_call_list(
expected_unset_calls, mgr.unset_l3out_for_bd.call_args_list)
if self.pre_l3out and not self.nat_enabled:
expected_calls = [
mock.call(APIC_PRE_L3OUT_TENANT,
es1['name'], None, transaction=mock.ANY),
mock.call(APIC_PRE_L3OUT_TENANT,
es2['name'], None, transaction=mock.ANY)]
self._check_call_list(
expected_calls,
mgr.set_context_for_external_routed_network.call_args_list)
if is_edge_nat and self.nat_enabled:
expected_release_vlan_calls = [mock.call(es1['name'], l3p['id']),
mock.call(es2['name'], l3p['id'])]
self._check_call_list(
expected_release_vlan_calls,
self.driver.l3out_vlan_alloc.release_vlan.call_args_list)
if self.nat_enabled and shared_es and self.driver.per_tenant_nat_epg:
expected_epg_calls.append(
mock.call(l3p['tenant_id'], "NAT-epg-%s" % es2['id'],
transaction=mock.ANY))
self._check_call_list(expected_epg_calls,
mgr.delete_epg_for_network.call_args_list)
self.assertEqual((owner, "NAT-epg-%s" % es2['id']),
self.driver._determine_nat_epg_for_es(ctx, es2, l3p))
# Although the naming convention used here is poor, the tests are
# kept separate so that the mocks get reset between runs.
def test_l3p_unplugged_from_es_on_delete_1(self):
self._test_l3p_unplugged_from_es_on_delete(shared_es=True,
shared_l3p=False)
def test_l3p_unplugged_from_es_on_delete_2(self):
self._test_l3p_unplugged_from_es_on_delete(shared_es=True,
shared_l3p=True)
def test_l3p_unplugged_from_es_on_delete_3(self):
self._test_l3p_unplugged_from_es_on_delete(shared_es=False,
shared_l3p=False)
def test_l3p_unplugged_from_es_on_delete_edge_nat_mode_1(self):
self._test_l3p_unplugged_from_es_on_delete(shared_es=True,
shared_l3p=False,
is_edge_nat=True)
def test_l3p_unplugged_from_es_on_delete_edge_nat_mode_2(self):
self._test_l3p_unplugged_from_es_on_delete(shared_es=True,
shared_l3p=True,
is_edge_nat=True)
def test_l3p_unplugged_from_es_on_delete_edge_nat_mode_3(self):
self._test_l3p_unplugged_from_es_on_delete(shared_es=False,
shared_l3p=False,
is_edge_nat=True)
def test_l3p_unplugged_from_es_on_delete_ptne_1(self):
self.driver.per_tenant_nat_epg = True
self._test_l3p_unplugged_from_es_on_delete(shared_es=True,
shared_l3p=False)
def test_l3p_unplugged_from_es_on_delete_ptne_2(self):
self.driver.per_tenant_nat_epg = True
self._test_l3p_unplugged_from_es_on_delete(shared_es=True,
shared_l3p=True)
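# Verify that detaching an ES from an L3P via update tears down the
# corresponding shadow L3Out and BD links, and that a newly attached
# ES is plugged in its place.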
def _test_l3p_unplugged_from_es_on_update(self, shared_es,
shared_l3p, is_edge_nat=False):
self._mock_external_dict([('supported1', '192.168.0.2/24'),
('supported', '192.168.1.2/24')],
is_edge_nat)
es1 = self.create_external_segment(
name='supported1', cidr='192.168.0.0/24', shared=shared_es,
external_routes=[{'destination': '0.0.0.0/0',
'nexthop': '192.168.0.254'},
{'destination': '128.0.0.0/16',
'nexthop': None}])['external_segment']
es2 = self.create_external_segment(
shared=shared_es,
name='supported', cidr='192.168.1.0/24')['external_segment']
l3p = self.create_l3_policy(
name='myl3p',
tenant_id=es1['tenant_id'] if not shared_es else 'another_tenant',
shared=shared_l3p,
external_segments={es1['id']: ['169.254.0.3']},
expected_res_status=201)['l3_policy']
l2ps = [self.create_l2_policy(name='myl2p-%s' % x,
tenant_id=l3p['tenant_id'],
shared=shared_l3p,
l3_policy_id=l3p['id'])['l2_policy']
for x in range(0, 3)]
mgr = self.driver.apic_manager
owner = self.common_tenant if shared_es else es1['tenant_id']
l3p_owner = self.common_tenant if shared_l3p else l3p['tenant_id']
mgr.ensure_external_routed_network_created.reset_mock()
mgr.set_domain_for_external_routed_network.reset_mock()
mgr.ensure_logical_node_profile_created.reset_mock()
mgr.ensure_static_route_created.reset_mock()
self.driver.l3out_vlan_alloc.reserve_vlan.reset_mock()
mgr.apic.post_body.reset_mock()
mgr.set_context_for_external_routed_network.reset_mock()
mgr.set_l3out_for_bd.reset_mock()
l3p = self.update_l3_policy(
l3p['id'], tenant_id=l3p['tenant_id'], expected_res_status=200,
external_segments={es2['id']: ['169.254.0.4']})['l3_policy']
l3out_str = "Shd-%s-%s"
if is_edge_nat:
l3out_str = "Auto-%s-%s"
expected_delete_calls = []
if not self.pre_l3out:
expected_delete_calls.append(
mock.call(es1['id'], owner=owner, transaction=mock.ANY))
if self.nat_enabled:
expected_delete_calls.append(
mock.call(l3out_str % (l3p['id'], es1['id']),
owner=l3p_owner, transaction=mock.ANY))
self._check_call_list(
expected_delete_calls,
mgr.delete_external_routed_network.call_args_list)
if self.pre_l3out and not self.nat_enabled:
expected_calls = [
mock.call(APIC_PRE_L3OUT_TENANT,
es1['name'], None, transaction=mock.ANY),
mock.call(APIC_PRE_L3OUT_TENANT,
es2['name'], l3p['id'], transaction=mock.ANY)]
self._check_call_list(
expected_calls,
mgr.set_context_for_external_routed_network.call_args_list)
expected_unset_l3out_for_bd_calls = []
if self.nat_enabled:
if is_edge_nat:
expected_unset_l3out_for_bd_calls.extend([
mock.call(l3p_owner, l2p['id'],
l3out_str % (l3p['id'], es1['id'])
) for l2p in l2ps])
expected_unset_l3out_for_bd_calls.append(
mock.call(owner, "NAT-bd-%s" % es1['id'],
es1['name' if self.pre_l3out else 'id'],
transaction=mock.ANY))
else:
expected_unset_l3out_for_bd_calls.extend([
mock.call(l3p_owner, l2p['id'],
es1['name' if self.pre_l3out else 'id'])
for l2p in l2ps])
self._check_call_list(expected_unset_l3out_for_bd_calls,
mgr.unset_l3out_for_bd.call_args_list)
if is_edge_nat and self.nat_enabled:
self.driver.l3out_vlan_alloc.release_vlan.assert_called_once_with(
es1['name'], l3p['id'])
expected_l3out_calls = []
if self.nat_enabled:
if not is_edge_nat or not self.pre_l3out:
expected_l3out_calls.append(
mock.call(l3out_str % (l3p['id'], es2['id']),
owner=l3p_owner, context=l3p['id'],
transaction=mock.ANY))
if not self.pre_l3out:
expected_l3out_calls.append(
mock.call(es2['id'], owner=owner,
context="NAT-vrf-%s" % es2['id'],
transaction=mock.ANY))
elif not self.pre_l3out:
expected_l3out_calls = [
mock.call(es2['id'], owner=owner, context=l3p['id'],
transaction=mock.ANY)]
self._check_call_list(expected_l3out_calls,
mgr.ensure_external_routed_network_created.call_args_list)
if is_edge_nat and self.nat_enabled:
(self.driver.l3out_vlan_alloc.
reserve_vlan.assert_called_once_with(
es2['name'], l3p['id']))
if not self.pre_l3out:
expected_set_domain_calls = [
mock.call(es2['id'], owner=owner, transaction=mock.ANY)]
expected_logic_node_calls = [
mock.call(es2['id'], mocked.APIC_EXT_SWITCH,
mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT,
mocked.APIC_EXT_ENCAP, '192.168.1.2/24',
owner=owner, router_id=APIC_EXTERNAL_RID,
transaction=mock.ANY)]
if is_edge_nat and self.nat_enabled:
expected_set_domain_calls.append(
mock.call(l3out_str % (l3p['id'], es2['id']),
owner=l3p_owner, transaction=mock.ANY))
expected_logic_node_calls.append(
mock.call(l3out_str % (l3p['id'], es2['id']),
mocked.APIC_EXT_SWITCH, mocked.APIC_EXT_MODULE,
mocked.APIC_EXT_PORT, mock.ANY, '192.168.1.2/24',
owner=l3p_owner, router_id=APIC_EXTERNAL_RID,
transaction=mock.ANY))
self._check_call_list(expected_set_domain_calls,
mgr.set_domain_for_external_routed_network.call_args_list)
self._check_call_list(expected_logic_node_calls,
mgr.ensure_logical_node_profile_created.call_args_list)
else:
if is_edge_nat and self.nat_enabled:
final_req = self._wrap_up_l3out_request(l3out_str,
l3p['id'], es2['id'],
l3p_owner)
mgr.apic.post_body.assert_called_once_with(
mgr.apic.l3extOut.mo, final_req, l3p_owner,
l3out_str % (l3p['id'], es2['id']))
self.assertFalse(mgr.set_domain_for_external_routed_network.called)
self.assertFalse(mgr.ensure_logical_node_profile_created.called)
self.assertFalse(mgr.ensure_static_route_created.called)
expected_set_l3out_for_bd_calls = []
if self.nat_enabled:
expected_set_l3out_for_bd_calls.append(
mock.call(owner, "NAT-bd-%s" % es2['id'],
es2['name' if self.pre_l3out else 'id'],
transaction=mock.ANY))
if is_edge_nat:
expected_set_l3out_for_bd_calls.extend([
mock.call(l3p_owner, l2p['id'],
l3out_str % (l3p['id'], es2['id'])
) for l2p in l2ps])
else:
expected_set_l3out_for_bd_calls.extend([
mock.call(l3p_owner, l2p['id'],
es2['name' if self.pre_l3out else 'id'])
for l2p in l2ps])
self._check_call_list(expected_set_l3out_for_bd_calls,
mgr.set_l3out_for_bd.call_args_list)
expected_epg_calls = []
if self.nat_enabled and shared_es and self.driver.per_tenant_nat_epg:
expected_epg_calls.append(
mock.call(l3p['tenant_id'], "NAT-epg-%s" % es1['id'],
transaction=mock.ANY))
self._check_call_list(expected_epg_calls,
mgr.delete_epg_for_network.call_args_list)
ctx = context.get_admin_context()
ctx._plugin_context = ctx
self.assertEqual((owner, "NAT-epg-%s" % es1['id']),
self.driver._determine_nat_epg_for_es(ctx, es1, l3p))
self.driver.l3out_vlan_alloc.release_vlan.reset_mock()
mgr.delete_external_routed_network.reset_mock()
mgr.unset_l3out_for_bd.reset_mock()
self.update_l3_policy(
l3p['id'], expected_res_status=200, tenant_id=l3p['tenant_id'],
external_segments={es1['id']: ['169.254.0.5'],
es2['id']: ['169.254.0.6']})
mgr.set_context_for_external_routed_network.reset_mock()
mgr.delete_epg_for_network.reset_mock()
self.update_l3_policy(
l3p['id'], tenant_id=l3p['tenant_id'],
expected_res_status=200, external_segments={})
expected_delete_calls = []
if not self.pre_l3out:
expected_delete_calls.extend([
mock.call(es1['id'], owner=owner, transaction=mock.ANY),
mock.call(es2['id'], owner=owner, transaction=mock.ANY)])
if self.nat_enabled:
expected_delete_calls.extend([
mock.call(l3out_str % (l3p['id'], es1['id']),
owner=l3p_owner, transaction=mock.ANY),
mock.call(l3out_str % (l3p['id'], es2['id']),
owner=l3p_owner, transaction=mock.ANY)])
self._check_call_list(
expected_delete_calls,
mgr.delete_external_routed_network.call_args_list)
expected_unset_l3out_for_bd_calls = []
if self.nat_enabled:
if is_edge_nat:
expected_unset_l3out_for_bd_calls.extend([
mock.call(l3p_owner, l2p['id'],
l3out_str % (l3p['id'], es1['id'])
) for l2p in l2ps])
expected_unset_l3out_for_bd_calls.extend([
mock.call(l3p_owner, l2p['id'],
l3out_str % (l3p['id'], es2['id'])
) for l2p in l2ps])
expected_unset_l3out_for_bd_calls.append(
mock.call(owner, "NAT-bd-%s" % es1['id'],
es1['name' if self.pre_l3out else 'id'],
transaction=mock.ANY))
expected_unset_l3out_for_bd_calls.append(
mock.call(owner, "NAT-bd-%s" % es2['id'],
es2['name' if self.pre_l3out else 'id'],
transaction=mock.ANY))
else:
expected_unset_l3out_for_bd_calls.extend([
mock.call(l3p_owner, l2p['id'],
es1['name' if self.pre_l3out else 'id'])
for l2p in l2ps])
expected_unset_l3out_for_bd_calls.extend([
mock.call(l3p_owner, l2p['id'],
es2['name' if self.pre_l3out else 'id'])
for l2p in l2ps])
self._check_call_list(expected_unset_l3out_for_bd_calls,
mgr.unset_l3out_for_bd.call_args_list)
if self.pre_l3out and not self.nat_enabled:
expected_calls = [
mock.call(APIC_PRE_L3OUT_TENANT,
es1['name'], None, transaction=mock.ANY),
mock.call(APIC_PRE_L3OUT_TENANT,
es2['name'], None, transaction=mock.ANY)]
self._check_call_list(
expected_calls,
mgr.set_context_for_external_routed_network.call_args_list)
if is_edge_nat and self.nat_enabled:
expected_release_vlan_calls = [mock.call(es1['name'], l3p['id']),
mock.call(es2['name'], l3p['id'])]
self._check_call_list(
expected_release_vlan_calls,
self.driver.l3out_vlan_alloc.release_vlan.call_args_list)
if self.nat_enabled and shared_es and self.driver.per_tenant_nat_epg:
expected_epg_calls.append(
mock.call(l3p['tenant_id'], "NAT-epg-%s" % es2['id'],
transaction=mock.ANY))
self._check_call_list(expected_epg_calls,
mgr.delete_epg_for_network.call_args_list)
self.assertEqual((owner, "NAT-epg-%s" % es2['id']),
self.driver._determine_nat_epg_for_es(ctx, es2, l3p))
# Although the naming convention used here is poor, the tests are
# kept separate so that the mocks get reset between runs.
def test_l3p_unplugged_from_es_on_update_1(self):
self._test_l3p_unplugged_from_es_on_update(shared_es=True,
shared_l3p=False)
def test_l3p_unplugged_from_es_on_update_2(self):
self._test_l3p_unplugged_from_es_on_update(shared_es=True,
shared_l3p=True)
def test_l3p_unplugged_from_es_on_update_3(self):
self._test_l3p_unplugged_from_es_on_update(shared_es=False,
shared_l3p=False)
def test_l3p_unplugged_from_es_on_update_edge_nat_mode_1(self):
self._test_l3p_unplugged_from_es_on_update(shared_es=True,
shared_l3p=False,
is_edge_nat=True)
def test_l3p_unplugged_from_es_on_update_edge_nat_mode_2(self):
self._test_l3p_unplugged_from_es_on_update(shared_es=True,
shared_l3p=True,
is_edge_nat=True)
def test_l3p_unplugged_from_es_on_update_edge_nat_mode_3(self):
self._test_l3p_unplugged_from_es_on_update(shared_es=False,
shared_l3p=False,
is_edge_nat=True)
def test_l3p_unplugged_from_es_on_update_ptne_1(self):
self.driver.per_tenant_nat_epg = True
self._test_l3p_unplugged_from_es_on_update(shared_es=True,
shared_l3p=False)
def test_l3p_unplugged_from_es_on_update_ptne_2(self):
self.driver.per_tenant_nat_epg = True
self._test_l3p_unplugged_from_es_on_update(shared_es=True,
shared_l3p=True)
def test_verify_unsupported_es_noop(self):
# Verify that an ES not present in the external network config
# results in no APIC calls (no-op)
self._mock_external_dict([('supported', '192.168.0.2/24')])
es = self.create_external_segment(
name='unsupported', cidr='192.168.0.0/24')['external_segment']
self.create_l3_policy(
external_segments={es['id']: ['192.168.0.3']},
expected_res_status=201)
mgr = self.driver.apic_manager
self.assertFalse(mgr.ensure_external_routed_network_created.called)
self.assertFalse(mgr.set_domain_for_external_routed_network.called)
self.assertFalse(mgr.ensure_logical_node_profile_created.called)
self.assertFalse(mgr.ensure_static_route_created.called)
def test_l3p_external_address(self):
# Verify that an auto-allocated IP address is assigned to the L3P
# when no explicit address is configured
self._mock_external_dict([('supported1', '192.168.0.2/24'),
('supported2', '192.168.1.2/24')])
es1 = self.create_external_segment(
name='supported1', cidr='192.168.0.0/24')['external_segment']
es2 = self.create_external_segment(
name='supported2', cidr='192.168.1.0/24')['external_segment']
l3p = self.create_l3_policy(
external_segments={es1['id']: []},
expected_res_status=201)['l3_policy']
self.assertEqual(['169.254.0.2'], l3p['external_segments'][es1['id']])
l3p = self.update_l3_policy(
l3p['id'], expected_res_status=200,
external_segments={es1['id']: [], es2['id']: []})['l3_policy']
self.assertEqual(['169.254.0.2'], l3p['external_segments'][es1['id']])
self.assertEqual(['169.254.0.2'], l3p['external_segments'][es2['id']])
# Explicitly change the address on es1; es2 keeps the auto-allocated one
l3p = self.update_l3_policy(
l3p['id'], expected_res_status=200,
external_segments={es1['id']: ['169.254.0.3'],
es2['id']: []})['l3_policy']
self.assertEqual(['169.254.0.3'], l3p['external_segments'][es1['id']])
self.assertEqual(['169.254.0.2'], l3p['external_segments'][es2['id']])
def _test_multi_es_with_ptg(self, shared_es):
self._mock_external_dict([('supported1', '192.168.0.2/24'),
('supported2', '192.168.1.2/24')])
es1 = self.create_external_segment(shared=shared_es,
name='supported1', cidr='192.168.0.0/24')['external_segment']
es2 = self.create_external_segment(shared=shared_es,
name='supported2', cidr='192.168.1.0/24')['external_segment']
l3p = self.create_l3_policy(
external_segments={es1['id']: [], es2['id']: []},
expected_res_status=201)['l3_policy']
l2p = self.create_l2_policy(l3_policy_id=l3p['id'])['l2_policy']
ptg = self.create_policy_target_group(name="ptg",
l2_policy_id=l2p['id'],
expected_res_status=201)['policy_target_group']
res = self.new_delete_request('policy_target_groups', ptg['id'],
self.fmt).get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
def test_multi_es_with_ptg_1(self):
self._test_multi_es_with_ptg(False)
def test_multi_es_with_ptg_2(self):
self._test_multi_es_with_ptg(True)
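# With per-tenant NAT EPG enabled (and NAT on), the tenant-specific
# NAT EPG is created once for the tenant's first L3P and deleted only
# when the tenant's last L3P detaches from the ES.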
def test_multi_l3p_ptne(self):
self.driver.per_tenant_nat_epg = True
self._mock_external_dict([('supported', '192.168.0.2/24')])
es = self.create_external_segment(
name='supported', shared=True)['external_segment']
mgr = self.driver.apic_manager
mgr.ensure_epg_created.reset_mock()
l3ps = []
for x in range(0, 3 if self.nat_enabled else 1):
l3ps.append(self.create_l3_policy(
name='myl3p-%s' % x, tenant_id='another_tenant',
external_segments={es['id']: []},
expected_res_status=201)['l3_policy'])
if self.nat_enabled:
mgr.ensure_epg_created.assert_called_once_with(
'another_tenant', "NAT-epg-%s" % es['id'],
bd_name="NAT-bd-%s" % es['id'],
bd_owner=self.common_tenant, transaction=mock.ANY)
else:
mgr.ensure_epg_created.assert_not_called()
for l3p in l3ps[:-1]:
self.delete_l3_policy(l3p['id'], tenant_id=l3p['tenant_id'])
mgr.delete_epg_for_network.assert_not_called()
self.delete_l3_policy(l3ps[-1]['id'], tenant_id=l3ps[-1]['tenant_id'])
if self.nat_enabled:
mgr.delete_epg_for_network.assert_called_once_with(
'another_tenant', "NAT-epg-%s" % es['id'],
transaction=mock.ANY)
else:
mgr.delete_epg_for_network.assert_not_called()
def test_ptne_upgrade(self):
# Simulate "upgrade" - tenants existing before upgrade should
# continue using non-specific NAT EPG where as new ones use
# specific NAT EPGs
self._mock_external_dict([('supported', '192.168.0.2/24')])
es = self.create_external_segment(
name='supported', shared=True)['external_segment']
mgr = self.driver.apic_manager
mgr.ensure_epg_created.reset_mock()
ctx = context.get_admin_context()
ctx._plugin_context = ctx
l3p_a_1 = self.create_l3_policy(
name='myl3p-a-1', tenant_id='tenant_a',
external_segments={es['id']: []})['l3_policy']
mgr.ensure_epg_created.assert_not_called()
self.assertEqual((self.common_tenant, "NAT-epg-%s" % es['id']),
self.driver._determine_nat_epg_for_es(ctx, es, l3p_a_1))
# "Upgrade" and change to per-tenant NAT EPG
self.driver.per_tenant_nat_epg = True
if self.nat_enabled:
l3p_a_2 = self.create_l3_policy(
name='myl3p-a-2', tenant_id='tenant_a',
external_segments={es['id']: []})['l3_policy']
mgr.ensure_epg_created.assert_not_called()
self.assertEqual((self.common_tenant, "NAT-epg-%s" % es['id']),
self.driver._determine_nat_epg_for_es(ctx, es, l3p_a_2))
self.delete_l3_policy(l3p_a_2['id'],
tenant_id=l3p_a_2['tenant_id'])
self.assertEqual((self.common_tenant, "NAT-epg-%s" % es['id']),
self.driver._determine_nat_epg_for_es(ctx, es, l3p_a_1))
self.delete_l3_policy(l3p_a_1['id'], tenant_id=l3p_a_1['tenant_id'])
mgr.delete_epg_for_network.assert_not_called()
l3p_a_3 = self.create_l3_policy(
name='myl3p-a-3', tenant_id='tenant_a',
external_segments={es['id']: []})['l3_policy']
if self.nat_enabled:
mgr.ensure_epg_created.assert_called_once_with(
'tenant_a', "NAT-epg-%s" % es['id'],
bd_name="NAT-bd-%s" % es['id'], bd_owner=self.common_tenant,
transaction=mock.ANY)
self.assertEqual(('tenant_a', "NAT-epg-%s" % es['id']),
self.driver._determine_nat_epg_for_es(ctx, es, l3p_a_3))
else:
mgr.ensure_epg_created.assert_not_called()
self.delete_l3_policy(l3p_a_3['id'], tenant_id=l3p_a_3['tenant_id'])
mgr.ensure_epg_created.reset_mock()
l3p_b_1 = self.create_l3_policy(
name='myl3p-b-1', tenant_id='tenant_b',
external_segments={es['id']: []})['l3_policy']
if self.nat_enabled:
mgr.ensure_epg_created.assert_called_once_with(
'tenant_b', "NAT-epg-%s" % es['id'],
bd_name="NAT-bd-%s" % es['id'], bd_owner=self.common_tenant,
transaction=mock.ANY)
self.assertEqual(('tenant_b', "NAT-epg-%s" % es['id']),
self.driver._determine_nat_epg_for_es(ctx, es, l3p_b_1))
else:
mgr.ensure_epg_created.assert_not_called()
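# The subclasses below re-run the full TestL3Policy suite with NAT disabled
# and/or against a pre-existing L3Out.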
class TestL3PolicyNoNat(TestL3Policy):
def setUp(self):
super(TestL3PolicyNoNat, self).setUp(nat_enabled=False)
class TestL3PolicyPreL3Out(TestL3Policy):
def setUp(self):
super(TestL3PolicyPreL3Out, self).setUp(pre_existing_l3out=True)
class TestL3PolicyNoNatPreL3Out(TestL3Policy):
def setUp(self):
super(TestL3PolicyNoNatPreL3Out, self).setUp(
nat_enabled=False, pre_existing_l3out=True)
class TestPolicyRuleSet(ApicMappingTestCase):
# TODO(ivar): verify rule intersection with hierarchical PRS happens
# on APIC
def _test_policy_rule_set_created_on_apic(self, shared=False):
ct = self.create_policy_rule_set(name="ctr",
shared=shared)['policy_rule_set']
tenant = self.common_tenant if shared else ct['tenant_id']
mgr = self.driver.apic_manager
mgr.create_contract.assert_called_once_with(
ct['id'], owner=tenant, transaction=mock.ANY)
def test_policy_rule_set_created_on_apic(self):
self._test_policy_rule_set_created_on_apic()
def test_policy_rule_set_created_on_apic_shared(self):
self._test_policy_rule_set_created_on_apic(shared=True)
def _test_policy_rule_set_created_with_rules(self, shared=False):
bi, in_d, out = range(3)
rules = self._create_3_direction_rules(shared=shared)
# exclude BI rule for now
ctr = self.create_policy_rule_set(
name="ctr", policy_rules=[x['id'] for x in rules[1:]])[
'policy_rule_set']
rule_owner = self.common_tenant if shared else rules[0]['tenant_id']
# Verify that the in-out rules are correctly enforced on the APIC
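        # Each directional rule is also expected to be mirrored by a
        # REVERSE_PREFIX filter enforced in the opposite direction (the
        # driver's reverse filter, presumably for return traffic).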
mgr = self.driver.apic_manager
expected_calls = [
mock.call(ctr['id'], ctr['id'], rules[in_d]['id'],
owner=ctr['tenant_id'], transaction=mock.ANY,
unset=False, rule_owner=rule_owner),
mock.call(ctr['id'], ctr['id'],
amap.REVERSE_PREFIX + rules[out]['id'],
owner=ctr['tenant_id'], transaction=mock.ANY,
unset=False, rule_owner=rule_owner)]
self._check_call_list(
expected_calls,
mgr.manage_contract_subject_in_filter.call_args_list)
expected_calls = [
mock.call(ctr['id'], ctr['id'], rules[out]['id'],
owner=ctr['tenant_id'], transaction=mock.ANY,
unset=False, rule_owner=rule_owner),
mock.call(ctr['id'], ctr['id'],
amap.REVERSE_PREFIX + rules[in_d]['id'],
owner=ctr['tenant_id'], transaction=mock.ANY,
unset=False, rule_owner=rule_owner)]
self._check_call_list(
expected_calls,
mgr.manage_contract_subject_out_filter.call_args_list)
# Create policy_rule_set with BI rule
ctr = self.create_policy_rule_set(
name="ctr", policy_rules=[rules[bi]['id']])['policy_rule_set']
mgr.manage_contract_subject_in_filter.call_happened_with(
ctr['id'], ctr['id'], rules[bi]['id'], owner=ctr['tenant_id'],
transaction=mock.ANY, unset=False,
rule_owner=rule_owner)
mgr.manage_contract_subject_out_filter.call_happened_with(
ctr['id'], ctr['id'], rules[bi]['id'], owner=ctr['tenant_id'],
transaction=mock.ANY, unset=False,
rule_owner=rule_owner)
mgr.manage_contract_subject_in_filter.call_happened_with(
ctr['id'], ctr['id'], amap.REVERSE_PREFIX + rules[bi]['id'],
owner=ctr['tenant_id'], transaction=mock.ANY, unset=False,
rule_owner=rule_owner)
mgr.manage_contract_subject_out_filter.call_happened_with(
ctr['id'], ctr['id'], amap.REVERSE_PREFIX + rules[bi]['id'],
owner=ctr['tenant_id'], transaction=mock.ANY, unset=False,
rule_owner=rule_owner)
def test_policy_rule_set_created_with_rules(self):
self._test_policy_rule_set_created_with_rules()
def test_policy_rule_set_created_with_rules_shared(self):
self._test_policy_rule_set_created_with_rules(shared=True)
def _test_policy_rule_set_updated_with_new_rules(self, shared=False):
bi, in_d, out = range(3)
old_rules = self._create_3_direction_rules(shared=shared)
new_rules = self._create_3_direction_rules(shared=shared)
# exclude BI rule for now
ctr = self.create_policy_rule_set(
name="ctr",
policy_rules=[x['id'] for x in old_rules[1:]])['policy_rule_set']
data = {'policy_rule_set': {
'policy_rules': [x['id'] for x in new_rules[1:]]}}
rule_owner = (self.common_tenant if shared else
old_rules[in_d]['tenant_id'])
mgr = self.driver.apic_manager
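        # Swap the filter-management mocks for MockCallRecorder instances so
        # that individual calls can be checked via call_happened_with().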
mgr.manage_contract_subject_in_filter = MockCallRecorder()
mgr.manage_contract_subject_out_filter = MockCallRecorder()
self.new_update_request(
'policy_rule_sets', data, ctr['id'], self.fmt).get_response(
self.ext_api)
# Verify old IN rule unset and new IN rule set
self.assertTrue(
mgr.manage_contract_subject_in_filter.call_happened_with(
ctr['id'], ctr['id'], old_rules[in_d]['id'],
rule_owner=rule_owner,
owner=ctr['tenant_id'], transaction='transaction', unset=True))
self.assertTrue(
mgr.manage_contract_subject_in_filter.call_happened_with(
ctr['id'], ctr['id'], new_rules[in_d]['id'],
owner=ctr['tenant_id'], transaction='transaction',
unset=False, rule_owner=rule_owner))
self.assertTrue(
mgr.manage_contract_subject_out_filter.call_happened_with(
ctr['id'], ctr['id'], old_rules[out]['id'],
owner=ctr['tenant_id'], transaction='transaction', unset=True,
rule_owner=rule_owner))
self.assertTrue(
mgr.manage_contract_subject_out_filter.call_happened_with(
ctr['id'], ctr['id'], new_rules[out]['id'],
owner=ctr['tenant_id'], transaction='transaction',
unset=False, rule_owner=rule_owner))
ctr = self.create_policy_rule_set(
name="ctr",
policy_rules=[old_rules[0]['id']])['policy_rule_set']
data = {'policy_rule_set': {'policy_rules': [new_rules[0]['id']]}}
self.new_update_request(
'policy_rule_sets', data, ctr['id'], self.fmt).get_response(
self.ext_api)
        # Verify old BI rule unset and new BI rule set
self.assertTrue(
mgr.manage_contract_subject_in_filter.call_happened_with(
ctr['id'], ctr['id'], old_rules[bi]['id'],
owner=ctr['tenant_id'], transaction='transaction', unset=True,
rule_owner=rule_owner))
self.assertTrue(
mgr.manage_contract_subject_out_filter.call_happened_with(
ctr['id'], ctr['id'], old_rules[bi]['id'],
owner=ctr['tenant_id'], transaction='transaction', unset=True,
rule_owner=rule_owner))
self.assertTrue(
mgr.manage_contract_subject_in_filter.call_happened_with(
ctr['id'], ctr['id'], new_rules[bi]['id'],
owner=ctr['tenant_id'], transaction='transaction',
unset=False, rule_owner=rule_owner))
self.assertTrue(
mgr.manage_contract_subject_out_filter.call_happened_with(
ctr['id'], ctr['id'], new_rules[bi]['id'],
owner=ctr['tenant_id'], transaction='transaction',
unset=False, rule_owner=rule_owner))
def test_policy_rule_set_updated_with_new_rules(self):
self._test_policy_rule_set_updated_with_new_rules()
def test_policy_rule_set_updated_with_new_rules_shared(self):
self._test_policy_rule_set_updated_with_new_rules(shared=True)
def _create_3_direction_rules(self, shared=False):
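        # Helper: one 'allow' action and three rules sharing it, with
        # classifiers in the 'bi', 'in' and 'out' directions; the 'out'
        # classifier switches to udp so its filter differs from the others.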
a1 = self.create_policy_action(name='a1',
action_type='allow',
shared=shared)['policy_action']
cl_attr = {'protocol': 'tcp', 'port_range': 80}
cls = []
for direction in ['bi', 'in', 'out']:
if direction == 'out':
cl_attr['protocol'] = 'udp'
cls.append(self.create_policy_classifier(
direction=direction, shared=shared,
**cl_attr)['policy_classifier'])
rules = []
for classifier in cls:
rules.append(self.create_policy_rule(
policy_classifier_id=classifier['id'],
policy_actions=[a1['id']],
shared=shared)['policy_rule'])
return rules
class TestPolicyRule(ApicMappingTestCase):
def _test_policy_rule_created_on_apic(self, shared=False):
pr = self._create_simple_policy_rule('in', 'tcp', 88, shared=shared)
pr1 = self._create_simple_policy_rule('in', 'udp', 53, shared=shared)
pr2 = self._create_simple_policy_rule('in', None, 88, shared=shared)
tenant = self.common_tenant if shared else pr['tenant_id']
mgr = self.driver.apic_manager
expected_calls = [
mock.call(pr['id'], owner=tenant, entry='os-entry-0', etherT='ip',
prot='tcp', dToPort=88, dFromPort=88,
transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr['id'], owner=tenant,
entry='os-entry-0', etherT='ip', prot='tcp', sToPort=88,
sFromPort=88, tcpRules='est', transaction=mock.ANY),
mock.call(pr1['id'], owner=tenant, entry='os-entry-0',
etherT='ip', prot='udp', dToPort=53, dFromPort=53,
transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr1['id'], owner=tenant,
entry='os-entry-0', etherT='ip', prot='udp', sToPort=53,
sFromPort=53, transaction=mock.ANY),
mock.call(pr2['id'], owner=tenant, entry='os-entry-0',
etherT='unspecified', dToPort=88, dFromPort=88,
transaction=mock.ANY)]
self._check_call_list(
expected_calls, mgr.create_tenant_filter.call_args_list)
mgr.reset_mock()
pr = self._create_simple_policy_rule('bi', None, None, shared=shared)
expected_calls = [
mock.call(pr['id'], owner=tenant, entry='os-entry-0',
etherT='unspecified', transaction=mock.ANY)]
self._check_call_list(
expected_calls, mgr.create_tenant_filter.call_args_list)
def test_policy_rule_created_on_apic(self):
self._test_policy_rule_created_on_apic()
def test_policy_rule_created_on_apic_shared(self):
self._test_policy_rule_created_on_apic(shared=True)
def _test_policy_rule_deleted_on_apic(self, shared=False):
pr = self._create_simple_policy_rule(shared=shared)
pr1 = self._create_simple_policy_rule('in', 'udp', 53, shared=shared)
self.delete_policy_rule(pr['id'], expected_res_status=204)
tenant = self.common_tenant if shared else pr['tenant_id']
mgr = self.driver.apic_manager
expected_calls = [
mock.call(pr['id'], owner=tenant, transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr['id'], owner=tenant,
transaction=mock.ANY)]
self._check_call_list(
expected_calls, mgr.delete_tenant_filter.call_args_list)
mgr.delete_tenant_filter.reset_mock()
self.delete_policy_rule(pr1['id'], expected_res_status=204)
expected_calls = [
mock.call(pr1['id'], owner=tenant, transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr1['id'], owner=tenant,
transaction=mock.ANY)]
self._check_call_list(
expected_calls, mgr.delete_tenant_filter.call_args_list)
def test_policy_rule_deleted_on_apic(self):
self._test_policy_rule_deleted_on_apic()
def test_policy_rule_deleted_on_apic_shared(self):
self._test_policy_rule_deleted_on_apic(shared=True)
def test_policy_classifier_updated(self):
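        # Updating a shared classifier must delete and re-create the tenant
        # filters of every policy rule that uses it (across tenants) and,
        # when the protocol's reversibility changes, re-sync the contract
        # subjects of the rule sets containing those rules.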
pa = self.create_policy_action(
action_type='allow', is_admin_context=True,
tenant_id='admin', shared=True)['policy_action']
pc = self.create_policy_classifier(
direction='in', protocol='udp', port_range=80,
shared=True, is_admin_context=True,
tenant_id='admin')['policy_classifier']
pr1 = self.create_policy_rule(
policy_classifier_id=pc['id'], policy_actions=[pa['id']],
shared=True, is_admin_context=True,
tenant_id='admin')['policy_rule']
pr2 = self.create_policy_rule(policy_classifier_id=pc['id'],
policy_actions=[pa['id']])['policy_rule']
prs1 = self.create_policy_rule_set(
policy_rules=[pr1['id']])['policy_rule_set']
prs2 = self.create_policy_rule_set(
policy_rules=[pr2['id'], pr1['id']])['policy_rule_set']
mgr = self.driver.apic_manager
mgr.reset_mock()
        # Removing the classifier port should just delete and re-create
        # the filters
self.update_policy_classifier(pc['id'], port_range=None,
is_admin_context=True)
expected_calls = [
mock.call(pr1['id'], owner='common', etherT='ip', prot='udp',
entry='os-entry-0', transaction=mock.ANY),
mock.call(pr2['id'], owner='test-tenant', etherT='ip', prot='udp',
entry='os-entry-0', transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr1['id'], owner='common',
etherT='ip', prot='udp', entry='os-entry-0',
transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr2['id'], owner='test-tenant',
etherT='ip', prot='udp', entry='os-entry-0',
transaction=mock.ANY)]
self._check_call_list(
expected_calls, mgr.create_tenant_filter.call_args_list)
expected_calls = [
mock.call(pr1['id'], owner='common', transaction=mock.ANY),
mock.call(pr2['id'], owner='test-tenant', transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr1['id'], owner='common',
transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr2['id'], owner='test-tenant',
transaction=mock.ANY)]
self._check_call_list(
expected_calls, mgr.delete_tenant_filter.call_args_list)
self.assertFalse(mgr.manage_contract_subject_in_filter.called)
self.assertFalse(mgr.manage_contract_subject_out_filter.called)
mgr.reset_mock()
        # Change the classifier protocol to a non-revertible one
self.update_policy_classifier(pc['id'], protocol=None,
is_admin_context=True)
expected_calls = [
mock.call(pr1['id'], owner='common', etherT='unspecified',
entry='os-entry-0', transaction=mock.ANY),
mock.call(pr2['id'], owner='test-tenant', etherT='unspecified',
entry='os-entry-0', transaction=mock.ANY)]
self._check_call_list(
expected_calls, mgr.create_tenant_filter.call_args_list)
expected_calls = [
mock.call(pr1['id'], owner='common', transaction=mock.ANY),
mock.call(pr2['id'], owner='test-tenant', transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr1['id'], owner='common',
transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr2['id'], owner='test-tenant',
transaction=mock.ANY)]
self._check_call_list(
expected_calls, mgr.delete_tenant_filter.call_args_list)
# Protocol went from revertible to non-revertible
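        # i.e. the reverse filters can no longer be derived, so the contract
        # subjects that referenced them have to be re-programmed.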
self.assertTrue(mgr.manage_contract_subject_in_filter.called)
self.assertTrue(mgr.manage_contract_subject_out_filter.called)
mgr.reset_mock()
# Change Classifier protocol to revertible
self.update_policy_classifier(pc['id'], protocol='tcp',
is_admin_context=True)
expected_calls = [
mock.call(pr1['id'], owner='common', transaction=mock.ANY),
mock.call(pr2['id'], owner='test-tenant', transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr1['id'], owner='common',
transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr2['id'], owner='test-tenant',
transaction=mock.ANY)]
self._check_call_list(
expected_calls, mgr.delete_tenant_filter.call_args_list)
expected_calls = [
mock.call(pr1['id'], owner='common', etherT='ip', prot='tcp',
entry='os-entry-0', transaction=mock.ANY),
mock.call(pr2['id'], owner='test-tenant', etherT='ip', prot='tcp',
entry='os-entry-0', transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr1['id'], owner='common',
etherT='ip', prot='tcp', tcpRules='est',
entry='os-entry-0', transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr2['id'], owner='test-tenant',
etherT='ip', prot='tcp', tcpRules='est',
entry='os-entry-0', transaction=mock.ANY)]
self._check_call_list(
expected_calls, mgr.create_tenant_filter.call_args_list)
expected_calls = [
# Unset PR1 and PR2 IN
mock.call(prs1['id'], prs1['id'], pr1['id'], owner='test-tenant',
transaction=mock.ANY, unset=True, rule_owner='common'),
mock.call(prs2['id'], prs2['id'], pr1['id'], owner='test-tenant',
transaction=mock.ANY, unset=True, rule_owner='common'),
mock.call(prs2['id'], prs2['id'], pr2['id'], owner='test-tenant',
transaction=mock.ANY, unset=True,
rule_owner='test-tenant'),
# SET PR1 and PR2 IN
mock.call(prs1['id'], prs1['id'], pr1['id'], owner='test-tenant',
transaction=mock.ANY, unset=False, rule_owner='common'),
mock.call(prs2['id'], prs2['id'], pr1['id'], owner='test-tenant',
transaction=mock.ANY, unset=False, rule_owner='common'),
mock.call(prs2['id'], prs2['id'], pr2['id'], owner='test-tenant',
transaction=mock.ANY, unset=False,
rule_owner='test-tenant')
]
self._check_call_list(
expected_calls,
mgr.manage_contract_subject_in_filter.call_args_list)
# SET Reverse PR1 and PR2 OUT
expected_calls = [
mock.call(prs1['id'], prs1['id'], amap.REVERSE_PREFIX + pr1['id'],
owner='test-tenant', transaction=mock.ANY, unset=False,
rule_owner='common'),
mock.call(prs2['id'], prs2['id'], amap.REVERSE_PREFIX + pr1['id'],
owner='test-tenant', transaction=mock.ANY, unset=False,
rule_owner='common'),
mock.call(prs2['id'], prs2['id'], amap.REVERSE_PREFIX + pr2['id'],
owner='test-tenant', transaction=mock.ANY, unset=False,
rule_owner='test-tenant')
]
self._check_call_list(
expected_calls,
mgr.manage_contract_subject_out_filter.call_args_list)
def test_icmp_rule_created_on_apic(self):
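        # ICMP has no ports to reverse, so the reverse filter is expected to
        # consist of one entry per ICMP reply type (echo-rep, dst-unreach,
        # src-quench, time-exceeded), each with a distinct entry name.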
pr = self._create_simple_policy_rule('in', 'icmp', None)
tenant = pr['tenant_id']
mgr = self.driver.apic_manager
expected_calls = [
mock.call(pr['id'], owner=tenant, entry='os-entry-0', etherT='ip',
prot='icmp', transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr['id'], owner=tenant,
entry=mock.ANY, etherT='ip', icmpv4T='echo-rep',
prot='icmp', transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr['id'], owner=tenant,
entry=mock.ANY, etherT='ip', icmpv4T='dst-unreach',
prot='icmp', transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr['id'], owner=tenant,
entry=mock.ANY, etherT='ip', icmpv4T='src-quench',
prot='icmp', transaction=mock.ANY),
mock.call(amap.REVERSE_PREFIX + pr['id'], owner=tenant,
entry=mock.ANY, etherT='ip', icmpv4T='time-exceeded',
prot='icmp', transaction=mock.ANY)]
        # Verify that each reverse-filter entry name is distinct
found = set()
for call in mgr.create_tenant_filter.call_args_list:
# Only for reverse filters
if call[0][0].startswith(amap.REVERSE_PREFIX):
self.assertFalse(call[1]['entry'] in found)
found.add(call[1]['entry'])
self._check_call_list(
expected_calls, mgr.create_tenant_filter.call_args_list)
class TestExternalSegment(ApicMappingTestCase):
def test_pat_rejected(self):
self._mock_external_dict([('supported', '192.168.0.2/24')])
# Verify Rejected on create
res = self.create_external_segment(
name='supported', port_address_translation=True,
expected_res_status=400)
self.assertEqual('PATNotSupportedByApicDriver',
res['NeutronError']['type'])
# Verify Rejected on Update
es = self.create_external_segment(
name='supported', expected_res_status=201,
port_address_translation=False)['external_segment']
res = self.update_external_segment(
es['id'], expected_res_status=400, port_address_translation=True)
self.assertEqual('PATNotSupportedByApicDriver',
res['NeutronError']['type'])
def test_edge_nat_invalid_vlan_range_rejected(self):
self._mock_external_dict([('supported', '192.168.0.2/24')],
is_edge_nat=True)
self.driver.l3out_vlan_alloc.l3out_vlan_ranges = {}
res = self.create_external_segment(
name='supported', expected_res_status=400)
self.assertEqual('EdgeNatBadVlanRange', res['NeutronError']['type'])
ext_info = self.driver.apic_manager.ext_net_dict.get('supported')
del ext_info['vlan_range']
res = self.create_external_segment(
name='supported', expected_res_status=400)
self.assertEqual('EdgeNatVlanRangeNotFound',
res['NeutronError']['type'])
def _test_create_delete(self, shared=False, is_edge_nat=False):
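        # Creating a supported external segment should pre-provision the NAT
        # plumbing on APIC (NAT VRF, NAT BD/EPG unless in edge-nat mode, and
        # the NAT-allow contract); deleting it must tear all of that down.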
mgr = self.driver.apic_manager
self._mock_external_dict([('supported', '192.168.0.2/24')],
is_edge_nat)
mgr.ext_net_dict['supported']['host_pool_cidr'] = '192.168.200.1/24'
es = self.create_external_segment(name='supported',
cidr='192.168.0.2/24',
expected_res_status=201, shared=shared)['external_segment']
self.create_external_segment(name='unsupport', expected_res_status=201,
shared=shared)
self.assertEqual('192.168.0.2/24', es['cidr'])
self.assertIsNotNone(es['subnet_id'])
subnet = self._get_object('subnets', es['subnet_id'],
self.api)['subnet']
self.assertEqual('169.254.0.0/16', subnet['cidr'])
owner = es['tenant_id'] if not shared else self.common_tenant
prs = "NAT-allow-%s" % es['id']
if self.nat_enabled:
ctx = "NAT-vrf-%s" % es['id']
ctx_owner = owner
contract_owner = owner
if self.pre_l3out:
ctx = APIC_PRE_VRF
ctx_owner = APIC_PRE_VRF_TENANT
contract_owner = APIC_PRE_L3OUT_TENANT
self.assertFalse(mgr.ensure_context_enforced.called)
else:
mgr.ensure_context_enforced.assert_called_with(
owner=owner, ctx_id=ctx,
transaction=mock.ANY)
if not is_edge_nat:
                mgr.ensure_bd_created_on_apic.assert_called_with(
owner, "NAT-bd-%s" % es['id'], ctx_owner=ctx_owner,
ctx_name=ctx, transaction=mock.ANY)
mgr.ensure_epg_created.assert_called_with(
owner, "NAT-epg-%s" % es['id'],
bd_name="NAT-bd-%s" % es['id'],
transaction=mock.ANY)
else:
self.assertFalse(mgr.ensure_bd_created_on_apic.called)
self.assertFalse(mgr.ensure_epg_created.called)
mgr.create_tenant_filter.assert_called_with(
prs, owner=contract_owner,
entry="allow-all", transaction=mock.ANY)
mgr.manage_contract_subject_bi_filter.assert_called_with(
prs, prs, prs, owner=contract_owner, transaction=mock.ANY)
if not is_edge_nat:
expected_calls = [
mock.call(owner, "NAT-epg-%s" % es['id'], prs,
transaction=mock.ANY),
mock.call(owner, "NAT-epg-%s" % es['id'], prs,
provider=True, transaction=mock.ANY)]
self._check_call_list(expected_calls,
mgr.set_contract_for_epg.call_args_list)
else:
self.assertFalse(mgr.ensure_subnet_created_on_apic.called)
self.assertFalse(mgr.set_contract_for_epg.called)
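            # A host-SNAT pool subnet should also have been created from the
            # host_pool_cidr configured above.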
ctx = context.get_admin_context()
internal_subnets = self._db_plugin.get_subnets(
ctx, filters={'name': [amap.HOST_SNAT_POOL]})
self.assertEqual(1, len(internal_subnets))
else:
self.assertFalse(mgr.ensure_bd_created_on_apic.called)
self.assertFalse(mgr.ensure_epg_created.called)
self.assertFalse(mgr.ensure_subnet_created_on_apic.called)
self.assertFalse(mgr.create_tenant_filter.called)
self.assertFalse(mgr.manage_contract_subject_bi_filter.called)
self.assertFalse(mgr.set_contract_for_epg.called)
subnet_id = es['subnet_id']
self.delete_external_segment(es['id'],
expected_res_status=webob.exc.HTTPNoContent.code)
self._get_object('subnets', subnet_id, self.api,
expected_res_status=404)
if self.nat_enabled:
ctx = "NAT-vrf-%s" % es['id']
ctx_owner = owner
contract_owner = owner
if self.pre_l3out:
ctx = APIC_PRE_VRF
ctx_owner = APIC_PRE_VRF_TENANT
contract_owner = APIC_PRE_L3OUT_TENANT
self.assertFalse(mgr.ensure_context_enforced.called)
else:
mgr.ensure_context_deleted.assert_called_with(
ctx_owner, ctx, transaction=mock.ANY)
if not is_edge_nat:
mgr.delete_bd_on_apic.assert_called_with(
owner, "NAT-bd-%s" % es['id'], transaction=mock.ANY)
mgr.delete_epg_for_network.assert_called_with(
owner, "NAT-epg-%s" % es['id'], transaction=mock.ANY)
else:
self.assertFalse(mgr.delete_bd_on_apic.called)
self.assertFalse(mgr.delete_epg_for_network.called)
mgr.delete_contract.assert_called_with(
prs, owner=contract_owner, transaction=mock.ANY)
mgr.delete_tenant_filter.assert_called_with(
prs, owner=contract_owner, transaction=mock.ANY)
else:
self.assertFalse(mgr.delete_bd_on_apic.called)
self.assertFalse(mgr.delete_epg_for_network.called)
self.assertFalse(mgr.delete_contract.called)
self.assertFalse(mgr.delete_tenant_filter.called)
def test_create_delete_unshared(self):
self._test_create_delete(False)
def test_create_delete_shared(self):
self._test_create_delete(True)
def test_create_delete_unshared_edge_nat(self):
self._test_create_delete(False, is_edge_nat=True)
def test_create_delete_shared_edge_nat(self):
self._test_create_delete(True, is_edge_nat=True)
def test_update_unsupported_noop(self):
self._mock_external_dict([('supported', '192.168.0.2/24')])
es = self.create_external_segment(
name='unsupport', cidr='192.168.0.0/24',
external_routes=[{'destination': '0.0.0.0/0',
'nexthop': '192.168.0.254'},
{'destination': '128.0.0.0/16',
'nexthop': None}],
expected_res_status=201)['external_segment']
self.update_external_segment(es['id'], expected_res_status=200,
external_routes=[])
mgr = self.driver.apic_manager
self.assertFalse(mgr.ensure_static_route_deleted.called)
self.assertFalse(mgr.ensure_external_epg_routes_deleted.called)
self.assertFalse(mgr.ensure_static_route_created.called)
self.assertFalse(mgr.ensure_external_epg_created.called)
self.assertFalse(mgr.ensure_next_hop_deleted.called)
def _test_route_update_remove(self, shared_es, is_edge_nat=False):
# Verify routes are updated correctly
self._mock_external_dict([('supported', '192.168.0.2/24')],
is_edge_nat)
es = self.create_external_segment(
name='supported', cidr='192.168.0.0/24', shared=shared_es,
external_routes=[{'destination': '0.0.0.0/0',
'nexthop': '192.168.0.254'},
{'destination': '128.0.0.0/16',
'nexthop': None}],
expected_res_status=201)['external_segment']
# create L3-policies
if self.pre_l3out and not self.nat_enabled:
tenants = [es['tenant_id']]
else:
tenants = (['tenant_a', 'tenant_b', 'tenant_c']
if self.nat_enabled and shared_es
else [es['tenant_id']])
l3p_list = []
for x in xrange(len(tenants)):
l3p = self.create_l3_policy(
shared=False,
tenant_id=tenants[x],
external_segments={es['id']: []},
expected_res_status=201)['l3_policy']
l3p_list.append(l3p)
# Attach external policy
f = self.create_external_policy
eps = [f(external_segments=[es['id']],
tenant_id=tenants[x],
expected_res_status=201)['external_policy']
for x in xrange(len(tenants))]
mgr = self.driver.apic_manager
owner = es['tenant_id'] if not shared_es else self.common_tenant
mgr.ensure_external_epg_created.reset_mock()
mgr.ensure_static_route_created.reset_mock()
# Remove route completely
self.update_external_segment(es['id'], expected_res_status=200,
external_routes=[
{'destination': '0.0.0.0/0',
'nexthop': '192.168.0.254'}])
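        # Per-L3P shadow objects on APIC are named "Shd-<l3p_id>-<id>", or
        # "Auto-<l3p_id>-<id>" when the segment is in edge-nat mode.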
sub_str = "Shd-%s-%s"
if is_edge_nat:
sub_str = "Auto-%s-%s"
mgr = self.driver.apic_manager
if not self.pre_l3out:
expected_delete_calls = []
expected_delete_calls.append(
mock.call(es['id'], mocked.APIC_EXT_SWITCH,
'128.0.0.0/16', owner=owner, transaction=mock.ANY))
if self.nat_enabled and is_edge_nat:
for x in range(len(tenants)):
l3p = l3p_list[x]
l3out = sub_str % (l3p['id'], es['id'])
tenant = tenants[x]
expected_delete_calls.append(
mock.call(l3out, mocked.APIC_EXT_SWITCH,
'128.0.0.0/16', owner=tenant,
transaction=mock.ANY))
self._check_call_list(expected_delete_calls,
mgr.ensure_static_route_deleted.call_args_list)
else:
self.assertFalse(mgr.ensure_static_route_deleted.called)
expected_delete_calls = []
for x in range(len(tenants)):
ep = eps[x]
l3p = l3p_list[x]
l3out = es['name' if self.pre_l3out else 'id']
ext_epg = ep['id']
tenant = APIC_PRE_L3OUT_TENANT if self.pre_l3out else owner
if self.nat_enabled:
l3out = sub_str % (l3p['id'], es['id'])
ext_epg = sub_str % (l3p['id'], ext_epg)
tenant = tenants[x]
expected_delete_calls.append(
mock.call(l3out, subnets=['128.0.0.0/16'],
external_epg=ext_epg, owner=tenant,
transaction=mock.ANY))
self._check_call_list(
expected_delete_calls,
mgr.ensure_external_epg_routes_deleted.call_args_list)
self.assertFalse(mgr.ensure_static_route_created.called)
self.assertFalse(mgr.ensure_external_epg_created.called)
self.assertFalse(mgr.ensure_next_hop_deleted.called)
# Remove nexthop only
mgr.ensure_static_route_deleted.reset_mock()
mgr.ensure_external_epg_routes_deleted.reset_mock()
self.update_external_segment(es['id'], expected_res_status=200,
external_routes=[
{'destination': '0.0.0.0/0',
'nexthop': None}])
if not self.pre_l3out:
expected_delete_calls = []
expected_create_calls = []
expected_delete_calls.append(
mock.call(es['id'], mocked.APIC_EXT_SWITCH, '0.0.0.0/0',
'192.168.0.254', owner=owner, transaction=mock.ANY))
            # Since the new nexthop is None, the default gateway is used
expected_create_calls.append(
mock.call(es['id'], mocked.APIC_EXT_SWITCH, '192.168.0.1',
subnet='0.0.0.0/0', owner=owner, transaction=mock.ANY))
if self.nat_enabled and is_edge_nat:
for x in range(len(tenants)):
l3p = l3p_list[x]
l3out = sub_str % (l3p['id'], es['id'])
tenant = tenants[x]
expected_delete_calls.append(
mock.call(l3out, mocked.APIC_EXT_SWITCH, '0.0.0.0/0',
'192.168.0.254', owner=tenant,
transaction=mock.ANY))
expected_create_calls.append(
mock.call(l3out, mocked.APIC_EXT_SWITCH, '192.168.0.1',
subnet='0.0.0.0/0', owner=tenant,
transaction=mock.ANY))
self._check_call_list(expected_delete_calls,
mgr.ensure_next_hop_deleted.call_args_list)
self._check_call_list(expected_create_calls,
mgr.ensure_static_route_created.call_args_list)
else:
self.assertFalse(mgr.ensure_static_route_created.called)
self.assertFalse(mgr.ensure_next_hop_deleted.called)
expected_delete_calls = []
for x in range(len(tenants)):
ep = eps[x]
l3p = l3p_list[x]
l3out = es['name' if self.pre_l3out else 'id']
ext_epg = ep['id']
tenant = APIC_PRE_L3OUT_TENANT if self.pre_l3out else owner
if self.nat_enabled:
l3out = sub_str % (l3p['id'], es['id'])
ext_epg = sub_str % (l3p['id'], ext_epg)
tenant = tenants[x]
expected_delete_calls.append(
mock.call(l3out, subnet='0.0.0.0/0',
external_epg=ext_epg, owner=tenant,
transaction=mock.ANY))
self._check_call_list(expected_delete_calls,
mgr.ensure_external_epg_created.call_args_list)
self.assertFalse(mgr.ensure_static_route_deleted.called)
self.assertFalse(mgr.ensure_external_epg_routes_deleted.called)
    # Despite the poor naming convention, these tests are kept separate so
    # that each one runs with freshly reset mocks.
def test_route_update_remove_1(self):
self._test_route_update_remove(shared_es=True)
def test_route_update_remove_2(self):
self._test_route_update_remove(shared_es=False)
def test_route_update_remove_edge_nat_mode_1(self):
self._test_route_update_remove(shared_es=True, is_edge_nat=True)
def test_route_update_remove_edge_nat_mode_2(self):
self._test_route_update_remove(shared_es=False, is_edge_nat=True)
def _test_route_update_add(self, shared_es, is_edge_nat=False):
# Verify routes are updated correctly
self._mock_external_dict([('supported', '192.168.0.2/24')],
is_edge_nat)
es = self.create_external_segment(
name='supported', cidr='192.168.0.0/24', shared=shared_es,
external_routes=[], expected_res_status=201)['external_segment']
if self.pre_l3out and not self.nat_enabled:
tenants = [es['tenant_id']]
else:
tenants = (['tenant_a', 'tenant_b', 'tenant_c']
if self.nat_enabled and shared_es
else [es['tenant_id']])
# create L3-policies
l3p_list = []
for x in xrange(len(tenants)):
l3p = self.create_l3_policy(
shared=False,
tenant_id=tenants[x],
external_segments={es['id']: []},
expected_res_status=201)['l3_policy']
l3p_list.append(l3p)
# Attach external policies
f = self.create_external_policy
eps = [f(external_segments=[es['id']],
tenant_id=tenants[x],
expected_res_status=201)['external_policy']
for x in xrange(len(tenants))]
mgr = self.driver.apic_manager
mgr.ensure_static_route_created.reset_mock()
mgr.ensure_external_epg_created.reset_mock()
owner = es['tenant_id'] if not shared_es else self.common_tenant
self.update_external_segment(es['id'], expected_res_status=200,
external_routes=[
{'destination': '128.0.0.0/16',
'nexthop': '192.168.0.254'}])
sub_str = "Shd-%s-%s"
if is_edge_nat:
sub_str = "Auto-%s-%s"
if not self.pre_l3out:
expected_create_calls = []
expected_create_calls.append(
mock.call(es['id'], mocked.APIC_EXT_SWITCH,
'192.168.0.254', subnet='128.0.0.0/16',
owner=owner, transaction=mock.ANY))
if self.nat_enabled and is_edge_nat:
for x in range(len(tenants)):
l3p = l3p_list[x]
l3out = sub_str % (l3p['id'], es['id'])
tenant = tenants[x]
expected_create_calls.append(
mock.call(l3out, mocked.APIC_EXT_SWITCH,
'192.168.0.254', subnet='128.0.0.0/16',
owner=tenant, transaction=mock.ANY))
self._check_call_list(expected_create_calls,
mgr.ensure_static_route_created.call_args_list)
else:
self.assertFalse(mgr.ensure_static_route_created.called)
expected_create_calls = []
for x in range(len(tenants)):
ep = eps[x]
l3p = l3p_list[x]
l3out = es['name' if self.pre_l3out else 'id']
ext_epg = ep['id']
tenant = APIC_PRE_L3OUT_TENANT if self.pre_l3out else owner
if self.nat_enabled:
l3out = sub_str % (l3p['id'], es['id'])
ext_epg = sub_str % (l3p['id'], ext_epg)
tenant = tenants[x]
expected_create_calls.append(
mock.call(l3out, subnet='128.0.0.0/16',
external_epg=ext_epg, owner=tenant,
transaction=mock.ANY))
self._check_call_list(expected_create_calls,
mgr.ensure_external_epg_created.call_args_list)
self.assertFalse(mgr.ensure_static_route_deleted.called)
self.assertFalse(mgr.ensure_external_epg_routes_deleted.called)
self.assertFalse(mgr.ensure_next_hop_deleted.called)
mgr.ensure_static_route_created.reset_mock()
mgr.ensure_external_epg_created.reset_mock()
# Verify Route added with default gateway
self.update_external_segment(es['id'], expected_res_status=200,
external_routes=[
{'destination': '128.0.0.0/16',
'nexthop': '192.168.0.254'},
{'destination': '0.0.0.0/0',
'nexthop': None}])
if not self.pre_l3out:
expected_create_calls = []
expected_create_calls.append(
mock.call(es['id'], mocked.APIC_EXT_SWITCH,
'192.168.0.1', subnet='0.0.0.0/0',
owner=owner, transaction=mock.ANY))
if self.nat_enabled and is_edge_nat:
for x in range(len(tenants)):
l3p = l3p_list[x]
l3out = sub_str % (l3p['id'], es['id'])
tenant = tenants[x]
expected_create_calls.append(
mock.call(l3out, mocked.APIC_EXT_SWITCH, '192.168.0.1',
subnet='0.0.0.0/0', owner=tenant,
transaction=mock.ANY))
self._check_call_list(expected_create_calls,
mgr.ensure_static_route_created.call_args_list)
else:
self.assertFalse(mgr.ensure_static_route_created.called)
expected_create_calls = []
for x in range(len(tenants)):
ep = eps[x]
l3p = l3p_list[x]
l3out = es['name' if self.pre_l3out else 'id']
ext_epg = ep['id']
tenant = APIC_PRE_L3OUT_TENANT if self.pre_l3out else owner
if self.nat_enabled:
l3out = sub_str % (l3p['id'], es['id'])
ext_epg = sub_str % (l3p['id'], ext_epg)
tenant = tenants[x]
expected_create_calls.append(
mock.call(l3out, subnet='0.0.0.0/0',
external_epg=ext_epg, owner=tenant,
transaction=mock.ANY))
self._check_call_list(expected_create_calls,
mgr.ensure_external_epg_created.call_args_list)
self.assertFalse(mgr.ensure_static_route_deleted.called)
self.assertFalse(mgr.ensure_external_epg_routes_deleted.called)
self.assertFalse(mgr.ensure_next_hop_deleted.called)
    # Despite the poor naming convention, these tests are kept separate so
    # that each one runs with freshly reset mocks.
def test_route_update_add_1(self):
self._test_route_update_add(shared_es=True)
def test_route_update_add_2(self):
self._test_route_update_add(shared_es=False)
def test_route_update_add_edge_nat_mode_1(self):
self._test_route_update_add(shared_es=True, is_edge_nat=True)
def test_route_update_add_edge_nat_mode_2(self):
self._test_route_update_add(shared_es=False, is_edge_nat=True)
def test_es_create_no_cidr_with_routes(self):
self._mock_external_dict([('supported', '192.168.0.2/24')])
nh = '172.16.0.1' if self.pre_l3out else '192.168.0.254'
self.create_external_segment(
name='supported',
external_routes=[{'destination': '0.0.0.0/0',
'nexthop': nh}],
expected_res_status=201)
def test_implicit_es_router_gw_ip(self):
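        # An L3P created without explicit external segments should implicitly
        # attach to the 'default' ES and get its router gateway IP from the
        # implicit 169.254.0.0/16 subnet.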
self._mock_external_dict([('default', '192.168.0.2/24')])
es = self.create_external_segment(
name='default',
external_routes=[{'destination': '0.0.0.0/0',
'nexthop': None}])['external_segment']
l3p = self.create_l3_policy()['l3_policy']
self.assertEqual(es['id'],
l3p['external_segments'].keys()[0])
self.assertEqual('169.254.0.2',
l3p['external_segments'][es['id']][0])
def _do_test_plug_l3p_to_es_with_multi_ep(self):
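        # Plug an L3P into two shared external segments that already carry
        # external policies in several tenants: a shadow external EPG per
        # (L3P, ES) pair is created and associated with the NAT EPG
        # (per-tenant or common, depending on per_tenant_nat_epg), and the
        # NAT-allow contract is both provided and consumed.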
tenants = (['tenant_a', 'tenant_b', 'tenant_c']
if self.nat_enabled else ['tenant_a'])
self._mock_external_dict([('supported', '192.168.0.2/24')])
ext_routes = ['128.0.0.0/24', '128.0.1.0/24']
es_list = [
self.create_external_segment(
name='supported', cidr='192.168.0.0/24', shared=True,
expected_res_status=201,
external_routes=[{
'destination': ext_routes[x],
'nexthop': '192.168.0.254'}])['external_segment']
for x in range(2)]
ep_list = []
for x in range(len(tenants)):
ep = self.create_external_policy(
                name=(APIC_EXTERNAL_EPG if x < 2 else 'other-ext-epg'),
external_segments=[e['id'] for e in es_list],
tenant_id=tenants[x],
expected_res_status=201)['external_policy']
ep_list.append(ep)
mgr = self.driver.apic_manager
mgr.ensure_external_epg_created.reset_mock()
mgr.set_contract_for_external_epg.reset_mock()
ep = ep_list[0]
l3p = self.create_l3_policy(
shared=False,
tenant_id=tenants[0],
external_segments={x['id']: [] for x in es_list},
expected_res_status=201)['l3_policy']
expected_create_calls = []
expected_assoc_calls = []
expected_contract_calls = []
if self.nat_enabled:
for es in es_list:
if not self.pre_l3out:
expected_create_calls.append(
mock.call(es['id'], subnet='0.0.0.0/0',
external_epg='default-%s' % es['id'],
owner=self.common_tenant,
transaction=mock.ANY))
expected_create_calls.append(
mock.call("Shd-%s-%s" % (l3p['id'], es['id']),
subnet=es['external_routes'][0]['destination'],
external_epg="Shd-%s-%s" % (l3p['id'], ep['id']),
owner=l3p['tenant_id'],
transaction=mock.ANY))
expected_assoc_calls.append(
mock.call(l3p['tenant_id'],
"Shd-%s-%s" % (l3p['id'], es['id']),
"Shd-%s-%s" % (l3p['id'], ep['id']),
"NAT-epg-%s" % es['id'],
target_owner=(l3p['tenant_id']
if self.driver.per_tenant_nat_epg
else self.common_tenant),
transaction=mock.ANY))
l3out = es['name' if self.pre_l3out else 'id']
l3out_owner = (APIC_PRE_L3OUT_TENANT
if self.pre_l3out else self.common_tenant)
nat_contract = "NAT-allow-%s" % es['id']
ext_epg = (ep['name']
if self.pre_l3out else ('default-%s' % es['id']))
expected_contract_calls.append(
mock.call(l3out, nat_contract,
external_epg=ext_epg,
owner=l3out_owner,
provided=True, transaction=mock.ANY))
expected_contract_calls.append(
mock.call(l3out, nat_contract,
external_epg=ext_epg,
owner=l3out_owner,
provided=False, transaction=mock.ANY))
self._check_call_list(expected_create_calls,
mgr.ensure_external_epg_created.call_args_list)
self._check_call_list(expected_assoc_calls,
mgr.associate_external_epg_to_nat_epg.call_args_list)
self._check_call_list(expected_contract_calls,
mgr.set_contract_for_external_epg.call_args_list)
def test_plug_l3p_to_es_with_multi_ep(self):
self._do_test_plug_l3p_to_es_with_multi_ep()
def test_plug_l3p_to_es_with_multi_ep_ptne(self):
self.driver.per_tenant_nat_epg = True
self._do_test_plug_l3p_to_es_with_multi_ep()
class TestExternalSegmentNoNat(TestExternalSegment):
def setUp(self):
super(TestExternalSegmentNoNat, self).setUp(nat_enabled=False)
class TestExternalSegmentPreL3Out(TestExternalSegment):
def setUp(self, **kwargs):
kwargs['pre_existing_l3out'] = True
super(TestExternalSegmentPreL3Out, self).setUp(**kwargs)
def test_query_l3out_info(self):
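        # Put back the original _query_l3out_info implementation and verify
        # that it extracts the VRF name/tenant from the L3Out's l3extRsEctx
        # target DN, querying the ES tenant first and then falling back to
        # 'common'.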
self.driver._query_l3out_info = self.orig_query_l3out_info
ctx1 = [{
'l3extRsEctx': {'attributes': {'tDn': 'uni/tn-foo/ctx-foobar'}}}]
mgr = self.driver.apic_manager
mgr.apic.l3extOut.get_subtree.return_value = ctx1
info = self.driver._query_l3out_info('l3out', 'bar_tenant')
self.assertEqual('bar_tenant', info['l3out_tenant'])
self.assertEqual('foobar', info['vrf_name'])
self.assertEqual('foo', info['vrf_tenant'])
mgr.apic.l3extOut.get_subtree.reset_mock()
mgr.apic.l3extOut.get_subtree.return_value = []
info = self.driver._query_l3out_info('l3out', 'bar_tenant')
        self.assertIsNone(info)
expected_calls = [
mock.call('bar_tenant', 'l3out'),
mock.call('common', 'l3out')]
self._check_call_list(
expected_calls, mgr.apic.l3extOut.get_subtree.call_args_list)
def test_l3out_tenant(self):
self._mock_external_dict([('supported', '192.168.0.2/24')])
self.driver._query_l3out_info.return_value['l3out_tenant'] = (
apic_mapper.ApicName('some_other_tenant'))
res = self.create_external_segment(name='supported',
tenant_id='a_tenant', cidr='192.168.0.2/24',
expected_res_status=400)
self.assertEqual('PreExistingL3OutInIncorrectTenant',
res['NeutronError']['type'])
self.create_external_segment(name='supported',
tenant_id='some_other_tenant', cidr='192.168.0.2/24',
expected_res_status=201)
def test_edge_nat_wrong_L3out_IF_type_rejected(self):
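        # In edge-nat mode the pre-existing L3Out is validated: an interface
        # profile whose ifInstT is 'l3-port' must be rejected with
        # EdgeNatWrongL3OutIFType.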
self._mock_external_dict([('supported', '192.168.0.2/24')],
is_edge_nat=True)
self.driver._query_l3out_info.return_value['l3out'] = (
[{u'l3extLNodeP':
{u'attributes':
{u'dn': u'uni/tn-common/out-supported/lnodep-Leaf3-4_NP'},
u'children': [{u'l3extLIfP':
{u'children': [{u'l3extRsPathL3OutAtt':
{u'attributes':
{u'ifInstT': u'l3-port'
}}}]}}]}}])
res = self.create_external_segment(
name='supported', expected_res_status=400)
self.assertEqual('EdgeNatWrongL3OutIFType',
res['NeutronError']['type'])
def test_edge_nat_wrong_L3out_OSPF_Auth_type_rejected(self):
self._mock_external_dict([('supported', '192.168.0.2/24')],
is_edge_nat=True)
self.driver._query_l3out_info.return_value['l3out'] = (
[{u'l3extLNodeP':
{u'attributes':
{u'dn': u'uni/tn-common/out-supported/lnodep-Leaf3-4_NP'},
u'children': [{u'l3extLIfP':
{u'children': [{u'ospfIfP':
{u'attributes':
{u'authType': u'simple'
}}}]}}]}}])
res = self.create_external_segment(
name='supported', expected_res_status=400)
self.assertEqual('EdgeNatWrongL3OutAuthTypeForOSPF',
res['NeutronError']['type'])
def test_edge_nat_wrong_L3out_BGP_Auth_type_rejected(self):
self._mock_external_dict([('supported', '192.168.0.2/24')],
is_edge_nat=True)
self.driver._query_l3out_info.return_value['l3out'] = (
[{u'l3extLNodeP':
{u'attributes':
{u'dn': u'uni/tn-common/out-supported/lnodep-Leaf3-4_NP'},
u'children': [{u'l3extLIfP':
{u'children': [{u'l3extRsNodeL3OutAtt':
{u'attributes':
{u'type': u'sha1'}}},
{u'bfdIfP':
{u'attributes':
{u'type': u'sha1'}}},
{u'l3extRsNodeL3OutAtt':
{u'attributes':
{u'type': u'sha1'}}}]}}]}}])
res = self.create_external_segment(
name='supported', expected_res_status=400)
self.assertEqual('EdgeNatWrongL3OutAuthTypeForBGP',
res['NeutronError']['type'])
# try again with a good input
self.driver._query_l3out_info.return_value['l3out'] = (
[{u'l3extLNodeP':
{u'attributes':
{u'dn': u'uni/tn-common/out-supported/lnodep-Leaf3-4_NP'},
u'children': [{u'l3extLIfP':
{u'children': [{u'l3extRsNodeL3OutAtt':
{u'attributes':
{u'type': u'sha1'}}},
{u'bfdIfP':
{u'attributes':
{u'type': u'none'}}},
{u'l3extRsNodeL3OutAtt':
{u'attributes':
{u'type': u'sha1'}}}]}}]}}])
res = self.create_external_segment(
name='supported', expected_res_status=201)
class TestExternalSegmentNoNatPreL3Out(TestExternalSegmentPreL3Out):
def setUp(self):
super(TestExternalSegmentNoNatPreL3Out, self).setUp(
nat_enabled=False)
class TestExternalPolicy(ApicMappingTestCase):
def test_creation_noop(self):
self._mock_external_dict([('supported', '192.168.0.2/24')])
es = self.create_external_segment(
name='supported', cidr='192.168.0.0/24',
external_routes=[], expected_res_status=201)['external_segment']
self.create_external_policy(
name=APIC_EXTERNAL_EPG,
external_segments=[es['id']], expected_res_status=201)
        # When created, the external EPG should always carry the default route
mgr = self.driver.apic_manager
if self.nat_enabled and not self.pre_l3out:
mgr.ensure_external_epg_created.assert_called_once_with(
es['id'], subnet='0.0.0.0/0',
external_epg=("default-%s" % es['id']), owner=es['tenant_id'],
transaction=mock.ANY)
else:
self.assertFalse(mgr.ensure_external_epg_created.called)
mgr.ensure_external_epg_created.reset_mock()
es = self.create_external_segment(
name='unsupported', cidr='192.168.0.0/24', expected_res_status=201,
external_routes=[{'destination': '128.0.0.0/16',
'nexthop': '192.168.0.254'}])['external_segment']
self.create_external_policy(
external_segments=[es['id']], expected_res_status=201)
# Verify noop on unsupported
self.assertFalse(mgr.ensure_external_epg_created.called)
def test_create_shared(self):
self._mock_external_dict([('supported', '192.168.0.2/24')])
es = self.create_external_segment(
name='supported', cidr='192.168.0.0/24',
external_routes=[], shared=True,
expected_res_status=201)['external_segment']
res = self.create_external_policy(
external_segments=[es['id']], shared=True,
expected_res_status=400)
self.assertEqual('SharedExternalPolicyUnsupported',
res['NeutronError']['type'])
def test_update_shared(self):
self._mock_external_dict([('supported', '192.168.0.2/24')])
es = self.create_external_segment(
name='supported', cidr='192.168.0.0/24',
external_routes=[], shared=True,
expected_res_status=201)['external_segment']
ep = self.create_external_policy(
external_segments=[es['id']],
expected_res_status=201)['external_policy']
res = self.update_external_policy(
ep['id'], shared=True, expected_res_status=400)
self.assertEqual('SharedExternalPolicyUnsupported',
res['NeutronError']['type'])
def _test_creation_no_prs(self, shared_es, is_edge_nat=False):
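        # With NAT enabled, creating the external policy should create the
        # real external EPG on the ES L3Out with only the default route, plus
        # one shadow external EPG per attached L3P carrying the ES routes;
        # without NAT the ES routes land directly on the real external EPG.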
self._mock_external_dict([('supported', '192.168.0.2/24')],
is_edge_nat)
es_list = [
self.create_external_segment(
name='supported', cidr='192.168.0.0/24', shared=shared_es,
expected_res_status=201,
external_routes=[{
'destination': '128.0.0.0/16',
'nexthop': '192.168.0.254'}])['external_segment']
for x in range(3)]
l3p_list = []
for x in xrange(len(es_list)):
l3p = self.create_l3_policy(
shared=False,
                tenant_id='another' if shared_es else es_list[x]['tenant_id'],
external_segments={es_list[x]['id']: []},
expected_res_status=201)['l3_policy']
l3p_list.append(l3p)
ep = self.create_external_policy(
name=APIC_EXTERNAL_EPG,
external_segments=[x['id'] for x in es_list],
tenant_id=es_list[0]['tenant_id'] if not shared_es else 'another',
expected_res_status=201)['external_policy']
mgr = self.driver.apic_manager
owner = (es_list[0]['tenant_id'] if not shared_es
else self.common_tenant)
l3p_owner = l3p_list[0]['tenant_id']
expected_create_calls = []
sub_str = "Shd-%s-%s"
if is_edge_nat:
sub_str = "Auto-%s-%s"
for x in range(len(es_list)):
es = es_list[x]
l3p = l3p_list[x]
if self.nat_enabled:
if not self.pre_l3out:
expected_create_calls.append(
mock.call(es['id'], subnet='0.0.0.0/0',
external_epg="default-%s" % es['id'], owner=owner,
transaction=mock.ANY))
expected_create_calls.append(
mock.call(sub_str % (l3p['id'], es['id']),
subnet='128.0.0.0/16',
external_epg=(sub_str % (l3p['id'], ep['id'])),
owner=l3p_owner,
transaction=mock.ANY))
elif not self.pre_l3out:
expected_create_calls.append(
mock.call(es['id'], subnet='128.0.0.0/16',
external_epg=ep['id'], owner=owner,
transaction=mock.ANY))
self._check_call_list(expected_create_calls,
mgr.ensure_external_epg_created.call_args_list)
if self.nat_enabled:
expected_contract_calls = []
ext_epg_tenant = (APIC_PRE_L3OUT_TENANT if self.pre_l3out
else owner)
for x in range(len(es_list)):
es = es_list[x]
ext_epg = (APIC_EXTERNAL_EPG if self.pre_l3out
else "default-%s" % es['id'])
es_name = es['name' if self.pre_l3out else 'id']
nat_contract = "NAT-allow-%s" % es['id']
expected_contract_calls.extend([
mock.call(es_name, nat_contract,
external_epg=ext_epg, owner=ext_epg_tenant,
provided=True, transaction=mock.ANY),
mock.call(es_name, nat_contract,
external_epg=ext_epg, owner=ext_epg_tenant,
provided=False, transaction=mock.ANY)])
self._check_call_list(expected_contract_calls,
mgr.set_contract_for_external_epg.call_args_list)
else:
self.assertFalse(mgr.set_contract_for_external_epg.called)
    # Despite the poor naming convention, these tests are kept separate so
    # that each one runs with freshly reset mocks.
def test_creation_no_prs_1(self):
self._test_creation_no_prs(shared_es=True)
def test_creation_no_prs_2(self):
self._test_creation_no_prs(shared_es=False)
def test_creation_no_prs_edge_nat_mode_1(self):
self._test_creation_no_prs(shared_es=True, is_edge_nat=True)
def test_creation_no_prs_edge_nat_mode_2(self):
self._test_creation_no_prs(shared_es=False, is_edge_nat=True)
def _test_update_no_prs(self, shared_es, is_edge_nat=False):
self._mock_external_dict([('supported', '192.168.0.2/24')],
is_edge_nat)
es_list = [
self.create_external_segment(
name='supported', cidr='192.168.0.0/24', shared=shared_es,
expected_res_status=201,
external_routes=[{
'destination': '128.0.0.0/16',
'nexthop': '192.168.0.254'}])['external_segment']
for x in range(3)]
l3p_list = []
for x in xrange(len(es_list)):
l3p = self.create_l3_policy(
shared=False,
                tenant_id='another' if shared_es else es_list[x]['tenant_id'],
external_segments={es_list[x]['id']: []},
expected_res_status=201)['l3_policy']
l3p_list.append(l3p)
ep = self.create_external_policy(
name=APIC_EXTERNAL_EPG,
tenant_id=es_list[0]['tenant_id'] if not shared_es else 'another',
expected_res_status=201)['external_policy']
ep = self.update_external_policy(
ep['id'], expected_res_status=200, tenant_id=ep['tenant_id'],
external_segments=[x['id'] for x in es_list])['external_policy']
mgr = self.driver.apic_manager
owner = (es_list[0]['tenant_id'] if not shared_es
else self.common_tenant)
l3p_owner = l3p_list[0]['tenant_id']
expected_create_calls = []
sub_str = "Shd-%s-%s"
if is_edge_nat:
sub_str = "Auto-%s-%s"
for x in range(len(es_list)):
es = es_list[x]
l3p = l3p_list[x]
if self.nat_enabled:
if not self.pre_l3out:
expected_create_calls.append(
mock.call(es['id'], subnet='0.0.0.0/0',
external_epg="default-%s" % es['id'],
owner=owner, transaction=mock.ANY))
expected_create_calls.append(
mock.call(sub_str % (l3p['id'], es['id']),
subnet='128.0.0.0/16',
external_epg=sub_str % (l3p['id'], ep['id']),
owner=l3p_owner, transaction=mock.ANY))
elif not self.pre_l3out:
expected_create_calls.append(
mock.call(es['id'], subnet='128.0.0.0/16',
external_epg=ep['id'],
owner=owner, transaction=mock.ANY))
self._check_call_list(expected_create_calls,
mgr.ensure_external_epg_created.call_args_list)
if self.nat_enabled:
expected_contract_calls = []
ext_epg_tenant = (APIC_PRE_L3OUT_TENANT if self.pre_l3out
else owner)
for x in range(len(es_list)):
es = es_list[x]
ext_epg = (APIC_EXTERNAL_EPG if self.pre_l3out
else "default-%s" % es['id'])
es_name = es['name' if self.pre_l3out else 'id']
nat_contract = "NAT-allow-%s" % es['id']
expected_contract_calls.extend([
mock.call(es_name, nat_contract,
external_epg=ext_epg, owner=ext_epg_tenant,
provided=True, transaction=mock.ANY),
mock.call(es_name, nat_contract,
external_epg=ext_epg, owner=ext_epg_tenant,
provided=False, transaction=mock.ANY)])
self._check_call_list(expected_contract_calls,
mgr.set_contract_for_external_epg.call_args_list)
else:
self.assertFalse(mgr.set_contract_for_external_epg.called)
ep = self.update_external_policy(
ep['id'], expected_res_status=200, tenant_id=ep['tenant_id'],
external_segments=[])['external_policy']
mgr = self.driver.apic_manager
expected_create_calls = []
for x in range(len(es_list)):
es = es_list[x]
l3p = l3p_list[x]
if self.nat_enabled:
if not self.pre_l3out:
expected_create_calls.append(
mock.call(es['id'], owner=owner,
external_epg="default-%s" % es['id']))
expected_create_calls.append(
mock.call(sub_str % (l3p['id'], es['id']),
owner=l3p_owner,
external_epg=sub_str % (l3p['id'], ep['id'])))
elif not self.pre_l3out:
expected_create_calls.append(
mock.call(es['id'], owner=owner, external_epg=ep['id']))
self._check_call_list(expected_create_calls,
mgr.ensure_external_epg_deleted.call_args_list)
if self.nat_enabled and self.pre_l3out:
expected_contract_calls = []
ext_epg_tenant = APIC_PRE_L3OUT_TENANT
for x in range(len(es_list)):
es = es_list[x]
nat_contract = "NAT-allow-%s" % es['id']
expected_contract_calls.extend([
mock.call(es['name'], nat_contract,
external_epg=APIC_EXTERNAL_EPG, owner=ext_epg_tenant,
provided=True, transaction=mock.ANY),
mock.call(es['name'], nat_contract,
external_epg=APIC_EXTERNAL_EPG, owner=ext_epg_tenant,
provided=False, transaction=mock.ANY)])
self._check_call_list(expected_contract_calls,
mgr.unset_contract_for_external_epg.call_args_list)
else:
self.assertFalse(mgr.unset_contract_for_external_epg.called)
    # Despite the poor naming convention, these tests are kept separate so
    # that each one runs with freshly reset mocks.
def test_update_no_prs_1(self):
self._test_update_no_prs(shared_es=True)
def test_update_no_prs_2(self):
self._test_update_no_prs(shared_es=False)
def test_update_no_prs_edge_nat_mode_1(self):
self._test_update_no_prs(shared_es=True, is_edge_nat=True)
def test_update_no_prs_edge_nat_mode_2(self):
self._test_update_no_prs(shared_es=False, is_edge_nat=True)
def _test_create_with_prs(self, shared_es, shared_prs, is_edge_nat=False):
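        # With NAT, the provided/consumed policy rule sets land on the
        # per-L3P shadow external EPGs while the real external EPG only gets
        # the NAT-allow contract; without NAT they are set directly on the
        # real external EPG.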
self._mock_external_dict([('supported', '192.168.0.2/24')],
is_edge_nat)
es_list = [
self.create_external_segment(
name='supported', cidr='192.168.0.0/24', shared=shared_es,
expected_res_status=201,
external_routes=[{
'destination': '128.0.0.0/16',
'nexthop': '192.168.0.254'}])['external_segment']
for x in range(3)]
l3p_list = []
for x in xrange(len(es_list)):
l3p = self.create_l3_policy(
shared=False,
                tenant_id='another' if shared_es else es_list[x]['tenant_id'],
external_segments={es_list[x]['id']: []},
expected_res_status=201)['l3_policy']
l3p_list.append(l3p)
prov = self._create_policy_rule_set_on_shared(
shared=shared_prs,
tenant_id=es_list[0]['tenant_id'] if not (
shared_es | shared_prs) else 'another')
cons = self._create_policy_rule_set_on_shared(
shared=shared_prs,
tenant_id=es_list[0]['tenant_id'] if not (
shared_es | shared_prs) else 'another')
ep = self.create_external_policy(
name=APIC_EXTERNAL_EPG,
provided_policy_rule_sets={prov['id']: ''},
consumed_policy_rule_sets={cons['id']: ''},
tenant_id=es_list[0]['tenant_id'] if not shared_es else 'another',
external_segments=[x['id'] for x in es_list],
expected_res_status=201)['external_policy']
mgr = self.driver.apic_manager
owner = (es_list[0]['tenant_id'] if not shared_es
else self.common_tenant)
l3p_owner = l3p_list[0]['tenant_id']
expected_calls = []
for x in range(len(es_list)):
es = es_list[x]
l3p = l3p_list[x]
nat = self.nat_enabled
external_epg = APIC_EXTERNAL_EPG if self.pre_l3out else (
("default-%s" % es['id']) if nat else ep['id'])
ext_epg_tenant = (APIC_PRE_L3OUT_TENANT if self.pre_l3out else
owner)
es_name = es['name' if self.pre_l3out else 'id']
expected_calls.append(
mock.call(es_name,
("NAT-allow-%s" % es['id']) if nat else prov['id'],
external_epg=external_epg,
provided=True, owner=ext_epg_tenant,
transaction=mock.ANY))
expected_calls.append(
mock.call(es_name,
("NAT-allow-%s" % es['id']) if nat else cons['id'],
external_epg=external_epg,
provided=False, owner=ext_epg_tenant,
transaction=mock.ANY))
if nat:
sub_str = "Shd-%s-%s"
if is_edge_nat:
sub_str = "Auto-%s-%s"
expected_calls.append(
mock.call(sub_str % (l3p['id'], es['id']), prov['id'],
external_epg=(sub_str % (l3p['id'], ep['id'])),
provided=True, owner=l3p_owner,
transaction=mock.ANY))
expected_calls.append(
mock.call(sub_str % (l3p['id'], es['id']), cons['id'],
external_epg=(sub_str % (l3p['id'], ep['id'])),
provided=False, owner=l3p_owner,
transaction=mock.ANY))
self._check_call_list(expected_calls,
mgr.set_contract_for_external_epg.call_args_list)
    # Despite the poor naming convention, these tests are kept separate so
    # that each one runs with freshly reset mocks.
def test_create_with_prs_1(self):
self._test_create_with_prs(shared_es=True, shared_prs=True)
def test_create_with_prs_2(self):
self._test_create_with_prs(shared_es=True, shared_prs=False)
def test_create_with_prs_3(self):
self._test_create_with_prs(shared_es=False, shared_prs=False)
def test_create_with_prs_4(self):
self._test_create_with_prs(shared_es=False, shared_prs=True)
def test_create_with_prs_edge_nat_mode_1(self):
self._test_create_with_prs(shared_es=True, shared_prs=True,
is_edge_nat=True)
def test_create_with_prs_edge_nat_mode_2(self):
self._test_create_with_prs(shared_es=True, shared_prs=False,
is_edge_nat=True)
def test_create_with_prs_edge_nat_mode_3(self):
self._test_create_with_prs(shared_es=False, shared_prs=False,
is_edge_nat=True)
def test_create_with_prs_edge_nat_mode_4(self):
self._test_create_with_prs(shared_es=False, shared_prs=True,
is_edge_nat=True)
def _test_update_add_prs(self, shared_es, shared_prs, is_edge_nat=False):
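        # Same contract wiring as the creation test above, but driven through
        # an update of an existing external policy; a second update removing
        # the rule sets must unset the same contracts again.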
self._mock_external_dict([('supported', '192.168.0.2/24')],
is_edge_nat)
es_list = [
self.create_external_segment(
name='supported', cidr='192.168.0.0/24', shared=shared_es,
expected_res_status=201,
external_routes=[{
'destination': '128.0.0.0/16',
'nexthop': '192.168.0.254'}])['external_segment']
for x in range(3)]
l3p_list = []
for x in xrange(len(es_list)):
l3p = self.create_l3_policy(
shared=False,
                tenant_id='another' if shared_es else es_list[x]['tenant_id'],
external_segments={es_list[x]['id']: []},
expected_res_status=201)['l3_policy']
l3p_list.append(l3p)
prov = self._create_policy_rule_set_on_shared(
shared=shared_prs, tenant_id=es_list[0]['tenant_id'] if not (
shared_es | shared_prs) else 'another')
cons = self._create_policy_rule_set_on_shared(
shared=shared_prs, tenant_id=es_list[0]['tenant_id'] if not (
shared_es | shared_prs) else 'another')
ep = self.create_external_policy(
name=APIC_EXTERNAL_EPG,
external_segments=[x['id'] for x in es_list],
tenant_id=es_list[0]['tenant_id'] if not shared_es else 'another',
expected_res_status=201)['external_policy']
ep = self.update_external_policy(
ep['id'], expected_res_status=200, tenant_id=ep['tenant_id'],
provided_policy_rule_sets={prov['id']: ''},
consumed_policy_rule_sets={cons['id']: ''})['external_policy']
mgr = self.driver.apic_manager
owner = (es_list[0]['tenant_id'] if not shared_es
else self.common_tenant)
l3p_owner = l3p_list[0]['tenant_id']
expected_calls = []
nat = self.nat_enabled
sub_str = "Shd-%s-%s"
if is_edge_nat:
sub_str = "Auto-%s-%s"
for x in range(len(es_list)):
es = es_list[x]
l3p = l3p_list[x]
external_epg = APIC_EXTERNAL_EPG if self.pre_l3out else (
("default-%s" % es['id']) if nat else ep['id'])
ext_epg_tenant = (APIC_PRE_L3OUT_TENANT if self.pre_l3out else
owner)
es_name = es['name' if self.pre_l3out else 'id']
expected_calls.append(
mock.call(es_name,
("NAT-allow-%s" % es['id']) if nat else prov['id'],
external_epg=external_epg,
provided=True, owner=ext_epg_tenant,
transaction=mock.ANY))
expected_calls.append(
mock.call(es_name,
("NAT-allow-%s" % es['id']) if nat else cons['id'],
external_epg=external_epg,
provided=False, owner=ext_epg_tenant,
transaction=mock.ANY))
if nat:
expected_calls.append(
mock.call(sub_str % (l3p['id'], es['id']), prov['id'],
external_epg=(sub_str % (l3p['id'], ep['id'])),
provided=True, owner=l3p_owner,
transaction=mock.ANY))
expected_calls.append(
mock.call(sub_str % (l3p['id'], es['id']), cons['id'],
external_epg=(sub_str % (l3p['id'], ep['id'])),
provided=False, owner=l3p_owner,
transaction=mock.ANY))
self._check_call_list(expected_calls,
mgr.set_contract_for_external_epg.call_args_list)
ep = self.update_external_policy(
ep['id'], expected_res_status=200, provided_policy_rule_sets={},
consumed_policy_rule_sets={},
tenant_id=ep['tenant_id'])['external_policy']
expected_calls = []
for x in range(len(es_list)):
es = es_list[x]
l3p = l3p_list[x]
if nat:
expected_calls.append(
mock.call(sub_str % (l3p['id'], es['id']), prov['id'],
external_epg=(sub_str % (l3p['id'], ep['id'])),
provided=True, owner=l3p_owner,
transaction=mock.ANY))
expected_calls.append(
mock.call(sub_str % (l3p['id'], es['id']), cons['id'],
external_epg=(sub_str % (l3p['id'], ep['id'])),
provided=False, owner=l3p_owner,
transaction=mock.ANY))
else:
external_epg = (APIC_EXTERNAL_EPG if self.pre_l3out
else ep['id'])
ext_epg_tenant = (APIC_PRE_L3OUT_TENANT if self.pre_l3out else
owner)
es_name = es['name' if self.pre_l3out else 'id']
expected_calls.append(
mock.call(es_name, prov['id'],
external_epg=external_epg,
provided=True, owner=ext_epg_tenant,
transaction=mock.ANY))
expected_calls.append(
mock.call(es_name, cons['id'],
external_epg=external_epg,
provided=False, owner=ext_epg_tenant,
transaction=mock.ANY))
self._check_call_list(
expected_calls, mgr.unset_contract_for_external_epg.call_args_list)
    # The naming convention here is not ideal, but the tests are kept
    # separate so that the mocks are reset between runs.
def test_update_add_prs_1(self):
self._test_update_add_prs(shared_es=True, shared_prs=True)
def test_update_add_prs_2(self):
self._test_update_add_prs(shared_es=True, shared_prs=False)
def test_update_add_prs_3(self):
self._test_update_add_prs(shared_es=False, shared_prs=False)
def test_update_add_prs_4(self):
self._test_update_add_prs(shared_es=False, shared_prs=True)
def test_update_add_prs_edge_nat_mode_1(self):
self._test_update_add_prs(shared_es=True, shared_prs=True,
is_edge_nat=True)
def test_update_add_prs_edge_nat_mode_2(self):
self._test_update_add_prs(shared_es=True, shared_prs=False,
is_edge_nat=True)
def test_update_add_prs_edge_nat_mode_3(self):
self._test_update_add_prs(shared_es=False, shared_prs=False,
is_edge_nat=True)
def test_update_add_prs_edge_nat_mode_4(self):
self._test_update_add_prs(shared_es=False, shared_prs=True,
is_edge_nat=True)
def test_update_add_prs_unsupported(self):
self._mock_external_dict([('supported', '192.168.0.2/24')])
es = self.create_external_segment(
name='unsupported', cidr='192.168.0.0/24', expected_res_status=201,
external_routes=[{'destination': '128.0.0.0/16',
'nexthop': '192.168.0.254'}])['external_segment']
prov = self._create_policy_rule_set_on_shared()
cons = self._create_policy_rule_set_on_shared()
ep = self.create_external_policy(
external_segments=[es['id']],
expected_res_status=201)['external_policy']
self.update_external_policy(
ep['id'], expected_res_status=200, tenant_id=ep['tenant_id'],
provided_policy_rule_sets={prov['id']: ''},
consumed_policy_rule_sets={cons['id']: ''})['external_policy']
mgr = self.driver.apic_manager
self.assertFalse(mgr.set_contract_for_external_epg.called)
def _test_multi_policy_single_tenant(self, shared_es):
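        # A second external policy for the same tenant/L3 policy must be
        # rejected with MultipleExternalPoliciesForL3Policy, both on create
        # and when an existing policy is updated to attach the segment.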
self._mock_external_dict([('supported', '192.168.0.2/24')])
es = self.create_external_segment(
name='supported', cidr='192.168.0.0/24',
expected_res_status=201, shared=shared_es,
external_routes=[{
'destination': '128.0.0.0/16',
'nexthop': '192.168.0.254'}])['external_segment']
owner = 'another' if shared_es else es['tenant_id']
self.create_external_policy(
external_segments=[es['id']],
tenant_id=owner,
expected_res_status=201)
res = self.create_external_policy(
external_segments=[es['id']],
tenant_id=owner,
expected_res_status=400)
self.assertEqual('MultipleExternalPoliciesForL3Policy',
res['NeutronError']['type'])
# create another external policy and update it to use external-segment
ep2 = self.create_external_policy(
tenant_id=owner,
expected_res_status=201)['external_policy']
res = self.update_external_policy(
ep2['id'], external_segments=[es['id']],
tenant_id=owner,
expected_res_status=400)
self.assertEqual('MultipleExternalPoliciesForL3Policy',
res['NeutronError']['type'])
def test_multi_policy_single_tenant_1(self):
self._test_multi_policy_single_tenant(True)
def test_multi_policy_single_tenant_2(self):
self._test_multi_policy_single_tenant(False)
def test_multi_policy_multi_tenant(self):
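        # With NAT enabled each tenant gets its own shadow external EPG per
        # shared segment; check the EPGs created on attach and removed again
        # when the external policies are deleted.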
tenants = (['tenant_a', 'tenant_b', 'tenant_c']
if self.nat_enabled else ['tenant_a'])
self._mock_external_dict([('supported', '192.168.0.2/24')])
ext_routes = ['128.0.0.0/24', '128.0.1.0/24']
es_list = [
self.create_external_segment(
name='supported', cidr='192.168.0.0/24', shared=True,
expected_res_status=201,
external_routes=[{
'destination': ext_routes[x],
'nexthop': '192.168.0.254'}])['external_segment']
for x in range(2)]
l3p_list = []
        for x in range(len(tenants)):
l3p = self.create_l3_policy(
shared=False,
tenant_id=tenants[x],
external_segments={x['id']: [] for x in es_list},
expected_res_status=201)['l3_policy']
l3p_list.append(l3p)
# create external-policy
ep_list = []
mgr = self.driver.apic_manager
for x in range(len(tenants)):
ep = self.create_external_policy(
name=APIC_EXTERNAL_EPG,
external_segments=[e['id'] for e in es_list],
tenant_id=tenants[x],
expected_res_status=201)['external_policy']
ep_list.append(ep)
l3p = l3p_list[x]
expected_calls = []
for es in es_list:
if self.nat_enabled:
if not self.pre_l3out:
expected_calls.append(
mock.call(es['id'], subnet='0.0.0.0/0',
external_epg="default-%s" % es['id'],
owner=self.common_tenant,
transaction=mock.ANY))
expected_calls.append(
mock.call("Shd-%s-%s" % (l3p['id'], es['id']),
subnet=es['external_routes'][0]['destination'],
external_epg=("Shd-%s-%s" % (l3p['id'], ep['id'])),
owner=tenants[x],
transaction=mock.ANY))
elif not self.pre_l3out:
expected_calls.append(
mock.call(es['id'],
subnet=es['external_routes'][0]['destination'],
external_epg=ep['id'], owner=self.common_tenant,
transaction=mock.ANY))
self._check_call_list(expected_calls,
mgr.ensure_external_epg_created.call_args_list)
mgr.ensure_external_epg_created.reset_mock()
# delete external-policy
expected_calls = []
for x in range(len(tenants)):
ep = ep_list[x]
self.delete_external_policy(
ep['id'], tenant_id=ep['tenant_id'],
expected_res_status=webob.exc.HTTPNoContent.code)
l3p = l3p_list[x]
for es in es_list:
if self.nat_enabled:
expected_calls.append(
mock.call("Shd-%s-%s" % (l3p['id'], es['id']),
external_epg=("Shd-%s-%s" % (l3p['id'], ep['id'])),
owner=tenants[x]))
elif not self.pre_l3out:
expected_calls.append(
mock.call(es['id'], external_epg=ep['id'],
owner=self.common_tenant))
if self.nat_enabled and not self.pre_l3out:
for es in es_list:
expected_calls.append(
mock.call(es['id'], external_epg="default-%s" % es['id'],
owner=self.common_tenant))
self._check_call_list(expected_calls,
mgr.ensure_external_epg_deleted.call_args_list)
class TestExternalPolicyNoNat(TestExternalPolicy):
def setUp(self):
super(TestExternalPolicyNoNat, self).setUp(nat_enabled=False)
class TestExternalPolicyPreL3Out(TestExternalPolicy):
def setUp(self):
super(TestExternalPolicyPreL3Out, self).setUp(
pre_existing_l3out=True)
def test_multi_tenant_delete(self):
self._mock_external_dict([('supported', '192.168.0.2/24')])
es = self.create_external_segment(
name='supported', cidr='192.168.0.0/24', shared=True,
expected_res_status=201)['external_segment']
ep_list = [
self.create_external_policy(
name=APIC_EXTERNAL_EPG,
external_segments=[es['id']],
tenant_id=tnnt,
expected_res_status=201)['external_policy']
for tnnt in ['tenant_a', 'tenant_b', 'tenant_c']]
for ep in ep_list:
self.delete_external_policy(
ep['id'], tenant_id=ep['tenant_id'],
expected_res_status=webob.exc.HTTPNoContent.code)
nat_contract = "NAT-allow-%s" % es['id']
expected_calls = [
mock.call(es['name'], nat_contract,
external_epg=APIC_EXTERNAL_EPG,
provided=True, owner=APIC_PRE_L3OUT_TENANT,
transaction=mock.ANY),
mock.call(es['name'], nat_contract,
external_epg=APIC_EXTERNAL_EPG,
provided=False, owner=APIC_PRE_L3OUT_TENANT,
transaction=mock.ANY)
]
mgr = self.driver.apic_manager
self._check_call_list(expected_calls,
mgr.unset_contract_for_external_epg.call_args_list)
def test_multi_tenant_update_dissociate(self):
self._mock_external_dict([('supported', '192.168.0.2/24')])
es = self.create_external_segment(
name='supported', cidr='192.168.0.0/24', shared=True,
expected_res_status=201)['external_segment']
ep_list = [
self.create_external_policy(
name=APIC_EXTERNAL_EPG,
external_segments=[es['id']],
tenant_id=tnnt,
expected_res_status=201)['external_policy']
for tnnt in ['tenant_a', 'tenant_b', 'tenant_c']]
for ep in ep_list:
self.update_external_policy(
ep['id'], tenant_id=ep['tenant_id'],
external_segments=[],
expected_res_status=200)
nat_contract = "NAT-allow-%s" % es['id']
expected_calls = [
mock.call(es['name'], nat_contract,
external_epg=APIC_EXTERNAL_EPG,
provided=True, owner=APIC_PRE_L3OUT_TENANT,
transaction=mock.ANY),
mock.call(es['name'], nat_contract,
external_epg=APIC_EXTERNAL_EPG,
provided=False, owner=APIC_PRE_L3OUT_TENANT,
transaction=mock.ANY)
]
mgr = self.driver.apic_manager
self._check_call_list(expected_calls,
mgr.unset_contract_for_external_epg.call_args_list)
class TestExternalPolicyNoNatPreL3Out(TestExternalPolicy):
def setUp(self):
super(TestExternalPolicyNoNatPreL3Out, self).setUp(
nat_enabled=False, pre_existing_l3out=True)
class TestNatPool(ApicMappingTestCase):
def test_overlap_nat_pool_create(self):
self._mock_external_dict([('supported', '192.168.0.2/24')])
mgr = self.driver.apic_manager
mgr.ext_net_dict['supported']['host_pool_cidr'] = '192.168.200.1/24'
es = self.create_external_segment(name='supported',
expected_res_status=webob.exc.HTTPCreated.code)['external_segment']
# cidr_exposed overlap
res = self.create_nat_pool(
external_segment_id=es['id'],
ip_version=4, ip_pool='192.168.0.0/24',
expected_res_status=webob.exc.HTTPBadRequest.code)
self.assertEqual('NatPoolOverlapsApicSubnet',
res['NeutronError']['type'])
# host-pool overlap
res = self.create_nat_pool(
external_segment_id=es['id'],
ip_version=4, ip_pool='192.168.200.0/24',
expected_res_status=webob.exc.HTTPBadRequest.code)
self.assertEqual('NatPoolOverlapsApicSubnet',
res['NeutronError']['type'])
def test_overlap_nat_pool_update(self):
self._mock_external_dict([('supported', '192.168.0.2/24'),
('supported1', '192.168.1.2/24')])
es1 = self.create_external_segment(name='supported',
expected_res_status=webob.exc.HTTPCreated.code)['external_segment']
es2 = self.create_external_segment(name='supported1',
expected_res_status=webob.exc.HTTPCreated.code)['external_segment']
nat_pool = self.create_nat_pool(
external_segment_id=es1['id'],
ip_version=4, ip_pool='192.168.1.0/24',
expected_res_status=webob.exc.HTTPCreated.code)['nat_pool']
res = self.update_nat_pool(nat_pool['id'],
external_segment_id=es2['id'],
expected_res_status=webob.exc.HTTPBadRequest.code)
self.assertEqual('NatPoolOverlapsApicSubnet',
res['NeutronError']['type'])
def _test_nat_bd_subnet_created_deleted(self, shared, is_edge_nat=False):
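        # Creating/deleting a NAT pool should add/remove the matching subnet
        # on the NAT BD, except in no-NAT or edge-NAT mode.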
self._mock_external_dict([('supported', '192.168.0.2/24')],
is_edge_nat)
es = self.create_external_segment(name='supported',
expected_res_status=webob.exc.HTTPCreated.code,
shared=shared)['external_segment']
nat_pool = self.create_nat_pool(
external_segment_id=es['id'],
ip_version=4, ip_pool='192.168.1.0/24', shared=shared,
expected_res_status=webob.exc.HTTPCreated.code)['nat_pool']
owner = es['tenant_id'] if not shared else self.common_tenant
mgr = self.driver.apic_manager
if self.nat_enabled and not is_edge_nat:
mgr.ensure_subnet_created_on_apic.assert_called_with(
owner, "NAT-bd-%s" % es['id'], '192.168.1.1/24')
else:
self.assertFalse(mgr.ensure_subnet_created_on_apic.called)
self.delete_nat_pool(nat_pool['id'],
expected_res_status=webob.exc.HTTPNoContent.code)
if self.nat_enabled and not is_edge_nat:
mgr.ensure_subnet_deleted_on_apic.assert_called_with(
owner, "NAT-bd-%s" % es['id'], '192.168.1.1/24')
else:
self.assertFalse(mgr.ensure_subnet_deleted_on_apic.called)
def test_nat_bd_subnet_create_delete_unshared(self):
self._test_nat_bd_subnet_created_deleted(False)
def test_nat_bd_subnet_create_delete_shared(self):
self._test_nat_bd_subnet_created_deleted(True)
def test_nat_bd_subnet_create_delete_unshared_edge_nat(self):
self._test_nat_bd_subnet_created_deleted(False, is_edge_nat=True)
def test_nat_bd_subnet_create_delete_shared_edge_nat(self):
self._test_nat_bd_subnet_created_deleted(True, is_edge_nat=True)
def _test_nat_bd_subnet_updated(self, shared, is_edge_nat=False):
self._mock_external_dict([('supported', '192.168.0.2/24'),
('supported1', '192.168.10.2/24')],
is_edge_nat)
es1 = self.create_external_segment(name='supported',
expected_res_status=webob.exc.HTTPCreated.code,
shared=shared)['external_segment']
es2 = self.create_external_segment(name='supported1',
expected_res_status=webob.exc.HTTPCreated.code,
shared=shared)['external_segment']
nat_pool = self.create_nat_pool(
external_segment_id=es1['id'],
ip_version=4, ip_pool='192.168.1.0/24', shared=shared,
expected_res_status=webob.exc.HTTPCreated.code)['nat_pool']
owner = es1['tenant_id'] if not shared else self.common_tenant
mgr = self.driver.apic_manager
mgr.ensure_subnet_created_on_apic.reset_mock()
nat_pool = self.update_nat_pool(nat_pool['id'],
external_segment_id=es2['id'],
expected_res_status=webob.exc.HTTPOk.code)['nat_pool']
if self.nat_enabled and not is_edge_nat:
mgr.ensure_subnet_deleted_on_apic.assert_called_with(
owner, "NAT-bd-%s" % es1['id'], '192.168.1.1/24')
mgr.ensure_subnet_created_on_apic.assert_called_with(
owner, "NAT-bd-%s" % es2['id'], '192.168.1.1/24')
else:
self.assertFalse(mgr.ensure_subnet_created_on_apic.called)
self.assertFalse(mgr.ensure_subnet_deleted_on_apic.called)
def test_nat_bd_subnet_update_unshared(self):
self._test_nat_bd_subnet_updated(False)
def test_nat_bd_subnet_update_shared(self):
self._test_nat_bd_subnet_updated(True)
def test_nat_bd_subnet_update_unshared_edge_nat(self):
self._test_nat_bd_subnet_updated(False, is_edge_nat=True)
def test_nat_bd_subnet_update_shared_edge_nat(self):
self._test_nat_bd_subnet_updated(True, is_edge_nat=True)
def _test_create_fip(self, shared):
self._mock_external_dict([('supported', '192.168.0.2/24')])
es = self.create_external_segment(name='supported',
expected_res_status=webob.exc.HTTPCreated.code,
shared=shared)['external_segment']
self.create_nat_pool(external_segment_id=es['id'],
ip_version=4, ip_pool='192.168.1.0/24', shared=shared,
expected_res_status=webob.exc.HTTPCreated.code)
subnet = self._get_object('subnets', es['subnet_id'],
self.api)['subnet']
fip_dict = {'floating_network_id': subnet['network_id']}
fip = self.l3plugin.create_floatingip(
context.get_admin_context(),
{'floatingip': fip_dict,
'tenant_id': es['tenant_id']})
self.assertIsNotNone(fip)
self.assertTrue(
netaddr.IPAddress(fip['floating_ip_address']) in
netaddr.IPNetwork('192.168.1.0/24'))
def test_create_fip(self):
self._test_create_fip(False)
def test_create_fip_shared(self):
self._test_create_fip(True)
class TestNatPoolNoNat(TestNatPool):
def setUp(self):
super(TestNatPoolNoNat, self).setUp(nat_enabled=False)
| 0
| 90
| 0
| 251,776
| 0
| 43
| 0
| 551
| 1,175
|
87484e3ece20f96eee0532619677e15accd1c4e4
| 4,760
|
py
|
Python
|
ResourceMonitor.py
|
Bot-7037/Resource-Monitor
|
44c96606784d6138bbfbc0fd8254252bb676dbfc
|
[
"MIT"
] | 1
|
2021-11-21T05:26:06.000Z
|
2021-11-21T05:26:06.000Z
|
ResourceMonitor.py
|
Bot-7037/Resource-Monitor
|
44c96606784d6138bbfbc0fd8254252bb676dbfc
|
[
"MIT"
] | null | null | null |
ResourceMonitor.py
|
Bot-7037/Resource-Monitor
|
44c96606784d6138bbfbc0fd8254252bb676dbfc
|
[
"MIT"
] | null | null | null |
from termcolor import colored
import GetInfo
import time
import os
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--columns", default="name,cpu_usage,memory_usage,read_bytes,write_bytes,status,create_time,n_threads")
parser.add_argument("-s", "--sort-by", dest="sort_by",default="memory_usage")
parser.add_argument("--ascending", action="store_true")
parser.add_argument("-n", default=20)
parser.add_argument("-u", "--live-update", action="store_true")
parser.add_argument("--kill", dest="process_to_close")
parser.add_argument("--after", dest="duration", default=0)
args = parser.parse_args()
columns = args.columns
sort_by = args.sort_by
descending = args.ascending
n = int(args.n)
live_update = args.live_update
kill = args.process_to_close
duration = int(args.duration)
# Fix terminal size
if 'nt' in os.name:
while(1):
(width, height) = os.get_terminal_size()
if(int(height) < 33 or int(width)<120):
print(colored("Terminal size too small. Resize the terminal", 'red', attrs=['bold']))
else:
break
time.sleep(0.5)
os.system("cls")
else:
while(1):
height, width = os.popen('stty size', 'r').read().split()
if(int(height) < 33 or int(width)<120):
print(colored("Terminal size too small. Resize the terminal", 'red', attrs=['bold']))
else:
break
time.sleep(0.5)
os.system("clear")
processes = GetInfo.get_processes_info()
df = construct_dataframe(processes)
print_header()
if n == 0:
print(df.to_string())
elif n > 0:
print(df.head(n).to_string())
draw_graph()
while live_update:
processes = GetInfo.get_processes_info()
df = construct_dataframe(processes)
os.system("cls") if "nt" in os.name else os.system("clear")
print_header()
if n == 0:
print(colored(df.to_string(), 'red','on_white'))
elif n > 0:
print(colored(df.head(n).to_string(), 'red','on_white'))
draw_graph()
time.sleep(1)
if(kill):
kill_process(df.head(n).to_string(), kill, duration*60)
| 35
| 134
| 0.575
|
import psutil
import pandas as pd
from datetime import datetime
from termcolor import colored
import GetInfo
import Notify
import time
import os
def print_header():
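    # Print the framed "RESOURCE MONITOR" banner at the top of the screen.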
print("╔"+"═"*117,end="╗\n║")
print(colored("\t\t\t\t\t\t[= RESOURCE MONITOR =]\t\t\t\t\t\t ", "cyan", attrs=['bold']),end="║\n")
print("╚"+"═"*117+"╝")
def construct_dataframe(processes):
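    # Build a DataFrame indexed by pid, sort it by the requested column,
    # humanize the byte columns and keep only the selected columns.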
df = pd.DataFrame(processes)
df.set_index('pid', inplace=True)
df.sort_values(sort_by, inplace=True, ascending=descending)
df['memory_usage'] = df['memory_usage'].apply(get_size)
df['write_bytes'] = df['write_bytes'].apply(get_size)
df['read_bytes'] = df['read_bytes'].apply(get_size)
df['create_time'] = df['create_time'].apply(datetime.strftime, args=("%Y-%m-%d %H:%M:%S",)) # Correcting formats
df = df[columns.split(",")]
return df
def get_size(bytes):
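    # Convert a raw byte count into a human-readable string (B, KB, MB, ...).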
for unit in ['', 'K', 'M', 'G', 'T', 'P']:
if bytes < 1024:
return f"{bytes:.2f}{unit}B"
bytes /= 1024
def kill_process(df, process, duration):
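    # Count down `duration` seconds (sending a notification when one minute
    # is left), then kill every process whose name matches `process`.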
os.system("cls") if "nt" in os.name else os.system("clear")
while(duration > 0):
print(df)
print(f"Will close {process} in {duration} seconds")
duration-=1
time.sleep(1)
os.system("cls") if "nt" in os.name else os.system("clear")
if(duration == 60):
Notify.Notify("Attention", "Closing the {process} in a minute", 5)
duration -=5
for proc in psutil.process_iter():
if proc.name() == process:
proc.kill()
def draw_graph():
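    # Render text bar graphs for the total CPU and memory usage of the listed processes.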
print("\n╔"+"═"*117,end="╗\n║")
# Print CPU Graph
cpu_usage = df['cpu_usage'].sum()
if(cpu_usage>100): cpu_usage=100
if(cpu_usage<1): cpu_usage=1
text = "CPU Usage\t"+"█"*int(cpu_usage) + int(100-cpu_usage+2)*" "
print(colored(text, "magenta", attrs=['bold']),end=" ║\n║")
#Print Memory graph
RAM = round(psutil.virtual_memory().total / (1024.0 **2))
def get_number(x):
if('MB' in x): return float(x[:-2])
else: return float(x[:-2])/1024
RAM_usage = df['memory_usage'].apply(get_number)
RAM_usage = (RAM_usage.sum())*100
text = "Memory Usage \t"+"█"*int(RAM_usage/ RAM) + int(100-int(RAM_usage/ RAM)+2)*" "
print(colored(text, "green", attrs=['bold']),end="║\n")
print("╚"+"═"*117+"╝")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--columns", default="name,cpu_usage,memory_usage,read_bytes,write_bytes,status,create_time,n_threads")
parser.add_argument("-s", "--sort-by", dest="sort_by",default="memory_usage")
parser.add_argument("--ascending", action="store_true")
parser.add_argument("-n", default=20)
parser.add_argument("-u", "--live-update", action="store_true")
parser.add_argument("--kill", dest="process_to_close")
parser.add_argument("--after", dest="duration", default=0)
args = parser.parse_args()
columns = args.columns
sort_by = args.sort_by
descending = args.ascending
n = int(args.n)
live_update = args.live_update
kill = args.process_to_close
duration = int(args.duration)
# Fix terminal size
if 'nt' in os.name:
while(1):
(width, height) = os.get_terminal_size()
if(int(height) < 33 or int(width)<120):
print(colored("Terminal size too small. Resize the terminal", 'red', attrs=['bold']))
else:
break
time.sleep(0.5)
os.system("cls")
else:
while(1):
height, width = os.popen('stty size', 'r').read().split()
if(int(height) < 33 or int(width)<120):
print(colored("Terminal size too small. Resize the terminal", 'red', attrs=['bold']))
else:
break
time.sleep(0.5)
os.system("clear")
processes = GetInfo.get_processes_info()
df = construct_dataframe(processes)
print_header()
if n == 0:
print(df.to_string())
elif n > 0:
print(df.head(n).to_string())
draw_graph()
while live_update:
processes = GetInfo.get_processes_info()
df = construct_dataframe(processes)
os.system("cls") if "nt" in os.name else os.system("clear")
print_header()
if n == 0:
print(colored(df.to_string(), 'red','on_white'))
elif n > 0:
print(colored(df.head(n).to_string(), 'red','on_white'))
draw_graph()
time.sleep(1)
if(kill):
kill_process(df.head(n).to_string(), kill, duration*60)
| 60
| 0
| 0
| 0
| 0
| 2,105
| 0
| -10
| 217
|
8d272e209965eda5ff24423747a5f6f4591b2b0c
| 1,369
|
py
|
Python
|
shutdown.py
|
liusl104/py_sync_binlog
|
33a67f545159767d38a522d28d2f79b3ac3802ca
|
[
"Apache-2.0"
] | 3
|
2018-09-18T03:29:33.000Z
|
2020-01-13T03:34:39.000Z
|
shutdown.py
|
liusl104/py_sync_binlog
|
33a67f545159767d38a522d28d2f79b3ac3802ca
|
[
"Apache-2.0"
] | null | null | null |
shutdown.py
|
liusl104/py_sync_binlog
|
33a67f545159767d38a522d28d2f79b3ac3802ca
|
[
"Apache-2.0"
] | 1
|
2022-01-25T09:39:17.000Z
|
2022-01-25T09:39:17.000Z
|
# encoding=utf8
from sync_binlog.output_log import logger as loging
import time
import sys
from sync_binlog.update_post import update_datetime
try:
import psutil
except ImportError:
print("psutil , pip install psutil ")
sys.exit(0)
# Shutdown complete
if __name__ == "__main__":
print("%sStarting shutdown..." % update_datetime())
shutdown_program()
time.sleep(3)
process_id = judgeprocess('startup.py')
if process_id is not False:
psutil.Process(process_id).kill()
print("%sShutdown complete" % update_datetime())
loging.info("Shutdown complete")
else:
print("%s" % update_datetime())
loging.info("")
| 24.890909
| 63
| 0.637692
|
# encoding=utf8
import os
import socket
from sync_binlog.output_log import logger as loging
import time
import sys
from sync_binlog.update_post import update_datetime
try:
import psutil
except ImportError:
print("psutil 模块不存在,请使用 pip install psutil 安装")
sys.exit(0)
def shutdown_program():
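    # Remove the <hostname>.pid marker file that indicates the sync process is running.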
hostname = socket.gethostname()
if os.path.exists('%s.pid' % hostname):
os.remove('%s.pid' % hostname)
loging.info("Starting shutdown...")
else:
        print('%s%s.pid file does not exist' % (update_datetime(), hostname))
        loging.warn('%s.pid file does not exist' % hostname)
def judgeprocess(processname):
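    # Return the pid of the first process whose command line contains processname, or False if none matches.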
pl = psutil.pids()
for pid in pl:
try:
cmdlines = psutil.Process(pid).cmdline()
except Exception:
continue
        for cmdline in cmdlines:
            if processname in cmdline:
                return pid
    return False
# Shutdown complete
if __name__ == "__main__":
print("%sStarting shutdown..." % update_datetime())
shutdown_program()
time.sleep(3)
process_id = judgeprocess('startup.py')
if process_id is not False:
psutil.Process(process_id).kill()
print("%sShutdown complete" % update_datetime())
loging.info("Shutdown complete")
else:
print("%s程序自动关闭,请手工检查" % update_datetime())
loging.info("程序自动关闭,请手工检查")
| 132
| 0
| 0
| 0
| 0
| 571
| 0
| -20
| 91
|
e1c34e1f2ca887b5b7509177103c082a02ee4201
| 337
|
py
|
Python
|
example/test/T7_duocaiyinyuebang.py
|
Michael8968/skulpt
|
15956a60398fac92ee1dab25bf661ffc003b2eaf
|
[
"MIT"
] | 2
|
2021-12-18T06:34:26.000Z
|
2022-01-05T05:08:47.000Z
|
example/test/T8_duocaiyinyuebang.py
|
Michael8968/skulpt
|
15956a60398fac92ee1dab25bf661ffc003b2eaf
|
[
"MIT"
] | null | null | null |
example/test/T8_duocaiyinyuebang.py
|
Michael8968/skulpt
|
15956a60398fac92ee1dab25bf661ffc003b2eaf
|
[
"MIT"
] | null | null | null |
import turtle
turtle.mode("logo")
turtle.shape("turtle")
turtle.bgcolor("black")
turtle.hideturtle()
turtle.pensize(12)
turtle.colormode(255)
s = 50
a = 0
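# Draw ten bars of increasing length, shifting the pen color on each step.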
for i in range(10):
turtle.pencolor(200-a, a, 100)
turtle.pu()
turtle.goto(25*i, 0)
turtle.pd()
turtle.forward(s)
a = a + 20
s = s + 10
turtle.done()
| 14.652174
| 34
| 0.62908
|
import turtle
turtle.mode("logo")
turtle.shape("turtle")
turtle.bgcolor("black")
turtle.hideturtle()
turtle.pensize(12)
turtle.colormode(255)
s = 50
a = 0
for i in range(10):
turtle.pencolor(200-a, a, 100)
turtle.pu()
turtle.goto(25*i, 0)
turtle.pd()
turtle.forward(s)
a = a + 20
s = s + 10
turtle.done()
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
d73f56d089f03fc6f77ef649bdcfbef43da2b862
| 1,144
|
py
|
Python
|
docs/scripts/cluster_add_k8s.py
|
pramaku/hpecp-python-library
|
55550a1e27259a3132ea0608e66719e9732fb081
|
[
"Apache-2.0"
] | null | null | null |
docs/scripts/cluster_add_k8s.py
|
pramaku/hpecp-python-library
|
55550a1e27259a3132ea0608e66719e9732fb081
|
[
"Apache-2.0"
] | null | null | null |
docs/scripts/cluster_add_k8s.py
|
pramaku/hpecp-python-library
|
55550a1e27259a3132ea0608e66719e9732fb081
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
from hpecp import ContainerPlatformClient, APIException
from hpecp.k8s_cluster import K8sClusterHostConfig
import textwrap
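# Connect to the HPE Container Platform controller, list the K8s hosts,
# and create a cluster from host 4 (worker) and host 5 (master).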
client = ContainerPlatformClient(username='admin',
password='admin123',
api_host='127.0.0.1',
api_port=8080,
use_ssl=True,
verify_ssl='/certs/hpecp-ca-cert.pem')
client.create_session()
print( client.k8s_worker.get_k8shosts().tabulate() )
try:
k8shosts_config=[
K8sClusterHostConfig(4, 'worker'),
K8sClusterHostConfig(5, 'master')
]
k8s_cluster_id = client.k8s_cluster.create(name='def', description='my cluster', k8s_version='1.17.0', k8shosts_config=k8shosts_config)
print('creating cluster id: ' + k8s_cluster_id)
except APIException as e:
text = """APIException(
Backend API Response -> {}
HTTP Method -> {}
Request URL -> {}
Request Data -> [{}]
)"""
print( textwrap.dedent(text).format(e.message, e.request_method, e.request_url, e.request_data) )
| 35.75
| 139
| 0.604021
|
#!/usr/bin/env python3
from hpecp import ContainerPlatformClient, APIException
from hpecp.k8s_cluster import K8sClusterHostConfig
import textwrap
client = ContainerPlatformClient(username='admin',
password='admin123',
api_host='127.0.0.1',
api_port=8080,
use_ssl=True,
verify_ssl='/certs/hpecp-ca-cert.pem')
client.create_session()
print( client.k8s_worker.get_k8shosts().tabulate() )
try:
k8shosts_config=[
K8sClusterHostConfig(4, 'worker'),
K8sClusterHostConfig(5, 'master')
]
k8s_cluster_id = client.k8s_cluster.create(name='def', description='my cluster', k8s_version='1.17.0', k8shosts_config=k8shosts_config)
print('creating cluster id: ' + k8s_cluster_id)
except APIException as e:
text = """APIException(
Backend API Response -> {}
HTTP Method -> {}
Request URL -> {}
Request Data -> [{}]
)"""
print( textwrap.dedent(text).format(e.message, e.request_method, e.request_url, e.request_data) )
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
ff3dcdc0b12675c40e2b4e49025d944716d7d7ae
| 1,195
|
py
|
Python
|
datasets/data_splitter.py
|
AjayMudhai/pytorch-CycleGAN-and-pix2pix
|
64fcf0b926e2125042a559b0fb6a4a57559923c2
|
[
"BSD-3-Clause"
] | null | null | null |
datasets/data_splitter.py
|
AjayMudhai/pytorch-CycleGAN-and-pix2pix
|
64fcf0b926e2125042a559b0fb6a4a57559923c2
|
[
"BSD-3-Clause"
] | null | null | null |
datasets/data_splitter.py
|
AjayMudhai/pytorch-CycleGAN-and-pix2pix
|
64fcf0b926e2125042a559b0fb6a4a57559923c2
|
[
"BSD-3-Clause"
] | null | null | null |
# wbg_pth='/datadrive/Reflection/training_data/wbg'
# img_pth='/datadrive/Reflection/training_data/images'
# dst_pth='/datadrive/Reflection/training_data/valB'
# move_data(wbg_pth,img_pth,dst_pth)
wbg_pth='/datadrive/Reflection/training_data/wbg'
trainA_pth='/datadrive/pytorch-CycleGAN-and-pix2pix/datasets/cars/trainA'
imgs_pth='/datadrive/Reflection/training_data/images'
trainB_pth='/datadrive/pytorch-CycleGAN-and-pix2pix/datasets/cars/trainB'
move_data(wbg_pth,trainA_pth,imgs_pth,trainB_pth)
| 35.147059
| 73
| 0.702092
|
import os
import shutil
def move_data(wbg_pth,img_pth,dst_pth):
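    # For every file name found under wbg_pth, move the same-named image from img_pth to dst_pth.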
for root,dirs,files in os.walk(wbg_pth):
for file in files:
op=os.path.join(img_pth,file)
nnp=os.path.join(dst_pth,file)
shutil.move(op,nnp)
# wbg_pth='/datadrive/Reflection/training_data/wbg'
# img_pth='/datadrive/Reflection/training_data/images'
# dst_pth='/datadrive/Reflection/training_data/valB'
# move_data(wbg_pth,img_pth,dst_pth)
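# Note: the definition below replaces move_data above; it moves matching
# image pairs into the CycleGAN trainA/trainB folders.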
def move_data(wbg_pth,trainA_pth,imgs_pth,trainB_pth):
for root,dirs,files in os.walk(wbg_pth):
for file in files:
ta_op=os.path.join(root,file)
ta_nnp=os.path.join(trainA_pth,file)
tb_op=os.path.join(imgs_pth,file)
tb_nnp=os.path.join(trainB_pth,file)
if os.path.exists(tb_op):
shutil.move(ta_op,ta_nnp)
shutil.move(tb_op,tb_nnp)
wbg_pth='/datadrive/Reflection/training_data/wbg'
trainA_pth='/datadrive/pytorch-CycleGAN-and-pix2pix/datasets/cars/trainA'
imgs_pth='/datadrive/Reflection/training_data/images'
trainB_pth='/datadrive/pytorch-CycleGAN-and-pix2pix/datasets/cars/trainB'
move_data(wbg_pth,trainA_pth,imgs_pth,trainB_pth)
| 0
| 0
| 0
| 0
| 0
| 620
| 0
| -20
| 92
|
5679b81204b649dc0cd086786518ea8025d21ff4
| 2,711
|
py
|
Python
|
venv/lib/python3.8/site-packages/azureml/_base_sdk_common/field_info.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/azureml/_base_sdk_common/field_info.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/azureml/_base_sdk_common/field_info.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | 2
|
2021-05-23T16:46:31.000Z
|
2021-05-26T23:51:09.000Z
|
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""A class for storing the field information."""
| 30.806818
| 113
| 0.57986
|
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""A class for storing the field information."""
class _FieldInfo(object):
"""A class for storing the field information."""
def __init__(self, field_type, documentation, list_element_type=None, user_keys=False, serialized_name=None,
exclude_if_none=False):
"""Class FieldInfo constructor.
:param field_type: The data type of field.
:type field_type: object
:param documentation: The field information
:type documentation: str
:param list_element_type: The type of list element.
:type list_element_type: object
:param user_keys: user_keys=True, if keys in the value of the field are user keys.
user keys are not case normalized.
:type user_keys: bool
:param serialized_name:
:type serialized_name: str
:param exclude_if_none: Exclude from serialized output if value is None.
:type exclude_if_none: bool
"""
self._field_type = field_type
self._documentation = documentation
self._list_element_type = list_element_type
self._user_keys = user_keys
self._serialized_name = serialized_name
self._exclude_if_none = exclude_if_none
@property
def field_type(self):
"""Get field type.
:return: Returns the field type.
:rtype: object
"""
return self._field_type
@property
def documentation(self):
"""Return documentation.
:return: Returns the documentation.
:rtype: str
"""
return self._documentation
@property
def list_element_type(self):
"""Get list element type.
:return: Returns the list element type.
:rtype: object
"""
return self._list_element_type
@property
def user_keys(self):
"""Get user keys setting.
:return: Returns the user keys setting.
:rtype: bool
"""
return self._user_keys
@property
def serialized_name(self):
"""Get serialized name.
:return: Returns the serialized name.
:rtype: str
"""
return self._serialized_name
@property
def exclude_if_none(self):
"""Get whether to exclude None from serialized output.
:return: Returns whether to exclude None form serialized output.
:rtype: bool
"""
return self._exclude_if_none
| 0
| 1,085
| 0
| 1,367
| 0
| 0
| 0
| 0
| 24
|
6f3cd2cc7dc0d6471de7c34578d22cf3a32749f4
| 3,440
|
py
|
Python
|
GmailWrapper_JE/venv/Lib/site-packages/google/auth/crypt/__init__.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
GmailWrapper_JE/venv/Lib/site-packages/google/auth/crypt/__init__.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
GmailWrapper_JE/venv/Lib/site-packages/google/auth/crypt/__init__.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cryptography helpers for verifying and signing messages.
The simplest way to verify signatures is using :func:`verify_signature`::
cert = open('certs.pem').read()
valid = crypt.verify_signature(message, signature, cert)
If you're going to verify many messages with the same certificate, you can use
:class:`RSAVerifier`::
cert = open('certs.pem').read()
verifier = crypt.RSAVerifier.from_string(cert)
valid = verifier.verify(message, signature)
To sign messages use :class:`RSASigner` with a private key::
private_key = open('private_key.pem').read()
signer = crypt.RSASigner.from_string(private_key)
signature = signer.sign(message)
The code above also works for :class:`ES256Signer` and :class:`ES256Verifier`.
Note that these two classes are only available if your `cryptography` dependency
version is at least 1.4.0.
"""
import six
from google.auth.crypt import base
from google.auth.crypt import rsa
try:
from google.auth.crypt import es256
except ImportError: # pragma: NO COVER
es256 = None
if es256 is not None: # pragma: NO COVER
__all__ = [
"ES256Signer",
"ES256Verifier",
"RSASigner",
"RSAVerifier",
"Signer",
"Verifier",
]
else: # pragma: NO COVER
__all__ = ["RSASigner", "RSAVerifier", "Signer", "Verifier"]
# Aliases to maintain the v1.0.0 interface, as the crypt module was split
# into submodules.
Signer = base.Signer
Verifier = base.Verifier
RSASigner = rsa.RSASigner
RSAVerifier = rsa.RSAVerifier
if es256 is not None: # pragma: NO COVER
ES256Signer = es256.ES256Signer
ES256Verifier = es256.ES256Verifier
def verify_signature(message, signature, certs, verifier_cls=rsa.RSAVerifier):
"""Verify an RSA or ECDSA cryptographic signature.
Checks that the provided ``signature`` was generated from ``bytes`` using
the private key associated with the ``cert``.
Args:
message (Union[str, bytes]): The plaintext message.
signature (Union[str, bytes]): The cryptographic signature to check.
certs (Union[Sequence, str, bytes]): The certificate or certificates
to use to check the signature.
verifier_cls (Optional[~google.auth.crypt.base.Signer]): Which verifier
class to use for verification. This can be used to select different
algorithms, such as RSA or ECDSA. Default value is :class:`RSAVerifier`.
Returns:
bool: True if the signature is valid, otherwise False.
"""
if isinstance(certs, (six.text_type, six.binary_type)):
certs = [certs]
for cert in certs:
verifier = verifier_cls.from_string(cert)
if verifier.verify(message, signature):
return True
return False
| 34.059406
| 85
| 0.684302
|
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cryptography helpers for verifying and signing messages.
The simplest way to verify signatures is using :func:`verify_signature`::
cert = open('certs.pem').read()
valid = crypt.verify_signature(message, signature, cert)
If you're going to verify many messages with the same certificate, you can use
:class:`RSAVerifier`::
cert = open('certs.pem').read()
verifier = crypt.RSAVerifier.from_string(cert)
valid = verifier.verify(message, signature)
To sign messages use :class:`RSASigner` with a private key::
private_key = open('private_key.pem').read()
signer = crypt.RSASigner.from_string(private_key)
signature = signer.sign(message)
The code above also works for :class:`ES256Signer` and :class:`ES256Verifier`.
Note that these two classes are only available if your `cryptography` dependency
version is at least 1.4.0.
"""
import six
from google.auth.crypt import base
from google.auth.crypt import rsa
try:
from google.auth.crypt import es256
except ImportError: # pragma: NO COVER
es256 = None
if es256 is not None: # pragma: NO COVER
__all__ = [
"ES256Signer",
"ES256Verifier",
"RSASigner",
"RSAVerifier",
"Signer",
"Verifier",
]
else: # pragma: NO COVER
__all__ = ["RSASigner", "RSAVerifier", "Signer", "Verifier"]
# Aliases to maintain the v1.0.0 interface, as the crypt module was split
# into submodules.
Signer = base.Signer
Verifier = base.Verifier
RSASigner = rsa.RSASigner
RSAVerifier = rsa.RSAVerifier
if es256 is not None: # pragma: NO COVER
ES256Signer = es256.ES256Signer
ES256Verifier = es256.ES256Verifier
def verify_signature(message, signature, certs, verifier_cls=rsa.RSAVerifier):
"""Verify an RSA or ECDSA cryptographic signature.
Checks that the provided ``signature`` was generated from ``bytes`` using
the private key associated with the ``cert``.
Args:
message (Union[str, bytes]): The plaintext message.
signature (Union[str, bytes]): The cryptographic signature to check.
certs (Union[Sequence, str, bytes]): The certificate or certificates
to use to check the signature.
verifier_cls (Optional[~google.auth.crypt.base.Signer]): Which verifier
class to use for verification. This can be used to select different
algorithms, such as RSA or ECDSA. Default value is :class:`RSAVerifier`.
Returns:
bool: True if the signature is valid, otherwise False.
"""
if isinstance(certs, (six.text_type, six.binary_type)):
certs = [certs]
for cert in certs:
verifier = verifier_cls.from_string(cert)
if verifier.verify(message, signature):
return True
return False
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
139954145bee80fddd7c595da934e0de4728e34e
| 84
|
py
|
Python
|
hw7/ch16/automate_online-materials/sameNameError.py
|
JWiliams/csc221
|
0653dcb5f185e8517be9146e17b580f62d4930e6
|
[
"CC0-1.0"
] | null | null | null |
hw7/ch16/automate_online-materials/sameNameError.py
|
JWiliams/csc221
|
0653dcb5f185e8517be9146e17b580f62d4930e6
|
[
"CC0-1.0"
] | null | null | null |
hw7/ch16/automate_online-materials/sameNameError.py
|
JWiliams/csc221
|
0653dcb5f185e8517be9146e17b580f62d4930e6
|
[
"CC0-1.0"
] | null | null | null |
eggs = 'global'
spam()
| 14
| 24
| 0.571429
|
def spam():
print(eggs) # ERROR!
eggs = 'spam local'
eggs = 'global'
spam()
| 0
| 0
| 0
| 0
| 0
| 39
| 0
| 0
| 22
|
49e036fce5023369b407b2429dceb9099d47e801
| 1,209
|
py
|
Python
|
scripts/release_helper/utils.py
|
DavidZeLiang/azure-sdk-for-python
|
b343247adc7c3c7ff52d6eadeca6b57eb0a23047
|
[
"MIT"
] | null | null | null |
scripts/release_helper/utils.py
|
DavidZeLiang/azure-sdk-for-python
|
b343247adc7c3c7ff52d6eadeca6b57eb0a23047
|
[
"MIT"
] | null | null | null |
scripts/release_helper/utils.py
|
DavidZeLiang/azure-sdk-for-python
|
b343247adc7c3c7ff52d6eadeca6b57eb0a23047
|
[
"MIT"
] | null | null | null |
import logging
REQUEST_REPO = 'Azure/sdk-release-request'
REST_REPO = 'Azure/azure-rest-api-specs'
AUTO_ASSIGN_LABEL = 'assigned'
AUTO_PARSE_LABEL = 'auto-link'
_LOG = logging.getLogger(__name__)
| 31
| 87
| 0.635236
|
from github.Issue import Issue
from github.Repository import Repository
import logging
from typing import List
REQUEST_REPO = 'Azure/sdk-release-request'
REST_REPO = 'Azure/azure-rest-api-specs'
AUTO_ASSIGN_LABEL = 'assigned'
AUTO_PARSE_LABEL = 'auto-link'
_LOG = logging.getLogger(__name__)
def get_origin_link_and_tag(issue_body_list: List[str]) -> (str, str):
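    # Scan the issue body for the "link" and "readme tag" lines and return
    # both values, stripping any markdown brackets around the link.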
link, readme_tag = '', ''
for row in issue_body_list:
if 'link' in row.lower():
link = row.split(":", 1)[-1].strip()
if 'readme tag' in row.lower():
readme_tag = row.split(":", 1)[-1].strip()
if link and readme_tag:
break
if link.count('https') > 1:
link = link.split(']')[0]
link = link.replace('[', "").replace(']', "").replace('(', "").replace(')', "")
return link, readme_tag
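# Example with a hypothetical issue body:
#   get_origin_link_and_tag(["link: https://github.com/Azure/azure-rest-api-specs/pull/1",
#                            "Readme Tag: package-2021-01"])
#   -> ("https://github.com/Azure/azure-rest-api-specs/pull/1", "package-2021-01")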
class IssuePackage:
issue = None # origin issue instance
rest_repo = None # repo instance: Azure/azure-rest-api-specs
labels_name = {} # name set of issue labels
def __init__(self, issue: Issue, rest_repo: Repository):
self.issue = issue
self.rest_repo = rest_repo
self.labels_name = {label.name for label in issue.labels}
| 0
| 0
| 0
| 345
| 0
| 522
| 0
| 30
| 112
|
11de465540347c176ba804221d719eaf15627d08
| 2,350
|
py
|
Python
|
ex9/api_processor.py
|
Maheliusz/nlp_lab
|
49e5c9dfe81d94bac4323e044502d1b73c99ce3c
|
[
"MIT"
] | null | null | null |
ex9/api_processor.py
|
Maheliusz/nlp_lab
|
49e5c9dfe81d94bac4323e044502d1b73c99ce3c
|
[
"MIT"
] | null | null | null |
ex9/api_processor.py
|
Maheliusz/nlp_lab
|
49e5c9dfe81d94bac4323e044502d1b73c99ce3c
|
[
"MIT"
] | null | null | null |
import argparse
import os
import random
import sys
import time
import requests
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, help='Path to text files with bills', required=True)
parser.add_argument('--count', type=int, help='How much files process', required=False, default=20)
args = parser.parse_args()
url = 'http://ws.clarin-pl.eu/nlprest2'
parsed = {}
id2file = {}
already_parsed = os.listdir(args.path + 'ner/')
count = min(args.count, 100 - len(already_parsed))
directory_contents = random.sample(list(filter(lambda entry: os.path.isfile(args.path + entry)
and entry not in already_parsed,
os.listdir(args.path))),
k=count)
for filename in directory_contents:
with open(args.path + filename, encoding='utf-8') as file:
response = requests.post(url=url + '/base/startTask',
json={'text': file.read(), 'lpmn': 'any2txt|wcrft2|liner2({"model":"n82"})',
'user': ''})
response_string = str(response.content).replace("b\'", "").replace("\'", "")
id2file[response_string] = filename
parsed[response_string] = {"value": None, "status": None}
print("{} read and sent".format(filename))
id_list = list(parsed.keys())
print("Finished reading files")
counter = 0
while len(id_list) > 0:
    for id in list(id_list):  # iterate over a copy, since finished ids are removed from id_list below
parsed[id] = requests.get(url=url + '/base/getStatus/' + str(id)).json()
if parsed[id]['status'] == 'DONE':
counter += 1
with open(args.path + 'ner/' + id2file[id], 'wb') as file:
for element in parsed[id]['value']:
# print(requests.get(url=url + '/base/download' + element['fileID']).content)
# file.write(str(requests.get(url=url + '/base/download' + element['fileID']).content)[2:-1])
file.write(requests.get(url=url + '/base/download' + element['fileID']).content)
id_list.remove(id)
print("{} finished".format(counter))
elif parsed[id]['status'] == 'ERROR':
print(parsed[id]['value'], file=sys.stderr)
exit(-1)
time.sleep(2)
print('{} docs left'.format(len(id_list)))
| 41.22807
| 113
| 0.570638
|
import argparse
import os
import random
import sys
import time
import requests
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, help='Path to text files with bills', required=True)
parser.add_argument('--count', type=int, help='How much files process', required=False, default=20)
args = parser.parse_args()
url = 'http://ws.clarin-pl.eu/nlprest2'
parsed = {}
id2file = {}
already_parsed = os.listdir(args.path + 'ner/')
count = min(args.count, 100 - len(already_parsed))
directory_contents = random.sample(list(filter(lambda entry: os.path.isfile(args.path + entry)
and entry not in already_parsed,
os.listdir(args.path))),
k=count)
for filename in directory_contents:
with open(args.path + filename, encoding='utf-8') as file:
response = requests.post(url=url + '/base/startTask',
json={'text': file.read(), 'lpmn': 'any2txt|wcrft2|liner2({"model":"n82"})',
'user': ''})
response_string = str(response.content).replace("b\'", "").replace("\'", "")
id2file[response_string] = filename
parsed[response_string] = {"value": None, "status": None}
print("{} read and sent".format(filename))
id_list = list(parsed.keys())
print("Finished reading files")
counter = 0
while len(id_list) > 0:
    for id in list(id_list):  # iterate over a copy, since finished ids are removed from id_list below
parsed[id] = requests.get(url=url + '/base/getStatus/' + str(id)).json()
if parsed[id]['status'] == 'DONE':
counter += 1
with open(args.path + 'ner/' + id2file[id], 'wb') as file:
for element in parsed[id]['value']:
# print(requests.get(url=url + '/base/download' + element['fileID']).content)
# file.write(str(requests.get(url=url + '/base/download' + element['fileID']).content)[2:-1])
file.write(requests.get(url=url + '/base/download' + element['fileID']).content)
id_list.remove(id)
print("{} finished".format(counter))
elif parsed[id]['status'] == 'ERROR':
print(parsed[id]['value'], file=sys.stderr)
exit(-1)
time.sleep(2)
print('{} docs left'.format(len(id_list)))
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
5dedd4b58a8257ac092a43187da5519e0e4f4069
| 732
|
py
|
Python
|
src/routes/v1/faculties.py
|
university-my/ultimate-schedule-api
|
6dbf2368da8751a8b6105c8d783a4b105f99866d
|
[
"MIT"
] | 5
|
2020-04-18T16:33:50.000Z
|
2021-09-30T09:24:56.000Z
|
src/routes/v1/faculties.py
|
university-my/ultimate-schedule-api
|
6dbf2368da8751a8b6105c8d783a4b105f99866d
|
[
"MIT"
] | 15
|
2020-04-18T13:03:26.000Z
|
2021-12-13T20:44:54.000Z
|
src/routes/v1/faculties.py
|
university-my/ultimate-schedule-api
|
6dbf2368da8751a8b6105c8d783a4b105f99866d
|
[
"MIT"
] | 2
|
2020-05-30T20:51:45.000Z
|
2021-09-28T10:32:12.000Z
|
from fastapi import APIRouter
tag = "Faculties"
router = APIRouter()
| 34.857143
| 85
| 0.79235
|
from fastapi import APIRouter
from src.utils.events import Events
from src.schemas.schema import x_schedule_header
from src.controllers.faculties_controller import get_all_faculties, is_faculty_exists
from src.utils.tracking import track
tag = "Faculties"
router = APIRouter()
@router.get("", tags=[tag])
@track(fmt="", event=Events.GET_ALL_FACULTIES)
async def faculties(*, schedule_url: str = x_schedule_header):
return await get_all_faculties(schedule_url=schedule_url)
@router.get("/exists", tags=[tag])
@track(fmt="query={query}", event=Events.IS_FACULTY_EXISTS)
async def faculty_exists(*, query: str, schedule_url: str = x_schedule_header):
return await is_faculty_exists(schedule_url=schedule_url, query=query)
| 0
| 406
| 0
| 0
| 0
| 0
| 0
| 120
| 134
|
20b200530b1cf1e5a75bb3eada9fd29120296117
| 11,062
|
py
|
Python
|
ckanext/ksext/controllers/MUser.py
|
WilJoey/ckanext-ksext
|
1f3383d34beb35702d5bf0799defa5398f207ce2
|
[
"MIT"
] | null | null | null |
ckanext/ksext/controllers/MUser.py
|
WilJoey/ckanext-ksext
|
1f3383d34beb35702d5bf0799defa5398f207ce2
|
[
"MIT"
] | null | null | null |
ckanext/ksext/controllers/MUser.py
|
WilJoey/ckanext-ksext
|
1f3383d34beb35702d5bf0799defa5398f207ce2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#from ckan.lib.base import BaseController, config
import ckan.lib.base as base
import ckan.model as model
import ckan.logic as logic
import ckan.logic.schema as schema
import ckan.new_authz as new_authz
import ckan.lib.captcha as captcha
import ckan.lib.navl.dictization_functions as dictization_functions
import logging
from ckan.common import c, request
c = base.c
request = base.request
log = logging.getLogger(__name__)
| 37.120805
| 175
| 0.593835
|
# -*- coding: utf-8 -*-
import ckan.plugins as p
#from ckan.lib.base import BaseController, config
import ckan.lib.base as base
import ckan.lib.helpers as h
import ckan.model as model
import ckan.logic as logic
import ckan.logic.schema as schema
import ckan.new_authz as new_authz
import ckan.lib.captcha as captcha
import ckan.lib.navl.dictization_functions as dictization_functions
import functools
import requests
from sqlalchemy import text
import logging
from pylons import config
from ckan.common import _, c, g, request, response
c = base.c
request = base.request
log = logging.getLogger(__name__)
class MUserController(base.BaseController):
def org_admin_update(self):
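        # POST handler: reassign the dataset's manager (creator_user_id) and return a JSON success flag.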
result = { "success": False }
user_id = request.POST.get('user', '')
dataset_id = request.POST.get('ds','')
self._update_org_admin(user_id, dataset_id)
result["success"]=True
response.headers['Content-Type'] = 'application/json;charset=utf-8'
return h.json.dumps(result)
def _update_org_admin(self, user_id, dataset_id):
sql = "update package set creator_user_id=%s where id=%s ;"
model.meta.engine.execute(sql, user_id, dataset_id)
model.Session.commit()
def org_admin(self):
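        # GET handler: return the organization's active users and the dataset's current manager as JSON.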
org_id = request.GET.get('org', '')
dataset_id = request.GET.get('ds','')
result = {
"org_users" : self._get_org_users(org_id),
"manager" : self._get_dataset_manager(dataset_id)
}
response.headers['Content-Type'] = 'application/json;charset=utf-8'
return h.json.dumps(result)
def _get_org_users(self, org_id):
sql = '''
select u.id, u.fullname as name from member m left join "user" u on u.id=m.table_id where m.group_id=:org_id and m.state='active' and m.table_name='user' and u.state='active';
'''
dt = model.Session.execute(sql, {'org_id': org_id}).fetchall()
result = [dict(row) for row in dt]
return result
def _get_dataset_manager(self, dataset_id):
sql ='''
select u.fullname as name, u.id from package p left join "user" u on p.creator_user_id=u.id where p.id=:dataset_id;
'''
dt = model.Session.execute(sql, {'dataset_id': dataset_id}).fetchall()
if (len(dt) == 0 ) :
return None
else :
return [dict(row) for row in dt]
def index (self):
LIMIT = 20
page = int(request.params.get('page', 1))
c.q = request.params.get('q', '')
c.order_by = request.params.get('order_by', 'name')
context = {'return_query': True, 'user': c.user or c.author,
'auth_user_obj': c.userobj}
data_dict = {'q': c.q,
'limit': LIMIT,
'offset': (page - 1) * LIMIT,
'order_by': c.order_by}
try:
logic.check_access('user_list', context, data_dict)
except logic.NotAuthorized:
base.abort(401, _('Not authorized to see this page'))
users_list = logic.get_action('user_list')(context, data_dict)
c.users = users_list
c.page = h.Page(
collection=users_list,
page=page,
url=h.pager_url,
item_count=users_list.count(),
items_per_page=LIMIT
)
return base.render('muser/index.html')
def new (self, data=None, errors=None, error_summary=None):
#q = model.Session.query(model.User).filter(model.User.sysadmin==True)
#c.sysadmins = [a.name for a in q.all()]
'''GET to display a form for registering a new user.
or POST the form data to actually do the user registration.
'''
context = {'model': model, 'session': model.Session,
'user': c.user or c.author,
'auth_user_obj': c.userobj,
'schema': self._new_form_to_db_schema(),
'save': 'save' in request.params}
c.is_sysadmin = new_authz.is_sysadmin(c.user)
if not c.user or not c.is_sysadmin:
return base.render('user/logout_first.html')
try:
logic.check_access('user_create', context)
except logic.NotAuthorized:
base.abort(401, _('Unauthorized to create a user'))
if context['save'] and not data:
return self._save_new(context)
c.data = data or {}
c.errors = errors or {}
c.error_summary = error_summary or {}
#vars = {'data': data, 'errors': errors, 'error_summary': error_summary}
#c.form = render(self.new_user_form, extra_vars=vars)
#return render('user/new.html')
return base.render('muser/new.html')
def _new_form_to_db_schema(self):
return schema.user_new_form_schema()
def _save_new(self, context):
try:
data_dict = logic.clean_dict(dictization_functions.unflatten(
logic.tuplize_dict(logic.parse_params(request.params))))
context['message'] = data_dict.get('log_message', '')
captcha.check_recaptcha(request)
user = logic.get_action('user_create')(context, data_dict)
except logic.NotAuthorized:
base.abort(401, _('Unauthorized to create user %s') % '')
except logic.NotFound, e:
base.abort(404, _('User not found'))
except dictization_functions.DataError:
base.abort(400, _(u'Integrity Error'))
except captcha.CaptchaError:
error_msg = _(u'Bad Captcha. Please try again.')
h.flash_error(error_msg)
return self.new(data_dict)
except logic.ValidationError, e:
c.errors = e.error_dict
c.error_summary = e.error_summary
return self.new(data_dict, c.errors, c.error_summary)
# success
h.flash_success(_('User "%s" is now registered.') % (data_dict['name']))
#return base.render('user/logout_first.html')
return base.render('muser/new.html')
def edit(self, id=None, data=None, errors=None, error_summary=None):
context = {'save': 'save' in request.params,
'schema': self._edit_form_to_db_schema(),
'model': model, 'session': model.Session,
'user': c.user, 'auth_user_obj': c.userobj
}
if id is None:
base.abort(400, _('No user specified'))
if not new_authz.is_sysadmin(c.user):
base.abort(401, _('User %s not authorized to edit %s') % (str(c.user), id))
data_dict = {'id': id}
try:
logic.check_access('user_update', context, data_dict)
except logic.NotAuthorized:
base.abort(401, _('Unauthorized to edit a user.'))
if (context['save']) and not data:
return self._save_edit(id, context)
try:
old_data = logic.get_action('user_show')(context, data_dict)
schema = self._db_to_edit_form_schema()
if schema:
old_data, errors = validate(old_data, schema)
c.display_name = old_data.get('display_name')
c.user_name = old_data.get('name')
data = data or old_data
except logic.NotAuthorized:
base.abort(401, _('Unauthorized to edit user %s') % '')
except logic.NotFound:
base.abort(404, _('User not found'))
user_obj = context.get('user_obj')
errors = errors or {}
vars = {'data': data, 'errors': errors, 'error_summary': error_summary}
self._setup_template_variables({'model': model,
'session': model.Session,
'user': c.user or c.author},
data_dict)
log.warn(vars.__repr__())
log.warn('muser edit: 1')
c.is_myself = True
c.show_email_notifications = h.asbool(
config.get('ckan.activity_streams_email_notifications'))
log.warn('muser edit: 2')
c.form = base.render('muser/edit_user_form.html', extra_vars=vars)
log.warn('muser edit: 3')
return base.render('muser/edit.html')
def _save_edit(self, id, context):
try:
data_dict = logic.clean_dict(dictization_functions.unflatten(
logic.tuplize_dict(logic.parse_params(request.params))))
context['message'] = data_dict.get('log_message', '')
data_dict['id'] = id
# MOAN: Do I really have to do this here?
if 'activity_streams_email_notifications' not in data_dict:
data_dict['activity_streams_email_notifications'] = False
user = logic.get_action('user_update')(context, data_dict)
h.flash_success(_('Profile updated'))
user_index = h.url_for(controller='ckanext.ksext.controllers.MUser:MUserController', action='index')
h.redirect_to(user_index)
except logic.NotAuthorized:
base.abort(401, _('Unauthorized to edit user %s') % id)
except logic.NotFound, e:
base.abort(404, _('User not found'))
except dictization_functions.DataError:
base.abort(400, _(u'Integrity Error'))
except logic.ValidationError, e:
errors = e.error_dict
error_summary = e.error_summary
return self.edit(id, data_dict, errors, error_summary)
def _setup_template_variables(self, context, data_dict):
c.is_sysadmin = new_authz.is_sysadmin(c.user)
try:
user_dict = logic.get_action('user_show')(context, data_dict)
except logic.NotFound:
base.abort(404, _('User not found'))
except logic.NotAuthorized:
base.abort(401, _('Not authorized to see this page'))
c.user_dict = user_dict
c.is_myself = user_dict['name'] == c.user
c.about_formatted = h.render_markdown(user_dict['about'])
def _db_to_edit_form_schema(self):
'''This is an interface to manipulate data from the database
into a format suitable for the form (optional)'''
def _edit_form_to_db_schema(self):
return schema.user_edit_form_schema()
def delete(self, id):
'''Delete user with id passed as parameter'''
context = {'model': model,
'session': model.Session,
'user': c.user,
'auth_user_obj': c.userobj}
data_dict = {'id': id}
try:
logic.get_action('user_delete')(context, data_dict)
h.flash_success(_('User deleted!'))
user_index = h.url_for(controller='ckanext.ksext.controllers.MUser:MUserController', action='index')
h.redirect_to(user_index)
except logic.NotAuthorized:
msg = _('Unauthorized to delete user with id "{user_id}".')
base.abort(401, msg.format(user_id=id))
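
# Illustrative sketch, not part of the original controller: assuming the
# standard CKAN 2.x IRoutes plugin interface, the actions above could be
# exposed roughly as follows. The plugin class name, route names and URL paths
# are hypothetical; the controller path is the one already used by url_for()
# in the methods above.
import ckan.plugins as plugins


class KsextMUserRoutesSketch(plugins.SingletonPlugin):
    plugins.implements(plugins.IRoutes, inherit=True)

    def before_map(self, route_map):
        ctrl = 'ckanext.ksext.controllers.MUser:MUserController'
        route_map.connect('muser_index', '/muser', controller=ctrl, action='index')
        route_map.connect('muser_edit', '/muser/edit/{id}', controller=ctrl, action='edit')
        route_map.connect('muser_delete', '/muser/delete/{id}', controller=ctrl, action='delete')
        return route_map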
| 0
| 0
| 0
| 10,430
| 0
| 0
| 0
| 25
| 156
|
a156f74140169b47890b5b7f16f2a1189fccdb1f
| 15,863
|
py
|
Python
|
pypeit/core/load.py
|
finagle29/PypeIt
|
418d6d24d24054ad590d2f06c0b4688ea18f492e
|
[
"BSD-3-Clause"
] | null | null | null |
pypeit/core/load.py
|
finagle29/PypeIt
|
418d6d24d24054ad590d2f06c0b4688ea18f492e
|
[
"BSD-3-Clause"
] | null | null | null |
pypeit/core/load.py
|
finagle29/PypeIt
|
418d6d24d24054ad590d2f06c0b4688ea18f492e
|
[
"BSD-3-Clause"
] | null | null | null |
""" Module for loading PypeIt files
"""
import os
import numpy as np
from astropy import units
from astropy.io import fits
from linetools.spectra.xspectrum1d import XSpectrum1D
from linetools.spectra.utils import collate
import linetools.utils
from pypeit import msgs
# TODO I don't think we need this routine
def load_ext_to_array(hdulist, ext_id, ex_value='OPT', flux_value=True, nmaskedge=None):
'''
It will be called by load_1dspec_to_array.
Load one-d spectra from ext_id in the hdulist
Args:
hdulist: FITS HDU list
ext_id: extension name, i.e., 'SPAT1073-SLIT0001-DET03', 'OBJID0001-ORDER0003', 'OBJID0001-ORDER0002-DET01'
ex_value: 'OPT' or 'BOX'
flux_value: if True load fluxed data, else load unfluxed data
Returns:
tuple: Returns wave, flux, ivar, mask
'''
if (ex_value != 'OPT') and (ex_value != 'BOX'):
msgs.error('{:} is not recognized. Please change to either BOX or OPT.'.format(ex_value))
# get the order/slit information
ntrace0 = np.size(hdulist)-1
idx_names = []
for ii in range(ntrace0):
idx_names.append(hdulist[ii+1].name) # idx name
# Initialize ext
ext = None
for indx in (idx_names):
if ext_id in indx:
ext = indx
if ext is None:
msgs.error('Can not find extension {:}.'.format(ext_id))
else:
hdu_iexp = hdulist[ext]
wave = hdu_iexp.data['{:}_WAVE'.format(ex_value)]
mask = hdu_iexp.data['{:}_MASK'.format(ex_value)]
# Mask Edges
if nmaskedge is not None:
mask[:int(nmaskedge)] = False
mask[-int(nmaskedge):] = False
if flux_value:
flux = hdu_iexp.data['{:}_FLAM'.format(ex_value)]
ivar = hdu_iexp.data['{:}_FLAM_IVAR'.format(ex_value)]
else:
msgs.warn('Loading unfluxed spectra')
flux = hdu_iexp.data['{:}_COUNTS'.format(ex_value)]
ivar = hdu_iexp.data['{:}_COUNTS_IVAR'.format(ex_value)]
return wave, flux, ivar, mask
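
# Illustrative usage sketch, not part of the original module. The file name
# and extension id below are hypothetical; the call simply mirrors the
# signature of load_ext_to_array() defined above.
def _demo_load_ext_to_array(spec1dfile='spec1d_example.fits',
                            ext_id='SPAT1073-SLIT0001-DET03'):
    """Minimal sketch: pull one extension out of a hypothetical spec1d file."""
    hdulist = fits.open(spec1dfile)
    wave, flux, ivar, mask = load_ext_to_array(hdulist, ext_id,
                                               ex_value='OPT', flux_value=True)
    # Return only the unmasked pixels for convenience
    return wave[mask], flux[mask], ivar[mask]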
# TODO merge this with unpack orders
def load_1dspec_to_array(fnames, gdobj=None, order=None, ex_value='OPT', flux_value=True, nmaskedge=None):
'''
Load the spectra from the 1d fits file into arrays.
If Echelle, you need to specify which order you want to load.
It can NOT load all orders for Echelle data.
Args:
fnames (list): 1D spectra fits file(s)
gdobj (list): extension name (longslit/multislit) or objID (Echelle)
order (None or int): order number
ex_value (str): 'OPT' or 'BOX'
flux_value (bool): if True it will load fluxed spectra, otherwise load counts
Returns:
tuple: Returns the following:
- waves (ndarray): wavelength array of your spectra, see
below for the shape information of this array.
- fluxes (ndarray): flux array of your spectra
- ivars (ndarray): ivars of your spectra
- masks (ndarray, bool): mask array of your spectra
The shapes of all returns are exactly the same.
- Case 1: np.size(fnames)=np.size(gdobj)=1, order=None for
Longslit or order=N (an int number) for Echelle
Longslit/single order for a single fits file, they are 1D
arrays with the size equal to Nspec
- Case 2: np.size(fnames)=np.size(gdobj)>1, order=None for
Longslit or order=N (an int number) for Echelle
Longslit/single order for a list of fits files, 2D array,
the shapes are Nspec by Nexp
- Case 3: np.size(fnames)=np.size(gdobj)=1, order=None All
Echelle orders for a single fits file, 2D array, the
shapes are Nspec by Norders
- Case 4: np.size(fnames)=np.size(gdobj)>1, order=None All
Echelle orders for a list of fits files, 3D array, the
              shapes are Nspec by Norders by Nexp
'''
# read in the first fits file
if isinstance(fnames, (list, np.ndarray)):
nexp = np.size(fnames)
fname0 = fnames[0]
elif isinstance(fnames, str):
nexp = 1
fname0 = fnames
hdulist = fits.open(fname0)
header = hdulist[0].header
npix = header['NPIX']
pypeline = header['PYPELINE']
# get the order/slit information
ntrace0 = np.size(hdulist)-1
idx_orders = []
for ii in range(ntrace0):
idx_orders.append(int(hdulist[ii+1].name.split('-')[1][5:])) # slit ID or order ID
if pypeline == "Echelle":
## np.unique automatically sort the returned array which is not what I want!!!
## order_vec = np.unique(idx_orders)
dum, order_vec_idx = np.unique(idx_orders, return_index=True)
order_vec = np.array(idx_orders)[np.sort(order_vec_idx)]
norder = np.size(order_vec)
else:
norder = 1
    #TODO This is unnecessarily complicated. The nexp=1 case does the same operations as the nexp > 1 case. Refactor
# this so that it just does the same set of operations once and then reshapes the array at the end to give you what
# you want. Let's merge this with unpack orders
## Loading data from a single fits file
if nexp == 1:
# initialize arrays
if (order is None) and (pypeline == "Echelle"):
waves = np.zeros((npix, norder,nexp))
fluxes = np.zeros_like(waves)
ivars = np.zeros_like(waves)
masks = np.zeros_like(waves, dtype=bool)
for ii, iord in enumerate(order_vec):
ext_id = gdobj[0]+'-ORDER{:04d}'.format(iord)
wave_iord, flux_iord, ivar_iord, mask_iord = load_ext_to_array(hdulist, ext_id, ex_value=ex_value,
flux_value=flux_value, nmaskedge=nmaskedge)
waves[:,ii,0] = wave_iord
fluxes[:,ii,0] = flux_iord
ivars[:,ii,0] = ivar_iord
masks[:,ii,0] = mask_iord
else:
if pypeline == "Echelle":
ext_id = gdobj[0]+'-ORDER{:04d}'.format(order)
else:
ext_id = gdobj[0]
waves, fluxes, ivars, masks = load_ext_to_array(hdulist, ext_id, ex_value=ex_value, flux_value=flux_value,
nmaskedge=nmaskedge)
## Loading data from a list of fits files
else:
# initialize arrays
if (order is None) and (pypeline == "Echelle"):
# store all orders into one single array
waves = np.zeros((npix, norder, nexp))
else:
# store a specific order or longslit
waves = np.zeros((npix, nexp))
fluxes = np.zeros_like(waves)
ivars = np.zeros_like(waves)
masks = np.zeros_like(waves,dtype=bool)
for iexp in range(nexp):
hdulist_iexp = fits.open(fnames[iexp])
            # ToDo: The following part can be removed if all data are reduced using the latest pipeline
if pypeline == "Echelle":
ntrace = np.size(hdulist_iexp) - 1
idx_orders = []
for ii in range(ntrace):
idx_orders.append(int(hdulist_iexp[ii + 1].name.split('-')[1][5:])) # slit ID or order ID
dum, order_vec_idx = np.unique(idx_orders, return_index=True)
order_vec = np.array(idx_orders)[np.sort(order_vec_idx)]
            # ToDo: The above part can be removed if all data are reduced using the latest pipeline
if (order is None) and (pypeline == "Echelle"):
for ii, iord in enumerate(order_vec):
ext_id = gdobj[iexp]+'-ORDER{:04d}'.format(iord)
wave_iord, flux_iord, ivar_iord, mask_iord = load_ext_to_array(hdulist_iexp, ext_id, ex_value=ex_value,
nmaskedge = nmaskedge, flux_value=flux_value)
waves[:,ii,iexp] = wave_iord
fluxes[:,ii,iexp] = flux_iord
ivars[:,ii,iexp] = ivar_iord
masks[:,ii,iexp] = mask_iord
else:
if pypeline == "Echelle":
ext_id = gdobj[iexp]+'-ORDER{:04d}'.format(order)
else:
ext_id = gdobj[iexp]
wave, flux, ivar, mask = load_ext_to_array(hdulist_iexp, ext_id, ex_value=ex_value, flux_value=flux_value,
nmaskedge=nmaskedge)
waves[:, iexp] = wave
fluxes[:, iexp] = flux
ivars[:, iexp] = ivar
masks[:, iexp] = mask
return waves, fluxes, ivars, masks, header
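
# Illustrative usage sketch, not part of the original module. File names and
# object ids are hypothetical; the shapes follow "Case 2" of the docstring
# above (several longslit exposures of one object give Nspec by Nexp arrays).
def _demo_load_1dspec_to_array():
    """Minimal sketch: stack two hypothetical longslit exposures."""
    fnames = ['spec1d_exp1.fits', 'spec1d_exp2.fits']        # hypothetical files
    gdobj = ['SPAT1073-SLIT0001-DET03'] * len(fnames)        # hypothetical ids
    waves, fluxes, ivars, masks, header = load_1dspec_to_array(
        fnames, gdobj=gdobj, order=None, ex_value='OPT', flux_value=True)
    return waves.shape                                       # (NPIX, 2) here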
def load_spec_order(fname,norder, objid=None, order=None, extract='OPT', flux=True):
"""
    Loading a single order spectrum from a PypeIt 1D spectrum fits file.
    It will be called by ech_load_spec.
Args:
fname (str) : The file name of your spec1d file
objid (str) : The id of the object you want to load. (default is the first object)
order (int) : which order you want to load (default is None, loading all orders)
extract (str) : 'OPT' or 'BOX'
flux (bool) : default is True, loading fluxed spectra
Returns:
XSpectrum1D: spectrum_out
"""
if objid is None:
objid = 0
if order is None:
msgs.error('Please specify which order you want to load')
# read extension name into a list
primary_header = fits.getheader(fname, 0)
nspec = primary_header['NSPEC']
extnames = [primary_header['EXT0001']] * nspec
for kk in range(nspec):
extnames[kk] = primary_header['EXT' + '{0:04}'.format(kk + 1)]
# Figure out which extension is the required data
extnames_array = np.reshape(np.array(extnames),(norder,int(nspec/norder)))
extnames_good = extnames_array[:,int(objid[3:])-1]
extname = extnames_good[order]
try:
exten = extnames.index(extname) + 1
msgs.info("Loading extension {:s} of spectrum {:s}".format(extname, fname))
except:
msgs.error("Spectrum {:s} does not contain {:s} extension".format(fname, extname))
spectrum = load_1dspec(fname, exten=exten, extract=extract, flux=flux)
# Polish a bit -- Deal with NAN, inf, and *very* large values that will exceed
# the floating point precision of float32 for var which is sig**2 (i.e. 1e38)
bad_flux = np.any([np.isnan(spectrum.flux), np.isinf(spectrum.flux),
np.abs(spectrum.flux) > 1e30,
spectrum.sig ** 2 > 1e10,
], axis=0)
# Sometimes Echelle spectra have zero wavelength
bad_wave = spectrum.wavelength < 1000.0*units.AA
bad_all = bad_flux + bad_wave
## trim bad part
wave_out,flux_out,sig_out = spectrum.wavelength[~bad_all],spectrum.flux[~bad_all],spectrum.sig[~bad_all]
spectrum_out = XSpectrum1D.from_tuple((wave_out,flux_out,sig_out), verbose=False)
#if np.sum(bad_flux):
# msgs.warn("There are some bad flux values in this spectrum. Will zero them out and mask them (not ideal)")
# spectrum.data['flux'][spectrum.select][bad_flux] = 0.
# spectrum.data['sig'][spectrum.select][bad_flux] = 0.
return spectrum_out
def ech_load_spec(files,objid=None,order=None,extract='OPT',flux=True):
"""
Loading Echelle spectra from a list of PypeIt 1D spectrum fits files
Args:
        files (list) : The list of file names of your spec1d files
objid (str) : The id (one per fits file) of the object you want to load. (default is the first object)
order (int) : which order you want to load (default is None, loading all orders)
extract (str) : 'OPT' or 'BOX'
flux (bool) : default is True, loading fluxed spectra
Returns:
XSpectrum1D: spectrum_out
"""
nfiles = len(files)
if objid is None:
objid = ['OBJ0000'] * nfiles
elif len(objid) == 1:
objid = objid * nfiles
elif len(objid) != nfiles:
msgs.error('The length of objid should be either 1 or equal to the number of spectra files.')
fname = files[0]
ext_first = fits.getheader(fname, 1)
ext_final = fits.getheader(fname, -1)
norder = abs(ext_final['ECHORDER'] - ext_first['ECHORDER']) + 1
msgs.info('spectrum {:s} has {:d} orders'.format(fname, norder))
if norder <= 1:
        msgs.error('The number of orders has to be greater than one for echelle. Longslit data?')
# Load spectra
spectra_list = []
for ii, fname in enumerate(files):
if order is None:
            msgs.info('Loading all orders into a giant spectrum')
for iord in range(norder):
spectrum = load_spec_order(fname, norder, objid=objid[ii],order=iord,extract=extract,flux=flux)
# Append
spectra_list.append(spectrum)
elif order >= norder:
            msgs.error('order number cannot be greater than the total number of orders')
else:
spectrum = load_spec_order(fname,norder, objid=objid[ii], order=order, extract=extract, flux=flux)
# Append
spectra_list.append(spectrum)
# Join into one XSpectrum1D object
spectra = collate(spectra_list)
# Return
return spectra
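
# Illustrative usage sketch, not part of the original module. The file list
# and objid values are hypothetical; the call collates every order of two
# Echelle exposures into a single XSpectrum1D, as ech_load_spec() above does.
def _demo_ech_load_spec():
    """Minimal sketch: collate all orders from two hypothetical spec1d files."""
    files = ['spec1d_exp1.fits', 'spec1d_exp2.fits']         # hypothetical files
    spectra = ech_load_spec(files, objid=['OBJ0001', 'OBJ0001'],
                            order=None, extract='OPT', flux=True)
    return spectra.nspec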
def load_sens_dict(filename):
"""
Load a full (all slit) wv_calib dict
Includes converting the JSON lists of particular items into ndarray
Fills self.wv_calib and self.par
Args:
filename (str): Master file
Returns:
dict or None: self.wv_calib
"""
# Does the master file exist?
if not os.path.isfile(filename):
msgs.warn("No sensfunc file found with filename {:s}".format(filename))
return None
else:
msgs.info("Loading sensfunc from file {:s}".format(filename))
sens_dict = linetools.utils.loadjson(filename)
# Recast a few items as arrays
for key in sens_dict.keys():
try:
int(key)
except ValueError:
continue
else:
for tkey in sens_dict[key].keys():
if isinstance(sens_dict[key][tkey], list):
sens_dict[key][tkey] = np.array(sens_dict[key][tkey])
return sens_dict
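
# Illustrative usage sketch, not part of the original module. The file name is
# hypothetical; load_sens_dict() above returns None for a missing file, so the
# caller should guard for that case.
def _demo_load_sens_dict(sensfile='sensfunc_example.json'):
    """Minimal sketch: read a sensitivity-function dict and list its keys."""
    sens_dict = load_sens_dict(sensfile)
    return [] if sens_dict is None else list(sens_dict.keys())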
def load_multiext_fits(filename, ext):
"""
Load data and primary header from a multi-extension FITS file
Args:
filename (:obj:`str`):
Name of the file.
ext (:obj:`str`, :obj:`int`, :obj:`list`):
One or more file extensions with data to return. The
extension can be designated by its 0-indexed integer
number or its name.
Returns:
tuple: Returns the image data from each provided extension.
If return_header is true, the primary header is also
returned.
"""
# Format the input and set the tuple for an empty return
_ext = ext if isinstance(ext, list) else [ext]
n_ext = len(_ext)
# Open the file
hdu = fits.open(filename)
head0 = hdu[0].header
# Only one extension
if n_ext == 1:
data = hdu[_ext[0]].data.astype(np.float)
return data, head0
# Multiple extensions
data = tuple([None if hdu[k].data is None else hdu[k].data.astype(np.float) for k in _ext])
# Return
return data+(head0,)
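
# Illustrative usage sketch, not part of the original module. The file name
# and extension names are hypothetical; with a list of extensions the function
# above returns one array per extension followed by the primary header.
def _demo_load_multiext_fits(filename='frame_example.fits'):
    """Minimal sketch: read two named extensions plus the primary header."""
    sci, err, head0 = load_multiext_fits(filename, ['SCI', 'ERR'])
    return sci.shape, err.shape, head0.get('EXPTIME')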
| 38.040767
| 128
| 0.60348
|
""" Module for loading PypeIt files
"""
import os
import warnings
import numpy as np
from astropy import units
from astropy.time import Time
from astropy.io import fits
from astropy.table import Table
from linetools.spectra.xspectrum1d import XSpectrum1D
from linetools.spectra.utils import collate
import linetools.utils
from pypeit import msgs
from IPython import embed
from pypeit.core import parse
# TODO I don't think we need this routine
def load_ext_to_array(hdulist, ext_id, ex_value='OPT', flux_value=True, nmaskedge=None):
'''
It will be called by load_1dspec_to_array.
Load one-d spectra from ext_id in the hdulist
Args:
hdulist: FITS HDU list
ext_id: extension name, i.e., 'SPAT1073-SLIT0001-DET03', 'OBJID0001-ORDER0003', 'OBJID0001-ORDER0002-DET01'
ex_value: 'OPT' or 'BOX'
flux_value: if True load fluxed data, else load unfluxed data
Returns:
tuple: Returns wave, flux, ivar, mask
'''
if (ex_value != 'OPT') and (ex_value != 'BOX'):
msgs.error('{:} is not recognized. Please change to either BOX or OPT.'.format(ex_value))
# get the order/slit information
ntrace0 = np.size(hdulist)-1
idx_names = []
for ii in range(ntrace0):
idx_names.append(hdulist[ii+1].name) # idx name
# Initialize ext
ext = None
for indx in (idx_names):
if ext_id in indx:
ext = indx
if ext is None:
msgs.error('Can not find extension {:}.'.format(ext_id))
else:
hdu_iexp = hdulist[ext]
wave = hdu_iexp.data['{:}_WAVE'.format(ex_value)]
mask = hdu_iexp.data['{:}_MASK'.format(ex_value)]
# Mask Edges
if nmaskedge is not None:
mask[:int(nmaskedge)] = False
mask[-int(nmaskedge):] = False
if flux_value:
flux = hdu_iexp.data['{:}_FLAM'.format(ex_value)]
ivar = hdu_iexp.data['{:}_FLAM_IVAR'.format(ex_value)]
else:
msgs.warn('Loading unfluxed spectra')
flux = hdu_iexp.data['{:}_COUNTS'.format(ex_value)]
ivar = hdu_iexp.data['{:}_COUNTS_IVAR'.format(ex_value)]
return wave, flux, ivar, mask
# TODO merge this with unpack orders
def load_1dspec_to_array(fnames, gdobj=None, order=None, ex_value='OPT', flux_value=True, nmaskedge=None):
'''
Load the spectra from the 1d fits file into arrays.
If Echelle, you need to specify which order you want to load.
It can NOT load all orders for Echelle data.
Args:
fnames (list): 1D spectra fits file(s)
gdobj (list): extension name (longslit/multislit) or objID (Echelle)
order (None or int): order number
ex_value (str): 'OPT' or 'BOX'
flux_value (bool): if True it will load fluxed spectra, otherwise load counts
Returns:
tuple: Returns the following:
- waves (ndarray): wavelength array of your spectra, see
below for the shape information of this array.
- fluxes (ndarray): flux array of your spectra
- ivars (ndarray): ivars of your spectra
- masks (ndarray, bool): mask array of your spectra
The shapes of all returns are exactly the same.
- Case 1: np.size(fnames)=np.size(gdobj)=1, order=None for
Longslit or order=N (an int number) for Echelle
Longslit/single order for a single fits file, they are 1D
arrays with the size equal to Nspec
- Case 2: np.size(fnames)=np.size(gdobj)>1, order=None for
Longslit or order=N (an int number) for Echelle
Longslit/single order for a list of fits files, 2D array,
the shapes are Nspec by Nexp
- Case 3: np.size(fnames)=np.size(gdobj)=1, order=None All
Echelle orders for a single fits file, 2D array, the
shapes are Nspec by Norders
- Case 4: np.size(fnames)=np.size(gdobj)>1, order=None All
Echelle orders for a list of fits files, 3D array, the
              shapes are Nspec by Norders by Nexp
'''
# read in the first fits file
if isinstance(fnames, (list, np.ndarray)):
nexp = np.size(fnames)
fname0 = fnames[0]
elif isinstance(fnames, str):
nexp = 1
fname0 = fnames
hdulist = fits.open(fname0)
header = hdulist[0].header
npix = header['NPIX']
pypeline = header['PYPELINE']
# get the order/slit information
ntrace0 = np.size(hdulist)-1
idx_orders = []
for ii in range(ntrace0):
idx_orders.append(int(hdulist[ii+1].name.split('-')[1][5:])) # slit ID or order ID
if pypeline == "Echelle":
## np.unique automatically sort the returned array which is not what I want!!!
## order_vec = np.unique(idx_orders)
dum, order_vec_idx = np.unique(idx_orders, return_index=True)
order_vec = np.array(idx_orders)[np.sort(order_vec_idx)]
norder = np.size(order_vec)
else:
norder = 1
    #TODO This is unnecessarily complicated. The nexp=1 case does the same operations as the nexp > 1 case. Refactor
# this so that it just does the same set of operations once and then reshapes the array at the end to give you what
# you want. Let's merge this with unpack orders
## Loading data from a single fits file
if nexp == 1:
# initialize arrays
if (order is None) and (pypeline == "Echelle"):
waves = np.zeros((npix, norder,nexp))
fluxes = np.zeros_like(waves)
ivars = np.zeros_like(waves)
masks = np.zeros_like(waves, dtype=bool)
for ii, iord in enumerate(order_vec):
ext_id = gdobj[0]+'-ORDER{:04d}'.format(iord)
wave_iord, flux_iord, ivar_iord, mask_iord = load_ext_to_array(hdulist, ext_id, ex_value=ex_value,
flux_value=flux_value, nmaskedge=nmaskedge)
waves[:,ii,0] = wave_iord
fluxes[:,ii,0] = flux_iord
ivars[:,ii,0] = ivar_iord
masks[:,ii,0] = mask_iord
else:
if pypeline == "Echelle":
ext_id = gdobj[0]+'-ORDER{:04d}'.format(order)
else:
ext_id = gdobj[0]
waves, fluxes, ivars, masks = load_ext_to_array(hdulist, ext_id, ex_value=ex_value, flux_value=flux_value,
nmaskedge=nmaskedge)
## Loading data from a list of fits files
else:
# initialize arrays
if (order is None) and (pypeline == "Echelle"):
# store all orders into one single array
waves = np.zeros((npix, norder, nexp))
else:
# store a specific order or longslit
waves = np.zeros((npix, nexp))
fluxes = np.zeros_like(waves)
ivars = np.zeros_like(waves)
masks = np.zeros_like(waves,dtype=bool)
for iexp in range(nexp):
hdulist_iexp = fits.open(fnames[iexp])
            # ToDo: The following part can be removed if all data are reduced using the latest pipeline
if pypeline == "Echelle":
ntrace = np.size(hdulist_iexp) - 1
idx_orders = []
for ii in range(ntrace):
idx_orders.append(int(hdulist_iexp[ii + 1].name.split('-')[1][5:])) # slit ID or order ID
dum, order_vec_idx = np.unique(idx_orders, return_index=True)
order_vec = np.array(idx_orders)[np.sort(order_vec_idx)]
            # ToDo: The above part can be removed if all data are reduced using the latest pipeline
if (order is None) and (pypeline == "Echelle"):
for ii, iord in enumerate(order_vec):
ext_id = gdobj[iexp]+'-ORDER{:04d}'.format(iord)
wave_iord, flux_iord, ivar_iord, mask_iord = load_ext_to_array(hdulist_iexp, ext_id, ex_value=ex_value,
nmaskedge = nmaskedge, flux_value=flux_value)
waves[:,ii,iexp] = wave_iord
fluxes[:,ii,iexp] = flux_iord
ivars[:,ii,iexp] = ivar_iord
masks[:,ii,iexp] = mask_iord
else:
if pypeline == "Echelle":
ext_id = gdobj[iexp]+'-ORDER{:04d}'.format(order)
else:
ext_id = gdobj[iexp]
wave, flux, ivar, mask = load_ext_to_array(hdulist_iexp, ext_id, ex_value=ex_value, flux_value=flux_value,
nmaskedge=nmaskedge)
waves[:, iexp] = wave
fluxes[:, iexp] = flux
ivars[:, iexp] = ivar
masks[:, iexp] = mask
return waves, fluxes, ivars, masks, header
def load_spec_order(fname,norder, objid=None, order=None, extract='OPT', flux=True):
"""
    Loading a single order spectrum from a PypeIt 1D spectrum fits file.
    It will be called by ech_load_spec.
Args:
fname (str) : The file name of your spec1d file
objid (str) : The id of the object you want to load. (default is the first object)
order (int) : which order you want to load (default is None, loading all orders)
extract (str) : 'OPT' or 'BOX'
flux (bool) : default is True, loading fluxed spectra
Returns:
XSpectrum1D: spectrum_out
"""
if objid is None:
objid = 0
if order is None:
msgs.error('Please specify which order you want to load')
# read extension name into a list
primary_header = fits.getheader(fname, 0)
nspec = primary_header['NSPEC']
extnames = [primary_header['EXT0001']] * nspec
for kk in range(nspec):
extnames[kk] = primary_header['EXT' + '{0:04}'.format(kk + 1)]
# Figure out which extension is the required data
extnames_array = np.reshape(np.array(extnames),(norder,int(nspec/norder)))
extnames_good = extnames_array[:,int(objid[3:])-1]
extname = extnames_good[order]
try:
exten = extnames.index(extname) + 1
msgs.info("Loading extension {:s} of spectrum {:s}".format(extname, fname))
except:
msgs.error("Spectrum {:s} does not contain {:s} extension".format(fname, extname))
spectrum = load_1dspec(fname, exten=exten, extract=extract, flux=flux)
# Polish a bit -- Deal with NAN, inf, and *very* large values that will exceed
# the floating point precision of float32 for var which is sig**2 (i.e. 1e38)
bad_flux = np.any([np.isnan(spectrum.flux), np.isinf(spectrum.flux),
np.abs(spectrum.flux) > 1e30,
spectrum.sig ** 2 > 1e10,
], axis=0)
# Sometimes Echelle spectra have zero wavelength
bad_wave = spectrum.wavelength < 1000.0*units.AA
bad_all = bad_flux + bad_wave
## trim bad part
wave_out,flux_out,sig_out = spectrum.wavelength[~bad_all],spectrum.flux[~bad_all],spectrum.sig[~bad_all]
spectrum_out = XSpectrum1D.from_tuple((wave_out,flux_out,sig_out), verbose=False)
#if np.sum(bad_flux):
# msgs.warn("There are some bad flux values in this spectrum. Will zero them out and mask them (not ideal)")
# spectrum.data['flux'][spectrum.select][bad_flux] = 0.
# spectrum.data['sig'][spectrum.select][bad_flux] = 0.
return spectrum_out
def ech_load_spec(files,objid=None,order=None,extract='OPT',flux=True):
"""
Loading Echelle spectra from a list of PypeIt 1D spectrum fits files
Args:
        files (list) : The list of file names of your spec1d files
objid (str) : The id (one per fits file) of the object you want to load. (default is the first object)
order (int) : which order you want to load (default is None, loading all orders)
extract (str) : 'OPT' or 'BOX'
flux (bool) : default is True, loading fluxed spectra
Returns:
XSpectrum1D: spectrum_out
"""
nfiles = len(files)
if objid is None:
objid = ['OBJ0000'] * nfiles
elif len(objid) == 1:
objid = objid * nfiles
elif len(objid) != nfiles:
msgs.error('The length of objid should be either 1 or equal to the number of spectra files.')
fname = files[0]
ext_first = fits.getheader(fname, 1)
ext_final = fits.getheader(fname, -1)
norder = abs(ext_final['ECHORDER'] - ext_first['ECHORDER']) + 1
msgs.info('spectrum {:s} has {:d} orders'.format(fname, norder))
if norder <= 1:
        msgs.error('The number of orders has to be greater than one for echelle. Longslit data?')
# Load spectra
spectra_list = []
for ii, fname in enumerate(files):
if order is None:
            msgs.info('Loading all orders into a giant spectrum')
for iord in range(norder):
spectrum = load_spec_order(fname, norder, objid=objid[ii],order=iord,extract=extract,flux=flux)
# Append
spectra_list.append(spectrum)
elif order >= norder:
            msgs.error('order number cannot be greater than the total number of orders')
else:
spectrum = load_spec_order(fname,norder, objid=objid[ii], order=order, extract=extract, flux=flux)
# Append
spectra_list.append(spectrum)
# Join into one XSpectrum1D object
spectra = collate(spectra_list)
# Return
return spectra
def load_sens_dict(filename):
"""
Load a full (all slit) wv_calib dict
Includes converting the JSON lists of particular items into ndarray
Fills self.wv_calib and self.par
Args:
filename (str): Master file
Returns:
dict or None: self.wv_calib
"""
# Does the master file exist?
if not os.path.isfile(filename):
msgs.warn("No sensfunc file found with filename {:s}".format(filename))
return None
else:
msgs.info("Loading sensfunc from file {:s}".format(filename))
sens_dict = linetools.utils.loadjson(filename)
# Recast a few items as arrays
for key in sens_dict.keys():
try:
int(key)
except ValueError:
continue
else:
for tkey in sens_dict[key].keys():
if isinstance(sens_dict[key][tkey], list):
sens_dict[key][tkey] = np.array(sens_dict[key][tkey])
return sens_dict
def waveids(fname):
infile = fits.open(fname)
pixels=[]
msgs.info("Loading fitted arc lines")
try:
o = 1
while True:
pixels.append(infile[o].data.astype(np.float))
o+=1
except:
pass
return pixels
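
# Illustrative usage sketch, not part of the original module. The file name is
# hypothetical; waveids() above simply walks the extensions of a wavelength
# calibration file until it runs out, returning one pixel array per extension.
def _demo_waveids(fname='wavecalib_example.fits'):
    """Minimal sketch: count the fitted arc-line arrays in a calib file."""
    return len(waveids(fname))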
def load_multiext_fits(filename, ext):
"""
Load data and primary header from a multi-extension FITS file
Args:
filename (:obj:`str`):
Name of the file.
ext (:obj:`str`, :obj:`int`, :obj:`list`):
One or more file extensions with data to return. The
extension can be designated by its 0-indexed integer
number or its name.
Returns:
tuple: Returns the image data from each provided extension.
If return_header is true, the primary header is also
returned.
"""
# Format the input and set the tuple for an empty return
_ext = ext if isinstance(ext, list) else [ext]
n_ext = len(_ext)
# Open the file
hdu = fits.open(filename)
head0 = hdu[0].header
# Only one extension
if n_ext == 1:
data = hdu[_ext[0]].data.astype(np.float)
return data, head0
# Multiple extensions
data = tuple([None if hdu[k].data is None else hdu[k].data.astype(np.float) for k in _ext])
# Return
return data+(head0,)
| 0
| 0
| 0
| 0
| 0
| 246
| 0
| 24
| 133
|
a43b1675c8e19994c0be2bd6717197a732815f09
| 336
|
py
|
Python
|
welltory/admin.py
|
vasmedvedev/welltory_test
|
9dd1ea35850916a2203241798d0acd9415d762b7
|
[
"MIT"
] | null | null | null |
welltory/admin.py
|
vasmedvedev/welltory_test
|
9dd1ea35850916a2203241798d0acd9415d762b7
|
[
"MIT"
] | null | null | null |
welltory/admin.py
|
vasmedvedev/welltory_test
|
9dd1ea35850916a2203241798d0acd9415d762b7
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from welltory import models
admin.site.register(models.Sleep, SleepAdmin)
admin.site.register(models.Steps, StepsAdmin)
admin.site.register(models.Geo, GeoAdmin)
| 16.8
| 45
| 0.779762
|
from django.contrib import admin
from welltory import models
class SleepAdmin(admin.ModelAdmin):
pass
class StepsAdmin(admin.ModelAdmin):
pass
class GeoAdmin(admin.ModelAdmin):
pass
admin.site.register(models.Sleep, SleepAdmin)
admin.site.register(models.Steps, StepsAdmin)
admin.site.register(models.Geo, GeoAdmin)
| 0
| 0
| 0
| 67
| 0
| 0
| 0
| 0
| 69
|
14fe2fcea35b1656d97913ad5e54df7ffd928511
| 125
|
py
|
Python
|
app/recipies/admin.py
|
sourabhsinha396/Rest-api-recipie
|
a9937d5119c706d1193654ece280ed46b599a344
|
[
"MIT"
] | null | null | null |
app/recipies/admin.py
|
sourabhsinha396/Rest-api-recipie
|
a9937d5119c706d1193654ece280ed46b599a344
|
[
"MIT"
] | 9
|
2021-03-30T14:10:47.000Z
|
2021-09-22T19:29:50.000Z
|
app/recipies/admin.py
|
sourabhsinha396/Rest-api-recipie
|
a9937d5119c706d1193654ece280ed46b599a344
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Tag, Ingredient
admin.site.register(Tag)
admin.site.register(Ingredient)
| 25
| 34
| 0.832
|
from django.contrib import admin
from .models import Tag,Ingredient
admin.site.register(Tag)
admin.site.register(Ingredient)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| -1
| 0
|
1471e53db5b729dc1c33d31786115ca692ce7cf8
| 671
|
py
|
Python
|
RunProject.py
|
badeaadi/Colorization
|
b18d4267aa611b7e87ddfe8a32fef834259e8a45
|
[
"MIT"
] | null | null | null |
RunProject.py
|
badeaadi/Colorization
|
b18d4267aa611b7e87ddfe8a32fef834259e8a45
|
[
"MIT"
] | null | null | null |
RunProject.py
|
badeaadi/Colorization
|
b18d4267aa611b7e87ddfe8a32fef834259e8a45
|
[
"MIT"
] | null | null | null |
"""
PROJECT
Image colorization using an autoencoder and machine learning
Badea Adrian Catalin, group 334, year III, FMI
"""
data_set: DataSet = DataSet()
data_set.scene_name = 'forest'
ae_model: AeModel = AeModel(data_set)
ae_model.define_the_model()
ae_model.compile_the_model()
ae_model.train_the_model()
ae_model.evaluate_the_model()
data_set: DataSet = DataSet()
data_set.scene_name = 'coast'
ae_model: AeModel = AeModel(data_set)
ae_model.define_the_model()
ae_model.compile_the_model()
ae_model.train_the_model()
ae_model.evaluate_the_model()
| 19.735294
| 68
| 0.730253
|
"""
PROJECT
Image colorization using an autoencoder and machine learning
Badea Adrian Catalin, group 334, year III, FMI
"""
import pdb
from DataSet import *
from AeModel import *
data_set: DataSet = DataSet()
data_set.scene_name = 'forest'
ae_model: AeModel = AeModel(data_set)
ae_model.define_the_model()
ae_model.compile_the_model()
ae_model.train_the_model()
ae_model.evaluate_the_model()
data_set: DataSet = DataSet()
data_set.scene_name = 'coast'
ae_model: AeModel = AeModel(data_set)
ae_model.define_the_model()
ae_model.compile_the_model()
ae_model.train_the_model()
ae_model.evaluate_the_model()
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| -11
| 71
|
8f2f930b5a2dacd7576259cb4a0ce498b4941c73
| 359
|
py
|
Python
|
ch02/similarity.py
|
YaGiNA/DLfS2
|
3dbaba7a62c198b50849de2e3b74d92897a4cae7
|
[
"MIT"
] | 1
|
2019-05-15T09:17:23.000Z
|
2019-05-15T09:17:23.000Z
|
ch02/similarity.py
|
YaGiNA/DLfS2
|
3dbaba7a62c198b50849de2e3b74d92897a4cae7
|
[
"MIT"
] | null | null | null |
ch02/similarity.py
|
YaGiNA/DLfS2
|
3dbaba7a62c198b50849de2e3b74d92897a4cae7
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append("..")
from common.util import preprocess, create_co_matrix, cos_similarity
text = "You say goodbye and I say hello."
corpus, word_to_id, id_to_word = preprocess(text)
vocab_size = len(word_to_id)
C = create_co_matrix(corpus, vocab_size)
c0 = C[word_to_id["you"]]
c1 = C[word_to_id["i"]]
print(cos_similarity(c0, c1))
| 25.642857
| 69
| 0.721448
|
import sys
sys.path.append("..")
from common.util import preprocess, create_co_matrix, cos_similarity
text = "You say goodbye and I say hello."
corpus, word_to_id, id_to_word = preprocess(text)
vocab_size = len(word_to_id)
C = create_co_matrix(corpus, vocab_size)
c0 = C[word_to_id["you"]]
c1 = C[word_to_id["i"]]
print(cos_similarity(c0, c1))
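
# Illustrative sketch, not part of the original script. cos_similarity comes
# from the book's common.util; conceptually it returns the cosine of the angle
# between the two co-occurrence vectors, roughly as below (eps avoids a
# division by zero for all-zero vectors).
def _cos_similarity_sketch(x, y, eps=1e-8):
    import numpy as np
    nx = x / (np.sqrt(np.sum(x ** 2)) + eps)
    ny = y / (np.sqrt(np.sum(y ** 2)) + eps)
    return np.dot(nx, ny)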
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
69b5c654c37a2e2d72fe9be3cc0958d62171dbd6
| 9,814
|
py
|
Python
|
pydht/pydht.py
|
scottcunningham/pydht
|
9a2ecfc8da3794b2dc6587d17b8d51337a8e7df4
|
[
"BSD-2-Clause-FreeBSD"
] | 1
|
2015-01-04T07:02:54.000Z
|
2015-01-04T07:02:54.000Z
|
pydht/pydht.py
|
scottcunningham/pydht
|
9a2ecfc8da3794b2dc6587d17b8d51337a8e7df4
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
pydht/pydht.py
|
scottcunningham/pydht
|
9a2ecfc8da3794b2dc6587d17b8d51337a8e7df4
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
k = 20
alpha = 3
id_bits = 128
iteration_sleep = 1
keysize = 2048
DEFAULT_TTL = 604800 # = 7 days, in seconds.
| 39.099602
| 151
| 0.635011
|
import math
import json
import random
import uuid
import SocketServer
import threading
import time
import key_derivation
from .bucketset import BucketSet
from .hashing import hash_function, random_id
from .peer import Peer
from .shortlist import Shortlist
k = 20
alpha = 3
id_bits = 128
iteration_sleep = 1
keysize = 2048
DEFAULT_TTL = 604800 # = 7 days, in seconds.
class DHTRequestHandler(SocketServer.BaseRequestHandler):
def handle(self):
try:
message = json.loads(self.request[0].strip())
message_type = message["message_type"]
print "Received message of type", message_type, "from", message["peer_id"]
if message_type == "ping":
self.handle_ping(message)
elif message_type == "pong":
self.handle_pong(message)
elif message_type == "find_node":
self.handle_find(message)
elif message_type == "find_value":
self.handle_find(message, find_value=True)
elif message_type == "found_nodes":
self.handle_found_nodes(message)
elif message_type == "found_value":
self.handle_found_value(message)
elif message_type == "store":
print "Request to store"
self.handle_store(message)
elif message_type == "downvote":
print "Asked to downvote an item"
self.handle_downvote(message)
        except (KeyError, ValueError):
pass
client_host, client_port = self.client_address
peer_id = message["peer_id"]
new_peer = Peer(client_host, client_port, peer_id)
self.server.dht.buckets.insert(new_peer)
def handle_ping(self, message):
client_host, client_port = self.client_address
id = message["peer_id"]
peer = Peer(client_host, client_port, id)
peer.pong(socket=self.server.socket, peer_id=self.server.dht.peer.id, lock=self.server.send_lock)
def handle_pong(self, message):
pass
def handle_find(self, message, find_value=False):
key = message["id"]
id = message["peer_id"]
client_host, client_port = self.client_address
peer = Peer(client_host, client_port, id)
response_socket = self.request[1]
if find_value and (key in self.server.dht.data):
value = self.server.dht.data[key]
peer.found_value(id, value, message["rpc_id"], socket=response_socket, peer_id=self.server.dht.peer.id, lock=self.server.send_lock)
else:
nearest_nodes = self.server.dht.buckets.nearest_nodes(id)
if not nearest_nodes:
nearest_nodes.append(self.server.dht.peer)
nearest_nodes = [nearest_peer.astriple() for nearest_peer in nearest_nodes]
peer.found_nodes(id, nearest_nodes, message["rpc_id"], socket=response_socket, peer_id=self.server.dht.peer.id, lock=self.server.send_lock)
def handle_found_nodes(self, message):
rpc_id = message["rpc_id"]
shortlist = self.server.dht.rpc_ids[rpc_id]
del self.server.dht.rpc_ids[rpc_id]
nearest_nodes = [Peer(*peer) for peer in message["nearest_nodes"]]
shortlist.update(nearest_nodes)
def handle_found_value(self, message):
rpc_id = message["rpc_id"]
shortlist = self.server.dht.rpc_ids[rpc_id]
del self.server.dht.rpc_ids[rpc_id]
shortlist.set_complete(message["value"])
def handle_store(self, message):
key = message["id"]
print "Asked to store data for id", key
print "Ciphertext is", message["value"]
self.server.dht.data[key] = message["value"]
self.server.dht.ttls[key] = DEFAULT_TTL
def handle_downvote(self, message):
key = message["id"]
print "Downvote for key", key, " -- uuid is ", message["uid"]
        self.server.dht.handle_downvote(key, message["uid"])
class DHTServer(SocketServer.ThreadingMixIn, SocketServer.UDPServer):
def __init__(self, host_address, handler_cls):
SocketServer.UDPServer.__init__(self, host_address, handler_cls)
self.send_lock = threading.Lock()
class DHT(object):
def __init__(self, host, port, id=None, boot_host=None, boot_port=None):
if not id:
id = random_id()
self.id = id
self.peer = Peer(unicode(host), port, id)
# Data and data decay data structures
self.data = {}
self.recent_downvotes = []
self.downvotes = {}
self.ttls = {}
self.pending_replies = {}
self.buckets = BucketSet(k, id_bits, self.peer.id)
self.rpc_ids = {} # should probably have a lock for this
self.server = DHTServer(self.peer.address(), DHTRequestHandler)
self.server.dht = self
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
self.bootstrap(unicode(boot_host), boot_port)
def iterative_find_nodes(self, key, boot_peer=None):
shortlist = Shortlist(k, key)
shortlist.update(self.buckets.nearest_nodes(key, limit=alpha))
if boot_peer:
rpc_id = random.getrandbits(id_bits)
self.rpc_ids[rpc_id] = shortlist
boot_peer.find_node(key, rpc_id, socket=self.server.socket, peer_id=self.peer.id)
while (not shortlist.complete()) or boot_peer:
nearest_nodes = shortlist.get_next_iteration(alpha)
for peer in nearest_nodes:
shortlist.mark(peer)
rpc_id = random.getrandbits(id_bits)
self.rpc_ids[rpc_id] = shortlist
peer.find_node(key, rpc_id, socket=self.server.socket, peer_id=self.peer.id) ######
time.sleep(iteration_sleep)
boot_peer = None
return shortlist.results()
def iterative_find_value(self, key):
shortlist = Shortlist(k, key)
shortlist.update(self.buckets.nearest_nodes(key, limit=alpha))
while not shortlist.complete():
nearest_nodes = shortlist.get_next_iteration(alpha)
for peer in nearest_nodes:
shortlist.mark(peer)
rpc_id = random.getrandbits(id_bits)
self.rpc_ids[rpc_id] = shortlist
peer.find_value(key, rpc_id, socket=self.server.socket, peer_id=self.peer.id) #####
time.sleep(iteration_sleep)
return shortlist.completion_result()
def bootstrap(self, boot_host, boot_port):
if boot_host and boot_port:
boot_peer = Peer(boot_host, boot_port, 0)
self.iterative_find_nodes(self.peer.id, boot_peer=boot_peer)
def __getitem__(self, key):
hashed_key = hash_function(key)
if hashed_key in self.data:
return self.data[hashed_key]
result = self.iterative_find_value(hashed_key)
if result:
return result
raise KeyError
def __setitem__(self, key, value):
hashed_key = hash_function(key)
nearest_nodes = self.iterative_find_nodes(hashed_key)
if not nearest_nodes:
self.data[hashed_key] = value
for node in nearest_nodes:
node.store(hashed_key, value, socket=self.server.socket, peer_id=self.peer.id)
def publish(self, value):
key = str(uuid.uuid4())
print "Publishing content under new key:", key
hashed_key = hash_function(key)
print "Hashed key is:", hashed_key
# need to encrypt value
ciphertext = key_derivation.do_encrypt(key, value)
print "Cyphertext is:", ciphertext
nearest_nodes = self.iterative_find_nodes(hashed_key)
if not nearest_nodes:
print "Storing data for key {} locally".format(key)
self.data[hashed_key] = ciphertext
for node in nearest_nodes:
print "Sending data for key {} to closer nodes.".format(key)
node.store(hashed_key, ciphertext, socket=self.server.socket, peer_id=self.peer.id)
return key
def retrieve(self, key):
# Retrieve result
print "Looking up key:", key
hashed_key = hash_function(key)
print "Hashed key is", hashed_key
result = None
if hashed_key in self.data:
print "Data for key", "stored locally"
result = self.data[hashed_key]
else:
print "Data stored somewhere else: forwarding request"
result = self.iterative_find_value(hashed_key)
if not result:
print "Key", key, "not found"
raise KeyError
# result is encrypted + hmac'd
# Can throw ValueError if HMAC fails
print "Ciphertext is", result
plaintext = key_derivation.do_decrypt(key, result)
return plaintext
def downvote(self, key):
uid = str(uuid.uuid4())
hashed_key = hash_function(key)
nearest_nodes = self.iterative_find_nodes(hashed_key)
print "Downvoting", key
if not nearest_nodes:
print "Asked myself to downvote a key: {}".format(key)
for node in nearest_nodes:
print "Asking another node to downvote", key
node.downvote(hashed_key, uid, socket=self.server.socket, peer_id=self.peer.id)
def handle_downvote(self, key, uuid):
if uuid in self.recent_downvotes:
return
if key not in self.data:
return
        self.downvotes[key] = self.downvotes.get(key, 0) + 1  # tolerate the first downvote for a key
self.recent_downvotes.append(uuid)
def tick(self):
for (uuid, downvotes) in self.downvotes.items():
downvote_val = math.log(downvotes, 2)
self.ttls[uuid] -= downvote_val
for (uuid, ttl) in self.ttls.items():
if ttl <= 0:
print "UUID", uuid, " past TTL - deleting"
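
# Illustrative usage sketch, not part of the original module (Python 2, to
# match the code above). Host and port are hypothetical; this only shows the
# publish / retrieve round trip offered by the DHT class on a standalone node.
def _demo_dht_roundtrip():
    node = DHT("127.0.0.1", 4000)                  # no bootstrap peer
    key = node.publish("hello, distributed world")
    print "Retrieved:", node.retrieve(key)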
| 0
| 0
| 0
| 9,374
| 0
| 0
| 0
| -8
| 334
|
60baa5af0a80d786e5af5dc6c9b0c70a33113f15
| 913
|
py
|
Python
|
mutability/mutability_exercise2.py
|
dipeshbabu/python-labs-codio
|
40ec0f398dc239b41d4105a8f95b35a22024b63e
|
[
"MIT"
] | 2
|
2021-11-18T01:42:00.000Z
|
2021-11-28T14:55:31.000Z
|
mutability/mutability_exercise2.py
|
dipeshbabu/python-labs-codio
|
40ec0f398dc239b41d4105a8f95b35a22024b63e
|
[
"MIT"
] | null | null | null |
mutability/mutability_exercise2.py
|
dipeshbabu/python-labs-codio
|
40ec0f398dc239b41d4105a8f95b35a22024b63e
|
[
"MIT"
] | null | null | null |
"""
Exercise 2
Using the same CelestialBody class, write a static method closer_to_sun that compares
two CelestialBody objects and returns the name of the object that is closest to the sun.
Expected Output
If the objects mercury and venus are compared, then the method would return Mercury.
"""
mercury = CelestialBody("Mercury", 4879.4, 57909000, 0)
venus = CelestialBody("Venus", 12103.6, 108160000, 0)
| 27.666667
| 87
| 0.665936
|
"""
Exercise 2
Using the same CelestialBody class, write a static method closer_to_sun that compares
two CelestialBody objects and returns the name of the object that is closest to the sun.
Expected Output
If the objects mercury and venus are compared, then the method would return Mercury.
"""
class CelestialBody:
"""Represents a celestial body"""
def __init__(self, name, diameter, distance, moons):
self.name = name
self.diameter = diameter
self.distance = distance
self.moons = moons
@staticmethod
def closer_to_sun(body1, body2):
"""
Returns the name of the body
that is closest to the sun
"""
if body1.distance < body2.distance:
return body1.name
else:
return body2.name
mercury = CelestialBody("Mercury", 4879.4, 57909000, 0)
venus = CelestialBody("Venus", 12103.6, 108160000, 0)
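
# Illustrative check, not part of the original exercise solution: reproduces
# the expected output described in the docstring above with the two objects
# already defined.
print(CelestialBody.closer_to_sun(mercury, venus))  # prints: Mercury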
| 0
| 243
| 0
| 240
| 0
| 0
| 0
| 0
| 23
|
4b6841333d077341a6f1d6dc8abf6907abc1a552
| 6,442
|
py
|
Python
|
source/clusters/train_cluster.py
|
microsoft/Aura
|
d95ae0067bcd82e5952e8eed0e46b1a5eaaa7031
|
[
"MIT"
] | 1
|
2022-03-02T00:21:33.000Z
|
2022-03-02T00:21:33.000Z
|
source/clusters/train_cluster.py
|
microsoft/Aura
|
d95ae0067bcd82e5952e8eed0e46b1a5eaaa7031
|
[
"MIT"
] | null | null | null |
source/clusters/train_cluster.py
|
microsoft/Aura
|
d95ae0067bcd82e5952e8eed0e46b1a5eaaa7031
|
[
"MIT"
] | 2
|
2022-03-15T03:12:02.000Z
|
2022-03-20T20:49:02.000Z
|
import os
from azureml.core.run import Run
from ..utils.tfrecords import resize, parse_tfrecord
run = Run.get_context()
def get_data_from_tfrecords(args, num_replicas):
"""
Create a tf.data from tf records in args.train_dir/args.validation_dir
:param args:
:param num_replicas:
:return:
"""
num_frames = args.num_frames
num_mel = args.num_mel
num_labels = args.num_labels
batch_size = args.batch_size * num_replicas
autotune = tf.data.AUTOTUNE
train_filenames = tf.io.gfile.glob(f'{args.train_dir}/*.tfrec')
train_dataset = tf.data.TFRecordDataset(train_filenames, num_parallel_reads=autotune) \
.map(lambda example: parse_tfrecord(example,
num_mel=num_mel,
num_frames=num_frames,
snr=args.snr,
labels=args.labels),
num_parallel_calls=autotune) \
.map(lambda example: resize(example, num_frames=num_frames,
num_mel=num_mel,
num_labels=args.num_labels,
labels=args.labels,
snr=args.snr),
num_parallel_calls=autotune) \
.shuffle(10 * batch_size) \
.batch(batch_size) \
.prefetch(autotune) \
.cache()
return train_dataset
def get_model(args, num_replicas):
"""
Construct tensorflow model from checkpoint in args.path_model_tf
and data loader from args.data_dir
"""
model = globals()[args.model_name](nclass=args.num_labels)
if args.path_model_tf is not None:
model.load_weights(tf.train.latest_checkpoint(args.path_model_tf)).expect_partial()
cluster_algorithm = globals()[args.clustering_name](args.num_clusters, args.embed_dim)
clus = ClusterFeatureMap(cluster_algorithm, model, batch_size=args.batch_size * num_replicas)
clus.compile()
print('Compiling model done')
return clus
def train(args):
"""
Iterate over the batch in the dataset and learn the cluster centers
using args.clustering_name and args.model_name feature map.
:param args:
:return:
"""
if run._run_id.startswith("OfflineRun"):
run.number = 0
strategy = tf.distribute.MirroredStrategy()
save_dir = args.save_dir
save_dir = f'{save_dir}/{args.experiment_name}_{run.number}'
os.makedirs(save_dir, exist_ok=True)
with strategy.scope():
model = get_model(args, strategy.num_replicas_in_sync)
train_loader = get_data_from_tfrecords(args, strategy.num_replicas_in_sync)
model.fit(train_loader,
epochs=args.num_epochs,
callbacks=[SaveCluster(save_dir), UpdateCluster()])
| 34.084656
| 132
| 0.658025
|
import numpy as np
import os
from azureml.core.run import Run
from scipy.stats import entropy
from ..utils.tfrecords import resize, parse_tfrecord
from .kmeans import *
from ..models import *
run = Run.get_context()
class ClusterFeatureMap(tf.keras.Model):
""""
This is a clustering class with methods to allow batch clustering
of the latent representation generated by classifier
"""
def __init__(self, clustering, classifier, batch_size=16):
super().__init__()
self.clustering = clustering
self.classifier = classifier
self.batch_size = batch_size
def train_step(self, data):
noisy1, label = data[0], data[1]
_, latent = self.classifier.estimate(noisy1)
latent = tf.reduce_mean(latent, axis=(1))
def get_assign():
return self.clustering.assign(latent)
def get_initialize():
return self.clustering.initialize(latent)
centroid_assignment = tf.cond(self.clustering.initialized, get_assign, lambda: tf.zeros_like(latent[:, 0], dtype=tf.int64))
def get_update():
return self.clustering.update(latent, centroid_assignment, label)
l2_adjustment = self.clustering.compute_distance(latent, centroid_assignment)
labels_distance = self.clustering.compute_distance_labels(label, centroid_assignment)
tf.cond(self.clustering.initialized, get_update, get_initialize)
results = {'cluster_dispersion': tf.reduce_sum(l2_adjustment) / self.batch_size,
'cluster_label_distance': tf.reduce_sum(labels_distance) / self.batch_size}
return results
def call(self, data):
noisy1, label = data[0], data[1]
_, latent = self.classifier(noisy1)
latent = tf.reduce_mean(latent, axis=(1))
        centroid_assignment = self.clustering.assign(latent)
return centroid_assignment
class SaveCluster(tf.keras.callbacks.Callback):
"""
A callback class for saving clusters
"""
def __init__(self, save_dir):
super().__init__()
self.save_dir = save_dir
def on_epoch_end(self, epoch, logs={}):
centroids = self.model.clustering.centroids.numpy()
labels = self.model.clustering.cluster_labels.numpy()
if hasattr(self.model.clustering, 'centroids_covariance'):
centroids_covariance = self.model.clustering.centroids_covariance.numpy()
np.savez(f'{self.save_dir}/centroids.npz', centroids=centroids, centroid_labels=labels, covariance=centroids_covariance)
else:
np.savez(f'{self.save_dir}/centroids.npz', centroids=centroids, centroid_labels=labels)
# -- label entropy per cluster
labels_without_zeros = labels[labels.sum(-1) > 0]
prob_labels = labels_without_zeros / labels_without_zeros.sum(-1)[:, None]
entropy_clusters = entropy(prob_labels, axis=1)
run.log('entropy_label', entropy_clusters.mean())
class UpdateCluster(tf.keras.callbacks.Callback):
"""
A callback class for updating centroid coordinates
"""
def __init__(self):
super().__init__()
def on_epoch_end(self, epoch, logs={}):
tf.cond(self.model.clustering.initialized, self.model.clustering.reset_centroids, lambda: None)
ch_index = self.model.clustering.compute_calinski_harabasz()
db_index = self.model.clustering.compute_davies_bouldin()
db_labels_index = self.model.clustering.compute_davies_bouldin_labels()
run.log('Calinski-Harabasz Index', float(ch_index))
run.log('Davies-Bouldin Index', float(db_index))
run.log('Davies-Bouldin Labels-Based Index', float(db_labels_index))
def get_data_from_tfrecords(args, num_replicas):
"""
Create a tf.data from tf records in args.train_dir/args.validation_dir
:param args:
:param num_replicas:
:return:
"""
num_frames = args.num_frames
num_mel = args.num_mel
num_labels = args.num_labels
batch_size = args.batch_size * num_replicas
autotune = tf.data.AUTOTUNE
train_filenames = tf.io.gfile.glob(f'{args.train_dir}/*.tfrec')
train_dataset = tf.data.TFRecordDataset(train_filenames, num_parallel_reads=autotune) \
.map(lambda example: parse_tfrecord(example,
num_mel=num_mel,
num_frames=num_frames,
snr=args.snr,
labels=args.labels),
num_parallel_calls=autotune) \
.map(lambda example: resize(example, num_frames=num_frames,
num_mel=num_mel,
num_labels=args.num_labels,
labels=args.labels,
snr=args.snr),
num_parallel_calls=autotune) \
.shuffle(10 * batch_size) \
.batch(batch_size) \
.prefetch(autotune) \
.cache()
return train_dataset
def get_model(args, num_replicas):
"""
Construct tensorflow model from checkpoint in args.path_model_tf
and data loader from args.data_dir
"""
model = globals()[args.model_name](nclass=args.num_labels)
if args.path_model_tf is not None:
model.load_weights(tf.train.latest_checkpoint(args.path_model_tf)).expect_partial()
cluster_algorithm = globals()[args.clustering_name](args.num_clusters, args.embed_dim)
clus = ClusterFeatureMap(cluster_algorithm, model, batch_size=args.batch_size * num_replicas)
clus.compile()
print('Compiling model done')
return clus
def train(args):
"""
Iterate over the batch in the dataset and learn the cluster centers
using args.clustering_name and args.model_name feature map.
:param args:
:return:
"""
if run._run_id.startswith("OfflineRun"):
run.number = 0
strategy = tf.distribute.MirroredStrategy()
save_dir = args.save_dir
save_dir = f'{save_dir}/{args.experiment_name}_{run.number}'
os.makedirs(save_dir, exist_ok=True)
with strategy.scope():
model = get_model(args, strategy.num_replicas_in_sync)
train_loader = get_data_from_tfrecords(args, strategy.num_replicas_in_sync)
model.fit(train_loader,
epochs=args.num_epochs,
callbacks=[SaveCluster(save_dir), UpdateCluster()])
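
# Illustrative sketch, not part of the original module. Every value below is a
# placeholder and the model/clustering class names are assumptions -- they
# must match classes pulled in by the module's star imports. Only the
# attribute names themselves are taken from the functions above.
def _demo_args():
    from types import SimpleNamespace
    return SimpleNamespace(
        train_dir='./tfrecords/train', num_frames=998, num_mel=64,
        num_labels=521, batch_size=16, snr=None, labels=None,
        model_name='SomeClassifier', clustering_name='SomeKMeans',
        path_model_tf=None, num_clusters=32, embed_dim=512,
        save_dir='./outputs', experiment_name='cluster_demo', num_epochs=1)

# train(_demo_args())  # would run clustering, given real tfrecords and classes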
| 0
| 0
| 0
| 3,415
| 0
| 0
| 0
| 8
| 157
|
500a827b7b2d2907dc4668b184fb3c0816a2b1e1
| 4,987
|
py
|
Python
|
curriculumBuilder/testDetails.py
|
code-dot-org/curriculumbuilder
|
e40330006145b8528f777a8aec2abff5b309d1c7
|
[
"Apache-2.0"
] | 3
|
2019-10-22T20:21:15.000Z
|
2022-01-12T19:38:48.000Z
|
curriculumBuilder/testDetails.py
|
code-dot-org/curriculumbuilder
|
e40330006145b8528f777a8aec2abff5b309d1c7
|
[
"Apache-2.0"
] | 67
|
2019-09-27T17:04:52.000Z
|
2022-03-21T22:16:23.000Z
|
curriculumBuilder/testDetails.py
|
code-dot-org/curriculumbuilder
|
e40330006145b8528f777a8aec2abff5b309d1c7
|
[
"Apache-2.0"
] | 1
|
2019-10-18T16:06:31.000Z
|
2019-10-18T16:06:31.000Z
|
# pylint: disable=missing-docstring,invalid-name,line-too-long
| 41.214876
| 166
| 0.600361
|
# pylint: disable=missing-docstring,invalid-name,line-too-long
from django.test import TestCase
import markdown
class TestDetails(TestCase):
""" Test details extension. """
def setUp(self):
self.markdown = markdown.Markdown(extensions=['curriculumBuilder.details:DetailsExtension'])
def test_details_can_render(self):
source = '::: details [summary-content]\n' + \
'contents, which are sometimes further block elements\n' + \
':::'
expected = '<details><summary><p>summary-content</p></summary><p>contents, which are sometimes further block elements</p></details>'
rendered = self.markdown.convert(source)
self.assertEqual(rendered, expected)
def test_details_can_span_multiple_blocks(self):
source = '::: details [summary-content]\n' + \
'\n' + \
'contents, which are sometimes further block elements\n' + \
'\n' + \
':::'
expected = '<details><summary><p>summary-content</p></summary><p>contents, which are sometimes further block elements</p></details>'
rendered = self.markdown.convert(source)
self.assertEqual(rendered, expected)
def test_details_can_have_a_variable_number_of_opening_colons(self):
source = ':::::::: details [summary-content]\n' + \
'contents, which are sometimes further block elements\n' + \
':::::::::::::'
expected = '<details><summary><p>summary-content</p></summary><p>contents, which are sometimes further block elements</p></details>'
rendered = self.markdown.convert(source)
self.assertEqual(rendered, expected)
def test_details_can_render_markdown_syntax_in_the_summary(self):
source = '::: details [**summary** _content_]\n' + \
'contents, which are sometimes further block elements\n' + \
':::'
expected = '<details><summary><p><strong>summary</strong> <em>content</em></p></summary><p>contents, which are sometimes further block elements</p></details>'
rendered = self.markdown.convert(source)
self.assertEqual(rendered, expected)
def test_details_can_render_markdown_syntax_in_the_body(self):
source = '::: details [summary-content]\n' + \
'\n' + \
'# Contents\n' + \
'- can\n' + \
'- be\n' + \
'- markdown\n' + \
'\n' + \
':::'
expected = '<details><summary><p>summary-content</p></summary><h1>Contents</h1><ul>' + \
'<li>can</li>' + \
'<li>be</li>' + \
'<li>markdown</li>' + \
'</ul></details>'
rendered = self.markdown.convert(source)
self.assertEqual(rendered, expected)
def test_details_ignores_trailing_colons(self):
# Look how pretty this can be!
source = '::::::::::::: details [summary-content] :::::::::::::\n' + \
'contents, which are sometimes further block elements\n' + \
':::::::::::::::::::::::::::::::::::::::::::::::::::::'
expected = '<details><summary><p>summary-content</p></summary><p>contents, which are sometimes further block elements</p></details>'
rendered = self.markdown.convert(source)
self.assertEqual(rendered, expected)
def test_details_ignores_excess_whitespace(self):
source = '::: details [summary-content] \n' + \
'\n' + \
'contents, which are sometimes further block elements\n' + \
'\n' + \
':::'
expected = '<details><summary><p>summary-content</p></summary><p>contents, which are sometimes further block elements</p></details>'
rendered = self.markdown.convert(source)
self.assertEqual(rendered, expected)
def test_details_can_nest(self):
source = ':::: details [outer]\n' + \
'::: details [inner]\n' + \
'innermost content\n' + \
':::\n' + \
'::::'
expected = '<details><summary><p>outer</p></summary><details><summary><p>inner</p></summary><p>innermost content</p></details></details>'
rendered = self.markdown.convert(source)
self.assertEqual(rendered, expected)
def test_details_requires_a_summary_block(self):
source = '::: details\n' + \
'contents, which are sometimes further block elements\n' + \
':::'
expected = '<p>::: details\n' + \
'contents, which are sometimes further block elements\n' + \
':::</p>'
rendered = self.markdown.convert(source)
self.assertEqual(rendered, expected)
def test_details_requires_at_least_three_opening_colons(self):
source = ':: details [summary-content]\n' + \
'contents, which are sometimes further block elements\n' + \
':::'
expected = '<p>:: details [summary-content]\n' + \
'contents, which are sometimes further block elements\n' + \
':::</p>'
rendered = self.markdown.convert(source)
self.assertEqual(rendered, expected)
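
# Illustrative sketch, not part of the original test module: the same
# extension exercised outside the test runner. The extension path matches
# setUp() above; the input text is arbitrary.
def _demo_details_render():
    md = markdown.Markdown(extensions=['curriculumBuilder.details:DetailsExtension'])
    return md.convert('::: details [Why?]\nBecause.\n:::')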
| 0
| 0
| 0
| 4,851
| 0
| 0
| 0
| 5
| 68
|
88eef38159455835c84e3f2649e3a65d58cd13ef
| 5,079
|
py
|
Python
|
gtunrealdevice/config.py
|
Geeks-Trident-LLC/gtunrealdevice
|
a691f16ed031c342002472fd7c12c96c0e94be45
|
[
"BSD-3-Clause"
] | null | null | null |
gtunrealdevice/config.py
|
Geeks-Trident-LLC/gtunrealdevice
|
a691f16ed031c342002472fd7c12c96c0e94be45
|
[
"BSD-3-Clause"
] | null | null | null |
gtunrealdevice/config.py
|
Geeks-Trident-LLC/gtunrealdevice
|
a691f16ed031c342002472fd7c12c96c0e94be45
|
[
"BSD-3-Clause"
] | null | null | null |
"""Module containing the attributes for gtunrealdevice."""
__version__ = '0.2.8'
version = __version__
__edition__ = 'Community'
edition = __edition__
__all__ = [
'Data',
'version',
'edition'
]
| 35.027586
| 86
| 0.607403
|
"""Module containing the attributes for gtunrealdevice."""
import yaml
from os import path
from textwrap import dedent
from gtunrealdevice.utils import File
__version__ = '0.2.8'
version = __version__
__edition__ = 'Community'
edition = __edition__
__all__ = [
'Data',
'version',
'edition'
]
class Data:
message = ''
# app yaml files
app_directory = File.get_path('.geekstrident', 'gtunrealdevice', is_home=True)
devices_info_filename = File.get_path(app_directory, 'devices_info.yaml')
serialized_filename = File.get_path(app_directory, 'serialized_data.yaml')
# app sample data
sample_devices_info_text = dedent("""
####################################################################
# sample devices info #
# Note: name, login, and configs nodes are optional #
####################################################################
host_address_1:
name: host_name (optional)
description: (optional)
login: |-
output_of_login (optional)
cmdlines:
cmdline_1: |-
line 1 output_of_cmdline_1
...
line n output_of_cmdline_1
cmdline_k_for_multiple_output:
- |-
line 1 - output_of_cmdline_k
...
line n - output_of_cmdline_k
- |-
line 1 - other_output_of_cmdline_k
...
line n - other_output_of_cmdline_k
configs:
cfg_1_reference: |-
line 1 of cfg_1
...
line n of cfg_1
""").strip()
# main app
main_app_text = 'gtunrealdevice v{}'.format(version)
# company
company = 'Geeks Trident LLC'
company_url = 'https://www.geekstrident.com/'
# URL
repo_url = 'https://github.com/Geeks-Trident-LLC/gtunrealdevice'
# TODO: Need to update wiki page for documentation_url instead of README.md.
documentation_url = path.join(repo_url, 'blob/develop/README.md')
license_url = path.join(repo_url, 'blob/develop/LICENSE')
# License
years = '2022-2040'
license_name = 'BSD 3-Clause License'
copyright_text = 'Copyright @ {}'.format(years)
license = dedent(
"""
BSD 3-Clause License
Copyright (c) {}, {}
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""".format(years, company)
).strip()
@classmethod
def get_app_info(cls):
from platform import uname as u, python_version as v
lst = [cls.main_app_text,
'Project : {}'.format(cls.repo_url),
'License : {}'.format(cls.license_name),
'Platform: {0.system} {0.release} - Python {1}'.format(u(), v()),
]
app_info = '\n'.join(lst)
return app_info
@classmethod
def is_devices_info_file_exist(cls):
return File.is_exist(cls.devices_info_filename)
@classmethod
def create_devices_info_file(cls):
is_created = File.create(cls.devices_info_filename)
cls.message = File.message
return is_created
@classmethod
def get_dependency(cls):
dependencies = dict(
pyyaml=dict(
package='pyyaml v{}'.format(yaml.__version__),
url='https://pypi.org/project/PyYAML/'
),
)
return dependencies
| 0
| 862
| 0
| 3,884
| 0
| 0
| 0
| 10
| 114
|
260c878ed68bb23823d5096c9467185ce233fe09
| 2,117
|
py
|
Python
|
semester5/num-methods/lab2/lab2.py
|
gardenappl/uni
|
5bc7110946caf16aae2a0c1ddae4e88bfbb25aa8
|
[
"WTFPL"
] | null | null | null |
semester5/num-methods/lab2/lab2.py
|
gardenappl/uni
|
5bc7110946caf16aae2a0c1ddae4e88bfbb25aa8
|
[
"WTFPL"
] | null | null | null |
semester5/num-methods/lab2/lab2.py
|
gardenappl/uni
|
5bc7110946caf16aae2a0c1ddae4e88bfbb25aa8
|
[
"WTFPL"
] | null | null | null |
import numpy as np
a = np.array([
(1, 3, 5, 7),
(3, 5, 7, 1),
(5, 7, 1, 3),
(7, 1, 1, 5)
], dtype=np.float64)
b = np.array([12, 0, 4, 16], dtype=np.float64)
MAX_STEPS = 100
print("Gauss (with selection of main element):", solve_gauss_m(a, b))
print("numpy.linalg.solve:", np.linalg.solve(a, b))
a = np.array([
[3, -1, 1],
[-1, 2, 0.5],
[1, 0.5, 3]
], dtype=np.float64)
b = np.array([1, 1.75, 2.5], dtype=np.float64)
print("Seidel:", solve_seidel(a, b, epsilon=0.0001))
print("numpy.linalg.solve:", np.linalg.solve(a, b))
| 23.786517
| 76
| 0.505432
|
import numpy as np
a = np.array([
(1, 3, 5, 7),
(3, 5, 7, 1),
(5, 7, 1, 3),
(7, 1, 1, 5)
], dtype=np.float64)
b = np.array([12, 0, 4, 16], dtype=np.float64)
def solve_gauss_m(a, b):
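    # Gaussian elimination with partial pivoting: in each column the row whose entry in
    # that column is largest is swapped into the pivot position before eliminating below
    # it, and the resulting upper-triangular system is solved by back-substitution.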
a = a.copy()
b = b.copy()
for i in range(0, a.shape[0]):
main_row_index = i + np.argmax(a[range(i, a.shape[1]), i])
main_row_el = a[main_row_index, i]
# Swap main row with ith row
if main_row_index != i:
a[[main_row_index, i], :] = a[[i, main_row_index], :]
b[[main_row_index, i]] = b[[i, main_row_index]]
# print(a)
# print(b)
a[i, :] /= main_row_el
b[i] /= main_row_el
for row_index in range(i + 1, a.shape[1]):
multiplier = a[row_index, i]
a[row_index, :] -= a[i, :] * multiplier
b[row_index] -= b[i] * multiplier
# print(a)
# print(b)
x = np.empty_like(b)
for i in range(0, a.shape[0]):
row_index = x.size - 1 - i
x[row_index] = b[row_index]
for j in range(0, i):
x[row_index] -= a[row_index, a.shape[1]-1 - j] * x[x.size-1 - j]
return x
MAX_STEPS = 100
def solve_seidel(a, b, max_steps=MAX_STEPS, epsilon=1e-04):
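    # Gauss-Seidel iteration: each sweep recomputes x[i] from the already-updated
    # components x_new[0..i-1] and the previous iterate for the remaining components,
    # stopping once two successive iterates agree to within epsilon (or max_steps is reached).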
x = np.zeros_like(b)
for step in range(max_steps):
x_new = np.zeros_like(x)
for i in range(x.size):
for j in range(0, i):
x_new[i] -= a[i, j] / a[i, i] * x_new[j]
for j in range(i + 1, x.size):
x_new[i] -= a[i, j] / a[i, i] * x[j]
x_new[i] += b[i] / a[i, i]
print("Step", step, ":", x_new)
if np.allclose(x, x_new, atol=epsilon, rtol=0):
return x_new
x = x_new
return x_new
print("Gauss (with selection of main element):", solve_gauss_m(a, b))
print("numpy.linalg.solve:", np.linalg.solve(a, b))
a = np.array([
[3, -1, 1],
[-1, 2, 0.5],
[1, 0.5, 3]
], dtype=np.float64)
b = np.array([1, 1.75, 2.5], dtype=np.float64)
print("Seidel:", solve_seidel(a, b, epsilon=0.0001))
print("numpy.linalg.solve:", np.linalg.solve(a, b))
| 0
| 0
| 0
| 0
| 0
| 1,511
| 0
| 0
| 46
|
429b84c47c7bb1d0236a511a637bb935635a7405
| 8,460
|
py
|
Python
|
ctseg/config.py
|
sandialabs/mcdn-3d-seg
|
6b6a234719d37a11f3f997568c32ca04d62a4b18
|
[
"MIT"
] | 4
|
2021-01-21T21:28:12.000Z
|
2021-09-27T19:39:34.000Z
|
ctseg/config.py
|
sandialabs/mcdn-3d-seg
|
6b6a234719d37a11f3f997568c32ca04d62a4b18
|
[
"MIT"
] | null | null | null |
ctseg/config.py
|
sandialabs/mcdn-3d-seg
|
6b6a234719d37a11f3f997568c32ca04d62a4b18
|
[
"MIT"
] | 2
|
2021-09-01T11:44:49.000Z
|
2022-01-04T02:01:28.000Z
|
"""
"""
import os
from sacred.observers import FileStorageObserver
from sacred import Experiment
from ctseg.ctutil.utils import read_json
def initialize_experiment():
"""Initialize the Sacred Experiment
This method reads a JSON config from mcdn-3d-seg/sacred_config.json with the
following entries:
experiment_name: the name of the sacred experiment
file_observer_base_dir: the directory where run logs are saved to. If relative,
it is assumed relative to mcdn-3d-seg/
"""
# parse the sacred config
repo_dir = os.path.dirname(os.path.dirname(__file__))
sacred_config = read_json(os.path.join(repo_dir, "sacred_config.json"))
# initialize the experiment
ex = Experiment(sacred_config["experiment_name"])
# create a file-based observer to log runs
file_observer_base_dir = os.path.expanduser(sacred_config["file_observer_base_dir"])
if not file_observer_base_dir.startswith("/"):
file_observer_base_dir = os.path.join(repo_dir, file_observer_base_dir)
ex.observers.append(FileStorageObserver.create(file_observer_base_dir))
return ex
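# For reference, a minimal sacred_config.json matching the docstring above could look like the
# following (both values are illustrative placeholders, not taken from the repository):
#
# {
#     "experiment_name": "mcdn-3d-seg",
#     "file_observer_base_dir": "runs"
# }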
ex = initialize_experiment()
DEFAULT_CONFIG = {
"num_gpus": 1,
# the number of output segmentation classes
"num_classes": 4,
# the method used to normalize the data
# options include: ZeroMeanUnitVar, NormLog, MagicNormLog
"normalization": "",
# continuously checks for new inference files and deletes completed files
"production_mode": False,
"check_alignment": -1,
# model architecture
"model_config": {
# specifies the architecture of a new model
"architecture_config": {
# the size of model's input window when sampling volumes (x, y, z)
"input_shape": [240, 240, 240],
"kernel_initializer": "lecun_normal",
"activation": "relu",
"dropout_rate": 0.1,
},
# specifies loading a pre-trained model
"load_config": {
# whether or not to drop the last layer when loading a model
"drop_last_layer": False,
# "best", "latest" or "/PATH/TO/MODEL/CHECKPOINT" to resume training from.
# Leave empty to not resume
"resume_from": "",
            # path to a weights file to load the model from. takes precedence over
# `resume_from` if set
"load_weights_from": "",
},
},
# data preprocessing
"data_config": {
# mirrors input chunks in the corresponding dimension
"flip_x": False,
"flip_y": False,
"flip_z": False,
# Flip Validation Axis: None or int or tuple of ints, optional
# Axis or axes along which to flip over. The default,
# axis=None, will flip over all of the axes of the input array.
# If axis is negative it counts from the last to the first axis.
# If axis is a tuple of ints, flipping is performed on all of the axes
# specified in the tuple.
"flip_validation_axis": None,
"sampler_config": {
# the chunk sampling class for during training. one of "OverlapSampler",
# "RandomSampler", "BattleShipSampler"
"sampler_class": "RandomSampler",
# Number of random samples taken from the training data dir when performing
# training. Not used in "overlap" mode.
"n_samples_per_epoch": 3,
# Number of chunks taken from each sample when performing training. Not
# used in "overlap" mode.
"n_chunks_per_sample": 100,
# the amount the input window is translated in the x, y, and z dimensions.
# Used during inference but also during training if sampler_class is
# "OverlapSampler"
"overlap_stride": 240,
}
},
# configuration specific to training
"train_config": {
"inputs": {
# dir containing training the `.npy` data files
"data_dir": "/PATH/TO/TRAIN/DATA",
# dir containing the `.npy` training labels. files are matched by name to
# data, so this dir can have targets for both training and testing
"targets_dir": "/PATH/TO/TRAIN/TARGETS"
},
"outputs": {
# where cached normalized data is saved to
"normalized_data_dir": "/PATH/TO/NORMALIZED/DATA",
"csv_log_dir": "/PATH/TO/CSV/LOGS",
"tensorboard_log_dir": "/PATH/TO/TENSORBOARD/LOGS",
"models_dir": "/PATH/TO/SAVED/MODELS",
# where normalizer metadata is saved to
"preprocessor_dir": "/PATH/TO/SAVED/PREPROCESSORS",
},
"compilation": {
# name of the optimizer to use
"optimizer": "Adadelta",
# the name of the loss function. Valid names include all Keras defaults
# as well as fully-qualified function names
"loss": "ctseg.ctutil.losses.weighted_categorical_crossentropy",
# kwargs passed to the loss function. replace this kwargs dict with `false`
# to not use
"loss_kwargs": {
"beta": 0.9,
},
# the names of the metrics to track. Valid names include all Keras defaults
# as well as fully-qualified function names
"metrics": [
"accuracy",
"ctseg.ctutil.metrics.per_class_accuracy",
"ctseg.ctutil.metrics.jaccard_index",
],
# indicates whether or not to recompile with the above specified optimizer,
# loss and metrics if a compiled model is loaded.
# Warning: doing this may slow training as it will discard the current state
# of the optimizer
"recompile": False
},
# the max number of epochs to train for
"epochs": 1000,
# Epoch at which to start training
# (useful for resuming a previous training run).
"initial_epoch": 0,
# the training batch size
"batch_size": 1,
},
# configuration specific to testing
"test_config": {
"inputs": {
# dir containing the `.npy` test data files
"data_dir": "/PATH/TO/TEST/DATA",
# dir containing the `.npy` test labels. files are matched by name to data,
# so this dir can have targets for both training and testing
"targets_dir": "/PATH/TO/TEST/TARGETS"
},
"outputs": {
# where cached normalized data is saved to
"normalized_data_dir": "/PATH/TO/NORMALIZED/DATA"
}
},
# configuration specific to inference
"inference_config": {
"inputs": {
# where the `.npy` files to be processed live
"unprocessed_queue_dir": "/PATH/TO/UNPROCESSED/DATA",
},
"outputs": {
# where files from `unprocessed_queue_dir` are moved to once processed
"processed_data_dir": "/PATH/TO/PROCESSED/DATA",
# where cached normalized data is saved to
"normalized_data_dir": "/PATH/TO/NORMALIZED/DATA",
# where predictions are written to
"predictions_dir": "/PATH/TO/INFERENCE/PREDICTIONS"
},
# the number of iterations of inference performed per chunk, the results of which
# are averaged and standard deviations are calculated
"inference_iters": 5,
},
# configuration specific to plotting
"plot_config": {
"inputs": {
# dir containing the `.npy` data files
"data_dir": "/PATH/TO/DATA",
# dir containing the `.npy` labels, if available. Leave empty if not. Files
# are matched by name to data
"targets_dir": "/PATH/TO/TARGETS",
# dir containing the `.npy` predictions
"predictions_dir": "/PATH/TO/PREDICTIONS"
},
"outputs": {
"plots_dir": "/PATH/TO/OUTPUT/PLOTS"
}
},
}
ex.add_config(DEFAULT_CONFIG)
| 36.943231
| 89
| 0.610757
|
"""
"""
import os
from sacred.observers import FileStorageObserver
from sacred import Experiment
from ctseg.ctutil.utils import read_json
def initialize_experiment():
"""Initialize the Sacred Experiment
This method reads a JSON config from mcdn-3d-seg/sacred_config.json with the
following entries:
experiment_name: the name of the sacred experiment
file_observer_base_dir: the directory where run logs are saved to. If relative,
it is assumed relative to mcdn-3d-seg/
"""
# parse the sacred config
repo_dir = os.path.dirname(os.path.dirname(__file__))
sacred_config = read_json(os.path.join(repo_dir, "sacred_config.json"))
# initialize the experiment
ex = Experiment(sacred_config["experiment_name"])
# create a file-based observer to log runs
file_observer_base_dir = os.path.expanduser(sacred_config["file_observer_base_dir"])
if not file_observer_base_dir.startswith("/"):
file_observer_base_dir = os.path.join(repo_dir, file_observer_base_dir)
ex.observers.append(FileStorageObserver.create(file_observer_base_dir))
return ex
ex = initialize_experiment()
DEFAULT_CONFIG = {
"num_gpus": 1,
# the number of output segmentation classes
"num_classes": 4,
# the method used to normalize the data
# options include: ZeroMeanUnitVar, NormLog, MagicNormLog
"normalization": "",
# continuously checks for new inference files and deletes completed files
"production_mode": False,
"check_alignment": -1,
# model architecture
"model_config": {
# specifies the architecture of a new model
"architecture_config": {
# the size of model's input window when sampling volumes (x, y, z)
"input_shape": [240, 240, 240],
"kernel_initializer": "lecun_normal",
"activation": "relu",
"dropout_rate": 0.1,
},
# specifies loading a pre-trained model
"load_config": {
# whether or not to drop the last layer when loading a model
"drop_last_layer": False,
# "best", "latest" or "/PATH/TO/MODEL/CHECKPOINT" to resume training from.
# Leave empty to not resume
"resume_from": "",
            # path to a weights file to load the model from. takes precedence over
# `resume_from` if set
"load_weights_from": "",
},
},
# data preprocessing
"data_config": {
# mirrors input chunks in the corresponding dimension
"flip_x": False,
"flip_y": False,
"flip_z": False,
# Flip Validation Axis: None or int or tuple of ints, optional
# Axis or axes along which to flip over. The default,
# axis=None, will flip over all of the axes of the input array.
# If axis is negative it counts from the last to the first axis.
# If axis is a tuple of ints, flipping is performed on all of the axes
# specified in the tuple.
"flip_validation_axis": None,
"sampler_config": {
# the chunk sampling class for during training. one of "OverlapSampler",
# "RandomSampler", "BattleShipSampler"
"sampler_class": "RandomSampler",
# Number of random samples taken from the training data dir when performing
# training. Not used in "overlap" mode.
"n_samples_per_epoch": 3,
# Number of chunks taken from each sample when performing training. Not
# used in "overlap" mode.
"n_chunks_per_sample": 100,
# the amount the input window is translated in the x, y, and z dimensions.
# Used during inference but also during training if sampler_class is
# "OverlapSampler"
"overlap_stride": 240,
}
},
# configuration specific to training
"train_config": {
"inputs": {
# dir containing training the `.npy` data files
"data_dir": "/PATH/TO/TRAIN/DATA",
# dir containing the `.npy` training labels. files are matched by name to
# data, so this dir can have targets for both training and testing
"targets_dir": "/PATH/TO/TRAIN/TARGETS"
},
"outputs": {
# where cached normalized data is saved to
"normalized_data_dir": "/PATH/TO/NORMALIZED/DATA",
"csv_log_dir": "/PATH/TO/CSV/LOGS",
"tensorboard_log_dir": "/PATH/TO/TENSORBOARD/LOGS",
"models_dir": "/PATH/TO/SAVED/MODELS",
# where normalizer metadata is saved to
"preprocessor_dir": "/PATH/TO/SAVED/PREPROCESSORS",
},
"compilation": {
# name of the optimizer to use
"optimizer": "Adadelta",
# the name of the loss function. Valid names include all Keras defaults
# as well as fully-qualified function names
"loss": "ctseg.ctutil.losses.weighted_categorical_crossentropy",
# kwargs passed to the loss function. replace this kwargs dict with `false`
# to not use
"loss_kwargs": {
"beta": 0.9,
},
# the names of the metrics to track. Valid names include all Keras defaults
# as well as fully-qualified function names
"metrics": [
"accuracy",
"ctseg.ctutil.metrics.per_class_accuracy",
"ctseg.ctutil.metrics.jaccard_index",
],
# indicates whether or not to recompile with the above specified optimizer,
# loss and metrics if a compiled model is loaded.
# Warning: doing this may slow training as it will discard the current state
# of the optimizer
"recompile": False
},
# the max number of epochs to train for
"epochs": 1000,
# Epoch at which to start training
# (useful for resuming a previous training run).
"initial_epoch": 0,
# the training batch size
"batch_size": 1,
},
# configuration specific to testing
"test_config": {
"inputs": {
# dir containing the `.npy` test data files
"data_dir": "/PATH/TO/TEST/DATA",
# dir containing the `.npy` test labels. files are matched by name to data,
# so this dir can have targets for both training and testing
"targets_dir": "/PATH/TO/TEST/TARGETS"
},
"outputs": {
# where cached normalized data is saved to
"normalized_data_dir": "/PATH/TO/NORMALIZED/DATA"
}
},
# configuration specific to inference
"inference_config": {
"inputs": {
# where the `.npy` files to be processed live
"unprocessed_queue_dir": "/PATH/TO/UNPROCESSED/DATA",
},
"outputs": {
# where files from `unprocessed_queue_dir` are moved to once processed
"processed_data_dir": "/PATH/TO/PROCESSED/DATA",
# where cached normalized data is saved to
"normalized_data_dir": "/PATH/TO/NORMALIZED/DATA",
# where predictions are written to
"predictions_dir": "/PATH/TO/INFERENCE/PREDICTIONS"
},
# the number of iterations of inference performed per chunk, the results of which
# are averaged and standard deviations are calculated
"inference_iters": 5,
},
# configuration specific to plotting
"plot_config": {
"inputs": {
# dir containing the `.npy` data files
"data_dir": "/PATH/TO/DATA",
# dir containing the `.npy` labels, if available. Leave empty if not. Files
# are matched by name to data
"targets_dir": "/PATH/TO/TARGETS",
# dir containing the `.npy` predictions
"predictions_dir": "/PATH/TO/PREDICTIONS"
},
"outputs": {
"plots_dir": "/PATH/TO/OUTPUT/PLOTS"
}
},
}
ex.add_config(DEFAULT_CONFIG)
@ex.named_config
def use_8_gpus():
num_gpus=8
batch_size=8
@ex.named_config
def use_2_gpus():
num_gpus=2
batch_size=2
@ex.named_config
def small_chunks():
name="sm33_small_chunk"
x_max=192
y_max=192
z_max=192
overlap_stride=192
@ex.named_config
def small_testing():
num_chunks_per_training_img=20
num_training_imgs_per_epoch=1
| 0
| 283
| 0
| 0
| 0
| 0
| 0
| 0
| 92
|
f0b634f5ff4f75ca1711e6e717af315fda15fb61
| 1,290
|
py
|
Python
|
src/sage/features/pdf2svg.py
|
kliem/sage-test-27122
|
cc60cfebc4576fed8b01f0fc487271bdee3cefed
|
[
"BSL-1.0"
] | null | null | null |
src/sage/features/pdf2svg.py
|
kliem/sage-test-27122
|
cc60cfebc4576fed8b01f0fc487271bdee3cefed
|
[
"BSL-1.0"
] | null | null | null |
src/sage/features/pdf2svg.py
|
kliem/sage-test-27122
|
cc60cfebc4576fed8b01f0fc487271bdee3cefed
|
[
"BSL-1.0"
] | 1
|
2020-07-23T10:29:56.000Z
|
2020-07-23T10:29:56.000Z
|
# -*- coding: utf-8 -*-
r"""
Check for pdf2svg
"""
# ****************************************************************************
# Copyright (C) 2021 Sebastien Labbe <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
| 33.947368
| 84
| 0.531008
|
# -*- coding: utf-8 -*-
r"""
Check for pdf2svg
"""
# ****************************************************************************
# Copyright (C) 2021 Sebastien Labbe <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from . import Executable
class pdf2svg(Executable):
r"""
A :class:`sage.features.Feature` describing the presence of ``pdf2svg``
EXAMPLES::
sage: from sage.features.pdf2svg import pdf2svg
sage: pdf2svg().is_present() # optional: pdf2svg
FeatureTestResult('pdf2svg', True)
"""
def __init__(self):
r"""
TESTS::
sage: from sage.features.pdf2svg import pdf2svg
sage: isinstance(pdf2svg(), pdf2svg)
True
"""
Executable.__init__(self, "pdf2svg", executable="pdf2svg",
spkg='pdf2svg',
url="http://www.cityinthesky.co.uk/opensource/pdf2svg/")
| 0
| 0
| 0
| 671
| 0
| 0
| 0
| 3
| 46
|
81823ab9baefc07843a37683b2ffb1db1f4fa33b
| 1,670
|
py
|
Python
|
test.py
|
IrekPrzybylo/DogBreedDNN
|
7c429694c648351cd23544b9b1321665c1866c7c
|
[
"MIT"
] | null | null | null |
test.py
|
IrekPrzybylo/DogBreedDNN
|
7c429694c648351cd23544b9b1321665c1866c7c
|
[
"MIT"
] | 12
|
2021-04-08T16:08:27.000Z
|
2021-06-23T15:10:41.000Z
|
test.py
|
IrekPrzybylo/DogBreedDNN
|
7c429694c648351cd23544b9b1321665c1866c7c
|
[
"MIT"
] | 2
|
2021-04-08T14:55:04.000Z
|
2021-04-22T10:59:28.000Z
|
import os
import cv2
import numpy as np
import pandas as pd
import tensorflow as tf
def read_image(path, size):
"""
Load image from local storage
:param path: image path
:param size: image size
:return: loaded image
"""
image = cv2.imread(path, cv2.IMREAD_COLOR)
image = cv2.resize(image, (size, size))
image = image / 255.0
image = image.astype(np.float32)
return image
def recognize_image(img_path):
"""
Recognize image
    Takes an image and predicts the breed based on the trained model
:param img_path: Image Path
:return: top 4 matching breeds, most similar breed
"""
path = "input/"
train_path = os.path.join(path, "train/*")
test_path = os.path.join(path, "test/*")
labels_path = os.path.join(path, "labels.csv")
labels_df = pd.read_csv(labels_path)
breed = labels_df["breed"].unique()
id2breed = {i: name for i, name in enumerate(breed)}
## Model
model = tf.keras.models.load_model("model.h5")
image = read_image(img_path, 224)
image = np.expand_dims(image, axis=0)
pred = model.predict(image)[0]
label_idx = np.argmax(pred)
top3 = np.argsort(pred)[-4:][::-1]
possible_breed = list()
print(str(id2breed[top3[0]]).replace("_", " "))
possible_breed.append(str(id2breed[top3[0]]).replace("_", " "))
possible_breed.append(str(id2breed[top3[1]]).replace("_", " "))
possible_breed.append(str(id2breed[top3[2]]).replace("_", " "))
possible_breed.append(str(id2breed[top3[3]]).replace("_", " "))
return str(id2breed[label_idx]).replace("_", " "), possible_breed
if __name__ == "__main__":
    print(recognize_image("input/test/sample.jpg"))  # example call; file name is a hypothetical placeholder
| 28.793103
| 69
| 0.649102
|
import os
import cv2
import numpy as np
import pandas as pd
import tensorflow as tf
def read_image(path, size):
"""
Load image from local storage
:param path: image path
:param size: image size
:return: loaded image
"""
image = cv2.imread(path, cv2.IMREAD_COLOR)
image = cv2.resize(image, (size, size))
image = image / 255.0
image = image.astype(np.float32)
return image
def recognize_image(img_path):
"""
Recognize image
    Takes an image and predicts the breed based on the trained model
:param img_path: Image Path
:return: top 4 matching breeds, most similar breed
"""
path = "input/"
train_path = os.path.join(path, "train/*")
test_path = os.path.join(path, "test/*")
labels_path = os.path.join(path, "labels.csv")
labels_df = pd.read_csv(labels_path)
breed = labels_df["breed"].unique()
id2breed = {i: name for i, name in enumerate(breed)}
## Model
model = tf.keras.models.load_model("model.h5")
image = read_image(img_path, 224)
image = np.expand_dims(image, axis=0)
pred = model.predict(image)[0]
label_idx = np.argmax(pred)
top3 = np.argsort(pred)[-4:][::-1]
possible_breed = list()
print(str(id2breed[top3[0]]).replace("_", " "))
possible_breed.append(str(id2breed[top3[0]]).replace("_", " "))
possible_breed.append(str(id2breed[top3[1]]).replace("_", " "))
possible_breed.append(str(id2breed[top3[2]]).replace("_", " "))
possible_breed.append(str(id2breed[top3[3]]).replace("_", " "))
return str(id2breed[label_idx]).replace("_", " "), possible_breed
if __name__ == "__main__":
    print(recognize_image("input/test/sample.jpg"))  # example call; file name is a hypothetical placeholder
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0302eb3a4c90c0f51ea5fb46f2c400e93479d849
| 236
|
py
|
Python
|
bitbots_misc/bitbots_bringup/scripts/launch_warning.py
|
MosHumanoid/bitbots_thmos_meta
|
f45ccc362dc689b69027be5b0d000d2a08580de4
|
[
"MIT"
] | null | null | null |
bitbots_misc/bitbots_bringup/scripts/launch_warning.py
|
MosHumanoid/bitbots_thmos_meta
|
f45ccc362dc689b69027be5b0d000d2a08580de4
|
[
"MIT"
] | 57
|
2019-03-02T10:59:05.000Z
|
2021-12-09T18:57:34.000Z
|
bitbots_misc/bitbots_bringup/scripts/launch_warning.py
|
MosHumanoid/bitbots_thmos_meta
|
f45ccc362dc689b69027be5b0d000d2a08580de4
|
[
"MIT"
] | 1
|
2019-07-28T11:26:47.000Z
|
2019-07-28T11:26:47.000Z
|
#!/usr/bin/env python3
import rospy
rospy.logerr("###\n###\n###\n###\n###\nYou didn't specifiy which robot you want to start!\nPlease add minibot:=true or wolfgang:=true or davros:=true behind you roslaunch.\n###\n###\n###\n###\n###")
| 47.2
| 198
| 0.648305
|
#!/usr/bin/env python3
import rospy
rospy.logerr("###\n###\n###\n###\n###\nYou didn't specifiy which robot you want to start!\nPlease add minibot:=true or wolfgang:=true or davros:=true behind you roslaunch.\n###\n###\n###\n###\n###")
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
395e3450ce5675d25cb67b1cbba1201ead7d4bd1
| 612
|
py
|
Python
|
cms_redirects/admin.py
|
mllrsohn/django-cms-redirects
|
3398528e44594adb708aa090d5b7867f619db10e
|
[
"BSD-3-Clause"
] | 8
|
2015-02-10T20:30:26.000Z
|
2020-05-31T20:20:51.000Z
|
cms_redirects/admin.py
|
mllrsohn/django-cms-redirects
|
3398528e44594adb708aa090d5b7867f619db10e
|
[
"BSD-3-Clause"
] | 5
|
2017-04-10T07:41:45.000Z
|
2021-12-20T08:49:35.000Z
|
cms_redirects/admin.py
|
mllrsohn/django-cms-redirects
|
3398528e44594adb708aa090d5b7867f619db10e
|
[
"BSD-3-Clause"
] | 8
|
2015-04-16T21:25:55.000Z
|
2018-09-27T11:15:12.000Z
|
from django.contrib import admin
from cms_redirects.models import CMSRedirect
admin.site.register(CMSRedirect, CMSRedirectAdmin)
| 32.210526
| 97
| 0.625817
|
from django.contrib import admin
from cms_redirects.models import CMSRedirect
class CMSRedirectAdmin(admin.ModelAdmin):
list_display = ('old_path', 'new_path', 'page', 'page_site', 'site', 'actual_response_code',)
list_filter = ('site',)
search_fields = ('old_path', 'new_path', 'page__title_set__title')
radio_fields = {'site': admin.VERTICAL}
fieldsets = [
('Source', {
"fields": ('site','old_path',)
}),
('Destination', {
"fields": ('new_path','page', 'response_code',)
}),
]
admin.site.register(CMSRedirect, CMSRedirectAdmin)
| 0
| 0
| 0
| 459
| 0
| 0
| 0
| 0
| 23
|
06aa52314a9c965d93128b5579494aaf803987c3
| 646
|
py
|
Python
|
cybox/test/objects/win_network_route_entry_test.py
|
siemens/python-cybox
|
b692a98c8a62bd696e2a0dda802ada7359853482
|
[
"BSD-3-Clause"
] | null | null | null |
cybox/test/objects/win_network_route_entry_test.py
|
siemens/python-cybox
|
b692a98c8a62bd696e2a0dda802ada7359853482
|
[
"BSD-3-Clause"
] | null | null | null |
cybox/test/objects/win_network_route_entry_test.py
|
siemens/python-cybox
|
b692a98c8a62bd696e2a0dda802ada7359853482
|
[
"BSD-3-Clause"
] | 1
|
2019-04-16T18:37:32.000Z
|
2019-04-16T18:37:32.000Z
|
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
if __name__ == "__main__":
unittest.main()
| 26.916667
| 77
| 0.752322
|
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from cybox.objects.win_network_route_entry_object import WinNetworkRouteEntry
from cybox.test import EntityTestCase, round_trip
from cybox.test.objects import ObjectTestCase
class TestWinNetworkRouteEntry(ObjectTestCase, unittest.TestCase):
object_type = "WindowsNetworkRouteEntryObjectType"
klass = WinNetworkRouteEntry
_full_dict = {
'nl_route_protocol': u"A protocol",
'nl_route_origin': u"An origin",
'xsi:type': object_type,
}
if __name__ == "__main__":
unittest.main()
| 0
| 0
| 0
| 277
| 0
| 0
| 0
| 108
| 91
|
b8dab34e2ddba5fa52d18ceed2c8f8efbaf24b94
| 3,710
|
py
|
Python
|
external/evolver_gff_featurestats2.py
|
dentearl/evolverSimControl
|
b3236debbc8d945a99aecb0988bd1f48f25913c3
|
[
"MIT"
] | 4
|
2018-12-01T13:49:12.000Z
|
2021-02-18T17:55:46.000Z
|
external/evolver_gff_featurestats2.py
|
dentearl/evolverSimControl
|
b3236debbc8d945a99aecb0988bd1f48f25913c3
|
[
"MIT"
] | null | null | null |
external/evolver_gff_featurestats2.py
|
dentearl/evolverSimControl
|
b3236debbc8d945a99aecb0988bd1f48f25913c3
|
[
"MIT"
] | 1
|
2021-04-10T15:05:11.000Z
|
2021-04-10T15:05:11.000Z
|
#!/usr/bin/env python
# Copyright (C) 2008-2011 by
# George Asimenos, Robert C. Edgar, Serafim Batzoglou and Arend Sidow.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##############################
import sys
import evolverSimControl.lib.evolver_gff as gff
FileName1 = sys.argv[1]
FileName2 = sys.argv[2]
Name1 = FileName1
Name2 = FileName2
GenomeLength1 = -1
GenomeLength2 = -1
if len(sys.argv) > 3:
Name1 = sys.argv[3]
if len(sys.argv) > 4:
Name2 = sys.argv[4]
if len(sys.argv) > 6:
GenomeLength1 = int(sys.argv[5])
GenomeLength2 = int(sys.argv[6])
ConstrainedFeatures = [ "CDS", "UTR", "NXE", "NGE" ]
Counts1, Bases1 = GetCounts(FileName1)
Counts2, Bases2 = GetCounts(FileName2)
Features = [ "CDS", "UTR", "NXE", "NGE", "island", "tandem", "Constrained" ]
Keys = Counts1.keys()
Keys.extend(Counts2.keys())
for Feature in Keys:
if Feature not in Features:
Features.append(Feature)
if GenomeLength1 != -1:
Features.append("Neutral")
Features.append("Total")
Counts1["Neutral"] = 0
Counts2["Neutral"] = 0
Counts1["Total"] = 0
Counts2["Total"] = 0
Bases1["Neutral"] = GenomeLength1 - Bases1["Constrained"]
Bases2["Neutral"] = GenomeLength2 - Bases2["Constrained"]
Bases1["Total"] = GenomeLength1
Bases2["Total"] = GenomeLength2
print " Feature 1=%8.8s 2=%8.8s Nr2-1 2-1 Pct Bases1 Bases2 Bases2-1 2-1 Pct" % (Name1, Name2)
print "================ ========== ========== ========== ======== ========== ========== ========== ========"
for Feature in Features:
n1 = Get(Counts1, Feature)
n2 = Get(Counts2, Feature)
dn = n2 - n1
b1 = Get(Bases1, Feature)
b2 = Get(Bases2, Feature)
db = b2 - b1
pn = PctChg(n1, n2)
pb = PctChg(b1, b2)
s = ""
s += "%16.16s" % Feature
s += " %10u" % n1
s += " %10u" % n2
s += " %+10d" % (n2 - n1)
s += " %7.7s%%" % pn
s += " %10u" % b1
s += " %10u" % b2
s += " %+10d" % (b2-b1)
s += " %7.7s%%" % pb
print s
| 28.106061
| 127
| 0.646092
|
#!/usr/bin/env python
# Copyright (C) 2008-2011 by
# George Asimenos, Robert C. Edgar, Serafim Batzoglou and Arend Sidow.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##############################
import sys
import evolverSimControl.lib.evolver_gff as gff
FileName1 = sys.argv[1]
FileName2 = sys.argv[2]
Name1 = FileName1
Name2 = FileName2
GenomeLength1 = -1
GenomeLength2 = -1
if len(sys.argv) > 3:
Name1 = sys.argv[3]
if len(sys.argv) > 4:
Name2 = sys.argv[4]
if len(sys.argv) > 6:
GenomeLength1 = int(sys.argv[5])
GenomeLength2 = int(sys.argv[6])
ConstrainedFeatures = [ "CDS", "UTR", "NXE", "NGE" ]
def Die(s):
print >> sys.stderr, sys.argv[0], "***ERROR***", s
sys.exit(1)
def DoRec():
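    # Callback invoked for each GFF record: accumulates per-feature base and record counts
    # in the module-level Bases/Counts dicts, and also rolls the constrained feature types
    # (CDS, UTR, NXE, NGE) into the aggregate "Constrained" totals.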
global Counts, Bases
Length = gff.End - gff.Start + 1
Feature = gff.Feature
if Feature not in Bases.keys():
Bases[Feature] = Length
Counts[Feature] = 1
else:
Bases[Feature] += Length
Counts[Feature] += 1
if Feature in ConstrainedFeatures:
Bases["Constrained"] += Length
Counts["Constrained"] += 1
def Get(L, k):
if k in L.keys():
return L[k]
return 0
def PctChg(x, y):
if x == 0:
if y == 0:
return "100"
else:
return "--"
else:
return str(100*(y-x)/x)
def GetCounts(FileName):
global Bases, Counts
Bases = {}
Counts = {}
Bases["Constrained"] = 0
Counts["Constrained"] = 0
gff.GetRecs(FileName, DoRec)
return Counts, Bases
Counts1, Bases1 = GetCounts(FileName1)
Counts2, Bases2 = GetCounts(FileName2)
Features = [ "CDS", "UTR", "NXE", "NGE", "island", "tandem", "Constrained" ]
Keys = Counts1.keys()
Keys.extend(Counts2.keys())
for Feature in Keys:
if Feature not in Features:
Features.append(Feature)
if GenomeLength1 != -1:
Features.append("Neutral")
Features.append("Total")
Counts1["Neutral"] = 0
Counts2["Neutral"] = 0
Counts1["Total"] = 0
Counts2["Total"] = 0
Bases1["Neutral"] = GenomeLength1 - Bases1["Constrained"]
Bases2["Neutral"] = GenomeLength2 - Bases2["Constrained"]
Bases1["Total"] = GenomeLength1
Bases2["Total"] = GenomeLength2
print " Feature 1=%8.8s 2=%8.8s Nr2-1 2-1 Pct Bases1 Bases2 Bases2-1 2-1 Pct" % (Name1, Name2)
print "================ ========== ========== ========== ======== ========== ========== ========== ========"
for Feature in Features:
n1 = Get(Counts1, Feature)
n2 = Get(Counts2, Feature)
dn = n2 - n1
b1 = Get(Bases1, Feature)
b2 = Get(Bases2, Feature)
db = b2 - b1
pn = PctChg(n1, n2)
pb = PctChg(b1, b2)
s = ""
s += "%16.16s" % Feature
s += " %10u" % n1
s += " %10u" % n2
s += " %+10d" % (n2 - n1)
s += " %7.7s%%" % pn
s += " %10u" % b1
s += " %10u" % b2
s += " %+10d" % (b2-b1)
s += " %7.7s%%" % pb
print s
| 0
| 0
| 0
| 0
| 0
| 649
| 0
| 0
| 115
|
62ff44c7a0f8ffe85637457045ee365bbf8e42f1
| 4,116
|
py
|
Python
|
LearningSafeSets/Model/SafeSet.py
|
alexliniger/AdversarialRoadModel
|
14157760687c22acc8b91c39128875005ada7563
|
[
"Apache-2.0"
] | 20
|
2020-07-17T06:32:32.000Z
|
2022-03-27T03:24:26.000Z
|
LearningSafeSets/Model/SafeSet.py
|
alexliniger/AdversarialRoadModel
|
14157760687c22acc8b91c39128875005ada7563
|
[
"Apache-2.0"
] | null | null | null |
LearningSafeSets/Model/SafeSet.py
|
alexliniger/AdversarialRoadModel
|
14157760687c22acc8b91c39128875005ada7563
|
[
"Apache-2.0"
] | 7
|
2020-07-19T07:16:01.000Z
|
2022-01-22T22:58:02.000Z
|
## Copyright 2020 Alexander Liniger
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
###########################################################################
###########################################################################
import torch.nn as nn
| 36.75
| 75
| 0.49757
|
## Copyright 2020 Alexander Liniger
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
###########################################################################
###########################################################################
import torch
import torch.nn as nn
import torch.nn.functional as F
class SafeSet(nn.Module):
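    # Fully-connected network mapping an n_states-dimensional input to a single sigmoid
    # output in [0, 1]; the hidden activation (and, for some options, depth and dropout)
    # is selected by config['activation'].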
def __init__(self,config):
super(SafeSet, self).__init__()
self.n_neurons = config['n_neurons']
self.n_batch = config['n_batch']
self.n_inputs = config['n_states']
if config['activation'] == "Softplus":
self.model = nn.Sequential(
nn.Linear(self.n_inputs, self.n_neurons),
nn.Softplus(beta=1),
nn.Linear(self.n_neurons, self.n_neurons),
nn.Softplus(beta=1),
nn.Linear(self.n_neurons, self.n_neurons),
nn.Softplus(beta=1),
nn.Linear(self.n_neurons, 1),
nn.Sigmoid()
)
elif config['activation'] == "Tanh":
self.model = nn.Sequential(
nn.Linear(self.n_inputs, self.n_neurons),
nn.Tanh(),
nn.Linear(self.n_neurons, self.n_neurons),
nn.Tanh(),
nn.Linear(self.n_neurons, self.n_neurons),
nn.Tanh(),
nn.Linear(self.n_neurons, 1),
nn.Sigmoid()
)
elif config['activation'] == "ReLU":
self.model = nn.Sequential(
nn.Linear(self.n_inputs, self.n_neurons),
nn.ReLU(),
nn.Linear(self.n_neurons, self.n_neurons),
nn.ReLU(),
nn.Linear(self.n_neurons, self.n_neurons),
nn.ReLU(),
nn.Linear(self.n_neurons, 1),
nn.Sigmoid()
)
elif config['activation'] == "ELU":
self.model = nn.Sequential(
nn.Linear(self.n_inputs, self.n_neurons),
nn.ELU(),
nn.Linear(self.n_neurons, self.n_neurons),
nn.ELU(),
nn.Linear(self.n_neurons, self.n_neurons),
nn.Dropout(0.5),
nn.ELU(),
nn.Linear(self.n_neurons, 1),
nn.Sigmoid()
)
elif config['activation'] == "ELU2":
self.model = nn.Sequential(
nn.Linear(self.n_inputs, self.n_neurons),
nn.ELU(),
nn.Linear(self.n_neurons, self.n_neurons),
nn.ELU(),
nn.Linear(self.n_neurons, 1),
nn.Sigmoid()
)
elif config['activation'] == "ELU6":
self.model = nn.Sequential(
nn.Linear(self.n_inputs, self.n_neurons),
nn.ELU(),
nn.Linear(self.n_neurons, self.n_neurons),
nn.ELU(),
nn.Linear(self.n_neurons, self.n_neurons),
nn.ELU(),
nn.Linear(self.n_neurons, self.n_neurons),
nn.ELU(),
nn.Linear(self.n_neurons, self.n_neurons),
nn.ELU(),
nn.Linear(self.n_neurons, self.n_neurons),
nn.ELU(),
nn.Linear(self.n_neurons, 1),
nn.Sigmoid()
)
else:
self.model = nn.Sequential(
nn.Linear(self.n_inputs, self.n_neurons),
nn.Linear(self.n_neurons, 1),
nn.Sigmoid()
)
def forward(self, input):
return self.model(input)
| 0
| 0
| 0
| 3,285
| 0
| 0
| 0
| 1
| 68
|
920fe3da4f1f82c7e5f82cc9d08809d75841703f
| 12,412
|
py
|
Python
|
arpa-rfp-evaluation/summary_reports.py
|
cityofasheville/abi-vendro-processing
|
72ed24216ee4772d72abd26b956d7f97ed23bdd3
|
[
"MIT"
] | null | null | null |
arpa-rfp-evaluation/summary_reports.py
|
cityofasheville/abi-vendro-processing
|
72ed24216ee4772d72abd26b956d7f97ed23bdd3
|
[
"MIT"
] | 1
|
2021-09-02T19:58:09.000Z
|
2021-09-02T19:58:09.000Z
|
arpa-rfp-evaluation/summary_reports.py
|
cityofasheville/data-processing-scripts
|
72ed24216ee4772d72abd26b956d7f97ed23bdd3
|
[
"MIT"
] | null | null | null |
import json
from os.path import exists
import sys
SERVICE_ACCOUNT_FILE = None
SCOPES = ['https://www.googleapis.com/auth/spreadsheets', 'https://www.googleapis.com/auth/drive']
INPUTS_EVAL_MAPPING_ID = None
OUTPUTS_MASTER_ID = None
INPUTS_SPREADSHEET_ID = None
sheetService = None
#########################################################
###########################################################################
inputs = None
if exists('./inputs.json'):
with open('inputs.json', 'r') as file:
inputs = json.load(file)
else:
print('You must create an inputs.json file')
sys.exit()
INPUTS_EVAL_MAPPING_ID = inputs["INPUTS_EVAL_MAPPING_ID"]
OUTPUTS_MASTER_ID = inputs["OUTPUTS_MASTER_ID"]
INPUTS_SPREADSHEET_ID = inputs['INPUTS_SPREADSHEET_ID']
SERVICE_ACCOUNT_FILE = inputs['SERVICE_ACCOUNT_FILE']
print('Set up services')
setUpServices()
sheet = sheetService.spreadsheets()
print('Load weights')
links_df, weight_df = grab_weights_and_links(INPUTS_SPREADSHEET_ID)
# Calls list building function
print('Build project summary list')
all_project_scores = build_project_summary_list(links_df, weight_df, INPUTS_EVAL_MAPPING_ID)
print('Summarize all the projects')
list_to_append, maxMinList = summarize_all_project(all_project_scores, links_df)
updateSheet(list_to_append, OUTPUTS_MASTER_ID, "Summary!A2:AA1000")
updateSheet(maxMinList, OUTPUTS_MASTER_ID, "Potential Issues!A3:AA1000")
print('Finished, Party time')
| 44.328571
| 172
| 0.677409
|
from os import link
from googleapiclient.discovery import build
import json
from csv import reader
from google.oauth2 import service_account
import pandas as pd
from os.path import exists
import sys
import numpy as np
from functools import reduce
import time
SERVICE_ACCOUNT_FILE = None
SCOPES = ['https://www.googleapis.com/auth/spreadsheets', 'https://www.googleapis.com/auth/drive']
INPUTS_EVAL_MAPPING_ID = None
OUTPUTS_MASTER_ID = None
INPUTS_SPREADSHEET_ID = None
sheetService = None
#########################################################
def setUpServices():
global sheetService
creds = service_account.Credentials.from_service_account_file( SERVICE_ACCOUNT_FILE, scopes=SCOPES )
sheetService = build('sheets', 'v4', credentials=creds)
def grab_weights_and_links(inputSpreadsheetId):
# Gets score weights from the evaluation sheet, and project links, and puts these things into 2
# dfs to merge with the main summary df later
sheet = sheetService.spreadsheets()
results = sheet.values().get(spreadsheetId=inputSpreadsheetId,range='Score Weighting!C8:D27').execute()
values = results.get('values', [])
del values[13]
del values[6]
weight_df = pd.DataFrame(values, columns=['weight_in_cat', 'global_weight'])
weight_df['weight_in_cat'] = weight_df['weight_in_cat'].astype(float)
weight_df['global_weight'] = weight_df['global_weight'].astype(float)
# Gets project links from the evaluation assignment sheet
sheet = sheetService.spreadsheets()
results = sheet.values().get(spreadsheetId=inputSpreadsheetId,range='Eligible Proposals and Assignments!A2:C').execute()
values = results.get('values', [])
links_df = pd.DataFrame(values, columns=['project_number', 'project_name', 'project_link'])
return(links_df, weight_df)
def list_tab_links(evaluationMappingSheetId):
sheet = sheetService.spreadsheets()
results = sheet.values().get(spreadsheetId=evaluationMappingSheetId,range='Tab Mapping!A1:AB').execute()
tabs = results.get('values', [])
tab_links_df = pd.DataFrame(tabs)
tab_links_df.iloc[0,0] = 'Project'
tab_links_df.columns = tab_links_df.iloc[0]
tab_links_df.drop(tab_links_df.index[0], inplace=True)
tab_links_df.reset_index(inplace=True)
return(tab_links_df)
def build_project_summary_list(links_df, weight_df, evaluationMappingSheetId):
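    # Walks every evaluator spreadsheet listed on the "Sheet Mapping" tab, reads each project
    # tab, converts the none/low/medium/high ratings to numeric values, applies the category
    # weights (Equitable Community Impact 40, Project Plan and Evaluation 40, Organizational
    # Qualification 20), and returns one summary row per evaluator/project pair.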
tab_links_df = list_tab_links(evaluationMappingSheetId)
# Get spreadsheet links/ids from the spreadsheet
total_list = []
sheet = sheetService.spreadsheets()
results = sheet.values().get(spreadsheetId=evaluationMappingSheetId,range='Sheet Mapping!A2:C').execute()
link_ss_values = results.get('values', [])
for thing in link_ss_values:
id = thing[1]
print(' Sheet ' + thing[0])
sheet = sheetService.spreadsheets()
sheets = sheet.get(spreadsheetId=id, fields='sheets/properties/title').execute()
ranges = [sheet['properties']['title'] for sheet in sheets['sheets']]
format_list = []
# Goes through each tab and gets values
for tab in ranges[1:]:
print (' Tab ' + tab)
results = sheet.values().get(spreadsheetId=id,range=tab +'!A1:E24').execute()
values = results.get('values', [])
data = values[6:]
#Make a dataframe, then change the rating values to numbers
df = pd.DataFrame(data, columns = ["question_num", 'question', 'rating', 'guidance', 'scoring_category'])
df = df.replace(r'^\s*$', np.nan, regex=True)
if df['rating'].isnull().values.any():
ECI_score = "Not Complete"
PPE_score = "Not Complete"
OQ_score = "Not Complete"
total_score = "Not Complete"
else:
#df["rating"] = df[~df["rating"].isnull()].str.lower()
df["rating"] = df["rating"].str.lower()
df["rating"].replace({"none": 0, "low": 1/3, "medium": 2/3, 'high':1}, inplace=True)
#add more columns
df['total_category_weight'] = df['scoring_category']
df["total_category_weight"].replace({"Equitable Community Impact": 40, "Project Plan and Evaluation": 40, "Organizational Qualification": 20}, inplace=True)
# Adding df of scoring weights to the df I just created
df = pd.concat([df, weight_df], axis=1)
df['category_rating'] = df['rating'].astype(float) * df['weight_in_cat'].astype(float)
#Calc category value by global weight
cat_weights_global = df.groupby(['scoring_category']).category_rating.sum()
#Formatting output
#What are the 11, 8, and 9? They're the total of the "weight within category".
#That makes more sense if you take a look at the scoring sheet-
#"Evaluation Score" Score Weighting tab, column
ECI_score = (cat_weights_global['Equitable Community Impact']/11) * 40
PPE_score = (cat_weights_global['Project Plan and Evaluation']/8) * 40
OQ_score = (cat_weights_global['Organizational Qualification']/9) * 20
total_score = round(ECI_score + PPE_score + OQ_score, 2)
#Grabbing info from list to put into the right output format
project_name = values[1][1].split(": ",1)[1]
project_number = project_name.split(' ')[0]
evaluator = values[0][1].split(": ",1)[1]
evaluator=evaluator.strip()
link = thing[2]
# Using the df from the beginning of this function to look up the links
# to individual tabs on evaluator sheets. Appending that to the end of the list.
eval_link = tab_links_df[evaluator].iloc[int(project_number)-1]
format_list = [project_number, project_name, evaluator, link, total_score, ECI_score, PPE_score, OQ_score, eval_link]
total_list.append(format_list)
time.sleep(1)
time.sleep(3)
return(total_list)
def maxMinDifference(df):
#Get the links dataframe, merge the two together to be able to output links for each project
df.merge(links_df['project_number'], on='project_number', how='left')
#Calculate the difference between the min and max score for each column
maxMinDF = pd.DataFrame(df.groupby(['project_number', 'project_name'])['total_score'].agg(['max','min']))
maxMinDF['totalScoreVaries'] = maxMinDF['max'] - maxMinDF['min']
ECIMaxMinDF = pd.DataFrame(df.groupby(['project_number', 'project_name'])['ECI_score'].agg(['max','min']))
ECIMaxMinDF['ECIScoreVaries'] = maxMinDF['max'] - maxMinDF['min']
PPEMaxMinDF = pd.DataFrame(df.groupby(['project_number', 'project_name'])['PPE_score'].agg(['max','min']))
PPEMaxMinDF['PPEScoreVaries'] = maxMinDF['max'] - maxMinDF['min']
OQMaxMinDF = pd.DataFrame(df.groupby(['project_number', 'project_name'])['OQ_score'].agg(['max','min']))
OQMaxMinDF['OQScoreVaries'] = maxMinDF['max'] - maxMinDF['min']
#Merge all these calculations together into one dataframe
maxMinDF = maxMinDF.merge(ECIMaxMinDF['ECIScoreVaries'], on=['project_number', 'project_name'])
maxMinDF = maxMinDF.merge(PPEMaxMinDF['PPEScoreVaries'], on=['project_number', 'project_name'])
maxMinDF = maxMinDF.merge(OQMaxMinDF['OQScoreVaries'], on=['project_number', 'project_name'])
maxMinDF.drop(['max', 'min'], axis=1, inplace=True)
columnList = ['totalScoreVaries', 'ECIScoreVaries', 'PPEScoreVaries', 'OQScoreVaries']
    #If the difference is greater than 50, "True" is assigned, otherwise np.nan. This is so we can use .dropna to
#drop the rows which have all np.nans in them.
for entry in columnList:
maxMinDF[maxMinDF[entry] >= 50] = True
maxMinDF[maxMinDF[entry] !=True] = np.nan
maxMinDF = maxMinDF.dropna( how='all', subset=['totalScoreVaries', 'ECIScoreVaries', 'PPEScoreVaries', 'OQScoreVaries'])
maxMinDF = maxMinDF.replace(np.nan, '')
maxMinDF = maxMinDF.reset_index()
print(maxMinDF)
maxMinList = maxMinDF.values.tolist()
return(maxMinList)
def summarize_all_project(my_list, links_df):
# Creating initial df
my_df = pd.DataFrame(my_list, columns=['project_number', 'project_name', 'evaluator', 'link_to_proposal',
'total_score', 'ECI_score', 'PPE_score', 'OQ_score', 'eval_link'])
my_df = my_df.round(2)
    #Calculating mean and median, renaming columns and resetting index (so that project #s show up when converted to list)
numericScoreDF = my_df[pd.to_numeric(my_df['total_score'], errors='coerce').notnull()]
numericScoreDF['total_score'] = numericScoreDF['total_score'].astype(float)
numericScoreDF['ECI_score'] = numericScoreDF['ECI_score'].astype(float)
numericScoreDF['PPE_score'] = numericScoreDF['PPE_score'].astype(float)
numericScoreDF['OQ_score'] = numericScoreDF['OQ_score'].astype(float)
maxMinList = maxMinDifference(numericScoreDF)
summary_df = pd.DataFrame(numericScoreDF.groupby(['project_number', 'project_name'])['total_score', 'ECI_score', 'PPE_score', 'OQ_score'].mean())
summary_df = summary_df.reset_index()
median_df = pd.DataFrame(numericScoreDF.groupby(['project_name'])['total_score'].median())
median_df = median_df.rename({'total_score':'median_score'}, axis=1)
# Creating string of all scores per project
my_df['total_score'] = my_df['total_score'].astype(str)
individual_score_list_df = pd.DataFrame(my_df.groupby(['project_name'])['total_score'].apply(', '.join).reset_index())
individual_score_list_df= individual_score_list_df.rename({'total_score':'score_list'}, axis=1)
# Creating string of all links
eval_links_df = pd.DataFrame(my_df.groupby(['project_name'])['eval_link'].apply(', '.join).reset_index())
# Merging the various dfs to create 1 summary
summary_df=summary_df.merge(median_df, on='project_name')
summary_df=summary_df.merge(individual_score_list_df, on='project_name')
summary_df = summary_df.merge(links_df[['project_number', 'project_link']], on='project_number', how='left')
summary_df=summary_df.merge(eval_links_df, on='project_name')
# Reordering columns so the info is in the correct order in the list
summary_df = summary_df[['project_number', 'project_name', 'project_link', 'median_score', 'total_score',
'ECI_score', 'PPE_score', 'OQ_score', 'score_list', 'eval_link']]
summary_df = summary_df.round(2)
final_list = summary_df.values.tolist()
# evals is string of the links to evaluation tabs
# I'm making it a list and appending it to the final_list, so that each link
# will end up in a separate column
for entry in final_list:
evals = entry.pop()
evals = list(evals.split(', '))
entry.extend(evals)
return(final_list, maxMinList)
def updateSheet(my_list, spreadSheetID, range):
resource = {
"majorDimension": "ROWS",
"values": my_list
}
sheetService.spreadsheets().values().update(
spreadsheetId=spreadSheetID,
range=range,
body=resource,
valueInputOption="USER_ENTERED").execute()
###########################################################################
inputs = None
if exists('./inputs.json'):
with open('inputs.json', 'r') as file:
inputs = json.load(file)
else:
print('You must create an inputs.json file')
sys.exit()
INPUTS_EVAL_MAPPING_ID = inputs["INPUTS_EVAL_MAPPING_ID"]
OUTPUTS_MASTER_ID = inputs["OUTPUTS_MASTER_ID"]
INPUTS_SPREADSHEET_ID = inputs['INPUTS_SPREADSHEET_ID']
SERVICE_ACCOUNT_FILE = inputs['SERVICE_ACCOUNT_FILE']
print('Set up services')
setUpServices()
sheet = sheetService.spreadsheets()
print('Load weights')
links_df, weight_df = grab_weights_and_links(INPUTS_SPREADSHEET_ID)
# Calls list building function
print('Build project summary list')
all_project_scores = build_project_summary_list(links_df, weight_df, INPUTS_EVAL_MAPPING_ID)
print('Summarize all the projects')
list_to_append, maxMinList = summarize_all_project(all_project_scores, links_df)
updateSheet(list_to_append, OUTPUTS_MASTER_ID, "Summary!A2:AA1000")
updateSheet(maxMinList, OUTPUTS_MASTER_ID, "Potential Issues!A3:AA1000")
print('Finished, Party time')
| 0
| 0
| 0
| 0
| 0
| 10,595
| 0
| 33
| 337
|
f838c963eb88052d512eb77b99721f3c3bb5120a
| 2,041
|
py
|
Python
|
test/algorithms/initial_points/test_initial_point.py
|
kevinsung/qiskit-nature
|
407533e05ca33fa53eb4e9cd7b089a0a99f9540e
|
[
"Apache-2.0"
] | null | null | null |
test/algorithms/initial_points/test_initial_point.py
|
kevinsung/qiskit-nature
|
407533e05ca33fa53eb4e9cd7b089a0a99f9540e
|
[
"Apache-2.0"
] | null | null | null |
test/algorithms/initial_points/test_initial_point.py
|
kevinsung/qiskit-nature
|
407533e05ca33fa53eb4e9cd7b089a0a99f9540e
|
[
"Apache-2.0"
] | null | null | null |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test InitialPoint"""
import unittest
if __name__ == "__main__":
unittest.main()
| 31.890625
| 77
| 0.696717
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test InitialPoint"""
import unittest
from unittest.mock import patch
from test import QiskitNatureTestCase
from qiskit_nature.algorithms.initial_points import InitialPoint
class TestInitialPoint(QiskitNatureTestCase):
"""Test Initial Point"""
@patch.multiple(InitialPoint, __abstractmethods__=set())
def setUp(self) -> None:
super().setUp()
# pylint: disable=abstract-class-instantiated
self.initial_point = InitialPoint() # type: ignore
def test_to_numpy_array(self):
"""Test to_numpy_array."""
with self.assertRaises(NotImplementedError):
self.initial_point.to_numpy_array()
def test_get_ansatz(self):
"""Test get ansatz."""
with self.assertRaises(NotImplementedError):
_ = self.initial_point.ansatz
def test_set_ansatz(self):
"""Test set ansatz."""
with self.assertRaises(NotImplementedError):
self.initial_point.ansatz = None
def test_get_grouped_property(self):
"""Test get grouped_property."""
with self.assertRaises(NotImplementedError):
_ = self.initial_point.grouped_property
def test_set_grouped_property(self):
"""Test set grouped_property."""
with self.assertRaises(NotImplementedError):
self.initial_point.grouped_property = None
def test_compute(self):
"""Test compute."""
with self.assertRaises(NotImplementedError):
self.initial_point.compute(None, None)
if __name__ == "__main__":
unittest.main()
| 0
| 202
| 0
| 1,111
| 0
| 0
| 0
| 69
| 90
|
ca810b6f0d9eabaeb4ad71ee8608ce994583b4bc
| 22,392
|
py
|
Python
|
Compressive_Sampling.py
|
tnakaicode/Python-for-Signal-Processing
|
b610ca377564e115a0dbd5a8cdcc2ad195c3b162
|
[
"CC-BY-3.0"
] | null | null | null |
Compressive_Sampling.py
|
tnakaicode/Python-for-Signal-Processing
|
b610ca377564e115a0dbd5a8cdcc2ad195c3b162
|
[
"CC-BY-3.0"
] | null | null | null |
Compressive_Sampling.py
|
tnakaicode/Python-for-Signal-Processing
|
b610ca377564e115a0dbd5a8cdcc2ad195c3b162
|
[
"CC-BY-3.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# ## Compressive sampling Overview
# In our previous discussion, we saw that imposing bandlimited-ness on our class of signals permits point-wise sampling of our signal and then later perfect reconstruction. It turns out that by imposing *sparsity* we can also obtain perfect reconstruction irrespective of whether or not we have satisfied the sampling rate limits imposed by Shannon's sampling theorem. This is extremely important in practice because many signals are naturally sparse, so collecting samples at high rates only to dump most of them as the signal is compressed is expensive and wasteful.
# ## What Are Sparse Signals?
# Let's carefully discuss what we mean by *sparse* in this context. A signal $f$ is sparse if it can be expressed in very few nonzero components ($\mathbf{s}$) with respect to a given basis ($ \mathbf{\Psi} $ ). In other words, in matrix-vector language:
#
# $ \mathbf{f} = \mathbf{\Psi} \mathbf{s} $
#
# where $ || \mathbf{s} ||_0 \leq N $ where $N$ is the length of the vector and $|| \cdot||_0$ counts the number of nonzero elements in $\mathbf{s}$. Furthermore, we don't actually collect $N$ samples point-wise as we did in the Shannon sampling case. Rather, we measure $\mathbf{f}$ indirectly as $\mathbf{y}$ with another matrix as in:
#
# $\mathbf{y} = \mathbf{\Phi f} = \mathbf{\Phi} \mathbf{\Psi} \mathbf{s} = \mathbf{\Theta s} $
#
# where $\mathbf{\Theta}$ is an $M \times N$ matrix and $ M < N $ is the number of measurements. This setup means we have two problems to solve. First, how to design a *stable* measurement matrix $\mathbf{\Phi}$ and then, second, how to reconstruct $ \mathbf{f} $ from $ \mathbf{y} $.
#
# This may look like a standard linear algebra problem but since $ \mathbf{\Theta} $ has fewer rows than columns, the solution is necessarily ill-posed. This is where we inject the sparsity concept! Suppose that $f$ is $K$-sparse ( $||f||_0=K$ ), then if we somehow knew *which* $K$ columns of $ \mathbf{\Theta} $ matched the $K$ non-zero entries in $\mathbf{s}$, then $\mathbf{\Theta}$ would be $ M \times K $ where we could make $M > K$ and then have a stable inverse.
#
# This bit of reasoning is encapsulated in the following statement for any vector $\mathbf{v}$ sharing the same $K$ non-zero entries as $\mathbf{s}$, we have
#
# $$1-\epsilon \leq \frac{|| \mathbf{\Theta v} ||_2}{|| \mathbf{v} ||_2} \leq 1+\epsilon $$
#
# which is another way of saying that $\mathbf{\Theta}$ preserves the lengths of $K$-sparse vectors. Of course we don't know ahead of time which $K$ components to use, but it turns out that this condition is sufficient for a stable inverse of $\mathbf{\Theta}$ if it holds for any $3K$-sparse vector $\mathbf{v}$. This is the *Restricted Isometry Property* (RIP). Unfortunately, in order to use this sufficient condition, we would have to propose a $\mathbf{\Theta}$ and then check all possible combinations of nonzero entries in the $N$-length vector $\mathbf{v}$. As you may guess, this is prohibitive.
#
# Alternatively, we can approach stability by defining *incoherence* between the measurement matrix $\mathbf{\Phi}$ and the sparse basis $\mathbf{\Psi}$ as when any of the columns of one cannot be expressed as a small subset of the columns of the other. For example, if we have delta-spikes for $\mathbf{\Phi}$ as the row-truncated identity matrix
#
# $$\mathbf{\Phi} = \mathbf{I}_{M \times N} $$
#
# and the discrete Fourier transform matrix for $\mathbf{\Psi}$ as
#
# $\mathbf{\Psi} = \begin{bmatrix}\\\\
# e^{-j 2\pi k n/N}\\\\
# \end{bmatrix}_{N \times N}$
#
# Then we could not write any of the columns of $\mathbf{\Phi}$ using just a few of the columns of $\mathbf{\Psi}$.
#
# It turns out that picking the measuring $M \times N$ matrix randomly according to a Gaussian zero-mean, $1/N$ variance distribution and using the identity matrix as $\mathbf{\Psi}$, the resulting $\mathbf{\Theta}$ matrix can be shown to satisfy RIP with a high probability. This means that we can recover $N$-length $K$-sparse signals with a high probability from just $M \ge c K \log (N/K)$ samples where $c$ is a small constant. Furthermore, it also turns out that we can use any orthonormal basis for $\mathbf{\Psi}$, not just the identity matrix, and these relations will all still hold.
# ## Reconstructing Sparse Signals
# Now that we have a way, by using random matrices, to satisfy the RIP, we are ready to consider the reconstruction problem. The first impulse is to compute the least-squares solution to this problem as
#
# $$ \mathbf{s}^* = \mathbf{\Theta}^T (\mathbf{\Theta}\mathbf{\Theta}^T)^{-1}\mathbf{y} $$
#
# But a moment's thought may convince you that since $\mathbf{\Theta}$ is a random matrix, most likely with lots of non-zero entries, it is highly unlikely that $\mathbf{s}^* $ will turn out to be sparse. There is actually a deeper geometric intuition as to why this happens, but let's first consider another way of solving this so that the $\mathbf{s}^*$ is $K$-sparse. Suppose instead we shuffle through combinations of $K$ nonzero entries in $\mathbf{s}$ until we satisfy the measurements $\mathbf{y}$. Stated mathematically, this means
#
# $$ \mathbf{s}^* = argmin || \mathbf{s}^* ||_0 $$
#
# where
#
# $$ \mathbf{\Theta} \mathbf{s}^* = \mathbf{y} $$
#
# It can be shown that with $M=K+1$ iid Gaussian measurements, this optimization will recover a $K$-sparse signal exactly with high probability. Unfortunately, this is numerically unstable in addition to being an NP-complete problem.
#
# Thus, we need another tractable way to approach this problem. It turns out that when a signal is sparse, it usually means that the nonzero terms are highly asymmetric, meaning that if there are $K$ terms, then most likely there is one term that is dominant (i.e. of much larger magnitude) and that dwarfs the other nonzero terms. Geometrically, this means that in $N$-dimensional space, the sparse signal is very close to one (or, maybe just a few) of the axes.
#
# It turns out that one can bypass this combinatorial problem using $L_1$ minimization. To examine this, let's digress and look at the main difference between $L_2$ and $L_1$ minimization problems.
# reference:
# `http://users.ece.gatech.edu/justin/ssp2007`
# ## $L_2$ vs. $L_1$ Optimization
# The classic constrained least squares problem is the following:
#
# min $||\mathbf{x}||_2^2$
#
# where $x_1 + 2 x_2 = 1$
#
# with corresponding solution illustrated below.
#
# [1]
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
x1 = np.linspace(-1, 1, 10)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x1, (1 - x1) / 2)
ax.add_patch(Circle((0, 0), 1 / np.sqrt(5), alpha=0.3))
ax.plot(1 / 5, 2 / 5, 'rs')
ax.axis('equal')
ax.set_xlabel('$x_1$', fontsize=24)
ax.set_ylabel('$x_2$', fontsize=24)
ax.grid()
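# (Added note) As a quick numerical cross-check -- an illustrative sketch that is not
# part of the original notebook -- the minimum-norm ($L_2$) solution of the constraint
# $x_1 + 2 x_2 = 1$ has the closed form $x^* = A^T(AA^T)^{-1}b$, which the pseudo-inverse
# computes directly and which should land on the red square above.
A_ls = np.array([[1.0, 2.0]])            # constraint row: x_1 + 2*x_2 = 1
b_ls = np.array([1.0])
x_l2 = np.linalg.pinv(A_ls).dot(b_ls)    # least-norm solution, expect [0.2, 0.4]
print(x_l2)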
# Note that the line is the constraint so that any solution to this problem must be on this line (i.e. satisfy the constraint). The $L_2$ solution is the one that just touches the perimeter of the circle. This is because, in $L_2$, the unit-ball has the shape of a circle and represents all solutions of a fixed $L_2$ length. Thus, the one of smallest length that intersects the line is the one that satisfies the stated minimization problem. Intuitively, this means that we *inflate* a ball at the origin and stop when it touches the constraint. The point of contact is our $L_2$ minimization solution.
#
# Now, let's do the same problem in the $L_1$ norm
#
# min $||\mathbf{x}||_1=|x_1|+|x_2|$
#
# where $x_1 + 2 x_2 = 1$
#
# In this case the constant-norm unit-ball contour in the $L_1$ norm is a diamond-shape instead of a circle. Comparing the graph below to the last shows that the solutions found are different. Geometrically, this is because the line tilts over in such a way that the inflating circular $L_2$ ball hits a point of tangency that is different from the $L_1$ ball because the $L_1$ ball creeps out mainly along the principal axes and is less influenced by the tilt of the line. This effect is much more pronounced in higher $N$-dimensional spaces where $L_1$-balls get more *spikey*.
#
# The fact that the $L_1$ problem is less sensitive to the tilt of the line is crucial since that tilt (i.e. orientation) is random due to the choice of random measurement matrices. So, for this problem to be well-posed, we need to *not* be influenced by the orientation of any particular choice of random matrix and this is what casting this as an $L_1$ minimization provides.
# [2]
import matplotlib.patches
import matplotlib.transforms
r = matplotlib.patches.RegularPolygon((0, 0), 4, 1 / 2, np.pi / 2, alpha=0.5)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x1, (1 - x1) / 2)
ax.plot(0, 1 / 2, 'rs')
ax.add_patch(r)
ax.grid()
ax.set_xlabel('$x_1$', fontsize=24)
ax.set_ylabel('$x_2$', fontsize=24)
ax.axis('equal')
# To explore this a bit, let's consider using the `cvxopt` package (Python ver 2.6 used here). This can be cast as a linear programming problem as follows:
#
# min $||\mathbf{t}||_1 = |t_1| + |t_2|$
#
# subject to:
#
# $-t_1 < x_1 < t_1$
#
# $-t_2 < x_2 < t_2$
#
# $x_1 + 2 x_2 = 1$
#
# $t_1 > 0$
#
# $t_2 > 0$
#
# where the last two constraints are already implied by the first two and are written out just for clarity. This can be implemented and solved in `cvxopt` as the following:
# [3]
from cvxopt import matrix as matrx  # don't overwrite numpy matrix class
from cvxopt import solvers
# t1,x1,t2,x2
c = matrx([1, 0, 1, 0], (4, 1), 'd')
G = matrx([[-1, -1, 0, 0], # column-0
[1, -1, 0, 0], # column-1
[0, 0, -1, -1], # column-2
[0, 0, 1, -1], # column-3
], (4, 4), 'd')
# (4,1) is 4-rows,1-column, 'd' is float type spec
h = matrx([0, 0, 0, 0], (4, 1), 'd')
A = matrx([0, 1, 0, 2], (1, 4), 'd')
b = matrx([1], (1, 1), 'd')
sol = solvers.lp(c, G, h, A, b)
x1 = sol['x'][1]
x2 = sol['x'][3]
print('x=%3.2f' % x1)
print('y=%3.2f' % x2)
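# (Added note) A small sanity check that is not in the original text: from the diamond
# plot above we expect the L1 minimizer to sit on the x_2 axis at (0, 1/2); the
# tolerance below is an assumption about the solver's accuracy.
print(np.allclose([x1, x2], [0.0, 0.5], atol=1e-4))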
# ## Example Gaussian random matrices
# Let's try out our earlier result about random Gaussian matrices and see if we can reconstruct an unknown $\mathbf{s}$ vector using $L_1$ minimization.
# [56]
import numpy as np
import scipy.linalg
def rearrange_G(x):
    'setup to put inequalities matrix with last 1/2 of elements as main variables'
n = x.shape[0]
return np.hstack([x[:, np.arange(0, n, 2) + 1], x[:, np.arange(0, n, 2)]])
K = 2 # components
Nf = 128 # number of samples
M = 12 # > K log2(Nf/K); num of measurements
s = np.zeros((Nf, 1)) # sparse vector we want to find
s[0] = 1 # set the K nonzero entries
s[1] = 0.5
# np.random.seed(5489) # set random seed for reproducibility
Phi = np.matrix(np.random.randn(M, Nf) *
                np.sqrt(1 / Nf))  # random Gaussian matrix
y = Phi * s # measurements
# -- setup L1 minimization problem --
# inequalities matrix
G = matrx(rearrange_G(scipy.linalg.block_diag(
*[np.matrix([[-1, -1], [1, -1.0]]), ] * Nf)))
# objective function row-matrix
c = matrx(np.hstack([np.ones(Nf), np.zeros(Nf)]))
# RHS for inequalities
h = matrx([0.0, ] * (Nf * 2), (Nf * 2, 1), 'd')
# equality constraint matrix
A = matrx(np.hstack([Phi * 0, Phi]))
# RHS for equality constraints
b = matrx(y)
sol = solvers.lp(c, G, h, A, b)
# nonzero entries
nze = np.array(sol['x']).flatten()[:Nf].round(2).nonzero()
print(np.array(sol['x'])[nze])
# That worked out! However, if you play around with this example enough with different random matrices (unset the ``seed`` statement above), you will find that it does not *always* find the correct answer. This is because the guarantees about reconstruction are all stated probabilistically (i.e. "high-probability"). This is another major difference between this and Shannon sampling.
#
# Let's encapsulate the above $L_1$ minimization code so we can use it later.
# [5]
#from cStringIO import StringIO
# ## Example: Sparse Fourier Transform
# As an additional example, let us consider the Fourier transform and see if we can recover the sparse Fourier transform from a small set of measurements. For simplicity, we will assume that the time domain signal is real which automatically means that the Fourier transform is symmetric.
# [141]
def dftmatrix(N=8):
'compute inverse DFT matrices'
n = np.arange(N)
U = matrx(np.exp(1j * 2 * np.pi / N * n * n[:, None])) / np.sqrt(N)
return np.matrix(U)
Nf = 128
K = 3 # components
M = 8 # > K log2(Nf/K); num of measurements
s = np.zeros((Nf, 1)) # sparse vector we want to find
s[0] = 1 # set the K nonzero entries
s[1] = 0.5
s[-1] = 0.5 # symmetric to keep inverse Fourier transform real
Phi = dftmatrix(Nf)[:M, :] # take M-rows
y = Phi * s # measurements
# have to assert the type here on my hardware
sol = L1_min(Phi.real, y.real.astype(np.float64), K)
print(np.allclose(s.flatten(), sol))
# [140]
plt.plot(sol)
plt.plot(y.real)
# ## Uniform Uncertainty Principle
# $\Phi$ obeys a UUP for sets of size $K$ if
#
# <center>
# $$ 0.8 \frac{M}{N} ||f||_2^2 \leq || \Phi f||_2^2 \leq 1.2 \frac{M}{N} ||f||_2^2 $$
# </center>
#
# Measurements that satisfy this are defined as *incoherent*. Given that $f$ is $K$-sparse and we measure
# $y=\Phi f$, then we search for the sparsest vector that explains the $y$ measurements and thus find $f$ as follows:
#
# <center>
# $min_f \\#\lbrace t: f(t) \ne 0 \rbrace $ where $\Phi f = y$
# </center>
# Note that the hash mark is the size (i.e. cardinality) of the set. This means that we are looking for the fewest individual points for $f$ that satisfy the constraints. Unfortunately, this is not practically possible, so we must use the $\mathbb{L}_1$ norm as a proxy for sparsity.
#
# Suppose $f$ is $K$-sparse and that $\Phi$ obeys UUP for sets of size $4K$. Then we measure $y=\Phi f$ and then solve
#
# <center>
# $min_f ||f||_1 $ where $\Phi f = y$
# </center>
# to recover $f$ exactly and we can use $M > K \log N$ measurements, where the number of measurements is approximately equal to the number of active components. Let's consider a concrete example of how this works.
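# (Added note) A quick empirical illustration of the UUP-style concentration above --
# a sketch with assumed sizes, not part of the original text. With the entries of
# $\Phi$ drawn iid from $N(0, 1/N)$, the ratio $||\Phi f||_2^2 / ((M/N)||f||_2^2)$
# should concentrate around 1 for a sparse $f$; rerun a few times to see the spread.
N_chk, M_chk, K_chk = 128, 32, 4
f_chk = np.zeros(N_chk)
f_chk[np.random.permutation(N_chk)[:K_chk]] = np.random.randn(K_chk)
Phi_chk = np.random.randn(M_chk, N_chk) / np.sqrt(N_chk)
ratio = np.linalg.norm(Phi_chk.dot(f_chk))**2 / ((M_chk / N_chk) * np.linalg.norm(f_chk)**2)
print(ratio)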
# ### Example: Sampling Sinusoids
# Here, we sample in the time-domain, given that we know the signal is sparse in the frequency domain.
#
# <center>
# $$ \hat{f}(\omega) = \sum_{i=1}^K \alpha_i \delta(\omega_i-\omega) $$
# </center>
#
# which means that it consists of $K$-sparse nonzero elements. Therefore, the time domain signal is
#
# <center>
# $$ f(t) = \sum_{i=1}^K \alpha_i e^{i \omega_i t} $$
# </center>
#
# where the $\alpha_i$ and $\omega_i$ are unknown. We want to solve for these unknowns by taking $M \gt K \log N$ samples of $f$.
# The problem we want to solve is
#
# $ min_g || \hat{g} ||_{L_1}$
#
# subject to
#
# $ g(t_m)=f(t_m) $
#
# The trick here is that we are minimizing in the frequency-domain while the constraints are in the time-domain. To make things easier, we will restrict our attention to real time-domain signals $f$ and we will only reconstruct the even-indexed time-samples from the signal. This means we need a way of expressing the inverse Fourier Transform as a matrix of equality constraints. The assumption of real-valued time-domain signals implies the following symmetry in the frequency-domain:
#
# $ F(k) = F(N-k)^* $
#
# where $F$ is the Fourier transform of $f$ and the asterisk denotes complex conjugation and $k\in \lbrace 0,1,..N-1\rbrace$ and $N$ is the Fourier Transform length. To make things even more tractable we will assume the time-domain signal is even, which means real-valued Fourier transform values.
#
# Suppose that $\mathbf{U}_N$ is the $N$-point DFT-matrix. Note that we always assume $N$ is even. Since we are dealing with only real-valued signals, the transform is symmetric, so we only need half of the spectrum computed. It turns out that the even-indexed time-domain samples can be constructed as follows:
#
# $ \mathbf{f_{even}} = \mathbf{U}_{N/2} \begin{bmatrix}\\\\
# F(0)+F(N/2)^* \\\\
# F(1)+F(N/2-1)^* \\\\
# F(2)+F(N/2-2)^* \\\\
# \dots \\\\
# F(N/2-1)+F(1)^*
# \end{bmatrix}$
#
# We can further simplify this by breaking this into real (superscript $R$) and imaginary (superscript $I$) parts and keeping only the real part
#
# $$\mathbf{f_{even}} = \mathbf{U}_{N/2}^R
# \begin{bmatrix}\\\\
# F(0)^R+F(N/2)^R \\\\
# F(1)^R+F(N/2-1)^R \\\\
# F(2)^R+F(N/2-2)^R \\\\
# \dots \\\\
# F(N/2-1)^R+F(1)^R
# \end{bmatrix}
# +
# \mathbf{U}^I_N
# \begin{bmatrix} \\\\
# -F(0)^I+F(N/2)^I \\\\
# -F(1)^I+F(N/2-1)^I \\\\
# -F(2)^I+F(N/2-2)^I \\\\
# \dots \\\\
# -F(N/2-1)^I+F(1)^I
# \end{bmatrix}$$
#
# But we are going to force all the $F(k)^I$ to be zero in our example. Note that the second term should have a $\mathbf{U}_{N/2}$ in it instead of $\mathbf{U}_N$ but there is something wrong with the javascript parser for that bit of TeX.
#
# Now, let's see if we can walk through this step-by-step to make sure our optimization can actually work. Note that we don't need the second term on the right with the $F^I$ terms because by our construction, $F$ is real.
# [358]
def dftmatrix(N=8):
'compute inverse DFT matrices'
n = np.arange(N)
U = np.matrix(np.exp(1j * 2 * np.pi / N * n * n[:, None])) / np.sqrt(N)
return np.matrix(U)
def Q_rmatrix(Nf=8):
'implements the reordering, adding, and stacking of the matrices above'
Q_r = np.matrix(np.hstack([np.eye(int(Nf / 2)), np.eye(int(Nf / 2)) * 0])
+ np.hstack([np.zeros((int(Nf / 2), 1)), np.fliplr(np.eye(int(Nf / 2))), np.zeros((int(Nf / 2), int(Nf / 2) - 1))]))
return Q_r
Nf = 8
F = np.zeros((Nf, 1)) # 8-point DFT
F[0] = 1 # DC-term, constant signal
n = np.arange(Nf / 2)
ft = dftmatrix(Nf).H * F # this gives the constant signal
Q_r = Q_rmatrix(Nf)
U = dftmatrix(Nf / 2)  # half inverse DFT matrix
feven = U.real * Q_r * F # half the size
print(np.allclose(feven, ft[::2])) # retrieved even-numbered samples
# [359]
# let's try this with another sparse frequency-domain signal
F = np.zeros((Nf, 1))
F[1] = 1
F[Nf - 1] = 1 # symmetric part
ft = dftmatrix(Nf).H * F # this gives the constant signal
feven = U.real * Q_r * F # half the size
print(np.allclose(feven, ft[::2])) # retrieved even-numbered samples
plt.plot(np.arange(Nf), ft.real, np.arange(Nf)[::2], feven, 'o')
plt.xlabel('$t$', fontsize=22)
plt.ylabel('$f(t)$', fontsize=22)
plt.title('even-numbered samples')
# We can use the above cell to create more complicated real signals. You can experiment with the cell below. Just remember to impose the symmetry condition!
# [360]
Nf = 32 # must be even
F = np.zeros((Nf, 1))
# set values and corresponding symmetry conditions
F[7] = 1
F[12] = 0.5
F[9] = -0.25
F[Nf - 9] = -0.25
F[Nf - 12] = 0.5
F[Nf - 7] = 1 # symmetric part
Q_r = Q_rmatrix(Nf)
U = dftmatrix(Nf / 2)  # half inverse DFT matrix
ft = dftmatrix(Nf).H * F # this gives the constant signal
feven = U.real * Q_r * F # half the size
print(np.allclose(feven, ft[::2])) # retrieved even-numbered samples
plt.plot(np.arange(Nf), ft.real, np.arange(Nf)[::2], feven, 'o')
plt.xlabel('$t$', fontsize=22)
plt.ylabel('$f(t)$', fontsize=22)
plt.title('even-numbered samples')
# Now that we have gone through all that trouble to create the even-samples matrix, we can finally put it into the framework of the $L_1$ minimization problem:
#
# $ min_F || \mathbf{F} ||_{L_1}$
#
# subject to
#
# $ \mathbf{U}_{N/2}^R \mathbf{Q}_r \mathbf{F}= \mathbf{f} $
# [361]
def rearrange_G(x):
    'setup to put inequalities matrix with first 1/2 of elements as main variables'
n = x.shape[0]
return np.hstack([x[:, np.arange(0, n, 2) + 1], x[:, np.arange(0, n, 2)]])
K = 2 # components
Nf = 128 # number of samples
M = 18 # > K log(N); num of measurements
# setup signal DFT as F
F = np.zeros((Nf, 1))
F[1] = 1
F[2] = 0.5
F[Nf - 1] = 1 # symmetric parts
F[Nf - 2] = 0.5
ftime = dftmatrix(Nf).H * F # this gives the time-domain signal
ftime = ftime.real # it's real anyway
time_samples = [0, 2, 4, 12, 14, 16, 18, 24,
34, 36, 38, 40, 44, 46, 52, 56, 54, 62]
half_indexed_time_samples = (np.array(time_samples) / 2).astype(int)
Phi = dftmatrix(Nf / 2).real * Q_rmatrix(Nf)
Phi_i = Phi[half_indexed_time_samples, :]
# inequalities matrix
G = matrx(rearrange_G(scipy.linalg.block_diag(
*[np.matrix([[-1, -1], [1, -1.0]]), ] * Nf)))
# objective function row-matrix
c = matrx(np.hstack([np.zeros(Nf), np.ones(Nf)]))
# RHS for inequalities
h = matrx([0.0, ] * (Nf * 2), (Nf * 2, 1), 'd')
# equality constraint matrix
A = matrx(np.hstack([Phi_i, Phi_i * 0]))
# RHS for equality constraints
b = matrx(ftime[time_samples])
sol = solvers.lp(c, G, h, A, b)
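# (Added note) The original cell stops at the solver call; as a hedged sanity check we
# can read off the recovered spectrum (the first Nf variables under this ordering) and
# compare it with the F constructed above. The tolerance below is an assumption.
F_recovered = np.array(sol['x']).flatten()[:Nf].round(4)
print(np.flatnonzero(F_recovered))   # expect indices 1, 2, Nf-2, Nf-1 if recovery succeeded
print(np.allclose(F_recovered, F.flatten(), atol=1e-2))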
# [12]
import itertools as it
def dftmatrix(N=8):
'compute inverse DFT matrices'
n = np.arange(N)
U = np.matrix(np.exp(1j * 2 * np.pi / N * n * n[:, None])) / np.sqrt(N)
return np.matrix(U)
M = 3
# np.random.seed(5489) # set random seed for reproducibility
Psi = dftmatrix(128)
Phi = np.random.randn(M, 128)
s = np.zeros((128, 1))
s[0] = 1
s[10] = 1
Theta = Phi * Psi
y = Theta * s
for i in it.combinations(range(128), 2):
sstar = np.zeros((128, 1))
sstar[np.array(i)] = 1
if np.allclose(Theta * sstar, y):
break
else:
print('no solution')
# [9]
# [ ]
| 41.543599
| 626
| 0.675196
|
#!/usr/bin/env python
# coding: utf-8
# ## Compressive sampling Overview
# In our previous discussion, we saw that imposing bandlimited-ness on our class of signals permits point-wise sampling of our signal and then later perfect reconstruction. It turns out that by imposing *sparsity* we can also obtain perfect reconstruction irrespective of whether or not we have satisfied the sampling rate limits imposed by Shannon's sampling theorem. This is extremely important in practice because many signals are naturally sparse, so collecting samples at high rates only to dump most of them as the signal is compressed is expensive and wasteful.
# ## What Are Sparse Signals?
# Let's carefully discuss what we mean by *sparse* in this context. A signal $f$ is sparse if it can be expressed in very few nonzero components ($\mathbf{s}$) with respect to a given basis ($ \mathbf{\Psi} $ ). In other words, in matrix-vector language:
#
# $ \mathbf{f} = \mathbf{\Psi} \mathbf{s} $
#
# where $ || \mathbf{s} ||_0 \leq N $ where $N$ is the length of the vector and $|| \cdot||_0$ counts the number of nonzero elements in $\mathbf{s}$. Furthermore, we don't actually collect $N$ samples point-wise as we did in the Shannon sampling case. Rather, we measure $\mathbf{f}$ indirectly as $\mathbf{y}$ with another matrix as in:
#
# $\mathbf{y} = \mathbf{\Phi f} = \mathbf{\Phi} \mathbf{\Psi} \mathbf{s} = \mathbf{\Theta s} $
#
# where $\mathbf{\Theta}$ is an $M \times N$ matrix and $ M < N $ is the number of measurements. This setup means we have two problems to solve. First, how to design a *stable* measurement matrix $\mathbf{\Phi}$ and then, second, how to reconstruct $ \mathbf{f} $ from $ \mathbf{y} $.
#
# This may look like a standard linear algebra problem but since $ \mathbf{\Theta} $ has fewer rows than columns, the solution is necessarily ill-posed. This is where we inject the sparsity concept! Suppose that $f$ is $K$-sparse ( $||f||_0=K$ ), then if we somehow knew *which* $K$ columns of $ \mathbf{\Theta} $ matched the $K$ non-zero entries in $\mathbf{s}$, then $\mathbf{\Theta}$ would be $ M \times K $ where we could make $M > K$ and then have a stable inverse.
#
# This bit of reasoning is encapsulated in the following statement for any vector $\mathbf{v}$ sharing the same $K$ non-zero entries as $\mathbf{s}$, we have
#
# $$1-\epsilon \leq \frac{|| \mathbf{\Theta v} ||_2}{|| \mathbf{v} ||_2} \leq 1+\epsilon $$
#
# which is another way of saying that $\mathbf{\Theta}$ preserves the lengths of $K$-sparse vectors. Of course we don't know ahead of time which $K$ components to use, but it turns out that this condition is sufficient for a stable inverse of $\mathbf{\Theta}$ if it holds for any $3K$-sparse vector $\mathbf{v}$. This is the *Restricted Isometry Property* (RIP). Unfortunately, in order to use this sufficient condition, we would have to propose a $\mathbf{\Theta}$ and then check all possible combinations of nonzero entries in the $N$-length vector $\mathbf{v}$. As you may guess, this is prohibitive.
#
# Alternatively, we can approach stability by defining *incoherence* between the measurement np.matrix $\mathbf{\Phi}$ and the sparse basis $\mathbf{\Psi}$ as when any of the columns of one cannot be expressed as a small subset of the columns of the other. For example, if we have delta-spikes for $\mathbf{\Phi}$ as the row-truncated identity np.matrix
#
# $$\mathbf{\Phi} = \mathbf{I}_{M \times N} $$
#
# and the discrete Fourier transform matrix for $\mathbf{\Psi}$ as
#
# $\mathbf{\Psi} = \begin{bmatrix}\\\\
# e^{-j 2\pi k n/N}\\\\
# \end{bmatrix}_{N \times N}$
#
# Then we could not write any of the columns of $\mathbf{\Phi}$ using just a few of the columns of $\mathbf{\Psi}$.
#
# It turns out that picking the measuring $M \times N$ matrix randomly according to a Gaussian zero-mean, $1/N$ variance distribution and using the identity matrix as $\mathbf{\Psi}$, the resulting $\mathbf{\Theta}$ matrix can be shown to satisfy RIP with a high probability. This means that we can recover $N$-length $K$-sparse signals with a high probability from just $M \ge c K \log (N/K)$ samples where $c$ is a small constant. Furthermore, it also turns out that we can use any orthonormal basis for $\mathbf{\Psi}$, not just the identity matrix, and these relations will all still hold.
# ## Reconstructing Sparse Signals
# Now that we have a way, by using random matrices, to satisfy the RIP, we are ready to consider the reconstruction problem. The first impulse is to compute the least-squares solution to this problem as
#
# $$ \mathbf{s}^* = \mathbf{\Theta}^T (\mathbf{\Theta}\mathbf{\Theta}^T)^{-1}\mathbf{y} $$
#
# But a moment's thought may convince you that since $\mathbf{\Theta}$ is a random matrix, most likely with lots of non-zero entries, it is highly unlikely that $\mathbf{s}^* $ will turn out to be sparse. There is actually a deeper geometric intuition as to why this happens, but let's first consider another way of solving this so that the $\mathbf{s}^*$ is $K$-sparse. Suppose instead we shuffle through combinations of $K$ nonzero entries in $\mathbf{s}$ until we satisfy the measurements $\mathbf{y}$. Stated mathematically, this means
#
# $$ \mathbf{s}^* = argmin || \mathbf{s}^* ||_0 $$
#
# where
#
# $$ \mathbf{\Theta} \mathbf{s}^* = \mathbf{y} $$
#
# It can be shown that with $M=K+1$ iid Gaussian measurements, this optimization will recover a $K$-sparse signal exactly with high probability. Unfortunately, this is numerically unstable in addition to being an NP-complete problem.
#
# Thus, we need another tractable way to approach this problem. It turns out that when a signal is sparse, it usually means that the nonzero terms are highly asymmetric, meaning that if there are $K$ terms, then most likely there is one term that is dominant (i.e. of much larger magnitude) and that dwarfs the other nonzero terms. Geometrically, this means that in $N$-dimensional space, the sparse signal is very close to one (or, maybe just a few) of the axes.
#
# It turns out that one can bypass this combinatorial problem using $L_1$ minimization. To examine this, let's digress and look at the main difference between $L_2$ and $L_1$ minimization problems.
# reference:
# `http://users.ece.gatech.edu/justin/ssp2007`
# ## $L_2$ vs. $L_1$ Optimization
# The classic constrained least squares problem is the following:
#
# min $||\mathbf{x}||_2^2$
#
# where $x_1 + 2 x_2 = 1$
#
# with corresponding solution illustrated below.
#
# [1]
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
x1 = np.linspace(-1, 1, 10)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x1, (1 - x1) / 2)
ax.add_patch(Circle((0, 0), 1 / np.sqrt(5), alpha=0.3))
ax.plot(1 / 5, 2 / 5, 'rs')
ax.axis('equal')
ax.set_xlabel('$x_1$', fontsize=24)
ax.set_ylabel('$x_2$', fontsize=24)
ax.grid()
# Note that the line is the constraint so that any solution to this problem must be on this line (i.e. satisfy the constraint). The $L_2$ solution is the one that just touches the perimeter of the circle. This is because, in $L_2$, the unit-ball has the shape of a circle and represents all solutions of a fixed $L_2$ length. Thus, the one of smallest length that intersects the line is the one that satisfies the stated minimization problem. Intuitively, this means that we *inflate* a ball at the origin and stop when it touches the constraint. The point of contact is our $L_2$ minimization solution.
#
# Now, let's do the same problem in the $L_1$ norm
#
# min $||\mathbf{x}||_1=|x_1|+|x_2|$
#
# where $x_1 + 2 x_2 = 1$
#
# In this case the constant-norm unit-ball contour in the $L_1$ norm is a diamond-shape instead of a circle. Comparing the graph below to the last shows that the solutions found are different. Geometrically, this is because the line tilts over in such a way that the inflating circular $L_2$ ball hits a point of tangency that is different from the $L_1$ ball because the $L_1$ ball creeps out mainly along the principal axes and is less influenced by the tilt of the line. This effect is much more pronounced in higher $N$-dimensional spaces where $L_1$-balls get more *spikey*.
#
# The fact that the $L_1$ problem is less sensitive to the tilt of the line is crucial since that tilt (i.e. orientation) is random due to the choice of random measurement matrices. So, for this problem to be well-posed, we need to *not* be influenced by the orientation of any particular choice of random matrix and this is what casting this as an $L_1$ minimization provides.
# [2]
from matplotlib.patches import Rectangle
import matplotlib.patches
import matplotlib.transforms
r = matplotlib.patches.RegularPolygon((0, 0), 4, 1 / 2, np.pi / 2, alpha=0.5)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x1, (1 - x1) / 2)
ax.plot(0, 1 / 2, 'rs')
ax.add_patch(r)
ax.grid()
ax.set_xlabel('$x_1$', fontsize=24)
ax.set_ylabel('$x_2$', fontsize=24)
ax.axis('equal')
# To explore this a bit, let's consider using the `cvxopt` package (Python ver 2.6 used here). This can be cast as a linear programming problem as follows:
#
# min $||\mathbf{t}||_1 = |t_1| + |t_2|$
#
# subject to:
#
# $-t_1 < x_1 < t_1$
#
# $-t_2 < x_2 < t_2$
#
# $x_1 + 2 x_2 = 1$
#
# $t_1 > 0$
#
# $t_2 > 0$
#
# where the last two constraints are already implied by the first two and are written out just for clarity. This can be implemented and solved in `cvxopt` as the following:
# [3]
from cvxopt import matrix as matrx  # don't overwrite numpy matrix class
from cvxopt import solvers
# t1,x1,t2,x2
c = matrx([1, 0, 1, 0], (4, 1), 'd')
G = matrx([[-1, -1, 0, 0], # column-0
[1, -1, 0, 0], # column-1
[0, 0, -1, -1], # column-2
[0, 0, 1, -1], # column-3
], (4, 4), 'd')
# (4,1) is 4-rows,1-column, 'd' is float type spec
h = matrx([0, 0, 0, 0], (4, 1), 'd')
A = matrx([0, 1, 0, 2], (1, 4), 'd')
b = matrx([1], (1, 1), 'd')
sol = solvers.lp(c, G, h, A, b)
x1 = sol['x'][1]
x2 = sol['x'][3]
print('x=%3.2f' % x1)
print('y=%3.2f' % x2)
# ## Example Gaussian random matrices
# Let's try out our earlier result about random Gaussian matrices and see if we can reconstruct an unknown $\mathbf{s}$ vector using $L_1$ minimization.
# [56]
import numpy as np
import scipy.linalg
def rearrange_G(x):
    'setup to put inequalities matrix with last 1/2 of elements as main variables'
n = x.shape[0]
return np.hstack([x[:, np.arange(0, n, 2) + 1], x[:, np.arange(0, n, 2)]])
K = 2 # components
Nf = 128 # number of samples
M = 12 # > K log2(Nf/K); num of measurements
s = np.zeros((Nf, 1)) # sparse vector we want to find
s[0] = 1 # set the K nonzero entries
s[1] = 0.5
# np.random.seed(5489) # set random seed for reproducibility
Phi = np.matrix(np.random.randn(M, Nf) *
                np.sqrt(1 / Nf))  # random Gaussian matrix
y = Phi * s # measurements
# -- setup L1 minimization problem --
# inequalities matrix
G = matrx(rearrange_G(scipy.linalg.block_diag(
*[np.matrix([[-1, -1], [1, -1.0]]), ] * Nf)))
# objective function row-matrix
c = matrx(np.hstack([np.ones(Nf), np.zeros(Nf)]))
# RHS for inequalities
h = matrx([0.0, ] * (Nf * 2), (Nf * 2, 1), 'd')
# equality constraint matrix
A = matrx(np.hstack([Phi * 0, Phi]))
# RHS for equality constraints
b = matrx(y)
sol = solvers.lp(c, G, h, A, b)
# nonzero entries
nze = np.array(sol['x']).flatten()[:Nf].round(2).nonzero()
print(np.array(sol['x'])[nze])
# That worked out! However, if you play around with this example enough with different random matrices (unset the ``seed`` statement above), you will find that it does not *always* find the correct answer. This is because the guarantees about reconstruction are all stated probabilistically (i.e. "high-probability"). This is another major difference between this and Shannon sampling.
#
# Let's encapsulate the above $L_1$ minimization code so we can use it later.
# [5]
#from cStringIO import StringIO
import sys
def L1_min(Phi, y, K):
    # inequalities matrix
M, Nf = Phi.shape
G = matrx(rearrange_G(scipy.linalg.block_diag(
*[np.matrix([[-1, -1], [1, -1.0]]), ] * Nf)))
    # objective function row-matrix
c = matrx(np.hstack([np.ones(Nf), np.zeros(Nf)]))
# RHS for inequalities
h = matrx([0.0, ] * (Nf * 2), (Nf * 2, 1), 'd')
    # equality constraint matrix
A = matrx(np.hstack([Phi * 0, Phi]))
# RHS for equality constraints
b = matrx(y)
# suppress standard output
old_stdout = sys.stdout
#s.stdout = mystdout = StringIO()
#s.stdout = mystdout
sol = solvers.lp(c, G, h, A, b)
# restore standard output
sys.stdout = old_stdout
sln = np.array(sol['x']).flatten()[:Nf].round(4)
return sln
# ## Example: Sparse Fourier Transform
# As an additional example, let us consider the Fourier transform and see if we can recover the sparse Fourier transform from a small set of measurements. For simplicity, we will assume that the time domain signal is real which automatically means that the Fourier transform is symmetric.
# [141]
def dftmatrix(N=8):
'compute inverse DFT matrices'
n = np.arange(N)
U = matrx(np.exp(1j * 2 * np.pi / N * n * n[:, None])) / np.sqrt(N)
return np.matrix(U)
Nf = 128
K = 3 # components
M = 8 # > K log2(Nf/K); num of measurements
s = np.zeros((Nf, 1)) # sparse vector we want to find
s[0] = 1 # set the K nonzero entries
s[1] = 0.5
s[-1] = 0.5 # symmetric to keep inverse Fourier transform real
Phi = dftmatrix(Nf)[:M, :] # take M-rows
y = Phi * s # measurements
# have to assert the type here on my hardware
sol = L1_min(Phi.real, y.real.astype(np.float64), K)
print(np.allclose(s.flatten(), sol))
# [140]
plt.plot(sol)
plt.plot(y.real)
# ## Uniform Uncertainty Principle
# $\Phi$ obeys a UUP for sets of size $K$ if
#
# <center>
# $$ 0.8 \frac{M}{N} ||f||_2^2 \leq || \Phi f||_2^2 \leq 1.2 \frac{M}{N} ||f||_2^2 $$
# </center>
#
# Measurements that satisfy this are defined as *incoherent*. Given that $f$ is $K$-sparse and we measure
# $y=\Phi f$, then we search for the sparsest vector that explains the $y$ measurements and thus find $f$ as follows:
#
# <center>
# $min_f \\#\lbrace t: f(t) \ne 0 \rbrace $ where $\Phi f = y$
# </center>
# Note that the hash mark is the size (i.e. cardinality) of the set. This means that we are looking for the fewest individual points for $f$ that satisfy the constraints. Unfortunately, this is not practically possible, so we must use the $\mathbb{L}_1$ norm as a proxy for sparsity.
#
# Suppose $f$ is $K$-sparse and that $\Phi$ obeys UUP for sets of size $4K$. Then we measure $y=\Phi f$ and then solve
#
# <center>
# $min_f ||f||_1 $ where $\Phi f = y$
# </center>
# to recover $f$ exactly and we can use $M > K \log N$ measurements, where the number of measurements is approximately equal to the number of active components. Let's consider a concrete example of how this works.
# ### Example: Sampling Sinusoids
# Here, we sample in the time-domain, given that we know the signal is sparse in the frequency domain.
#
# <center>
# $$ \hat{f}(\omega) = \sum_{i=1}^K \alpha_i \delta(\omega_i-\omega) $$
# </center>
#
# which means that it consists of $K$-sparse nonzero elements. Therefore, the time domain signal is
#
# <center>
# $$ f(t) = \sum_{i=1}^K \alpha_i e^{i \omega_i t} $$
# </center>
#
# where the $\alpha_i$ and $\omega_i$ are unknown. We want to solve for these unknowns by taking $M \gt K \log N$ samples of $f$.
# The problem we want to solve is
#
# $ min_g || \hat{g} ||_{L_1}$
#
# subject to
#
# $ g(t_m)=f(t_m) $
#
# The trick here is that we are minimizing in the frequency-domain while the constraints are in the time-domain. To make things easier, we will restrict our attention to real time-domain signals $f$ and we will only reconstruct the even-indexed time-samples from the signal. This means we need a way of expressing the inverse Fourier Transform as a matrix of equality constraints. The assumption of real-valued time-domain signals implies the following symmetry in the frequency-domain:
#
# $ F(k) = F(N-k)^* $
#
# where $F$ is the Fourier transform of $f$ and the asterisk denotes complex conjugation and $k\in \lbrace 0,1,..N-1\rbrace$ and $N$ is the Fourier Transform length. To make things even more tractable we will assume the time-domain signal is even, which means real-valued Fourier transform values.
#
# Suppose that $\mathbf{U}_N$ is the $N$-point DFT-matrix. Note that we always assume $N$ is even. Since we are dealing with only real-valued signals, the transform is symmetric, so we only need half of the spectrum computed. It turns out that the even-indexed time-domain samples can be constructed as follows:
#
# $ \mathbf{f_{even}} = \mathbf{U}_{N/2} \begin{bmatrix}\\\\
# F(0)+F(N/2)^* \\\\
# F(1)+F(N/2-1)^* \\\\
# F(2)+F(N/2-2)^* \\\\
# \dots \\\\
# F(N/2-1)+F(1)^*
# \end{bmatrix}$
#
# We can further simplify this by breaking this into real (superscript $R$) and imaginary (superscript $I$) parts and keeping only the real part
#
# $$\mathbf{f_{even}} = \mathbf{U}_{N/2}^R
# \begin{bmatrix}\\\\
# F(0)^R+F(N/2)^R \\\\
# F(1)^R+F(N/2-1)^R \\\\
# F(2)^R+F(N/2-2)^R \\\\
# \dots \\\\
# F(N/2-1)^R+F(1)^R
# \end{bmatrix}
# +
# \mathbf{U}^I_N
# \begin{bmatrix} \\\\
# -F(0)^I+F(N/2)^I \\\\
# -F(1)^I+F(N/2-1)^I \\\\
# -F(2)^I+F(N/2-2)^I \\\\
# \dots \\\\
# -F(N/2-1)^I+F(1)^I
# \end{bmatrix}$$
#
# But we are going to force all the $F(k)^I$ to be zero in our example. Note that the second term should have a $\mathbf{U}_{N/2}$ in it instead of $\mathbf{U}_N$ but there is something wrong with the javascript parser for that bit of TeX.
#
# Now, let's see if we can walk through this step-by-step to make sure our optimization can actually work. Note that we don't need the second term on the right with the $F^I$ terms because by our construction, $F$ is real.
# [358]
def dftmatrix(N=8):
'compute inverse DFT matrices'
n = np.arange(N)
U = np.matrix(np.exp(1j * 2 * np.pi / N * n * n[:, None])) / np.sqrt(N)
return np.matrix(U)
def Q_rmatrix(Nf=8):
'implements the reordering, adding, and stacking of the matrices above'
Q_r = np.matrix(np.hstack([np.eye(int(Nf / 2)), np.eye(int(Nf / 2)) * 0])
+ np.hstack([np.zeros((int(Nf / 2), 1)), np.fliplr(np.eye(int(Nf / 2))), np.zeros((int(Nf / 2), int(Nf / 2) - 1))]))
return Q_r
Nf = 8
F = np.zeros((Nf, 1)) # 8-point DFT
F[0] = 1 # DC-term, constant signal
n = np.arange(Nf / 2)
ft = dftmatrix(Nf).H * F # this gives the constant signal
Q_r = Q_rmatrix(Nf)
U = dftmatrix(Nf / 2)  # half inverse DFT matrix
feven = U.real * Q_r * F # half the size
print(np.allclose(feven, ft[::2])) # retrieved even-numbered samples
# [359]
# let's try this with another sparse frequency-domain signal
F = np.zeros((Nf, 1))
F[1] = 1
F[Nf - 1] = 1 # symmetric part
ft = dftmatrix(Nf).H * F # this gives the constant signal
feven = U.real * Q_r * F # half the size
print(np.allclose(feven, ft[::2])) # retrieved even-numbered samples
plt.plot(np.arange(Nf), ft.real, np.arange(Nf)[::2], feven, 'o')
plt.xlabel('$t$', fontsize=22)
plt.ylabel('$f(t)$', fontsize=22)
plt.title('even-numbered samples')
# We can use the above cell to create more complicated real signals. You can experiment with the cell below. Just remember to impose the symmetry condition!
# [360]
Nf = 32 # must be even
F = np.zeros((Nf, 1))
# set values and corresponding symmetry conditions
F[7] = 1
F[12] = 0.5
F[9] = -0.25
F[Nf - 9] = -0.25
F[Nf - 12] = 0.5
F[Nf - 7] = 1 # symmetric part
Q_r = Q_rmatrix(Nf)
U = dftmatrix(Nf / 2)  # half inverse DFT matrix
ft = dftmatrix(Nf).H * F # this gives the constant signal
feven = U.real * Q_r * F # half the size
print(np.allclose(feven, ft[::2])) # retrieved even-numbered samples
plt.plot(np.arange(Nf), ft.real, np.arange(Nf)[::2], feven, 'o')
plt.xlabel('$t$', fontsize=22)
plt.ylabel('$f(t)$', fontsize=22)
plt.title('even-numbered samples')
# Now that we have gone through all that trouble to create the even-samples matrix, we can finally put it into the framework of the $L_1$ minimization problem:
#
# $ min_F || \mathbf{F} ||_{L_1}$
#
# subject to
#
# $ \mathbf{U}_{N/2}^R \mathbf{Q}_r \mathbf{F}= \mathbf{f} $
# [361]
def rearrange_G(x):
    'setup to put inequalities matrix with first 1/2 of elements as main variables'
n = x.shape[0]
return np.hstack([x[:, np.arange(0, n, 2) + 1], x[:, np.arange(0, n, 2)]])
K = 2 # components
Nf = 128 # number of samples
M = 18 # > K log(N); num of measurements
# setup signal DFT as F
F = np.zeros((Nf, 1))
F[1] = 1
F[2] = 0.5
F[Nf - 1] = 1 # symmetric parts
F[Nf - 2] = 0.5
ftime = dftmatrix(Nf).H * F # this gives the time-domain signal
ftime = ftime.real # it's real anyway
time_samples = [0, 2, 4, 12, 14, 16, 18, 24,
34, 36, 38, 40, 44, 46, 52, 56, 54, 62]
half_indexed_time_samples = (np.array(time_samples) / 2).astype(int)
Phi = dftmatrix(Nf / 2).real * Q_rmatrix(Nf)
Phi_i = Phi[half_indexed_time_samples, :]
# inequalities matrix
G = matrx(rearrange_G(scipy.linalg.block_diag(
*[np.matrix([[-1, -1], [1, -1.0]]), ] * Nf)))
# objective function row-matrix
c = matrx(np.hstack([np.zeros(Nf), np.ones(Nf)]))
# RHS for inequalities
h = matrx([0.0, ] * (Nf * 2), (Nf * 2, 1), 'd')
# equality constraint matrix
A = matrx(np.hstack([Phi_i, Phi_i * 0]))
# RHS for equality constraints
b = matrx(ftime[time_samples])
sol = solvers.lp(c, G, h, A, b)
# [12]
import itertools as it
def dftmatrix(N=8):
'compute inverse DFT matrices'
n = np.arange(N)
U = np.matrix(np.exp(1j * 2 * np.pi / N * n * n[:, None])) / np.sqrt(N)
return np.matrix(U)
M = 3
# np.random.seed(5489) # set random seed for reproducibility
Psi = dftmatrix(128)
Phi = np.random.randn(M, 128)
s = np.zeros((128, 1))
s[0] = 1
s[10] = 1
Theta = Phi * Psi
y = Theta * s
for i in it.combinations(range(128), 2):
sstar = np.zeros((128, 1))
sstar[np.array(i)] = 1
if np.allclose(Theta * sstar, y):
break
else:
print('no solution')
# [9]
# [ ]
| 0
| 0
| 0
| 0
| 0
| 745
| 0
| 8
| 68
|
8afc13e213c403a3dacc0931aa5029c3a13cf2e0
| 2,656
|
py
|
Python
|
lineartransporteqn.py
|
killacamron/CFDcourse21
|
5ae59303d042819e0246e793271f420de8e1bbdb
|
[
"MIT"
] | null | null | null |
lineartransporteqn.py
|
killacamron/CFDcourse21
|
5ae59303d042819e0246e793271f420de8e1bbdb
|
[
"MIT"
] | null | null | null |
lineartransporteqn.py
|
killacamron/CFDcourse21
|
5ae59303d042819e0246e793271f420de8e1bbdb
|
[
"MIT"
] | null | null | null |
# =============================================================================
#
# Explicit Finite Difference Method Code to Solve the 1D Linear Transport Equation
# Adapted by: Cameron Armstrong (2019)
# Source: Lorena Barba, 12 Steps to NS in Python
# Institution: Virginia Commonwealth University
#
# =============================================================================
# Required Modules
import numpy as np
from matplotlib import pyplot as plt
import time
xl = 2 # x length
nx = 600 # number of grid points
x = np.linspace(0,xl,nx) # x grid
dx = xl/(nx-1) # x stepsize
nt = 350 # number of timesteps
dt = 0.0025 # time stepsize
c = 1 # wave speed
g = .01 # gaussian variance parameter (peak width)
theta = x/(0.5*xl) # gaussian mean parameter (peak position)
cfl = round(c*dt/dx,2) # cfl condition 2 decimal places
# Fun little CFL condition check and print report
if cfl >= 1:
print('Hold your horses! The CFL is %s, which is over 1' %(cfl))
else:
print('CFL = %s' %(cfl))
# Array Initialization
u = np.ones(nx) # initializing solution array
un = np.ones(nx) # initializing temporary solution array
u = (1/(2*np.sqrt(np.pi*(g))))*np.exp(-(1-theta)**2/(4*g)) # initial condition (IC) as a gaussian
ui = u.copy()
plt.plot(x,u); # plots IC
# BDS/Upwind with inner for-loop with example on process timing
start = time.process_time()
for n in range(nt):
un = u.copy()
for i in range(1,nx-1):
u[i] = un[i] - c*dt/(dx)*(un[i]-un[i-1])
# periodic BC's
u[0] = u[nx-2]
u[nx-1] = u[1]
end = time.process_time()
print(end-start)
# # BDS/Upwind with vectorization
# for n in range(nt):
# un = u.copy()
# u[1:-1] = un[1:-1] - c*dt/(dx)*(un[1:-1]-un[:-2])
# # periodic BC's
# u[0] = u[nx-2]
# u[nx-1] = u[1]
# # CDS with inner for-loop
#for n in range(nt):
# un = u.copy()
# for i in range(1,nx-1):
# u[i] = un[i] - c*dt/(2*dx)*(un[i+1]-un[i-1])
# # periodic BC's
# u[0] = u[nx-2]
# u[nx-1] = u[1]
# # CDS with vectorization
#for n in range(nt):
# un = u.copy()
# u[1:-1] = un[1:-1] - c*dt/(2*dx)*(un[2:]-un[:-2])
# # periodic BC's
# u[0] = u[nx-2]
# u[nx-1] = u[1]
plt.plot(x,u);
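# (Added note) A hedged cross-check that is not part of the original script: for pure
# linear advection with periodic BCs the exact solution is the initial Gaussian shifted
# by c*nt*dt, so overlaying it visualizes the numerical diffusion of the upwind scheme.
shift = c*nt*dt # total distance travelled (0.875 here)
theta_exact = ((x - shift) % xl)/(0.5*xl)
u_exact = (1/(2*np.sqrt(np.pi*(g))))*np.exp(-(1-theta_exact)**2/(4*g))
plt.plot(x,u_exact,'--');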
| 33.2
| 98
| 0.460843
|
# =============================================================================
#
# Explicit Finite Difference Method Code to Solve the 1D Linear Transport Equation
# Adapted by: Cameron Armstrong (2019)
# Source: Lorena Barba, 12 Steps to NS in Python
# Institution: Virginia Commonwealth University
#
# =============================================================================
# Required Modules
import numpy as np
from matplotlib import pyplot as plt
import time
xl = 2 # x length
nx = 600 # number of grid points
x = np.linspace(0,xl,nx) # x grid
dx = xl/(nx-1) # x stepsize
nt = 350 # number of timesteps
dt = 0.0025 # time stepsize
c = 1 # wave speed
g = .01 # gaussian variance parameter (peak width)
theta = x/(0.5*xl) # gaussian mean parameter (peak position)
cfl = round(c*dt/dx,2) # cfl condition 2 decimal places
# Fun little CFL condition check and print report
if cfl >= 1:
print('Hold your horses! The CFL is %s, which is over 1' %(cfl))
else:
print('CFL = %s' %(cfl))
# Array Initialization
u = np.ones(nx) # initializing solution array
un = np.ones(nx) # initializing temporary solution array
u = (1/(2*np.sqrt(np.pi*(g))))*np.exp(-(1-theta)**2/(4*g)) # initial condition (IC) as a gaussian
ui = u.copy()
plt.plot(x,u); # plots IC
# BDS/Upwind with inner for-loop with example on process timing
start = time.process_time()
for n in range(nt):
un = u.copy()
for i in range(1,nx-1):
u[i] = un[i] - c*dt/(dx)*(un[i]-un[i-1])
# periodic BC's
u[0] = u[nx-2]
u[nx-1] = u[1]
end = time.process_time()
print(end-start)
# # BDS/Upwind with vectorization
# for n in range(nt):
# un = u.copy()
# u[1:-1] = un[1:-1] - c*dt/(dx)*(un[1:-1]-un[:-2])
# # periodic BC's
# u[0] = u[nx-2]
# u[nx-1] = u[1]
# # CDS with inner for-loop
#for n in range(nt):
# un = u.copy()
# for i in range(1,nx-1):
# u[i] = un[i] - c*dt/(2*dx)*(un[i+1]-un[i-1])
# # periodic BC's
# u[0] = u[nx-2]
# u[nx-1] = u[1]
# # CDS with vectorization
#for n in range(nt):
# un = u.copy()
# u[1:-1] = un[1:-1] - c*dt/(2*dx)*(un[2:]-un[:-2])
# # periodic BC's
# u[0] = u[nx-2]
# u[nx-1] = u[1]
plt.plot(x,u);
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
452c277e6f370c2445473182b1051da62995480e
| 5,454
|
py
|
Python
|
monty/exts/info/docs/_html.py
|
onerandomusername/monty-python
|
fcd8b2827eb9bbb2a05d28f80ac9e215589f03f7
|
[
"MIT"
] | 20
|
2021-12-31T10:17:20.000Z
|
2022-03-31T04:16:17.000Z
|
monty/exts/info/docs/_html.py
|
onerandomusername/monty-bot
|
b1c769e44b56bc45f37fc809064571d59c80db27
|
[
"MIT"
] | 1
|
2022-03-13T22:34:33.000Z
|
2022-03-13T22:34:52.000Z
|
monty/exts/info/docs/_html.py
|
onerandomusername/monty-bot
|
b1c769e44b56bc45f37fc809064571d59c80db27
|
[
"MIT"
] | 3
|
2022-01-02T15:21:46.000Z
|
2022-03-05T09:37:54.000Z
|
import re
from functools import partial
from typing import Callable, Container, Iterable, List, Union
from bs4 import BeautifulSoup
from bs4.element import NavigableString, PageElement, Tag
from monty.log import get_logger
from . import MAX_SIGNATURE_AMOUNT
log = get_logger(__name__)
_UNWANTED_SIGNATURE_SYMBOLS_RE = re.compile(r"\[source]|\\\\|")
_SEARCH_END_TAG_ATTRS = (
"data",
"function",
"class",
"exception",
"seealso",
"section",
"rubric",
"sphinxsidebar",
)
def _find_elements_until_tag(
start_element: PageElement,
end_tag_filter: Union[Container[str], Callable[[Tag], bool]],
*,
func: Callable,
include_strings: bool = False,
limit: int = None,
) -> List[Union[Tag, NavigableString]]:
"""
Get all elements up to `limit` or until a tag matching `end_tag_filter` is found.
`end_tag_filter` can be either a container of string names to check against,
or a filtering callable that's applied to tags.
When `include_strings` is True, `NavigableString`s from the document will be included in the result along `Tag`s.
`func` takes in a BeautifulSoup unbound method for finding multiple elements, such as `BeautifulSoup.find_all`.
The method is then iterated over and all elements until the matching tag or the limit are added to the return list.
"""
use_container_filter = not callable(end_tag_filter)
elements = []
for element in func(start_element, name=Strainer(include_strings=include_strings), limit=limit):
if isinstance(element, Tag):
if use_container_filter:
if element.name in end_tag_filter:
break
elif end_tag_filter(element):
break
elements.append(element)
return elements
_find_next_children_until_tag = partial(_find_elements_until_tag, func=partial(BeautifulSoup.find_all, recursive=False))
_find_recursive_children_until_tag = partial(_find_elements_until_tag, func=BeautifulSoup.find_all)
_find_next_siblings_until_tag = partial(_find_elements_until_tag, func=BeautifulSoup.find_next_siblings)
_find_previous_siblings_until_tag = partial(_find_elements_until_tag, func=BeautifulSoup.find_previous_siblings)
def _class_filter_factory(class_names: Iterable[str]) -> Callable[[Tag], bool]:
"""Create callable that returns True when the passed in tag's class is in `class_names` or when it's a table."""
return match_tag
def get_general_description(start_element: PageElement) -> List[Union[Tag, NavigableString]]:
"""
Get page content to a table or a tag with its class in `SEARCH_END_TAG_ATTRS`.
A headerlink tag is attempted to be found to skip repeating the symbol information in the description.
If it's found it's used as the tag to start the search from instead of the `start_element`.
"""
child_tags = _find_recursive_children_until_tag(start_element, _class_filter_factory(["section"]), limit=100)
header = next(filter(_class_filter_factory(["headerlink"]), child_tags), None)
start_tag = header.parent if header is not None else start_element
return _find_next_siblings_until_tag(start_tag, _class_filter_factory(_SEARCH_END_TAG_ATTRS), include_strings=True)
def get_dd_description(symbol: PageElement) -> List[Union[Tag, NavigableString]]:
"""Get the contents of the next dd tag, up to a dt or a dl tag."""
description_tag = symbol.find_next("dd")
return _find_next_children_until_tag(description_tag, ("dt", "dl"), include_strings=True)
def get_signatures(start_signature: PageElement) -> List[str]:
"""
Collect up to `_MAX_SIGNATURE_AMOUNT` signatures from dt tags around the `start_signature` dt tag.
First the signatures under the `start_signature` are included;
if less than 2 are found, tags above the start signature are added to the result if any are present.
"""
signatures = []
for element in (
*reversed(_find_previous_siblings_until_tag(start_signature, ("dd",), limit=2)),
start_signature,
*_find_next_siblings_until_tag(start_signature, ("dd",), limit=2),
)[-MAX_SIGNATURE_AMOUNT:]:
signature = _UNWANTED_SIGNATURE_SYMBOLS_RE.sub("", element.text)
if signature:
signatures.append(signature)
return signatures
| 38.957143
| 120
| 0.710671
|
import re
from functools import partial
from typing import Callable, Container, Iterable, List, Union
from bs4 import BeautifulSoup
from bs4.element import NavigableString, PageElement, SoupStrainer, Tag
from monty.log import get_logger
from . import MAX_SIGNATURE_AMOUNT
log = get_logger(__name__)
_UNWANTED_SIGNATURE_SYMBOLS_RE = re.compile(r"\[source]|\\\\|¶")
_SEARCH_END_TAG_ATTRS = (
"data",
"function",
"class",
"exception",
"seealso",
"section",
"rubric",
"sphinxsidebar",
)
class Strainer(SoupStrainer):
"""Subclass of SoupStrainer to allow matching of both `Tag`s and `NavigableString`s."""
def __init__(self, *, include_strings: bool, **kwargs):
self.include_strings = include_strings
passed_text = kwargs.pop("text", None)
if passed_text is not None:
log.warning("`text` is not a supported kwarg in the custom strainer.")
super().__init__(**kwargs)
Markup = Union[PageElement, List["Markup"]]
def search(self, markup: Markup) -> Union[PageElement, str]:
"""Extend default SoupStrainer behaviour to allow matching both `Tag`s` and `NavigableString`s."""
if isinstance(markup, str):
# Let everything through the text filter if we're including strings and tags.
if not self.name and not self.attrs and self.include_strings:
return markup
else:
return super().search(markup)
def _find_elements_until_tag(
start_element: PageElement,
end_tag_filter: Union[Container[str], Callable[[Tag], bool]],
*,
func: Callable,
include_strings: bool = False,
limit: int = None,
) -> List[Union[Tag, NavigableString]]:
"""
Get all elements up to `limit` or until a tag matching `end_tag_filter` is found.
`end_tag_filter` can be either a container of string names to check against,
or a filtering callable that's applied to tags.
When `include_strings` is True, `NavigableString`s from the document will be included in the result along `Tag`s.
`func` takes in a BeautifulSoup unbound method for finding multiple elements, such as `BeautifulSoup.find_all`.
The method is then iterated over and all elements until the matching tag or the limit are added to the return list.
"""
use_container_filter = not callable(end_tag_filter)
elements = []
for element in func(start_element, name=Strainer(include_strings=include_strings), limit=limit):
if isinstance(element, Tag):
if use_container_filter:
if element.name in end_tag_filter:
break
elif end_tag_filter(element):
break
elements.append(element)
return elements
_find_next_children_until_tag = partial(_find_elements_until_tag, func=partial(BeautifulSoup.find_all, recursive=False))
_find_recursive_children_until_tag = partial(_find_elements_until_tag, func=BeautifulSoup.find_all)
_find_next_siblings_until_tag = partial(_find_elements_until_tag, func=BeautifulSoup.find_next_siblings)
_find_previous_siblings_until_tag = partial(_find_elements_until_tag, func=BeautifulSoup.find_previous_siblings)
def _class_filter_factory(class_names: Iterable[str]) -> Callable[[Tag], bool]:
"""Create callable that returns True when the passed in tag's class is in `class_names` or when it's a table."""
def match_tag(tag: Tag) -> bool:
for attr in class_names:
if attr in tag.get("class", ()):
return True
return tag.name == "table"
return match_tag
def get_general_description(start_element: PageElement) -> List[Union[Tag, NavigableString]]:
"""
Get page content to a table or a tag with its class in `SEARCH_END_TAG_ATTRS`.
A headerlink tag is attempted to be found to skip repeating the symbol information in the description.
If it's found it's used as the tag to start the search from instead of the `start_element`.
"""
child_tags = _find_recursive_children_until_tag(start_element, _class_filter_factory(["section"]), limit=100)
header = next(filter(_class_filter_factory(["headerlink"]), child_tags), None)
start_tag = header.parent if header is not None else start_element
return _find_next_siblings_until_tag(start_tag, _class_filter_factory(_SEARCH_END_TAG_ATTRS), include_strings=True)
def get_dd_description(symbol: PageElement) -> List[Union[Tag, NavigableString]]:
"""Get the contents of the next dd tag, up to a dt or a dl tag."""
description_tag = symbol.find_next("dd")
return _find_next_children_until_tag(description_tag, ("dt", "dl"), include_strings=True)
def get_signatures(start_signature: PageElement) -> List[str]:
"""
Collect up to `_MAX_SIGNATURE_AMOUNT` signatures from dt tags around the `start_signature` dt tag.
First the signatures under the `start_signature` are included;
if less than 2 are found, tags above the start signature are added to the result if any are present.
"""
signatures = []
for element in (
*reversed(_find_previous_siblings_until_tag(start_signature, ("dd",), limit=2)),
start_signature,
*_find_next_siblings_until_tag(start_signature, ("dd",), limit=2),
    )[-_MAX_SIGNATURE_AMOUNT:]:
signature = _UNWANTED_SIGNATURE_SYMBOLS_RE.sub("", element.text)
if signature:
signatures.append(signature)
return signatures
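The helpers above are easier to follow with a concrete call. The following is a minimal, self-contained sketch (not part of the original module; it assumes the functions above and their module-level constants such as `_MAX_SIGNATURE_AMOUNT` and `_UNWANTED_SIGNATURE_SYMBOLS_RE` are already in scope) that runs `get_signatures` and `get_dd_description` against a tiny Sphinx-style `dl` snippet.
from bs4 import BeautifulSoup

_SAMPLE_HTML = """
<dl>
  <dt id="mymod.func">mymod.func(x, y=0)<a class="headerlink" href="#mymod.func">¶</a></dt>
  <dd><p>Add two numbers.</p></dd>
</dl>
"""

soup = BeautifulSoup(_SAMPLE_HTML, "html.parser")
symbol = soup.find("dt", attrs={"id": "mymod.func"})

# Collect the dt signature(s) around the symbol and the dd body that documents it.
print(get_signatures(symbol))       # e.g. ['mymod.func(x, y=0)'] once unwanted symbols are stripped
print(get_dd_description(symbol))   # children of the following <dd> tag, up to a dt or dl tag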
| 2
| 0
| 0
| 917
| 0
| 152
| 0
| 14
| 50
|
23d3fbbdeeaccf228df906ea84306f0500785058
| 6,513
|
py
|
Python
|
preflibtools/instances/sampling.py
|
nmattei/PrefLib-Tools
|
d1a1137efdc6a5722bbb0e15a0c1174a0236aefb
|
[
"BSD-3-Clause-Clear"
] | 17
|
2015-06-01T15:00:09.000Z
|
2019-09-18T18:05:38.000Z
|
preflibtools/instances/sampling.py
|
nmattei/PrefLib-Tools
|
d1a1137efdc6a5722bbb0e15a0c1174a0236aefb
|
[
"BSD-3-Clause-Clear"
] | 6
|
2016-06-06T07:40:41.000Z
|
2018-01-04T22:09:21.000Z
|
preflibtools/instances/sampling.py
|
nmattei/PrefLib-Tools
|
d1a1137efdc6a5722bbb0e15a0c1174a0236aefb
|
[
"BSD-3-Clause-Clear"
] | 7
|
2015-06-02T04:58:13.000Z
|
2019-12-13T13:26:58.000Z
|
""" This module describes procedures to sample preferences for different probability distributions.
"""
import numpy as np
def generateICStrictProfile(nbVoters, alternatives):
""" Generates a profile following the impartial culture.
:param nbVoters: Number of orders to sample.
:type nbVoters: int
:param alternatives: List of alternatives.
:type alternatives: list of int
:return: A vote map, i.e., a dictionary whose keys are orders, mapping to the number of voters with
the given order as their preferences.
:rtype: dict
"""
return urnModel(nbVoters, 0, alternatives)
def generateICAnonymousStrictProfile(nbVoters, alternatives):
""" Generates a profile following the anonymous impartial culture.
:param nbVoters: Number of orders to sample.
:type nbVoters: int
:param alternatives: List of alternatives.
:type alternatives: list of int
:return: A vote map, i.e., a dictionary whose keys are orders, mapping to the number of voters with
the given order as their preferences.
:rtype: dict
"""
return urnModel(nbVoters, 1, alternatives)
def mallowsModel(nbVoters, nbAlternatives, mixture, dispersions, references):
""" Generates a profile following a mixture of Mallow's models.
:param nbVoters: Number of orders to sample.
:type nbVoters: int
:param nbAlternatives: Number of alternatives for the sampled orders.
:type nbAlternatives: int
:param mixture: A list of the weights of each element of the mixture.
    :type mixture: list of float
:param dispersions: A list of the dispersion coefficient of each element of the mixture.
:type dispersions: list of float
:param references: A list of the reference orders for each element of the mixture.
:type references: list of tuples of tuples of int
:return: A vote map, i.e., a dictionary whose keys are orders, mapping to the number of voters with
the given order as their preferences.
:rtype: dict
"""
if len(mixture) != len(dispersions) or len(mixture) != len(references):
raise ValueError("Parameters of Mallows' mixture do not have the same length.")
# We normalize the mixture so that it sums up to 1
if sum(mixture) != 1:
mixture = [m / sum(mixture) for m in mixture]
#Precompute the distros for each Phi.
insertDistributions = []
for i in range(len(mixture)):
insertDistributions.append(mallowsInsertDistributions(nbAlternatives, dispersions[i]))
#Now, generate votes...
votemap = {}
for cvoter in range(nbVoters):
cmodel = np.random.choice(range(len(mixture)), 1, p = mixture)[0]
#Generate a vote for the selected model
insertVector = [0] * nbAlternatives
for i in range(1, len(insertVector) + 1):
#options are 1...max
insertVector[i - 1] = np.random.choice(range(1, i + 1), 1, p = insertDistributions[cmodel][i])[0]
vote = []
for i in range(len(references[cmodel])):
vote.insert(insertVector[i] - 1, references[cmodel][i])
tvote = tuple((alt,) for alt in vote)
votemap[tvote] = votemap.get(tvote, 0) + 1
return votemap
def mallowsMixture(nbVoters, nbReferences, alternatives):
""" Generates a profile following a mixture of Mallow's models for which reference points and dispersion
coefficients are independently and identically distributed.
:param nbVoters: Number of orders to sample.
:type nbVoters: int
    :param nbReferences: Number of reference orders, i.e., the number of elements in the mixture.
    :type nbReferences: int
    :param alternatives: List of alternatives.
    :type alternatives: list of int
:return: A vote map, i.e., a dictionary whose keys are orders, mapping to the number of voters with
the given order as their preferences.
:rtype: dict
"""
mixture = []
dispersions = []
references = []
for i in range(nbReferences):
references.append(tuple(generateICStrictProfile(1, alternatives))[0])
dispersions.append(round(np.random.rand(), 5))
mixture.append(np.random.randint(1, 101))
sumMixture = sum(mixture)
mixture = [float(i) / float(sumMixture) for i in mixture]
return mallowsModel(nbVoters, len(alternatives), mixture, dispersions, references)
def urnModel(nbVoters, replace, alternatives):
""" Generates a profile following the urn model.
:param nbVoters: Number of orders to sample.
:type nbVoters: int
:param replace: The number of replacements for the urn model.
:type replace: int
:param alternatives: List of alternatives.
:type alternatives: list of int
:return: A vote map, i.e., a dictionary whose keys are orders, mapping to the number of voters with
the given order as their preferences.
:rtype: dict
"""
voteMap = {}
ReplaceVotes = {}
ICsize = np.math.factorial(len(alternatives))
ReplaceSize = 0
for x in range(nbVoters):
flip = np.random.randint(1, ICsize + ReplaceSize + 1)
if flip <= ICsize:
#generate an IC vote and make a suitable number of replacements...
tvote = generateICVote(alternatives)
voteMap[tvote] = (voteMap.get(tvote, 0) + 1)
ReplaceVotes[tvote] = (ReplaceVotes.get(tvote, 0) + replace)
ReplaceSize += replace
#print("made " + str(tvote))
else:
#iterate over replacement hash and select proper vote.
flip = flip - ICsize
for vote in ReplaceVotes.keys():
flip = flip - ReplaceVotes[vote]
if flip <= 0:
vote = tuple((alt,) for alt in vote)
voteMap[vote] = (voteMap.get(vote, 0) + 1)
ReplaceVotes[vote] = (ReplaceVotes.get(vote, 0) + replace)
ReplaceSize += replace
break
else:
print("We Have a problem... replace fell through....")
exit()
return voteMap
def generateICVote(alternatives):
""" Generates a strict order over the set of alternatives following the impartial culture.
:param alternatives: List of alternatives.
:type alternatives: list of int
:return: A strict order over the alternatives, i.e., a tuple of tuples of size 1.
:rtype: tuple
"""
options = list(alternatives)
vote = []
while(len(options) > 0):
#randomly select an option
vote.append(options.pop(np.random.randint(0, len(options))))
return tuple((alt,) for alt in vote)
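As a quick illustration of the samplers above, here is a minimal usage sketch (not part of the module; it assumes the functions defined above are in scope, e.g. imported from preflibtools.instances.sampling) that draws strict orders over three alternatives.
import numpy as np

np.random.seed(42)  # fixed seed only so the sketch is reproducible
alternatives = [1, 2, 3]

# A single impartial-culture vote: a strict order encoded as a tuple of singleton tuples.
print(generateICVote(alternatives))            # e.g. ((2,), (3,), (1,))

# A profile of 10 such votes: orders mapped to the number of voters holding them.
profile = generateICStrictProfile(10, alternatives)
print(profile, sum(profile.values()))          # the counts add up to 10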
| 35.396739
| 106
| 0.716874
|
""" This module describes procedures to sample preferences for different probability distributions.
"""
import numpy as np
def generateICStrictProfile(nbVoters, alternatives):
""" Generates a profile following the impartial culture.
:param nbVoters: Number of orders to sample.
:type nbVoters: int
:param alternatives: List of alternatives.
:type alternatives: list of int
:return: A vote map, i.e., a dictionary whose keys are orders, mapping to the number of voters with
the given order as their preferences.
:rtype: dict
"""
return urnModel(nbVoters, 0, alternatives)
def generateICAnonymousStrictProfile(nbVoters, alternatives):
""" Generates a profile following the anonymous impartial culture.
:param nbVoters: Number of orders to sample.
:type nbVoters: int
:param alternatives: List of alternatives.
:type alternatives: list of int
:return: A vote map, i.e., a dictionary whose keys are orders, mapping to the number of voters with
the given order as their preferences.
:rtype: dict
"""
return urnModel(nbVoters, 1, alternatives)
def mallowsModel(nbVoters, nbAlternatives, mixture, dispersions, references):
""" Generates a profile following a mixture of Mallow's models.
:param nbVoters: Number of orders to sample.
:type nbVoters: int
:param nbAlternatives: Number of alternatives for the sampled orders.
:type nbAlternatives: int
:param mixture: A list of the weights of each element of the mixture.
    :type mixture: list of float
:param dispersions: A list of the dispersion coefficient of each element of the mixture.
:type dispersions: list of float
:param references: A list of the reference orders for each element of the mixture.
:type references: list of tuples of tuples of int
:return: A vote map, i.e., a dictionary whose keys are orders, mapping to the number of voters with
the given order as their preferences.
:rtype: dict
"""
def mallowsInsertDistributions(nbAlternatives, phi):
distributions = {}
for i in range(1, nbAlternatives + 1):
#Start with an empty distro of length i
distribution = [0] * i
#compute the denom = phi^0 + phi^1 + ... phi^(i-1)
denominator = sum([pow(phi, k) for k in range(i)])
#Fill each element of the distro with phi^(i-j) / denominator
for j in range(1, i+1):
distribution[j-1] = pow(phi, i - j) / denominator
distributions[i] = distribution
return distributions
if len(mixture) != len(dispersions) or len(mixture) != len(references):
raise ValueError("Parameters of Mallows' mixture do not have the same length.")
# We normalize the mixture so that it sums up to 1
if sum(mixture) != 1:
mixture = [m / sum(mixture) for m in mixture]
#Precompute the distros for each Phi.
insertDistributions = []
for i in range(len(mixture)):
insertDistributions.append(mallowsInsertDistributions(nbAlternatives, dispersions[i]))
#Now, generate votes...
votemap = {}
for cvoter in range(nbVoters):
cmodel = np.random.choice(range(len(mixture)), 1, p = mixture)[0]
#Generate a vote for the selected model
insertVector = [0] * nbAlternatives
for i in range(1, len(insertVector) + 1):
#options are 1...max
insertVector[i - 1] = np.random.choice(range(1, i + 1), 1, p = insertDistributions[cmodel][i])[0]
vote = []
for i in range(len(references[cmodel])):
vote.insert(insertVector[i] - 1, references[cmodel][i])
tvote = tuple((alt,) for alt in vote)
votemap[tvote] = votemap.get(tvote, 0) + 1
return votemap
def mallowsMixture(nbVoters, nbReferences, alternatives):
""" Generates a profile following a mixture of Mallow's models for which reference points and dispersion
coefficients are independently and identically distributed.
:param nbVoters: Number of orders to sample.
:type nbVoters: int
    :param nbReferences: Number of reference orders, i.e., the number of elements in the mixture.
    :type nbReferences: int
    :param alternatives: List of alternatives.
    :type alternatives: list of int
:return: A vote map, i.e., a dictionary whose keys are orders, mapping to the number of voters with
the given order as their preferences.
:rtype: dict
"""
mixture = []
dispersions = []
references = []
for i in range(nbReferences):
references.append(tuple(generateICStrictProfile(1, alternatives))[0])
dispersions.append(round(np.random.rand(), 5))
mixture.append(np.random.randint(1, 101))
sumMixture = sum(mixture)
mixture = [float(i) / float(sumMixture) for i in mixture]
return mallowsModel(nbVoters, len(alternatives), mixture, dispersions, references)
def urnModel(nbVoters, replace, alternatives):
""" Generates a profile following the urn model.
:param nbVoters: Number of orders to sample.
:type nbVoters: int
:param replace: The number of replacements for the urn model.
:type replace: int
:param alternatives: List of alternatives.
:type alternatives: list of int
:return: A vote map, i.e., a dictionary whose keys are orders, mapping to the number of voters with
the given order as their preferences.
:rtype: dict
"""
voteMap = {}
ReplaceVotes = {}
ICsize = np.math.factorial(len(alternatives))
ReplaceSize = 0
for x in range(nbVoters):
flip = np.random.randint(1, ICsize + ReplaceSize + 1)
if flip <= ICsize:
#generate an IC vote and make a suitable number of replacements...
tvote = generateICVote(alternatives)
voteMap[tvote] = (voteMap.get(tvote, 0) + 1)
ReplaceVotes[tvote] = (ReplaceVotes.get(tvote, 0) + replace)
ReplaceSize += replace
#print("made " + str(tvote))
else:
#iterate over replacement hash and select proper vote.
flip = flip - ICsize
for vote in ReplaceVotes.keys():
flip = flip - ReplaceVotes[vote]
if flip <= 0:
vote = tuple((alt,) for alt in vote)
voteMap[vote] = (voteMap.get(vote, 0) + 1)
ReplaceVotes[vote] = (ReplaceVotes.get(vote, 0) + replace)
ReplaceSize += replace
break
else:
print("We Have a problem... replace fell through....")
exit()
return voteMap
def generateICVote(alternatives):
""" Generates a strict order over the set of alternatives following the impartial culture.
:param alternatives: List of alternatives.
:type alternatives: list of int
:return: A strict order over the alternatives, i.e., a tuple of tuples of size 1.
:rtype: tuple
"""
options = list(alternatives)
vote = []
while(len(options) > 0):
#randomly select an option
vote.append(options.pop(np.random.randint(0, len(options))))
return tuple((alt,) for alt in vote)
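The insertion probabilities built by `mallowsInsertDistributions` inside `mallowsModel` follow the repeated-insertion form phi**(i - j) / (phi**0 + ... + phi**(i - 1)) for inserting the i-th reference item at rank j. The short standalone check below (an illustration, not part of the module) recomputes a few rows and confirms each row sums to 1.
phi = 0.5
for i in range(1, 4):
    denominator = sum(phi ** k for k in range(i))
    row = [phi ** (i - j) / denominator for j in range(1, i + 1)]
    print(i, [round(p, 3) for p in row], "sum =", round(sum(row), 3))
# i = 3 prints [0.143, 0.286, 0.571]: inserting at the last rank, which preserves the
# reference order, is likeliest, so sampled votes stay close to the reference when phi < 1.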
| 0
| 0
| 0
| 0
| 0
| 474
| 0
| 0
| 24
|
1a8e4c575c1b0238e1aaf8f4d51c772141dd949b
| 1,100
|
py
|
Python
|
module1-introduction-to-sql/assignment/buddymove_holidayiq.py
|
jonathanmendoza-tx/DS-Unit-3-Sprint-2-SQL-and-Databases
|
7976332b75b8c81b2dd06682c3be1acd390dbd8c
|
[
"MIT"
] | null | null | null |
module1-introduction-to-sql/assignment/buddymove_holidayiq.py
|
jonathanmendoza-tx/DS-Unit-3-Sprint-2-SQL-and-Databases
|
7976332b75b8c81b2dd06682c3be1acd390dbd8c
|
[
"MIT"
] | null | null | null |
module1-introduction-to-sql/assignment/buddymove_holidayiq.py
|
jonathanmendoza-tx/DS-Unit-3-Sprint-2-SQL-and-Databases
|
7976332b75b8c81b2dd06682c3be1acd390dbd8c
|
[
"MIT"
] | null | null | null |
import sqlite3
import pandas as pd
!wget https://raw.githubusercontent.com/jonathanmendoza-tx/DS-Unit-3-Sprint-2-SQL-and-Databases/master/module1-introduction-to-sql/buddymove_holidayiq.csv
conn = sqlite3.connect('buddymove_holidayiq.sqlite3')
cur = conn.cursor()
df = pd.read_csv('/content/buddymove_holidayiq.csv', index_col= 'User Id')
df.to_sql(name = 'review', con = conn)
query_rows = """
SELECT COUNT(*)
FROM review
"""
cur.execute(query_rows)
total_people = cur.fetchall()
print(f'There are a total of {total_people[0][0]} rows')
query_nature_shopping = """
SELECT COUNT(*)
FROM review
WHERE Nature >= 100 AND Shopping >= 100
"""
cur.execute(query_nature_shopping)
nature_shop = cur.fetchall()
print(f'There are {nature_shop[0][0]} people who reviewed nature and shopping at least 100 times')
columns = ['Sports', 'Religious', 'Nature', 'Theatre', 'Shopping', 'Picnic']
for ii in range(len(columns)):
query = """
SELECT AVG(%s)
FROM review
"""
cur.execute(query %columns[ii])
avg = cur.fetchall()
print(f'Average number of reviews for {columns[ii]} is {avg[0][0]}')
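The per-column loop above issues one AVG query per column. A minimal alternative sketch (standalone, reusing the same buddymove_holidayiq.sqlite3 database and `review` table built above) fetches all six averages in a single round trip; the column names come from a fixed list in the script, so interpolating them into the SQL string is safe here.
import sqlite3

columns = ['Sports', 'Religious', 'Nature', 'Theatre', 'Shopping', 'Picnic']
conn = sqlite3.connect('buddymove_holidayiq.sqlite3')
cur = conn.cursor()

# Build SELECT AVG(Sports), AVG(Religious), ... FROM review and run it once.
avg_query = "SELECT " + ", ".join(f"AVG({col})" for col in columns) + " FROM review"
cur.execute(avg_query)
for name, value in zip(columns, cur.fetchone()):
    print(f'Average number of reviews for {name} is {value}')
conn.close()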
| 25.581395
| 154
| 0.719091
|
import sqlite3
import pandas as pd
!wget https://raw.githubusercontent.com/jonathanmendoza-tx/DS-Unit-3-Sprint-2-SQL-and-Databases/master/module1-introduction-to-sql/buddymove_holidayiq.csv
conn = sqlite3.connect('buddymove_holidayiq.sqlite3')
cur = conn.cursor()
df = pd.read_csv('/content/buddymove_holidayiq.csv', index_col= 'User Id')
df.to_sql(name = 'review', con = conn)
query_rows = """
SELECT COUNT(*)
FROM review
"""
cur.execute(query_rows)
total_people = cur.fetchall()
print(f'There are a total of {total_people[0][0]} rows')
query_nature_shopping = """
SELECT COUNT(*)
FROM review
WHERE Nature >= 100 AND Shopping >= 100
"""
cur.execute(query_nature_shopping)
nature_shop = cur.fetchall()
print(f'There are {nature_shop[0][0]} people who reviewed nature and shopping at least 100 times')
columns = ['Sports', 'Religious', 'Nature', 'Theatre', 'Shopping', 'Picnic']
for ii in range(len(columns)):
query = """
SELECT AVG(%s)
FROM review
"""
cur.execute(query %columns[ii])
avg = cur.fetchall()
print(f'Average number of reviews for {columns[ii]} is {avg[0][0]}')
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
95e7082386e5418aa14fda8e81fb4e55fd79a141
| 54
|
py
|
Python
|
quart_rapidoc/version.py
|
marirs/quart-rapidoc
|
fd86604ee5ffea7bd33b08af537472f0df21e8c8
|
[
"MIT"
] | 1
|
2020-07-06T17:11:02.000Z
|
2020-07-06T17:11:02.000Z
|
quart_rapidoc/version.py
|
marirs/quart-rapidoc
|
fd86604ee5ffea7bd33b08af537472f0df21e8c8
|
[
"MIT"
] | null | null | null |
quart_rapidoc/version.py
|
marirs/quart-rapidoc
|
fd86604ee5ffea7bd33b08af537472f0df21e8c8
|
[
"MIT"
] | null | null | null |
"""quart_redoc version file."""
__version__ = "0.5.1"
| 18
| 31
| 0.666667
|
"""quart_redoc version file."""
__version__ = "0.5.1"
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
e2e6c98c7ef19d0a3e295150199aeb3a5229e053
| 48,085
|
py
|
Python
|
cli/cyberPanel.py
|
uzairAK/serverom-panel
|
3dcde05ad618e6bef280db7d3180f926fe2ab1db
|
[
"MIT"
] | null | null | null |
cli/cyberPanel.py
|
uzairAK/serverom-panel
|
3dcde05ad618e6bef280db7d3180f926fe2ab1db
|
[
"MIT"
] | null | null | null |
cli/cyberPanel.py
|
uzairAK/serverom-panel
|
3dcde05ad618e6bef280db7d3180f926fe2ab1db
|
[
"MIT"
] | null | null | null |
#!/usr/local/CyberCP/bin/python
import os, sys
sys.path.append('/usr/local/CyberCP')
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CyberCP.settings")
django.setup()
import plogical.backupUtilities as backupUtilities
# All that we see or seem is but a dream within a dream.
if __name__ == "__main__":
main()
| 36.181339
| 214
| 0.58434
|
#!/usr/local/CyberCP/bin/python
import os,sys
sys.path.append('/usr/local/CyberCP')
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CyberCP.settings")
django.setup()
from inspect import stack
from cli.cliLogger import cliLogger as logger
import json
from plogical.virtualHostUtilities import virtualHostUtilities
import re
from websiteFunctions.models import Websites, ChildDomains
from plogical.dnsUtilities import DNS
import time
import plogical.backupUtilities as backupUtilities
import requests
from loginSystem.models import Administrator
from packages.models import Package
from plogical.mysqlUtilities import mysqlUtilities
from cli.cliParser import cliParser
from plogical.vhost import vhost
from plogical.mailUtilities import mailUtilities
from plogical.ftpUtilities import FTPUtilities
from plogical.sslUtilities import sslUtilities
from plogical.processUtilities import ProcessUtilities
from plogical.backupSchedule import backupSchedule
# All that we see or seem is but a dream within a dream.
class cyberPanel:
def printStatus(self, operationStatus, errorMessage):
data = json.dumps({'success': operationStatus,
'errorMessage': errorMessage
})
print(data)
## Website Functions
def createWebsite(self, package, owner, domainName, email, php, ssl, dkim, openBasedir):
try:
externalApp = "".join(re.findall("[a-zA-Z]+", domainName))[:7]
phpSelection = 'PHP ' + php
result = virtualHostUtilities.createVirtualHost(domainName, email, phpSelection, externalApp, ssl, dkim,
openBasedir, owner, package, 0)
if result[0] == 1:
self.printStatus(1,'None')
else:
self.printStatus(0, result[1])
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
self.printStatus(0, str(msg))
def createDomain(self, masterDomain, domainName, owner, php, ssl, dkim, openBasedir):
try:
path = '/home/' + masterDomain + '/public_html/' + domainName
phpSelection = 'PHP ' + php
result = virtualHostUtilities.createDomain(masterDomain, domainName, phpSelection, path, ssl, dkim, openBasedir, owner, 0)
if result[0] == 1:
self.printStatus(1,'None')
else:
self.printStatus(0, result[1])
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
self.printStatus(0, str(msg))
def deleteWebsite(self, domainName):
try:
vhost.deleteVirtualHostConfigurations(domainName)
self.printStatus(1, 'None')
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
print(0)
def deleteChild(self, childDomain):
try:
result = virtualHostUtilities.deleteDomain(childDomain)
if result[0] == 1:
self.printStatus(1,'None')
else:
self.printStatus(0, result[1])
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
print(0)
def listWebsitesJson(self):
try:
websites = Websites.objects.all()
ipFile = "/etc/cyberpanel/machineIP"
f = open(ipFile)
ipData = f.read()
ipAddress = ipData.split('\n', 1)[0]
json_data = "["
checker = 0
for items in websites:
if items.state == 0:
state = "Suspended"
else:
state = "Active"
dic = {'domain': items.domain, 'adminEmail': items.adminEmail,'ipAddress':ipAddress,'admin': items.admin.userName,'package': items.package.packageName,'state':state}
if checker == 0:
json_data = json_data + json.dumps(dic)
checker = 1
else:
json_data = json_data +',' + json.dumps(dic)
json_data = json_data + ']'
final_json = json.dumps(json_data)
print(final_json)
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
print(0)
def listWebsitesPretty(self):
try:
from prettytable import PrettyTable
websites = Websites.objects.all()
ipFile = "/etc/cyberpanel/machineIP"
f = open(ipFile)
ipData = f.read()
ipAddress = ipData.split('\n', 1)[0]
table = PrettyTable(['ID','Domain', 'IP Address', 'Package', 'Owner', 'State', 'Email'])
for items in websites:
if items.state == 0:
state = "Suspended"
else:
state = "Active"
table.add_row([items.id, items.domain, ipAddress, items.package.packageName, items.admin.userName, state, items.adminEmail])
print(table)
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
print(0)
def changePHP(self, virtualHostName, phpVersion):
try:
phpVersion = 'PHP ' + phpVersion
confPath = virtualHostUtilities.Server_root + "/conf/vhosts/" + virtualHostName
completePathToConfigFile = confPath + "/vhost.conf"
result = vhost.changePHP(completePathToConfigFile, phpVersion)
if result[0] == 1:
self.printStatus(1,'None')
else:
self.printStatus(0, result[1])
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
self.printStatus(0, str(msg))
def changePackage(self, virtualHostName, packageName):
try:
if Websites.objects.filter(domain=virtualHostName).count() == 0:
                self.printStatus(0, 'This website does not exist.')
if Package.objects.filter(packageName=packageName).count() == 0:
                self.printStatus(0, 'This package does not exist.')
website = Websites.objects.get(domain=virtualHostName)
package = Package.objects.get(packageName=packageName)
website.package = package
website.save()
self.printStatus(1, 'None')
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
self.printStatus(0, str(msg))
## DNS Functions
def listDNSJson(self, virtualHostName):
try:
records = DNS.getDNSRecords(virtualHostName)
json_data = "["
checker = 0
for items in records:
dic = {'id': items.id,
'type': items.type,
'name': items.name,
'content': items.content,
'priority': items.prio,
'ttl': items.ttl
}
if checker == 0:
json_data = json_data + json.dumps(dic)
checker = 1
else:
json_data = json_data + ',' + json.dumps(dic)
json_data = json_data + ']'
final_json = json.dumps(json_data)
print(final_json)
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
print(0)
def listDNSPretty(self, virtualHostName):
try:
from prettytable import PrettyTable
records = DNS.getDNSRecords(virtualHostName)
table = PrettyTable(['ID', 'TYPE', 'Name', 'Value', 'Priority', 'TTL'])
for items in records:
if len(items.content) >= 30:
content = items.content[0:30] + " ..."
else:
content = items.content
table.add_row([items.id, items.type, items.name, content, items.prio, items.ttl])
print(table)
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
print(0)
def listDNSZonesJson(self):
try:
records = DNS.getDNSZones()
json_data = "["
checker = 0
for items in records:
dic = {'id': items.id,
'name': items.name,
}
if checker == 0:
json_data = json_data + json.dumps(dic)
checker = 1
else:
json_data = json_data + ',' + json.dumps(dic)
json_data = json_data + ']'
final_json = json.dumps(json_data)
print(final_json)
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
print(0)
def listDNSZonesPretty(self):
try:
from prettytable import PrettyTable
            records = DNS.getDNSZones()
table = PrettyTable(['ID', 'Name'])
for items in records:
table.add_row([items.id, items.name])
print(table)
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
print(0)
def createDNSZone(self, virtualHostName, owner):
try:
admin = Administrator.objects.get(userName=owner)
DNS.dnsTemplate(virtualHostName, admin)
self.printStatus(1, 'None')
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
self.printStatus(0, str(msg))
def createDNSRecord(self, virtualHostName, name, recordType, value, priority, ttl):
try:
zone = DNS.getZoneObject(virtualHostName)
DNS.createDNSRecord(zone, name, recordType, value, int(priority), int(ttl))
self.printStatus(1, 'None')
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
self.printStatus(0, str(msg))
def deleteDNSZone(self, virtualHostName):
try:
DNS.deleteDNSZone(virtualHostName)
self.printStatus(1, 'None')
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
self.printStatus(0, str(msg))
def deleteDNSRecord(self, recordID):
try:
DNS.deleteDNSRecord(recordID)
self.printStatus(1, 'None')
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
self.printStatus(0, str(msg))
## Backup Functions
def createBackup(self, virtualHostName):
try:
backupLogPath = "/usr/local/lscp/logs/backup_log."+time.strftime("%I-%M-%S-%a-%b-%Y")
print('Backup logs to be generated in %s' % (backupLogPath))
backupSchedule.createLocalBackup(virtualHostName, backupLogPath)
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
print(0)
def restoreBackup(self, fileName):
try:
if os.path.exists('/home/backup/' + fileName):
dir = "CyberPanelRestore"
else:
dir = 'CLI'
backupUtilities.submitRestore(fileName, dir)
while (1):
time.sleep(1)
finalData = json.dumps({'backupFile': fileName, "dir": dir})
r = requests.post("http://localhost:5003/backup/restoreStatus", data=finalData,
verify=False)
data = json.loads(r.text)
if data['abort'] == 1 and data['running'] == "Error":
print('Failed to restore backup, Error message : ' + data['status'] + '\n')
break
elif data['abort'] == 1 and data['running'] == "Completed":
print('\n\n')
print('Backup restore completed.\n')
break
else:
print('Waiting for restore to complete. Current status: ' + data['status'])
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
print(0)
## Packages
def createPackage(self, owner, packageName, diskSpace, bandwidth, emailAccounts, dataBases, ftpAccounts, allowedDomains):
try:
admin = Administrator.objects.get(userName=owner)
newPack = Package(admin=admin, packageName=packageName, diskSpace=diskSpace, bandwidth=bandwidth,
emailAccounts=emailAccounts, dataBases=dataBases, ftpAccounts=ftpAccounts,
allowedDomains=allowedDomains)
newPack.save()
self.printStatus(1, 'None')
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
self.printStatus(0, str(msg))
def deletePackage(self, packageName):
try:
delPack = Package.objects.get(packageName=packageName)
delPack.delete()
self.printStatus(1, 'None')
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
self.printStatus(0, str(msg))
def listPackagesJson(self):
try:
records = Package.objects.all()
json_data = "["
checker = 0
for items in records:
dic = {'id': items.id,
'packageName': items.packageName,
'domains': items.allowedDomains,
'diskSpace': items.diskSpace,
'bandwidth': items.bandwidth,
'ftpAccounts ': items.ftpAccounts,
'dataBases': items.dataBases,
'emailAccounts':items.emailAccounts
}
if checker == 0:
json_data = json_data + json.dumps(dic)
checker = 1
else:
json_data = json_data + ',' + json.dumps(dic)
json_data = json_data + ']'
final_json = json.dumps(json_data)
print(final_json)
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
print(0)
def listPackagesPretty(self):
try:
from prettytable import PrettyTable
records = Package.objects.all()
table = PrettyTable(['Name', 'Domains', 'Disk Space', 'Bandwidth', 'FTP Accounts', 'Databases', 'Email Accounts'])
for items in records:
table.add_row([items.packageName, items.allowedDomains, items.diskSpace, items.bandwidth, items.ftpAccounts, items.dataBases, items.emailAccounts])
print(table)
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
print(0)
## Database functions
def createDatabase(self, dbName, dbUsername, dbPassword, databaseWebsite):
try:
result = mysqlUtilities.submitDBCreation(dbName, dbUsername, dbPassword, databaseWebsite)
if result[0] == 1:
self.printStatus(1, 'None')
else:
                self.printStatus(0, result[1])
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
self.printStatus(0, str(msg))
def deleteDatabase(self, dbName):
try:
result = mysqlUtilities.submitDBDeletion(dbName)
if result[0] == 1:
self.printStatus(1, 'None')
else:
                self.printStatus(0, result[1])
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
self.printStatus(0, str(msg))
def listDatabasesJson(self, virtualHostName):
try:
records = mysqlUtilities.getDatabases(virtualHostName)
json_data = "["
checker = 0
for items in records:
dic = {'id': items.id,
'dbName': items.dbName,
'dbUser': items.dbUser,
}
if checker == 0:
json_data = json_data + json.dumps(dic)
checker = 1
else:
json_data = json_data + ',' + json.dumps(dic)
json_data = json_data + ']'
final_json = json.dumps(json_data)
print(final_json)
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
print(0)
def listDatabasesPretty(self, virtualHostName):
try:
from prettytable import PrettyTable
records = mysqlUtilities.getDatabases(virtualHostName)
table = PrettyTable(['ID', 'Database Name', 'Database User'])
for items in records:
table.add_row([items.id, items.dbName, items.dbUser])
print(table)
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
print(0)
## Email functions
def createEmail(self, domain, userName, password):
try:
result = mailUtilities.createEmailAccount(domain, userName, password)
if result[0] == 1:
self.printStatus(1, 'None')
else:
                self.printStatus(0, result[1])
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
self.printStatus(0, str(msg))
def deleteEmail(self, email):
try:
result = mailUtilities.deleteEmailAccount(email)
if result[0] == 1:
self.printStatus(1, 'None')
else:
                self.printStatus(0, result[1])
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
self.printStatus(0, str(msg))
def changeEmailPassword(self, email, password):
try:
result = mailUtilities.changeEmailPassword(email, password)
if result[0] == 1:
self.printStatus(1, 'None')
else:
                self.printStatus(0, result[1])
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
self.printStatus(0, str(msg))
def listEmailsJson(self, virtualHostName):
try:
records = mailUtilities.getEmailAccounts(virtualHostName)
json_data = "["
checker = 0
for items in records:
dic = {
'email': items.email,
}
if checker == 0:
json_data = json_data + json.dumps(dic)
checker = 1
else:
json_data = json_data + ',' + json.dumps(dic)
json_data = json_data + ']'
final_json = json.dumps(json_data)
print(final_json)
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
print(0)
def listEmailsPretty(self, virtualHostName):
try:
from prettytable import PrettyTable
records = mailUtilities.getEmailAccounts(virtualHostName)
table = PrettyTable(['Email'])
for items in records:
table.add_row([items.email])
print(table)
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
print(0)
    ## FTP Functions
def createFTPAccount(self, domain, userName, password, owner):
try:
result = FTPUtilities.submitFTPCreation(domain, userName, password, 'None', owner)
if result[0] == 1:
self.printStatus(1, 'None')
else:
                self.printStatus(0, result[1])
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
self.printStatus(0, str(msg))
def deleteFTPAccount(self, userName):
try:
result = FTPUtilities.submitFTPDeletion(userName)
if result[0] == 1:
self.printStatus(1, 'None')
else:
                self.printStatus(0, result[1])
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
self.printStatus(0, str(msg))
def changeFTPPassword(self, userName, password):
try:
result = FTPUtilities.changeFTPPassword(userName, password)
if result[0] == 1:
self.printStatus(1, 'None')
else:
                self.printStatus(0, result[1])
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
self.printStatus(0, str(msg))
def listFTPJson(self, virtualHostName):
try:
records = FTPUtilities.getFTPRecords(virtualHostName)
json_data = "["
checker = 0
for items in records:
dic = {'id': items.id,
'username': items.user,
'path': items.dir
}
if checker == 0:
json_data = json_data + json.dumps(dic)
checker = 1
else:
json_data = json_data + ',' + json.dumps(dic)
json_data = json_data + ']'
final_json = json.dumps(json_data)
print(final_json)
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
print(0)
def listFTPPretty(self, virtualHostName):
try:
from prettytable import PrettyTable
records = FTPUtilities.getFTPRecords(virtualHostName)
table = PrettyTable(['ID', 'User', 'Path'])
for items in records:
table.add_row([items.id, items.user, items.dir])
print(table)
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
print(0)
## FTP Functions
## SSL Functions
def issueSSL(self, virtualHost):
try:
path = ''
adminEmail = ''
try:
website = ChildDomains.objects.get(domain=virtualHost)
adminEmail = website.master.adminEmail
path = website.path
except:
website = Websites.objects.get(domain=virtualHost)
adminEmail = website.adminEmail
path = "/home/" + virtualHost + "/public_html"
result = virtualHostUtilities.issueSSL(virtualHost, path, adminEmail)
if result[0] == 1:
self.printStatus(1, 'None')
else:
                self.printStatus(0, result[1])
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
self.printStatus(0, str(msg))
def issueSSLForHostName(self, virtualHost):
try:
path = ''
adminEmail = ''
try:
website = ChildDomains.objects.get(domain=virtualHost)
adminEmail = website.master.adminEmail
path = website.path
except:
website = Websites.objects.get(domain=virtualHost)
adminEmail = website.adminEmail
path = "/home/" + virtualHost + "/public_html"
result = virtualHostUtilities.issueSSLForHostName(virtualHost, path)
if result[0] == 1:
self.printStatus(1, 'None')
else:
                self.printStatus(0, result[1])
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
self.printStatus(0, str(msg))
def issueSSLForMailServer(self, virtualHost):
try:
path = ''
adminEmail = ''
try:
website = ChildDomains.objects.get(domain=virtualHost)
adminEmail = website.master.adminEmail
path = website.path
except:
website = Websites.objects.get(domain=virtualHost)
adminEmail = website.adminEmail
path = "/home/" + virtualHost + "/public_html"
result = virtualHostUtilities.issueSSLForMailServer(virtualHost, path)
if result[0] == 1:
self.printStatus(1, 'None')
else:
                self.printStatus(0, result[1])
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
self.printStatus(0, str(msg))
def issueSelfSignedSSL(self, virtualHost):
try:
try:
website = ChildDomains.objects.get(domain=virtualHost)
adminEmail = website.master.adminEmail
except:
website = Websites.objects.get(domain=virtualHost)
adminEmail = website.adminEmail
pathToStoreSSL = "/etc/letsencrypt/live/" + virtualHost
command = 'mkdir -p ' + pathToStoreSSL
ProcessUtilities.executioner(command)
pathToStoreSSLPrivKey = "/etc/letsencrypt/live/" + virtualHost + "/privkey.pem"
pathToStoreSSLFullChain = "/etc/letsencrypt/live/" + virtualHost + "/fullchain.pem"
command = 'openssl req -newkey rsa:2048 -new -nodes -x509 -days 3650 -subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=www.example.com" -keyout ' + pathToStoreSSLPrivKey + ' -out ' + pathToStoreSSLFullChain
ProcessUtilities.executioner(command)
sslUtilities.installSSLForDomain(virtualHost, adminEmail)
ProcessUtilities.restartLitespeed()
self.printStatus(1, 'None')
except BaseException as msg:
logger.writeforCLI(str(msg), "Error", stack()[0][3])
self.printStatus(0, str(msg))
def main():
parser = cliParser()
args = parser.prepareArguments()
cyberpanel = cyberPanel()
## Website functions
if args.function == "createWebsite":
        completeCommandExample = 'cyberpanel createWebsite --package Default --owner admin --domainName cyberpanel.net --email [email protected] --php 5.6'
if not args.package:
print("\n\nPlease enter the package name. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.owner:
print("\n\nPlease enter the owner name. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.domainName:
print("\n\nPlease enter the domain name. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.email:
print("\n\nPlease enter the email. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.php:
print("\n\nPlease enter the PHP version such as 5.6 for PHP version 5.6. For example:\n\n" + completeCommandExample + "\n\n")
return
if args.ssl:
ssl = int(args.ssl)
else:
ssl = 0
if args.dkim:
dkim = int(args.dkim)
else:
dkim = 0
if args.openBasedir:
openBasedir = int(args.openBasedir)
else:
openBasedir = 0
cyberpanel.createWebsite(args.package, args.owner, args.domainName, args.email, args.php, ssl, dkim, openBasedir)
elif args.function == "deleteWebsite":
completeCommandExample = 'cyberpanel deleteWebsite --domainName cyberpanel.net'
if not args.domainName:
print("\n\nPlease enter the domain to delete. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.deleteWebsite(args.domainName)
elif args.function == "createChild":
completeCommandExample = 'cyberpanel createChild --masterDomain cyberpanel.net --childDomain child.cyberpanel.net' \
' --owner admin --php 5.6'
if not args.masterDomain:
print("\n\nPlease enter Master domain. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.childDomain:
print("\n\nPlease enter the Child Domain. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.owner:
print("\n\nPlease enter owner for this domain DNS records. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.php:
print("\n\nPlease enter required PHP version. For example:\n\n" + completeCommandExample + "\n\n")
return
if args.ssl:
ssl = int(args.ssl)
else:
ssl = 0
if args.dkim:
dkim = int(args.dkim)
else:
dkim = 0
if args.openBasedir:
openBasedir = int(args.openBasedir)
else:
openBasedir = 0
cyberpanel.createDomain(args.masterDomain, args.childDomain, args.owner, args.php, ssl, dkim, openBasedir)
elif args.function == "deleteChild":
completeCommandExample = 'cyberpanel deleteChild --childDomain cyberpanel.net'
if not args.childDomain:
print("\n\nPlease enter the child domain to delete. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.deleteChild(args.childDomain)
elif args.function == "listWebsitesJson":
cyberpanel.listWebsitesJson()
elif args.function == "listWebsitesPretty":
cyberpanel.listWebsitesPretty()
elif args.function == "changePHP":
completeCommandExample = 'cyberpanel changePHP --domainName cyberpanel.net --php 5.6'
if not args.domainName:
print("\n\nPlease enter Domain. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.php:
print("\n\nPlease enter required PHP version. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.changePHP(args.domainName, args.php)
elif args.function == "changePackage":
completeCommandExample = 'cyberpanel changePackage --domainName cyberpanel.net --packageName CLI'
if not args.domainName:
print("\n\nPlease enter the Domain. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.packageName:
print("\n\nPlease enter the package name. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.changePackage(args.domainName, args.packageName)
## DNS Functions
elif args.function == "listDNSJson":
completeCommandExample = 'cyberpanel listDNSJson --domainName cyberpanel.net'
if not args.domainName:
print("\n\nPlease enter the domain. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.listDNSJson(args.domainName)
elif args.function == "listDNSPretty":
completeCommandExample = 'cyberpanel listDNSPretty --domainName cyberpanel.net'
if not args.domainName:
print("\n\nPlease enter the domain. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.listDNSPretty(args.domainName)
elif args.function == "listDNSZonesJson":
cyberpanel.listDNSZonesJson()
elif args.function == "listDNSZonesPretty":
cyberpanel.listDNSZonesPretty()
elif args.function == "createDNSZone":
completeCommandExample = 'cyberpanel createDNSZone --owner admin --domainName cyberpanel.net'
if not args.domainName:
print("\n\nPlease enter the domain. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.owner:
print("\n\nPlease enter the owner name. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.createDNSZone(args.domainName, args.owner)
elif args.function == "deleteDNSZone":
completeCommandExample = 'cyberpanel deleteDNSZone --domainName cyberpanel.net'
if not args.domainName:
print("\n\nPlease enter the domain. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.deleteDNSZone(args.domainName)
elif args.function == "createDNSRecord":
completeCommandExample = 'cyberpanel createDNSRecord --domainName cyberpanel.net --name cyberpanel.net' \
' --recordType A --value 192.168.100.1 --priority 0 --ttl 3600'
if not args.domainName:
print("\n\nPlease enter the domain. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.name:
print("\n\nPlease enter the record name. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.recordType:
print("\n\nPlease enter the record type. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.value:
print("\n\nPlease enter the record value. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.priority:
print("\n\nPlease enter the priority. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.ttl:
print("\n\nPlease enter the ttl. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.createDNSRecord(args.domainName, args.name, args.recordType, args.value, args.priority, args.ttl)
elif args.function == "deleteDNSRecord":
completeCommandExample = 'cyberpanel deleteDNSRecord --recordID 200'
if not args.recordID:
print("\n\nPlease enter the record ID to be deleted, you can find record ID by listing the current DNS records. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.deleteDNSRecord(args.recordID)
## Backup Functions.
elif args.function == "createBackup":
completeCommandExample = 'cyberpanel createBackup --domainName cyberpanel.net'
if not args.domainName:
print("\n\nPlease enter the domain. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.createBackup(args.domainName)
elif args.function == "restoreBackup":
completeCommandExample = 'cyberpanel restoreBackup --fileName /home/talkshosting.com/backup/backup-talksho-01-30-53-Fri-Jun-2018.tar.gz'
if not args.fileName:
print("\n\nPlease enter the file name or complete path to file. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.restoreBackup(args.fileName)
## Package functions.
elif args.function == "createPackage":
completeCommandExample = 'cyberpanel createPackage --owner admin --packageName CLI --diskSpace 1000 --bandwidth 10000 --emailAccounts 100' \
' --dataBases 100 --ftpAccounts 100 --allowedDomains 100'
if not args.owner:
print("\n\nPlease enter the owner name. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.packageName:
print("\n\nPlease enter the package name. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.diskSpace:
print("\n\nPlease enter value for Disk Space. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.bandwidth:
print("\n\nPlease enter value for Bandwidth. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.emailAccounts:
print("\n\nPlease enter value for Email accounts. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.dataBases:
print("\n\nPlease enter value for Databases. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.ftpAccounts:
print("\n\nPlease enter value for Ftp accounts. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.allowedDomains:
print("\n\nPlease enter value for Allowed Child Domains. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.createPackage(args.owner, args.packageName, args.diskSpace, args.bandwidth, args.emailAccounts,
args.dataBases, args.ftpAccounts, args.allowedDomains)
elif args.function == "deletePackage":
completeCommandExample = 'cyberpanel deletePackage --packageName CLI'
if not args.packageName:
print("\n\nPlease enter the package name. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.deletePackage(args.packageName)
elif args.function == "listPackagesJson":
cyberpanel.listPackagesJson()
elif args.function == "listPackagesPretty":
cyberpanel.listPackagesPretty()
## Database functions.
elif args.function == "createDatabase":
completeCommandExample = 'cyberpanel createDatabase --databaseWebsite cyberpanel.net --dbName cyberpanel ' \
'--dbUsername cyberpanel --dbPassword cyberpanel'
if not args.databaseWebsite:
print("\n\nPlease enter database website. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.dbName:
print("\n\nPlease enter the database name. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.dbUsername:
print("\n\nPlease enter the database username. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.dbPassword:
print("\n\nPlease enter the password for database. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.createDatabase(args.dbName, args.dbUsername, args.dbPassword, args.databaseWebsite)
elif args.function == "deleteDatabase":
completeCommandExample = 'cyberpanel deleteDatabase --dbName cyberpanel'
if not args.dbName:
print("\n\nPlease enter the database name. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.deleteDatabase(args.dbName)
elif args.function == "listDatabasesJson":
completeCommandExample = 'cyberpanel listDatabasesJson --databaseWebsite cyberpanel.net'
if not args.databaseWebsite:
print("\n\nPlease enter database website. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.listDatabasesJson(args.databaseWebsite)
elif args.function == "listDatabasesPretty":
completeCommandExample = 'cyberpanel listDatabasesPretty --databaseWebsite cyberpanel.net'
if not args.databaseWebsite:
print("\n\nPlease enter database website. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.listDatabasesPretty(args.databaseWebsite)
## Email Functions
elif args.function == "createEmail":
completeCommandExample = 'cyberpanel createEmail --domainName cyberpanel.net --userName cyberpanel ' \
'--password cyberpanel'
if not args.domainName:
print("\n\nPlease enter Domain name. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.userName:
print("\n\nPlease enter the user name. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.password:
            print("\n\nPlease enter the password for the email account. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.createEmail(args.domainName, args.userName, args.password)
elif args.function == "deleteEmail":
completeCommandExample = 'cyberpanel deleteEmail --email [email protected]'
if not args.email:
print("\n\nPlease enter the email. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.deleteEmail(args.email)
elif args.function == "changeEmailPassword":
completeCommandExample = 'cyberpanel changeEmailPassword --email [email protected] --password cyberpanel'
if not args.email:
print("\n\nPlease enter email. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.password:
print("\n\nPlease enter the password. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.changeEmailPassword(args.email, args.password)
elif args.function == "listEmailsJson":
completeCommandExample = 'cyberpanel listEmailsJson --domainName cyberpanel.net'
if not args.domainName:
print("\n\nPlease enter domain name. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.listEmailsJson(args.domainName)
elif args.function == "listEmailsPretty":
completeCommandExample = 'cyberpanel listEmailsPretty --domainName cyberpanel.net'
if not args.domainName:
print("\n\nPlease enter domain name. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.listEmailsPretty(args.domainName)
## FTP Functions
elif args.function == "createFTPAccount":
completeCommandExample = 'cyberpanel createFTPAccount --domainName cyberpanel.net --userName cyberpanel ' \
'--password cyberpanel --owner admin'
if not args.domainName:
print("\n\nPlease enter Domain name. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.userName:
print("\n\nPlease enter the user name. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.password:
            print("\n\nPlease enter the password for the FTP account. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.owner:
print("\n\nPlease enter the owner name. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.createFTPAccount(args.domainName, args.userName, args.password, args.owner)
elif args.function == "deleteFTPAccount":
completeCommandExample = 'cyberpanel deleteFTPAccount --userName cyberpanel'
if not args.userName:
print("\n\nPlease enter the user name. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.deleteFTPAccount(args.userName)
elif args.function == "changeFTPPassword":
completeCommandExample = 'cyberpanel changeFTPPassword --userName cyberpanel --password cyberpanel'
if not args.userName:
print("\n\nPlease enter the user name. For example:\n\n" + completeCommandExample + "\n\n")
return
if not args.password:
            print("\n\nPlease enter the new password for the FTP account. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.changeFTPPassword(args.userName, args.password)
elif args.function == "listFTPJson":
completeCommandExample = 'cyberpanel listFTPJson --domainName cyberpanel.net'
if not args.domainName:
print("\n\nPlease enter domain name. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.listFTPJson(args.domainName)
elif args.function == "listFTPPretty":
completeCommandExample = 'cyberpanel listFTPPretty --domainName cyberpanel.net'
if not args.domainName:
print("\n\nPlease enter domain name. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.listFTPPretty(args.domainName)
## SSL Functions
elif args.function == "issueSSL":
completeCommandExample = 'cyberpanel issueSSL --domainName cyberpanel.net'
if not args.domainName:
print("\n\nPlease enter Domain name. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.issueSSL(args.domainName)
elif args.function == "hostNameSSL":
completeCommandExample = 'cyberpanel hostNameSSL --domainName cyberpanel.net'
if not args.domainName:
print("\n\nPlease enter Domain name. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.issueSSLForHostName(args.domainName)
elif args.function == "mailServerSSL":
completeCommandExample = 'cyberpanel mailServerSSL --domainName cyberpanel.net'
if not args.domainName:
print("\n\nPlease enter Domain name. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.issueSSLForMailServer(args.domainName)
elif args.function == "issueSelfSignedSSL":
completeCommandExample = 'cyberpanel issueSelfSignedSSL --domainName cyberpanel.net'
if not args.domainName:
print("\n\nPlease enter Domain name. For example:\n\n" + completeCommandExample + "\n\n")
return
cyberpanel.issueSelfSignedSSL(args.domainName)
elif args.function == 'utility':
if not os.path.exists('/usr/bin/cyberpanel_utility'):
command = 'wget -q -O /usr/bin/cyberpanel_utility https://cyberpanel.sh/misc/cyberpanel_utility.sh'
ProcessUtilities.executioner(command)
command = 'chmod 700 /usr/bin/cyberpanel_utility'
ProcessUtilities.executioner(command)
command = '/usr/bin/cyberpanel_utility'
ProcessUtilities.executioner(command)
elif args.function == 'upgrade' or args.function == 'update':
if not os.path.exists('/usr/bin/cyberpanel_utility'):
command = 'wget -q -O /usr/bin/cyberpanel_utility https://cyberpanel.sh/misc/cyberpanel_utility.sh'
ProcessUtilities.executioner(command)
command = 'chmod 700 /usr/bin/cyberpanel_utility'
ProcessUtilities.executioner(command)
command = '/usr/bin/cyberpanel_utility --upgrade'
ProcessUtilities.executioner(command)
elif args.function == 'help':
if not os.path.exists('/usr/bin/cyberpanel_utility'):
command = 'wget -q -O /usr/bin/cyberpanel_utility https://cyberpanel.sh/misc/cyberpanel_utility.sh'
ProcessUtilities.executioner(command)
command = 'chmod 700 /usr/bin/cyberpanel_utility'
ProcessUtilities.executioner(command)
command = '/usr/bin/cyberpanel_utility --help'
ProcessUtilities.executioner(command)
elif args.function == 'version' or args.function == 'v' or args.function == 'V':
## Get CurrentVersion
with open('/usr/local/CyberCP/version.txt') as file:
file_contents = file.read()
version = re.search('\d.\d', file_contents)
version = version.group()
build = file_contents[-2:]
build = build[0:1]
currentversion = version + '.' + build
print (currentversion)
if __name__ == "__main__":
main()
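Most management methods above end in printStatus(), which writes a single JSON object of the form {"success": ..., "errorMessage": ...} to stdout. Below is a minimal sketch (a hypothetical wrapper, not part of CyberPanel) of how another script could invoke the cyberpanel CLI shown in the usage strings above and read that status back.
import json
import subprocess

def run_cyberpanel(*args):
    """Run `cyberpanel <args...>` and return the parsed JSON status, or the raw output."""
    proc = subprocess.run(["cyberpanel", *args], capture_output=True, text=True)
    try:
        # Management functions print {"success": ..., "errorMessage": ...} via printStatus().
        return json.loads(proc.stdout.strip())
    except json.JSONDecodeError:
        # Listing and utility functions print tables or plain text instead; return it untouched.
        return {"success": None, "raw": proc.stdout}

status = run_cyberpanel("deleteEmail", "--email", "[email protected]")
print(status.get("success"), status.get("errorMessage"))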
| 0
| 0
| 0
| 25,509
| 0
| 21,468
| 0
| 313
| 464
|
3fb46c294a5360bb8f1d5b3c59d0fde197c8961d
| 4,380
|
py
|
Python
|
setup.py
|
Tobi-Alonso/ResNet50-PYNQ
|
7c203c2b249479c5384afe152dde2bb06576339b
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
Tobi-Alonso/ResNet50-PYNQ
|
7c203c2b249479c5384afe152dde2bb06576339b
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
Tobi-Alonso/ResNet50-PYNQ
|
7c203c2b249479c5384afe152dde2bb06576339b
|
[
"BSD-3-Clause"
] | 1
|
2020-03-27T18:20:47.000Z
|
2020-03-27T18:20:47.000Z
|
# Copyright (c) 2019, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from setuptools import setup, find_packages
import os
__author__ = "Lucian Petrica"
__copyright__ = "Copyright 2019, Xilinx"
# global variables
module_name = "resnet50_pynq"
data_files = []
with open("README.md", encoding="utf-8") as fh:
readme_lines = fh.readlines()
readme_lines = readme_lines[
readme_lines.index("## PYNQ quick start\n") + 2:
readme_lines.index("## Author\n"):
]
long_description = ("".join(readme_lines))
extend_package(os.path.join(module_name, "notebooks"))
setup(name=module_name,
version="1.0",
description="Quantized dataflow implementation of ResNet50 on Alveo",
long_description=long_description,
long_description_content_type="text/markdown",
author="Lucian Petrica",
url="https://github.com/Xilinx/ResNet50-PYNQ",
packages=find_packages(),
download_url="https://github.com/Xilinx/ResNet50-PYNQ",
package_data={
"": data_files,
},
python_requires=">=3.5.2",
# keeping 'setup_requires' only for readability - relying on
# pyproject.toml and PEP 517/518
setup_requires=[
"pynq>=2.5.1"
],
install_requires=[
"pynq>=2.5.1",
"jupyter",
"jupyterlab",
"plotly",
"opencv-python",
"wget"
],
extras_require={
':python_version<"3.6"': [
'matplotlib<3.1',
'ipython==7.9'
],
':python_version>="3.6"': [
'matplotlib'
]
},
entry_points={
"pynq.notebooks": [
"ResNet50 = {}.notebooks".format(module_name)
]
},
cmdclass={"build_py": build_py},
license="BSD 3-Clause"
)
| 35.04
| 81
| 0.656164
|
# Copyright (c) 2019, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from setuptools import setup, find_packages
from distutils.dir_util import copy_tree
import os
from pynq.utils import build_py as _build_py
__author__ = "Lucian Petrica"
__copyright__ = "Copyright 2019, Xilinx"
# global variables
module_name = "resnet50_pynq"
data_files = []
def extend_package(path):
if os.path.isdir(path):
data_files.extend(
[os.path.join("..", root, f)
for root, _, files in os.walk(path) for f in files]
)
elif os.path.isfile(path):
data_files.append(os.path.join("..", path))
class build_py(_build_py):
"""Overload the pynq.utils 'build_py' command (that performs overlay
download) to also call the function 'copy_notebooks'.
"""
def copy_notebooks(self):
cmd = self.get_finalized_command("build_py")
for package, src_dir, build_dir, _ in cmd.data_files:
if "." not in package: # sub-packages are skipped
src_folder = os.path.join(os.path.dirname(src_dir), "host")
dst_folder = os.path.join(build_dir, "notebooks")
if os.path.isdir(src_folder):
copy_tree(src_folder, dst_folder)
def run(self):
super().run()
self.copy_notebooks()
with open("README.md", encoding="utf-8") as fh:
readme_lines = fh.readlines()
readme_lines = readme_lines[
readme_lines.index("## PYNQ quick start\n") + 2:
readme_lines.index("## Author\n"):
]
long_description = ("".join(readme_lines))
extend_package(os.path.join(module_name, "notebooks"))
setup(name=module_name,
version="1.0",
description="Quantized dataflow implementation of ResNet50 on Alveo",
long_description=long_description,
long_description_content_type="text/markdown",
author="Lucian Petrica",
url="https://github.com/Xilinx/ResNet50-PYNQ",
packages=find_packages(),
download_url="https://github.com/Xilinx/ResNet50-PYNQ",
package_data={
"": data_files,
},
python_requires=">=3.5.2",
# keeping 'setup_requires' only for readability - relying on
# pyproject.toml and PEP 517/518
setup_requires=[
"pynq>=2.5.1"
],
install_requires=[
"pynq>=2.5.1",
"jupyter",
"jupyterlab",
"plotly",
"opencv-python",
"wget"
],
extras_require={
':python_version<"3.6"': [
'matplotlib<3.1',
'ipython==7.9'
],
':python_version>="3.6"': [
'matplotlib'
]
},
entry_points={
"pynq.notebooks": [
"ResNet50 = {}.notebooks".format(module_name)
]
},
cmdclass={"build_py": build_py},
license="BSD 3-Clause"
)
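# Minimal sketch (not part of this setup.py): once the package above is installed,
# the notebooks it registers under the "pynq.notebooks" entry-point group can be
# discovered with importlib.metadata. The group and module names come from the
# setup() call above; everything else here is illustrative.
from importlib.metadata import entry_points
def pynq_notebook_entry_points():
    """Return {entry point name: module path} for the 'pynq.notebooks' group."""
    eps = entry_points()
    # Python 3.10+ exposes .select(); older versions return a dict-like mapping.
    group = eps.select(group="pynq.notebooks") if hasattr(eps, "select") else eps.get("pynq.notebooks", [])
    return {ep.name: ep.value for ep in group}
if __name__ == "__main__":
    print(pynq_notebook_entry_points())        # e.g. {'ResNet50': 'resnet50_pynq.notebooks'}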
| 0
| 0
| 0
| 667
| 0
| 258
| 0
| 42
| 90
|
c04249abf3a5ebc265209326af85d9f62c50c23b
| 3,052
|
py
|
Python
|
testing/python_lib/test_faucet_state_collector.py
|
pbatta/forch
|
df033bc5b7cbac06e1c406257193cb0cb62f2742
|
[
"Apache-2.0"
] | 1
|
2019-12-12T23:13:24.000Z
|
2019-12-12T23:13:24.000Z
|
testing/python_lib/test_faucet_state_collector.py
|
pbatta/forch
|
df033bc5b7cbac06e1c406257193cb0cb62f2742
|
[
"Apache-2.0"
] | 92
|
2019-12-13T03:30:35.000Z
|
2021-11-11T16:16:13.000Z
|
testing/python_lib/test_faucet_state_collector.py
|
pbatta/forch
|
df033bc5b7cbac06e1c406257193cb0cb62f2742
|
[
"Apache-2.0"
] | 7
|
2020-01-11T14:12:46.000Z
|
2021-01-25T17:30:55.000Z
|
"""Unit tests for Faucet State Collector"""
import unittest
if __name__ == '__main__':
unittest.main()
| 37.219512
| 100
| 0.582241
|
"""Unit tests for Faucet State Collector"""
import unittest
from unit_base import FaucetStateCollectorTestBase
from forch.proto.faucet_event_pb2 import StackTopoChange
from forch.utils import dict_proto
class DataplaneStateTestCase(FaucetStateCollectorTestBase):
"""Test cases for dataplane state"""
def _build_link(self, dp1, port1, dp2, port2):
return {
'key': dp1 + ':' + port1 + '-' + dp2 + ':' + port2,
'source': dp1,
'target': dp2,
'port_map': {
'dp_a': dp1,
'port_a': 'Port ' + port1,
'dp_z': dp2,
'port_z': 'Port ' + port2
}
}
def _build_loop_topo_obj(self):
dps = {
'sw1': StackTopoChange.StackDp(root_hop_port=1),
'sw2': StackTopoChange.StackDp(root_hop_port=1),
'sw3': StackTopoChange.StackDp(root_hop_port=1),
}
links = [
self._build_link('sw1', '1', 'sw2', '2'),
self._build_link('sw2', '1', 'sw3', '2'),
self._build_link('sw3', '1', 'sw1', '2'),
]
links_graph = [dict_proto(link, StackTopoChange.StackLink) for link in links]
return {
'dps': dps,
'links_graph': links_graph
}
def _build_topo_obj(self):
dps = {
'sw1': StackTopoChange.StackDp(),
'sw2': StackTopoChange.StackDp(root_hop_port=1),
'sw3': StackTopoChange.StackDp(root_hop_port=1),
}
links = [
self._build_link('sw1', '1', 'sw2', '1'),
self._build_link('sw2', '2', 'sw3', '2'),
self._build_link('sw3', '1', 'sw1', '2'),
]
links_graph = [dict_proto(link, StackTopoChange.StackLink) for link in links]
return {
'active_root': 'sw1',
'dps': dps,
'links_graph': links_graph
}
def test_topology_loop(self):
"""test faucet_state_collector behavior when faucet sends loop in path to egress topology"""
self._faucet_state_collector.topo_state = self._build_loop_topo_obj()
egress_path = self._faucet_state_collector.get_switch_egress_path('sw1')
self.assertEqual(egress_path['path_state'], 1)
self.assertEqual(egress_path['path_state_detail'],
'No path to root found. Loop in topology.')
def test_egress_path(self):
"""test faucet_state_collector behavior when faucet sends loop in path to egress topology"""
self._faucet_state_collector.topo_state = self._build_topo_obj()
# pylint: disable=protected-access
self._faucet_state_collector._get_egress_port = lambda port: 28
egress_path = self._faucet_state_collector.get_switch_egress_path('sw3')
self.assertEqual(egress_path['path_state'], 5)
self.assertEqual(egress_path['path'],
[{'switch': 'sw3', 'out': 1}, {'switch': 'sw1', 'in': 2, 'out': 28}])
if __name__ == '__main__':
unittest.main()
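# Standalone sketch (not forch code) of why the first fixture above yields no path
# to the egress: every switch declares a root hop, so following the hops from any
# switch revisits a node instead of ever reaching a true root. The hop targets
# below are an assumption derived from the fixture's links.
root_hop = {"sw1": "sw2", "sw2": "sw3", "sw3": "sw1"}
def path_to_root(start):
    """Follow root hops until a hop-less (root) switch is found or a loop is seen."""
    seen, node, path = set(), start, []
    while node in root_hop:                    # a true root has no root hop entry
        if node in seen:
            return None                        # revisited a switch: loop in topology
        seen.add(node)
        path.append(node)
        node = root_hop[node]
    return path + [node]
print(path_to_root("sw1"))                     # -> None (loop), mirroring the test's expectation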
| 0
| 0
| 0
| 2,774
| 0
| 0
| 0
| 77
| 90
|
3c98d96e351e9f0cf0c5d2fb68fa0eae5f624451
| 2,344
|
py
|
Python
|
tests/python/gaia-ui-tests/gaiatest/apps/persona/app.py
|
pdehaan/gaia
|
0ea959d81cefa0128157ec3ff0e2b7bdd29afacf
|
[
"Apache-2.0"
] | 1
|
2015-03-02T04:03:00.000Z
|
2015-03-02T04:03:00.000Z
|
tests/python/gaia-ui-tests/gaiatest/apps/persona/app.py
|
caseyyee/gaia
|
fa82433dda06e9ae7d35a1f74cc16f4dd72cc514
|
[
"Apache-2.0"
] | null | null | null |
tests/python/gaia-ui-tests/gaiatest/apps/persona/app.py
|
caseyyee/gaia
|
fa82433dda06e9ae7d35a1f74cc16f4dd72cc514
|
[
"Apache-2.0"
] | null | null | null |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
| 37.206349
| 103
| 0.729522
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from marionette.by import By
from gaiatest.apps.base import Base
class Persona(Base):
# iframes
_persona_frame_locator = (By.CSS_SELECTOR, "iframe.screen[data-url*='persona.org/sign_in#NATIVE']")
# persona login
_body_loading_locator = (By.CSS_SELECTOR, 'body.loading')
_email_input_locator = (By.ID, 'authentication_email')
_password_input_locator = (By.ID, 'authentication_password')
_continue_button_locator = (By.CSS_SELECTOR, '.continue.right')
_returning_button_locator = (By.CSS_SELECTOR, 'button.isReturning')
def __init__(self, marionette):
Base.__init__(self, marionette)
def login(self, email, password):
# This only supports logging in with a known user and no existing session
self.type_email(email)
self.tap_continue()
self.type_password(password)
self.tap_returning()
self.marionette.switch_to_frame()
self.wait_for_element_not_present(*self._persona_frame_locator)
self.apps.switch_to_displayed_app()
def wait_for_persona_to_load(self):
self.wait_for_element_not_displayed(*self._body_loading_locator)
def switch_to_persona_frame(self):
self.marionette.switch_to_frame()
self.frame = self.wait_for_element_present(*self._persona_frame_locator)
self.marionette.switch_to_frame(self.frame)
self.wait_for_persona_to_load()
def type_email(self, value):
self.marionette.find_element(*self._email_input_locator).send_keys(value)
self.keyboard.dismiss()
self.switch_to_persona_frame()
def type_password(self, value):
self.marionette.find_element(*self._password_input_locator).send_keys(value)
self.keyboard.dismiss()
self.switch_to_persona_frame()
def tap_continue(self):
self.marionette.find_element(*self._continue_button_locator).tap()
self.wait_for_element_not_displayed(*self._continue_button_locator)
self.wait_for_element_displayed(*self._password_input_locator)
def tap_returning(self):
self.marionette.find_element(*self._returning_button_locator).tap()
| 0
| 0
| 0
| 2,055
| 0
| 0
| 0
| 21
| 68
|
46007c370fa322eb1ca7e4346385565ed9dfbbd8
| 4,826
|
py
|
Python
|
ethevents/client/connection.py
|
ezdac/ethevents
|
9f4b0ff1ba0d303180abe3b5336805335bc0765b
|
[
"MIT"
] | 2
|
2018-08-21T01:06:30.000Z
|
2019-03-05T08:15:55.000Z
|
ethevents/client/connection.py
|
ezdac/ethevents
|
9f4b0ff1ba0d303180abe3b5336805335bc0765b
|
[
"MIT"
] | 1
|
2018-04-23T14:01:51.000Z
|
2018-04-23T14:09:51.000Z
|
ethevents/client/connection.py
|
ezdac/ethevents
|
9f4b0ff1ba0d303180abe3b5336805335bc0765b
|
[
"MIT"
] | 1
|
2022-03-22T04:57:16.000Z
|
2022-03-22T04:57:16.000Z
|
import logging
log = logging.getLogger(__name__)
if __name__ == '__main__':
main()
| 28.05814
| 90
| 0.564857
|
import time
import click
import requests
from elasticsearch.connection import Connection
from elasticsearch.connection_pool import DummyConnectionPool
from elasticsearch.transport import Transport
from elasticsearch.exceptions import (
ConnectionError,
ConnectionTimeout,
SSLError
)
from elasticsearch.compat import urlencode
from requests import Session
from ethevents.client.app import App
import logging
log = logging.getLogger(__name__)
class MicroRaidenConnection(Connection):
def __init__(
self,
host,
port,
session: Session,
use_ssl=False,
headers=None,
**kwargs
):
super(MicroRaidenConnection, self).__init__(
host=host,
port=port,
use_ssl=use_ssl,
**kwargs
)
self.base_url = 'http%s://%s:%d%s' % (
's' if self.use_ssl else '',
host, port, self.url_prefix
)
self.session = session
self.session.headers = headers or {}
self.session.headers.setdefault('content-type', 'application/json')
def perform_request(
self,
method,
url,
params=None,
body=None,
timeout=None,
ignore=(),
headers=None
):
url = self.base_url + url
if params:
url = '%s?%s' % (url, urlencode(params or {}))
start = time.time()
request = requests.Request(method=method, headers=headers, url=url, data=body)
prepared_request = self.session.prepare_request(request)
settings = self.session.merge_environment_settings(
prepared_request.url,
{},
None,
None,
None
)
send_kwargs = {'timeout': timeout or self.timeout}
send_kwargs.update(settings)
try:
response = self.session.request(
prepared_request.method,
prepared_request.url,
data=prepared_request.body,
headers=prepared_request.headers,
**send_kwargs
)
duration = time.time() - start
raw_data = response.text
except Exception as e:
self.log_request_fail(
method,
url,
prepared_request.path_url,
body,
time.time() - start,
exception=e
)
if isinstance(e, requests.exceptions.SSLError):
raise SSLError('N/A', str(e), e)
if isinstance(e, requests.Timeout):
raise ConnectionTimeout('TIMEOUT', str(e), e)
raise ConnectionError('N/A', str(e), e)
# raise errors based on http status codes, let the client handle those if needed
if not (200 <= response.status_code < 300) and response.status_code not in ignore:
self.log_request_fail(
method,
url,
response.request.path_url,
body,
duration,
response.status_code,
raw_data
)
self._raise_error(response.status_code, raw_data)
self.log_request_success(
method,
url,
response.request.path_url,
body,
response.status_code,
raw_data,
duration
)
return response.status_code, response.headers, raw_data
class MicroRaidenTransport(Transport):
def __init__(
self,
hosts,
*args,
session: Session,
connection_class=MicroRaidenConnection,
connection_pool_class=DummyConnectionPool,
**kwargs
):
self.hosts = hosts
log.debug('initializing transport')
super(MicroRaidenTransport, self).__init__(
hosts,
*args,
connection_class=connection_class,
connection_pool_class=connection_pool_class,
session=session,
**kwargs
)
@click.option(
'--limits/--no-limits',
default=True
)
@click.command()
def main(limits: bool):
logging.basicConfig(level=logging.DEBUG)
log.debug('in main')
app = App()
app.start(ignore_security_limits=not limits, endpoint_url='https://api.eth.events')
log.debug('session started')
if app.account.unlocked:
import elasticsearch
es = elasticsearch.Elasticsearch(
transport_class=MicroRaidenTransport,
hosts=['api.eth.events:443'],
use_ssl=True,
session=app.session
)
response = es.search('ethereum', 'block', body=dict(query=dict(match_all=dict())))
print(response)
if __name__ == '__main__':
main()
| 0
| 664
| 0
| 3,594
| 0
| 0
| 0
| 184
| 290
|
2abdd1a8347e55f710b0bd9bf098d6715d1155a9
| 561
|
py
|
Python
|
Number Guessing.py
|
GamePlayer-7/Gaming
|
4466f2e693f0c10d3bc041b388526484713dc2e1
|
[
"MIT"
] | null | null | null |
Number Guessing.py
|
GamePlayer-7/Gaming
|
4466f2e693f0c10d3bc041b388526484713dc2e1
|
[
"MIT"
] | null | null | null |
Number Guessing.py
|
GamePlayer-7/Gaming
|
4466f2e693f0c10d3bc041b388526484713dc2e1
|
[
"MIT"
] | null | null | null |
import random # imports the random module, which contains a variety of things to do with random number generation.
number = random.randint(1,10) #If we wanted a random integer, we can use the randint function Randint accepts two parameters: a lowest and a highest number.
for i in range(0,3):
user = int(input("guess the number"))
if user == number:
print("Hurray!!")
print(f"you guessed the number right it's {number}")
break
if user != number:
print(f"Your guess is incorrect the number is {number}")
| 51
| 161
| 0.672014
|
import random # imports the random module, which contains a variety of things to do with random number generation.
number = random.randint(1,10) #If we wanted a random integer, we can use the randint function Randint accepts two parameters: a lowest and a highest number.
for i in range(0,3):
user = int(input("guess the number"))
if user == number:
print("Hurray!!")
print(f"you guessed the number right it's {number}")
break
if user != number:
print(f"Your guess is incorrect the number is {number}")
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
a8f717c691e08e576daf5d6b539ccd45bbb8b08f
| 2,114
|
py
|
Python
|
src/blockdiag/imagedraw/utils/__init__.py
|
Dridi/blockdiag
|
bbb16f8a731cdf79a675a63c1ff847e70fdc4a5b
|
[
"Apache-2.0"
] | null | null | null |
src/blockdiag/imagedraw/utils/__init__.py
|
Dridi/blockdiag
|
bbb16f8a731cdf79a675a63c1ff847e70fdc4a5b
|
[
"Apache-2.0"
] | null | null | null |
src/blockdiag/imagedraw/utils/__init__.py
|
Dridi/blockdiag
|
bbb16f8a731cdf79a675a63c1ff847e70fdc4a5b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2011 Takeshi KOMIYA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import unicodedata
from blockdiag.utils import Size
from blockdiag.utils.compat import u
def is_zenkaku(char):
"""Detect given character is Japanese ZENKAKU character"""
char_width = unicodedata.east_asian_width(char)
return char_width in u("WFA")
def zenkaku_len(string):
"""Count Japanese ZENKAKU characters from string"""
return len([x for x in string if is_zenkaku(x)])
def hankaku_len(string):
"""Count non Japanese ZENKAKU characters from string"""
return len([x for x in string if not is_zenkaku(x)])
def string_width(string):
"""Measure rendering width of string.
Count ZENKAKU-character as 2-point and non ZENKAKU-character as 1-point
"""
widthmap = {'Na': 1, 'N': 1, 'H': 1, 'W': 2, 'F': 2, 'A': 2}
return sum(widthmap[unicodedata.east_asian_width(c)] for c in string)
def textsize(string, font):
"""Measure rendering size (width and height) of line.
Returned size will not exactly match the rendered text size,
because this method does not use fonts to measure it.
"""
width = (zenkaku_len(string) * font.size +
hankaku_len(string) * font.size * 0.55)
return Size(int(math.ceil(width)), font.size)
| 30.2
| 78
| 0.682119
|
# -*- coding: utf-8 -*-
# Copyright 2011 Takeshi KOMIYA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import unicodedata
from functools import wraps
from blockdiag.utils import Size
from blockdiag.utils.compat import u
def is_zenkaku(char):
"""Detect given character is Japanese ZENKAKU character"""
char_width = unicodedata.east_asian_width(char)
return char_width in u("WFA")
def zenkaku_len(string):
"""Count Japanese ZENKAKU characters from string"""
return len([x for x in string if is_zenkaku(x)])
def hankaku_len(string):
"""Count non Japanese ZENKAKU characters from string"""
return len([x for x in string if not is_zenkaku(x)])
def string_width(string):
"""Measure rendering width of string.
Count ZENKAKU-character as 2-point and non ZENKAKU-character as 1-point
"""
widthmap = {'Na': 1, 'N': 1, 'H': 1, 'W': 2, 'F': 2, 'A': 2}
return sum(widthmap[unicodedata.east_asian_width(c)] for c in string)
def textsize(string, font):
"""Measure rendering size (width and height) of line.
Returned size will not exactly match the rendered text size,
because this method does not use fonts to measure it.
"""
width = (zenkaku_len(string) * font.size +
hankaku_len(string) * font.size * 0.55)
return Size(int(math.ceil(width)), font.size)
def memoize(fn):
fn.cache = {}
@wraps(fn)
def func(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in fn.cache:
fn.cache[key] = fn(*args, **kwargs)
return fn.cache[key]
return func
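# Quick usage sketch (illustrative, not part of blockdiag) assuming the definitions
# above are in scope: ZENKAKU characters count as two points, and memoize caches
# results keyed by the stringified arguments.
if __name__ == "__main__":
    print(string_width("abc"))                 # 3: three HANKAKU characters
    print(string_width("あいう"))               # 6: three ZENKAKU characters
    @memoize
    def slow_double(x):
        return x * 2
    print(slow_double(21), slow_double(21))    # second call is served from fn.cache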
| 0
| 168
| 0
| 0
| 0
| 57
| 0
| 6
| 45
|
5a4d29d31fc8b9261b5b5f65d7bb0b5cb3b90e4d
| 4,639
|
py
|
Python
|
xt/framework/comm/comm_conf.py
|
ZZHsunsky/xingtian
|
0484e2c968d9e6b2e5f43a3b86c0213a095ba309
|
[
"MIT"
] | null | null | null |
xt/framework/comm/comm_conf.py
|
ZZHsunsky/xingtian
|
0484e2c968d9e6b2e5f43a3b86c0213a095ba309
|
[
"MIT"
] | null | null | null |
xt/framework/comm/comm_conf.py
|
ZZHsunsky/xingtian
|
0484e2c968d9e6b2e5f43a3b86c0213a095ba309
|
[
"MIT"
] | null | null | null |
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import time
MAX_ACTOR_NUM = 40
MAX_LEARNER_NUM = 10
START_PORT = 20000
PORTNUM_PERLEARNER = MAX_ACTOR_NUM + 1
# Initialization: check for a running redis, connect to it, and build the port pool by detecting which port numbers are free
def get_port(start_port):
''' get port used by module '''
predict_port = start_port + 1
if (predict_port + MAX_ACTOR_NUM - start_port) > PORTNUM_PERLEARNER:
raise Exception("port num is not enough")
return start_port, predict_port
def test():
''' test interface'''
test_comm_conf = CommConf()
redis_key = 'port_pool'
print("{} len: {}".format(redis_key, test_comm_conf.redis.llen(redis_key)))
for _ in range(test_comm_conf.redis.llen(redis_key)):
pop_val = test_comm_conf.redis.lpop(redis_key)
print("pop val: {} from '{}'".format(pop_val, redis_key))
start = time.time()
test_comm_conf.init_portpool()
print("use time", time.time() - start)
train_port = get_port(20000)
print(train_port)
if __name__ == "__main__":
test()
| 34.362963
| 79
| 0.64432
|
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import socket
import time
from subprocess import Popen
import redis
MAX_ACTOR_NUM = 40
MAX_LEARNER_NUM = 10
START_PORT = 20000
PORTNUM_PERLEARNER = MAX_ACTOR_NUM + 1
# Initialization: check for a running redis, connect to it, and build the port pool by detecting which port numbers are free
class CommConf(object):
def __init__(self):
try:
redis.Redis(host="127.0.0.1", port=6379, db=0).ping()
except redis.ConnectionError:
Popen("echo save '' | setsid redis-server -", shell=True)
time.sleep(0.3)
self.redis = redis.Redis(host="127.0.0.1", port=6379, db=0)
self.pool_name = "port_pool"
if not self.redis.exists(self.pool_name):
self.init_portpool()
def init_portpool(self):
''' init port pool '''
start_port = START_PORT
try_num = 10
for _ in range(MAX_LEARNER_NUM):
for _ in range(try_num):
check_flag, next_port = self.check_learner_port(start_port)
if not check_flag:
break
else:
start_port = next_port
self.redis.lpush(self.pool_name, start_port)
self.redis.incr('port_num', amount=1)
self.redis.incr('max_port_num', amount=1)
start_port = next_port
def get_start_port(self):
''' get start port '''
if int(self.redis.get('port_num')) == 0:
raise Exception("Dont have available port")
start_port = self.redis.lpop(self.pool_name)
self.redis.decr('port_num', amount=1)
return int(start_port)
def release_start_port(self, start_port):
''' release start port '''
self.redis.lpush(self.pool_name, start_port)
self.redis.incr('port_num', amount=1)
if self.redis.get('port_num') == self.redis.get('max_port_num'):
self.redis.delete('port_num')
self.redis.delete('max_port_num')
self.redis.delete('port_pool')
print("shutdown redis")
self.redis.shutdown(nosave=True)
return
def check_learner_port(self, start_port):
''' check if multi-port is in use '''
ip = "localhost"
for i in range(PORTNUM_PERLEARNER):
if self.check_port(ip, start_port + i):
return True, start_port + i + 1
return False, start_port + PORTNUM_PERLEARNER
def check_port(self, ip, port):
''' check if port is in use '''
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((ip, int(port)))
s.shutdown(2)
print("port is used", int(port))
return True
except BaseException:
return False
def get_port(start_port):
''' get port used by module '''
predict_port = start_port + 1
if (predict_port + MAX_ACTOR_NUM - start_port) > PORTNUM_PERLEARNER:
raise Exception("port num is not enough")
return start_port, predict_port
def test():
''' test interface'''
test_comm_conf = CommConf()
redis_key = 'port_pool'
print("{} len: {}".format(redis_key, test_comm_conf.redis.llen(redis_key)))
for _ in range(test_comm_conf.redis.llen(redis_key)):
pop_val = test_comm_conf.redis.lpop(redis_key)
print("pop val: {} from '{}'".format(pop_val, redis_key))
start = time.time()
test_comm_conf.init_portpool()
print("use time", time.time() - start)
train_port = get_port(20000)
print(train_port)
if __name__ == "__main__":
test()
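# Minimal standalone sketch (no redis required) of the availability test that
# check_port() above performs: if a TCP connection succeeds, the port is taken.
import socket
def port_in_use(host: str, port: int) -> bool:
    """Return True when something is already listening on host:port."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.settimeout(0.2)
        return sock.connect_ex((host, port)) == 0
if __name__ == "__main__":
    print(port_in_use("127.0.0.1", 6379))      # True only if a local redis is listening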
| 75
| 0
| 0
| 2,462
| 0
| 0
| 0
| -10
| 89
|
3e25d2bb70c9499de1f4cb505fe2880342dc5c50
| 2,572
|
py
|
Python
|
python/nbdb/anomaly/static.py
|
rubrikinc/nbdb2
|
359db63a39e016e3eb197b8ea511d6e8cffa1853
|
[
"Apache-2.0"
] | 2
|
2022-03-21T15:48:33.000Z
|
2022-03-27T00:43:12.000Z
|
python/nbdb/anomaly/static.py
|
rubrikinc/nbdb2
|
359db63a39e016e3eb197b8ea511d6e8cffa1853
|
[
"Apache-2.0"
] | null | null | null |
python/nbdb/anomaly/static.py
|
rubrikinc/nbdb2
|
359db63a39e016e3eb197b8ea511d6e8cffa1853
|
[
"Apache-2.0"
] | 1
|
2022-03-27T00:43:31.000Z
|
2022-03-27T00:43:31.000Z
|
"""
Static threshold based anomaly detection
"""
import logging
logger = logging.getLogger(__name__)
| 35.232877
| 73
| 0.613919
|
"""
Static threshold based anomaly detection
"""
from typing import List, Tuple
import logging
import numpy as np
import pandas as pd
from nbdb.anomaly.anomaly_interface import AnomalyInterface
from nbdb.readapi.graphite_response import Anomaly
from nbdb.readapi.time_series_response import TimeRange
logger = logging.getLogger(__name__)
class Static(AnomalyInterface): # pylint: disable=too-few-public-methods
"""
Simple algorithm to do threshold based anomaly detection.
Currently supports two functions (lt, gt).
"""
def find_anomalies(self,
baseline: np.ndarray,
raw_data: pd.Series) -> List[Tuple]:
"""
Use static threshold to determine anomalies in the
raw data. Supports the lt, gt functions to compare
against the threshold
:param baseline:
:param raw_data:
:return:
"""
comparator_fn = self.config.get('comparator_fn', 'gt')
threshold = self.config.get('threshold')
raw_data.dropna(inplace=True)
if comparator_fn == 'gt':
anomalous_points = raw_data[raw_data > threshold]
elif comparator_fn == 'lt':
anomalous_points = raw_data[raw_data < threshold]
else:
raise NotImplementedError('Unknown comparator fn: {}'.format(
comparator_fn))
anomalies = []
# No anomalous points found. Return early
if len(anomalous_points) == 0:
return anomalies
previous_epoch = anomalous_points.index[0]
anomaly_start = anomalous_points.index[0]
sampling_interval = np.diff(raw_data.index).min()
anomaly_score = 1.0
epoch = None
for epoch, _ in anomalous_points.iteritems():
if (epoch - previous_epoch) / sampling_interval > 1:
# Mark the current anomaly as ended and start a new one
anomaly_window = TimeRange(anomaly_start, previous_epoch,
sampling_interval)
anomalies.append(Anomaly(anomaly_window, anomaly_score))
anomaly_score = 1.0
anomaly_start = epoch
else:
previous_epoch = epoch
anomaly_score += 1
# append the final anomaly
if epoch is not None:
anomaly_window = TimeRange(anomaly_start, epoch,
sampling_interval)
anomalies.append(Anomaly(anomaly_window, anomaly_score))
return anomalies
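# Standalone sketch (hypothetical data, no nbdb imports) of the core idea above:
# select points beyond a static threshold, then merge consecutive hits into windows,
# which is what find_anomalies() wraps in TimeRange/Anomaly objects.
import pandas as pd
series = pd.Series([1, 1, 9, 9, 1, 8, 1], index=[0, 10, 20, 30, 40, 50, 60])
threshold = 5
hits = series[series > threshold]
windows, start, prev = [], None, None
for ts in hits.index:
    if start is None:
        start = prev = ts
    elif ts - prev > 10:                       # gap wider than the sampling interval
        windows.append((start, prev))
        start = prev = ts
    else:
        prev = ts
if start is not None:
    windows.append((start, prev))
print(windows)                                 # -> [(20, 30), (50, 50)]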
| 0
| 0
| 0
| 2,207
| 0
| 0
| 0
| 105
| 157
|
9288bc7c0b122d032f93019718b7a23eb2c872b0
| 1,598
|
py
|
Python
|
tests/test_help.py
|
thomasvolk/R_ev3dev
|
53b8c83af49e88eb4766deea0a690c55d1304d6a
|
[
"Apache-2.0"
] | null | null | null |
tests/test_help.py
|
thomasvolk/R_ev3dev
|
53b8c83af49e88eb4766deea0a690c55d1304d6a
|
[
"Apache-2.0"
] | null | null | null |
tests/test_help.py
|
thomasvolk/R_ev3dev
|
53b8c83af49e88eb4766deea0a690c55d1304d6a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
| 21.594595
| 86
| 0.628911
|
#!/usr/bin/env python3
import unittest
from R_ev3dev.interpreter import Interpreter, Command
from R_ev3dev.help import Help, Version
class TestCommand01(Command):
""" this is the test command 01
usage:
c01
"""
def invoke(self, interpreter_context, args):
return 1
class TestCommand02(Command):
""" this is the test command 02
"""
def invoke(self, interpreter_context, args):
return 2
class TestCommand03(Command):
def invoke(self, interpreter_context, args):
return 3
class TestHelp(unittest.TestCase):
def setUp(self):
self.interpreter = Interpreter([
TestCommand01('c01'),
TestCommand02('c02'),
TestCommand03('c03'),
Help('help'),
Version('version')
])
def test_overview(self):
self.assertEqual("""---
R_ev3 protocol language version 0.0.1
author: Thomas Volk
license: Apache License Version 2.0
source: https://github.com/thomasvolk/R_ev3dev
possible commands:
c01 - this is the test command 01
c02 - this is the test command 02
c03 -
help - show help
version - show version
use help <command> for details
---""", self.interpreter.evaluate_internal("help").value)
def test_help(self):
self.assertEqual("""---
c01
this is the test command 01
usage:
c01
---""", self.interpreter.evaluate_internal("help c01").value)
def test_version(self):
self.assertEqual('0.0.1', self.interpreter.evaluate_internal("version").value)
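# Small standalone sketch (not from R_ev3dev) of the docstring convention the
# expected help overview above relies on: the first docstring line serves as the
# one-line summary shown next to each command name.
def summary(obj):
    """Return the first docstring line of obj, or an empty string."""
    doc = (obj.__doc__ or "").strip()
    return doc.splitlines()[0] if doc else ""
class C01:
    """ this is the test command 01
    usage:
        c01
    """
print(summary(C01))                            # -> this is the test command 01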
| 0
| 0
| 0
| 1,370
| 0
| 0
| 0
| 44
| 158
|
0cb3d0dd6f38e1ffc07fd4e85e3458786f9cf6d8
| 420
|
py
|
Python
|
news/urls.py
|
vigen-b/FakeNews
|
fc19f623529d1661c9f3d475adc9db98ee95a38a
|
[
"Apache-2.0"
] | null | null | null |
news/urls.py
|
vigen-b/FakeNews
|
fc19f623529d1661c9f3d475adc9db98ee95a38a
|
[
"Apache-2.0"
] | null | null | null |
news/urls.py
|
vigen-b/FakeNews
|
fc19f623529d1661c9f3d475adc9db98ee95a38a
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from news import views
app_name = "news"
urlpatterns = [
path("news/", views.NewsList.as_view()),
path("news/<int:pk>/", views.NewsDetail.as_view()),
path("category/", views.CategoryList.as_view()),
path("category/<str:pk>/", views.CategoryDetail.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
| 30
| 63
| 0.735714
|
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from news import views
app_name = "news"
urlpatterns = [
path("news/", views.NewsList.as_view()),
path("news/<int:pk>/", views.NewsDetail.as_view()),
path("category/", views.CategoryList.as_view()),
path("category/<str:pk>/", views.CategoryDetail.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
9967baa443818e97fb20549f70a4bd20685b2cd4
| 5,239
|
py
|
Python
|
bobstack/sipmessaging/sipMessage.py
|
bobjects/BobStack
|
c177b286075044832f44baf9ace201780c8b4320
|
[
"Apache-2.0"
] | null | null | null |
bobstack/sipmessaging/sipMessage.py
|
bobjects/BobStack
|
c177b286075044832f44baf9ace201780c8b4320
|
[
"Apache-2.0"
] | null | null | null |
bobstack/sipmessaging/sipMessage.py
|
bobjects/BobStack
|
c177b286075044832f44baf9ace201780c8b4320
|
[
"Apache-2.0"
] | null | null | null |
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
| 23.181416
| 76
| 0.640389
|
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from sipHeader import SIPHeader
from sipStartLineFactory import SIPStartLineFactory
class SIPMessage(object):
@classmethod
def new_parsed_from(cls, a_string):
answer = cls()
answer.raw_string = a_string
return answer
@classmethod
def _new_for_attributes(cls, start_line=None, header=None, content=""):
answer = cls()
answer.start_line = start_line
if header:
answer.header = header
else:
answer.header = SIPHeader.new_for_attributes(header_fields=None)
answer.content = content
return answer
def __init__(self):
self._content = None
self._startLine = None
self._header = None
self._rawString = None
@property
def deep_copy(self):
return self.__class__.new_parsed_from(self.raw_string)
@property
def raw_string(self):
if self._rawString is None:
self.render_raw_string_from_attributes()
return self._rawString
@raw_string.setter
def raw_string(self, a_string):
self._rawString = a_string
self.clear_attributes()
@property
def body(self):
return self.content
def clear_raw_string(self):
self._rawString = None
def clear_attributes(self):
self._content = None
self._startLine = None
self._header = None
def parse_attributes_from_raw_string(self):
self._content = ""
string_io = StringIO(self._rawString)
self._startLine = SIPStartLineFactory().next_for_stringio(string_io)
self._header = SIPHeader.new_parsed_from(string_io)
self._content = string_io.read()
string_io.close()
def render_raw_string_from_attributes(self):
stringio = StringIO()
stringio.write(self._startLine.raw_string)
stringio.write("\r\n")
self._header.render_raw_string_from_attributes(stringio)
stringio.write(self._content)
self._rawString = stringio.getvalue()
stringio.close()
@property
def start_line(self):
if self._startLine is None:
self.parse_attributes_from_raw_string()
return self._startLine
@start_line.setter
def start_line(self, a_sip_start_line):
self._startLine = a_sip_start_line
self.clear_raw_string()
@property
def header(self):
if self._header is None:
self.parse_attributes_from_raw_string()
return self._header
@header.setter
def header(self, a_sip_header):
self._header = a_sip_header
self.clear_raw_string()
@property
def content(self):
if self._content is None:
self.parse_attributes_from_raw_string()
return self._content
@content.setter
def content(self, a_string):
self._content = a_string
self.clear_raw_string()
@property
def vias(self):
return self.header.vias
@property
def via_header_fields(self):
return self.header.via_header_fields
@property
def route_uris(self):
return self.header.route_uris
@property
def record_route_uris(self):
return self.header.record_route_uris
@property
def transaction_hash(self):
return self.header.transaction_hash
@property
def dialog_hash(self):
return self.header.dialog_hash
# TODO: This is a hot method. Should we cache?
@property
def is_valid(self):
if self.is_malformed:
return False
if not self.header.is_valid:
return False
if self.header.content_length is not None:
if self.header.content_length != self.content.__len__():
return False
return True
@property
def is_invalid(self):
return not self.is_valid
@property
def is_unknown(self):
return not self.is_known
@property
def is_known(self):
return False
@property
def is_malformed(self):
return False
@property
def is_request(self):
return False
@property
def is_response(self):
return False
@property
def is_ack_request(self):
return False
@property
def is_bye_request(self):
return False
@property
def is_cancel_request(self):
return False
@property
def is_info_request(self):
return False
@property
def is_invite_request(self):
return False
@property
def is_message_request(self):
return False
@property
def is_notify_request(self):
return False
@property
def is_options_request(self):
return False
@property
def is_publish_request(self):
return False
@property
def is_prack_request(self):
return False
@property
def is_refer_request(self):
return False
@property
def is_register_request(self):
return False
@property
def is_subscribe_request(self):
return False
@property
def is_update_request(self):
return False
| 0
| 2,936
| 0
| 2,100
| 0
| 0
| 0
| 48
| 93
|
943c8ed1cd17178b2e7dd6ef67854da8a007f148
| 98
|
py
|
Python
|
codes_auto/1635.number-of-good-pairs.py
|
smartmark-pro/leetcode_record
|
6504b733d892a705571eb4eac836fb10e94e56db
|
[
"MIT"
] | null | null | null |
codes_auto/1635.number-of-good-pairs.py
|
smartmark-pro/leetcode_record
|
6504b733d892a705571eb4eac836fb10e94e56db
|
[
"MIT"
] | null | null | null |
codes_auto/1635.number-of-good-pairs.py
|
smartmark-pro/leetcode_record
|
6504b733d892a705571eb4eac836fb10e94e56db
|
[
"MIT"
] | null | null | null |
#
# @lc app=leetcode.cn id=1635 lang=python3
#
# [1635] number-of-good-pairs
#
None
# @lc code=end
| 14
| 42
| 0.673469
|
#
# @lc app=leetcode.cn id=1635 lang=python3
#
# [1635] number-of-good-pairs
#
None
# @lc code=end
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3d707e5f1c279e06637838e9d88dd40ec499c8ba
| 1,110
|
py
|
Python
|
python-the-hard-way/12-prompting-people.py
|
Valka7a/python-playground
|
f08d4374f2cec2e8b1afec3753854b1ec10ff480
|
[
"MIT"
] | null | null | null |
python-the-hard-way/12-prompting-people.py
|
Valka7a/python-playground
|
f08d4374f2cec2e8b1afec3753854b1ec10ff480
|
[
"MIT"
] | null | null | null |
python-the-hard-way/12-prompting-people.py
|
Valka7a/python-playground
|
f08d4374f2cec2e8b1afec3753854b1ec10ff480
|
[
"MIT"
] | null | null | null |
# Exercise 12: Prompting People
# Variables
age = raw_input("How old are you? ")
height = raw_input("How tall are you? ")
weight = raw_input("How much do you weigh? ")
# Print
print "So, you're %r old, %r tall and %r heavy." % (age, height, weight)
# Study Drills
# 1. In Terminal where you normally run python to run your scripts,
# type pydoc raw_input. Read what it says. If you're on Windows
# try python -m pydoc raw_input instead.
# 2. Get out of pydoc by typing q to quit.
# 3. Look online for what the pydoc command does.
# 4. Use pydoc to also read about open, file, os and sys. It's
#    alright if you do not understand those; just read through
# and take notes about interesting things.
# Drill 1
# Help on built-in function raw_input in module __builtin__:
# raw_input(...)
# raw_input([prompt]) -> string
#
# Read a string from standard input. The trailing newline is stripped.
# If the user hits EOF (Unix: Ctl-D, Windows: Ctl-Z+Return), raise EOFError.
# On Unix, GNU readline is used if enabled. The prompt string, if given,
# is printed without a trailing newline before reading.
| 34.6875
| 76
| 0.715315
|
# Exercise 12: Prompting People
# Variables
age = raw_input("How old are you? ")
height = raw_input("How tall are you? ")
weight = raw_input("How much do you weigh? ")
# Print
print "So, you're %r old, %r tall and %r heavy." % (age, height, weight)
# Study Drills
# 1. In Terminal where you normally run python to run your scripts,
# type pydoc raw_input. Read what it says. If you're on Windows
# try python -m pydoc raw_input instead.
# 2. Get out of pydoc by typing q to quit.
# 3. Look online for what the pydoc command does.
# 4. Use pydoc to also read about open, file, os and sys. It's
#    alright if you do not understand those; just read through
# and take notes about interesting things.
# Drill 1
# Help on built-in function raw_input in module __builtin__:
# raw_input(...)
# raw_input([prompt]) -> string
#
# Read a string from standard input. The trailing newline is stripped.
# If the user hits EOF (Unix: Ctl-D, Windows: Ctl-Z+Return), raise EOFError.
# On Unix, GNU readline is used if enabled. The prompt string, if given,
# is printed without a trailing newline before reading.
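# Python 3 sketch of the same exercise (the file above is Python 2: raw_input()
# and the print statement no longer exist in Python 3, where input() returns a
# string just as raw_input did).
age = input("How old are you? ")
height = input("How tall are you? ")
weight = input("How much do you weigh? ")
print("So, you're %r old, %r tall and %r heavy." % (age, height, weight))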
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
17884ad7e858a3c341d64db09625d9ca52b143f6
| 1,730
|
py
|
Python
|
alipay/aop/api/domain/InvestorMaterialInfo.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/InvestorMaterialInfo.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/InvestorMaterialInfo.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
| 24.366197
| 67
| 0.550289
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class InvestorMaterialInfo(object):
def __init__(self):
self._file_id = None
self._file_url = None
self._type = None
@property
def file_id(self):
return self._file_id
@file_id.setter
def file_id(self, value):
self._file_id = value
@property
def file_url(self):
return self._file_url
@file_url.setter
def file_url(self, value):
self._file_url = value
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = value
def to_alipay_dict(self):
params = dict()
if self.file_id:
if hasattr(self.file_id, 'to_alipay_dict'):
params['file_id'] = self.file_id.to_alipay_dict()
else:
params['file_id'] = self.file_id
if self.file_url:
if hasattr(self.file_url, 'to_alipay_dict'):
params['file_url'] = self.file_url.to_alipay_dict()
else:
params['file_url'] = self.file_url
if self.type:
if hasattr(self.type, 'to_alipay_dict'):
params['type'] = self.type.to_alipay_dict()
else:
params['type'] = self.type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = InvestorMaterialInfo()
if 'file_id' in d:
o.file_id = d['file_id']
if 'file_url' in d:
o.file_url = d['file_url']
if 'type' in d:
o.type = d['type']
return o
| 0
| 573
| 0
| 1,019
| 0
| 0
| 0
| 21
| 68
|
715eab9e05e4e3e6f81c12646f271a7236441291
| 12,764
|
py
|
Python
|
msticpy/nbtools/azure_ml_tools.py
|
ekmixon/msticpy
|
8676a648ba9bfb4d848a8dda964820d4942a32ca
|
[
"MIT"
] | null | null | null |
msticpy/nbtools/azure_ml_tools.py
|
ekmixon/msticpy
|
8676a648ba9bfb4d848a8dda964820d4942a32ca
|
[
"MIT"
] | 3
|
2021-05-15T02:16:39.000Z
|
2022-01-19T13:13:25.000Z
|
msticpy/nbtools/azure_ml_tools.py
|
ekmixon/msticpy
|
8676a648ba9bfb4d848a8dda964820d4942a32ca
|
[
"MIT"
] | null | null | null |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Checker functions for Azure ML notebooks."""
import json
import os
import socket
import sys
import urllib
from pathlib import Path
from typing import Any, List, Mapping, Optional, Tuple, Union
from IPython import get_ipython
from IPython.display import HTML, display
from pkg_resources import parse_version
from .._version import VERSION
from ..common.pkg_config import refresh_config
__version__ = VERSION
AZ_GET_STARTED = (
"https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/A%20Getting"
"%20Started%20Guide%20For%20Azure%20Sentinel%20ML%20Notebooks.ipynb"
)
TROUBLE_SHOOTING = (
"https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/"
"TroubleShootingNotebooks.ipynb"
)
MISSING_PKG_ERR = """
<h4><font color='orange'>The package '<b>{package}</b>' is not
installed or has an unsupported version (installed version = '{inst_ver}')</font></h4>
Please install or upgrade before continuing: required version is {package}>={req_ver}
"""
MP_INSTALL_FAILED = """
<h4><font color='red'>The notebook may not run correctly without
the correct version of '<b>{pkg}</b>' ({ver} or later).</font></h4>
Please see the <a href="{nbk_uri}">
Getting Started Guide For Azure Sentinel ML Notebooks</a></b>
for more information<br><hr>
"""
RELOAD_MP = """
<h4><font color='orange'>Kernel restart needed</h4>
An error was detected trying to load the updated version of MSTICPy.<br>
Please restart the notebook kernel and re-run this cell - it should
run without error.
"""
MIN_PYTHON_VER_DEF = "3.6"
MSTICPY_REQ_VERSION = __version__
VER_RGX = r"(?P<maj>\d+)\.(?P<min>\d+).(?P<pnt>\d+)(?P<suff>.*)"
MP_ENV_VAR = "MSTICPYCONFIG"
MP_FILE = "msticpyconfig.yaml"
NB_CHECK_URI = (
"https://raw.githubusercontent.com/Azure/Azure-Sentinel-"
"Notebooks/master/utils/nb_check.py"
)
def is_in_aml():
"""Return True if running in Azure Machine Learning."""
return os.environ.get("APPSETTING_WEBSITE_SITE_NAME") == "AMLComputeInstance"
def check_versions(
min_py_ver: Union[str, Tuple] = MIN_PYTHON_VER_DEF,
min_mp_ver: Union[str, Tuple] = MSTICPY_REQ_VERSION,
extras: Optional[List[str]] = None,
mp_release: Optional[str] = None,
**kwargs,
):
"""
Check the current versions of the Python kernel and MSTICPy.
Parameters
----------
min_py_ver : Union[Tuple[int, int], str]
Minimum Python version
min_mp_ver : Union[Tuple[int, int], str]
Minimum MSTICPy version
extras : Optional[List[str]], optional
A list of extras required for MSTICPy
mp_release : Optional[str], optional
Override the MSTICPy release version. This
can also be specified in the environment variable 'MP_TEST_VER'
Raises
------
RuntimeError
If the Python version does not support the notebook.
If the MSTICPy version does not support the notebook
and the user chose not to upgrade
"""
del kwargs
_disp_html("<h4>Starting notebook pre-checks...</h4>")
if isinstance(min_py_ver, str):
min_py_ver = _get_pkg_version(min_py_ver).release
check_python_ver(min_py_ver=min_py_ver)
_check_mp_install(min_mp_ver, mp_release, extras)
_check_kql_prereqs()
_set_kql_env_vars(extras)
_run_user_settings()
_set_mpconfig_var()
_disp_html("<h4>Notebook pre-checks complete.</h4>")
def check_python_ver(min_py_ver: Union[str, Tuple] = MIN_PYTHON_VER_DEF):
"""
Check the current version of the Python kernel.
Parameters
----------
min_py_ver : Tuple[int, int]
Minimum Python version
Raises
------
RuntimeError
If the Python version does not support the notebook.
"""
min_py_ver = _get_pkg_version(min_py_ver)
sys_ver = _get_pkg_version(sys.version_info[:3])
_disp_html("Checking Python kernel version...")
if sys_ver < min_py_ver:
# Bandit SQL inject error found here
_disp_html(
f"""
<h4><font color='red'>This notebook requires a later
(Python) kernel version.</h4></font>
Select a kernel from the notebook toolbar (above), that is Python
{min_py_ver} or later (Python 3.8 recommended)<br>
""" # nosec
)
_disp_html(
f"""
Please see the <a href="{TROUBLE_SHOOTING}">TroubleShootingNotebooks</a>
for more information<br><br><hr>
"""
)
# Bandit SQL inject error found here
raise RuntimeError(f"Python {min_py_ver} or later kernel is required.") # nosec
if sys_ver < _get_pkg_version("3.8"):
_disp_html(
"Recommended: switch to using the 'Python 3.8 - AzureML' notebook kernel"
" if this is available."
)
_disp_html(f"Info: Python kernel version {sys_ver} - OK<br>")
def _check_mp_install(
min_mp_ver: Union[str, Tuple],
mp_release: Optional[str],
extras: Optional[List[str]],
):
"""Check for and try to install required MSTICPy version."""
# Use the release ver specified in params, in the environment or
# the notebook default.
pkg_version = _get_pkg_version(min_mp_ver)
mp_install_version = mp_release or os.environ.get("MP_TEST_VER") or str(pkg_version)
check_mp_ver(min_msticpy_ver=mp_install_version, extras=extras)
def check_mp_ver(min_msticpy_ver: Union[str, Tuple], extras: Optional[List[str]]):
"""
Check and optionally update the current version of msticpy.
Parameters
----------
min_msticpy_ver : Tuple[int, int]
Minimum MSTICPy version
extras : Optional[List[str]], optional
A list of extras required for MSTICPy
Raises
------
ImportError
If MSTICPy version is insufficient and we need to upgrade
"""
mp_min_pkg_ver = _get_pkg_version(min_msticpy_ver)
_disp_html("Checking msticpy version...<br>")
inst_version = _get_pkg_version(__version__)
if inst_version < mp_min_pkg_ver:
_disp_html(
MISSING_PKG_ERR.format(
package="msticpy",
inst_ver=inst_version,
req_ver=mp_min_pkg_ver,
)
)
mp_pkg_spec = f"msticpy[{','.join(extras)}]" if extras else "msticpy"
mp_pkg_spec = f"{mp_pkg_spec}>={min_msticpy_ver}"
_disp_html(
f"Please run the following command to upgrade MSTICPy<br>"
f"<pre>!{mp_pkg_spec}</pre><br>"
)
raise ImportError(
"Unsupported version of MSTICPy installed",
f"Installed version: {inst_version}",
f"Required version: {mp_min_pkg_ver}",
)
_disp_html(f"Info: msticpy version {inst_version} (>= {mp_min_pkg_ver}) - OK<br>")
def _set_kql_env_vars(extras: Optional[List[str]]):
"""Set environment variables for Kqlmagic based on MP extras."""
jp_extended = ("azsentinel", "azuresentinel", "kql")
if extras and any(extra for extra in extras if extra in jp_extended):
os.environ["KQLMAGIC_EXTRAS_REQUIRE"] = "jupyter-extended"
else:
os.environ["KQLMAGIC_EXTRAS_REQUIRE"] = "jupyter-basic"
if is_in_aml():
os.environ["KQLMAGIC_AZUREML_COMPUTE"] = _get_vm_fqdn()
def _get_pkg_version(version: Union[str, Tuple]) -> Any:
"""Return pkg_resources parsed version from string or tuple."""
if isinstance(version, str):
return parse_version(version)
if isinstance(version, tuple):
return parse_version(".".join(str(ver) for ver in version))
raise TypeError(f"Unparseable type version {version}")
def _disp_html(text: str):
"""Display the HTML text."""
display(HTML(text))
def get_aml_user_folder() -> Optional[Path]:
"""Return the root of the user folder."""
path_parts = Path(".").absolute().parts
if "Users" not in path_parts:
return None
# find the index of the last occurrence of "users"
users_idx = len(path_parts) - path_parts[::-1].index("Users")
# the user folder is one item below this
if len(path_parts) < users_idx + 1:
return None
return Path("/".join(path_parts[: users_idx + 1]))
# pylint: disable=import-outside-toplevel, unused-import, import-error
def _run_user_settings():
"""Import nbuser_settings.py, if it exists."""
user_folder = get_aml_user_folder()
if user_folder.joinpath("nbuser_settings.py").is_file():
sys.path.append(str(user_folder))
# pylint: enable=import-outside-toplevel, unused-import, import-error
def _set_mpconfig_var():
"""Set MSTICPYCONFIG to file in user directory if no other found."""
mp_path_val = os.environ.get(MP_ENV_VAR)
if (
# If a valid MSTICPYCONFIG value is found - return
(mp_path_val and Path(mp_path_val).is_file())
# Or if there is a msticpconfig in the current folder.
or Path(".").joinpath(MP_FILE).is_file()
):
return
# Otherwise check the user's root folder
user_dir = get_aml_user_folder()
mp_path = Path(user_dir).joinpath(MP_FILE)
if mp_path.is_file():
# If there's a file there, set the env variable to that.
os.environ[MP_ENV_VAR] = str(mp_path)
# Since we have already imported msticpy to check the version
# it will have already configured settings so we need to refresh.
refresh_config()
_disp_html(
f"<br>No {MP_FILE} found. Will use {MP_FILE} in user folder {user_dir}<br>"
)
def _get_vm_metadata() -> Mapping[str, Any]:
"""Use local request to get VM metadata."""
vm_uri = "http://169.254.169.254/metadata/instance?api-version=2017-08-01"
req = urllib.request.Request(vm_uri) # type: ignore
req.add_header("Metadata", "true")
# Bandit warning on urlopen - Fixed private URL
with urllib.request.urlopen(req) as resp: # type: ignore # nosec
metadata = json.loads(resp.read())
return metadata if isinstance(metadata, dict) else {}
def _get_vm_fqdn() -> str:
"""Get the FQDN of the host."""
az_region = _get_vm_metadata().get("compute", {}).get("location")
return ".".join(
[
socket.gethostname(),
az_region,
"instances.azureml.ms",
]
if az_region
else ""
)
def _check_kql_prereqs():
"""
Check and install packages for Kqlmagic/msal_extensions.
Notes
-----
Kqlmagic may trigger warnings about a missing PyGObject package
and some system library dependencies. To fix this do the
following:<br>
From a notebook run:
%pip uninstall enum34
!sudo apt-get --yes install libgirepository1.0-dev
!sudo apt-get --yes install gir1.2-secret-1
%pip install pygobject
You can also do this from a terminal - but ensure that you've
activated the environment corresponding to the kernel you are
using prior to running the pip commands.
# Install the libgi dependency
sudo apt install libgirepository1.0-dev
sudo apt install gir1.2-secret-1
# activate the environment
# conda activate azureml_py38
# source ./env_path/scripts/activate
# Uninstall enum34
python -m pip uninstall enum34
# Install pygobject
python -m pip install pygobject
"""
if not is_in_aml():
return
try:
# If this successfully imports, we are ok
# pylint: disable=import-outside-toplevel
import gi
# pylint: enable=import-outside-toplevel
del gi
except ImportError:
# Check for system packages
ip_shell = get_ipython()
if not ip_shell:
return
apt_list = ip_shell.run_line_magic("sx", "apt list")
apt_list = [apt.split("/", maxsplit=1)[0] for apt in apt_list]
missing_lx_pkg = [
apt_pkg
for apt_pkg in ("libgirepository1.0-dev", "gir1.2-secret-1")
if apt_pkg not in apt_list
]
if missing_lx_pkg:
_disp_html(
"Kqlmagic/msal-extensions pre-requisite PyGObject not installed."
)
_disp_html(
"To prevent warnings when loading the Kqlmagic data provider,"
" Please run the following command:<br>"
"!conda install --yes -c conda-forge pygobject<br>"
)
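# Standalone illustration (not part of msticpy) of the version comparison these
# checks rely on: pkg_resources.parse_version yields objects that sort numerically,
# whereas plain string comparison does not.
from pkg_resources import parse_version
assert parse_version("3.10.0") > parse_version("3.8.2")    # numeric ordering
assert "3.10.0" < "3.8.2"                                   # lexical ordering gets it wrong
assert parse_version("3.6") == parse_version(".".join(str(v) for v in (3, 6, 0)))
print("version comparison sketch OK")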
| 33.413613
| 90
| 0.645096
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Checker functions for Azure ML notebooks."""
import json
import os
import socket
import sys
import urllib
from pathlib import Path
from typing import Any, List, Mapping, Optional, Tuple, Union
from IPython import get_ipython
from IPython.display import HTML, display
from pkg_resources import parse_version
from .._version import VERSION
from ..common.pkg_config import refresh_config
__version__ = VERSION
AZ_GET_STARTED = (
"https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/A%20Getting"
"%20Started%20Guide%20For%20Azure%20Sentinel%20ML%20Notebooks.ipynb"
)
TROUBLE_SHOOTING = (
"https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/"
"TroubleShootingNotebooks.ipynb"
)
MISSING_PKG_ERR = """
<h4><font color='orange'>The package '<b>{package}</b>' is not
installed or has an unsupported version (installed version = '{inst_ver}')</font></h4>
Please install or upgrade before continuing: required version is {package}>={req_ver}
"""
MP_INSTALL_FAILED = """
<h4><font color='red'>The notebook may not run correctly without
the correct version of '<b>{pkg}</b>' ({ver} or later).</font></h4>
Please see the <a href="{nbk_uri}">
Getting Started Guide For Azure Sentinel ML Notebooks</a></b>
for more information<br><hr>
"""
RELOAD_MP = """
<h4><font color='orange'>Kernel restart needed</h4>
An error was detected trying to load the updated version of MSTICPy.<br>
Please restart the notebook kernel and re-run this cell - it should
run without error.
"""
MIN_PYTHON_VER_DEF = "3.6"
MSTICPY_REQ_VERSION = __version__
VER_RGX = r"(?P<maj>\d+)\.(?P<min>\d+).(?P<pnt>\d+)(?P<suff>.*)"
MP_ENV_VAR = "MSTICPYCONFIG"
MP_FILE = "msticpyconfig.yaml"
NB_CHECK_URI = (
"https://raw.githubusercontent.com/Azure/Azure-Sentinel-"
"Notebooks/master/utils/nb_check.py"
)
def is_in_aml():
"""Return True if running in Azure Machine Learning."""
return os.environ.get("APPSETTING_WEBSITE_SITE_NAME") == "AMLComputeInstance"
def check_versions(
min_py_ver: Union[str, Tuple] = MIN_PYTHON_VER_DEF,
min_mp_ver: Union[str, Tuple] = MSTICPY_REQ_VERSION,
extras: Optional[List[str]] = None,
mp_release: Optional[str] = None,
**kwargs,
):
"""
Check the current versions of the Python kernel and MSTICPy.
Parameters
----------
min_py_ver : Union[Tuple[int, int], str]
Minimum Python version
min_mp_ver : Union[Tuple[int, int], str]
Minimum MSTICPy version
extras : Optional[List[str]], optional
A list of extras required for MSTICPy
mp_release : Optional[str], optional
Override the MSTICPy release version. This
can also be specified in the environment variable 'MP_TEST_VER'
Raises
------
RuntimeError
If the Python version does not support the notebook.
If the MSTICPy version does not support the notebook
and the user chose not to upgrade
"""
del kwargs
_disp_html("<h4>Starting notebook pre-checks...</h4>")
if isinstance(min_py_ver, str):
min_py_ver = _get_pkg_version(min_py_ver).release
check_python_ver(min_py_ver=min_py_ver)
_check_mp_install(min_mp_ver, mp_release, extras)
_check_kql_prereqs()
_set_kql_env_vars(extras)
_run_user_settings()
_set_mpconfig_var()
_disp_html("<h4>Notebook pre-checks complete.</h4>")
def check_python_ver(min_py_ver: Union[str, Tuple] = MIN_PYTHON_VER_DEF):
"""
Check the current version of the Python kernel.
Parameters
----------
min_py_ver : Tuple[int, int]
Minimum Python version
Raises
------
RuntimeError
If the Python version does not support the notebook.
"""
min_py_ver = _get_pkg_version(min_py_ver)
sys_ver = _get_pkg_version(sys.version_info[:3])
_disp_html("Checking Python kernel version...")
if sys_ver < min_py_ver:
# Bandit SQL inject error found here
_disp_html(
f"""
<h4><font color='red'>This notebook requires a later
(Python) kernel version.</h4></font>
Select a kernel from the notebook toolbar (above), that is Python
{min_py_ver} or later (Python 3.8 recommended)<br>
""" # nosec
)
_disp_html(
f"""
Please see the <a href="{TROUBLE_SHOOTING}">TroubleShootingNotebooks</a>
for more information<br><br><hr>
"""
)
# Bandit SQL inject error found here
raise RuntimeError(f"Python {min_py_ver} or later kernel is required.") # nosec
if sys_ver < _get_pkg_version("3.8"):
_disp_html(
"Recommended: switch to using the 'Python 3.8 - AzureML' notebook kernel"
" if this is available."
)
_disp_html(f"Info: Python kernel version {sys_ver} - OK<br>")
def _check_mp_install(
min_mp_ver: Union[str, Tuple],
mp_release: Optional[str],
extras: Optional[List[str]],
):
"""Check for and try to install required MSTICPy version."""
# Use the release ver specified in params, in the environment or
# the notebook default.
pkg_version = _get_pkg_version(min_mp_ver)
mp_install_version = mp_release or os.environ.get("MP_TEST_VER") or str(pkg_version)
check_mp_ver(min_msticpy_ver=mp_install_version, extras=extras)
def check_mp_ver(min_msticpy_ver: Union[str, Tuple], extras: Optional[List[str]]):
"""
Check and optionally update the current version of msticpy.
Parameters
----------
min_msticpy_ver : Tuple[int, int]
Minimum MSTICPy version
extras : Optional[List[str]], optional
A list of extras required for MSTICPy
Raises
------
ImportError
If MSTICPy version is insufficient and we need to upgrade
"""
mp_min_pkg_ver = _get_pkg_version(min_msticpy_ver)
_disp_html("Checking msticpy version...<br>")
inst_version = _get_pkg_version(__version__)
if inst_version < mp_min_pkg_ver:
_disp_html(
MISSING_PKG_ERR.format(
package="msticpy",
inst_ver=inst_version,
req_ver=mp_min_pkg_ver,
)
)
mp_pkg_spec = f"msticpy[{','.join(extras)}]" if extras else "msticpy"
mp_pkg_spec = f"{mp_pkg_spec}>={min_msticpy_ver}"
_disp_html(
f"Please run the following command to upgrade MSTICPy<br>"
f"<pre>!{mp_pkg_spec}</pre><br>"
)
raise ImportError(
"Unsupported version of MSTICPy installed",
f"Installed version: {inst_version}",
f"Required version: {mp_min_pkg_ver}",
)
_disp_html(f"Info: msticpy version {inst_version} (>= {mp_min_pkg_ver}) - OK<br>")
def _set_kql_env_vars(extras: Optional[List[str]]):
"""Set environment variables for Kqlmagic based on MP extras."""
jp_extended = ("azsentinel", "azuresentinel", "kql")
if extras and any(extra for extra in extras if extra in jp_extended):
os.environ["KQLMAGIC_EXTRAS_REQUIRE"] = "jupyter-extended"
else:
os.environ["KQLMAGIC_EXTRAS_REQUIRE"] = "jupyter-basic"
if is_in_aml():
os.environ["KQLMAGIC_AZUREML_COMPUTE"] = _get_vm_fqdn()
def _get_pkg_version(version: Union[str, Tuple]) -> Any:
"""Return pkg_resources parsed version from string or tuple."""
if isinstance(version, str):
return parse_version(version)
if isinstance(version, tuple):
return parse_version(".".join(str(ver) for ver in version))
raise TypeError(f"Unparseable type version {version}")
def _disp_html(text: str):
"""Display the HTML text."""
display(HTML(text))
def get_aml_user_folder() -> Optional[Path]:
"""Return the root of the user folder."""
path_parts = Path(".").absolute().parts
if "Users" not in path_parts:
return None
# find the index of the last occurrence of "users"
users_idx = len(path_parts) - path_parts[::-1].index("Users")
# the user folder is one item below this
if len(path_parts) < users_idx + 1:
return None
return Path("/".join(path_parts[: users_idx + 1]))
# pylint: disable=import-outside-toplevel, unused-import, import-error
def _run_user_settings():
"""Import nbuser_settings.py, if it exists."""
user_folder = get_aml_user_folder()
    if user_folder and user_folder.joinpath("nbuser_settings.py").is_file():
sys.path.append(str(user_folder))
import nbuser_settings # noqa: F401
# pylint: enable=import-outside-toplevel, unused-import, import-error
def _set_mpconfig_var():
"""Set MSTICPYCONFIG to file in user directory if no other found."""
mp_path_val = os.environ.get(MP_ENV_VAR)
if (
# If a valid MSTICPYCONFIG value is found - return
(mp_path_val and Path(mp_path_val).is_file())
# Or if there is a msticpconfig in the current folder.
or Path(".").joinpath(MP_FILE).is_file()
):
return
# Otherwise check the user's root folder
    user_dir = get_aml_user_folder()
    if not user_dir:
        return
    mp_path = Path(user_dir).joinpath(MP_FILE)
if mp_path.is_file():
# If there's a file there, set the env variable to that.
os.environ[MP_ENV_VAR] = str(mp_path)
# Since we have already imported msticpy to check the version
# it will have already configured settings so we need to refresh.
refresh_config()
_disp_html(
f"<br>No {MP_FILE} found. Will use {MP_FILE} in user folder {user_dir}<br>"
)
def _get_vm_metadata() -> Mapping[str, Any]:
"""Use local request to get VM metadata."""
vm_uri = "http://169.254.169.254/metadata/instance?api-version=2017-08-01"
req = urllib.request.Request(vm_uri) # type: ignore
req.add_header("Metadata", "true")
# Bandit warning on urlopen - Fixed private URL
with urllib.request.urlopen(req) as resp: # type: ignore # nosec
metadata = json.loads(resp.read())
return metadata if isinstance(metadata, dict) else {}
def _get_vm_fqdn() -> str:
"""Get the FQDN of the host."""
az_region = _get_vm_metadata().get("compute", {}).get("location")
return ".".join(
[
socket.gethostname(),
az_region,
"instances.azureml.ms",
]
if az_region
else ""
)
def _check_kql_prereqs():
"""
Check and install packages for Kqlmagic/msal_extensions.
Notes
-----
Kqlmagic may trigger warnings about a missing PyGObject package
and some system library dependencies. To fix this do the
following:<br>
From a notebook run:
%pip uninstall enum34
!sudo apt-get --yes install libgirepository1.0-dev
!sudo apt-get --yes install gir1.2-secret-1
%pip install pygobject
You can also do this from a terminal - but ensure that you've
activated the environment corresponding to the kernel you are
using prior to running the pip commands.
# Install the libgi dependency
sudo apt install libgirepository1.0-dev
sudo apt install gir1.2-secret-1
# activate the environment
# conda activate azureml_py38
# source ./env_path/scripts/activate
# Uninstall enum34
python -m pip uninstall enum34
# Install pygobject
    python -m pip install pygobject
"""
if not is_in_aml():
return
try:
# If this successfully imports, we are ok
# pylint: disable=import-outside-toplevel
import gi
# pylint: enable=import-outside-toplevel
del gi
except ImportError:
# Check for system packages
ip_shell = get_ipython()
if not ip_shell:
return
apt_list = ip_shell.run_line_magic("sx", "apt list")
apt_list = [apt.split("/", maxsplit=1)[0] for apt in apt_list]
missing_lx_pkg = [
apt_pkg
for apt_pkg in ("libgirepository1.0-dev", "gir1.2-secret-1")
if apt_pkg not in apt_list
]
if missing_lx_pkg:
_disp_html(
"Kqlmagic/msal-extensions pre-requisite PyGObject not installed."
)
_disp_html(
"To prevent warnings when loading the Kqlmagic data provider,"
" Please run the following command:<br>"
"!conda install --yes -c conda-forge pygobject<br>"
)
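# Illustrative quick checks (comments only; the relative imports above mean this
# module is imported as part of a package rather than run directly as a script):
#
#     is_in_aml()            # True only on an AML compute instance
#     get_aml_user_folder()  # e.g. Path(".../Users/<alias>"), or None elsewhere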
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 44
|
b65206728e5f3f6cbab0f87066d7ed1dc8784f63
| 4,423
|
py
|
Python
|
konfuzio_sdk/urls.py
|
atraining/document-ai-python-sdk
|
ea2df68af0254053da7e6f4c6e2c2df6d7911233
|
[
"MIT"
] | null | null | null |
konfuzio_sdk/urls.py
|
atraining/document-ai-python-sdk
|
ea2df68af0254053da7e6f4c6e2c2df6d7911233
|
[
"MIT"
] | null | null | null |
konfuzio_sdk/urls.py
|
atraining/document-ai-python-sdk
|
ea2df68af0254053da7e6f4c6e2c2df6d7911233
|
[
"MIT"
] | null | null | null |
"""Endpoints of the Konfuzio Host."""
import logging
from konfuzio_sdk import KONFUZIO_HOST, KONFUZIO_PROJECT_ID
logger = logging.getLogger(__name__)
def get_auth_token_url() -> str:
"""
Generate URL that creates an authentication token for the user.
:return: URL to generate the token.
"""
return f"{KONFUZIO_HOST}/api/token-auth/"
def get_project_list_url() -> str:
"""
Generate URL to load all the projects available for the user.
:return: URL to get all the projects for the user.
"""
return f"{KONFUZIO_HOST}/api/projects/"
def create_new_project_url() -> str:
"""
Generate URL to create a new project.
:return: URL to create a new project.
"""
return f"{KONFUZIO_HOST}/api/projects/"
def get_documents_meta_url() -> str:
"""
Generate URL to load meta information about documents.
:return: URL to get all the documents details.
"""
return f"{KONFUZIO_HOST}/api/projects/{KONFUZIO_PROJECT_ID}/docs/"
def get_upload_document_url() -> str:
"""
Generate URL to upload a document.
:return: URL to upload a document
"""
return f"{KONFUZIO_HOST}/api/v2/docs/"
def get_create_label_url() -> str:
"""
Generate URL to create a label.
:return: URL to create a label.
"""
return f"{KONFUZIO_HOST}/api/v2/labels/"
def get_document_ocr_file_url(document_id: int) -> str:
"""
Generate URL to access OCR version of document.
:param document_id: ID of the document as integer
:return: URL to get OCR document file.
"""
return f'{KONFUZIO_HOST}/doc/show/{document_id}/'
def get_document_original_file_url(document_id: int) -> str:
"""
Generate URL to access original version of the document.
:param document_id: ID of the document as integer
:return: URL to get the original document
"""
return f'{KONFUZIO_HOST}/doc/show-original/{document_id}/'
def get_document_api_details_url(document_id: int, include_extractions: bool = False, extra_fields='bbox') -> str:
"""
Generate URL to access document details of one document in a project.
:param document_id: ID of the document as integer
:param include_extractions: Bool to include extractions
:param extra_fields: Extra information to include in the response
:return: URL to get document details
"""
return (
f'{KONFUZIO_HOST}/api/projects/{KONFUZIO_PROJECT_ID}/docs/{document_id}/'
f'?include_extractions={include_extractions}&extra_fields={extra_fields}'
)
def get_project_url(project_id=None) -> str:
"""
Generate URL to get project details.
:param project_id: ID of the project
:return: URL to get project details.
"""
project_id = project_id if project_id else KONFUZIO_PROJECT_ID
return f'{KONFUZIO_HOST}/api/projects/{project_id}/'
def post_project_api_document_annotations_url(document_id: int) -> str:
"""
Add new annotations to a document.
:param document_id: ID of the document as integer
:return: URL for adding annotations to a document
"""
return f'{KONFUZIO_HOST}/api/projects/{KONFUZIO_PROJECT_ID}/docs/{document_id}/annotations/'
def delete_project_api_document_annotations_url(document_id: int, annotation_id: int) -> str:
"""
Delete the annotation of a document.
:param document_id: ID of the document as integer
:param annotation_id: ID of the annotation as integer
:return: URL to delete annotation of a document
"""
return f'{KONFUZIO_HOST}/api/projects/{KONFUZIO_PROJECT_ID}/docs/{document_id}/' f'annotations/{annotation_id}/'
def get_document_result_v1(document_id: int) -> str:
"""
Generate URL to access web interface for labeling of this project.
:param document_id: ID of the document as integer
:return: URL for labeling of the project.
"""
return f'{KONFUZIO_HOST}/api/v1/docs/{document_id}/'
def get_document_segmentation_details_url(document_id: int, project_id, action='segmentation') -> str:
"""
Generate URL to get the segmentation results of a document.
:param document_id: ID of the document as integer
:param project_id: ID of the project
:param action: Action from where to get the results
:return: URL to access the segmentation results of a document
"""
return f'https://app.konfuzio.com/api/projects/{project_id}/docs/{document_id}/{action}/'
| 29.291391
| 116
| 0.703821
|
"""Endpoints of the Konfuzio Host."""
import logging
from konfuzio_sdk import KONFUZIO_HOST, KONFUZIO_PROJECT_ID
logger = logging.getLogger(__name__)
def get_auth_token_url() -> str:
"""
Generate URL that creates an authentication token for the user.
:return: URL to generate the token.
"""
return f"{KONFUZIO_HOST}/api/token-auth/"
def get_project_list_url() -> str:
"""
Generate URL to load all the projects available for the user.
:return: URL to get all the projects for the user.
"""
return f"{KONFUZIO_HOST}/api/projects/"
def create_new_project_url() -> str:
"""
Generate URL to create a new project.
:return: URL to create a new project.
"""
return f"{KONFUZIO_HOST}/api/projects/"
def get_documents_meta_url() -> str:
"""
Generate URL to load meta information about documents.
:return: URL to get all the documents details.
"""
return f"{KONFUZIO_HOST}/api/projects/{KONFUZIO_PROJECT_ID}/docs/"
def get_upload_document_url() -> str:
"""
Generate URL to upload a document.
:return: URL to upload a document
"""
return f"{KONFUZIO_HOST}/api/v2/docs/"
def get_create_label_url() -> str:
"""
Generate URL to create a label.
:return: URL to create a label.
"""
return f"{KONFUZIO_HOST}/api/v2/labels/"
def get_document_ocr_file_url(document_id: int) -> str:
"""
Generate URL to access OCR version of document.
:param document_id: ID of the document as integer
:return: URL to get OCR document file.
"""
return f'{KONFUZIO_HOST}/doc/show/{document_id}/'
def get_document_original_file_url(document_id: int) -> str:
"""
Generate URL to access original version of the document.
:param document_id: ID of the document as integer
:return: URL to get the original document
"""
return f'{KONFUZIO_HOST}/doc/show-original/{document_id}/'
def get_document_api_details_url(document_id: int, include_extractions: bool = False, extra_fields='bbox') -> str:
"""
Generate URL to access document details of one document in a project.
:param document_id: ID of the document as integer
:param include_extractions: Bool to include extractions
:param extra_fields: Extra information to include in the response
:return: URL to get document details
"""
return (
f'{KONFUZIO_HOST}/api/projects/{KONFUZIO_PROJECT_ID}/docs/{document_id}/'
f'?include_extractions={include_extractions}&extra_fields={extra_fields}'
)
def get_project_url(project_id=None) -> str:
"""
Generate URL to get project details.
:param project_id: ID of the project
:return: URL to get project details.
"""
project_id = project_id if project_id else KONFUZIO_PROJECT_ID
return f'{KONFUZIO_HOST}/api/projects/{project_id}/'
def post_project_api_document_annotations_url(document_id: int) -> str:
"""
Add new annotations to a document.
:param document_id: ID of the document as integer
:return: URL for adding annotations to a document
"""
return f'{KONFUZIO_HOST}/api/projects/{KONFUZIO_PROJECT_ID}/docs/{document_id}/annotations/'
def delete_project_api_document_annotations_url(document_id: int, annotation_id: int) -> str:
"""
Delete the annotation of a document.
:param document_id: ID of the document as integer
:param annotation_id: ID of the annotation as integer
:return: URL to delete annotation of a document
"""
return f'{KONFUZIO_HOST}/api/projects/{KONFUZIO_PROJECT_ID}/docs/{document_id}/' f'annotations/{annotation_id}/'
def get_document_result_v1(document_id: int) -> str:
"""
Generate URL to access web interface for labeling of this project.
:param document_id: ID of the document as integer
:return: URL for labeling of the project.
"""
return f'{KONFUZIO_HOST}/api/v1/docs/{document_id}/'
def get_document_segmentation_details_url(document_id: int, project_id, action='segmentation') -> str:
"""
Generate URL to get the segmentation results of a document.
:param document_id: ID of the document as integer
:param project_id: ID of the project
:param action: Action from where to get the results
:return: URL to access the segmentation results of a document
"""
return f'https://app.konfuzio.com/api/projects/{project_id}/docs/{document_id}/{action}/'
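# Illustrative usage sketch (assumptions: the SDK is installed and KONFUZIO_HOST /
# KONFUZIO_PROJECT_ID are configured; the document ID below is a placeholder):
if __name__ == "__main__":
    example_document_id = 1234  # hypothetical document ID
    print(get_documents_meta_url())
    print(get_document_api_details_url(example_document_id, include_extractions=True))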
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
5a5179184e11bd69c115e048377711a912dc3761
| 440
|
py
|
Python
|
Web/user/models.py
|
Pancras-Zheng/Graduation-Project
|
5d1ae78d5e890fa7ecc2456d0d3d22bdea7c29f0
|
[
"MIT"
] | 37
|
2018-01-25T03:14:24.000Z
|
2021-12-15T10:02:37.000Z
|
Web/user/models.py
|
Pancras-Zheng/Graduation-Project
|
5d1ae78d5e890fa7ecc2456d0d3d22bdea7c29f0
|
[
"MIT"
] | null | null | null |
Web/user/models.py
|
Pancras-Zheng/Graduation-Project
|
5d1ae78d5e890fa7ecc2456d0d3d22bdea7c29f0
|
[
"MIT"
] | 10
|
2019-04-11T07:27:10.000Z
|
2021-11-24T11:16:14.000Z
|
# Create your models here.
| 27.5
| 69
| 0.747727
|
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.base_user import BaseUserManager
# Create your models here.
class User(AbstractUser):
    nickname = models.CharField(_('昵称'), max_length=50, blank=True)
    info = models.CharField(_('备注'), max_length=200, blank=True)
class Meta(AbstractUser.Meta):
pass
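# Illustrative note (assumption: this app is registered as "user" in
# INSTALLED_APPS). A custom user model must be activated in settings.py before
# the first migration runs, e.g.:
#
#     AUTH_USER_MODEL = "user.User"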
| 12
| 0
| 0
| 188
| 0
| 0
| 0
| 107
| 111
|
db4b3216850356cdd188fbda35706bb2acbe536c
| 14,096
|
py
|
Python
|
src/huggingface_hub/commands/user.py
|
FrancescoSaverioZuppichini/huggingface_hub
|
9e7ffda07ddcd668302a61156bcae0d9ec97a26e
|
[
"Apache-2.0"
] | 1
|
2022-03-28T14:15:24.000Z
|
2022-03-28T14:15:24.000Z
|
src/huggingface_hub/commands/user.py
|
osanseviero/huggingface_hub
|
b1cf2d8f47088d3fce2244058d222a4d8234b3ab
|
[
"Apache-2.0"
] | null | null | null |
src/huggingface_hub/commands/user.py
|
osanseviero/huggingface_hub
|
b1cf2d8f47088d3fce2244058d222a4d8234b3ab
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Union
def tabulate(rows: List[List[Union[str, int]]], headers: List[str]) -> str:
"""
Inspired by:
- stackoverflow.com/a/8356620/593036
- stackoverflow.com/questions/9535954/printing-lists-as-tabular-data
"""
col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)]
row_format = ("{{:{}}} " * len(headers)).format(*col_widths)
lines = []
lines.append(row_format.format(*headers))
lines.append(row_format.format(*["-" * w for w in col_widths]))
for row in rows:
lines.append(row_format.format(*row))
return "\n".join(lines)
NOTEBOOK_LOGIN_PASSWORD_HTML = """<center> <img
src=https://huggingface.co/front/assets/huggingface_logo-noborder.svg
alt='Hugging Face'> <br> Immediately click login after typing your password or
it might be stored in plain text in this notebook file. </center>"""
NOTEBOOK_LOGIN_TOKEN_HTML_START = """<center> <img
src=https://huggingface.co/front/assets/huggingface_logo-noborder.svg
alt='Hugging Face'> <br> Copy a token from <a
href="https://huggingface.co/settings/tokens" target="_blank">your Hugging Face
tokens page</a> and paste it below. <br> Immediately click login after copying
your token or it might be stored in plain text in this notebook file. </center>"""
NOTEBOOK_LOGIN_TOKEN_HTML_END = """
<b>Pro Tip:</b> If you don't already have one, you can create a dedicated
'notebooks' token with 'write' access, that you can then easily reuse for all
notebooks. <br> <i>Logging in with your username and password is deprecated and
won't be possible anymore in the near future. You can still use them for now by
clicking below.</i> </center>"""
def notebook_login():
"""
Displays a widget to login to the HF website and store the token.
"""
try:
import ipywidgets.widgets as widgets
from IPython.display import display
except ImportError:
raise ImportError(
"The `notebook_login` function can only be used in a notebook (Jupyter or Colab) and you need the "
"`ipywdidgets` module: `pip install ipywidgets`."
)
box_layout = widgets.Layout(
display="flex", flex_flow="column", align_items="center", width="50%"
)
token_widget = widgets.Password(description="Token:")
token_finish_button = widgets.Button(description="Login")
switch_button = widgets.Button(description="Use password")
login_token_widget = widgets.VBox(
[
widgets.HTML(NOTEBOOK_LOGIN_TOKEN_HTML_START),
token_widget,
token_finish_button,
widgets.HTML(NOTEBOOK_LOGIN_TOKEN_HTML_END),
switch_button,
],
layout=box_layout,
)
display(login_token_widget)
# Deprecated page for login
input_widget = widgets.Text(description="Username:")
password_widget = widgets.Password(description="Password:")
password_finish_button = widgets.Button(description="Login")
login_password_widget = widgets.VBox(
[
widgets.HTML(value=NOTEBOOK_LOGIN_PASSWORD_HTML),
widgets.HBox([input_widget, password_widget]),
password_finish_button,
],
layout=box_layout,
)
# On click events
token_finish_button.on_click(login_token_event)
password_finish_button.on_click(login_password_event)
switch_button.on_click(switch_event)
| 35.064677
| 135
| 0.605065
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from argparse import ArgumentParser
from getpass import getpass
from typing import List, Union
from huggingface_hub.commands import BaseHuggingfaceCLICommand
from huggingface_hub.constants import (
REPO_TYPES,
REPO_TYPES_URL_PREFIXES,
SPACES_SDK_TYPES,
)
from huggingface_hub.hf_api import HfApi, HfFolder
from requests.exceptions import HTTPError
class UserCommands(BaseHuggingfaceCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
login_parser = parser.add_parser(
"login", help="Log in using the same credentials as on huggingface.co"
)
login_parser.set_defaults(func=lambda args: LoginCommand(args))
whoami_parser = parser.add_parser(
"whoami", help="Find out which huggingface.co account you are logged in as."
)
whoami_parser.set_defaults(func=lambda args: WhoamiCommand(args))
logout_parser = parser.add_parser("logout", help="Log out")
logout_parser.set_defaults(func=lambda args: LogoutCommand(args))
# new system: git-based repo system
repo_parser = parser.add_parser(
"repo",
help="{create, ls-files} Commands to interact with your huggingface.co repos.",
)
repo_subparsers = repo_parser.add_subparsers(
help="huggingface.co repos related commands"
)
repo_create_parser = repo_subparsers.add_parser(
"create", help="Create a new repo on huggingface.co"
)
repo_create_parser.add_argument(
"name",
type=str,
help="Name for your repo. Will be namespaced under your username to build the repo id.",
)
repo_create_parser.add_argument(
"--type",
type=str,
help='Optional: repo_type: set to "dataset" or "space" if creating a dataset or space, default is model.',
)
repo_create_parser.add_argument(
"--organization", type=str, help="Optional: organization namespace."
)
repo_create_parser.add_argument(
"--space_sdk",
type=str,
help='Optional: Hugging Face Spaces SDK type. Required when --type is set to "space".',
choices=SPACES_SDK_TYPES,
)
repo_create_parser.add_argument(
"-y",
"--yes",
action="store_true",
help="Optional: answer Yes to the prompt",
)
repo_create_parser.set_defaults(func=lambda args: RepoCreateCommand(args))
class ANSI:
"""
Helper for en.wikipedia.org/wiki/ANSI_escape_code
"""
_bold = "\u001b[1m"
_red = "\u001b[31m"
_gray = "\u001b[90m"
_reset = "\u001b[0m"
@classmethod
def bold(cls, s):
return f"{cls._bold}{s}{cls._reset}"
@classmethod
def red(cls, s):
return f"{cls._bold + cls._red}{s}{cls._reset}"
@classmethod
def gray(cls, s):
return f"{cls._gray}{s}{cls._reset}"
def tabulate(rows: List[List[Union[str, int]]], headers: List[str]) -> str:
"""
Inspired by:
- stackoverflow.com/a/8356620/593036
- stackoverflow.com/questions/9535954/printing-lists-as-tabular-data
"""
col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)]
row_format = ("{{:{}}} " * len(headers)).format(*col_widths)
lines = []
lines.append(row_format.format(*headers))
lines.append(row_format.format(*["-" * w for w in col_widths]))
for row in rows:
lines.append(row_format.format(*row))
return "\n".join(lines)
def currently_setup_credential_helpers(directory=None) -> List[str]:
try:
output = subprocess.run(
"git config --list".split(),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding="utf-8",
check=True,
cwd=directory,
).stdout.split("\n")
current_credential_helpers = []
for line in output:
if "credential.helper" in line:
current_credential_helpers.append(line.split("=")[-1])
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
return current_credential_helpers
class BaseUserCommand:
def __init__(self, args):
self.args = args
self._api = HfApi()
class LoginCommand(BaseUserCommand):
def run(self):
print( # docstyle-ignore
"""
_| _| _| _| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _|_|_|_| _|_| _|_|_| _|_|_|_|
_| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|
_|_|_|_| _| _| _| _|_| _| _|_| _| _| _| _| _| _|_| _|_|_| _|_|_|_| _| _|_|_|
_| _| _| _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|
_| _| _|_| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _| _| _| _|_|_| _|_|_|_|
To login, `huggingface_hub` now requires a token generated from https://huggingface.co/settings/tokens.
(Deprecated, will be removed in v0.3.0) To login with username and password instead, interrupt with Ctrl+C.
"""
)
try:
token = getpass("Token: ")
_login(self._api, token=token)
except KeyboardInterrupt:
username = input("\rUsername: ")
password = getpass()
_login(self._api, username, password)
class WhoamiCommand(BaseUserCommand):
def run(self):
token = HfFolder.get_token()
if token is None:
print("Not logged in")
exit()
try:
info = self._api.whoami(token)
print(info["name"])
orgs = [org["name"] for org in info["orgs"]]
if orgs:
print(ANSI.bold("orgs: "), ",".join(orgs))
except HTTPError as e:
print(e)
print(ANSI.red(e.response.text))
exit(1)
class LogoutCommand(BaseUserCommand):
def run(self):
token = HfFolder.get_token()
if token is None:
print("Not logged in")
exit()
HfFolder.delete_token()
HfApi.unset_access_token()
try:
self._api.logout(token)
except HTTPError as e:
# Logging out with an access token will return a client error.
if not e.response.status_code == 400:
raise e
print("Successfully logged out.")
class RepoCreateCommand(BaseUserCommand):
def run(self):
token = HfFolder.get_token()
if token is None:
print("Not logged in")
exit(1)
try:
stdout = subprocess.check_output(["git", "--version"]).decode("utf-8")
print(ANSI.gray(stdout.strip()))
except FileNotFoundError:
print("Looks like you do not have git installed, please install.")
try:
stdout = subprocess.check_output(["git-lfs", "--version"]).decode("utf-8")
print(ANSI.gray(stdout.strip()))
except FileNotFoundError:
print(
ANSI.red(
"Looks like you do not have git-lfs installed, please install."
" You can install from https://git-lfs.github.com/."
" Then run `git lfs install` (you only have to do this once)."
)
)
print("")
user = self._api.whoami(token)["name"]
namespace = (
self.args.organization if self.args.organization is not None else user
)
repo_id = f"{namespace}/{self.args.name}"
if self.args.type not in REPO_TYPES:
print("Invalid repo --type")
exit(1)
if self.args.type in REPO_TYPES_URL_PREFIXES:
prefixed_repo_id = REPO_TYPES_URL_PREFIXES[self.args.type] + repo_id
else:
prefixed_repo_id = repo_id
print(f"You are about to create {ANSI.bold(prefixed_repo_id)}")
if not self.args.yes:
choice = input("Proceed? [Y/n] ").lower()
if not (choice == "" or choice == "y" or choice == "yes"):
print("Abort")
exit()
try:
url = self._api.create_repo(
repo_id=repo_id,
token=token,
repo_type=self.args.type,
space_sdk=self.args.space_sdk,
)
except HTTPError as e:
print(e)
print(ANSI.red(e.response.text))
exit(1)
print("\nYour repo now lives at:")
print(f" {ANSI.bold(url)}")
print(
"\nYou can clone it locally with the command below,"
" and commit/push as usual."
)
print(f"\n git clone {url}")
print("")
NOTEBOOK_LOGIN_PASSWORD_HTML = """<center> <img
src=https://huggingface.co/front/assets/huggingface_logo-noborder.svg
alt='Hugging Face'> <br> Immediately click login after typing your password or
it might be stored in plain text in this notebook file. </center>"""
NOTEBOOK_LOGIN_TOKEN_HTML_START = """<center> <img
src=https://huggingface.co/front/assets/huggingface_logo-noborder.svg
alt='Hugging Face'> <br> Copy a token from <a
href="https://huggingface.co/settings/tokens" target="_blank">your Hugging Face
tokens page</a> and paste it below. <br> Immediately click login after copying
your token or it might be stored in plain text in this notebook file. </center>"""
NOTEBOOK_LOGIN_TOKEN_HTML_END = """
<b>Pro Tip:</b> If you don't already have one, you can create a dedicated
'notebooks' token with 'write' access, that you can then easily reuse for all
notebooks. <br> <i>Logging in with your username and password is deprecated and
won't be possible anymore in the near future. You can still use them for now by
clicking below.</i> </center>"""
def notebook_login():
"""
Displays a widget to login to the HF website and store the token.
"""
try:
import ipywidgets.widgets as widgets
from IPython.display import clear_output, display
except ImportError:
raise ImportError(
"The `notebook_login` function can only be used in a notebook (Jupyter or Colab) and you need the "
"`ipywdidgets` module: `pip install ipywidgets`."
)
box_layout = widgets.Layout(
display="flex", flex_flow="column", align_items="center", width="50%"
)
token_widget = widgets.Password(description="Token:")
token_finish_button = widgets.Button(description="Login")
switch_button = widgets.Button(description="Use password")
login_token_widget = widgets.VBox(
[
widgets.HTML(NOTEBOOK_LOGIN_TOKEN_HTML_START),
token_widget,
token_finish_button,
widgets.HTML(NOTEBOOK_LOGIN_TOKEN_HTML_END),
switch_button,
],
layout=box_layout,
)
display(login_token_widget)
# Deprecated page for login
input_widget = widgets.Text(description="Username:")
password_widget = widgets.Password(description="Password:")
password_finish_button = widgets.Button(description="Login")
login_password_widget = widgets.VBox(
[
widgets.HTML(value=NOTEBOOK_LOGIN_PASSWORD_HTML),
widgets.HBox([input_widget, password_widget]),
password_finish_button,
],
layout=box_layout,
)
# On click events
def login_token_event(t):
token = token_widget.value
# Erase token and clear value to make sure it's not saved in the notebook.
token_widget.value = ""
clear_output()
_login(HfApi(), token=token)
token_finish_button.on_click(login_token_event)
def login_password_event(t):
username = input_widget.value
password = password_widget.value
# Erase password and clear value to make sure it's not saved in the notebook.
password_widget.value = ""
clear_output()
_login(HfApi(), username=username, password=password)
password_finish_button.on_click(login_password_event)
def switch_event(t):
clear_output()
display(login_password_widget)
switch_button.on_click(switch_event)
def _login(hf_api, username=None, password=None, token=None):
if token is None:
try:
token = hf_api.login(username, password)
except HTTPError as e:
# probably invalid credentials, display error message.
print(e)
print(ANSI.red(e.response.text))
exit(1)
else:
token, name = hf_api._validate_or_retrieve_token(token)
hf_api.set_access_token(token)
HfFolder.save_token(token)
print("Login successful")
print("Your token has been saved to", HfFolder.path_token)
helpers = currently_setup_credential_helpers()
if "store" not in helpers:
print(
ANSI.red(
"Authenticated through git-credential store but this isn't the helper defined on your machine.\nYou "
"might have to re-authenticate when pushing to the Hugging Face Hub. Run the following command in your "
"terminal in case you want to set this credential helper as the default\n\ngit config --global credential.helper store"
)
)
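# Illustrative usage (assumption: the package's console entry point is installed
# as `huggingface-cli`, which dispatches to the UserCommands subcommands above):
#
#     $ huggingface-cli login
#     $ huggingface-cli repo create my-model --type model -y
#
# From a notebook, notebook_login() displays the token/password widgets defined
# in this module.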
| 0
| 2,294
| 0
| 4,841
| 0
| 2,259
| 0
| 207
| 469
|
293ccee023fc91f0aa073e2ba442d3ed89f6b0d4
| 8,365
|
py
|
Python
|
src/ramstk/logger.py
|
TahaEntezari/ramstk
|
f82e5b31ef5c4e33cc02252263247b99a9abe129
|
[
"BSD-3-Clause"
] | 26
|
2019-05-15T02:03:47.000Z
|
2022-02-21T07:28:11.000Z
|
src/ramstk/logger.py
|
TahaEntezari/ramstk
|
f82e5b31ef5c4e33cc02252263247b99a9abe129
|
[
"BSD-3-Clause"
] | 815
|
2019-05-10T12:31:52.000Z
|
2022-03-31T12:56:26.000Z
|
src/ramstk/logger.py
|
TahaEntezari/ramstk
|
f82e5b31ef5c4e33cc02252263247b99a9abe129
|
[
"BSD-3-Clause"
] | 9
|
2019-04-20T23:06:29.000Z
|
2022-01-24T21:21:04.000Z
|
# -*- coding: utf-8 -*-
#
# ramstk.logger.py is part of The RAMSTK Project
#
# All rights reserved.
# Copyright 2019 Doyle Rowland doyle.rowland <AT> reliaqual <DOT> com
"""RAMSTK Logger Module."""
# Standard Library Imports
import logging
# Third Party Imports
LOGFORMAT = logging.Formatter("%(asctime)s - %(name)s - %(lineno)s : %(message)s")
| 39.64455
| 82
| 0.68416
|
# -*- coding: utf-8 -*-
#
# ramstk.logger.py is part of The RAMSTK Project
#
# All rights reserved.
# Copyright 2019 Doyle Rowland doyle.rowland <AT> reliaqual <DOT> com
"""RAMSTK Logger Module."""
# Standard Library Imports
import logging
import sys
from typing import Dict
# Third Party Imports
from pubsub import pub
LOGFORMAT = logging.Formatter("%(asctime)s - %(name)s - %(lineno)s : %(message)s")
class RAMSTKLogManager:
"""Class to manage logging of RAMSTK messages."""
loggers: Dict[str, logging.Logger] = {}
def __init__(self, log_file: str) -> None:
"""Initialize an instance of the LogManager.
:param log_file: the absolute path to the log file to use with this
log manager.
"""
# Initialize private dictionary attributes.
# Initialize private list attributes.
# Initialize private scalar attributes.
# Initialize public dictionary attributes.
# Initialize public list attributes.
# Initialize public scalar attributes.
self.log_file = log_file
# Subscribe to PyPubSub messages.
pub.subscribe(self._do_log_fail_message, "fail_connect_program_database")
pub.subscribe(self._do_log_fail_message, "fail_delete_environment")
pub.subscribe(self._do_log_fail_message, "fail_delete_failure_definition")
pub.subscribe(self._do_log_fail_message, "fail_delete_fmea")
pub.subscribe(self._do_log_fail_message, "fail_delete_function")
pub.subscribe(self._do_log_fail_message, "fail_delete_hazard")
pub.subscribe(self._do_log_fail_message, "fail_delete_mission")
pub.subscribe(self._do_log_fail_message, "fail_delete_mission_phase")
pub.subscribe(self._do_log_fail_message, "fail_delete_revision")
pub.subscribe(self._do_log_fail_message, "fail_import_module")
pub.subscribe(self._do_log_fail_message, "fail_insert_action")
pub.subscribe(self._do_log_fail_message, "fail_insert_cause")
pub.subscribe(self._do_log_fail_message, "fail_insert_control")
pub.subscribe(self._do_log_fail_message, "fail_insert_environment")
pub.subscribe(self._do_log_fail_message, "fail_insert_failure_definition")
pub.subscribe(self._do_log_fail_message, "fail_insert_mechanism")
pub.subscribe(self._do_log_fail_message, "fail_insert_mission")
pub.subscribe(self._do_log_fail_message, "fail_insert_mission_phase")
pub.subscribe(self._do_log_fail_message, "fail_insert_mode")
pub.subscribe(self._do_log_fail_message, "fail_insert_function")
pub.subscribe(self._do_log_fail_message, "fail_insert_hazard")
pub.subscribe(self._do_log_fail_message, "fail_insert_hardware")
pub.subscribe(self._do_log_fail_message, "fail_insert_validation")
pub.subscribe(self._do_log_fail_message, "fail_insert_stakeholder")
pub.subscribe(self._do_log_fail_message, "fail_insert_revision")
pub.subscribe(self._do_log_fail_message, "fail_insert_requirement")
pub.subscribe(self._do_log_fail_message, "fail_insert_opload")
pub.subscribe(self._do_log_fail_message, "fail_insert_opstress")
pub.subscribe(self._do_log_fail_message, "fail_insert_record")
pub.subscribe(self._do_log_fail_message, "fail_insert_test_method")
pub.subscribe(self._do_log_fail_message, "fail_update_fmea")
pub.subscribe(self._do_log_fail_message, "fail_update_function")
pub.subscribe(self._do_log_fail_message, "fail_update_hardware")
pub.subscribe(self._do_log_fail_message, "fail_update_record")
pub.subscribe(self._do_log_fail_message, "fail_update_requirement")
pub.subscribe(self._do_log_fail_message, "fail_update_revision")
pub.subscribe(self.do_log_debug, "do_log_debug_msg")
pub.subscribe(self.do_log_info, "do_log_info_msg")
pub.subscribe(self.do_log_warning, "do_log_warning_msg")
pub.subscribe(self.do_log_error, "do_log_error_msg")
pub.subscribe(self.do_log_critical, "do_log_critical_msg")
# Create a logger for the pypubsub fail_* messages.
self.do_create_logger(__name__, "WARN")
def _do_log_fail_message(self, error_message: str) -> None:
"""Log PyPubSub broadcast fail messages.
:param error_message: the error message that was part of the
broadcast package.
:return: None
:rtype: None
"""
self.loggers[__name__].warning(error_message)
@staticmethod
def _get_console_handler(log_level: str) -> logging.Handler:
"""Create the log handler for console output.
:return: _c_handler
:rtype: :class:`logging.Handler`
"""
_c_handler = logging.StreamHandler(sys.stdout)
_c_handler.setLevel(log_level)
_c_handler.setFormatter(LOGFORMAT)
return _c_handler
def _get_file_handler(self, log_level: str) -> logging.Handler:
"""Create the log handler for file output.
:return: _f_handler
:rtype: :class:`logging.Handler`
"""
_f_handler = logging.FileHandler(self.log_file)
_f_handler.setLevel(log_level)
_f_handler.setFormatter(LOGFORMAT)
return _f_handler
def do_create_logger(
self, logger_name: str, log_level: str, to_tty: bool = False
) -> None:
"""Create a logger instance.
:param logger_name: the name of the logger used in the application.
:param log_level: the level of messages to log.
:param to_tty: boolean indicating whether this logger will
also dump messages to the terminal.
:return: None
:rtype: None
"""
_logger = logging.getLogger(logger_name)
_logger.setLevel(log_level)
_logger.addHandler(self._get_file_handler(log_level))
if to_tty:
_logger.addHandler(self._get_console_handler(log_level))
self.loggers[logger_name] = _logger
def do_log_debug(self, logger_name: str, message: str) -> None:
"""Log DEBUG level messages.
:param logger_name: the name of the logger used in the application.
:param message: the message to log.
:return: None
:rtype: None
"""
if self.loggers[logger_name].isEnabledFor(logging.DEBUG):
self.loggers[logger_name].debug(message)
def do_log_exception(self, logger_name: str, exception: object) -> None:
"""Log EXCEPTIONS.
:param logger_name: the name of the logger used in the application.
:param exception: the exception to log.
:return: None
:rtype: None
"""
if self.loggers[logger_name].isEnabledFor(logging.WARNING):
self.loggers[logger_name].exception(exception)
def do_log_info(self, logger_name: str, message: str) -> None:
"""Log INFO level messages.
:param logger_name: the name of the logger used in the application.
:param message: the message to log.
:return: None
:rtype: None
"""
if self.loggers[logger_name].isEnabledFor(logging.INFO):
self.loggers[logger_name].info(message)
def do_log_warning(self, logger_name: str, message: str) -> None:
"""Log WARN level messages.
:param logger_name: the name of the logger used in the application.
:param message: the message to log.
:return: None
:rtype: None
"""
if self.loggers[logger_name].isEnabledFor(logging.WARNING):
self.loggers[logger_name].warning(message)
def do_log_error(self, logger_name: str, message: str) -> None:
"""Log ERROR level messages.
:param logger_name: the name of the logger used in the application.
:param message: the message to log.
:return: None
:rtype: None
"""
if self.loggers[logger_name].isEnabledFor(logging.ERROR):
self.loggers[logger_name].error(message)
def do_log_critical(self, logger_name: str, message: str) -> None:
"""Log CRITICAL level messages.
:param logger_name: the name of the logger used in the application.
:param message: the message to log.
:return: None
:rtype: None
"""
self.loggers[logger_name].critical(message)
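# Illustrative usage sketch (assumptions: a writable log-file path; the logger
# name and message are placeholders):
if __name__ == "__main__":
    LOG_MANAGER = RAMSTKLogManager("./ramstk_example.log")
    LOG_MANAGER.do_create_logger("example.logger", "INFO", to_tty=True)
    # Messages are routed through the PyPubSub topics subscribed in __init__().
    pub.sendMessage(
        "do_log_info_msg", logger_name="example.logger", message="Hello, RAMSTK"
    )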
| 0
| 357
| 0
| 7,572
| 0
| 0
| 0
| -8
| 89
|
f4bbd3c26bf1e8d647337c4dd66784c1c9d86a9f
| 2,458
|
py
|
Python
|
examples/demo/status_overlay.py
|
martinRenou/chaco
|
1888da3ecee89f9b2d11900cda9333b32fc5e89a
|
[
"BSD-3-Clause"
] | 3
|
2017-09-17T17:32:06.000Z
|
2022-03-15T13:04:43.000Z
|
examples/demo/status_overlay.py
|
martinRenou/chaco
|
1888da3ecee89f9b2d11900cda9333b32fc5e89a
|
[
"BSD-3-Clause"
] | null | null | null |
examples/demo/status_overlay.py
|
martinRenou/chaco
|
1888da3ecee89f9b2d11900cda9333b32fc5e89a
|
[
"BSD-3-Clause"
] | 5
|
2015-05-17T16:08:11.000Z
|
2021-02-23T09:23:42.000Z
|
import numpy
index = numpy.array([1,2,3,4,5])
data_series = index**2
my_plot = MyPlot(index, data_series)
my_plot.configure_traits()
| 32.342105
| 74
| 0.614321
|
import numpy
from chaco.api import Plot, ArrayPlotData
from chaco.layers.api import ErrorLayer, WarningLayer, StatusLayer
from enable.component_editor import ComponentEditor
from traits.api import HasTraits, Instance, Button
from traitsui.api import UItem, View, HGroup
class MyPlot(HasTraits):
""" Displays a plot with a few buttons to control which overlay
to display
"""
plot = Instance(Plot)
status_overlay = Instance(StatusLayer)
error_button = Button('Error')
warn_button = Button('Warning')
no_problem_button = Button('No problem')
traits_view = View( HGroup(UItem('error_button'),
UItem('warn_button'),
UItem('no_problem_button')),
UItem('plot', editor=ComponentEditor()),
width=700, height=600, resizable=True,
)
def __init__(self, index, data_series, **kw):
super(MyPlot, self).__init__(**kw)
plot_data = ArrayPlotData(index=index)
plot_data.set_data('data_series', data_series)
self.plot = Plot(plot_data)
self.plot.plot(('index', 'data_series'))
def _error_button_fired(self, event):
""" removes the old overlay and replaces it with
an error overlay
"""
self.clear_status()
self.status_overlay = ErrorLayer(component=self.plot,
align='ul', scale_factor=0.25)
self.plot.overlays.append(self.status_overlay)
self.plot.request_redraw()
def _warn_button_fired(self, event):
""" removes the old overlay and replaces it with
an warning overlay
"""
self.clear_status()
self.status_overlay = WarningLayer(component=self.plot,
align='ur', scale_factor=0.25)
self.plot.overlays.append(self.status_overlay)
self.plot.request_redraw()
def _no_problem_button_fired(self, event):
""" removes the old overlay
"""
self.clear_status()
self.plot.request_redraw()
def clear_status(self):
if self.status_overlay in self.plot.overlays:
# fade_out will remove the overlay when its done
self.status_overlay.fade_out()
index = numpy.array([1,2,3,4,5])
data_series = index**2
my_plot = MyPlot(index, data_series)
my_plot.configure_traits()
| 0
| 0
| 0
| 2,041
| 0
| 0
| 0
| 147
| 134
|
08cdc43106ee16eac03626a91a328ff78df10a22
| 681
|
py
|
Python
|
multi_threadpool_executor.py
|
Dev-Bobbie/multi_spider
|
8fd19ab70de04b6cac021d354850b07ffcf360f2
|
[
"Apache-2.0"
] | null | null | null |
multi_threadpool_executor.py
|
Dev-Bobbie/multi_spider
|
8fd19ab70de04b6cac021d354850b07ffcf360f2
|
[
"Apache-2.0"
] | null | null | null |
multi_threadpool_executor.py
|
Dev-Bobbie/multi_spider
|
8fd19ab70de04b6cac021d354850b07ffcf360f2
|
[
"Apache-2.0"
] | null | null | null |
if __name__ == '__main__':
main()
| 24.321429
| 49
| 0.625551
|
from concurrent.futures import ThreadPoolExecutor
import time
def sayhello(a):
print("hello: "+a)
time.sleep(2)
def main():
seed=["a","b","c"]
start1=time.time()
for each in seed:
sayhello(each)
end1=time.time()
print("time1: "+str(end1-start1))
start2=time.time()
with ThreadPoolExecutor(3) as executor:
for each in seed:
executor.submit(sayhello,each)
end2=time.time()
print("time2: "+str(end2-start2))
start3=time.time()
with ThreadPoolExecutor(3) as executor1:
executor1.map(sayhello,seed)
end3=time.time()
print("time3: "+str(end3-start3))
if __name__ == '__main__':
main()
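# Illustrative variant (not in the original script): collecting return values.
# executor.map() yields results in input order; submit() returns Future objects
# whose .result() blocks until that task finishes.
def sayhello_len(a):
    print("hello: " + a)
    time.sleep(2)
    return len(a)

def main_with_results():
    seed = ["a", "bb", "ccc"]
    with ThreadPoolExecutor(3) as executor:
        results = list(executor.map(sayhello_len, seed))
    print("lengths: " + str(results))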
| 0
| 0
| 0
| 0
| 0
| 535
| 0
| 18
| 90
|
61b5793ee25a599b5e5738633cc2cd220b7bf9e9
| 7,541
|
py
|
Python
|
boss2.py
|
Jamhacks2018/TheJamExpansion
|
1acec353e666fef6608e06b57e82683053e7f060
|
[
"MIT"
] | null | null | null |
boss2.py
|
Jamhacks2018/TheJamExpansion
|
1acec353e666fef6608e06b57e82683053e7f060
|
[
"MIT"
] | null | null | null |
boss2.py
|
Jamhacks2018/TheJamExpansion
|
1acec353e666fef6608e06b57e82683053e7f060
|
[
"MIT"
] | 3
|
2018-05-05T19:59:56.000Z
|
2020-11-15T21:06:27.000Z
|
init()
fontGeneral = font.Font('resources/fonts/Calibri.ttf', 30)
fontHealth = font.Font('resources/fonts/Calibri Bold.ttf', 15)
| 40.983696
| 287
| 0.502851
|
from pygame import *
from enemies import *
import random
from math import sin, cos, radians  # needed by Boss.sweeper()/ring(); not guaranteed by the star imports above
init()
fontGeneral = font.Font('resources/fonts/Calibri.ttf', 30)
fontHealth = font.Font('resources/fonts/Calibri Bold.ttf', 15)
class Cap():
def __init__(self):
#initialize the image and pos of cap:
self.img = image.load('resources/jam/boss/cap.png')
self.x = 0
self.y = -150
        self.rect = Rect(self.x, self.y, 722, 149)
        self.health = 1000  # assumed value: check() below uses self.health but the original snippet never initializes it
    def draw(self, screen):
        screen.blit(self.img, self.rect)
        self.rect = Rect(self.x, self.y, self.img.get_width(), self.img.get_height())
    def check(self, bullets):
        for b in bullets:
            if b.rect.colliderect(self.rect):
self.health -= b.dmg
#check if it is supposed to die, if dead start boss phase 2:
class Boss():
def __init__(self):
#initialize the image and pos:
self.image = image.load('resources/jam/boss/uncapped.png').convert_alpha()
self.w = self.image.get_width() // 5
self.h = self.image.get_width() // 5
self.x = 300
self.y = 25
self.rect = Rect(self.x, self.y, self.w, self.h)
self.image = transform.scale(self.image, (self.w, self.h))
self.gun3 = (self.rect.bottomleft[0]+10, self.rect.bottomleft[1]-10)
self.gun2 = (self.rect.bottomright[0]+10, self.rect.bottomright[1]-10)
self.gun1 = (self.rect.bottomright[0] + self.w // 2, self.rect.bottomright[1]-10)
self.guns = [self.gun1, self.gun2]
self.firing_speed = [25, 20, 15]
self.firing_time = 0
#grace time is reset if grace time is reached
self.grace_timers = [120, 90, 65]
self.grace_time = 180
#initialize boss properties
self.phase = 0
self.max_health = 12000
self.health = self.max_health
self.vulnerable = True
self.attacks = [False, False]
self.directions = 0
#counter of how much boss moved
self.frames_spent_moving = 0
#draws itself and its health
def draw(self, screen):
screen.blit(self.image, self.rect)
draw.rect(screen, (255, 0, 255), (15, 700 - 85, int(985 * self.health / self.max_health), 75))
screen.blit(fontGeneral.render("Boss health: %i/%i" %(self.health, self.max_health), 1, (0, 255, 0)), (467 - fontHealth.size("Boss health: %i/%i" %(self.health, self.max_health))[0] // 2, 700 - 55 - fontHealth.size("Boss health: %i/%i" %(self.health, self.max_health))[1] // 2))
def update(self, pl, eb):
if self.grace_time == 0:
#handles attack timings with some randomness
self.attacks[random.randint(0,1)] = True
self.directions = random.randint(0,3)
#resets movement during attacks
self.frames_spent_moving = 0
#handles in between attack grace timers
self.grace_time = self.grace_timers[self.phase]
else:
#handles movement between attacks
if self.frames_spent_moving <= 30:
self.move()
self.frames_spent_moving += 1
self.grace_time -= 1
self.rect = Rect(self.x, self.y, self.w, self.h)
self.gun3 = (self.rect.bottomleft[0]+10, self.rect.bottomleft[1]-10)
self.gun2 = (self.rect.bottomright[0]+10, self.rect.bottomright[1]-10)
self.gun1 = (self.rect.bottomright[0] - self.w // 2, self.rect.bottomright[1]-10)
self.guns = [self.gun1, self.gun2]
#tries to fire each attack
self.sweeper(eb)
self.ring(eb)
def check(self, bullets, pickups, pl):
for b in bullets:
if b.rect.colliderect(self.rect):
self.health -= b.dmg + pl.dmg_upG
#if health permits, spawns a randomly placed heart
if 0 <= self.health%500 <= 10 and self.health != self.max_health:
pickups.append(Heart(random.randint(300, 700), random.randint(200, 500), random.randint(250, 500)))
if 0 <= self.health%250 <= 10 and self.health != self.max_health:
self.weakpoint = random.randint(0, 4)
self.health -= 11
# checks if it is supposed to die
if self.health <= 0:
self.health = self.max_health
return False
        #check for phase change: test the lower health band first (otherwise it
        #is unreachable) and keep phase within the 3-entry timer/speed lists
        elif self.health < 4000:
            self.phase = 2
        elif self.health < 8000:
            self.phase = 1
return True
def move(self):
#very similar to pl.directions, moves if it can
if self.directions == 0:
if self.y < 100:
self.y += 3
print("move 1")
elif self.directions == 1:
if 0 < self.y:
self.y -= 3
print("move 2")
elif self.directions == 2:
if 0 < self.x:
self.x -= 10
print("move 3")
elif self.directions == 3:
if self.x + 800 < 1000:
self.x += 10
print("move 4")
def sweeper(self, enemyBullets):
#shoots stream of bullets from left to right from random guns
if self.attacks[1]:
for angle in range(10, 170, 5):
#checks if timer conditions are just right
if self.firing_time + 10 == angle:
self.target_angle = (self.gun2[0] + 50 * cos(radians(angle)),
self.gun2[1] + 50 * sin(radians(angle)))
enemyBullets.append(JamBullet(self.gun2[0], self.gun2[1], self.target_angle[0], self.target_angle[1], 15 * (self.phase + 1)))
self.target_angle = (self.gun3[0] + 50 * cos(radians(180 - angle)),
self.gun3[1] + 50 * sin(radians(180 -angle)))
enemyBullets.append(JamBullet(self.gun3[0], self.gun3[1], self.target_angle[0], self.target_angle[1], 15 * (self.phase + 1)))
#ends attack
if self.firing_time + 10 >= 170:
self.attacks[1] = False
self.firing_time = 0
break
else: self.firing_time += 2
def ring(self, enemyBullets):
if self.attacks[0]:
for angle in range(0, 360, 10):
if self.firing_time == angle:
self.target_angle = (self.rect.centerx + 50 * cos(radians(angle)),
self.rect.centery + 50 * sin(radians(angle)))
enemyBullets.append(JamBullet(self.rect.centerx, self.rect.centery, self.target_angle[0], self.target_angle[1], 15 * self.phase))
if self.firing_time >= 360:
self.attacks[0] = False
self.firing_time = 0
break
else: self.firing_time += 2.5
| 0
| 0
| 0
| 7,222
| 0
| 0
| 0
| -9
| 134
|
9084b7ccd8e3dba852fd6469469662507b5a8c2b
| 24,781
|
py
|
Python
|
src/simulator/network_wrong_mi.py
|
ChenGeng-ZJU/PCC-RL
|
6627a186643175ea68269d78e206e6bc45ac634f
|
[
"Apache-2.0"
] | null | null | null |
src/simulator/network_wrong_mi.py
|
ChenGeng-ZJU/PCC-RL
|
6627a186643175ea68269d78e206e6bc45ac634f
|
[
"Apache-2.0"
] | null | null | null |
src/simulator/network_wrong_mi.py
|
ChenGeng-ZJU/PCC-RL
|
6627a186643175ea68269d78e206e6bc45ac634f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Nathan Jay and Noga Rotman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
warnings.simplefilter(action='ignore', category=UserWarning)
from gym.envs.registration import register
MAX_CWND = 5000
MIN_CWND = 4
MAX_RATE = 20000
MIN_RATE = 5
REWARD_SCALE = 0.001
EVENT_TYPE_SEND = 'S'
EVENT_TYPE_ACK = 'A'
BYTES_PER_PACKET = 1500
LATENCY_PENALTY = 1.0
LOSS_PENALTY = 1.0
USE_LATENCY_NOISE = True
MAX_LATENCY_NOISE = 1.1
# DEBUG = True
DEBUG = False
MI_RTT_PROPORTION = 1.0
# PACKET_LOG_FLAG = False
PACKET_LOG_FLAG = True
register(id='PccNs-v0', entry_point='simulator.network:SimulatedNetworkEnv')
| 38.183359
| 130
| 0.561438
|
# Copyright 2019 Nathan Jay and Noga Rotman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import heapq
import os
import random
import sys
import time
import math
import warnings
warnings.simplefilter(action='ignore', category=UserWarning)
import gym
from gym import spaces
from gym.envs.registration import register
from gym.utils import seeding
import numpy as np
from common import sender_obs
from common.utils import pcc_aurora_reward, read_json_file
from simulator.trace import Trace
import pandas as pd
MAX_CWND = 5000
MIN_CWND = 4
MAX_RATE = 20000
MIN_RATE = 5
REWARD_SCALE = 0.001
EVENT_TYPE_SEND = 'S'
EVENT_TYPE_ACK = 'A'
BYTES_PER_PACKET = 1500
LATENCY_PENALTY = 1.0
LOSS_PENALTY = 1.0
USE_LATENCY_NOISE = True
MAX_LATENCY_NOISE = 1.1
# DEBUG = True
DEBUG = False
MI_RTT_PROPORTION = 1.0
# PACKET_LOG_FLAG = False
PACKET_LOG_FLAG = True
def debug_print(msg):
if DEBUG:
print(msg, file=sys.stderr, flush=True)
class EmuReplay:
def __init__(self, ):
df = pd.read_csv('aurora_emulation_log.csv')
self.ts = df['timestamp'].tolist()
self.send_rate = df['send_rate'].tolist()
self.idx = 0
def get_ts(self):
if self.idx > len(self.ts):
self.idx = len(self.ts) -1
ts = self.ts[self.idx]
self.idx += 1
return ts
def get_rate(self):
return self.send_rate[self.idx] / 8 / BYTES_PER_PACKET
def reset(self):
self.idx = 0
class Link():
def __init__(self, trace: Trace):
self.trace = trace
self.queue_delay = 0.0
self.queue_delay_update_time = 0.0
self.queue_size = self.trace.get_queue_size()
self.pkt_in_queue = 0
def get_cur_queue_delay(self, event_time):
self.pkt_in_queue = max(0, self.pkt_in_queue -
(event_time - self.queue_delay_update_time) *
self.get_bandwidth(event_time))
self.queue_delay_update_time = event_time
cur_queue_delay = math.ceil(
self.pkt_in_queue) / self.get_bandwidth(event_time)
return cur_queue_delay
def get_cur_latency(self, event_time):
q_delay = self.get_cur_queue_delay(event_time)
# print('queue delay: ', q_delay)
return self.trace.get_delay(event_time) / 1000.0 + q_delay
def packet_enters_link(self, event_time):
if (random.random() < self.trace.get_loss_rate()):
return False
self.queue_delay = self.get_cur_queue_delay(event_time)
extra_delay = 1.0 / self.get_bandwidth(event_time)
if 1 + math.ceil(self.pkt_in_queue) > self.queue_size:
# print("{}\tDrop!".format(event_time))
return False
self.queue_delay += extra_delay
self.pkt_in_queue += 1
return True
def print_debug(self):
print("Link:")
# TODO: Do not use timestamp 0.
print("Bandwidth: %.3fMbps" % (self.trace.get_bandwidth(0)))
# TODO: Do not use timestamp 0.
print("Delay: %.3fms" % (self.trace.get_delay(0)))
print("Queue Delay: %.3fms" % (self.queue_delay * 1000))
print("One Packet Queue Delay: %.3fms" % (
1000.0 * 1 / (self.trace.get_bandwidth(0) * 1e6 / 8 / BYTES_PER_PACKET)))
print("Queue size: %dpackets" % self.queue_size)
print("Loss: %.4f" % self.trace.get_loss_rate())
def reset(self):
self.queue_delay = 0.0
self.queue_delay_update_time = 0.0
self.pkt_in_queue = 0
def get_bandwidth(self, ts):
return self.trace.get_bandwidth(ts) * 1e6 / 8 / BYTES_PER_PACKET
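# Editor's smoke-test sketch: Link only calls get_queue_size, get_bandwidth,
# get_delay and get_loss_rate on its trace, so a hypothetical duck-typed stub is
# enough to exercise it outside the full simulator (the real Trace lives in
# simulator.trace).
class _ConstantStubTrace:
    """Stub trace: 12 Mbps bandwidth, 20 ms propagation delay, no loss, 10-packet queue."""
    def get_queue_size(self):
        return 10
    def get_bandwidth(self, ts):
        return 12.0  # Mbps, the unit Link.get_bandwidth expects
    def get_delay(self, ts):
        return 20.0  # ms, the unit Link.get_cur_latency expects
    def get_loss_rate(self):
        return 0.0
def _example_link_latency():
    # Push a few packets into the link at t=0 and return the one-way latency
    # (propagation plus queueing delay) the next packet would observe, in seconds.
    link = Link(_ConstantStubTrace())
    for _ in range(5):
        link.packet_enters_link(0.0)
    return link.get_cur_latency(0.0)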
class Network():
def __init__(self, senders, links, env):
self.event_count = 0
self.q = []
self.cur_time = 0.0
self.senders = senders
self.links = links
self.queue_initial_packets()
self.env = env
self.pkt_log = []
def queue_initial_packets(self):
for sender in self.senders:
sender.register_network(self)
sender.reset_obs()
heapq.heappush(self.q, (0, sender, EVENT_TYPE_SEND,
0, 0.0, False, self.event_count, sender.rto, 0))
self.event_count += 1
def reset(self):
self.pkt_log = []
self.cur_time = 0.0
self.q = []
[link.reset() for link in self.links]
[sender.reset() for sender in self.senders]
self.queue_initial_packets()
def get_cur_time(self):
return self.cur_time
def run_for_dur(self, dur, action=None):
# if self.cur_time > 1.75:
# pass
# else:
# self.senders[0].rate = self.env.replay.get_rate()
# dur = self.env.replay.get_ts() - self.cur_time
end_time = min(self.cur_time + dur, self.env.current_trace.timestamps[-1])
debug_print('MI from {} to {}, dur {}'.format(
self.cur_time, end_time, dur))
for sender in self.senders:
sender.reset_obs()
while True:
event_time, sender, event_type, next_hop, cur_latency, dropped, \
event_id, rto, event_queue_delay = self.q[0]
if event_time >= end_time:
self.cur_time = end_time
break
event_time, sender, event_type, next_hop, cur_latency, dropped, \
event_id, rto, event_queue_delay = heapq.heappop(self.q)
self.cur_time = event_time
new_event_time = event_time
new_event_type = event_type
new_next_hop = next_hop
new_latency = cur_latency
new_dropped = dropped
new_event_queue_delay = event_queue_delay
push_new_event = False
debug_print("Got %d event %s, to link %d, latency %f at time %f, "
"next_hop %d, dropped %s, event_q length %f, "
"sender rate %f, duration: %f, queue_size: %f, "
"rto: %f, cwnd: %f, ssthresh: %f, sender rto %f, "
"pkt in flight %d, wait time %d" % (
event_id, event_type, next_hop, cur_latency,
event_time, next_hop, dropped, len(self.q),
sender.rate, dur, self.links[0].queue_size,
rto, sender.cwnd, sender.ssthresh, sender.rto,
int(sender.bytes_in_flight/BYTES_PER_PACKET),
sender.pkt_loss_wait_time))
if event_type == EVENT_TYPE_ACK:
if next_hop == len(sender.path):
# if cur_latency > 1.0:
# sender.timeout(cur_latency)
# sender.on_packet_lost(cur_latency)
if rto >= 0 and cur_latency > rto and sender.pkt_loss_wait_time <= 0:
sender.timeout()
dropped = True
new_dropped = True
elif dropped:
sender.on_packet_lost(cur_latency)
if PACKET_LOG_FLAG:
self.pkt_log.append([self.cur_time, event_id, 'lost',
BYTES_PER_PACKET])
else:
sender.on_packet_acked(cur_latency)
debug_print('Ack packet at {}'.format(self.cur_time))
# log packet acked
if PACKET_LOG_FLAG:
self.pkt_log.append([self.cur_time, event_id, 'acked',
BYTES_PER_PACKET, cur_latency,
event_queue_delay])
else:
new_next_hop = next_hop + 1
new_event_queue_delay += sender.path[next_hop].get_cur_queue_delay(
self.cur_time)
link_latency = sender.path[next_hop].get_cur_latency(
self.cur_time)
# link_latency *= self.env.current_trace.get_delay_noise_replay(self.cur_time)
# if USE_LATENCY_NOISE:
# link_latency *= random.uniform(1.0, MAX_LATENCY_NOISE)
new_latency += link_latency
new_event_time += link_latency
push_new_event = True
elif event_type == EVENT_TYPE_SEND:
if next_hop == 0:
if sender.can_send_packet():
sender.on_packet_sent()
# print('Send packet at {}'.format(self.cur_time))
if PACKET_LOG_FLAG:
self.pkt_log.append([self.cur_time, event_id, 'sent',
BYTES_PER_PACKET])
push_new_event = True
heapq.heappush(self.q, (self.cur_time + (1.0 / sender.rate),
sender, EVENT_TYPE_SEND, 0, 0.0,
False, self.event_count, sender.rto,
0))
self.event_count += 1
else:
push_new_event = True
if next_hop == sender.dest:
new_event_type = EVENT_TYPE_ACK
new_next_hop = next_hop + 1
new_event_queue_delay += sender.path[next_hop].get_cur_queue_delay(
self.cur_time)
link_latency = sender.path[next_hop].get_cur_latency(
self.cur_time)
# if USE_LATENCY_NOISE:
# link_latency *= random.uniform(1.0, MAX_LATENCY_NOISE)
# link_latency += self.env.current_trace.get_delay_noise(self.cur_time) / 1000
# link_latency *= self.env.current_trace.get_delay_noise_replay(self.cur_time)
new_latency += link_latency
new_event_time += link_latency
new_dropped = not sender.path[next_hop].packet_enters_link(
self.cur_time)
if not new_dropped:
sender.queue_delay_samples.append(new_event_queue_delay)
if push_new_event:
heapq.heappush(self.q, (new_event_time, sender, new_event_type,
new_next_hop, new_latency, new_dropped,
event_id, rto, new_event_queue_delay))
for sender in self.senders:
sender.record_run()
sender_mi = self.senders[0].get_run_data()
throughput = sender_mi.get("recv rate") # bits/sec
latency = sender_mi.get("avg latency") # second
loss = sender_mi.get("loss ratio")
debug_print("thpt %f, delay %f, loss %f, bytes sent %f, bytes acked %f" % (
throughput/1e6, latency, loss, sender_mi.bytes_sent, sender_mi.bytes_acked))
reward = pcc_aurora_reward(
throughput / 8 / BYTES_PER_PACKET, latency, loss,
np.mean(self.env.current_trace.bandwidths) * 1e6 / 8 / BYTES_PER_PACKET)
if latency > 0.0:
self.env.run_dur = MI_RTT_PROPORTION * sender_mi.get("avg latency") + (1 / self.links[0].get_bandwidth(self.cur_time))
# self.env.run_dur = max(MI_RTT_PROPORTION * sender_mi.get("avg latency"), 5 * (1 / self.senders[0].rate))
# print(self.env.run_dur)
return reward * REWARD_SCALE
class Sender():
def __init__(self, rate, path, dest, features, cwnd=25, history_len=10,
delta_scale=1):
self.id = Sender._get_next_id()
self.delta_scale = delta_scale
self.starting_rate = rate
self.rate = rate
self.sent = 0
self.acked = 0
self.lost = 0
self.bytes_in_flight = 0
self.min_latency = None
self.rtt_samples = []
self.queue_delay_samples = []
self.prev_rtt_samples = self.rtt_samples
self.sample_time = []
self.net = None
self.path = path
self.dest = dest
self.history_len = history_len
self.features = features
self.history = sender_obs.SenderHistory(self.history_len,
self.features, self.id)
self.cwnd = cwnd
self.use_cwnd = False
self.rto = -1
self.ssthresh = 0
self.pkt_loss_wait_time = -1
self.estRTT = 1000000 / 1e6 # SynInterval in emulation
self.RTTVar = self.estRTT / 2 # RTT variance
# self.got_data = False
_next_id = 1
def _get_next_id():
result = Sender._next_id
Sender._next_id += 1
return result
def apply_rate_delta(self, delta):
# if self.got_data:
delta *= self.delta_scale
#print("Applying delta %f" % delta)
if delta >= 0.0:
self.set_rate(self.rate * (1.0 + delta))
else:
self.set_rate(self.rate / (1.0 - delta))
def apply_cwnd_delta(self, delta):
delta *= self.delta_scale
#print("Applying delta %f" % delta)
if delta >= 0.0:
self.set_cwnd(self.cwnd * (1.0 + delta))
else:
self.set_cwnd(self.cwnd / (1.0 - delta))
def can_send_packet(self):
if self.use_cwnd:
return int(self.bytes_in_flight) / BYTES_PER_PACKET < self.cwnd
else:
return True
def register_network(self, net):
self.net = net
def on_packet_sent(self):
self.sent += 1
self.bytes_in_flight += BYTES_PER_PACKET
def on_packet_acked(self, rtt):
self.estRTT = (7.0 * self.estRTT + rtt) / 8.0 # RTT of emulation way
self.RTTVar = (self.RTTVar * 7.0 + abs(rtt - self.estRTT) * 1.0) / 8.0
self.acked += 1
self.rtt_samples.append(rtt)
# self.rtt_samples.append(self.estRTT)
if (self.min_latency is None) or (rtt < self.min_latency):
self.min_latency = rtt
self.bytes_in_flight -= BYTES_PER_PACKET
def on_packet_lost(self, rtt):
self.lost += 1
self.bytes_in_flight -= BYTES_PER_PACKET
def set_rate(self, new_rate):
self.rate = new_rate
# print("Attempt to set new rate to %f (min %f, max %f)" % (new_rate, MIN_RATE, MAX_RATE))
if self.rate > MAX_RATE:
self.rate = MAX_RATE
if self.rate < MIN_RATE:
self.rate = MIN_RATE
def set_cwnd(self, new_cwnd):
self.cwnd = int(new_cwnd)
#print("Attempt to set new rate to %f (min %f, max %f)" % (new_rate, MIN_RATE, MAX_RATE))
# if self.cwnd > MAX_CWND:
# self.cwnd = MAX_CWND
# if self.cwnd < MIN_CWND:
# self.cwnd = MIN_CWND
def record_run(self):
smi = self.get_run_data()
# if not self.got_data and smi.rtt_samples:
# self.got_data = True
# self.history.step(smi)
# else:
self.history.step(smi)
def get_obs(self):
return self.history.as_array()
def get_run_data(self):
obs_end_time = self.net.get_cur_time()
#obs_dur = obs_end_time - self.obs_start_time
#print("Got %d acks in %f seconds" % (self.acked, obs_dur))
#print("Sent %d packets in %f seconds" % (self.sent, obs_dur))
#print("self.rate = %f" % self.rate)
# print(self.acked, self.sent)
rtt_samples = self.rtt_samples if self.rtt_samples else self.prev_rtt_samples
# if not self.rtt_samples:
# print(self.obs_start_time, obs_end_time, self.rate)
# rtt_samples is empty when there is no packet acked in MI
# Solution: inherit from previous rtt_samples.
return sender_obs.SenderMonitorInterval(
self.id,
bytes_sent=self.sent * BYTES_PER_PACKET,
bytes_acked=self.acked * BYTES_PER_PACKET,
bytes_lost=self.lost * BYTES_PER_PACKET,
send_start=self.obs_start_time,
send_end=obs_end_time,
recv_start=self.obs_start_time,
recv_end=obs_end_time,
            rtt_samples=rtt_samples,
queue_delay_samples=self.queue_delay_samples,
packet_size=BYTES_PER_PACKET
)
def reset_obs(self):
self.sent = 0
self.acked = 0
self.lost = 0
if self.rtt_samples:
self.prev_rtt_samples = self.rtt_samples
self.rtt_samples = []
self.queue_delay_samples = []
self.obs_start_time = self.net.get_cur_time()
def print_debug(self):
print("Sender:")
print("Obs: %s" % str(self.get_obs()))
print("Rate: %f" % self.rate)
print("Sent: %d" % self.sent)
print("Acked: %d" % self.acked)
print("Lost: %d" % self.lost)
print("Min Latency: %s" % str(self.min_latency))
def reset(self):
#print("Resetting sender!")
self.rate = self.starting_rate
self.bytes_in_flight = 0
self.min_latency = None
self.reset_obs()
self.history = sender_obs.SenderHistory(self.history_len,
self.features, self.id)
self.estRTT = 1000000 / 1e6 # SynInterval in emulation
self.RTTVar = self.estRTT / 2 # RTT variance
# self.got_data = False
def timeout(self):
# placeholder
pass
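# Editor's sketch of the action mapping: apply_rate_delta scales the raw action by
# delta_scale and then applies a multiplicative update, so a delta of +x followed by
# -x returns the rate to its original value.
def _example_rate_delta(rate=100.0, delta=0.5, delta_scale=1.0):
    # Mirrors Sender.apply_rate_delta / set_rate without constructing a full Sender
    # (which needs a sender_obs.SenderHistory); rates are in packets per second.
    delta *= delta_scale
    new_rate = rate * (1.0 + delta) if delta >= 0.0 else rate / (1.0 - delta)
    return min(MAX_RATE, max(MIN_RATE, new_rate))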
class SimulatedNetworkEnv(gym.Env):
def __init__(self, traces, history_len=10,
features="sent latency inflation,latency ratio,send ratio",
congestion_control_type="aurora", train_flag=False,
delta_scale=1.0):
"""Network environment used in simulation.
congestion_control_type: aurora is pcc-rl. cubic is TCPCubic.
"""
assert congestion_control_type in {"aurora", "cubic"}, \
"Unrecognized congestion_control_type {}.".format(
congestion_control_type)
# self.replay = EmuReplay()
self.delta_scale = delta_scale
self.traces = traces
self.current_trace = np.random.choice(self.traces)
self.train_flag = train_flag
self.congestion_control_type = congestion_control_type
if self.congestion_control_type == 'aurora':
self.use_cwnd = False
elif self.congestion_control_type == 'cubic':
self.use_cwnd = True
self.history_len = history_len
# print("History length: %d" % history_len)
self.features = features.split(",")
# print("Features: %s" % str(self.features))
self.links = None
self.senders = None
self.create_new_links_and_senders()
self.net = Network(self.senders, self.links, self)
self.run_dur = None
self.run_period = 0.1
self.steps_taken = 0
self.debug_thpt_changes = False
self.last_thpt = None
self.last_rate = None
if self.use_cwnd:
self.action_space = spaces.Box(
np.array([-1e12, -1e12]), np.array([1e12, 1e12]), dtype=np.float32)
else:
self.action_space = spaces.Box(
np.array([-1e12]), np.array([1e12]), dtype=np.float32)
self.observation_space = None
# use_only_scale_free = True
single_obs_min_vec = sender_obs.get_min_obs_vector(self.features)
single_obs_max_vec = sender_obs.get_max_obs_vector(self.features)
self.observation_space = spaces.Box(np.tile(single_obs_min_vec, self.history_len),
np.tile(single_obs_max_vec,
self.history_len),
dtype=np.float32)
self.reward_sum = 0.0
self.reward_ewma = 0.0
self.episodes_run = -1
def seed(self, seed=None):
self.rand, seed = seeding.np_random(seed)
return [seed]
def _get_all_sender_obs(self):
sender_obs = self.senders[0].get_obs()
sender_obs = np.array(sender_obs).reshape(-1,)
return sender_obs
def step(self, actions):
#print("Actions: %s" % str(actions))
# print(actions)
for i in range(0, 1): # len(actions)):
#print("Updating rate for sender %d" % i)
action = actions
self.senders[i].apply_rate_delta(action[0])
if self.use_cwnd:
self.senders[i].apply_cwnd_delta(action[1])
# print("Running for %fs" % self.run_dur)
reward = self.net.run_for_dur(self.run_dur, action=actions[0])
self.steps_taken += 1
sender_obs = self._get_all_sender_obs()
should_stop = self.current_trace.is_finished(self.net.get_cur_time())
self.reward_sum += reward
# print('env step: {}s'.format(time.time() - t_start))
return sender_obs, reward, should_stop, {}
def print_debug(self):
print("---Link Debug---")
for link in self.links:
link.print_debug()
print("---Sender Debug---")
for sender in self.senders:
sender.print_debug()
def create_new_links_and_senders(self):
# self.replay.reset()
self.links = [Link(self.current_trace), Link(self.current_trace)]
if self.congestion_control_type == "aurora":
if not self.train_flag:
self.senders = [Sender( #self.replay.get_rate(),
# 2500000 / 8 /BYTES_PER_PACKET / 0.048,
# 12000000 / 8 /BYTES_PER_PACKET / 0.048,
# 10 / (self.current_trace.get_delay(0) *2/1000),
100,
[self.links[0], self.links[1]], 0,
self.features,
history_len=self.history_len,
delta_scale=self.delta_scale)]
else:
# self.senders = [Sender(random.uniform(0.3, 1.5) * bw,
# [self.links[0], self.links[1]], 0,
# self.features,
# history_len=self.history_len)]
# self.senders = [Sender(random.uniform(10/bw, 1.5) * bw,
# [self.links[0], self.links[1]], 0,
# self.features,
# history_len=self.history_len,
# delta_scale=self.delta_scale)]
self.senders = [Sender(100,
[self.links[0], self.links[1]], 0,
self.features,
history_len=self.history_len,
delta_scale=self.delta_scale)]
elif self.congestion_control_type == "cubic":
raise NotImplementedError
else:
raise RuntimeError("Unrecognized congestion_control_type {}".format(
self.congestion_control_type))
# self.run_dur = 3 * lat
# self.run_dur = 1 * lat
if not self.senders[0].rtt_samples:
# self.run_dur = 0.473
# self.run_dur = 5 / self.senders[0].rate
self.run_dur = 0.01
# self.run_dur = self.current_trace.get_delay(0) * 2 / 1000
# self.run_dur = self.replay.get_ts() - 0
def reset(self):
self.steps_taken = 0
self.net.reset()
self.current_trace = np.random.choice(self.traces)
self.current_trace.reset()
self.create_new_links_and_senders()
self.net = Network(self.senders, self.links, self)
self.episodes_run += 1
# self.replay.reset()
self.net.run_for_dur(self.run_dur)
self.reward_ewma *= 0.99
self.reward_ewma += 0.01 * self.reward_sum
# print("Reward: %0.2f, Ewma Reward: %0.2f" % (self.reward_sum, self.reward_ewma))
self.reward_sum = 0.0
return self._get_all_sender_obs()
register(id='PccNs-v0', entry_point='simulator.network:SimulatedNetworkEnv')
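# Editor's sketch of driving the environment registered above. It assumes `trace` is
# a simulator.trace.Trace instance loaded elsewhere (the Trace constructor is not
# shown in this file) and applies the zero action, i.e. "keep the current rate".
def _example_rollout(trace, n_steps=10):
    env = SimulatedNetworkEnv([trace])
    obs = env.reset()
    total_reward = 0.0
    for _ in range(n_steps):
        obs, reward, done, info = env.step(np.array([0.0]))
        total_reward += reward
        if done:
            break
    return total_reward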
| 0
| 0
| 0
| 23,125
| 0
| 62
| 0
| -21
| 471
|
e2b17755e0aaa5b3a5cbb71d2ff79a60e5f99eea
| 3,150
|
py
|
Python
|
cheddar_oauth_example/settings.py
|
brianbrunner/cheddar-oauth-demo
|
7768023a355d9cdc2e861aded2c05ebe3246c930
|
[
"MIT"
] | 1
|
2015-05-26T18:21:32.000Z
|
2015-05-26T18:21:32.000Z
|
cheddar_oauth_example/settings.py
|
brianbrunner/cheddar-oauth-demo
|
7768023a355d9cdc2e861aded2c05ebe3246c930
|
[
"MIT"
] | null | null | null |
cheddar_oauth_example/settings.py
|
brianbrunner/cheddar-oauth-demo
|
7768023a355d9cdc2e861aded2c05ebe3246c930
|
[
"MIT"
] | null | null | null |
"""
Django settings for cheddar_oauth_example project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '43cy=fmsak_xqkme&yi9@c^+-*0pvr%s+-of!yzx6rdiw*!bxt'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'social.apps.django_app.default',
'django.contrib.humanize',
'app',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'cheddar_oauth_example.urls'
WSGI_APPLICATION = 'cheddar_oauth_example.wsgi.application'
AUTHENTICATION_BACKENDS = (
'oauth.cheddar.CheddarOAuth2',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
)
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
# SOCIAL_AUTH_CHEDDAR_SCOPE = []
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/'
SOCIAL_AUTH_LOGIN_ERROR_URL = '/login_error'
# Logging
LOGGING = {
'version': 1,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
}
}
# Import Local Settings
try:
    from local_settings import *
except ImportError as e:
    print("FAILED TO IMPORT LOCAL SETTINGS: %s" % e)
| 23.333333
| 89
| 0.699365
|
"""
Django settings for cheddar_oauth_example project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '43cy=fmsak_xqkme&yi9@c^+-*0pvr%s+-of!yzx6rdiw*!bxt'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'social.apps.django_app.default',
'django.contrib.humanize',
'app',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'cheddar_oauth_example.urls'
WSGI_APPLICATION = 'cheddar_oauth_example.wsgi.application'
AUTHENTICATION_BACKENDS = (
'oauth.cheddar.CheddarOAuth2',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
)
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
# SOCIAL_AUTH_CHEDDAR_SCOPE = []
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/'
SOCIAL_AUTH_LOGIN_ERROR_URL = '/login_error'
# Logging
LOGGING = {
'version': 1,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
}
}
# Import Local Settings
try:
from local_settings import *
except ImportError as e:
print "FAILED TO IMPORT LOCAL SETTINGS: %s" % e
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 26
|
1097c97b6b77b2f181e0c5a9531a0851278011cb
| 470
|
py
|
Python
|
django101/django102/urls.py
|
Minkov/python-web-2020-09
|
a43baf4dd4dd811caf25aad971a0f1a4d3d486a4
|
[
"MIT"
] | 4
|
2020-10-30T23:13:50.000Z
|
2020-12-26T21:35:00.000Z
|
django101/django102/urls.py
|
Minkov/python-web-2020-09
|
a43baf4dd4dd811caf25aad971a0f1a4d3d486a4
|
[
"MIT"
] | null | null | null |
django101/django102/urls.py
|
Minkov/python-web-2020-09
|
a43baf4dd4dd811caf25aad971a0f1a4d3d486a4
|
[
"MIT"
] | 7
|
2020-09-17T13:08:35.000Z
|
2020-10-31T15:01:46.000Z
|
from django.urls import path
from django102.views import index as index_view, UsersListView, GamesListView, something, methods_demo, raises_exception, create_game
urlpatterns = [
path('', index_view, name='index'),
path('2/', UsersListView.as_view()),
path('games/', GamesListView.as_view()),
path('smth/', something),
path('methods/', methods_demo),
path('raises/', raises_exception),
path('creategame/', create_game),
]
| 33.571429
| 106
| 0.676596
|
from django.urls import path
from django102.views import index as index_view, UsersListView, GamesListView, something, methods_demo, \
raises_exception, create_game
urlpatterns = [
path('', index_view, name='index'),
path('2/', UsersListView.as_view()),
path('games/', GamesListView.as_view()),
path('smth/', something),
path('methods/', methods_demo),
path('raises/', raises_exception),
path('creategame/', create_game),
]
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 0
|