Dataset schema (column, dtype, observed range)

column                                       dtype           min        max
hexsha                                       stringlengths   40         40
size                                         int64           6          1.04M
ext                                          stringclasses   10 values
lang                                         stringclasses   1 value
max_stars_repo_path                          stringlengths   4          247
max_stars_repo_name                          stringlengths   4          130
max_stars_repo_head_hexsha                   stringlengths   40         78
max_stars_repo_licenses                      listlengths     1          10
max_stars_count                              int64           1          368k
max_stars_repo_stars_event_min_datetime      stringlengths   24         24
max_stars_repo_stars_event_max_datetime      stringlengths   24         24
max_issues_repo_path                         stringlengths   4          247
max_issues_repo_name                         stringlengths   4          130
max_issues_repo_head_hexsha                  stringlengths   40         78
max_issues_repo_licenses                     listlengths     1          10
max_issues_count                             int64           1          116k
max_issues_repo_issues_event_min_datetime    stringlengths   24         24
max_issues_repo_issues_event_max_datetime    stringlengths   24         24
max_forks_repo_path                          stringlengths   4          247
max_forks_repo_name                          stringlengths   4          130
max_forks_repo_head_hexsha                   stringlengths   40         78
max_forks_repo_licenses                      listlengths     1          10
max_forks_count                              int64           1          105k
max_forks_repo_forks_event_min_datetime      stringlengths   24         24
max_forks_repo_forks_event_max_datetime      stringlengths   24         24
content                                      stringlengths   1          1.04M
avg_line_length                              float64         1.53       618k
max_line_length                              int64           1          1.02M
alphanum_fraction                            float64         0          1
original_content                             stringlengths   6          1.04M
filtered:remove_non_ascii                    int64           0          538k
filtered:remove_decorators                   int64           0          917k
filtered:remove_async                        int64           0          722k
filtered:remove_classes                      int64           -45        1M
filtered:remove_generators                   int64           0          814k
filtered:remove_function_no_docstring        int64           -102       850k
filtered:remove_class_no_docstring           int64           -3         5.46k
filtered:remove_unused_imports               int64           -1,350     52.4k
filtered:remove_delete_markers               int64           0          59.6k
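Rows with this schema can be loaded and inspected programmatically. The sketch below assumes the dump comes from a Hugging Face datasets-style dataset; the dataset identifier org/filtered-python-code is a placeholder, not the real name of this dump.

# Minimal inspection sketch. The dataset path below is a placeholder,
# not the real identifier of this dump.
from datasets import load_dataset

ds = load_dataset("org/filtered-python-code", split="train", streaming=True)
row = next(iter(ds))

print(sorted(row.keys()))                    # column names as listed in the schema
print(row["ext"], row["lang"], row["size"])  # e.g. "py", "Python", 5483
print(row["content"][:200])                  # first characters of the filtered source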

Record 1: 3_dataset_create.py (shivanirmishra/musicgenre)
hexsha: 699f409bdd5d561bb93770f28b38f939f53fc421
size: 5,483   ext: py   lang: Python
max_stars:  path=3_dataset_create.py  name=shivanirmishra/musicgenre  head_hexsha=954214b6f7756c05de1253702811fd69dd99b0e2  licenses=[ "MIT" ]  count=null  stars_event_min=null  stars_event_max=null
max_issues: path=3_dataset_create.py  name=shivanirmishra/musicgenre  head_hexsha=954214b6f7756c05de1253702811fd69dd99b0e2  licenses=[ "MIT" ]  count=null  issues_event_min=null  issues_event_max=null
max_forks:  path=3_dataset_create.py  name=shivanirmishra/musicgenre  head_hexsha=954214b6f7756c05de1253702811fd69dd99b0e2  licenses=[ "MIT" ]  count=null  forks_event_min=null  forks_event_max=null
content:
from google.colab import drive drive.mount('/content/drive') import librosa import os import pandas as pd from numpy import mean import warnings; warnings.filterwarnings('ignore'); folders_5s = { 'pop_5s':'/content/drive/My Drive/ML_Project/New_Data/pop_test_5s', 'rnb_5s':'/content/drive/My Drive/ML_Project/New_Data/rnb_test_5s', 'blues_5s':'/content/drive/My Drive/ML_Project/New_Data/blues_test_5s', 'hiphop_5s':'/content/drive/My Drive/ML_Project/New_Data/hiphop_test_5s', 'rock_5s':'/content/drive/My Drive/ML_Project/New_Data/rock_test_5s' } folders_10s = { 'pop_10s':'/content/drive/My Drive/ML_Project/New_Data/pop_test_10s', 'rnb_10s':'/content/drive/My Drive/ML_Project/New_Data/rnb_test_10s', 'blues_10s':'/content/drive/My Drive/ML_Project/New_Data/blues_test_10s', 'hiphop_10s':'/content/drive/My Drive/ML_Project/New_Data/hiphop_test_10s', 'rock_10s':'/content/drive/My Drive/ML_Project/New_Data/rock_test_10s' } folders_20s = { 'pop_20s':'/content/drive/My Drive/ML_Project/New_Data/pop_test_20s', 'rnb_20s':'/content/drive/My Drive/ML_Project/New_Data/rnb_test_20s', 'blues_20s':'/content/drive/My Drive/ML_Project/New_Data/blues_test_20s', 'hiphop_20s':'/content/drive/My Drive/ML_Project/New_Data/hiphop_test_20s', 'rock_20s':'/content/drive/My Drive/ML_Project/New_Data/rock_test_20s' } label = { 'pop_5s': 0, 'rnb_5s': 1, 'blues_5s': 2, 'hiphop_5s': 3, 'rock_5s': 4, 'pop_10s': 0, 'rnb_10s': 1, 'blues_10s': 2, 'hiphop_10s': 3, 'rock_10s': 4, 'pop_20s': 0, 'rnb_20s': 1, 'blues_20s': 2, 'hiphop_20s': 3, 'rock_20s': 4 } data_5s = [] data_10s = [] data_20s = [] for name, path in folders_5s.items(): #count_5s = 3000 for filename in os.listdir(path): # if(count_5s == 0): # break songData = [] songname = f'{path}/{filename}' y, sr = librosa.load(songname, mono=True) tempo, beats = librosa.beat.beat_track(y=y, sr=sr) songData.append(tempo) songData.append(mean(beats)) chroma_stft = librosa.feature.chroma_stft(y=y, sr=sr) songData.append(mean(chroma_stft)) rmse = librosa.feature.rmse(y=y) songData.append(mean(rmse)) spec_cent = librosa.feature.spectral_centroid(y=y, sr=sr) songData.append(mean(spec_cent)) spec_bw = librosa.feature.spectral_bandwidth(y=y, sr=sr) songData.append(mean(spec_bw)) rolloff = librosa.feature.spectral_rolloff(y=y, sr=sr) songData.append(mean(rolloff)) zcr = librosa.feature.zero_crossing_rate(y) songData.append(mean(zcr)) mfcc = librosa.feature.mfcc(y=y, sr=sr) for i in mfcc: songData.append(mean(i)) songData.append(label[name]) data_5s.append(songData) #count_5s -= 1 for name, path in folders_10s.items(): #count_10s = 1500 for filename in os.listdir(path): # if(count_10s == 0): # break songData = [] songname = f'{path}/{filename}' y, sr = librosa.load(songname, mono=True) tempo, beats = librosa.beat.beat_track(y=y, sr=sr) songData.append(tempo) songData.append(mean(beats)) chroma_stft = librosa.feature.chroma_stft(y=y, sr=sr) songData.append(mean(chroma_stft)) rmse = librosa.feature.rmse(y=y) songData.append(mean(rmse)) spec_cent = librosa.feature.spectral_centroid(y=y, sr=sr) songData.append(mean(spec_cent)) spec_bw = librosa.feature.spectral_bandwidth(y=y, sr=sr) songData.append(mean(spec_bw)) rolloff = librosa.feature.spectral_rolloff(y=y, sr=sr) songData.append(mean(rolloff)) zcr = librosa.feature.zero_crossing_rate(y) songData.append(mean(zcr)) mfcc = librosa.feature.mfcc(y=y, sr=sr) for i in mfcc: songData.append(mean(i)) songData.append(label[name]) data_10s.append(songData) #count_10s -= 1 for name, path in folders_20s.items(): #count_20s = 900 for filename in 
os.listdir(path): # if(count_20s == 0): # break songData = [] songname = f'{path}/{filename}' y, sr = librosa.load(songname, mono=True) tempo, beats = librosa.beat.beat_track(y=y, sr=sr) songData.append(tempo) songData.append(mean(beats)) chroma_stft = librosa.feature.chroma_stft(y=y, sr=sr) songData.append(mean(chroma_stft)) rmse = librosa.feature.rmse(y=y) songData.append(mean(rmse)) spec_cent = librosa.feature.spectral_centroid(y=y, sr=sr) songData.append(mean(spec_cent)) spec_bw = librosa.feature.spectral_bandwidth(y=y, sr=sr) songData.append(mean(spec_bw)) rolloff = librosa.feature.spectral_rolloff(y=y, sr=sr) songData.append(mean(rolloff)) zcr = librosa.feature.zero_crossing_rate(y) songData.append(mean(zcr)) mfcc = librosa.feature.mfcc(y=y, sr=sr) for i in mfcc: songData.append(mean(i)) songData.append(label[name]) data_20s.append(songData) #count_20s -= 1 data_5s = pd.DataFrame(data_5s) data_5s.to_csv('/content/drive/My Drive/ML_Project/data_5s_test_all_genres.csv') data_10s = pd.DataFrame(data_10s) data_10s.to_csv('/content/drive/My Drive/ML_Project/data_10s_test_all_genres.csv') data_20s = pd.DataFrame(data_20s) data_20s.to_csv('/content/drive/My Drive/ML_Project/data_20s_test_all_genres.csv') data_10s
avg_line_length: 28.857895   max_line_length: 89   alphanum_fraction: 0.666423
original_content:
from google.colab import drive drive.mount('/content/drive') import librosa import os import pandas as pd from numpy import mean import warnings; warnings.filterwarnings('ignore'); folders_5s = { 'pop_5s':'/content/drive/My Drive/ML_Project/New_Data/pop_test_5s', 'rnb_5s':'/content/drive/My Drive/ML_Project/New_Data/rnb_test_5s', 'blues_5s':'/content/drive/My Drive/ML_Project/New_Data/blues_test_5s', 'hiphop_5s':'/content/drive/My Drive/ML_Project/New_Data/hiphop_test_5s', 'rock_5s':'/content/drive/My Drive/ML_Project/New_Data/rock_test_5s' } folders_10s = { 'pop_10s':'/content/drive/My Drive/ML_Project/New_Data/pop_test_10s', 'rnb_10s':'/content/drive/My Drive/ML_Project/New_Data/rnb_test_10s', 'blues_10s':'/content/drive/My Drive/ML_Project/New_Data/blues_test_10s', 'hiphop_10s':'/content/drive/My Drive/ML_Project/New_Data/hiphop_test_10s', 'rock_10s':'/content/drive/My Drive/ML_Project/New_Data/rock_test_10s' } folders_20s = { 'pop_20s':'/content/drive/My Drive/ML_Project/New_Data/pop_test_20s', 'rnb_20s':'/content/drive/My Drive/ML_Project/New_Data/rnb_test_20s', 'blues_20s':'/content/drive/My Drive/ML_Project/New_Data/blues_test_20s', 'hiphop_20s':'/content/drive/My Drive/ML_Project/New_Data/hiphop_test_20s', 'rock_20s':'/content/drive/My Drive/ML_Project/New_Data/rock_test_20s' } label = { 'pop_5s': 0, 'rnb_5s': 1, 'blues_5s': 2, 'hiphop_5s': 3, 'rock_5s': 4, 'pop_10s': 0, 'rnb_10s': 1, 'blues_10s': 2, 'hiphop_10s': 3, 'rock_10s': 4, 'pop_20s': 0, 'rnb_20s': 1, 'blues_20s': 2, 'hiphop_20s': 3, 'rock_20s': 4 } data_5s = [] data_10s = [] data_20s = [] for name, path in folders_5s.items(): #count_5s = 3000 for filename in os.listdir(path): # if(count_5s == 0): # break songData = [] songname = f'{path}/{filename}' y, sr = librosa.load(songname, mono=True) tempo, beats = librosa.beat.beat_track(y=y, sr=sr) songData.append(tempo) songData.append(mean(beats)) chroma_stft = librosa.feature.chroma_stft(y=y, sr=sr) songData.append(mean(chroma_stft)) rmse = librosa.feature.rmse(y=y) songData.append(mean(rmse)) spec_cent = librosa.feature.spectral_centroid(y=y, sr=sr) songData.append(mean(spec_cent)) spec_bw = librosa.feature.spectral_bandwidth(y=y, sr=sr) songData.append(mean(spec_bw)) rolloff = librosa.feature.spectral_rolloff(y=y, sr=sr) songData.append(mean(rolloff)) zcr = librosa.feature.zero_crossing_rate(y) songData.append(mean(zcr)) mfcc = librosa.feature.mfcc(y=y, sr=sr) for i in mfcc: songData.append(mean(i)) songData.append(label[name]) data_5s.append(songData) #count_5s -= 1 for name, path in folders_10s.items(): #count_10s = 1500 for filename in os.listdir(path): # if(count_10s == 0): # break songData = [] songname = f'{path}/{filename}' y, sr = librosa.load(songname, mono=True) tempo, beats = librosa.beat.beat_track(y=y, sr=sr) songData.append(tempo) songData.append(mean(beats)) chroma_stft = librosa.feature.chroma_stft(y=y, sr=sr) songData.append(mean(chroma_stft)) rmse = librosa.feature.rmse(y=y) songData.append(mean(rmse)) spec_cent = librosa.feature.spectral_centroid(y=y, sr=sr) songData.append(mean(spec_cent)) spec_bw = librosa.feature.spectral_bandwidth(y=y, sr=sr) songData.append(mean(spec_bw)) rolloff = librosa.feature.spectral_rolloff(y=y, sr=sr) songData.append(mean(rolloff)) zcr = librosa.feature.zero_crossing_rate(y) songData.append(mean(zcr)) mfcc = librosa.feature.mfcc(y=y, sr=sr) for i in mfcc: songData.append(mean(i)) songData.append(label[name]) data_10s.append(songData) #count_10s -= 1 for name, path in folders_20s.items(): #count_20s = 900 for filename in 
os.listdir(path): # if(count_20s == 0): # break songData = [] songname = f'{path}/{filename}' y, sr = librosa.load(songname, mono=True) tempo, beats = librosa.beat.beat_track(y=y, sr=sr) songData.append(tempo) songData.append(mean(beats)) chroma_stft = librosa.feature.chroma_stft(y=y, sr=sr) songData.append(mean(chroma_stft)) rmse = librosa.feature.rmse(y=y) songData.append(mean(rmse)) spec_cent = librosa.feature.spectral_centroid(y=y, sr=sr) songData.append(mean(spec_cent)) spec_bw = librosa.feature.spectral_bandwidth(y=y, sr=sr) songData.append(mean(spec_bw)) rolloff = librosa.feature.spectral_rolloff(y=y, sr=sr) songData.append(mean(rolloff)) zcr = librosa.feature.zero_crossing_rate(y) songData.append(mean(zcr)) mfcc = librosa.feature.mfcc(y=y, sr=sr) for i in mfcc: songData.append(mean(i)) songData.append(label[name]) data_20s.append(songData) #count_20s -= 1 data_5s = pd.DataFrame(data_5s) data_5s.to_csv('/content/drive/My Drive/ML_Project/data_5s_test_all_genres.csv') data_10s = pd.DataFrame(data_10s) data_10s.to_csv('/content/drive/My Drive/ML_Project/data_10s_test_all_genres.csv') data_20s = pd.DataFrame(data_20s) data_20s.to_csv('/content/drive/My Drive/ML_Project/data_20s_test_all_genres.csv') data_10s
filtered: remove_non_ascii=0  remove_decorators=0  remove_async=0  remove_classes=0  remove_generators=0  remove_function_no_docstring=0  remove_class_no_docstring=0  remove_unused_imports=0  remove_delete_markers=0
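In this first record every filtered:* counter is zero and content matches original_content byte for byte. The sketch below is a minimal sanity check of that relationship, assuming a row is available as a plain Python dict keyed by the schema's column names; the equivalence is inferred from this record, not a documented guarantee of the pipeline.

# Minimal sanity check, assuming `row` is a dict with the columns above.
FILTER_COLUMNS = [
    "filtered:remove_non_ascii",
    "filtered:remove_decorators",
    "filtered:remove_async",
    "filtered:remove_classes",
    "filtered:remove_generators",
    "filtered:remove_function_no_docstring",
    "filtered:remove_class_no_docstring",
    "filtered:remove_unused_imports",
    "filtered:remove_delete_markers",
]

def untouched(row):
    """True when no filter reports any change for this row."""
    return all(row[col] == 0 for col in FILTER_COLUMNS)

# For a row like the one above (all counters zero), the filtered and original
# sources are expected to coincide:
# if untouched(row):
#     assert row["content"] == row["original_content"]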

Record 2: data/train_test_split.py (ttaoREtw/A-Pytorch-Implementation-of-Tacotron-End-to-end-Text-to-speech-Deep-Learning-Model)
hexsha: 412a8b42be8c6054311e076c95465833bdd45355
size: 1,206   ext: py   lang: Python
max_stars:  path=data/train_test_split.py  name=ttaoREtw/A-Pytorch-Implementation-of-Tacotron-End-to-end-Text-to-speech-Deep-Learning-Model  head_hexsha=6b0f615cafb0530370631a880aac5736fe9a2c64  licenses=[ "MIT" ]  count=105  stars_event_min=2018-09-13T02:45:10.000Z  stars_event_max=2021-06-24T03:31:15.000Z
max_issues: path=data/train_test_split.py  name=henryhenrychen/Tacotron-pytorch  head_hexsha=4a4d1ea0d83fd88a50464999f5d55fe012c86687  licenses=[ "MIT" ]  count=9  issues_event_min=2018-12-11T02:37:58.000Z  issues_event_max=2021-03-18T02:42:40.000Z
max_forks:  path=data/train_test_split.py  name=henryhenrychen/Tacotron-pytorch  head_hexsha=4a4d1ea0d83fd88a50464999f5d55fe012c86687  licenses=[ "MIT" ]  count=31  forks_event_min=2018-09-15T14:51:31.000Z  forks_event_max=2021-01-19T07:37:14.000Z
content:
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Split the data')
    parser.add_argument('--meta-all', type=str,
                        help='The meta file generated by preprocess.py', required=True)
    parser.add_argument('--ratio-test', default=0.1, type=float,
                        help='ratio of testing examples', required=False)
    args = parser.parse_args()
    split_and_save(args)
avg_line_length: 31.736842   max_line_length: 114   alphanum_fraction: 0.662521
original_content:
import os
import argparse
import random


def split_and_save(args):
    meta_all_path = args.meta_all
    meta_dir = os.path.dirname(os.path.realpath(meta_all_path))
    meta_tr_path = os.path.join(meta_dir, 'meta_train.txt')
    meta_te_path = os.path.join(meta_dir, 'meta_test.txt')
    with open(meta_all_path) as f:
        meta_all = f.readlines()
    meta_tr = []
    meta_te = []
    n_meta = len(meta_all)
    n_test = int(args.ratio_test * n_meta)
    indice_te = random.sample(range(n_meta), n_test)
    for idx, line in enumerate(meta_all):
        if idx in indice_te:
            meta_te.append(line)
        else:
            meta_tr.append(line)
    with open(meta_tr_path, 'w') as ftr:
        ftr.write(''.join(meta_tr))
    with open(meta_te_path, 'w') as fte:
        fte.write(''.join(meta_te))


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Split the data')
    parser.add_argument('--meta-all', type=str,
                        help='The meta file generated by preprocess.py', required=True)
    parser.add_argument('--ratio-test', default=0.1, type=float,
                        help='ratio of testing examples', required=False)
    args = parser.parse_args()
    split_and_save(args)
filtered: remove_non_ascii=0  remove_decorators=0  remove_async=0  remove_classes=0  remove_generators=0  remove_function_no_docstring=762  remove_class_no_docstring=0  remove_unused_imports=-20  remove_delete_markers=67
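For orientation, the split script above would typically be invoked from the command line with the two arguments its parser defines. The sketch below is illustrative only: the metadata path data/meta_all.txt is an assumed name for the file produced by preprocess.py.

# Illustrative invocation of data/train_test_split.py; the input path is assumed.
import subprocess

subprocess.run(
    ["python", "data/train_test_split.py",
     "--meta-all", "data/meta_all.txt",   # metadata file produced by preprocess.py (assumed name)
     "--ratio-test", "0.1"],              # hold out 10% of the lines for testing
    check=True,
)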

Record 3: item_sets.py (jay-maity/RecommendPCY)
hexsha: 4d36c18720eb25777d76206398891b1da5c803d3
size: 10,711   ext: py   lang: Python
max_stars:  path=item_sets.py  name=jay-maity/RecommendPCY  head_hexsha=040eda27be46d241406d3cb8ce6605dde492fef9  licenses=[ "MIT" ]  count=null  stars_event_min=null  stars_event_max=null
max_issues: path=item_sets.py  name=jay-maity/RecommendPCY  head_hexsha=040eda27be46d241406d3cb8ce6605dde492fef9  licenses=[ "MIT" ]  count=null  issues_event_min=null  issues_event_max=null
max_forks:  path=item_sets.py  name=jay-maity/RecommendPCY  head_hexsha=040eda27be46d241406d3cb8ce6605dde492fef9  licenses=[ "MIT" ]  count=null  forks_event_min=null  forks_event_max=null
content:
""" Frequent item discovery by PCY algorithm""" import sys cluster = None session = None def main(): """ Handles parameters for the file to run :return: """ input_path = sys.argv[1] output_path = sys.argv[2] support_thresold = int(sys.argv[3]) broadcast = 1 if len(sys.argv) > 4: broadcast = int(sys.argv[4]) pcy = PCYFrequentItems(is_debug=True) if broadcast == 1: is_broadcast = True else: is_broadcast = False pcy.frequent_items(input_path, output_path, support_thresold, is_broadcast) if __name__ == "__main__": main()
avg_line_length: 34.220447   max_line_length: 98   alphanum_fraction: 0.527588
original_content:
""" Frequent item discovery by PCY algorithm""" import operator import json import sys from pyspark import SparkContext, SparkConf import pyspark_cassandra from cassandra.cluster import Cluster cluster = None session = None class PCYFrequentItems: """ Find Frequent item list using PCY algorithm """ IS_DEBUGGING = False config_object = None def __init__(self, is_debug, config_file="config.json"): """ Sets the initial debiggin parameter :param is_debug: Print collect messages if set true """ self.IS_DEBUGGING = is_debug json_data = open(config_file).read() self.config_object = json.loads(json_data) @staticmethod def group_items(basket, group_size): """ Get item_groups from a basket Returns sorted items by their numerical number :param basket: Basket to search the item_group from (could be a single cart) :param group_size: Size of the item_group to form :return: """ assert (group_size >= 1 and isinstance(group_size, int)), \ "Please use group size as Integer and > 0" # In case of group size is one simply return each items if group_size == 1: return [(item,) for item in basket] item_groups = [] if len(basket) >= group_size: # Sort the basket basket = sorted(basket) # Loop through the basket for i in range(len(basket) - group_size + 1): # Gets the base and add all items for each group # until end # If base is [2,3] and basket is [2,3,4,5] # then creates [2,3,4], [2,3,5] base_item_count = i + (group_size - 1) base_items = basket[i:base_item_count] for item in basket[base_item_count:]: item_groups.append(tuple(base_items) + (item,)) return item_groups @staticmethod def map_nodes(line): """ Map line into graph node key = value as array """ key_values = line.split(":") # key = int(key_values[0]) values = [] if key_values[1].strip() != "": values = [int(node) for node in key_values[1].strip().split(' ')] return values @staticmethod def filter_pairs(pair, hosts, keyspace, hashfunction, item_table, bitmap_table): """ Filter pairs by querying from cassandra table :return: """ global cluster, session if cluster is None: cluster = Cluster(hosts) session = cluster.connect(keyspace) item1 = session.execute("select item from " + item_table + " where item = %d" % pair[0]) item2 = session.execute("select item from " + item_table + " where item = %d" % pair[1]) bitmap = session.execute("select hash from " + bitmap_table + " where hash = %d" % hashfunction(pair)) print("Pair checked " + str(pair[0])) return item1 and item2 and bitmap @staticmethod def filter_pairs_broadcast(pair, freq_pair, bitmap, hashfunction): """ Filter pairs from broadcast variables :return: """ return pair[0] in freq_pair and pair[1] in freq_pair and hashfunction(pair) in bitmap def pcy_freq_items(self, item_group_rdd, hash_function, support_count): """ Get Frequent items for a particular group of items :param item_group_rdd: :param passno: :param hash_function: :param support_count: :return: """ # Hash and Items mapping order_prod_hash = item_group_rdd \ .map(lambda x: (hash_function(x), 1)) # Group, filter and get unique item sets frequent_items = order_prod_hash.reduceByKey(operator.add) \ .filter(lambda x: x[1] > support_count) \ .map(lambda x: x[0]) return frequent_items def pcy_pass(self, order_prod, pass_no, support_count, hashn, hashnplus1, is_nplus1_cache=False): """ Calculates frequent items and bitmap after n th pass :param order_prod: :param pass_no: :param support_count: :param hashn: :param hashnplus1: :param is_nplus1_cache: :return: """ item_set_count = pass_no order_prod_single = order_prod. 
\ flatMap(lambda x: PCYFrequentItems. group_items(x, item_set_count)) frequent_items_n = self.pcy_freq_items(order_prod_single, hashn, support_count) item_set_count += 1 order_prod_pairs = order_prod. \ flatMap(lambda x: PCYFrequentItems.group_items(x, item_set_count)) if is_nplus1_cache: order_prod_pairs = order_prod_pairs.cache() bitmap_nplus1 = self.pcy_freq_items(order_prod_pairs, hashnplus1, support_count) return frequent_items_n, bitmap_nplus1, order_prod_pairs @staticmethod def pair_bitmap(items): """ Hash function for calculation for pairs :param items: :return: """ mul = 1 for item in items: mul *= ((2 * item) + 1) return mul % 999917 @staticmethod def single(items): """ Hash function for calculation :param items: :return: """ mul = 1 for item in items: mul *= item return mul % 100000000 def configure(self): """ Configure spark and cassandra objects :param is_local_host: :return: """ # Spark Configuration conf = SparkConf().setAppName('Frequent Item Sets'). \ set('spark.cassandra.connection.host', ','.join(self.config_object["CassandraHosts"])) return SparkContext(conf=conf) def frequent_items(self, inputs, output, support_count, is_broadcast=True): """Output correlation coefficient without mean formula Args: inputs:Input file location output:Output file location support_count: is_broadcast: Item pair will be found using broadcast or not """ spark_context = self.configure() # File loading text = spark_context.textFile(inputs) order_prod = text.map(PCYFrequentItems.map_nodes).cache() pass_no = 1 freq_items, bitmap, all_pairs = self.pcy_pass(order_prod, pass_no, support_count, PCYFrequentItems.single, PCYFrequentItems.pair_bitmap, is_nplus1_cache=True) if self.IS_DEBUGGING: print("Frequent " + str(pass_no) + "-group items after pass:" + str(pass_no)) print(freq_items.collect()) print("Bitmap for " + str(pass_no + 1) + "-group items after pass:" + str(pass_no)) print(bitmap.collect()) # System will use broadcast based on user input if is_broadcast: bitmap_set = set(bitmap.collect()) freq_items_set = set(freq_items.collect()) bitmap_broadast = spark_context.broadcast(bitmap_set) freq_items_set = spark_context.broadcast(freq_items_set) frequent_pairs = all_pairs.filter(lambda x: PCYFrequentItems. filter_pairs_broadcast(x, freq_items_set.value, bitmap_broadast.value, PCYFrequentItems.pair_bitmap )) else: # Making freq items Ready to save to cassandra freq_items = freq_items.map(lambda x: {'item': x}) freq_items.saveToCassandra(self.config_object["KeySpace"], self.config_object["Item1Table"]) # Making bitmap Ready to save to cassandra bitmap = bitmap.map(lambda x: {'hash': x}) bitmap.saveToCassandra(self.config_object["KeySpace"], self.config_object["Bitmap2Table"]) print(all_pairs.count()) frequent_pairs = all_pairs.filter(lambda x: PCYFrequentItems. 
filter_pairs(x, self.config_object["CassandraHosts"], self.config_object["KeySpace"], PCYFrequentItems.pair_bitmap, self.config_object["Item1Table"], self.config_object["Bitmap2Table"])) if self.IS_DEBUGGING: print(all_pairs.collect()) print(frequent_pairs.collect()) # Saves as text file frequent_pairs.saveAsTextFile(output) frequent_pairs = frequent_pairs.\ map(lambda x: {'productid1': x[0], 'productid2': x[1]}) # Save final output to cassandra frequent_pairs.saveToCassandra(self.config_object["KeySpace"], self.config_object["RecommendTable"]) all_pairs.unpersist() order_prod.unpersist() def main(): """ Handles parameters for the file to run :return: """ input_path = sys.argv[1] output_path = sys.argv[2] support_thresold = int(sys.argv[3]) broadcast = 1 if len(sys.argv) > 4: broadcast = int(sys.argv[4]) pcy = PCYFrequentItems(is_debug=True) if broadcast == 1: is_broadcast = True else: is_broadcast = False pcy.frequent_items(input_path, output_path, support_thresold, is_broadcast) if __name__ == "__main__": main()
filtered: remove_non_ascii=0  remove_decorators=3,197  remove_async=0  remove_classes=6,742  remove_generators=0  remove_function_no_docstring=0  remove_class_no_docstring=0  remove_unused_imports=25  remove_delete_markers=134
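As a small worked illustration of the bucket hashing used by item_sets.py above, the function below re-types PCYFrequentItems.pair_bitmap from original_content as a standalone snippet; the sample pair (3, 7) is arbitrary.

# Standalone re-typing of PCYFrequentItems.pair_bitmap, for illustration only.
def pair_bitmap(items):
    mul = 1
    for item in items:
        mul *= (2 * item) + 1   # map each item to an odd number, multiply them
    return mul % 999917         # fold the product into a fixed number of buckets

print(pair_bitmap((3, 7)))      # (2*3+1) * (2*7+1) = 105 -> bucket 105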

Record 4: pncShell.pyw (BobBaylor/pnc)
hexsha: 67a731ca62e5cbd2844ce988950efc73fd0d3ec6
size: 5,201   ext: pyw   lang: Python
max_stars:  path=pncShell.pyw  name=BobBaylor/pnc  head_hexsha=11b5a08a1fce5c605a203c4e46c9d9599024ad3c  licenses=[ "MIT" ]  count=null  stars_event_min=null  stars_event_max=null
max_issues: path=pncShell.pyw  name=BobBaylor/pnc  head_hexsha=11b5a08a1fce5c605a203c4e46c9d9599024ad3c  licenses=[ "MIT" ]  count=null  issues_event_min=null  issues_event_max=null
max_forks:  path=pncShell.pyw  name=BobBaylor/pnc  head_hexsha=11b5a08a1fce5c605a203c4e46c9d9599024ad3c  licenses=[ "MIT" ]  count=null  forks_event_min=null  forks_event_max=null
content:
""" A wrapper around my pnc.py module """ # app = MyApp(redirect=True) app = MyApp() #pylint: disable=invalid-name app.MainLoop()
avg_line_length: 38.242647   max_line_length: 153   alphanum_fraction: 0.586233
original_content:
""" A wrapper around my pnc.py module """ import os.path import wx import wx.lib.filebrowsebutton as filebrowse import pnc class MyFrame(wx.Frame): """ This is MyFrame. It just shows a few controls on a wxPanel, and has a simple menu. Use this file inFileBtn Write this root name TextEntry and starting number TextEntry To here outDirRootButton Optional subdirectory TextEntry Move the input file there, too CheckBox """ def __init__(self, parent, title): wide = 860 wx.Frame.__init__(self, parent, wx.ID_ANY, title, pos=(150, 150), size=(wide, 270)) # make a minimalist menu bar self.CreateStatusBar() menu_bar = wx.MenuBar() menu1 = wx.Menu() menu1.Append(101, '&Close', 'Close this frame') self.SetMenuBar(menu_bar) self.Bind(wx.EVT_MENU, self.Close, id=101) # Now create the Panel to put the other controls on. self.panel = wx.Panel(self, wx.ID_ANY) # Use a sizer to layout the controls, stacked vertically and with # a 6 pixel border around each space = 6 sflags = wx.ALL sizer = wx.BoxSizer(wx.VERTICAL) # x = self # sizer.Add(self.panel, wx.EXPAND ) # and a few controls text = wx.StaticText(self, -1, "Browse to the .pnc file, choose a root and folder name, and press Do It!") #pylint: disable=line-too-long text.SetFont(wx.Font(14, wx.SWISS, wx.NORMAL, wx.BOLD)) text.SetSize(text.GetBestSize()) sizer.Add(text, 0, sflags, space) self.btn_infile = filebrowse.FileBrowseButton(self, -1, size=(wide-10, -1), changeCallback=self.cback_infile, labelText='Use this PNC file') self.btn_infile.SetValue('/Users/guy/Downloads/JpegData.PNC') sizer.Add(self.btn_infile, 0, sflags, space) self.file_browse_root = filebrowse.DirBrowseButton(self, -1, size=(wide-10, -1), changeCallback=self.cback_file_root, #pylint: disable=line-too-long labelText='To put JPG files here') # self.file_browse_root.SetValue( '/Users/guy/Pictures' ) self.file_browse_root.SetValue('/Users/guy/python/test') # self.file_browse_root.callCallback = False sizer.Add(self.file_browse_root, 0, sflags, space) # file name root and starting number hsizer = wx.BoxSizer(wx.HORIZONTAL) hsizer.Add(wx.StaticText(self, -1, "Optional new dir:"), 0, sflags, space) self.tc_out_dir = wx.TextCtrl(self, -1, '') hsizer.Add(self.tc_out_dir, 0, sflags, space) hsizer.Add(wx.StaticText(self, -1, "Filename root:"), 0, sflags, space) self.tc_out_fname = wx.TextCtrl(self, -1, 'gcam') hsizer.Add(self.tc_out_fname, 0, sflags, space) # hsizer.Add(wx.StaticText(self, -1, "File number start:"), 0, sflags, space) sizer.Add(hsizer, 0, sflags, space) self.cb_move_file = wx.CheckBox(self, -1, 'Move Input file, too') sizer.Add(self.cb_move_file, 0, sflags, space) # bind the button events to handlers hsizer2 = wx.BoxSizer(wx.HORIZONTAL) funbtn = wx.Button(self, -1, "Do it") self.Bind(wx.EVT_BUTTON, self.evh_doit, funbtn) hsizer2.Add(funbtn, 0, sflags, space) btn = wx.Button(self, -1, "Close") self.Bind(wx.EVT_BUTTON, self.evh_close, btn) hsizer2.Add(btn, 0, sflags, space) sizer.Add(hsizer2, 0, sflags, space) self.SetSizer(sizer) def evh_close(self, evt): #pylint: disable=unused-argument """Event handler for the button click.""" self.Close() def evh_doit(self, evt): #pylint: disable=unused-argument """Event handler for the button click.""" self.SetStatusText('working...') print '' out_dir = self.file_browse_root.GetValue() out_new_dir = self.tc_out_dir.GetValue() out_dir = os.path.join(out_dir, out_new_dir) b_success = pnc.get_photos(self.btn_infile.GetValue(), out_dir, self.tc_out_fname.GetValue(), self.cb_move_file.GetValue()) if b_success: self.SetStatusText('Done!') else: 
self.SetStatusText('Failed') def cback_infile(self, evt): #pylint: disable=unused-argument """ dummy callback """ pass def cback_file_root(self, evt): #pylint: disable=unused-argument """ dummy callback """ pass class MyApp(wx.App): """ a simple GUI """ def OnInit(self): #pylint: disable=invalid-name """ let's get this party started """ frame = MyFrame(None, "Panasonic .PNC to .JPG converter") self.SetTopWindow(frame) frame.Show(True) return True # app = MyApp(redirect=True) app = MyApp() #pylint: disable=invalid-name app.MainLoop()
filtered: remove_non_ascii=0  remove_decorators=0  remove_async=0  remove_classes=4,927  remove_generators=0  remove_function_no_docstring=0  remove_class_no_docstring=0  remove_unused_imports=-7  remove_delete_markers=137

Record 5: ouch_server.py (jahinzee/theouchteam)
hexsha: f7357be79ed5cf787004c67c6e35b3966042133a
size: 659   ext: py   lang: Python
max_stars:  path=ouch_server.py  name=jahinzee/theouchteam  head_hexsha=870767cae81ad37b4191ded64c3e83eb48be982a  licenses=[ "MIT" ]  count=3  stars_event_min=2022-01-09T02:40:31.000Z  stars_event_max=2022-02-01T03:57:40.000Z
max_issues: path=ouch_server.py  name=jahinzee/theouchteam  head_hexsha=870767cae81ad37b4191ded64c3e83eb48be982a  licenses=[ "MIT" ]  count=null  issues_event_min=null  issues_event_max=null
max_forks:  path=ouch_server.py  name=jahinzee/theouchteam  head_hexsha=870767cae81ad37b4191ded64c3e83eb48be982a  licenses=[ "MIT" ]  count=1  forks_event_min=2022-01-21T08:05:27.000Z  forks_event_max=2022-01-21T08:05:27.000Z
content:
import sys
from src.Exchange import Exchange

if __name__ == "__main__":
    exchange = None
    if len(sys.argv) == 2:
        if sys.argv[1] == "debug":
            # Exchange outputs using debug mode.
            exchange = Exchange(debug="dump")
        elif sys.argv[1] == "none":
            # Exchange won't output anything.
            exchange = Exchange(debug="none")
        else:
            raise Exception("Command line argument should be either 'dump' or 'none'")
    else:
        exchange = Exchange()
    exchange.open_exchange()
    input()  # Pressing the enter key will cause the server process to terminate.
    exchange.close_exchange()
avg_line_length: 32.95   max_line_length: 86   alphanum_fraction: 0.608498
original_content:
import sys
from src.Exchange import Exchange

if __name__ == "__main__":
    exchange = None
    if len(sys.argv) == 2:
        if sys.argv[1] == "debug":
            # Exchange outputs using debug mode.
            exchange = Exchange(debug="dump")
        elif sys.argv[1] == "none":
            # Exchange won't output anything.
            exchange = Exchange(debug="none")
        else:
            raise Exception("Command line argument should be either 'dump' or 'none'")
    else:
        exchange = Exchange()
    exchange.open_exchange()
    input()  # Pressing the enter key will cause the server process to terminate.
    exchange.close_exchange()
filtered: remove_non_ascii=0  remove_decorators=0  remove_async=0  remove_classes=0  remove_generators=0  remove_function_no_docstring=0  remove_class_no_docstring=0  remove_unused_imports=0  remove_delete_markers=0

Record 6: server_prod.py (techx/evolution-chamber)
hexsha: a3dcdb967f844c2c93436cc07445e0c92c4d3a7d
size: 99   ext: py   lang: Python
max_stars:  path=server_prod.py  name=techx/evolution-chamber  head_hexsha=dea9b7d563df6f06d270078f5c512e3f7e367a92  licenses=[ "MIT" ]  count=4  stars_event_min=2015-06-22T15:44:57.000Z  stars_event_max=2015-06-22T15:57:03.000Z
max_issues: path=server_prod.py  name=techx/evolution-chamber  head_hexsha=dea9b7d563df6f06d270078f5c512e3f7e367a92  licenses=[ "MIT" ]  count=null  issues_event_min=null  issues_event_max=null
max_forks:  path=server_prod.py  name=techx/evolution-chamber  head_hexsha=dea9b7d563df6f06d270078f5c512e3f7e367a92  licenses=[ "MIT" ]  count=2  forks_event_min=2015-07-09T15:21:37.000Z  forks_event_max=2016-02-02T15:59:09.000Z
content:
import server

if __name__ == "__main__":
    server.app.run(host='0.0.0.0',port=5000,debug=False)
avg_line_length: 19.8   max_line_length: 56   alphanum_fraction: 0.686869
original_content:
import server

if __name__ == "__main__":
    server.app.run(host='0.0.0.0',port=5000,debug=False)
filtered: remove_non_ascii=0  remove_decorators=0  remove_async=0  remove_classes=0  remove_generators=0  remove_function_no_docstring=0  remove_class_no_docstring=0  remove_unused_imports=0  remove_delete_markers=0

Record 7: datahub/email_ingestion/emails.py (Staberinde/data-hub-api)
hexsha: da72584d02e46192004671f6611a889c0dd3c753
size: 2,533   ext: py   lang: Python
max_stars:  path=datahub/email_ingestion/emails.py  name=Staberinde/data-hub-api  head_hexsha=3d0467dbceaf62a47158eea412a3dba827073300  licenses=[ "MIT" ]  count=6  stars_event_min=2019-12-02T16:11:24.000Z  stars_event_max=2022-03-18T10:02:02.000Z
max_issues: path=datahub/email_ingestion/emails.py  name=Staberinde/data-hub-api  head_hexsha=3d0467dbceaf62a47158eea412a3dba827073300  licenses=[ "MIT" ]  count=1,696  issues_event_min=2019-10-31T14:08:37.000Z  issues_event_max=2022-03-29T12:35:57.000Z
max_forks:  path=datahub/email_ingestion/emails.py  name=Staberinde/data-hub-api  head_hexsha=3d0467dbceaf62a47158eea412a3dba827073300  licenses=[ "MIT" ]  count=9  forks_event_min=2019-11-22T12:42:03.000Z  forks_event_max=2021-09-03T14:25:05.000Z
content:
from logging import getLogger

import mailparser

from datahub.documents import utils as documents
from datahub.interaction.email_processors.processors import CalendarInteractionEmailProcessor

logger = getLogger(__name__)

BUCKET_ID = 'mailbox'


def process_ingestion_emails():
    """
    Gets all new mail documents in the bucket and process each message.
    """
    processor = CalendarInteractionEmailProcessor()
    for message in get_mail_docs_in_bucket():
        source = message['source']
        try:
            documents.delete_document(bucket_id=BUCKET_ID, document_key=message['source'])
        except Exception as e:
            logger.exception('Error deleting message: "%s", error: "%s"', source, e)
            continue
        try:
            email = mailparser.parse_from_bytes(message['content'])
            processed, reason = processor.process_email(message=email)
            if not processed:
                logger.error('Error parsing message: "%s", error: "%s"', source, reason)
            else:
                logger.info(reason)
        except Exception as e:
            logger.exception('Error processing message: "%s", error: "%s"', source, e)
        logger.info(
            'Successfully processed message "%s" and deleted document from bucket "%s"',
            source,
            BUCKET_ID,
        )
avg_line_length: 33.773333   max_line_length: 95   alphanum_fraction: 0.649428
original_content:
import tempfile
from logging import getLogger

import mailparser
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured

from datahub.documents import utils as documents
from datahub.interaction.email_processors.processors import CalendarInteractionEmailProcessor

logger = getLogger(__name__)

BUCKET_ID = 'mailbox'


def get_mail_docs_in_bucket():
    """
    Gets all mail documents in the bucket.
    """
    if BUCKET_ID not in settings.DOCUMENT_BUCKETS:
        raise ImproperlyConfigured(f'Bucket "{BUCKET_ID}" is missing in settings')
    config = settings.DOCUMENT_BUCKETS[BUCKET_ID]
    if 'bucket' not in config:
        raise ImproperlyConfigured(f'Bucket "{BUCKET_ID}" not configured properly in settings')
    name = config['bucket']
    if not name:
        raise ImproperlyConfigured(
            f'Bucket "{BUCKET_ID}" bucket value not configured properly in settings',
        )
    client = documents.get_s3_client_for_bucket(bucket_id=BUCKET_ID)
    paginator = client.get_paginator('list_objects')
    for page in paginator.paginate(Bucket=name):
        for doc in page.get('Contents') or []:
            key = doc['Key']
            with tempfile.TemporaryFile(mode='w+b') as f:
                client.download_fileobj(Bucket=name, Key=key, Fileobj=f)
                f.seek(0)
                content = f.read()
            yield {'source': key, 'content': content}


def process_ingestion_emails():
    """
    Gets all new mail documents in the bucket and process each message.
    """
    processor = CalendarInteractionEmailProcessor()
    for message in get_mail_docs_in_bucket():
        source = message['source']
        try:
            documents.delete_document(bucket_id=BUCKET_ID, document_key=message['source'])
        except Exception as e:
            logger.exception('Error deleting message: "%s", error: "%s"', source, e)
            continue
        try:
            email = mailparser.parse_from_bytes(message['content'])
            processed, reason = processor.process_email(message=email)
            if not processed:
                logger.error('Error parsing message: "%s", error: "%s"', source, reason)
            else:
                logger.info(reason)
        except Exception as e:
            logger.exception('Error processing message: "%s", error: "%s"', source, e)
        logger.info(
            'Successfully processed message "%s" and deleted document from bucket "%s"',
            source,
            BUCKET_ID,
        )
filtered: remove_non_ascii=0  remove_decorators=0  remove_async=0  remove_classes=0  remove_generators=1,053  remove_function_no_docstring=0  remove_class_no_docstring=0  remove_unused_imports=39  remove_delete_markers=89

Record 8: classes/rooms.py (Loekring/Neversoft)
hexsha: 0af7288a9052da637b85d240b67185965f20ec48
size: 1,105   ext: py   lang: Python
max_stars:  path=classes/rooms.py  name=Loekring/Neversoft  head_hexsha=a9e600131585741652b62b2dbbaa2febc1656843  licenses=[ "MIT" ]  count=1  stars_event_min=2018-01-21T21:15:52.000Z  stars_event_max=2018-01-21T21:15:52.000Z
max_issues: path=classes/rooms.py  name=Loekring/Neversoft  head_hexsha=a9e600131585741652b62b2dbbaa2febc1656843  licenses=[ "MIT" ]  count=null  issues_event_min=null  issues_event_max=null
max_forks:  path=classes/rooms.py  name=Loekring/Neversoft  head_hexsha=a9e600131585741652b62b2dbbaa2febc1656843  licenses=[ "MIT" ]  count=null  forks_event_min=null  forks_event_max=null
content:
offBoundsMsgs = ["Der er ikkje noko i den retninga.", "Du mtte ein vegg.", "Du kjem deg ikkje vidare i den retninga."]
roomSizeX, roomSizeY = 2, 1
avg_line_length: 31.571429   max_line_length: 119   alphanum_fraction: 0.6181
original_content:
import random as r

offBoundsMsgs = ["Der er ikkje noko i den retninga.", "Du møtte ein vegg.", "Du kjem deg ikkje vidare i den retninga."]
roomSizeX, roomSizeY = 2, 1


class Rooms:  # Dette er baseklassa til allle romma
    def __init__(self, name, smell, feel, taste, look, sound, jump):
        self.name = name
        self.smell = smell
        self.feel = feel
        self.taste = taste
        self.look = look
        self.sound = sound
        self.jump = jump

    def __str__(self):
        return "Du er no i {}.".format(self.name)

    def Roomsmell(self):
        return "Rommet luktar {}.".format(self.smell)

    def Roomfeel(self):
        return "Du kjenner {}.".format(self.feel)

    def Roomtaste(self):
        return "Du sleikjer rundt om i rommet og kjenner smaken av {}.".format(self.taste)

    def Roomlook(self):
        return "Du ser rundt i rommet og ser {}.".format(self.look)

    def Roomsound(self):
        return "Om du er heilt stille kan du høre lyden av {}.".format(self.sound)

    def Roomjump(self):
        return "Du hoppar opp og {}.".format(self.jump)
filtered: remove_non_ascii=4  remove_decorators=0  remove_async=0  remove_classes=910  remove_generators=0  remove_function_no_docstring=0  remove_class_no_docstring=0  remove_unused_imports=-3  remove_delete_markers=45

Record 9: pymatflow/vasp/scripts/vasp-dfpt.py (DeqiTang/pymatflow)
hexsha: 9c6cb2f62249c9249426fed5a021326cf26ae2cd
size: 3,970   ext: py   lang: Python
max_stars:  path=pymatflow/vasp/scripts/vasp-dfpt.py  name=DeqiTang/pymatflow  head_hexsha=bd8776feb40ecef0e6704ee898d9f42ded3b0186  licenses=[ "MIT" ]  count=6  stars_event_min=2020-03-06T16:13:08.000Z  stars_event_max=2022-03-09T07:53:34.000Z
max_issues: path=pymatflow/vasp/scripts/vasp-dfpt.py  name=DeqiTang/pymatflow  head_hexsha=bd8776feb40ecef0e6704ee898d9f42ded3b0186  licenses=[ "MIT" ]  count=1  issues_event_min=2021-10-02T02:23:08.000Z  issues_event_max=2021-11-08T13:29:37.000Z
max_forks:  path=pymatflow/vasp/scripts/vasp-dfpt.py  name=DeqiTang/pymatflow  head_hexsha=bd8776feb40ecef0e6704ee898d9f42ded3b0186  licenses=[ "MIT" ]  count=1  forks_event_min=2021-07-10T16:28:14.000Z  forks_event_max=2021-07-10T16:28:14.000Z
content:
#!/usr/bin/env python # _*_ coding: utf-8 _*_ import argparse from pymatflow.vasp.dfpt import dfpt_run """ usage: """ params = {} if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("-d", "--directory", type=str, default="tmp-vasp-static", help="directory of the static running") parser.add_argument("-f", "--file", type=str, help="the xyz file name") parser.add_argument("--runopt", type=str, default="gen", help="gen, run, or genrun") parser.add_argument("--auto", type=int, default=3, help="auto:0 nothing, 1: copying files to server, 2: copying and executing in remote server, 3: pymatflow used in server with direct submit, in order use auto=1, 2, you must make sure there is a working ~/.pymatflow/server_[pbs|yh].conf") parser.add_argument("--mode", type=int, default=0, choices=[0, 1], help="run mode for dfpt. 0: brand new with a new directory; 1: continue in the existing directory") # -------------------------------------------------------- # INCAR PARAMETERS # -------------------------------------------------------- parser.add_argument("--prec", type=str, default="Normal", choices=["Normal", "Accurate", "A", "N"], help="PREC, default value: Normal") parser.add_argument("--encut", type=int, default=300, help="ENCUT, default value: 300 eV") parser.add_argument("--ediff", type=float, default=1.0e-4, help="EDIFF, default value: 1.0e-4") parser.add_argument("--kpoints-mp", type=int, nargs="+", default=[1, 1, 1, 0, 0, 0], help="set kpoints like -k 1 1 1 0 0 0") parser.add_argument("--ismear", type=int, default=0, help="smearing type(methfessel-paxton(>0), gaussian(0), fermi-dirac(-1), tetra(-4), tetra-bloch-dorrected(-5)), default: 0") parser.add_argument("--sigma", type=float, default=0.01, help="determines the width of the smearing in eV.") # ----------------------------------------------------------------- # ---------------------- # properties parametes # --------------------- #parser.add_argument("--lorbit", help="", type=int, default=None) #parser.add_argument("--loptics", help="", type=str, default="FALSE") # ----------------------------------------------------------------- # run params # ----------------------------------------------------------------- parser.add_argument("--mpi", type=str, default="", help="MPI command") parser.add_argument("--server", type=str, default="pbs", choices=["pbs", "yh", "lsf_sz"], help="type of remote server, can be pbs or yh or lsf_sz") parser.add_argument("--jobname", type=str, default="vasp-dfpt", help="jobname on the pbs server") parser.add_argument("--nodes", type=int, default=1, help="Nodes used in server") parser.add_argument("--ppn", type=int, default=32, help="ppn of the server") # ========================================================== # transfer parameters from the arg parser to static_run setting # ========================================================== args = parser.parse_args() params["PREC"] = args.prec params["ENCUT"] = args.encut params["EDIFF"] = args.ediff params["ISMEAR"] = args.ismear params["SIGMA"] = args.sigma task = dfpt_run() task.get_xyz(args.file) task.set_params(params=params) task.set_kpoints(kpoints_mp=args.kpoints_mp) task.set_run(mpi=args.mpi, server=args.server, jobname=args.jobname, nodes=args.nodes, ppn=args.ppn) task.dfpt(directory=args.directory, runopt=args.runopt, auto=args.auto, mode=args.mode)
avg_line_length: 36.090909   max_line_length: 251   alphanum_fraction: 0.522418
original_content:
#!/usr/bin/env python # _*_ coding: utf-8 _*_ import os import argparse from pymatflow.vasp.dfpt import dfpt_run """ usage: """ params = {} if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("-d", "--directory", type=str, default="tmp-vasp-static", help="directory of the static running") parser.add_argument("-f", "--file", type=str, help="the xyz file name") parser.add_argument("--runopt", type=str, default="gen", help="gen, run, or genrun") parser.add_argument("--auto", type=int, default=3, help="auto:0 nothing, 1: copying files to server, 2: copying and executing in remote server, 3: pymatflow used in server with direct submit, in order use auto=1, 2, you must make sure there is a working ~/.pymatflow/server_[pbs|yh].conf") parser.add_argument("--mode", type=int, default=0, choices=[0, 1], help="run mode for dfpt. 0: brand new with a new directory; 1: continue in the existing directory") # -------------------------------------------------------- # INCAR PARAMETERS # -------------------------------------------------------- parser.add_argument("--prec", type=str, default="Normal", choices=["Normal", "Accurate", "A", "N"], help="PREC, default value: Normal") parser.add_argument("--encut", type=int, default=300, help="ENCUT, default value: 300 eV") parser.add_argument("--ediff", type=float, default=1.0e-4, help="EDIFF, default value: 1.0e-4") parser.add_argument("--kpoints-mp", type=int, nargs="+", default=[1, 1, 1, 0, 0, 0], help="set kpoints like -k 1 1 1 0 0 0") parser.add_argument("--ismear", type=int, default=0, help="smearing type(methfessel-paxton(>0), gaussian(0), fermi-dirac(-1), tetra(-4), tetra-bloch-dorrected(-5)), default: 0") parser.add_argument("--sigma", type=float, default=0.01, help="determines the width of the smearing in eV.") # ----------------------------------------------------------------- # ---------------------- # properties parametes # --------------------- #parser.add_argument("--lorbit", help="", type=int, default=None) #parser.add_argument("--loptics", help="", type=str, default="FALSE") # ----------------------------------------------------------------- # run params # ----------------------------------------------------------------- parser.add_argument("--mpi", type=str, default="", help="MPI command") parser.add_argument("--server", type=str, default="pbs", choices=["pbs", "yh", "lsf_sz"], help="type of remote server, can be pbs or yh or lsf_sz") parser.add_argument("--jobname", type=str, default="vasp-dfpt", help="jobname on the pbs server") parser.add_argument("--nodes", type=int, default=1, help="Nodes used in server") parser.add_argument("--ppn", type=int, default=32, help="ppn of the server") # ========================================================== # transfer parameters from the arg parser to static_run setting # ========================================================== args = parser.parse_args() params["PREC"] = args.prec params["ENCUT"] = args.encut params["EDIFF"] = args.ediff params["ISMEAR"] = args.ismear params["SIGMA"] = args.sigma task = dfpt_run() task.get_xyz(args.file) task.set_params(params=params) task.set_kpoints(kpoints_mp=args.kpoints_mp) task.set_run(mpi=args.mpi, server=args.server, jobname=args.jobname, nodes=args.nodes, ppn=args.ppn) task.dfpt(directory=args.directory, runopt=args.runopt, auto=args.auto, mode=args.mode)
filtered: remove_non_ascii=0  remove_decorators=0  remove_async=0  remove_classes=0  remove_generators=0  remove_function_no_docstring=0  remove_class_no_docstring=0  remove_unused_imports=-12  remove_delete_markers=25

Record 10: python5.py (audstanley/nodePythonProcessWatcher)
hexsha: 9d62cac37a74dba044cd1a53d16dc1255a546ab1
size: 260   ext: py   lang: Python
max_stars:  path=python5.py  name=audstanley/nodePythonProcessWatcher  head_hexsha=cf3b707af81c837b99c5b2d955cf0d718e286e81  licenses=[ "MIT" ]  count=null  stars_event_min=null  stars_event_max=null
max_issues: path=python5.py  name=audstanley/nodePythonProcessWatcher  head_hexsha=cf3b707af81c837b99c5b2d955cf0d718e286e81  licenses=[ "MIT" ]  count=null  issues_event_min=null  issues_event_max=null
max_forks:  path=python5.py  name=audstanley/nodePythonProcessWatcher  head_hexsha=cf3b707af81c837b99c5b2d955cf0d718e286e81  licenses=[ "MIT" ]  count=null  forks_event_min=null  forks_event_max=null
content:
from python5_unixSocket import interComs

myInterComs = interComs()
myInterComs.run()

import sys
from time import sleep

while True:
    print("MESSAGES FROM PYTHON 5")
    sys.stdout.flush()
    myInterComs.send( {"wordDawg": "from python5"} )
    sleep(0.500)
avg_line_length: 23.636364   max_line_length: 52   alphanum_fraction: 0.726923
original_content:
from python5_unixSocket import interComs

myInterComs = interComs()
myInterComs.run()

import sys
from time import sleep

while True:
    print("MESSAGES FROM PYTHON 5")
    sys.stdout.flush()
    myInterComs.send( {"wordDawg": "from python5"} )
    sleep(0.500)
filtered: remove_non_ascii=0  remove_decorators=0  remove_async=0  remove_classes=0  remove_generators=0  remove_function_no_docstring=0  remove_class_no_docstring=0  remove_unused_imports=0  remove_delete_markers=0

Record 11: Revitron.tab/RPM.panel/Setup.pulldown/ProjectSetup.pushbutton/ProjectSetup_script.py (jmcouffin/revitron-ui)
hexsha: 2c4146e35515d5d11823006c020a481717320a31
size: 1,909   ext: py   lang: Python
max_stars:  path=Revitron.tab/RPM.panel/Setup.pulldown/ProjectSetup.pushbutton/ProjectSetup_script.py  name=jmcouffin/revitron-ui  head_hexsha=f67739488b504cdb0cabe36e088a40fe3cd2b282  licenses=[ "MIT" ]  count=null  stars_event_min=null  stars_event_max=null
max_issues: path=Revitron.tab/RPM.panel/Setup.pulldown/ProjectSetup.pushbutton/ProjectSetup_script.py  name=jmcouffin/revitron-ui  head_hexsha=f67739488b504cdb0cabe36e088a40fe3cd2b282  licenses=[ "MIT" ]  count=null  issues_event_min=null  issues_event_max=null
max_forks:  path=Revitron.tab/RPM.panel/Setup.pulldown/ProjectSetup.pushbutton/ProjectSetup_script.py  name=jmcouffin/revitron-ui  head_hexsha=f67739488b504cdb0cabe36e088a40fe3cd2b282  licenses=[ "MIT" ]  count=null  forks_event_min=null  forks_event_max=null
content:
# -*- coding: utf-8 -*-
"""
Define extensions to be used with this Revit model.
Defined extensions can be installed by using the "Install Extensions" button.
"""
import revitron
import System.Windows
from rpw.ui.forms import FlexForm, TextBox, Button, Label

if not revitron.Document().isFamily():

    config = revitron.DocumentConfigStorage().get('rpm.extensions')

    components = [
        Label('You can define a list of pyRevit extensions to be used with the currently active model.\n'
              'That list will be stored in the project information and therefore can be easily distributed\n'
              'among other team members to easly create a common work environment.\n'
              'To install or switch to the extension saved with your project just hit the "Install Extensions" button.\n\n'
              'Enter one extension per line providing the type of the extension ("ui" or "lib")\n'
              'and the repository URL separated by a TAB as follows:',
              FontSize=14, Height=140, Width=650),
        Label('ui https://ui-extension-repository.git\r\nlib https://lib-extension-repository.git',
              FontFamily=System.Windows.Media.FontFamily('Consolas'), FontSize=14, Height=50, Width=650),
        TextBox('extensions', Text=config, TextWrapping=System.Windows.TextWrapping.Wrap,
                AcceptsTab=True, AcceptsReturn=True, Multiline=True, Height=200, Width=650,
                FontFamily=System.Windows.Media.FontFamily('Consolas'), FontSize=14),
        Button('Open Documentation', on_click=openHelp, Width=650),
        Button('Save', Width=650)
    ]

    form = FlexForm('Project Extensions', components)
    form.show()

    if 'extensions' in form.values:
        revitron.DocumentConfigStorage().set('rpm.extensions', form.values.get('extensions'))
avg_line_length: 37.431373   max_line_length: 130   alphanum_fraction: 0.707177
original_content:
# -*- coding: utf-8 -*-
"""
Define extensions to be used with this Revit model.
Defined extensions can be installed by using the "Install Extensions" button.
"""
import revitron
import System.Windows
from pyrevit import script
from rpw.ui.forms import FlexForm, TextBox, Button, Label


def openHelp(sender, e):
    script.open_url('https://revitron-ui.readthedocs.io/en/latest/tools/rpm.html')


if not revitron.Document().isFamily():

    config = revitron.DocumentConfigStorage().get('rpm.extensions')

    components = [
        Label('You can define a list of pyRevit extensions to be used with the currently active model.\n'
              'That list will be stored in the project information and therefore can be easily distributed\n'
              'among other team members to easly create a common work environment.\n'
              'To install or switch to the extension saved with your project just hit the "Install Extensions" button.\n\n'
              'Enter one extension per line providing the type of the extension ("ui" or "lib")\n'
              'and the repository URL separated by a TAB as follows:',
              FontSize=14, Height=140, Width=650),
        Label('ui https://ui-extension-repository.git\r\nlib https://lib-extension-repository.git',
              FontFamily=System.Windows.Media.FontFamily('Consolas'), FontSize=14, Height=50, Width=650),
        TextBox('extensions', Text=config, TextWrapping=System.Windows.TextWrapping.Wrap,
                AcceptsTab=True, AcceptsReturn=True, Multiline=True, Height=200, Width=650,
                FontFamily=System.Windows.Media.FontFamily('Consolas'), FontSize=14),
        Button('Open Documentation', on_click=openHelp, Width=650),
        Button('Save', Width=650)
    ]

    form = FlexForm('Project Extensions', components)
    form.show()

    if 'extensions' in form.values:
        revitron.DocumentConfigStorage().set('rpm.extensions', form.values.get('extensions'))
filtered: remove_non_ascii=0  remove_decorators=0  remove_async=0  remove_classes=0  remove_generators=0  remove_function_no_docstring=83  remove_class_no_docstring=0  remove_unused_imports=5  remove_delete_markers=45

Record 12: src/features/spectrum.py (vikigenius/neural_speaker_identification)
hexsha: d570100b492c0df602a33bf7fd31f800015b364c
size: 3,653   ext: py   lang: Python
max_stars:  path=src/features/spectrum.py  name=vikigenius/neural_speaker_identification  head_hexsha=a723290808d748daf65163b71aef2c5376319db3  licenses=[ "MIT" ]  count=1  stars_event_min=2019-07-27T00:32:02.000Z  stars_event_max=2019-07-27T00:32:02.000Z
max_issues: path=src/features/spectrum.py  name=vikigenius/neural_speaker_identification  head_hexsha=a723290808d748daf65163b71aef2c5376319db3  licenses=[ "MIT" ]  count=null  issues_event_min=null  issues_event_max=null
max_forks:  path=src/features/spectrum.py  name=vikigenius/neural_speaker_identification  head_hexsha=a723290808d748daf65163b71aef2c5376319db3  licenses=[ "MIT" ]  count=1  forks_event_min=2019-07-27T00:32:06.000Z  forks_event_max=2019-07-27T00:32:06.000Z
content:
#!/usr/bin/env python
import logging

logger = logging.getLogger(__name__)
avg_line_length: 30.957627   max_line_length: 79   alphanum_fraction: 0.595401
original_content:
#!/usr/bin/env python import logging import numpy as np import librosa import scipy from random import randint from src.utils.math_utils import nextpow2 logger = logging.getLogger(__name__) class Spectrum(object): def __init__(self, hparams): self.sample_freq = hparams.sample_freq self.duration = hparams.duration self.preprocess = hparams.preprocess self.Tw = hparams.window_size self.Ts = hparams.window_shift self.win_type = hparams.window_type if self.sample_freq == 16000: self.dc_alpha = 0.99 elif self.sample_freq == 8000: self.dc_alpha = 0.999 else: raise ValueError('Only 16 and 8Khz supported') self.pe_alpha = 0.97 def _sample(self, signal, seqlen: int): """ Helper function to sample a contiguos subsequence of length seqlen from signal Args: signal: numpy.ndarray, the signal seqlen: int, the sequence length Returns: numpy.ndarray, the sampled signal """ nframes = len(signal) roffset = randint(0, nframes - seqlen) sampled = signal[roffset:roffset+seqlen] return sampled def _get_resampled_chunks(self, afile: str): """ Takes in a string path afile and returns chunks of audio each representing a 16-bit mono channel with sampling rate = 16000 Args: afile: path of audio file Returns: List[np.ndarray] """ # Load the file signal, _ = librosa.load(afile, sr=self.sample_freq, mono=True) nframes = len(signal) duration = nframes/self.sample_freq if duration <= self.duration: logger.warn(f'Duration less than specified for {afile}') chunks = [] if duration > 2*self.duration: # Can sample 2 chunks mid = int(nframes/2) chunks.append(signal[:mid]) chunks.append(signal[mid:]) else: chunks.append(signal) num_samples = int(self.duration*self.sample_freq) chunks = [self._sample(chunk, num_samples) for chunk in chunks] return chunks def _preprocess(self, signal): # Remove DC component and add a small dither signal = scipy.signal.lfilter([1, -1], [1, -self.dc_alpha], signal) dither = np.random.random_sample( signal.shape) + np.random.random_sample( signal.shape) - 1 spow = np.std(signal) signal = signal + 1e-6*spow*dither signal = scipy.signal.lfilter([1 - self.pe_alpha], 1, signal) return signal def generate(self, afile: str): """ Takes in a string path afile and returns a numpy nd array representing the magnitude spectrum of the signal Args: afile: path of audio file Returns: numpy.ndarray """ resampled_chunks = self._get_resampled_chunks(afile) if self.preprocess: processed = [self._preprocess(chunk) for chunk in resampled_chunks] else: processed = resampled_chunks # stft sf = self.sample_freq Tw = self.Tw # Window size Ts = self.Ts Nw = round(1e-3*Tw*sf) Ns = round(1e-3*Ts*sf) n_fft = 2**nextpow2(Nw) spectrograms = [librosa.core.stft( chunk, n_fft=n_fft, hop_length=Ns, win_length=Nw, window=self.win_type) for chunk in processed] mag_specs = [np.abs(chunk) for chunk in spectrograms] return mag_specs
filtered: remove_non_ascii=0  remove_decorators=0  remove_async=0  remove_classes=3,437  remove_generators=0  remove_function_no_docstring=0  remove_class_no_docstring=0  remove_unused_imports=6  remove_delete_markers=133

Record 13: backend/grafit/migrations/0003_load_data.py (fossabot/grafit)
hexsha: 01fd056ce41c1c67b73640a90525a86f7223ab98
size: 51,070   ext: py   lang: Python
max_stars:  path=backend/grafit/migrations/0003_load_data.py  name=fossabot/grafit  head_hexsha=c7328cc7ed4d37d36fc735944aa8763fad090d97  licenses=[ "MIT" ]  count=16  stars_event_min=2018-10-12T16:33:52.000Z  stars_event_max=2020-06-23T20:11:34.000Z
max_issues: path=backend/grafit/migrations/0003_load_data.py  name=fossabot/grafit  head_hexsha=c7328cc7ed4d37d36fc735944aa8763fad090d97  licenses=[ "MIT" ]  count=41  issues_event_min=2018-10-14T21:28:38.000Z  issues_event_max=2021-06-10T22:01:45.000Z
max_forks:  path=backend/grafit/migrations/0003_load_data.py  name=fossabot/grafit  head_hexsha=c7328cc7ed4d37d36fc735944aa8763fad090d97  licenses=[ "MIT" ]  count=4  forks_event_min=2018-10-28T10:47:26.000Z  forks_event_max=2020-07-20T04:17:04.000Z
content:
# Generated by Django 2.1.2 on 2018-10-25 09:36

import django.contrib.auth.models
import django.contrib.auth.validators
avg_line_length: 100.928854   max_line_length: 1,002   alphanum_fraction: 0.739495
original_content:
# Generated by Django 2.1.2 on 2018-10-25 09:36 import django.contrib.auth.models import django.contrib.auth.validators from django.db import migrations, models import django.utils.timezone import uuid class Migration(migrations.Migration): dependencies = [ ('grafit', '0002_article'), ] operations = [ migrations.RunSQL(""" INSERT INTO grafit_article (id, title, text) VALUES (2, 'MongoDB', 'MongoDB is a free and open-source cross-platform document-oriented database program. Classified as a NoSQL database program, MongoDB uses JSON-like documents with schemata. MongoDB is developed by MongoDB Inc., and is published under a combination of the Server Side Public License and the Apache License. 10gen software company began developing MongoDB in 2007 as a component of a planned platform as a service product. In 2009, the company shifted to an open source development model, with the company offering commercial support and other services. In 2013, 10gen changed its name to MongoDB Inc.[6] On October 20, 2017, MongoDB became a publicly-traded company, listed on NASDAQ as MDB with an IPO price of $24 per share.[7] Ad hoc queries MongoDB supports field, range query, and regular expression searches.[8] Queries can return specific fields of documents and also include user-defined JavaScript functions. Queries can also be configured to return a random sample of results of a given size. Indexing Fields in a MongoDB document can be indexed with primary and secondary indices. Replication MongoDB provides high availability with replica sets.[9] A replica set consists of two or more copies of the data. Each replica set member may act in the role of primary or secondary replica at any time. All writes and reads are done on the primary replica by default. Secondary replicas maintain a copy of the data of the primary using built-in replication. When a primary replica fails, the replica set automatically conducts an election process to determine which secondary should become the primary. Secondaries can optionally serve read operations, but that data is only eventually consistent by default. Load balancing[10] MongoDB scales horizontally using sharding. The user chooses a shard key, which determines how the data in a collection will be distributed. The data is split into ranges (based on the shard key) and distributed across multiple shards. (A shard is a master with one or more slaves.). Alternatively, the shard key can be hashed to map to a shard – enabling an even data distribution. MongoDB can run over multiple servers, balancing the load or duplicating data to keep the system up and running in case of hardware failure. '); INSERT INTO grafit_article (id, title, text) VALUES (3, 'NoSQL', 'A NoSQL (originally referring to "non SQL" or "non relational")[1] database provides a mechanism for storage and retrieval of data that is modeled in means other than the tabular relations used in relational databases. 
Such databases have existed since the late 1960s, but did not obtain the "NoSQL" moniker until a surge of popularity in the early twenty-first century,[2] triggered by the needs of Web 2.0 companies.[3][4][5] NoSQL databases are increasingly used in big data and real-time web applications.[6] NoSQL systems are also sometimes called "Not only SQL" to emphasize that they may support SQL-like query languages, or sit alongside SQL database in a polyglot persistence architecture.[7][8] Motivations for this approach include: simplicity of design, simpler "horizontal" scaling to clusters of machines (which is a problem for relational databases),[2] and finer control over availability. The data structures used by NoSQL databases (e.g. key-value, wide column, graph, or document) are different from those used by default in relational databases, making some operations faster in NoSQL. The particular suitability of a given NoSQL database depends on the problem it must solve. Sometimes the data structures used by NoSQL databases are also viewed as "more flexible" than relational database tables.[9] Many NoSQL stores compromise consistency (in the sense of the CAP theorem) in favor of availability, partition tolerance, and speed. Barriers to the greater adoption of NoSQL stores include the use of low-level query languages (instead of SQL, for instance the lack of ability to perform ad-hoc joins across tables), lack of standardized interfaces, and huge previous investments in existing relational databases.[10] Most NoSQL stores lack true ACID transactions, although a few databases, such as MarkLogic, Aerospike, FairCom c-treeACE, Google Spanner (though technically a NewSQL database), Symas LMDB, and OrientDB have made them central to their designs. (See ACID and join support.) Instead, most NoSQL databases offer a concept of "eventual consistency" in which database changes are propagated to all nodes "eventually" (typically within milliseconds) so queries for data might not return updated data immediately or might result in reading data that is not accurate, a problem known as stale reads.[11] Additionally, some NoSQL systems may exhibit lost writes and other forms of data loss.[12] Some NoSQL systems provide concepts such as write-ahead logging to avoid data loss.[13] For distributed transaction processing across multiple databases, data consistency is an even bigger challenge that is difficult for both NoSQL and relational databases. Even current relational databases "do not allow referential integrity constraints to span databases."[14] There are few systems that maintain both ACID transactions and X/Open XA standards for distributed transaction processing. '); INSERT INTO grafit_article (id, title, text) VALUES (4, 'SQL', 'SQL was initially developed at IBM by Donald D. Chamberlin and Raymond F. Boyce after learning about the relational model from Ted Codd[15] in the early 1970s.[16] This version, initially called SEQUEL (Structured English Query Language), was designed to manipulate and retrieve data stored in IBM''s original quasi-relational database management system, System R, which a group at IBM San Jose Research Laboratory had developed during the 1970s.[16] Chamberlin and Boyce''s first attempt of a relational database language was Square, but it was difficult to use due to subscript notation. 
After moving to the San Jose Research Laboratory in 1973, they began work on SEQUEL.[15] The acronym SEQUEL was later changed to SQL because "SEQUEL" was a trademark of the UK-based Hawker Siddeley aircraft company.[17] In the late 1970s, Relational Software, Inc. (now Oracle Corporation) saw the potential of the concepts described by Codd, Chamberlin, and Boyce, and developed their own SQL-based RDBMS with aspirations of selling it to the U.S. Navy, Central Intelligence Agency, and other U.S. government agencies. In June 1979, Relational Software, Inc. introduced the first commercially available implementation of SQL, Oracle V2 (Version2) for VAX computers. By 1986, ANSI and ISO standard groups officially adopted the standard "Database Language SQL" language definition. New versions of the standard were published in 1989, 1992, 1996, 1999, 2003, 2006, 2008, 2011,[15] and most recently, 2016. After testing SQL at customer test sites to determine the usefulness and practicality of the system, IBM began developing commercial products based on their System R prototype including System/38, SQL/DS, and DB2, which were commercially available in 1979, 1981, and 1983, respectively.[18] '); INSERT INTO grafit_article (id, title, text) VALUES (5, 'MySQL', 'Built on MySQL Enterprise Edition and powered by the Oracle Cloud, Oracle MySQL Cloud Service provides a simple, automated, integrated and enterprise ready MySQL cloud service, enabling organizations to increase business agility and reduce costs. "Relying on the MySQL engine as the low-level storage layer has allowed us to very quickly build a robust system." "We have successfully implemented MySQL Cluster Carrier Grade Edition for our highly mission critical XDMS application which will enable the next generation of converged services." "We found that MySQL was the best database in terms of the price-point and functionality it offers up. The benefits that MySQL brings to our Brightmail product is its relaiability, robustness and very low-cost administration costs."'); INSERT INTO grafit_article (id, title, text) VALUES (6, 'Critical Flaw Reported In phpMyAdmin Lets Attackers Damage Databases', 'A critical security vulnerability has been reported in phpMyAdmin—one of the most popular applications for managing the MySQL database—which could allow remote attackers to perform dangerous database operations just by tricking administrators into clicking a link. Discovered by an Indian security researcher, Ashutosh Barot, the vulnerability is a cross-site request forgery (CSRF) attack and affects phpMyAdmin versions 4.7.x (prior to 4.7.7). Cross-site request forgery vulnerability, also known as XSRF, is an attack wherein an attacker tricks an authenticated user into executing an unwanted action. According to an advisory released by phpMyAdmin, "by deceiving a user to click on a crafted URL, it is possible to perform harmful database operations such as deleting records, dropping/truncating tables, etc." phpMyAdmin is a free and open source administration tool for MySQL and MariaDB and is widely used to manage the database for websites created with WordPress, Joomla, and many other content management platforms. Moreover, a lot of hosting providers use phpMyAdmin to offer their customers a convenient way to organize their databases. 
Barot has also released a video, as shown above, demonstrating how a remote attacker can make database admins unknowingly delete (DROP) an entire table from the database just by tricking them into clicking a specially crafted link. "A feature of phpMyAdmin was using a GET request and after that POST request for Database operations such as DROP TABLE table_name; GET requests must be protected against CSRF attacks. In this case, POST requests were used which were sent through URL (for bookmarking purpose may be); it was possible for an attacker to trick a database admin into clicking a button and perform a drop table database query of the attacker’s choice." Barot explains in a blog post. However, performing this attack is not simple as it may sound. To prepare a CSRF attack URL, the attacker should be aware of the name of targeted database and table. "If a user executes a query on the database by clicking insert, DROP, etc. buttons, the URL will contain database name and table name," Barot says. "This vulnerability can result in the disclosure of sensitive information as the URL is stored at various places such as browser history, SIEM logs, Firewall Logs, ISP Logs, etc." Barot reported the vulnerability to phpMyAdmin developers, who confirmed his finding and released phpMyAdmin 4.7.7 to address this issue. So administrators are highly recommended to update their installations as soon as possible. '); INSERT INTO grafit_article (id, title, text) VALUES (25, 'Death By Database', 'The following is a true story, but with names changed. When I work with clients to build software, I take the usual steps of understanding their needs, gathering requirements, learning about their customers, and so on. At this point I have a model on paper of roughly what the software is intended to do, so they get surprised when I immediately turn to database design. "Who care about database design? What about mockups? What about workflows?" Let me tell you about "Bob''s Luxury Goods." I worked for this company many years ago and they had a retail store selling ... you guessed it ... luxury goods. They''d ask all customers for a billing address and if they had a different delivery address. At the database level, they had a "one-to-many" relationship between customers and addresses. That was their first problem. A customer''s partner might come into Bob''s and order something and if the address was entered correctly it would be flagged as "in use" and we had to use a different address or deliberately enter a typo. Fortunately, addresses were case-sensitive, so many people had UPPER-CASE ADDRESSES. We should have had a many-to-many relationship between customers and addresses so we could handle the case where more than one person would share the same address, but we didn''t. Further, I was never allocated the time to fix the database because it was "cheaper" to remove the restriction on "flagged" addresses and allow a duplicate address to be used. Naturally, being a luxury goods company, we had many repeat customers and sometimes they would move and if we didn''t find the duplicate address, or the address with the "typo", we might update the address for one partner, but not the other. That was a headache, but it didn''t happen frequently enough for management to worry about it. That''s when the marketing department had a brilliant, inexpensive idea. You see, we periodically did mass mailings of special events to our customers. 
Since we had the software to do mass mailings, why not import a mailing list of all addresses in high net worth areas and mail everyone about upcoming special events? So the company went ahead and bought a database with all of these addresses, but forgot to mention to me that I was supposed to implement this. Except that every address record had the customer id embedded in it, so we couldn''t enter an address without a customer. "Curtis," they said, "just enter a dummy customer called ''Occupant'' and attach all addresses to that." Except you couldn''t enter a customer without an order. Except you couldn''t enter an order without at least one item on it. Except you couldn''t enter an item unless it was listed in inventory. Except that reserved the "inventory" item and made it unavailable. Except, except, except ... It came down to trying to create a fake customer, with a fake order, with a fake item, with a fake item category, with a "paid" invoice, with exceptions sprinkled throughout the codebase to handle all of these special cases and probably more that I no longer remember. Then, and only then, could I write the code to provide "generic" mass mailings. Management decided it was easier to hire an outside company to handle the mailing list for them. If they had simply had a proper database design up front, they could have reused their existing system with little trouble. That''s what bad database design costs you and why I usually start with that before writing my software. Note: if you''re not familiar with database design, here''s a talk I give where I make it fairly simple to understand. I mostly avoid big words.'); INSERT INTO grafit_article (id, title, text) VALUES (33, 'GitHub Actions: built by you, run by us', 'Yesterday at GitHub Universe, we announced GitHub Actions, a new way to automate and customize your workflows. Configuring the apps and services that make up your development cycle takes significant time and effort. GitHub Actions applies open source principles to workflow automation, weaving together the tools you use from idea to production into one complete workflow. You can also create, share, and discover any actions your projects require, just as you would create, share, and discover code on GitHub. Learn more about actions As we prepared for Universe, we shared GitHub Actions with a group of customers, integrators, and open source maintainers to see what they could do. In just a few short weeks, talented teams and individuals alike have created hundreds of GitHub Actions. During today’s Universe keynote, we heard directly from developers, and we’re excited to share their work with you'); INSERT INTO grafit_article (id, title, text) VALUES (34, 'Git Submodule Vulnerability Announced ', ' The Git project has disclosed CVE-2018-17456, a vulnerability in Git that can cause arbitrary code to be executed when a user clones a malicious repository. Git v2.19.1 has been released with a fix, along with backports in v2.14.5, v2.15.3, v2.16.5, v2.17.2, and v2.18.1. We encourage all users to update their clients to protect themselves. Until you’ve updated, you can protect yourself by avoiding submodules from untrusted repositories. This includes commands such as git clone --recurse-submodules and git submodule update. Affected products GitHub Desktop GitHub Desktop versions 1.4.1 and older included an embedded version of Git that was affected by this vulnerability. 
We encourage all GitHub Desktop users to update to the newest version (1.4.2 and 1.4.3-beta0) available today in the Desktop app. Atom Atom included the same embedded Git and was also affected. Releases 1.31.2 and 1.32.0-beta3 include the patch. Ensure you’re on the latest Atom release by completing any of the following: Windows: From the toolbar, click Help -> Check for Updates MacOS: From the menu bar, click Atom -> Check for Update Linux: Update manually by downloading the latest release from atom.io Git on the command line and other clients In order to be protected from the vulnerability, you must update your command-line version of Git, and any other application that may include an embedded version of Git, as they are independent of each other. Additional notes Neither GitHub.com nor GitHub Enterprise are directly affected by the vulnerability. However, as with previously discovered vulnerabilities, GitHub.com will detect malicious repositories, and will reject pushes or API requests attempting to create them. Versions of GitHub Enterprise with this detection will ship on October 9. Details of the vulnerability This vulnerability is very similar to CVE-2017-1000117, as both are option-injection attacks related to submodules. In the earlier attack, a malicious repository would ship a .gitmodules file pointing one of its submodules to a remote repository with an SSH host starting with a dash (-). The ssh program—spawned by Git—would then interpret that as an option. This attack works in a similar way, except that the option-injection is against the child git clone itself. The problem was reported on September 23 by @joernchen, both to Git’s private security list, as well as to GitHub’s Bug Bounty program. Developers at GitHub worked with the Git community to develop a fix. The basic fix was clear from the report. However, due to to the similarity to CVE-2017-1000117, we also audited all of the .gitmodules values and implemented stricter checks as appropriate. These checks should prevent a similar vulnerability in another code path. We also implemented detection of potentially malicious submodules as part of Git’s object quality checks (which was made much easier by the infrastructure added during the last submodule-related vulnerability). The coordinated disclosure date of October 5 was selected by Git developers to allow packagers to prepare for the release. This also provided hosting sites (with custom implementations) ample time to detect and block the attack before it became public. Members of the Git community checked the JGit and libgit2 implementations. Those are not affected by the vulnerability because they clone submodules via function calls rather than separate commands. We were also able to use the time to scan all repositories on GitHub for evidence of the attack being used in the wild. We’re happy to report that no instances were found (and now, with our detection, none can be added). Please update your copy of Git soon, and happy cloning! '); INSERT INTO grafit_article (id, title, text) VALUES (21, 'Hackers Targeting Servers Running Database Services for Mining Cryptocurrency', 'Security researchers have discovered multiple attack campaigns conducted by an established Chinese criminal group that operates worldwide, targeting database servers for mining cryptocurrencies, exfiltrating sensitive data and building a DDoS botnet. 
The researchers from security firm GuardiCore Labs have analyzed thousands of attacks launched in recent months and identified at least three attack variants—Hex, Hanako, and Taylor—targeting different MS SQL and MySQL servers for both Windows and Linux. The goals of all the three variants are different—Hex installs cryptocurrency miners and remote access trojans (RATs) on infected machines, Taylor installs a keylogger and a backdoor, and Hanako uses infected devices to build a DDoS botnet. So far, researchers have recorded hundreds of Hex and Hanako attacks and tens of thousands of Taylor attacks each month and found that most compromised machines are based in China, and some in Thailand, the United States, Japan and others. To gain unauthorized access to the targeted database servers, the attackers use brute force attacks and then run a series of predefined SQL commands to gain persistent access and evade audit logs. What''s interesting? To launch the attacks against database servers and serve malicious files, attackers use a network of already compromised systems, making their attack infrastructure modular and preventing takedown of their malicious activities.'); INSERT INTO grafit_article (id, title, text) VALUES (22, 'RIP Open Source MySQL', ' This is an excellent opportunity for the Postgres community to step up an promote Postgres. rbanffy on Aug 18, 2012 [-] I think this would be a mistake. This is an excellent opportunity to demonstrate that anyone can fork the MySQL codebase and create other plug-in replacement databases with it, such as MariaDB and Drizzle. All that is lost is the MySQL name and brand. PostgreSQL users and developers must seize the opportunity to show businesses that free software cannot be killed, not even by mighty Oracle. They and, most notably, Microsoft, have been trying to kill it for more than a decade now. Because the anti-free-software FUD machine (fed in part by Oracle itself) is already having a wonderful time with this. Udo on Aug 18, 2012 [-] I wish I could mod this up a hundred times. PostgreSQL people themselves have been playing into the hands of corporate FUDders with their incessant and inappropriate peddling. MySQL is not your enemy, MS SQL Server is. Oracle''s software empire as a whole certainly is your enemy. Show some solidarity with a fellow open source project! MySQL and PostgreSQL represent two very different implementation philosophies, and being able to choose between them according to taste and merits is a good thing. Most of us have suspected that the MySQL project itself was going to die as it was acquired by Oracle, in the same way Open Office died when it was acquired by Oracle. This is a company where good software goes to expire, either due to a deliberate intention or gross incompetence I can''t say but I suspect it''s a mixture of both. However sad that may be for the MySQL (or OpenOffice) brand name, the code itself lives on and continues to evolve within a rich open source ecosystem. Hence, sensational and petulant "RIP $PRODUCTNAME" articles are unnecessary. There is no threat to existing projects based on MySQL or any other successful open source project for that matter. Not only will this stuff be free forever, it will also continue to grow and be developed on its own. The corporate assassination of open source projects will only work if we let it, it''s a purely psychological game. 
'); INSERT INTO grafit_article (id, title, text) VALUES (23, 'Free Text Sources', 'There are a few interesting things to talk about surrounding free and open textbooks. Quality is one. Usability is another. Why to write one (and/or, why not) is certainly critical. But where can you find these disruptive, open texts? Not all faculty know there are free and open texts they can use; finding free and/or open textbooks (or even knowing to look) can sometimes be a trick. I knew about one or two sources, and did a little bit more digging. Admittedly, many of the sources of free texts linked below have a technical bent. On one hand, this might be because math, computing, and the sciences are familiar with working openly and giving things away. On the other, it might be because I am a member of the computing faculty, and therefore am most familiar with resources in that space.'); INSERT INTO grafit_article (id, title, text) VALUES (24, 'Apache Software Foundation Public Mail Archives', 'A collection of all publicly available mail archives from the Apache55 Software Foundation (ASF), taken on July 11, 2011. This collection contains all publicly available email archives from the ASF''s 80+ projects (http://mail-archives.apache.org/mod_mbox/), including mailing lists such as Apache HTTPD Server, Apache Tomcat, Apache Lucene and Solr, Apache Hadoop and many more. Generally speaking, most projects have at least three lists: user, dev and commits, but some have more, some have less. The user lists are where users of the software ask questions on usage, while the dev list usually contains discussions on the development of the project (code, releases, etc.) The commit lists usually consists of automated notifications sent by the various ASF version control tools, like Subversion or CVS, and contain information about changes made to the project''s source code. Both tarballs and per project sets are available in the snapshot. The tarballs are organized according to project name. Thus, a-d.tar.gz contains all ASF projects that begin with the letters a, b, c or d, such as abdera.apache.org. Files within the project are usually gzipped mbox files. '); INSERT INTO grafit_article (id, title, text) VALUES (26, 'PostgreSQL - Overview', 'PostgreSQL is a powerful, open source object-relational database system. It has more than 15 years of active development phase and a proven architecture that has earned it a strong reputation for reliability, data integrity, and correctness. This tutorial will give you a quick start with PostgreSQL and make you comfortable with PostgreSQL programming. What is PostgreSQL? PostgreSQL (pronounced as post-gress-Q-L) is an open source relational database management system (DBMS) developed by a worldwide team of volunteers. PostgreSQL is not controlled by any corporation or other private entity and the source code is available free of charge. A Brief History of PostgreSQL PostgreSQL, originally called Postgres, was created at UCB by a computer science professor named Michael Stonebraker. Stonebraker started Postgres in 1986 as a follow-up project to its predecessor, Ingres, now owned by Computer Associates. 1977-1985 − A project called INGRES was developed. 
Proof-of-concept for relational databases Established the company Ingres in 1980 Bought by Computer Associates in 1994 1986-1994 − POSTGRES Development of the concepts in INGRES with a focus on object orientation and the query language - Quel The code base of INGRES was not used as a basis for POSTGRES Commercialized as Illustra (bought by Informix, bought by IBM) 1994-1995 − Postgres95 Support for SQL was added in 1994 Released as Postgres95 in 1995 Re-released as PostgreSQL 6.0 in 1996 Establishment of the PostgreSQL Global Development Team Key Features of PostgreSQL PostgreSQL runs on all major operating systems, including Linux, UNIX (AIX, BSD, HP-UX, SGI IRIX, Mac OS X, Solaris, Tru64), and Windows. It supports text, images, sounds, and video, and includes programming interfaces for C / C++, Java, Perl, Python, Ruby, Tcl and Open Database Connectivity (ODBC). PostgreSQL supports a large part of the SQL standard and offers many modern features including the following − Complex SQL queries SQL Sub-selects Foreign keys Trigger Views Transactions Multiversion concurrency control (MVCC) Streaming Replication (as of 9.0) Hot Standby (as of 9.0) You can check official documentation of PostgreSQL to understand the above-mentioned features. PostgreSQL can be extended by the user in many ways. For example by adding new − Data types Functions Operators Aggregate functions Index methods Procedural Languages Support PostgreSQL supports four standard procedural languages, which allows the users to write their own code in any of the languages and it can be executed by PostgreSQL database server. These procedural languages are - PL/pgSQL, PL/Tcl, PL/Perl and PL/Python. Besides, other non-standard procedural languages like PL/PHP, PL/V8, PL/Ruby, PL/Java, etc., are also supported.'); INSERT INTO grafit_article (id, title, text) VALUES (27, 'Setup PostgreSQL on Windows with Docker', 'Over the weekend I finally got the chance to start reading A Curious Moon by Rob Conery which is a book on learning PostgreSQL by following the fictional Dee Yan as she is thrown into database administrator role at an aerospace startup. I have a lot of experience using Microsoft’s SQL Server, but up until now, I haven’t touched PostgreSQL. For personal projects SQL Server’s cost and be prohibitive and the release of Rob’s book added up to a good time to give PostgreSQL a try. Install Directly or not? On the download section of the official Postgres site, there is an option to download an installer. This is the route I was going to at first, but in Rob’s book, he suggests using a VM for Postgres installation on Windows. This kicked off a lot of searching on my part and didn’t find a good definitive answer on why that is or isn’t the way to do. In the end, I decided to try and run the Postgres process using Docker instead installing directly on Windows or dealing with a full VM. Installing Docker Head to this link and click the Get Docker link to download the installer. After the install is complete you will have to log out and back in. When I logged back in I got a message about Hyper-V not being enabled. After logging back in I then got the following message about hardware-assisted virtualization not being enabled. After tweaking my BIOS settings and logging back in I was greeted by the Docker welcome screen. Open a command prompt and run the following command. docker run hello-world You should output that starts with the following if your installation is working. Hello from Docker! 
This message shows that your installation appears to be working correctly. What about Postgres? Getting up and going with a container running Postgres was pretty simple and could be done with the following command which will create a container and expose the port used by Postgres so it can be accessed from the host. docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d postgres The problem with this approach is if you ever need to rebuild the container for some reason, like a new version of Postgres is released, your data will be lost. Thankfully I found this blog post which shows how to use a secondary container for the data leaving the Postgres container able to be destroyed and recreated as needed. The following is the command I used to create my data container. docker create -v /var/lib/postgresql/data --name PostgresData alpine The above creates a container named PostgresData based on the Alpine image. It is important that the -v parameter matches the path that Postgres expects. Now that we have a container that will keep our data safe let’s create the actual Postgres container with the following command. docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d --volumes-from PostgresData postgres The only difference from the first example run command is the addition of –volumes-from PostgresData which tells the container to use the PostgresData container. If you run the docker ps -a command it will show you all your containers. As you can see in my example I have two containers only one of which is actually running. Make sure you don’t remove the data container just because it will never show as running. '); INSERT INTO grafit_article (id, title, text) VALUES (28, 'DIY: A PostgreSQL database server setup anyone can handle', 'When it comes to databases, I''m a fan of MySQL. The open source database can handle just about any load you want to throw at it, and it has lots of powerful tools that can be used to manage it. The other popular open source database is PostgreSQL, which is cross-platform and is used by numerous applications. Although PostgreSQL is often seen as being as powerful as MySQL, it doesn''t have nearly the number of available tools to make setup and management as easy as its competition. So I''ve written this handy PostgreSQL primer on how to get your database server up and running and ready to use. (Although PostgreSQL is cross-platform, I demonstrate the installation and setup on a Ubuntu 11.04 machine because it''s my platform of choice. The translation to other platforms should be simple.) Step 1: Install PostgreSQL Here are the installation steps on Ubuntu (this installation will also work on any Debian-based distribution): Open a terminal window. Issue the command sudo apt-get install postgresql. Type the sudo password necessary to give you admin rights and hit Enter. Allow apt to pick up any necessary dependencies. Once the installation is complete, it''s time to set this baby up. Step 2: Change the default user password Caution: If you don''t follow this step, you will not be able to add databases and administer PostgreSQL, and the database will not be secure. Here''s how to change the password for the default user. The user in question is postgres, and the password is changed like so: Open a terminal window. Issue the command sudo passwd postgres. Type (and confirm) that password to be used for this user. 
The postgres user will be the only user on your system that can open the PostgreSQL prompt without defining a database, which means postgres is the only user who can administer PostgreSQL. To test this, change to the postgres user with the command su - postgres and then enter the command psql. You should now be at the Postgres prompt, which looks like: postgres=# All other users have to gain access to the prompt like so: psql DB_NAME where DB_NAME is the name of an existing database. '); INSERT INTO grafit_article (id, title, text) VALUES (31, 'The Marketing Behind MongoDB', ' 100% of my friends who have used Mongo/similar NoSQL have given up and had a nasty rewrite back to pgSQL. This seems to be the journey: 1. Lack of migrations is awesome! We can iterate so quickly for MVP 2. Get users 3. Add features, still enjoying the speed of iteration 4. Get more users 5. Start building reporting features for enterprise/customer support/product metrics (ie: when the real potential success starts) 6. Realise you desperately need joins, transactions and other SQL features 7. Pause product dev for 1-3+ months to migrate back to SQL, or do some weird parallel development process to move it piecemeal back. I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with? My thought is definitely yes. brandur on Aug 29, 2017 [-] > I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with? I''ve used Postgres and Mongo pretty extensively, and for any reasonably seasoned developer, the startup overhead of an SQL system is a myth. There may upfront cost to learning how an RDMS and SQL work in the first place, but once you''re familiar with them, they''ll be faster than Mongo on any new project. The schemaless concept of a document database seems to be the major selling factor in velocity of movement, but once you''ve got a good handle on a migration framework in the vein of ActiveRecord or other popular software, that''s negated completely. It also really doesn''t take long before schemaless starts to cause big problems for you in terms of data consistency -- it''s not just the big players that get bitten by this. The simplified query language is another one. SQL is a little bit obtuse, but it''s not that bad once you have a handle on it, and a lot of people are familiar with it. Once you add in an ORM layer, the lazy-style access of a framework like Sequel or SQLAlchemy makes the developer experience quite a bit better than any Mongo APIs that I''ve seen. Also, after you get beyond trivial usage, SQL''s flexibility so wildly outstrips Mongo''s query documents that it''s not even worth talking about. Postgres on the other hand ships with a great management CLI, a very powerful REPL (psql), and features like data types/constraints/transactions that guarantee you correctness with zero effort on your part. I can only speak for myself, but I''d take Postgres to the hackathon any day of the week. martinald on Aug 29, 2017 [-] I totally agree with you, and started writing something about how understanding a good ORM takes nearly all the headache away. I think the thing people do find slow is a lot of ''documents within documents'' in SQL. 
It turns out this is usually a bad development pattern long term but it is super fast being able to just add docs inside docs with no configuration. It feels very slow writing foreign keys, navigation props and schemas for this in SQL vs JSON, where you can just dump your object in and you''re done. Basically; I think with noSQL you get some very short term gain for a lot of long term pain, and you''re right, ORMs and other tooling solves this mostly. I myself fell for this trap, and while it was a nightmare it actually matured me more as a professional more than anything I''ve ever done recently. Regardless of crazy hype, I don''t think I''ll ever fall for a solution so easily without evaluating it properly. I think I assumed the "crowd" had done the tech due diligence on this stuff and it definitely wasn''t the case. '); INSERT INTO grafit_article (id, title, text) VALUES (32, 'Countless NoSQL databases competed to be the database of choice', 'n 2013, 10gen — the company behind MongoDB — moved into a large 30,000 square foot office in Midtown Manhattan. The transfer into the former New York Times building capped off a tremendous period of growth: the database boasted 4 million downloads, the MongoDB User Groups already attracted 15,000 members, and ~10,000 people had attended a global event in 2012. Their offices were truly global from London to Sydney to Dublin and Barcelona — and a requisite west coast headquarters in Palo Alto. Despite the traction, many startups using MongoDB faced their own challenges. One part of MongoDB’s success among startups was because some didn''t critically assess 10gen’s marketing message. As engineers, we often discuss technical attacks (e.g., DDoS, Sybil attacks, security vulnerabilities), but need to spend time debating how to protect ourselves from marketing “attacks”.1 Today, developer marketing is subtle — third party blog posts, content marketing disguised as engineering lessons, biased talks, and sponsored hackathons — not clearly marked content from vendors. As such, startup engineering decisions can hinge on sources that are not impartial. A large amount of "engineering" content — even when written by engineers — is actually marketing, rather than thoughtful content whose aim is to help you make the best decision. Previously, we looked at the hype around NoSQL and common engineering mistakes to see how MongoDB became so successful. Now, let''s take a look into 10gen''s marketing strategy — as told by their employees.2 10gen’s marketing strategy is an increasingly common playbook and understanding it is useful for future developer tool decisions.'); INSERT INTO grafit_article (id, title, text) VALUES (30, 'Comment Arrango', ' ArangoDB always makes for exciting benchmark posts. I could see myself there in a bowler hat with a fistful of racing chits screaming “go, Postgres, go.” I’d love to see a competition were the developers of each database got to use the same hardware and data then tune the hell out of their configs, queries, and indices. Red Bull could sponsor it. I’d buy a T-shirt. kbenson 8 months ago [-] That doesn''t sound that hard to start. Something like RealWorld[1] and the Web Framework Benchmarks[2] combined but for DB workloads. Have one dataset that includes data amenable to OLAP and OLTP, but have separate tests each consisting of OLAP queries, OLTP queries, and combined queries. Choose a low-end, mid-range and high-end set of AWS or GCE instances/configs to normalize against. 
Let people submit pull requests with new technologies or configs. You''d want to get some funding to run the tests (or maybe solicit Google or Amazon to see if you could get the instance time donated once a month or something. If you started small, with maybe a portion of these features, and then scaled up over time, you might actually get to the point where you had tests that emulated a power failure, or master/slave and dual master scenarios and how they handle certain common network errors (split-brain). That would be an amazing resource. Edit: It occurs to me I probably should have read more of the article, since this is sort of what they are doing already... 1: https://github.com/gothinkster/realworld 2: https://www.techempower.com/benchmarks/ etxm 8 months ago [-] Yeah after I posted it I started thinking about what it would take and what that would actually look like... and how you’d cheat :) It would probably require a few different categories with some sort of output assertion to validate the query performed right and a means of tracking CPU, usage ram usage, and execution time. It would be cool to see things like disaster recovery and chaos proofing as well. '); INSERT INTO grafit_article (id, title, text) VALUES (35, 'Applying machine intelligence to GitHub security alerts ', 'Last year, we released security alerts that track security vulnerabilities in Ruby and JavaScript packages. Since then, we’ve identified more than four million of these vulnerabilities and added support for Python. In our launch post, we mentioned that all vulnerabilities with CVE IDs are included in security alerts, but sometimes there are vulnerabilities that are not disclosed in the National Vulnerability Database. Fortunately, our collection of security alerts can be supplemented with vulnerabilities detected from activity within our developer community. Leveraging the community There are many places a project can publicize security fixes within a new version: the CVE feed, various mailing lists and open source groups, or even within its release notes or changelog. Regardless of how projects share this information, some developers within the GitHub community will see the advisory and immediately bump their required versions of the dependency to a known safe version. If detected, we can use the information in these commits to generate security alerts for vulnerabilities which may not have been published in the CVE feed. On an average day, the dependency graph can track around 10,000 commits to dependency files for any of our supported languages. We can’t manually process this many commits. Instead, we depend on machine intelligence to sift through them and extract those that might be related to a security release. For this purpose, we created a machine learning model that scans text associated with public commits (the commit message and linked issues or pull requests) to filter out those related to possible security upgrades. With this smaller batch of commits, the model uses the diff to understand how required version ranges have changed. Then it aggregates across a specific timeframe to get a holistic view of all dependencies that a security release might affect. Finally, the model outputs a list of packages and version ranges it thinks require an alert and currently aren’t covered by any known CVE in our system. Always quality focused No machine learning model is perfect. 
While machine intelligence can sift through thousands of commits in an instant, this anomaly-detection algorithm will still generate false positives for packages where no security patch was released. Security alert quality is a focus for us, so we review all model output before the community receives an alert. Learn more'); INSERT INTO grafit_article (id, title, text) VALUES (29, 'Performance Benchmark 2018', 'I''ve stopped reading database benchmarks, because they are extremely vague. Instead I spend my time optimizing my current solution/stack. For example Postgresql has hundreds of knobs that you can adjust for almost every scenario you can imagine. Sometimes you have a special query and increase the work_mem just for that session. Other cases you adjust the cost settings for another query/session. You can analyze your indexes and index types. And sometimes you need to rewrite parts of a big query. Learning all this takes time, you are much better off learning more about your chosen technology stack than switching to another technology stack. Though in a few rare races, you need a different technology to solve your business problem. In most cases they complement your existing solution, like Elasticsearch/Solr for full-text search or Clickhouse for OLAP workloads. maxxxxx 8 months ago [-] Agreed. Switching to another system is expensive and the benefit is pretty questionable. emsy 8 months ago [-] Unless you hit a very specific use-case/bottleneck, which I only ever witnessed once. TremendousJudge 8 months ago [-] expand, please? maxxxxx 8 months ago [-] I imagine something very specific like having a lot of inserts into a table and that being your main use case. Depending on your data some databases may be better than others and that should be easy to measure. In most real-world cases the requirements however are not very clear and often conflicting so it''s much harder to get data that shows the performance of one system over the other. gopalv 8 months ago [-] > Depending on your data some databases may be better than others and that should be easy to measure. And the performance difference could be an accidental feature of the design and completely unintentional. Postgres for instance has a native data engine, so it can store the exact row-ids for a row into an index, but this means that every update to the row needs all indexes to be updated. Mysql has many data engines (InnoDB and MyISAM to start with), to the row-id is somewhat opaque, so the index stores the primary key which can be pushed to the data engine scans and then have it lookup a row-id internally. This needs an index to be touched for the columns you modify explicitly or if the primary key is updated (which is a usual no-no due to UNIQUE lookup costs). When you have a single wide table with a huge number of indexes, where you update a lot of dimensions frequently, the performance difference between these two solutions is architectural. And if you lookup along an index with few updates, but long running open txns, that is also materially different - one lookup versus two. Though how it came about isn''t really intentional. '); """), ]
159
0
0
50,790
0
0
0
16
89
ddfea5bd5d0e0cf8608cb0a07599e5e6b06f933e
494
py
Python
Python Script Tools/18.0 Create Dataframe And Store It In a CSV.py
juan1305/0.11-incremento_descremento
954ddb32180c3197e5b01cf95d20f5325ada8a29
[ "MIT" ]
1
2020-04-13T00:16:16.000Z
2020-04-13T00:16:16.000Z
Python Script Tools/18.0 Create Dataframe And Store It In a CSV.py
juan1305/0.11-incremento_descremento
954ddb32180c3197e5b01cf95d20f5325ada8a29
[ "MIT" ]
null
null
null
Python Script Tools/18.0 Create Dataframe And Store It In a CSV.py
juan1305/0.11-incremento_descremento
954ddb32180c3197e5b01cf95d20f5325ada8a29
[ "MIT" ]
null
null
null
import pandas as pd

# Create a dictionary where each key is a column to create
# and its value holds that column's data
data = {'paises': ['Mexico', 'España', 'Estados Unidos'],
        'Ciudades': ['Monterrey', 'Madrid', 'Nueva York'],
        'Casos': [4291, 3829, 10283]}

# Create a DataFrame from the dictionary and
# name the columns to create
df = pd.DataFrame(data, columns=['paises', 'Ciudades', 'Casos'])

# Print the info
print(df)

# Store it in a CSV file
df.to_csv('myDataFrame.csv')
27.444444
64
0.700405
import pandas as pd # Crear diccionario donde key sera columna a crear # y su valuela informacion de cada columna data = {'paises': ['Mexico', 'España', 'Estados Unidos'], 'Ciudades': ['Monterrey,' 'Madrid', 'Nueva York'], 'Casos': [4291, 3829, 10283]} # Crear un DataFrame pasando el diccioario y # señalizar las columnas creadas df = pd.DataFrame(data, columns=['paises', 'Ciudades', 'Casos']) # Imprimir la info print(df) # Almacenar en archivo CSV df.to_csv('myDataFrame.csv')
4
0
0
0
0
0
0
0
0
d3b313c3dd0ec4a73ea6c33bd5b776e0285a4fc6
30,581
py
Python
pxr/usd/usdLux/testenv/testUsdLuxLight.py
yurivict/USD
3b097e3ba8fabf1777a1256e241ea15df83f3065
[ "Apache-2.0" ]
1
2021-09-25T12:49:37.000Z
2021-09-25T12:49:37.000Z
pxr/usd/usdLux/testenv/testUsdLuxLight.py
yurivict/USD
3b097e3ba8fabf1777a1256e241ea15df83f3065
[ "Apache-2.0" ]
null
null
null
pxr/usd/usdLux/testenv/testUsdLuxLight.py
yurivict/USD
3b097e3ba8fabf1777a1256e241ea15df83f3065
[ "Apache-2.0" ]
1
2018-10-03T19:08:33.000Z
2018-10-03T19:08:33.000Z
#!/pxrpythonsubst
#
# Copyright 2017 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
#    names, trademarks, service marks, or product names of the Licensor
#    and its affiliates, except as required to comply with Section 4(c) of
#    the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.

from __future__ import print_function

import unittest

if __name__ == '__main__':
    unittest.main()
48.083333
82
0.632059
#!/pxrpythonsubst # # Copyright 2017 Pixar # # Licensed under the Apache License, Version 2.0 (the "Apache License") # with the following modification; you may not use this file except in # compliance with the Apache License and the following modification to it: # Section 6. Trademarks. is deleted and replaced with: # # 6. Trademarks. This License does not grant permission to use the trade # names, trademarks, service marks, or product names of the Licensor # and its affiliates, except as required to comply with Section 4(c) of # the License and to reproduce the content of the NOTICE file. # # You may obtain a copy of the Apache License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the Apache License with the above modification is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the Apache License for the specific # language governing permissions and limitations under the Apache License. from __future__ import print_function from pxr import Gf, Sdf, Sdr, Tf, Usd, UsdGeom, UsdLux, UsdShade, Plug import unittest, math class TestUsdLuxLight(unittest.TestCase): def test_BlackbodySpectrum(self): warm_color = UsdLux.BlackbodyTemperatureAsRgb(1000) whitepoint = UsdLux.BlackbodyTemperatureAsRgb(6500) cool_color = UsdLux.BlackbodyTemperatureAsRgb(10000) # Whitepoint is ~= (1,1,1) assert Gf.IsClose(whitepoint, Gf.Vec3f(1.0), 0.1) # Warm has more red than green or blue assert warm_color[0] > warm_color[1] assert warm_color[0] > warm_color[2] # Cool has more blue than red or green assert cool_color[2] > cool_color[0] assert cool_color[2] > cool_color[1] def test_BasicConnectableLights(self): # Try checking connectableAPI on core lux types first before going # through the prim. self.assertTrue(UsdShade.ConnectableAPI.HasConnectableAPI( UsdLux.RectLight)) self.assertTrue(UsdShade.ConnectableAPI.HasConnectableAPI( UsdLux.PluginLightFilter)) stage = Usd.Stage.CreateInMemory() rectLight = UsdLux.RectLight.Define(stage, '/RectLight') self.assertTrue(rectLight) lightAPI = rectLight.LightAPI() self.assertTrue(lightAPI) self.assertTrue(lightAPI.ConnectableAPI()) # Rect light has the following built-in inputs attributes. inputNames = ['color', 'colorTemperature', 'diffuse', 'enableColorTemperature', 'exposure', 'height', 'intensity', 'normalize', 'specular', 'texture:file', 'width'] # GetInputs returns only authored inputs by default self.assertEqual(lightAPI.GetInputs(), []) # GetInputs(false) is a super-set of all the built-ins. # There could be other inputs coming from any auto applied APISchemas. allInputs = [inputName.GetBaseName() for inputName in lightAPI.GetInputs(onlyAuthored=False)] self.assertTrue(set(inputNames).issubset(set(allInputs))) # Verify each input's attribute is prefixed. for name in inputNames: self.assertEqual(lightAPI.GetInput(name).GetAttr().GetName(), "inputs:" + name) # Verify input attributes match the getter API attributes. self.assertEqual(lightAPI.GetInput('color').GetAttr(), rectLight.GetColorAttr()) self.assertEqual(lightAPI.GetInput('texture:file').GetAttr(), rectLight.GetTextureFileAttr()) # Create a new input, and verify that the input interface conforming # attribute is created. lightInput = lightAPI.CreateInput('newInput', Sdf.ValueTypeNames.Float) self.assertIn(lightInput, lightAPI.GetInputs()) # By default GetInputs() returns onlyAuthored inputs, of which # there is now 1. 
self.assertEqual(len(lightAPI.GetInputs()), 1) self.assertEqual(lightAPI.GetInput('newInput'), lightInput) self.assertEqual(lightInput.GetAttr(), lightAPI.GetPrim().GetAttribute("inputs:newInput")) # Rect light has no authored outputs. self.assertEqual(lightAPI.GetOutputs(), []) # Rect light has no built-in outputs, either. self.assertEqual(lightAPI.GetOutputs(onlyAuthored=False), []) # Create a new output, and verify that the output interface conforming # attribute is created. lightOutput = lightAPI.CreateOutput('newOutput', Sdf.ValueTypeNames.Float) self.assertEqual(lightAPI.GetOutputs(), [lightOutput]) self.assertEqual(lightAPI.GetOutputs(onlyAuthored=False), [lightOutput]) self.assertEqual(lightAPI.GetOutput('newOutput'), lightOutput) self.assertEqual(lightOutput.GetAttr(), lightAPI.GetPrim().GetAttribute("outputs:newOutput")) # Do the same with a light filter lightFilter = UsdLux.LightFilter.Define(stage, '/LightFilter') self.assertTrue(lightFilter) self.assertTrue(lightFilter.ConnectableAPI()) # Light filter has no built-in inputs. self.assertEqual(lightFilter.GetInputs(), []) # Create a new input, and verify that the input interface conforming # attribute is created. filterInput = lightFilter.CreateInput('newInput', Sdf.ValueTypeNames.Float) self.assertEqual(lightFilter.GetInputs(), [filterInput]) self.assertEqual(lightFilter.GetInput('newInput'), filterInput) self.assertEqual(filterInput.GetAttr(), lightFilter.GetPrim().GetAttribute("inputs:newInput")) # Light filter has no built-in outputs. self.assertEqual(lightFilter.GetOutputs(), []) self.assertEqual(lightFilter.GetOutputs(onlyAuthored=False), []) # Create a new output, and verify that the output interface conforming # attribute is created. filterOutput = lightFilter.CreateOutput('newOutput', Sdf.ValueTypeNames.Float) self.assertEqual(lightFilter.GetOutputs(), [filterOutput]) self.assertEqual(lightFilter.GetOutputs(onlyAuthored=False), [filterOutput]) self.assertEqual(lightFilter.GetOutput('newOutput'), filterOutput) self.assertEqual(filterOutput.GetAttr(), lightFilter.GetPrim().GetAttribute("outputs:newOutput")) # Test the connection behavior customization. # Create a connectable prim with an output under the light. lightGraph = UsdShade.NodeGraph.Define(stage, '/RectLight/Prim') self.assertTrue(lightGraph) lightGraphOutput = lightGraph.CreateOutput( 'graphOut', Sdf.ValueTypeNames.Float) self.assertTrue(lightGraphOutput) # Create a connectable prim with an output under the light filter. filterGraph = UsdShade.NodeGraph.Define(stage, '/LightFilter/Prim') self.assertTrue(filterGraph) filterGraphOutput = filterGraph.CreateOutput( 'graphOut', Sdf.ValueTypeNames.Float) self.assertTrue(filterGraphOutput) # Light outputs can be connected. self.assertTrue(lightOutput.CanConnect(lightGraphOutput)) self.assertTrue(lightOutput.CanConnect(filterGraphOutput)) # Light inputs diverge from the default behavior and should be # connectable across its own scope (encapsulation is not required) self.assertTrue(lightInput.CanConnect(lightOutput)) self.assertTrue(lightInput.CanConnect(lightGraphOutput)) self.assertTrue(lightInput.CanConnect(filterGraphOutput)) # From the default behavior light filter outputs cannot be connected. 
self.assertFalse(filterOutput.CanConnect(lightGraphOutput)) self.assertFalse(filterOutput.CanConnect(filterGraphOutput)) # Light filters inputs diverge from the default behavior and should be # connectable across its own scope (encapsulation is not required) self.assertTrue(filterInput.CanConnect(filterOutput)) self.assertTrue(filterInput.CanConnect(filterGraphOutput)) self.assertTrue(filterInput.CanConnect(lightGraphOutput)) # The shaping API can add more connectable attributes to the light # and implements the same connectable interface functions. We test # those here. shapingAPI = UsdLux.ShapingAPI.Apply(lightAPI.GetPrim()) self.assertTrue(shapingAPI) self.assertTrue(shapingAPI.ConnectableAPI()) # Verify input attributes match the getter API attributes. self.assertEqual(shapingAPI.GetInput('shaping:cone:angle').GetAttr(), shapingAPI.GetShapingConeAngleAttr()) self.assertEqual(shapingAPI.GetInput('shaping:focus').GetAttr(), shapingAPI.GetShapingFocusAttr()) # These inputs have the same connectable behaviors as all light inputs, # i.e. they should also diverge from the default behavior of only be # connected to sources from immediate descendant (encapsultated) prims # of the light. shapingInput = shapingAPI.GetInput('shaping:focus') self.assertTrue(shapingInput.CanConnect(lightOutput)) self.assertTrue(shapingInput.CanConnect(lightGraphOutput)) self.assertTrue(shapingInput.CanConnect(filterGraphOutput)) # The shadow API can add more connectable attributes to the light # and implements the same connectable interface functions. We test # those here. shadowAPI = UsdLux.ShadowAPI.Apply(lightAPI.GetPrim()) self.assertTrue(shadowAPI) self.assertTrue(shadowAPI.ConnectableAPI()) # Verify input attributes match the getter API attributes. self.assertEqual(shadowAPI.GetInput('shadow:color').GetAttr(), shadowAPI.GetShadowColorAttr()) self.assertEqual(shadowAPI.GetInput('shadow:distance').GetAttr(), shadowAPI.GetShadowDistanceAttr()) # These inputs have the same connectable behaviors as all light inputs, # i.e. they should also diverge from the default behavior of only be # connected to sources from immediate descendant (encapsultated) prims # of the light. shadowInput = shadowAPI.GetInput('shadow:color') self.assertTrue(shadowInput.CanConnect(lightOutput)) self.assertTrue(shadowInput.CanConnect(lightGraphOutput)) self.assertTrue(shadowInput.CanConnect(filterGraphOutput)) # Even though the shadow and shaping API schemas provide connectable # attributes and an interface for the ConnectableAPI, the typed schema # of the prim is still what provides its connectable behavior. Here # we verify that applying these APIs to a prim whose type is not # connectable does NOT cause the prim to conform to the Connectable API. nonConnectablePrim = stage.DefinePrim("/Sphere", "Sphere") shadowAPI = UsdLux.ShadowAPI.Apply(nonConnectablePrim) self.assertTrue(shadowAPI) self.assertFalse(shadowAPI.ConnectableAPI()) shapingAPI = UsdLux.ShapingAPI.Apply(nonConnectablePrim) self.assertTrue(shapingAPI) self.assertFalse(shapingAPI.ConnectableAPI()) def test_DomeLight_OrientToStageUpAxis(self): stage = Usd.Stage.CreateInMemory() # Try Y-up first. Explicitly set this to override any site-level # override. UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y) # Create a dome. light = UsdLux.DomeLight.Define(stage, '/dome') # No Xform ops to begin with. self.assertEqual(light.GetOrderedXformOps(), []) # Align to up axis. light.OrientToStageUpAxis() # Since the stage is already Y-up, no additional xform op was required. 
        self.assertEqual(light.GetOrderedXformOps(), [])

        # Now change the stage to Z-up and re-align the dome.
        UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.z)
        light.OrientToStageUpAxis()
        # That should require a +90 deg rotate on X.
        ops = light.GetOrderedXformOps()
        self.assertEqual(len(ops), 1)
        self.assertEqual(ops[0].GetBaseName(),
            UsdLux.Tokens.orientToStageUpAxis)
        self.assertEqual(ops[0].GetOpType(), UsdGeom.XformOp.TypeRotateX)
        self.assertEqual(ops[0].GetAttr().Get(), 90.0)

    def test_UsdLux_HasConnectableAPI(self):
        self.assertTrue(UsdShade.ConnectableAPI.HasConnectableAPI(
            UsdLux.LightAPI))
        self.assertTrue(UsdShade.ConnectableAPI.HasConnectableAPI(
            UsdLux.LightFilter))

    def test_GetShaderId(self):
        # Test the LightAPI shader ID API
        # UsdLuxLightAPI and UsdLuxLightFilter implement the same API for
        # their shaderId attributes so we can test them using the same function.
        def _TestShaderIDs(lightOrFilter, shaderIdAttrName):
            # The default render context's shaderId attribute does exist in the
            # API. These attributes do not yet exist for other contexts.
            self.assertEqual(
                lightOrFilter.GetShaderIdAttrForRenderContext("").GetName(),
                shaderIdAttrName)
            self.assertFalse(
                lightOrFilter.GetShaderIdAttrForRenderContext("ri"))
            self.assertFalse(
                lightOrFilter.GetShaderIdAttrForRenderContext("other"))

            # By default LightAPI shader IDs are empty for all render contexts.
            self.assertEqual(lightOrFilter.GetShaderId([]), "")
            self.assertEqual(lightOrFilter.GetShaderId(["other", "ri"]), "")

            # Set a value in the default shaderID attr.
            lightOrFilter.GetShaderIdAttr().Set("DefaultLight")

            # No new attributes were created.
            self.assertEqual(
                lightOrFilter.GetShaderIdAttrForRenderContext("").GetName(),
                shaderIdAttrName)
            self.assertFalse(
                lightOrFilter.GetShaderIdAttrForRenderContext("ri"))
            self.assertFalse(
                lightOrFilter.GetShaderIdAttrForRenderContext("other"))

            # The default value is now the shaderID returned for all render
            # contexts since no render contexts define their own shader ID
            self.assertEqual(
                lightOrFilter.GetShaderId([]), "DefaultLight")
            self.assertEqual(
                lightOrFilter.GetShaderId(["other", "ri"]), "DefaultLight")

            # Create a shaderID attr for the "ri" render context with a new ID
            # value.
            lightOrFilter.CreateShaderIdAttrForRenderContext("ri", "SphereLight")

            # The shaderId attr for "ri" now exists
            self.assertEqual(
                lightOrFilter.GetShaderIdAttrForRenderContext("").GetName(),
                shaderIdAttrName)
            self.assertEqual(
                lightOrFilter.GetShaderIdAttrForRenderContext("ri").GetName(),
                "ri:" + shaderIdAttrName)
            self.assertFalse(
                lightOrFilter.GetShaderIdAttrForRenderContext("other"))

            # When passed no render contexts we still return the default
            # shader ID.
            self.assertEqual(lightOrFilter.GetShaderId([]), "DefaultLight")

            # Since we defined a shader ID for "ri" but not "other", the "ri"
            # shader ID is returned when queryring for both. Querying for just
            # "other" falls back to the default shaderID
            self.assertEqual(
                lightOrFilter.GetShaderId(["other", "ri"]), "SphereLight")
            self.assertEqual(
                lightOrFilter.GetShaderId(["ri"]), "SphereLight")
            self.assertEqual(
                lightOrFilter.GetShaderId(["other"]), "DefaultLight")

        # Create an untyped prim with a LightAPI applied and test the ShaderId
        # functions of UsdLux.LightAPI
        stage = Usd.Stage.CreateInMemory()
        prim = stage.DefinePrim("/PrimLight")
        light = UsdLux.LightAPI.Apply(prim)
        self.assertTrue(light)
        _TestShaderIDs(light, "light:shaderId")

        # Create a LightFilter prim and test the ShaderId functions of
        # UsdLux.LightFilter
        lightFilter = UsdLux.LightFilter.Define(stage, "/PrimLightFilter")
        self.assertTrue(lightFilter)
        _TestShaderIDs(lightFilter, "lightFilter:shaderId")

    def test_LightExtentAndBBox(self):
        # Test extent and bbox computations for the boundable lights.

        time = Usd.TimeCode.Default()

        # Helper for computing the extent and bounding boxes for a light and
        # comparing against an expect extent pair.
        def _VerifyExtentAndBBox(light, expectedExtent):
            self.assertEqual(
                UsdGeom.Boundable.ComputeExtentFromPlugins(light, time),
                expectedExtent)
            self.assertEqual(
                light.ComputeLocalBound(time, "default"),
                Gf.BBox3d(
                    Gf.Range3d(
                        Gf.Vec3d(expectedExtent[0]),
                        Gf.Vec3d(expectedExtent[1])),
                    Gf.Matrix4d(1.0)))

        # Create a prim of each boundable light type.
        stage = Usd.Stage.CreateInMemory()
        rectLight = UsdLux.RectLight.Define(stage, "/RectLight")
        self.assertTrue(rectLight)
        diskLight = UsdLux.DiskLight.Define(stage, "/DiskLight")
        self.assertTrue(diskLight)
        cylLight = UsdLux.CylinderLight.Define(stage, "/CylLight")
        self.assertTrue(cylLight)
        sphereLight = UsdLux.SphereLight.Define(stage, "/SphereLight")
        self.assertTrue(sphereLight)

        # Verify the extent and bbox computations for each light given its
        # fallback attribute values.
        _VerifyExtentAndBBox(rectLight, [(-0.5, -0.5, 0.0), (0.5, 0.5, 0.0)])
        _VerifyExtentAndBBox(diskLight, [(-0.5, -0.5, 0.0), (0.5, 0.5, 0.0)])
        _VerifyExtentAndBBox(cylLight, [(-0.5, -0.5, -0.5), (0.5, 0.5, 0.5)])
        _VerifyExtentAndBBox(sphereLight, [(-0.5, -0.5, -0.5), (0.5, 0.5, 0.5)])

        # Change the size related attribute of each light and verify the extents
        # and bounding boxes are updated.
        rectLight.CreateWidthAttr(4.0)
        rectLight.CreateHeightAttr(6.0)
        _VerifyExtentAndBBox(rectLight, [(-2.0, -3.0, 0.0), (2.0, 3.0, 0.0)])
        diskLight.CreateRadiusAttr(5.0)
        _VerifyExtentAndBBox(diskLight, [(-5.0, -5.0, 0.0), (5.0, 5.0, 0.0)])
        cylLight.CreateRadiusAttr(4.0)
        cylLight.CreateLengthAttr(10.0)
        _VerifyExtentAndBBox(cylLight, [(-4.0, -4.0, -5.0), (4.0, 4.0, 5.0)])
        sphereLight.CreateRadiusAttr(3.0)
        _VerifyExtentAndBBox(sphereLight, [(-3.0, -3.0, -3.0), (3.0, 3.0, 3.0)])

        # Special case for portal light. Portal lights don't have any attributes
        # that affect their extent. Extent values are used only when
        # explicitly authored but portal lights' do register a
        # ComputeExtentFuction, which sets the extent as default from the
        # schema.
        portalLight = UsdLux.PortalLight.Define(stage, "/PortalLight")
        self.assertTrue(portalLight)
        _VerifyExtentAndBBox(portalLight, [(-0.5, -0.5, 0.0), (0.5, 0.5, 0.0)])

        # For completeness verify that distant and dome lights are not
        # boundable.
        domeLight = UsdLux.DomeLight.Define(stage, "/DomeLight")
        self.assertTrue(domeLight)
        self.assertFalse(UsdGeom.Boundable(domeLight))
        distLight = UsdLux.DistantLight.Define(stage, "/DistLight")
        self.assertTrue(distLight)
        self.assertFalse(UsdGeom.Boundable(distLight))

    def test_SdrShaderNodesForLights(self):
        """
        Test the automatic registration of SdrShaderNodes for all the UsdLux
        light types.
        """
        # The expected shader node inputs that should be found for all of our
        # UsdLux light types.
        expectedLightInputNames = [
            # LightAPI
            'color',
            'colorTemperature',
            'diffuse',
            'enableColorTemperature',
            'exposure',
            'intensity',
            'normalize',
            'specular',
            # ShadowAPI
            'shadow:color',
            'shadow:distance',
            'shadow:enable',
            'shadow:falloff',
            'shadow:falloffGamma',
            # ShapingAPI
            'shaping:cone:angle',
            'shaping:cone:softness',
            'shaping:focus',
            'shaping:focusTint',
            'shaping:ies:angleScale',
            'shaping:ies:file',
            'shaping:ies:normalize'
        ]

        # Map of the names of the expected light nodes to the additional inputs
        # we expect for those types.
        expectedLightNodes = {
            'CylinderLight' : ['length', 'radius'],
            'DiskLight' : ['radius'],
            'DistantLight' : ['angle'],
            'DomeLight' : ['texture:file', 'texture:format'],
            'GeometryLight' : [],
            'PortalLight' : [],
            'RectLight' : ['width', 'height', 'texture:file'],
            'SphereLight' : ['radius'],
            'MeshLight' : [],
            'VolumeLight' : []
        }

        # Get all the derived types of UsdLuxBoundableLightBase and
        # UsdLuxNonboundableLightBase that are defined in UsdLux
        lightTypes = list(filter(
            Plug.Registry().GetPluginWithName("usdLux").DeclaresType,
            Tf.Type(UsdLux.BoundableLightBase).GetAllDerivedTypes() +
            Tf.Type(UsdLux.NonboundableLightBase).GetAllDerivedTypes()))
        self.assertTrue(lightTypes)

        # Augment lightTypes to include MeshLightAPI and VolumeLightAPI
        lightTypes.append(
            Tf.Type.FindByName('UsdLuxMeshLightAPI'))
        lightTypes.append(
            Tf.Type.FindByName('UsdLuxVolumeLightAPI'))

        # Verify that at least one known light type is in our list to guard
        # against this giving false positives if no light types are available.
        self.assertIn(UsdLux.RectLight, lightTypes)
        self.assertEqual(len(lightTypes), len(expectedLightNodes))

        stage = Usd.Stage.CreateInMemory()
        prim = stage.DefinePrim("/Prim")

        usdSchemaReg = Usd.SchemaRegistry()
        for lightType in lightTypes:
            print("Test SdrNode for schema type " + str(lightType))

            if usdSchemaReg.IsAppliedAPISchema(lightType):
                prim.ApplyAPI(lightType)
            else:
                typeName = usdSchemaReg.GetConcreteSchemaTypeName(lightType)
                if not typeName:
                    continue
                prim.SetTypeName(typeName)
            light = UsdLux.LightAPI(prim)
            self.assertTrue(light)
            sdrIdentifier = light.GetShaderId([])
            self.assertTrue(sdrIdentifier)
            prim.ApplyAPI(UsdLux.ShadowAPI)
            prim.ApplyAPI(UsdLux.ShapingAPI)

            # Every concrete light type and some API schemas (with appropriate
            # shaderId as sdr Identifier) in usdLux domain will have an
            # SdrShaderNode with source type 'USD' registered for it under its
            # USD schema type name.
            node = Sdr.Registry().GetNodeByIdentifier(sdrIdentifier, ['USD'])
            self.assertTrue(node is not None)
            self.assertIn(sdrIdentifier, expectedLightNodes)

            # Names, identifier, and role for the node all match the USD schema
            # type name
            self.assertEqual(node.GetIdentifier(), sdrIdentifier)
            self.assertEqual(node.GetName(), sdrIdentifier)
            self.assertEqual(node.GetImplementationName(), sdrIdentifier)
            self.assertEqual(node.GetRole(), sdrIdentifier)
            self.assertTrue(node.GetInfoString().startswith(sdrIdentifier))

            # The context is always 'light' for lights.
            # Source type is 'USD'
            self.assertEqual(node.GetContext(), 'light')
            self.assertEqual(node.GetSourceType(), 'USD')

            # Help string is generated and encoded in the node's metadata (no
            # need to verify the specific wording).
            self.assertTrue(set(node.GetMetadata().keys()), {'primvars', 'help'})
            self.assertEqual(node.GetMetadata()["help"], node.GetHelp())

            # Source code and URIs are all empty.
            self.assertFalse(node.GetSourceCode())
            self.assertFalse(node.GetResolvedDefinitionURI())
            self.assertFalse(node.GetResolvedImplementationURI())

            # Other classifications are left empty.
            self.assertFalse(node.GetCategory())
            self.assertFalse(node.GetDepartments())
            self.assertFalse(node.GetFamily())
            self.assertFalse(node.GetLabel())
            self.assertFalse(node.GetVersion())
            self.assertFalse(node.GetAllVstructNames())
            self.assertEqual(node.GetPages(), [''])

            # The node will be valid for our light types.
            self.assertTrue(node.IsValid())

            # Helper for comparing an SdrShaderProperty from node to the
            # corresponding UsdShadeInput/UsdShadeOutput from a UsdLux light
            def _CompareLightPropToNodeProp(nodeInput, primInput):
                # Input names and default values match.
                primDefaultValue = primInput.GetAttr().Get()
                self.assertEqual(nodeInput.GetName(), primInput.GetBaseName())
                self.assertEqual(nodeInput.GetDefaultValue(), primDefaultValue)

                # Some USD property types don't match exactly one to one and are
                # converted to different types. In particular relevance to
                # lights and Token becomes String.
                expectedTypeName = primInput.GetTypeName()
                # Array valued attributes have their array size determined from
                # the default value and will be converted to scalar in the
                # SdrProperty if the array size is zero.
                if expectedTypeName.isArray:
                    if not primDefaultValue or len(primDefaultValue) == 0:
                        expectedTypeName = expectedTypeName.scalarType
                elif expectedTypeName == Sdf.ValueTypeNames.Token:
                    expectedTypeName = Sdf.ValueTypeNames.String
                # Bool SdfTypes should Have Int SdrTypes, but still return as
                # Bool when queried for GetTypeAsSdfType
                if expectedTypeName == Sdf.ValueTypeNames.Bool:
                    self.assertEqual(nodeInput.GetType(), Sdf.ValueTypeNames.Int)

                # Verify the node's input type maps back to USD property's type
                # (with the noted above exceptions).
                self.assertEqual(
                    nodeInput.GetTypeAsSdfType()[0], expectedTypeName,
                    msg="{}.{} Type {} != {}".format(
                        str(node.GetName()), str(nodeInput.GetName()),
                        str(nodeInput.GetTypeAsSdfType()[0]),
                        str(expectedTypeName)))
                # If the USD property type is an Asset, it will be listed in
                # the node's asset identifier inputs.
                if expectedTypeName == Sdf.ValueTypeNames.Asset:
                    self.assertIn(nodeInput.GetName(),
                        node.GetAssetIdentifierInputNames())

            # There will be a one to one correspondence between node inputs
            # and prim inputs. Note that the prim may have additional inputs
            # because of auto applied API schemas, but we only need to verify
            # that the node has ONLY the expected inputs and the prim at least
            # has those input proerties.
            expectedInputNames = \
                expectedLightInputNames + expectedLightNodes[sdrIdentifier]
            # Verify node has exactly the expected inputs.
            self.assertEqual(sorted(expectedInputNames),
                sorted(node.GetInputNames()))
            # Verify each node input matches a prim input.
            for inputName in expectedInputNames:
                nodeInput = node.GetInput(inputName)
                primInput = light.GetInput(inputName)
                self.assertFalse(nodeInput.IsOutput())
                _CompareLightPropToNodeProp(nodeInput, primInput)

            # None of the UsdLux base lights have outputs
            self.assertEqual(node.GetOutputNames(), [])
            self.assertEqual(light.GetOutputs(onlyAuthored=False), [])

            # The reverse is tested just above, but for all asset identifier
            # inputs listed for the node there is a corresponding asset value
            # input property on the prim.
            for inputName in node.GetAssetIdentifierInputNames():
                self.assertEqual(light.GetInput(inputName).GetTypeName(),
                    Sdf.ValueTypeNames.Asset)

            # These primvars come from sdrMetadata on the prim itself which
            # isn't supported for light schemas so it will always be empty.
            self.assertFalse(node.GetPrimvars())

            # sdrMetadata on input properties is supported so additional
            # primvar properties will correspond to prim inputs with that
            # metadata set.
            for propName in node.GetAdditionalPrimvarProperties():
                self.assertTrue(light.GetInput(propName).GetSdrMetadataByKey(
                    'primvarProperty'))

            # Default input can also be specified in the property's sdrMetadata.
            if node.GetDefaultInput():
                defaultInput = light.GetInput(
                    node.GetDefaultInput().GetName())
                self.assertTrue(defaultInput.GetSdrMetadataByKey('defaultInput'))

if __name__ == '__main__':
    unittest.main()
0
0
0
29,301
0
0
0
55
46
0e80c9e7dca15d7cd5266e3c0a1290507d1a7a09
3,801
py
Python
scripts/fix_rttm.py
sehgal-simran/RPNSD
5ec70d11e3d177fb87a8499b63cd1c5ba60549b6
[ "MIT" ]
59
2020-02-19T11:23:14.000Z
2022-02-06T09:31:32.000Z
scripts/fix_rttm.py
yuzhms/RPNSD
031377388cb498c0dee080a76bd588a9ee8b39e0
[ "MIT" ]
11
2020-03-05T10:23:43.000Z
2021-10-11T02:15:28.000Z
scripts/fix_rttm.py
yuzhms/RPNSD
031377388cb498c0dee080a76bd588a9ee8b39e0
[ "MIT" ]
13
2020-02-19T02:30:43.000Z
2021-01-13T03:06:42.000Z
#!/usr/bin/env python3
# This script fixes some problems the RTTM file
# including invalid time boundaries and others

if __name__ == "__main__":
    main()
36.548077
146
0.594843
#!/usr/bin/env python3
# This script fixes some problems the RTTM file
# including invalid time boundaries and others

import os
import sys
import numpy as np
import argparse

def get_args():
    parser = argparse.ArgumentParser(
        description="Fix RTTM file")
    parser.add_argument("rttm_file", type=str,
                        help="Input RTTM file")
    parser.add_argument("rttm_output_file", type=str,
                        help="Output RTTM file")
    parser.add_argument("--channel", type=int, default=1,
                        help="Channel information in the RTTM file")
    parser.add_argument("--add_uttname", type=int, default=0,
                        help="Whether to add uttname to spkname")
    args = parser.parse_args()
    return args

def load_rttm(filename):
    utt2seg = {}
    with open(filename, 'r') as fh:
        content = fh.readlines()
    for line in content:
        line = line.strip('\n')
        line_split = line.split()
        uttname, start_t, duration, spkname = line_split[1], float(line_split[3]), float(line_split[4]), line_split[7]
        if duration <= 0:
            print("Invalid line")
            print(line)
            continue
        end_t = start_t + duration
        if uttname not in utt2seg:
            utt2seg[uttname] = []
        utt2seg[uttname].append([start_t, end_t, spkname])
    return utt2seg

def merge_same_spk(seg_array):
    spk_list = list(set(seg_array[:, 2]))
    seg_array_list = []
    for spk in spk_list:
        seg_array_spk = seg_array[seg_array[:, 2] == spk]
        seg_list_spk = []
        for i in range(len(seg_array_spk)):
            if i == 0:
                seg_list_spk.append(seg_array_spk[i, :])
            else:
                if seg_array_spk[i, 0] > seg_list_spk[-1][1]:
                    seg_list_spk.append(seg_array_spk[i, :])
                else:
                    seg_list_spk[-1][1] = max(seg_list_spk[-1][1], seg_array_spk[i, 1])
        seg_array_spk_new = np.array(seg_list_spk)
        seg_array_list.append(seg_array_spk_new)
    seg_array_new = np.concatenate(seg_array_list)
    seg_array_new = seg_array_new[seg_array_new[:, 0].argsort(), :]
    return seg_array_new

def fix_rttm(utt2seg):
    uttlist = list(utt2seg.keys())
    uttlist.sort()
    utt2seg_new = {}
    for utt in uttlist:
        seg_list = utt2seg[utt]
        spk_list = list(set([seg[2] for seg in seg_list]))
        spk_list.sort()
        seg_array = np.array([[seg[0], seg[1], spk_list.index(seg[2])] for seg in seg_list])
        seg_array = seg_array[seg_array[:, 0].argsort(), :]
        seg_array_new = merge_same_spk(seg_array)
        seg_list = []
        for i in range(len(seg_array_new)):
            seg_list.append([seg_array_new[i, 0], seg_array_new[i, 1], spk_list[int(seg_array_new[i, 2])]])
        utt2seg_new[utt] = seg_list
    return utt2seg_new

def write_rttm(utt2seg, rttm_output_file, add_uttname, channel):
    uttlist = list(utt2seg.keys())
    uttlist.sort()
    with open(rttm_output_file, 'w') as fh:
        for utt in uttlist:
            seg_list = utt2seg[utt]
            for seg in seg_list:
                if add_uttname:
                    fh.write("SPEAKER {} {} {:.2f} {:.2f} <NA> <NA> {}_{} <NA> <NA>\n".format(utt, channel, seg[0], seg[1] - seg[0], utt, seg[2]))
                else:
                    fh.write("SPEAKER {} {} {:.2f} {:.2f} <NA> <NA> {} <NA> <NA>\n".format(utt, channel, seg[0], seg[1] - seg[0], seg[2]))
    return 0

def main():
    args = get_args()

    # load input RTTM
    utt2seg = load_rttm(args.rttm_file)

    # fix RTTM file
    utt2seg_new = fix_rttm(utt2seg)

    # write output RTTM
    write_rttm(utt2seg_new, args.rttm_output_file, args.add_uttname, args.channel)
    return 0

if __name__ == "__main__":
    main()
0
0
0
0
0
3,448
0
-32
228
78c5929686706d7b4c5c6bb30eecae092b7caa4b
997
py
Python
polymorphism/polymorphism_demos.py
Minkov/python-oop
db9651eef374c0e74c32cb6f2bf07c734cc1d051
[ "MIT" ]
3
2021-11-16T04:52:53.000Z
2022-02-07T20:28:41.000Z
polymorphism/polymorphism_demos.py
Minkov/python-oop
db9651eef374c0e74c32cb6f2bf07c734cc1d051
[ "MIT" ]
null
null
null
polymorphism/polymorphism_demos.py
Minkov/python-oop
db9651eef374c0e74c32cb6f2bf07c734cc1d051
[ "MIT" ]
1
2021-12-07T07:04:38.000Z
2021-12-07T07:04:38.000Z
r = Rect(2, 5)
c = Circle(3)

shapes: list[Shape] = [
    r,
    c,
]

[print_area(s) for s in shapes]

print(isinstance(r, Rect))
print(isinstance(r, Circle))
print(isinstance(r, Shape))

# print_area(2)

print(Rect.mro())

Person().say_hello()
16.616667
50
0.608826
import math


class Shape:
    def area(self):
        pass


class Rect(Shape):
    def __init__(self, width, height):
        self.width = width
        self.height = height

    def area(self):
        return self.width * self.height


class Circle(Shape):
    def __init__(self, radius):
        self.radius = radius

    def area(self):
        return self.radius * self.radius * math.pi


def print_area(shape: Shape):
    # if isinstance(shape, Rect):
    #     print(shape.rect_area())
    # elif isinstance(shape, Circle):
    #     print(shape.circle_area())
    print(shape.area())
    # print(shape.width, shape.height)


r = Rect(2, 5)
c = Circle(3)

shapes: list[Shape] = [
    r,
    c,
]

[print_area(s) for s in shapes]

print(isinstance(r, Rect))
print(isinstance(r, Circle))
print(isinstance(r, Shape))

# print_area(2)

print(Rect.mro())


class Person:
    def say_hello(self):
        print("Hello! 1")

    def say_hello(self):
        print("Hello! 2")


Person().say_hello()
0
0
0
404
0
215
0
-10
137