column            type        values
query             string      length 9 to 3.4k
document          string      length 9 to 87.4k
metadata          dict
negatives         sequence    length 4 to 101
negative_scores   sequence    length 4 to 101
document_score    string      length 3 to 10
document_rank     string      102 distinct classes
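Each row pairs a natural-language query with a positive code document, a list of negative code passages, and one similarity score per negative. As a rough sketch of working with this schema (the JSON Lines layout and the file name "rows.jsonl" are assumptions for illustration, not something specified here), a row could be inspected like this:

    import json

    # Hypothetical path and format: we assume the rows are exported as
    # JSON Lines, one record per line, with the fields listed above.
    ROWS_PATH = "rows.jsonl"

    with open(ROWS_PATH, "r", encoding="utf-8") as f:
        for line in f:
            row = json.loads(line)
            query = row["query"]              # natural-language description
            document = row["document"]        # positive code passage
            negatives = row["negatives"]      # 4 to 101 negative code passages
            scores = row["negative_scores"]   # one score per negative passage
            assert len(negatives) == len(scores)
            print(f"{query[:60]!r} -> {len(negatives)} negatives, "
                  f"document_score={row['document_score']}, "
                  f"rank={row['document_rank']}")
            break  # inspect only the first row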
Read stop words from input file (filename) and insert each word as a key into the stop words hash table.
def load_stop_table(self, filename):
    self.stop_table = HashTable(191)
    try:
        a = open(filename, "r")
        lines = a.readlines()
        a.close()
    except:
        raise FileNotFoundError()
    for n in range(len(lines)):
        self.stop_table.insert(lines[n][:-1], n)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_stop_table(self, filename):\n self.stop_table = HashTable(191)\n with open(filename, 'r') as f:\n for word in f.readlines():\n self.stop_table.insert(word.replace('\\n',''),None)", "def load_stop_words(stop_word_file):\n stop_words = []\n for line in open(stop_word_file):\n if line.strip()[0:1] != \"#\":\n for word in line.split(): # in case more than one per line\n stop_words.append(word)\n return stop_words", "def load():\n for line in open(config.filepath, 'r'):\n line = line.strip()\n line_sorted = ''.join(sorted(line))\n\n if line_sorted not in Words.hashed:\n Words.hashed[line_sorted] = []\n\n # Store the real hashed as a list\n # We need line_sorted as the key for fast lookup later\n Words.hashed[line_sorted].append(line)\n\n # Also add the word to a standard list\n # We'll use this to quickly determine wordiness later\n Words.words.append(line)", "def _stopwords():\n global _stopword_set\n if _stopword_set:\n return _stopword_set\n f_name = \"stopword.list\"\n if os.path.isfile(f_name):\n res = set()\n with open(f_name) as f:\n for line in f:\n res.add(line.strip())\n _stopword_set = res\n return res\n else:\n error(\"stop words - not a file: %s\" % f_name)", "def read_text_file(self, filepath: str):\n with open(filepath) as fh:\n for line in fh:\n for word in re.split('\\W+', line):\n word = word.lower()\n if len(word):\n l = self.hash_map.lookup(word)\n self.hash_map.insert(word, l + 1 if l > 0 else 1)", "def read_stopwords(fileName='stopwords.txt', lower_case=True):\n stopwords = set()\n with open(fileName) as f:\n for w in f:\n w = w.strip()\n if w:\n if lower_case:\n w = w.lower()\n stopwords.add(w)\n return stopwords", "def make_word_dict():\n d = dict()\n for line in open('words.txt'):\n word = line.strip().lower()\n d[word] = None\n\n return d", "def load_wordlist(self, filename):\n reg1 = re.compile(\"^([1-6]{5})[ \\t]+(.*)$\")\n f = open(filename, 'r')\n \n if(self.generate):\n wordlist = []\n reg2 = re.compile(\"^(\\S*)$\")\n for line in f:\n m1 = reg1.match(line)\n m2 = reg2.match(line)\n \n if(m1):\n wordlist.append(m1.group(2))\n elif(m2):\n wordlist.append(m2.group(1))\n \n else:\n wordlist = {}\n for line in f:\n m = reg1.match(line)\n if(m):\n wordlist[int(m.group(1))] = m.group(2)\n \n if((not self.generate and len(wordlist) < 7776) or \n (self.generate and len(wordlist) < 2**13)):\n stderr.write(\"Word list is too short\\n\")\n exit(5)\n \n self.wordlist = wordlist", "def __init__(self):\n stopwords_file = open(self.filepath, \"r\")\n for line in stopwords_file.readlines():\n line2 = line.replace(\"\\n\", \"\") \n self.add(line2)", "def mkwrddct(inputfile):\n fin = open(inputfile)\n words = dict()\n for line in fin:\n w = line.strip()\n words[w] = w\n return words", "def rm_stopwords(file_path, word_dict):\n\n # read stop word dict and save in stop_dict\n stop_dict = {}\n with open(word_dict) as d:\n for word in d:\n stop_dict[word.strip(\"\\n\")] = 1\n # remove tmp file if exists\n if os.path.exists(file_path + \".tmp\"):\n os.remove(file_path + \".tmp\")\n\n print(\"now remove stop words in %s.\" % file_path)\n # read source file and rm stop word for each line.\n with open(file_path) as f1, open(file_path + \".tmp\", \"w\") as f2:\n for line in f1:\n tmp_list = [] # save words not in stop dict\n words = line.split()\n for word in words:\n if word not in stop_dict:\n tmp_list.append(word)\n words_without_stop = \" \".join(tmp_list)\n to_write = words_without_stop + \"\\n\"\n f2.write(to_write)\n\n # overwrite origin file with file been removed stop 
words\n shutil.move(file_path + \".tmp\", file_path)\n print(\"stop words in %s has been removed.\" % file_path)", "def load_stop_words() -> list:\r\n with open(f'{ENGINE}/stop_words.txt', 'r') as i:\r\n stop_words = i.read().splitlines()\r\n stop_words = list(map(lambda x: x.upper(), stop_words)) # Force all stop words to UPPER case.\r\n return stop_words", "def load_stop_words():\n with open('../data/stop_words.txt', 'r') as stop_words_file:\n return stop_words_file.read().split()", "def load_words(file_path: str) -> List[Word]:\n \n words = load_words_raw(file_path)\n \n \n words = remove_stop_words(words)\n\n \n words = remove_duplicates(words)\n \n return words", "def readFile(filename):\n listOfWords = []\n currentLine = 1\n f = open(filename, \"r\")\n for line in f:\n line = stripPunctuation(line)\n for word in line.split():\n word = word.lower()\n if len(word) > 1:\n if not word[0].isdigit():\n tempObj = contains(listOfWords, word)\n if tempObj != None:\n tempObj.incOccurrence(currentLine)\n else:\n temp = Word(word, currentLine)\n listOfWords.append(temp)\n currentLine = currentLine + 1\n return listOfWords", "def load_wordlist(filename):\n # YOUR CODE HERE\n words = {}\n f = open(filename, 'rU')\n text = f.read()\n text = text.split('\\n')\n for line in text:\n words[line] = 1\n f.close()\n return words", "def create_dictionary(filename):\n\tword_set = set()\n\tif os.path.isfile(filename):\n\t\twith open(filename, 'r') as f:\n\t\t\tfor line in iter(f):\n\t\t\t\tword_set.add(line.strip('\\n'))\n\telse:\n\t\tprint \"File not found!\"\n\treturn word_set", "def get_stop_words(stop_file_path):\n \n with open(stop_file_path, 'r', encoding=\"utf-8\") as f:\n stopwords = f.readlines()\n stop_set = set(m.strip() for m in stopwords)\n return frozenset(stop_set)", "def create_word_map(tokenized_descriptions_file_path, word_dictionary_output_path):\n if os.path.exists(word_dictionary_output_path):\n print(\"Word map already exists in workspace. Will be reused.\")\n return\n\n print(\"Word map not found. 
Generating....\")\n\n words_list = []\n words_to_id = {}\n\n with open(tokenized_descriptions_file_path, 'r') as file:\n for line in file:\n tokens = line.strip().split(\",\")\n words_list.extend(tokens[1:])\n\n # remove duplicate words\n words_list = list(set(words_list))\n\n # sorting the words\n words_list = sorted(words_list)\n for i in range(len(words_list)):\n words_to_id[words_list[i]] = i\n\n with open(word_dictionary_output_path, 'w') as f:\n [f.write('{0},{1}'.format(key, value) + \"\\n\") for key, value in words_to_id.items()]", "def read_dictionary():\n\tglobal dictionary\n\twith open(FILE, \"r\") as f:\n\t\tfor words in f:\n\t\t\tdictionary += words.split()", "def make_word_dict():\n d = dict()\n fin = open(\"words.txt\")\n for line in fin:\n word = line.strip().lower()\n d[word] = None\n #have to add single letter words to the word list;\n #also, the empty string is considered a word.\n for letter in ['a', 'i', '']:\n d[letter] = letter\n return d", "def read_dictionary():\n with open(FILE, 'r') as f:\n for line in f:\n words_lst = line.split()\n for word in words_lst:\n dict_list.append(word)", "def train(self, filename):\n with open(filename, 'r') as f:\n phrases_and_words = []\n\n for index, line in enumerate(f):\n # decoding, since input is not unicode\n cleaned_line = self.get_cleaned_line(line.decode('utf-8', 'ignore'))\n\n if cleaned_line:\n phrases_and_words.extend(self.get_phrase_and_words_from_line(cleaned_line))\n\n if index % 10000 == 0:\n self.db_storage.store_phrases_and_words(phrases_and_words)\n phrases_and_words = []\n\n self.db_storage.store_phrases_and_words(phrases_and_words)", "def word_dict():\n fin = open('words.txt')\n w_dict = {}\n for line in fin:\n word = line.strip()\n w_dict[word] = word\n return w_dict", "def load_dictionary(hash_table, filename):\n\n file = open(filename)\n lines = file.readlines()\n start = timeit.default_timer()\n for line in lines:\n hash_table.insert(line.rstrip(),1)\n if timeit.default_timer() - start > 4:\n break\n file.close()", "def create_index(path):\n words = {}\n\n for l in open(path):\n linewords = l.strip().split(\" \")\n student = linewords[0]\n linewords = linewords[1:]\n\n for word in linewords:\n if word in words:\n if int(student) not in words[word]:\n words[word].append(int(student))\n else:\n words[word] = [int(student)]\n\n return words", "def word_frequency_in_file(filename):\n words = {}\n fin = open(filename)\n punctuation = string.punctuation\n for line in fin:\n line = line.translate( # Replace punctuation with spaces\n str.maketrans(punctuation, ' ' * len(punctuation)))\n line = line.lower()\n line_words = line.split()\n for word in line_words: # Process each word in the line.\n if word in words:\n words[word] += 1\n else:\n words[word] = 1\n return words", "def load_file(self, file_path):\n f = open(file_path, \"r\")\n sentences = f.readlines()\n \n word_count = 0\n\n for sentence in sentence: \n for word in sentence.strip().split(\" \"):\n if not (word in self.word_id): #word not in dictionary\n word_id[word] = word_count\n word_count += 1\n\n #self.doc = [[self.word_id[word] for word in sentence.strip().split(\" \")] for sentence in sentences]", "def load_dictionary(filename):\n\n word_list = []\n freq_sum = 0\n\n # nacitanie zo suboru\n with open(filename) as f:\n for line in f:\n freq, val = line.split()\n word_list.append(Word(int(freq), val))\n freq_sum += int(freq)\n\n # lexikograficke usporiadanie slov\n word_list_sorted = sorted(word_list, key=operator.attrgetter('value'))\n\n return 
word_list_sorted, freq_sum", "def load_concordance_table(self, filename):\n self.concordance_table = HashTable(191)\n with open(filename, 'r') as f:\n for linenum,words in enumerate(f.readlines()):\n for i in words.translate(self.ttable).split():\n i = i.casefold()\n if not self.stop_table.in_table(i):\n self.concordance_table.insert(i,linenum + 1)", "def read_dictionary():\n global dic\n with open(FILE, 'r') as f:\n for line in f:\n word_list = line.split()\n word = word_list[0].strip()\n dic.append(word)", "def read_words(f, words):\n with open(f) as file:\n for line in file:\n w = tokenizer.tokenize(line.strip())\n for word in w:\n try:\n words[word] += 1\n except:\n words[word] = 1", "def index_embedding_words(embedding_file):\n words = set()\n with open(embedding_file) as f:\n for line in f:\n w = Dictionary.normalize(line.rstrip().split(' ')[0])\n words.add(w)\n return words", "def load_input_word_list(file_path):\n if not os.path.isfile(file_path):\n return False\n\n word_list = list()\n\n with open(file_path, 'r') as fp:\n while True:\n line = fp.readline()\n if not line:\n break\n\n data = line.split(' ')\n text = data[0].lower().strip(Setting.NONWORD_CHARACTERS)\n\n if not text:\n continue\n\n text = text.replace('_', ' ')\n\n score = float(data[1])\n\n if score < 0:\n kind = WordKindEnum.NEG\n else:\n kind = WordKindEnum.POS\n\n word = Word(text, score, kind)\n word_list.append(word)\n\n return word_list", "def __init__(self, filename):\n\n self.term_dict = {}\n for line in open(filename):\n if line.startswith(\"#\"):\n continue\n\n #print line\n word, w_type = line.strip().split(\"\\t\")\n self.term_dict[word.strip().lower()] = \"CHESS_\" + w_type.strip().lower()", "def read_dictionary():\n with open(FILE, 'r') as f:\n for vocabulary in f:\n if vocabulary[0].strip() not in dict_txt:\n dict_txt[vocabulary[0].strip()] = [vocabulary.strip()]\n else:\n dict_txt[vocabulary[0].strip()].append(vocabulary.strip())", "def __init__(self, vocab_file, max_size):\n\t\tself._word_to_id = {}\n\t\tself._id_to_word = {}\n\t\tself._count = 0 # keeps track of total number of words in the Vocab\n\n\t\t# [UNK], [PAD], [START] and [STOP] get the ids 0,1,2,3.\n\t\tfor w in [UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]:\n\t\t\tself._word_to_id[w] = self._count\n\t\t\tself._id_to_word[self._count] = w\n\t\t\tself._count += 1\n\n\t\t# Read the vocab file and add words up to max_size\n\t\twith open(vocab_file, 'r') as vocab_f:\n\t\t\tfor line in vocab_f:\n\t\t\t\tpieces = line.split()\n\t\t\t\tif len(pieces) != 2:\n\t\t\t\t\tprint ('Warning: incorrectly formatted line in vocabulary file: %s\\n' % line)\n\t\t\t\t\tcontinue\n\t\t\t\tw = pieces[0]\n\t\t\t\tif w in [SENTENCE_START, SENTENCE_END, UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]:\n\t\t\t\t\traise Exception(\n\t\t\t\t\t\t'<s>, </s>, [UNK], [PAD], [START] and [STOP] shouldn\\'t be in the vocab file, but %s is' % w)\n\t\t\t\tif w in self._word_to_id:\n\t\t\t\t\traise Exception('Duplicated word in vocabulary file: %s' % w)\n\t\t\t\tself._word_to_id[w] = self._count\n\t\t\t\tself._id_to_word[self._count] = w\n\t\t\t\tself._count += 1\n\t\t\t\tif max_size != 0 and self._count >= max_size:\n\t\t\t\t\tprint (\"max_size of vocab was specified as %i; we now have %i words. Stopping reading.\" % (\n\t\t\t\t\tmax_size, self._count))\n\t\t\t\t\tbreak\n\n\t\tprint (\"Finished constructing vocabulary of %i total words. 
Last word added: %s\" % (\n\t\tself._count, self._id_to_word[self._count - 1]))", "def word_list():\n\n d = {}\n with open('words.txt') as fin:\n for line in fin.readlines():\n word = line.strip().lower()\n d[word] = True\n return d", "def make_stopwords(filepath='stopwords.txt'):\n sw = open(filepath, \"r\")\n my_stopwords = sw.read()\n my_stopwords = my_stopwords.split(\", \")\n sw.close()\n\n all_stopwords = stopwords.words('english')\n all_stopwords.extend(my_stopwords)\n return all_stopwords", "def _read_words(self, path):\r\n\r\n word_file = open(path)\r\n for line in word_file.readlines():\r\n pair = line.split('::')\r\n self.insert(pair[0], pair[1].rstrip())\r\n word_file.close()", "def index_embedding_words(embedding_file):\n words = set()\n with open(embedding_file, encoding='utf-8') as f:\n for line in f:\n w = Dictionary.normalize(line.rstrip().split(' ')[0])\n words.add(w)\n return words", "def loadStopWordList(swFile):\n f = open(swFile, 'r')\n lines = f.readlines()\n f.close()\n result = list()\n for line in lines:\n sWord = line.strip('\\n')\n result.append(sWord)\n return result", "def import_words(file_name):\n with open(file_name) as word_list:\n words = []\n for line in word_list:\n number, word = line.strip().split(\"\\t\")\n words.append(word.strip())\n # print(f\"Imported {(len(word_dict))} words\")\n\n return words", "def __init__(self,dir_stopwords):\n \n arc = open(dir_stopwords, \"r\", encoding='utf-8')\n self.stp_wrds = [line.strip() for line in arc]\n arc.close()", "def init(wordlist_filename):\n global WORDS\n if WORDS == None:\n WORDS = []\n bad_line = lambda x: x.strip() == '' or x.startswith('#')\n with codecs.open(wordlist_filename, 'r', 'utf-8') as filehandle:\n lines = filehandle.readlines()\n WORDS = set([x.lower().strip() for x in lines if not bad_line(x)])", "def preprocess(in_file: str, file_out: str) -> tuple:\r\n try:\r\n stop_words = []\r\n if os.path.isfile(\"StopWords.pkl\"):\r\n with open(\"StopWords.pkl\", mode=\"rb\") as pkl:\r\n stop_words = pickle.load(pkl)\r\n else:\r\n with open(\"StopWords.txt\", mode=\"r\") as f1:\r\n for line in f1:\r\n stop_words.append(line.strip(\"\\n\"))\r\n with open(\"StopWords.pkl\", mode=\"wb\") as pkl:\r\n pickle.dump(stop_words, pkl)\r\n\r\n vocab = []\r\n corpus = []\r\n with open(in_file, mode=\"r\") as f:\r\n for line in f:\r\n line = line.rsplit(sep=\"\\t\")\r\n sent = line[1].strip(\" \\n\")\r\n sent = int(sent)\r\n\r\n sentence = process_sentence(line[0])\r\n\r\n [sentence.remove(w) for w in sentence if w in stop_words or w == \"\"]\r\n corpus.append([sentence, sent])\r\n vocab += list(set(sentence) - set(vocab))\r\n\r\n # Sort lists alphabetically\r\n corpus = sorted(corpus)\r\n vocab = sorted(vocab)\r\n vocab.append(\"classlabel\")\r\n to_file(file_out, vocab, corpus)\r\n\r\n return vocab, corpus\r\n except:\r\n catch_err()", "def get_glove_dictionary(self, file_path=\"./glove.twitter.27B.25d.txt\"):\n file = open(file_path, \"r\",encoding='utf-8')\n dictionary = {}\n keys = []\n for word_vector in file:\n dictionary[word_vector.split()[0]] = word_vector.split()[1:]\n keys.append(word_vector.split()[0])\n file.close()\n\n file = open(\"./Glove_dict.txt\", \"a\",encoding='utf-8')\n for word in keys:\n file.write(word + '\\n')\n file.close()", "def build_stopwords():\r\n\tprint('\\nbuilding stopwords')\r\n\t\r\n\tif load_stopwords():\r\n\t\treturn\r\n\r\n\tglobal stopwords\r\n\tstopwords = nltk.corpus.stopwords.words('english')\r\n\tfor f in os.listdir(paths.path_data_stopwords):\r\n\t\tpath_stopwords = 
paths.path_data_stopwords + '/' + f\r\n\t\twith open(path_stopwords,'r') as f:\r\n\t\t\tfor l in f:\r\n\t\t\t\tw = l.strip()\r\n\t\t\t\tw = re.sub(r\"[\\x80-\\xff]\",\" \",w)\r\n\t\t\t\tif (w not in stopwords):\r\n\t\t\t\t\tstopwords.append(w)\r\n\t\r\n\t# wip improve with POS and remove numbers\r\n\twith open(paths.path_data_stopwords_txt,'w') as outf:\r\n\t\toutf.write('\\n'.join(stopwords))\r\n\t\r\n\tprint('\\nstopword count : ' + str(len(stopwords)))", "def create_dictionary(file_dir):\r\n\tword_list = []\r\n\tfile_list = read_files(file_dir, \"lab\") # step 7\r\n\tfor file in file_list:\r\n\t\twith open(file, 'r') as f:\r\n\t\t\ttext = f.read()\r\n\t\tword_list = store_to_dictionary(text, word_list) # step 8cii\r\n\tmake_dictionary_file(file_dir, word_list) # step 9\r", "def loadWords():\n inFile = open(wordFile, 'r')\n wordlist = []\n for line in inFile:\n wordlist.append(line)\n return wordlist", "def buildCorpus(self, filename, stopwords_file=None):\n with open(filename, 'r') as infile:\n # use pattern.subs\n # doclines = [line.rstrip().lower().split(' ') for line in infile]\n doclines = [self.help_clean(line) for line in infile]\n n_docs = len(doclines)\n self.vocab = list({v for doc in doclines for v in doc})\n if stopwords_file:\n with open(stopwords_file, 'r') as stopfile:\n stops = stopfile.read().split()\n self.vocab = [x for x in self.vocab if x not in stops]\n self.vocab.sort()\n self.documents = []\n for i in range(n_docs):\n self.documents.append({})\n for j in range(len(doclines[i])):\n if doclines[i][j] in self.vocab:\n self.documents[i][j] = self.vocab.index(doclines[i][j])", "def load_keywords():\n keywords = set()\n with open(os.path.join(BASE, \"data/keywords.txt\")) as fp:\n for line in fp:\n keywords.add(line.strip().lower())\n return keywords", "def __init__(self, file_name=None):\n self.word_list = {} # Dict of {word: frequency}\n self.replacement_words = {}\n self.ignored_words = []\n [self.add_word(w) for w in self.ADDITIONAL_VALID_WORDS]\n if file_name:\n self.load(file_name)", "def get_word_freq(filein):\n freq = {}\n\n # Open file handles with context manager\n with open(filein) as f:\n\n # Read a single line at a time so as not to crush memory\n for line in f:\n\n # Tokenize and iterate\n for word in line.split():\n\n # Use try/except instead of if/then for performance\n # Likely after the first 1M tweets that the key will be contained\n try:\n freq[word] += 1\n except KeyError:\n freq[word] = 1\n\n return freq", "def parse_file(input_file):\n # Automatically close the file after being used\n with open(input_file) as text:\n # Read file and split each word into an element in a list\n data = text.read().split()\n\n # Sort the list\n # Python sort automatically does lexical sorting\n data.sort()\n\n # For each word, use as Dictionary key and count the occurrences of the word and use as value\n frequency_table = {word: data.count(word) for word in data}\n\n # Return the frequency table\n return frequency_table", "def load_words():\n with open(DICTIONARY) as f:\n return [line.strip() for line in f]", "def read_data(filename,words):\n try:\n f = open(filename)\n reader = f.read().splitlines()\n for line in reader:\n #print(line[0])\n words.add(line.lower())\n f.close()\n except IOError:\n print 'Input file reading failed,'\n return words", "def index_embedding_words(self, embedding_file):\n words = set()\n with open(embedding_file) as f:\n for line in f:\n w = TokenDictionary.normalize(line.rstrip().split(' ')[0])\n words.add(w)\n return words", "def 
assignWordList(filename, thisDataEntry):\n oldArr = []\n newArr = []\n try:\n with open(filename, encoding=\"latin-1\") as file:\n lines = [line.rstrip() for line in file]\n idx = 0\n while(lines[idx] != \"***\"):\n oldArr.append(lines[idx].lower())\n idx += 1\n idx += 1 #Skip the delimitter\n for x in range(idx, len(lines)):\n newArr.append(lines[x].lower())\n file.close()\n except IOError:\n print(\"Error opening: \" + str(filename))\n for x in oldArr:\n thisDataEntry.old[x] = 0\n for y in newArr:\n thisDataEntry.new[y] = 0", "def read_file(filename):\n print(\"Reading dictionary: \" +filename)\n word_dict = set()\n\n dictionary = open(filename)\n\n # Read each word from the dictionary\n for word in dictionary:\n # Remove the trailing newline character\n word = word.rstrip('\\n')\n\n # Convert to lowercase\n word = word.lower()\n\n word_dict.add(word)\n\n dictionary.close()\n\n return word_dict", "def generate_dictionary(location):\n f = open('../data/wordlist.txt', 'rb')\n words = Counter(re.findall('[a-z]+', f.read().lower().decode()))\n joblib.dump(words, location)", "def create_dictionary(filename):\n file = open(filename, 'r')\n text = file.read()\n file.close()\n words = text.split()\n d = {}\n current_word = '$'\n \n for next_word in words:\n if current_word not in d:\n d[current_word] = [next_word]\n else:\n d[current_word] += [next_word]\n if next_word[-1] == '.' or next_word[-1] == '!' or next_word[-1] == '?':\n current_word = '$'\n else:\n current_word = next_word\n return d", "def construct_dict(self):\n i = 0\n self.word2idx = dict()\n fi = open(self.config.word_vec_fi_glove, 'r')\n\n for line in fi:\n self.word2idx[line.split(\" \")[0]] = i\n i += 1\n\n self.vocab_size = i\n self.write_dict()\n fi.close()", "def getstopwords():\n file = open('stopWords.txt', 'r')\n stoplist = []\n for word in file.readlines():\n word = word.strip('\\n')\n stoplist.append(word)\n return stoplist", "def word_counts(file):\n words = defaultdict(int)\n regex = re.compile('[' + string.punctuation + ']')\n for line in open(file):\n for word in [regex.sub('', w) for w in line.lower().split()]:\n words[word] += 1\n\n return words", "def learn(filename):\n word_dict = {} # Create empty dictionary\n first = None\n prev = None\n with open(filename, 'r', encoding='utf-8') as file:\n for line in file:\n list_words = line.lower().split()\n text = []\n for word in list_words:\n # take out leading and trailing punctuation characters\n words = word.strip(string.punctuation + string.digits)\n word_len = len(words)\n if word_len >= 1:\n text.append(words)\n\n if first is None:\n # Get the first word in the text file\n first = text[0]\n # iterate over text\n if prev:\n text.insert(0, prev)\n for counter, word in enumerate(text):\n if word not in word_dict:\n word_dict[word] = list()\n if counter < (len(text) - 1):\n following = counter + 1\n word_dict[word].append(text[following])\n prev = text[-1]\n return first, word_dict # return a tuple", "def load_cows(filename):\r\n print(\"Loading words from file...\")\r\n # inFile: file\r\n inFile = open(filename, 'r')\r\n # wordlist: list of strings\r\n wordlist = {}\r\n for line in inFile:\r\n cow = line.split(',')\r\n wordlist[cow[0]] = int(cow[1]) # 0: name, 1: weight\r\n inFile.close()\r\n print(\" \", len(wordlist), \"words loaded.\")\r\n return wordlist", "def print_word_freq(file):\n # with open(file, 'r') as text the r as the second arguement means that my intentions are to read the file\n with open(file, 'r') as text:\n # this reads the entire file and puts 
this into text string\n text_string = text.read()\n # returns the string respresentation of text string without removing special characters so you can see what you need to remove\n # print(repr(text_string))\n # this removes the specified characters from the text string\n text_string = text_string.replace(\",\", \"\")\n text_string = text_string.replace(\".\", \"\")\n text_string = text_string.replace(\"—\", \" \")\n text_string = text_string.replace(\"-\", \" \")\n text_string = text_string.replace(\"?\", \"\")\n text_string = text_string.replace(\":\", \"\")\n text_string = text_string.replace(\"'\", \"\")\n text_string = text_string.replace(\"\\\\n\", \"\")\n text_string = text_string.replace(\"’\", \"\")\n text_string = text_string.replace(\"]\", \"\")\n text_string = text_string.replace(\"[\", \"\")\n text_string = text_string.replace(\"\\\"\", \"\")\n # takes the text string and makes all the characters lower case\n text_string = text_string.lower()\n # takes the text string and splits all the words into a list this splits from space to space\n words_list = text_string.split()\n # a dictionary is a key and a value\n no_stop_words = {}\n # for loop that will cycle through the words list\n for word in words_list:\n # checking to see if the word is stop words\n if word not in STOP_WORDS:\n # if the word is already in the dictionary no stop words increment the value by 1\n if word in no_stop_words:\n no_stop_words[word] += 1\n # if the word is not in the dictionary no stop words add this to the dictionary and give it a value of 1\n else:\n no_stop_words[word] = 1\n \n sorted_dict = {}\n sorted_keys = sorted(no_stop_words, key=no_stop_words.get, reverse=True)\n \n for w in sorted_keys:\n sorted_dict[w] = no_stop_words[w]\n \n for key in sorted_dict:\n print(f\"{key:>15} | {sorted_dict[key]:2} {'*' * sorted_dict[key]}\")\n \n # good practice to ensure that we are properly closing the file in use at the end of the function\n text.close()", "def get_word_list(file_name, to_skip_or_not_to_skip):\n fin = open(file_name) #opening file\n histogram={} \n if to_skip_or_not_to_skip == True: #if I want to skip the header this is set to True\n skip_first_part(fin)\n for line in fin: #runs through lines of book file\n line = line.replace(\"-\",\" \") #takes out dashed, underscroes, numbers, whitespaces, and punctuation\n line = line.replace(\"_\",\" \")\n to_remove = string.punctuation + string.whitespace + '0123456789' \n for word in line.split():\n word = word.strip(to_remove) #running through all words in each line \n if word == 'God' or 'Lord':\n pass\n else:\n word = word.lower()\n histogram[word] = histogram.get(word, 0)+1\n return histogram", "def read_dictionary_from_file(self, stem_flag):\n file_name = \"/dictionary.txt\" if not stem_flag else \"/dictionaryWithStemming.txt\"\n with open(self.posting_and_dictionary_path + file_name, \"r\") as f:\n txt = f.readlines()\n for line in txt:\n l = line.split(\":\")\n pos = l[1].split(\",\")\n e = DictionaryElement(pos[0])\n e.pointer = int(pos[1])\n e.corpus_tf = int(pos[2])\n if not stem_flag:\n self.term_dictionary[l[0]] = e\n else:\n self.term_dictionary_with_stemming[l[0]] = e\n f.close()", "def ReadAndTokenize(filename):\n global CACHE\n global VOCABULARY\n if filename in CACHE:\n return CACHE[filename]\n comment = open(filename).read()\n words = Tokenize(comment)\n\n terms = collections.Counter()\n for w in words:\n VOCABULARY[w] += 1\n terms[w] += 1\n\n CACHE[filename] = terms\n return terms", "def importBrainstormWordsFile(filename):\n #init 
the list with all words in the file\n allWords = []\n \n #open the brainstorming words file and read the lines\n with open(filename, 'r') as fp:\n lines = fp.read().splitlines()\n \n #split the lines for the idiots that didn't read the instructions and add them to the output\n for curLine in lines:\n if curLine.startswith('Please type one'):\n continue\n cutLines = curLine.replace(',',' ').split()\n \n #cycle the word and add them\n for curWord in cutLines:\n allWords.append(curWord.strip().lower())\n \n return allWords", "def analyzeFile(filename): \n fileData = open(filename, encoding=\"utf-8\") # open the file\n \n counts = {}\n\n for line in fileData:\t\t # iterates over every line of the file\n words = line.split() # turns each line into a list\n for word in words: #iterates over the words in each line list\n word = word.lower().strip(string.whitespace+string.punctuation)\n if len(word) > 0: #make sure word is longer than 0 before adding it to the dictionary\n counts[word] = counts.get(word, 0) + 1 #look up if the dictionary has that word and if not then it'll add that word with the value 0 associated with it and then add one to that, if it has seen it it'll add 1 to the value stored in the counts dictionary\n #when it gets here for the first line it goes back up to the top and repeats for the 2nd line\n mostCommonWord = [word]\n leastCommonWord = [word]\n shortestWord = [word]\n longestWord = [word]\n \n for item in counts:\n if counts[mostCommonWord[0]] < counts[item]:\n mostCommonWord = [item]\n elif counts[mostCommonWord[0]] == counts[item]:\n mostCommonWord.append(item)\n if counts[leastCommonWord[0]] > counts[item]:\n leastCommonWord = [item]\n elif counts[leastCommonWord[0]] == counts[item]:\n leastCommonWord.append(item)\n if len(shortestWord[0]) > len(item):\n shortestWord = [item] \n elif len((shortestWord[0])) == len(item):\n shortestWord.append(item)\n if len(longestWord[0]) < len(item):\n longestWord = [item]\n elif len(longestWord[0]) == len(item):\n longestWord.append(item)\n \n return (mostCommonWord, leastCommonWord, shortestWord, longestWord)", "def load_stopwords():\r\n\tglobal stopwords\r\n\tif os.path.exists(paths.path_data_stopwords_txt):\r\n\t\tprint('\\nloading stopwords')\r\n\t\twith open(paths.path_data_stopwords_txt,'r') as inf:\r\n\t\t\tstopwords = inf.read().split('\\n')\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False", "def load_stop_list():\n stop_list = []\n with open(STOP_LIST, \"r\") as f:\n lines = f.readlines()\n stop_list = [word.strip() for word in lines]\n return stop_list", "def LoadWords(self,FileName) :\r\n\t\ttry :\r\n\t\t\twith open(FileName,'r') as fhan :\r\n\t\t\t\tWords = fhan.read()\r\n\t\texcept Exception as detail :\r\n\t\t\tlogging.error(\"Failed to read file %s: %s\"%(FileName,detail))\r\n\t\ttry :\r\n\t\t\tWordList = Words.rstrip().split('\\n')\r\n\t\t\tWordList = filter(None,WordList)\r\n\t\t\tWordList = [(Word,) for Word in WordList]\r\n\t\t\tDictRef = self.CreateDict(FileName)\r\n\t\t\tself.DB_Cursor.execute(self.SQLCMDs['SelectDictTable'],(DictRef,))\r\n\t\t\tDictName = self.DB_Cursor.fetchone()[0]\r\n\t\t\tself.DB_Cursor.executemany(self.SQLCMDs['InsertAllWordsToDict']%DictName,WordList)\r\n\t\t\tself.DB_Connect.commit()\r\n\t\t\tlist_id = self.CreateWordList(FileName,DictRef)\r\n\t\t\tself.UpdateWordList(list_id,False)\r\n\t\texcept Exception as detail :\r\n\t\t\tlogging.error(\"Failed to add words to the new dictionary: %s\"%detail)\r\n\t\t\tself.DB_Connect.rollback()\r\n\t\treturn DictRef", "def load_words():\r\n## print 
\"Loading word list from file...\"\r\n # inFile: file\r\n inFile = open(WORDLIST_FILENAME, 'r', 0)\r\n # wordlist: list of strings\r\n wordlist = []\r\n for line in inFile:\r\n wordlist.append(line.strip().lower())\r\n## print \" \", len(wordlist), \"words loaded.\"\r\n return wordlist", "def print_word_freq(file):\n with open(file) as text:\n text_string = str(text.readlines())\n text_string = text_string.replace(\",\", \"\")\n text_string = text_string.replace(\".\", \"\")\n text_string = text_string.replace(\"-\", \"\")\n text_string = text_string.replace(\"?\", \"\")\n text_string = text_string.replace(\":\", \"\")\n text_string = text_string.replace(\"'\", \"\")\n text_string = text_string.replace(\"\\\\n\", \"\")\n text_string = text_string.replace(\"[\", \"\")\n text_string = text_string.replace(\"]\", \"\")\n word_list = text_string.split()\n no_stop_words = []\n for word in word_list:\n if word in STOP_WORDS:\n pass\n else: no_stop_words.append(word)\n clean_list = {}\n for word in no_stop_words:\n clean_list[word] = no_stop_words.count(word) \n print(clean_list)", "def get_words_from_file(filename):\n words_by_len = {}\n f = open(filename, \"r\", 1, \"utf8\")\n for word in f:\n word = word.strip().lower()\n w_len = len(word)\n if w_len > 1:\n words_by_len[w_len] = words_by_len.get(w_len, []) + [word]\n return words_by_len", "def build_dict(fname):\n\t\n\twith open(fname) as file:\n\n\t\tword_count_dict = {}\n\n\t\tfor line in file:\n\t\t\tline = line.rstrip()\n\t\t\tline =line.split(' ')\n\t\t\tfor word in line:\n\t\t\t\tword = word.strip('\"!.,?_;():')\n\t\t\t\tword = word.lower()\n\t\t\t\tword_count_dict[word] = word_count_dict.get(word, 0) + 1\n\t\t#return word_count_dict\n\n\t\tfor each in word_count_dict:\n\t\t\tcount = word_count_dict[each]\n\t\t\tprint(each, count)\n\n\t\treturn", "def read_data(file_path):\n words=[]\n dic_word={}\n actual_text=[]\n for line in open(file_path,encoding='utf-8'):\n words_line=line.strip().split(' ')\n for ite in words_line:\n if ite not in dic_word:\n dic_word[ite]=1\n words.extend(words_line)\n actual_text.append(words_line)\n\n\n #with zipfile.ZipFile(file_path) as f:\n #words = tf.compat.as_str(f.read(f.namelist()[0])).split()\n\n return words,len(dic_word),actual_text", "def make_mimic_dict(filename):\r\n with open(filename, 'r') as file:\r\n text = file.read().lower().replace(\"'\",'').split()\r\n mimic_dict = {}\r\n prev = ''\r\n for word in text:\r\n if not prev in mimic_dict:\r\n mimic_dict[prev] = [word]\r\n else:\r\n mimic_dict[prev].append(word)\r\n prev = word\r\n return mimic_dict", "def getDictionary(tsvFile, wordIndex, ngram):\r\n \r\n reader = DescriptionReader(tsvFile, wordIndex, ngram)\r\n dictionary = corpora.Dictionary( d for d in reader )\r\n\r\n # remove stop words and words that appear only once\r\n stoplist = [] # might be specified in the furture\r\n stop_ids = [dictionary.token2id[stopword] for stopword in stoplist\r\n if stopword in dictionary.token2id]\r\n once_ids = [tokenid for tokenid, docfreq in dictionary.dfs.items() if docfreq == 1]\r\n dictionary.filter_tokens(stop_ids + once_ids) # remove stop words and words that appear only once\r\n dictionary.compactify() # remove gaps in id sequence after words that were removed\r\n \r\n return dictionary", "def removeOwnStopWords(self, sort=True, lc=False):\n\t\tself.textFile = self.removeStopWords(text=self.textFile, sort=sort, lc=lc)", "def read_file(fp):\n scrabble_words_dict = {}\n \n for line in fp:\n line = line.lower()\n line = line.strip()\n if len(line) < 
3:\n continue\n elif \"-\" in line or \"'\" in line:\n continue\n else:\n scrabble_words_dict[line] = 1\n return scrabble_words_dict", "def load_defs():\n # Load word definitions\n fname = 'word-definitions.txt'\n with open(fname) as fh:\n lines = fh.readlines()\n \n # Create dictionary keyed by lowercase word\n def_tbl = dict()\n for line in lines:\n # split the dictionary line at the first space\n word, word_def = line.split(sep=None, maxsplit=1)\n # add this entry to the dictionary\n word = word.lower()\n def_tbl[word] = word_def.rstrip()\n return def_tbl", "def load_words():\n # Load all the words from the scrabble dictionary into a python list, words\n fname = 'words.txt'\n with open(fname) as fh:\n words = fh.readlines()\n \n # Create a python dict keyed by sorted letters, with value equal to a list\n # of all the anagrams of that collection of letters\n anagram_tbl = dict()\n for word in words:\n word_lc = word.rstrip().lower()\n key = word_key(word_lc)\n value = anagram_tbl.get(key, []) + [word_lc]\n anagram_tbl[key] = value\n return anagram_tbl", "def load_words():\n print(\"Loading word list from file..\")\n WORDLIST_FILENAME = \"words.txt\"\n # with open('words.txt', 'r') as f:\n # inFile = f.read()\n inFile = open(WORDLIST_FILENAME, 'r')\n wordlist = []\n\n for line in inFile:\n wordlist.append(line.strip().lower())\n return wordlist", "def dictionary_creation(filename):\n\tfp = open(filename)\n\td = dict()\n\tfor line in fp:\n\t\t# print line\n\t\tfor word in line.split():\n\t\t\tword = word.strip(string.punctuation + string.whitespace)\n\t\t\t# print word\n\t\t\tif len(word) >5:\n\t\t\t\tif word not in d:\n\t\t\t\t\t# print 'in'\n\t\t\t\t\td[word] = 1\n\t\t\t\telse:\n\t\t\t\t\t# print 'not in'\n\t\t\t\t\td[word] += 1\n\treturn d\n\n\tfp.close()", "def word_tag_counts (count_file):\r\n wordtagcounts = defaultdict(list)\r\n f = open(count_file, 'r')\r\n for line in f:\r\n fields = line.split(\" \")\r\n if fields[1] != 'WORDTAG':\r\n continue\r\n count = int(fields[0].strip())\r\n tag = fields[2].strip()\r\n word = fields[3].strip()\r\n wordtagcounts[word].append((tag, count)) \r\n f.close() \r\n return wordtagcounts", "def setKeys():\n keywords['c++'] = {}\n with open('cppkeywords.txt', 'r') as f:\n for i in f:\n i = i.strip('\\n')\n words = map(str, i.split())\n key = words[0]\n words.pop(0)\n keywords['c++'][key] = list(words)\n for j in words:\n MyDict.insert(j)\n keywords['py'] = {}\n with open('pykeywords.txt', 'r') as f:\n for i in f:\n i = i.strip('\\n')\n words = map(str, i.split())\n key = words[0]\n words.pop(0)\n keywords['py'][key] = list(words)\n for j in words:\n MyDict.insert(j)", "def read_files(self,corpus):\n\n file = open(corpus)\n markov_dictionary = {}\n word_key = ['None', 'None']\n word_list = []\n lastword = \"\"\n #use for loop to make lines in file a list\n for line in file:\n line = line.strip()\n words = line.split(\" \")\n \n # generate keys\n word_key[0] = lastword\n word_key[1] = words[0]\n \n if lastword:\n markov_dictionary[tuple(word_key)] = self.make_values(corpus, word_key)\n\n i = 0\n while i < len(words) - 1:\n word_key[0] = words[i]\n word_key[1] = words[i + 1]\n \n markov_dictionary[tuple(word_key)] = self.make_values(corpus, word_key)\n\n i += 1\n\n lastword = words[len(words) - 1]\n\n # print \"make_chains\", markov_dictionary\n return markov_dictionary", "def read_word_list(file_name):\r\n\twith open(file_name) as word_list_file:\r\n\t\treturn set(word.strip() for word in word_list_file)", "def load_dictionary(filename, 
encoding='utf_8', skip=0, max_words=50000):\n\n d = dict()\n with codecs.open(filename, 'r', encoding=encoding) as f:\n line_counter = 0\n index_counter = 1 # we use 1 for the <EOS> symbol in both languages and 0 for <UNK> words\n\n d['<EOS>'] = index_counter\n index_counter += 1\n\n for line in f.readlines():\n\n line_counter += 1\n\n # check if we have to skip something\n if line_counter > skip:\n # split the line\n s = line.split()\n # get word and its index\n # if index > max. number of words, set it to 0\n if index_counter < max_words:\n word = s[0]\n d[word] = index_counter\n index_counter += 1\n return d", "def print_word_freq(file):\n with open(file) as text:\n text = text.read().lower()\n text = text.replace(\"\\n\", \" \")\n text = text.replace(\"’\", \"\")\n # text = \" \".join(text.split())\n # print(text)\n for character in string.punctuation:\n text = text.replace(character, \"\")\n word_list = text.split()\n clean_list = []\n for word in word_list:\n if word not in STOP_WORDS:\n clean_list.append(word)\n \n\n # for stop_word in STOP_WORDS:\n # if stop_word in word_list:\n # word_list.remove(stop_word)\n\n\n new_dict = {}\n for word in clean_list:\n new_dict[word] = clean_list.count(word)\n sorted_dict = sorted(new_dict.items())\n print(sorted_dict)\n\n # print(f\"{key} | {value} {'*' * value}\")\n\n \n # for stop_word in STOP_WORDS:\n # text = text.replace(stop_word, \"\")\n\n # for word in word_list:\n # if word in string.punctuation:\n # #do something\n # if word in STOP_WORDS:\n\n \n # for stop_word in STOP_WORDS:\n # text = text.replace(stop_word, \"\")\n # print(text)", "def countWords(words, filename):\n\ttry:\n\t\tfile = codecs.open(filename, \"r\", \"utf8\")\n\t\ttokens = [ string.strip(string.lower(i)) for i in file.read().split() ]\n\t\tfor i in tokens:\n\t\t\twords[i] = words.get(i, 0) + 1\n\t\tfile.close()\n\texcept IOError:\n\t\tprint \"Cannot read from file:\", filename\n\treturn words", "def process_file(self, filename, order=2):\n fp = open(filename)\n self.skip_gutenberg_header(fp)\n\n for line in fp:\n for word in line.rstrip().split():\n self.process_word(word, order)\n\n #print(\">>>DEBUG the suffix map\")\n #i = 0\n #for k,v in self.suffix_map.items():\n # print(\"key is {}, value is {}\".format(k, v))\n # i += 1\n # if i > 10:\n # break", "def create_english_word_list(filename):\n global global_english_word_list\n\n if not global_english_word_list:\n with open(filename) as f:\n for line in f:\n global_english_word_list.append(re.sub(r'\\s+', '', line))", "def load_vocabulary():\n global vocabulary_list, vocabulary_dict\n vocabulary_list = []\n vocabulary_dict = {}\n\n with open(_VOCABULARY_PATH, 'r') as f:\n for index, line in enumerate(f):\n line = line.strip()\n vocabulary_dict[line] = index\n vocabulary_list.append(line)", "def load_word_counts(filename):\n raw_rows = csv_rows(filename)\n word_counts = defaultdict(lambda: 0)\n\n for line_number, raw_row in enumerate(raw_rows, 2):\n count = int(raw_row[\"count\"])\n ipa = raw_row[\"IPA\"]\n if '*' in ipa:\n continue\n\n # Fixes random badness.. 
hopefully doesn't hide anything?\n mod_ipa = ipa.replace('(', '').replace(')', '')\n\n # Work around a passage with an error in it:\n gloss = raw_row[\"Gloss\"] or raw_row[\"Text\"]\n\n category = raw_row[\"Category\"]\n\n skipword_characters = {'?'}\n try:\n for i, g in izip(mod_ipa.split('/'), gloss.split('/')):\n word = make_word(i, g, category)\n word_counts[word] += count\n except WordParseError as e:\n print (u\"Error on line %d: %s [%s || %s]\" %\n (line_number, repr(e), ipa, gloss)).encode('utf-8')\n except IndexError as e:\n unknown_index = e.args[0]\n if unknown_index in skipword_characters:\n print (u\"Bad char on line %d: %s [%s || %s]\" %\n (line_number, repr(e), ipa, gloss)).encode('utf-8')\n else:\n print \"FATAL ERROR ON LINE %d\" % line_number\n raise\n except:\n print \"FATAL ERROR ON LINE %d\" % line_number\n raise\n return word_counts" ]
[ "0.7675772", "0.6984068", "0.67051345", "0.65770996", "0.6555971", "0.65203714", "0.6489752", "0.6475122", "0.6467671", "0.6451879", "0.6377066", "0.63083595", "0.63079983", "0.6283576", "0.6268957", "0.6258872", "0.6253571", "0.6214078", "0.61646724", "0.6143819", "0.608474", "0.6073526", "0.6068786", "0.6065902", "0.60613567", "0.6052064", "0.6046372", "0.6042853", "0.60415167", "0.6040906", "0.60295177", "0.59983766", "0.59936625", "0.5988736", "0.5987943", "0.5976255", "0.59729934", "0.5966749", "0.59628665", "0.59528106", "0.5945419", "0.59452385", "0.5942639", "0.59383506", "0.5925877", "0.5908141", "0.5902478", "0.58904344", "0.588735", "0.5849045", "0.58342755", "0.5830125", "0.5798432", "0.57905126", "0.5788999", "0.5785569", "0.5784483", "0.5779273", "0.5775147", "0.5769087", "0.57593477", "0.57565105", "0.5749934", "0.57453054", "0.57395786", "0.5739182", "0.5737352", "0.5734947", "0.57284075", "0.5726179", "0.570873", "0.5707878", "0.57050043", "0.5704106", "0.568509", "0.56668484", "0.566292", "0.56574434", "0.5651551", "0.565081", "0.56492203", "0.56425697", "0.56345403", "0.5618813", "0.56128454", "0.56111825", "0.5608342", "0.56067157", "0.55998313", "0.55942154", "0.55927545", "0.55899787", "0.55746", "0.5572658", "0.5571997", "0.5567334", "0.55641556", "0.55524105", "0.55454993", "0.5521379" ]
0.69579995
2
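The objective metadata above marks each row for triplet-style training over (query, document, negatives): the query is the anchor, the document is the positive, and each entry in negatives is a candidate negative. A minimal sketch of expanding one row into training triplets, assuming a plain dict shaped like the record above (pairing every negative with the same positive is an assumption, not stated by the metadata):

    from typing import Iterator, Tuple

    def row_to_triplets(row: dict) -> Iterator[Tuple[str, str, str]]:
        # Yield (anchor, positive, negative) triplets from one dataset row.
        anchor = row["query"]
        positive = row["document"]
        for negative in row["negatives"]:
            yield anchor, positive, negative

    # Toy row shaped like the record above (contents truncated for brevity).
    toy_row = {
        "query": "Read stop words from input file (filename) and insert each "
                 "word as a key into the stop words hash table.",
        "document": "def load_stop_table(self, filename): ...",
        "negatives": [
            "def load_stop_words(stop_word_file): ...",
            "def make_word_dict(): ...",
        ],
    }
    for triplet in row_to_triplets(toy_row):
        print(triplet)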
Read words from input text file (filename) and insert them into the concordance hash table, after processing for punctuation, numbers and filtering out words that are in the stop words hash table. Do not include duplicate line numbers (word appearing on same line more than once, just one entry for that line)
def load_concordance_table(self, filename):
    self.concordance_table = HashTable(191)
    try:
        a = open(filename, "r")
        lines = a.readlines()
        a.close()
    except:
        raise FileNotFoundError()
    for n in range(len(lines)):
        lone = clean(lines[n])
        line = lone.split(" ")
        for i in line:
            if (i != None) and (self.stop_table.in_table(i) == False) and (i != ""):
                self.concordance_table.insert(i, n+1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_text_file(self, filepath: str):\n with open(filepath) as fh:\n for line in fh:\n for word in re.split('\\W+', line):\n word = word.lower()\n if len(word):\n l = self.hash_map.lookup(word)\n self.hash_map.insert(word, l + 1 if l > 0 else 1)", "def load_concordance_table(self, filename):\n self.concordance_table = HashTable(191)\n with open(filename, 'r') as f:\n for linenum,words in enumerate(f.readlines()):\n for i in words.translate(self.ttable).split():\n i = i.casefold()\n if not self.stop_table.in_table(i):\n self.concordance_table.insert(i,linenum + 1)", "def analyzeFile(filename): \n fileData = open(filename, encoding=\"utf-8\") # open the file\n \n counts = {}\n\n for line in fileData:\t\t # iterates over every line of the file\n words = line.split() # turns each line into a list\n for word in words: #iterates over the words in each line list\n word = word.lower().strip(string.whitespace+string.punctuation)\n if len(word) > 0: #make sure word is longer than 0 before adding it to the dictionary\n counts[word] = counts.get(word, 0) + 1 #look up if the dictionary has that word and if not then it'll add that word with the value 0 associated with it and then add one to that, if it has seen it it'll add 1 to the value stored in the counts dictionary\n #when it gets here for the first line it goes back up to the top and repeats for the 2nd line\n mostCommonWord = [word]\n leastCommonWord = [word]\n shortestWord = [word]\n longestWord = [word]\n \n for item in counts:\n if counts[mostCommonWord[0]] < counts[item]:\n mostCommonWord = [item]\n elif counts[mostCommonWord[0]] == counts[item]:\n mostCommonWord.append(item)\n if counts[leastCommonWord[0]] > counts[item]:\n leastCommonWord = [item]\n elif counts[leastCommonWord[0]] == counts[item]:\n leastCommonWord.append(item)\n if len(shortestWord[0]) > len(item):\n shortestWord = [item] \n elif len((shortestWord[0])) == len(item):\n shortestWord.append(item)\n if len(longestWord[0]) < len(item):\n longestWord = [item]\n elif len(longestWord[0]) == len(item):\n longestWord.append(item)\n \n return (mostCommonWord, leastCommonWord, shortestWord, longestWord)", "def readFile(filename):\n listOfWords = []\n currentLine = 1\n f = open(filename, \"r\")\n for line in f:\n line = stripPunctuation(line)\n for word in line.split():\n word = word.lower()\n if len(word) > 1:\n if not word[0].isdigit():\n tempObj = contains(listOfWords, word)\n if tempObj != None:\n tempObj.incOccurrence(currentLine)\n else:\n temp = Word(word, currentLine)\n listOfWords.append(temp)\n currentLine = currentLine + 1\n return listOfWords", "def load():\n for line in open(config.filepath, 'r'):\n line = line.strip()\n line_sorted = ''.join(sorted(line))\n\n if line_sorted not in Words.hashed:\n Words.hashed[line_sorted] = []\n\n # Store the real hashed as a list\n # We need line_sorted as the key for fast lookup later\n Words.hashed[line_sorted].append(line)\n\n # Also add the word to a standard list\n # We'll use this to quickly determine wordiness later\n Words.words.append(line)", "def top_words(source, number):\n\n keys = set()\n\n ht = HashMap(2500,hash_function_2)\n\n # This block of code will read a file one word as a time and\n # put the word in `w`. 
It should be left as starter code.\n with open(source) as f:\n for line in f:\n words = rgx.findall(line)\n for w in words:\n # set up index for hash map\n key = w.lower()\n hash = ht._hash_function(key)\n hash_index = hash % ht.capacity\n cur_bucket = ht._buckets[hash_index]\n new_node = cur_bucket.head\n # if key already exists in hash map, find and increment value\n if ht.contains_key(key):\n while new_node is not None:\n if new_node.key == key:\n new_node.value = new_node.value + 1\n new_node = new_node.next\n # else, add key to hashmap with value of 1\n else:\n cur_bucket.add_front(key, 1)\n # make empty list\n list = []\n # add all buckets to list as tuples\n for i in range(ht.capacity):\n bucket = ht._buckets[i]\n if bucket.head is not None:\n new_node = bucket.head\n while new_node is not None:\n list.append((new_node.key, new_node.value))\n new_node = new_node.next\n # Sort list in reverse by key value (word count)\n # Source: https://www.geeksforgeeks.org/python-program-to-sort-a-list-of-tuples-by-second-item/\n list.sort(key = lambda x: x[1], reverse=True)\n # Return list from 0 to user number\n return(list[0:number])", "def train(self, filename):\n with open(filename, 'r') as f:\n phrases_and_words = []\n\n for index, line in enumerate(f):\n # decoding, since input is not unicode\n cleaned_line = self.get_cleaned_line(line.decode('utf-8', 'ignore'))\n\n if cleaned_line:\n phrases_and_words.extend(self.get_phrase_and_words_from_line(cleaned_line))\n\n if index % 10000 == 0:\n self.db_storage.store_phrases_and_words(phrases_and_words)\n phrases_and_words = []\n\n self.db_storage.store_phrases_and_words(phrases_and_words)", "def top_words(source, number):\n\n keys = set()\n\n ht = HashMap(2500, hash_function_2)\n\n # This block of code will read a file one word at a time and\n # put the word in `w`. 
It should be left as starter code.\n with open(source) as f:\n for line in f:\n words = rgx.findall(line)\n for w in words:\n # convert word to lowercase to avoid inconsistent hash values\n # due to different cases of the same word.\n w = w.lower()\n\n # check if the current word already exists as a key\n if w in keys:\n current_count = ht.get(w) # fetch the current count for that word\n current_count += 1 # increment count by one\n ht.put(w, current_count) # update value for the key\n else:\n # word does not exist in hash map\n keys.add(w) # add current word to keys set\n ht.put(w, 1) # insert key into hash map with value of 1\n\n # fetch unsorted list of tuples from parsed data\n word_count_list = compile_list(ht, keys)\n\n # sort word count tuple list\n word_count_list = word_count_sort(word_count_list)\n\n # initialize and fill final word list\n final_list = []\n\n for index in range(0, number):\n final_list.append(word_count_list[index])\n\n return final_list", "def index_embedding_words(embedding_file):\n words = set()\n with open(embedding_file) as f:\n for line in f:\n w = Dictionary.normalize(line.rstrip().split(' ')[0])\n words.add(w)\n return words", "def make_word_dict():\n d = dict()\n for line in open('words.txt'):\n word = line.strip().lower()\n d[word] = None\n\n return d", "def load_stop_table(self, filename):\n self.stop_table = HashTable(191)\n with open(filename, 'r') as f:\n for word in f.readlines():\n self.stop_table.insert(word.replace('\\n',''),None)", "def collate(filename):\r\n x=open(filename,\"r\")\r\n total_words=[]\r\n for line in x:\r\n line=line.strip(\"\\n\")\r\n line=line.split(\":\")\r\n if len(total_words)<1:\r\n total_words.append(line)\r\n else:\r\n x= len(total_words)\r\n if line[0] == total_words[x-1][0]:\r\n if int(line[1]) > int(total_words[x-1][len(total_words[x-1])-1]):\r\n total_words[x-1].append(line[1])\r\n else:\r\n total_words.append(line)\r\n y = open(\"collated_ids.txt\", \"w\")\r\n # for i in range(len(total_words)):\r\n # if len(total_words[i])<3:\r\n # total_words[i]=\":\".join(total_words[i])+\"\\n\"\r\n # else:\r\n # id=\" \".join(total_words[i][1:])\r\n # total_words[i]=total_words[i][0]+\":\"+id+\"\\n\"\r\n # y.writelines(total_words)\r\n for i in range(len(total_words)):\r\n id=\"\"\r\n for j in range(1,len(total_words[i])):\r\n id=id +total_words[i][j] +\" \"\r\n y.write(str(total_words[i][0]) + \":\" +str(id) + \"\\n\")", "def index_embedding_words(embedding_file):\n words = set()\n with open(embedding_file, encoding='utf-8') as f:\n for line in f:\n w = Dictionary.normalize(line.rstrip().split(' ')[0])\n words.add(w)\n return words", "def word_frequency_in_file(filename):\n words = {}\n fin = open(filename)\n punctuation = string.punctuation\n for line in fin:\n line = line.translate( # Replace punctuation with spaces\n str.maketrans(punctuation, ' ' * len(punctuation)))\n line = line.lower()\n line_words = line.split()\n for word in line_words: # Process each word in the line.\n if word in words:\n words[word] += 1\n else:\n words[word] = 1\n return words", "def create_index(path):\n words = {}\n\n for l in open(path):\n linewords = l.strip().split(\" \")\n student = linewords[0]\n linewords = linewords[1:]\n\n for word in linewords:\n if word in words:\n if int(student) not in words[word]:\n words[word].append(int(student))\n else:\n words[word] = [int(student)]\n\n return words", "def load_file(self, file_path):\n f = open(file_path, \"r\")\n sentences = f.readlines()\n \n word_count = 0\n\n for sentence in sentence: \n for 
word in sentence.strip().split(\" \"):\n if not (word in self.word_id): #word not in dictionary\n word_id[word] = word_count\n word_count += 1\n\n #self.doc = [[self.word_id[word] for word in sentence.strip().split(\" \")] for sentence in sentences]", "def load_word_counts(filename):\n raw_rows = csv_rows(filename)\n word_counts = defaultdict(lambda: 0)\n\n for line_number, raw_row in enumerate(raw_rows, 2):\n count = int(raw_row[\"count\"])\n ipa = raw_row[\"IPA\"]\n if '*' in ipa:\n continue\n\n # Fixes random badness.. hopefully doesn't hide anything?\n mod_ipa = ipa.replace('(', '').replace(')', '')\n\n # Work around a passage with an error in it:\n gloss = raw_row[\"Gloss\"] or raw_row[\"Text\"]\n\n category = raw_row[\"Category\"]\n\n skipword_characters = {'?'}\n try:\n for i, g in izip(mod_ipa.split('/'), gloss.split('/')):\n word = make_word(i, g, category)\n word_counts[word] += count\n except WordParseError as e:\n print (u\"Error on line %d: %s [%s || %s]\" %\n (line_number, repr(e), ipa, gloss)).encode('utf-8')\n except IndexError as e:\n unknown_index = e.args[0]\n if unknown_index in skipword_characters:\n print (u\"Bad char on line %d: %s [%s || %s]\" %\n (line_number, repr(e), ipa, gloss)).encode('utf-8')\n else:\n print \"FATAL ERROR ON LINE %d\" % line_number\n raise\n except:\n print \"FATAL ERROR ON LINE %d\" % line_number\n raise\n return word_counts", "def process_file(filename, skip_header):\n hist = {}\n fp = open(filename, encoding='utf8')\n\n if skip_header:\n skip_gutenberg_header(fp)\n\n\n for line in fp:\n if line.startswith('*** END OF THIS PROJECT'):\n break\n line = line.replace('-', ' ')\n strippables = string.punctuation + string.whitespace\n\n for word in line.split():\n # remove punctuation and convert to lowercase\n word = word.strip(strippables)\n word = word.lower()\n\n #update the histrogram\n hist[word] = hist.get(word, 0) + 1\n\n\n return hist", "def mkwrddct(inputfile):\n fin = open(inputfile)\n words = dict()\n for line in fin:\n w = line.strip()\n words[w] = w\n return words", "def read_file(fp):\n scrabble_words_dict = {}\n \n for line in fp:\n line = line.lower()\n line = line.strip()\n if len(line) < 3:\n continue\n elif \"-\" in line or \"'\" in line:\n continue\n else:\n scrabble_words_dict[line] = 1\n return scrabble_words_dict", "def process(filename):\r\n x = open(filename, \"r\")\r\n words_from_songs=[]\r\n for line in x:\r\n array =line.split(\":\")\r\n songid= array[0]\r\n lyrics=array[1]\r\n lyrics=lyrics.replace(\"\\n\", \"\")\r\n lyrics=lyrics.split(\" \")\r\n for i in range(len(lyrics)):\r\n words_from_songs.append((lyrics[i],songid))\r\n words_from_songs=radixSortNumbers(words_from_songs)\r\n max1 = longestWord(words_from_songs)\r\n counting = []\r\n for _ in range(max1+1):\r\n counting.append([])\r\n for k in range(len(words_from_songs)-1,0,-1):\r\n counting[len(words_from_songs[k][0])].append(words_from_songs[k])\r\n new_list = []\r\n # for i in range(len(counting)-1,0,-1):\r\n # for k in range(len(counting[i])):\r\n # new_list.insert(0,counting[i][k])\r\n # for i in range(len(counting) - 1, 0, -1):\r\n # new_list = countingSort(new_list, i - 1)\r\n\r\n for i in range(len(counting)-1,0,-1):\r\n for k in range(len(counting[i])):\r\n new_list.insert(0,counting[i][k])\r\n new_list = countingSort(new_list,i-1)\r\n y = open(\"sorted_words.txt\",\"w\")\r\n for i in range(len(new_list)):\r\n y.write(str(new_list[i][0])+\":\"+str(new_list[i][1]+\"\\n\"))", "def dictionary_creation(filename):\n\tfp = open(filename)\n\td = dict()\n\tfor 
line in fp:\n\t\t# print line\n\t\tfor word in line.split():\n\t\t\tword = word.strip(string.punctuation + string.whitespace)\n\t\t\t# print word\n\t\t\tif len(word) >5:\n\t\t\t\tif word not in d:\n\t\t\t\t\t# print 'in'\n\t\t\t\t\td[word] = 1\n\t\t\t\telse:\n\t\t\t\t\t# print 'not in'\n\t\t\t\t\td[word] += 1\n\treturn d\n\n\tfp.close()", "def get_word_list(file_name, to_skip_or_not_to_skip):\n fin = open(file_name) #opening file\n histogram={} \n if to_skip_or_not_to_skip == True: #if I want to skip the header this is set to True\n skip_first_part(fin)\n for line in fin: #runs through lines of book file\n line = line.replace(\"-\",\" \") #takes out dashed, underscroes, numbers, whitespaces, and punctuation\n line = line.replace(\"_\",\" \")\n to_remove = string.punctuation + string.whitespace + '0123456789' \n for word in line.split():\n word = word.strip(to_remove) #running through all words in each line \n if word == 'God' or 'Lord':\n pass\n else:\n word = word.lower()\n histogram[word] = histogram.get(word, 0)+1\n return histogram", "def read_words(f, words):\n with open(f) as file:\n for line in file:\n w = tokenizer.tokenize(line.strip())\n for word in w:\n try:\n words[word] += 1\n except:\n words[word] = 1", "def load_wordlist(self, filename):\n reg1 = re.compile(\"^([1-6]{5})[ \\t]+(.*)$\")\n f = open(filename, 'r')\n \n if(self.generate):\n wordlist = []\n reg2 = re.compile(\"^(\\S*)$\")\n for line in f:\n m1 = reg1.match(line)\n m2 = reg2.match(line)\n \n if(m1):\n wordlist.append(m1.group(2))\n elif(m2):\n wordlist.append(m2.group(1))\n \n else:\n wordlist = {}\n for line in f:\n m = reg1.match(line)\n if(m):\n wordlist[int(m.group(1))] = m.group(2)\n \n if((not self.generate and len(wordlist) < 7776) or \n (self.generate and len(wordlist) < 2**13)):\n stderr.write(\"Word list is too short\\n\")\n exit(5)\n \n self.wordlist = wordlist", "def print_word_freq(file):\n # with open(file, 'r') as text the r as the second arguement means that my intentions are to read the file\n with open(file, 'r') as text:\n # this reads the entire file and puts this into text string\n text_string = text.read()\n # returns the string respresentation of text string without removing special characters so you can see what you need to remove\n # print(repr(text_string))\n # this removes the specified characters from the text string\n text_string = text_string.replace(\",\", \"\")\n text_string = text_string.replace(\".\", \"\")\n text_string = text_string.replace(\"—\", \" \")\n text_string = text_string.replace(\"-\", \" \")\n text_string = text_string.replace(\"?\", \"\")\n text_string = text_string.replace(\":\", \"\")\n text_string = text_string.replace(\"'\", \"\")\n text_string = text_string.replace(\"\\\\n\", \"\")\n text_string = text_string.replace(\"’\", \"\")\n text_string = text_string.replace(\"]\", \"\")\n text_string = text_string.replace(\"[\", \"\")\n text_string = text_string.replace(\"\\\"\", \"\")\n # takes the text string and makes all the characters lower case\n text_string = text_string.lower()\n # takes the text string and splits all the words into a list this splits from space to space\n words_list = text_string.split()\n # a dictionary is a key and a value\n no_stop_words = {}\n # for loop that will cycle through the words list\n for word in words_list:\n # checking to see if the word is stop words\n if word not in STOP_WORDS:\n # if the word is already in the dictionary no stop words increment the value by 1\n if word in no_stop_words:\n no_stop_words[word] += 1\n # if the 
word is not in the dictionary no stop words add this to the dictionary and give it a value of 1\n else:\n no_stop_words[word] = 1\n \n sorted_dict = {}\n sorted_keys = sorted(no_stop_words, key=no_stop_words.get, reverse=True)\n \n for w in sorted_keys:\n sorted_dict[w] = no_stop_words[w]\n \n for key in sorted_dict:\n print(f\"{key:>15} | {sorted_dict[key]:2} {'*' * sorted_dict[key]}\")\n \n # good practice to ensure that we are properly closing the file in use at the end of the function\n text.close()", "def importBrainstormWordsFile(filename):\n #init the list with all words in the file\n allWords = []\n \n #open the brainstorming words file and read the lines\n with open(filename, 'r') as fp:\n lines = fp.read().splitlines()\n \n #split the lines for the idiots that didn't read the instructions and add them to the output\n for curLine in lines:\n if curLine.startswith('Please type one'):\n continue\n cutLines = curLine.replace(',',' ').split()\n \n #cycle the word and add them\n for curWord in cutLines:\n allWords.append(curWord.strip().lower())\n \n return allWords", "def getDictionary(tsvFile, wordIndex, ngram):\r\n \r\n reader = DescriptionReader(tsvFile, wordIndex, ngram)\r\n dictionary = corpora.Dictionary( d for d in reader )\r\n\r\n # remove stop words and words that appear only once\r\n stoplist = [] # might be specified in the furture\r\n stop_ids = [dictionary.token2id[stopword] for stopword in stoplist\r\n if stopword in dictionary.token2id]\r\n once_ids = [tokenid for tokenid, docfreq in dictionary.dfs.items() if docfreq == 1]\r\n dictionary.filter_tokens(stop_ids + once_ids) # remove stop words and words that appear only once\r\n dictionary.compactify() # remove gaps in id sequence after words that were removed\r\n \r\n return dictionary", "def learn(filename):\n word_dict = {} # Create empty dictionary\n first = None\n prev = None\n with open(filename, 'r', encoding='utf-8') as file:\n for line in file:\n list_words = line.lower().split()\n text = []\n for word in list_words:\n # take out leading and trailing punctuation characters\n words = word.strip(string.punctuation + string.digits)\n word_len = len(words)\n if word_len >= 1:\n text.append(words)\n\n if first is None:\n # Get the first word in the text file\n first = text[0]\n # iterate over text\n if prev:\n text.insert(0, prev)\n for counter, word in enumerate(text):\n if word not in word_dict:\n word_dict[word] = list()\n if counter < (len(text) - 1):\n following = counter + 1\n word_dict[word].append(text[following])\n prev = text[-1]\n return first, word_dict # return a tuple", "def read_dictionary():\n with open(FILE, 'r') as f:\n for vocabulary in f:\n if vocabulary[0].strip() not in dict_txt:\n dict_txt[vocabulary[0].strip()] = [vocabulary.strip()]\n else:\n dict_txt[vocabulary[0].strip()].append(vocabulary.strip())", "def ReadAndTokenize(filename):\n global CACHE\n global VOCABULARY\n if filename in CACHE:\n return CACHE[filename]\n comment = open(filename).read()\n words = Tokenize(comment)\n\n terms = collections.Counter()\n for w in words:\n VOCABULARY[w] += 1\n terms[w] += 1\n\n CACHE[filename] = terms\n return terms", "def file_preprocessing(input_file, output_file):\n # print(\"processing file \" + input_file)z\n # replace the punctuations with space\n replace_punctuation = str.maketrans(string.punctuation, ' '*len(string.punctuation))\n # stemming\n stemmer = PorterStemmer()\n\n with open(input_file, 'r', encoding='utf-8', errors='replace') as inFile, open(output_file,'w') as outFile:\n for line in 
inFile:\n # replace punctuations\n # convert camel case into space separated\n # convert snake case into space separated\n # remove language keywords\n custom_stopwords = [\"ENDCOND\",\"PVSCL\", \"IFCOND\", \"EVAL\", \"ENDCOND\", \"ELSECOND\", \"ELSEIFCOND\", \"WINDOW\", \"FUNCTION\",\n \"CALLBACK\", \"ABWA\", \"ERROR\", \"TODO\", \"RESOLVE\", \"DOCUMENT\", \"CLASS\", \"LINE\", \"ELEMENT\", \"UTILS\",\n \"NEW\", \"IS\", \"EMPTY\",\"ANNOTATIONS\",\"ANNOTATION\",\"UTILS\",\"CURRENT\",\"TEXT\",\"GET\",\"NAME\",\"LISTERNER\",\n \"ADD\", \"EVENT\", \"CREATE\",\"FOR\", \"FIND\", \"LENGTH\", \"USER\", \"VALUE\", \"ALERT\", \"ALERTS\", \"ID\", \"HANDLER\",\n \"MESSAGE\", \"GROUP\", \"RETRIEVE\", \"MANAGER\", \"LANGUAGE\", \"CONTENT\", \"INIT\"]\n line_witout_puncs = ' '.join([snake_to_spaces(camel_to_spaces(word))\n for word in line.translate(replace_punctuation).split()\n if len(word) >=4 and word not in stopwords.words('english') #and #word.upper() not in (name.upper() for name in custom_stopwords)\n and word not in all_keywords])\n\n\n # stemming\n # singles = []\n # for plural in line_witout_puncs.split():\n # try:\n # singles.append(stemmer.stem(plural))\n # except UnicodeDecodeError:\n # print(plural)\n\n # line_stemmed = ' '.join(singles)\n # print(line_stemmed, file=outFile)\n print(line_witout_puncs.encode(\"utf-8\"), file=outFile)", "def twitter_data(filename, dictionary):\r\n new_data = []\r\n with codecs.open(filename, 'r', 'utf8') as f:\r\n for line in f:\r\n new_line = []\r\n stuff = [x for x in line.lower().split() if\r\n ((has_letter(x) or len(x) >= 1) and keep_word(x, num_words, count_dict))]\r\n for word in stuff:\r\n new_line.append(dictionary.get(word, 1))\r\n if len(new_line) > 0:\r\n new_data.append(new_line)\r\n return new_data", "def index_embedding_words(self, embedding_file):\n words = set()\n with open(embedding_file) as f:\n for line in f:\n w = TokenDictionary.normalize(line.rstrip().split(' ')[0])\n words.add(w)\n return words", "def buildGraph(file):\r\n dict = {}\r\n graph = Graph()\r\n wfile = open(file,'r')\r\n for line in wfile:\r\n word = line[:-1]\r\n for i in range(len(word)):\r\n bucket = word[:i] + '_' + word[i+1:]\r\n if bucket in dict:\r\n dict[bucket].append(word)\r\n else:\r\n dict[bucket] = [word]\r\n for bucket in dict.keys():\r\n for word1 in dict[bucket]:\r\n for word2 in dict[bucket]:\r\n if word1 != word2:\r\n graph.addEdge(word1,word2)\r\n return graph", "def init(wordlist_filename):\n global WORDS\n if WORDS == None:\n WORDS = []\n bad_line = lambda x: x.strip() == '' or x.startswith('#')\n with codecs.open(wordlist_filename, 'r', 'utf-8') as filehandle:\n lines = filehandle.readlines()\n WORDS = set([x.lower().strip() for x in lines if not bad_line(x)])", "def import_words(file_name):\n with open(file_name) as word_list:\n words = []\n for line in word_list:\n number, word = line.strip().split(\"\\t\")\n words.append(word.strip())\n # print(f\"Imported {(len(word_dict))} words\")\n\n return words", "def tokenize_and_split_bis(sms_file):\n \n dic = {}\n list1 = []\n list2 = []\n list3 = []\n list4 = []\n i = -1\n document = 0\n terms = 0\n new_document = True\n ham = True\n for line in open(sms_file, 'r').readlines():\n w = []\n document += 1\n new_document = True\n for word in line.split():\n i = i + 1\n if word == \"ham\":\n ham = True\n i = i - 1\n elif word == \"spam\":\n ham = False\n i = i - 1\n else:\n if word not in dic:\n dic[word] = i\n w.append(dic[word])\n list3.append(1)\n list4.append(1)\n new_document = False\n terms += 1\n else : 
\n i = i - 1\n w.append(dic[word])\n list4[dic[word]] += 1\n terms += 1\n if new_document: \n list3[dic[word]] += 1\n new_document = False\n \n if ham and w !=[]:\n list2.append(w)\n elif ham == False and w !=[]:\n list1.append(w)\n\n moy = 0\n len_dic = len(dic.keys())\n list5 = [0 for x in range(len_dic)]\n for key in dic.keys():\n if list4[dic[key]] > 0:\n tf = list4[dic[key]] / terms\n idf = math.log(document / list3[dic[key]])\n tfIdf = tf * idf\n list5[dic[key]] = tfIdf\n # print(\"the word \" + str(key) + \" appairs \" + str(list4[dic[key]]) + \" times.\")\n # print(\"his frequency is \" + str(list4[dic[key]] / terms) )\n # print(\"the word \" + str(key) + \" appairs \" + str(list3[dic[key]]) + \" times in each document.\")\n # print(\"his frequency is \" + str(idf))\n # print(\"utility \" + str(tfIdf))\n moy += tfIdf\n \n moy = moy / len_dic \n # print(moy)\n dic_bis = {}\n i = -1\n for key in dic.keys():\n value = list5[dic[key]]\n # print(str(value))\n if (value > oracle * moy):\n i += 1\n dic_bis[key] = i\n # else:\n # print(\"not pass \" + key + \" \" + str(value))\n \n \n # print(dic_bis == dic)\n # print(dic)\n return dic_bis,list1,list2", "def load_dictionary(hash_table, filename):\n\n file = open(filename)\n lines = file.readlines()\n start = timeit.default_timer()\n for line in lines:\n hash_table.insert(line.rstrip(),1)\n if timeit.default_timer() - start > 4:\n break\n file.close()", "def top_words(source, number):\n\n keys = set()\n\n ht = HashMap(2500, hash_function_2)\n\n # This block of code will read a file one word at a time and\n # put the word in `w`\n with open(source) as f:\n for line in f:\n words = rgx.findall(line)\n for w in words:\n current_word = w.lower()\n #get a count for current word\n current_count = ht.get(current_word)\n if current_count is None:\n ht.put(current_word, 1)\n else:\n ht.put(current_word, current_count + 1)\n\n #create an empty list to store top words in\n tuple_list = []\n\n #traverse hash_map to find most used words\n for i in range(ht.capacity):\n if ht._buckets[i] is not None:\n #traverse links at each bucket\n current = ht._buckets[i].head\n while current is not None:\n tuple_list.append((current.key, current.value))\n current = current.next\n\n #create an ordered list out of items\n iter_tuple_quick_sort(tuple_list, len(tuple_list) - 1, 0)\n\n #create a new list to return with passed number arg\n return_list = []\n list_counter = 0\n while list_counter <= number - 1:\n if list_counter == len(tuple_list) - 1:\n break\n else:\n return_list.append(tuple_list[list_counter])\n list_counter += 1\n\n return return_list", "def word_counts(file):\n words = defaultdict(int)\n regex = re.compile('[' + string.punctuation + ']')\n for line in open(file):\n for word in [regex.sub('', w) for w in line.lower().split()]:\n words[word] += 1\n\n return words", "def parse_file(input_file):\n # Automatically close the file after being used\n with open(input_file) as text:\n # Read file and split each word into an element in a list\n data = text.read().split()\n\n # Sort the list\n # Python sort automatically does lexical sorting\n data.sort()\n\n # For each word, use as Dictionary key and count the occurrences of the word and use as value\n frequency_table = {word: data.count(word) for word in data}\n\n # Return the frequency table\n return frequency_table", "def print_word_freq(file):\n with open(file) as text:\n text = text.read().lower()\n text = text.replace(\"\\n\", \" \")\n text = text.replace(\"’\", \"\")\n # text = \" \".join(text.split())\n 
# print(text)\n for character in string.punctuation:\n text = text.replace(character, \"\")\n word_list = text.split()\n clean_list = []\n for word in word_list:\n if word not in STOP_WORDS:\n clean_list.append(word)\n \n\n # for stop_word in STOP_WORDS:\n # if stop_word in word_list:\n # word_list.remove(stop_word)\n\n\n new_dict = {}\n for word in clean_list:\n new_dict[word] = clean_list.count(word)\n sorted_dict = sorted(new_dict.items())\n print(sorted_dict)\n\n # print(f\"{key} | {value} {'*' * value}\")\n\n \n # for stop_word in STOP_WORDS:\n # text = text.replace(stop_word, \"\")\n\n # for word in word_list:\n # if word in string.punctuation:\n # #do something\n # if word in STOP_WORDS:\n\n \n # for stop_word in STOP_WORDS:\n # text = text.replace(stop_word, \"\")\n # print(text)", "def spell_file(fn, wordcost, maxword):\n\n def infer_spaces(s):\n \"\"\"Uses dynamic programming to infer the location of spaces in a string\n without spaces.\"\"\"\n global unfolded\n if s in unfolded:\n return unfolded[s]\n\n # Find the best match for the i first characters, assuming cost has\n # been built for the i-1 first characters.\n # Returns a pair (match_cost, match_length).\n def best_match(i):\n candidates = enumerate(reversed(cost[max(0, i-maxword):i]))\n return min((c + wordcost.get(s[i-k-1:i], 9e999), k+1) for k,c in candidates)\n\n # Build the cost array.\n cost = [0]\n for i in range(1,len(s)+1):\n c,k = best_match(i)\n cost.append(c)\n\n # Backtrack to recover the minimal-cost string.\n out = []\n i = len(s)\n while i>0:\n c,k = best_match(i)\n assert c == cost[i]\n out.append(s[i-k:i])\n i -= k\n \n unfolded[s] = ' '.join(reversed(out))\n return ' '.join(reversed(out))\n\n\n\n speller = aspell.Speller('lang', 'en')\n for w in slang:\n speller.addtoSession(w)\n \n with open(tweet_tmp1_dir + fn, 'r') as fin:\n with open(tweet_tmp2_dir + fn, 'w') as fout:\n res = []\n for l in fin:\n prefix = ''\n if 'test' in fn:\n comma = l.find(',')\n prefix = l[:comma].strip()\n l = l[comma+1:]\n try:\n assert(prefix.isdigit())\n except:\n print(prefix, l)\n prefix += ','\n \n ll = ''\n \n ws = [w for w in l.strip().split(' ') if len(w) > 0]\n for w in ws:\n if w in correct_word:\n nw = correct_word[w]\n elif (w.startswith('<') and w.endswith('>')) or w in whitelist or speller.check(w):\n nw = w\n else:\n try:\n nw1, nw2 = speller.suggest(w)[:2]\n nwdist1 = jellyfish.levenshtein_distance(w,nw1)\n nwdist2 = jellyfish.levenshtein_distance(w,nw2)\n \n if nw2.count(' ') < nw1.count(' ') or (nwdist1 > MAX_DIST_CORRECTION and nwdist2 < nwdist1) :\n nw1 = nw2\n nwdist1 = nwdist2\n if nwdist1 <= MAX_DIST_CORRECTION:\n nw = nw1.lower()\n else:\n nw = w.lower()\n except:\n nw = infer_spaces(w)\n if nw.count('.') >= nw.count(' ')/3:\n nw = nw.replace('.', '')\n elif nw.count('-') >= nw.count(' ')/3:\n nw = nw.replace('-', '')\n nw = nw.replace(' ', ' ').lower()\n ll += nw + ' '\n correct_word[w] = nw\n res.append(prefix+ll.strip())\n# fout.write(prefix+ll.strip()+'\\n')\n fout.write('\\n'.join(res))", "def process_file(filename, skip_header=True):\n hist = {}\n fp = file(filename)\n fullwordlist=[]\n # if skip_header:\n # skip_gutenberg_header(fp)\n\n for line in fp:\n holder=process_line(line,hist)\n #print holder\n fullwordlist.extend(holder)\n return fullwordlist", "def make_word_dict():\n d = dict()\n fin = open(\"words.txt\")\n for line in fin:\n word = line.strip().lower()\n d[word] = None\n #have to add single letter words to the word list;\n #also, the empty string is considered a word.\n for letter in 
['a', 'i', '']:\n d[letter] = letter\n return d", "def load_wordlist(filename):\n # YOUR CODE HERE\n words = {}\n f = open(filename, 'rU')\n text = f.read()\n text = text.split('\\n')\n for line in text:\n words[line] = 1\n f.close()\n return words", "def __init__(self, vocab_file, max_size):\n\t\tself._word_to_id = {}\n\t\tself._id_to_word = {}\n\t\tself._count = 0 # keeps track of total number of words in the Vocab\n\n\t\t# [UNK], [PAD], [START] and [STOP] get the ids 0,1,2,3.\n\t\tfor w in [UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]:\n\t\t\tself._word_to_id[w] = self._count\n\t\t\tself._id_to_word[self._count] = w\n\t\t\tself._count += 1\n\n\t\t# Read the vocab file and add words up to max_size\n\t\twith open(vocab_file, 'r') as vocab_f:\n\t\t\tfor line in vocab_f:\n\t\t\t\tpieces = line.split()\n\t\t\t\tif len(pieces) != 2:\n\t\t\t\t\tprint ('Warning: incorrectly formatted line in vocabulary file: %s\\n' % line)\n\t\t\t\t\tcontinue\n\t\t\t\tw = pieces[0]\n\t\t\t\tif w in [SENTENCE_START, SENTENCE_END, UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]:\n\t\t\t\t\traise Exception(\n\t\t\t\t\t\t'<s>, </s>, [UNK], [PAD], [START] and [STOP] shouldn\\'t be in the vocab file, but %s is' % w)\n\t\t\t\tif w in self._word_to_id:\n\t\t\t\t\traise Exception('Duplicated word in vocabulary file: %s' % w)\n\t\t\t\tself._word_to_id[w] = self._count\n\t\t\t\tself._id_to_word[self._count] = w\n\t\t\t\tself._count += 1\n\t\t\t\tif max_size != 0 and self._count >= max_size:\n\t\t\t\t\tprint (\"max_size of vocab was specified as %i; we now have %i words. Stopping reading.\" % (\n\t\t\t\t\tmax_size, self._count))\n\t\t\t\t\tbreak\n\n\t\tprint (\"Finished constructing vocabulary of %i total words. Last word added: %s\" % (\n\t\tself._count, self._id_to_word[self._count - 1]))", "def create_dictionary(filename):\n\tword_set = set()\n\tif os.path.isfile(filename):\n\t\twith open(filename, 'r') as f:\n\t\t\tfor line in iter(f):\n\t\t\t\tword_set.add(line.strip('\\n'))\n\telse:\n\t\tprint \"File not found!\"\n\treturn word_set", "def read_dictionary():\n global dic\n with open(FILE, 'r') as f:\n for line in f:\n word_list = line.split()\n word = word_list[0].strip()\n dic.append(word)", "def read_data(filename,words):\n try:\n f = open(filename)\n reader = f.read().splitlines()\n for line in reader:\n #print(line[0])\n words.add(line.lower())\n f.close()\n except IOError:\n print 'Input file reading failed,'\n return words", "def read_dictionary():\n\tglobal dictionary\n\twith open(FILE, \"r\") as f:\n\t\tfor words in f:\n\t\t\tdictionary += words.split()", "def create_word_map(tokenized_descriptions_file_path, word_dictionary_output_path):\n if os.path.exists(word_dictionary_output_path):\n print(\"Word map already exists in workspace. Will be reused.\")\n return\n\n print(\"Word map not found. 
Generating....\")\n\n words_list = []\n words_to_id = {}\n\n with open(tokenized_descriptions_file_path, 'r') as file:\n for line in file:\n tokens = line.strip().split(\",\")\n words_list.extend(tokens[1:])\n\n # remove duplicate words\n words_list = list(set(words_list))\n\n # sorting the words\n words_list = sorted(words_list)\n for i in range(len(words_list)):\n words_to_id[words_list[i]] = i\n\n with open(word_dictionary_output_path, 'w') as f:\n [f.write('{0},{1}'.format(key, value) + \"\\n\") for key, value in words_to_id.items()]", "def __init__(self):\n stopwords_file = open(self.filepath, \"r\")\n for line in stopwords_file.readlines():\n line2 = line.replace(\"\\n\", \"\") \n self.add(line2)", "def print_word_freq(file):\n with open(file) as text:\n text_string = str(text.readlines())\n text_string = text_string.replace(\",\", \"\")\n text_string = text_string.replace(\".\", \"\")\n text_string = text_string.replace(\"-\", \"\")\n text_string = text_string.replace(\"?\", \"\")\n text_string = text_string.replace(\":\", \"\")\n text_string = text_string.replace(\"'\", \"\")\n text_string = text_string.replace(\"\\\\n\", \"\")\n text_string = text_string.replace(\"[\", \"\")\n text_string = text_string.replace(\"]\", \"\")\n word_list = text_string.split()\n no_stop_words = []\n for word in word_list:\n if word in STOP_WORDS:\n pass\n else: no_stop_words.append(word)\n clean_list = {}\n for word in no_stop_words:\n clean_list[word] = no_stop_words.count(word) \n print(clean_list)", "def load_words(file_path: str) -> List[Word]:\n \n words = load_words_raw(file_path)\n \n \n words = remove_stop_words(words)\n\n \n words = remove_duplicates(words)\n \n return words", "def word_frequency():\n\n song = open(\"data/yellow_submarine.txt\")\n d = dict()\n for line in song:\n line = line.strip()\n line = line.lower()\n punctuations = \"\"\"!()-[]{};:'\"\\,<>./?@#$%^&*_~\"\"\" # remove punctuation https://www.programiz.com/python-programming/examples/remove-punctuation\n no_punct = \"\" # remove punctuation\n for char in line: # remove punctuation\n if char not in punctuations: # remove punctuation\n no_punct = no_punct + char # remove punctuation\n words = line.split(\" \")\n for word in words:\n d[word] = d.get(word, 0) + 1\n return d", "def create_dict(fd):\n # initialize an empty dictionary\n full_dict = {}\n # loop through file\n for line in fd:\n # lowercase everything in line, then split line into a list\n line = line.lower().split()\n # loop through elements in the list of words in the splitted line\n for word in line:\n # strip words from puncuation using string module\n word = word.strip(string.punctuation)\n # if words contains only alphabatic characters and of length > 1\n if word.isalpha() and len(word)!= 1:\n if len(word) in full_dict:\n full_dict[len(word)].add(word)\n else:\n full_dict[len(word)] = set()\n full_dict[len(word)].add(word)\n return full_dict", "def __init__(self, filename):\n\n self.term_dict = {}\n for line in open(filename):\n if line.startswith(\"#\"):\n continue\n\n #print line\n word, w_type = line.strip().split(\"\\t\")\n self.term_dict[word.strip().lower()] = \"CHESS_\" + w_type.strip().lower()", "def print_word_freq(file):\n# Opening file to be read\n with open(file, \"r\") as f:\n file_contents = f.read()\n\n\n# # Taking away punctuation and lowercase all words\n word_list = file_contents.lower().replace(',',' ').replace('.',' ').replace('!',' ').split()\n # print(word_list)\n\n nice_list = []\n for word in word_list:\n if word not in STOP_WORDS:\n 
nice_list.append(word)\n # print(nice_list)\n\n d = {}\n for word in nice_list:\n if word not in d.keys():\n d[word] = 1\n else:\n d[word] += 1 \n # print(sorted(d, key=d.get, reverse=True)\n # sorted(d, key=d.get, reverse=true)\n # print(d)\n\n # for word in sorted(d):\n # print((word, d[word]), end = \" \")\n\n d_filtered = sorted(d, key=d.get, reverse=True)\n for x in d_filtered:\n print(x, d[x])", "def get_words(txtfile):\n\n global _wordset\n global _postrie\n\n f = open(txtfile,'r')\n _wordset = set([x.lower() for x in set(f.read().split()) \\\n if not re.match('.*[\\W,\\d]|^.$',x)])\n\n #print('building suffix trie')\n _postrie = trienode(pre = False)\n _postrie.grow(_wordset)\n\n # Since this will be recursed through later, take care of it now.\n if len(_wordset) > sys.getrecursionlimit():\n sys.setrecursionlimit(len(_wordset))", "def loadBrainstormingCorrectAnswersFile( filename ):\n #read the file and init the output struct\n with open(filename, 'r') as fp:\n lines = fp.readlines()\n synonymTable = {}\n curCategory = ''\n \n for curLine in lines:\n #skip empty lines and lines that start with # as they are comments\n curLine = curLine.strip().lower()\n if not curLine or curLine.startswith('#'):\n continue\n \n #the > symbol indicates a new category all other lines are synonys for this cateogry\n if curLine.startswith('>'):\n curCategory = curLine[1:].strip()\n synonymTable[curCategory] = [curCategory]\n continue\n \n synonymTable[curCategory].append(curLine)\n \n return synonymTable", "def get_analyze_per_file(self):\n \"\"\"Exclude tags, exclude binary (img), count words without non literal characters and digits\"\"\"\n filename = f'{self.path}/{self.filename}'\n file = open(filename, 'r', encoding='utf-8')\n df_tmp = pd.DataFrame(columns=['word', 'cnt', 'word_low'])\n w_cnt = 0\n word_counter = {}\n data = file.read()\n head, sep, tail = data.partition('<binary')\n head = re.sub('\\\\s\\\\s*', ' ', (re.sub('\\\\W|\\\\d', ' ', re.sub('<.*?>', '', head))))\n word_list = head.split()\n for word in word_list:\n\n if word not in word_counter:\n word_counter[word] = 1\n else:\n word_counter[word] = word_counter[word] + 1\n w_cnt += 1\n\n for word, occurance in word_counter.items():\n df_tmp = df_tmp.append({'word': '{:15}'.format(word), 'cnt': '{:3}'.format(occurance),\n 'word_low': '{:15}'.format(word).lower()}, ignore_index=True)\n df_tmp = df_tmp.sort_values(by='word_low')\n df_tmp.loc[(df_tmp.word != df_tmp.word_low), 'word'] = df_tmp.cnt\n df_tmp.loc[(df_tmp.word == df_tmp.cnt), 'cnt'] = 0\n df_tmp.loc[(df_tmp.word == df_tmp.word_low), 'word'] = 0\n df_tmp['word'] = df_tmp.word.astype(int)\n df_tmp['cnt'] = df_tmp.cnt.astype(int)\n df_tmp = df_tmp.groupby(['word_low'])['cnt', 'word'].sum().reset_index()\n conn = sqlite3.connect('for_python_ht.db')\n try:\n try:\n sqlite_for_ht.CreateTableSingle.delete_table(f_3, self.filename)\n print(datetime.now(), '-', self.filename, 'Table deleted at the start point')\n except Exception:\n print(datetime.now(), '-', 'Something went wrong')\n traceback.print_exc()\n df_tmp.to_sql(name=self.filename, con=conn, index=False)\n print(datetime.now(), '-', self.filename, 'Table created and filled with data')\n except Exception:\n print(datetime.now(), '-', 'file with name {} already exists'.format(self.filename))\n traceback.print_exc()\n print(datetime.now(), '-', 'word analyse for', self.filename, 'done')\n sqlite_for_ht.HandleTemp.update_table(f_2, 'status', 'Done', self.filename)\n return None", "def convert_from_text(self, file_name):\n with 
open(file_name, 'r') as reader:\n words_list = []\n for line in reader:\n words_list.extend(line.split())\n\n for word in set(words_list):\n if word.isalpha():\n self.insert_word(word.lower())\n else:\n self.insert_word(''.join([c for c in word if c.isalpha()]).lower())", "def create_B_words(path_to_pairs,\n path_to_librispeech_text,\n path_to_phonemes,\n path_save,\n freq_sim,\n len_sim,\n edit_sim):\n for i in range(len(path_to_pairs)):\n \n pairs = []\n dic_cl_eq = {} # Classe d'equivalence pour le sens des mots\n \n with open(path_to_pairs[i]) as f:\n for line in f:\n line = line.replace('\\n', '').split(' ')\n pairs.append(line)\n if line[0] in dic_cl_eq:\n dic_cl_eq[line[0]].add(line[1])\n else:\n dic_cl_eq[line[0]] = {line[1]}\n if line[1] in dic_cl_eq:\n dic_cl_eq[line[1]].add(line[0])\n else:\n dic_cl_eq[line[1]] = {line[0]}\n \n dic_cl_eq_prev = {}\n while dic_cl_eq_prev != dic_cl_eq:\n dic_cl_eq_prev = copy.deepcopy(dic_cl_eq)\n for word in dic_cl_eq:\n for syn in dic_cl_eq[word]:\n dic_cl_eq[word] = set.union(dic_cl_eq[word], dic_cl_eq[syn])\n \n with open(path_to_librispeech_text) as f:\n text_librispeech = f.read()\n text_librispeech_split = text_librispeech.replace('\\n', ' ').split(' ')\n freq_libri = {}\n for word in text_librispeech_split:\n if word in dic_cl_eq:\n if word in freq_libri:\n freq_libri[word] += 1\n else:\n freq_libri[word] = 1\n \n phonemes = []\n with open(path_to_phonemes[i]) as f:\n for line in f:\n line = line.replace('\\n', '').split(' ')\n phonemes.append(line)\n \n dic_word_phonemes = {}\n for j in range(len(pairs)):\n dic_word_phonemes[pairs[j][0]] = phonemes[j][0]\n dic_word_phonemes[pairs[j][1]] = phonemes[j][1]\n \n file = open(path_save[i], 'w+')\n file.truncate(0)\n \n for j in range(len(pairs)):\n A, X = pairs[j]\n B_0 = []\n for word in dic_cl_eq:\n if word not in dic_cl_eq[A]:\n if np.abs(np.log(freq_libri[word])/np.log(freq_sim) \\\n - np.log(freq_libri[A])/np.log(freq_sim)) <= 1:\n if (len(word) > (1-len_sim)*len(A)) and \\\n (len(word) < (1+len_sim)*len(A)):\n p_A = dic_word_phonemes[A]\n p_X = dic_word_phonemes[X]\n p_word = dic_word_phonemes[word]\n if np.abs(dist(p_A, p_X) - dist(p_X, p_word)) < edit_sim:\n B_0.append(word)\n line_0 = ' '.join([A, X] + B_0)\n \n X, A = pairs[j]\n B_1 = []\n for word in dic_cl_eq:\n if word not in dic_cl_eq[A]:\n if np.abs(np.log(freq_libri[word])/np.log(freq_sim) \\\n - np.log(freq_libri[A])/np.log(freq_sim)) <= 1:\n if (len(word) > np.around((1-len_sim)*len(A), decimals=2)) and \\\n (len(word) < np.around((1+len_sim)*len(A), decimals=2)):\n p_A = dic_word_phonemes[A]\n p_X = dic_word_phonemes[X]\n p_word = dic_word_phonemes[word]\n if np.abs(dist(p_A, p_X) - dist(p_X, p_word)) < edit_sim:\n B_1.append(word)\n line_1 = ' '.join([A, X] + B_1)\n \n if max(len(B_0), len(B_1)) == 0:\n print(X, A)\n \n line = line_0 if len(line_0) > len(line_1) else line_1\n if j < len(pairs) - 1:\n line += '\\n'\n file.write(line)\n \n file.close()", "def process_dict(text, frequency_threshold):\n\n # Trying to load previous unique_words (pickle file)\n UNIQUE_WORDS_PICKLE = \"unique_words_with_frequency_\" + str(frequency_threshold) + \".pickle\"\n \n unique_words = None\n if os.path.isfile(UNIQUE_WORDS_PICKLE):\n try:\n with open(UNIQUE_WORDS_PICKLE, 'r') as f:\n unique_words = pickle.load(f)\n except:\n os.remove(UNIQUE_WORDS_PICKLE)\n unique_words = None\n\n if (type(unique_words) == list):\n return unique_words\n\n\n WORD_COUNT_PICKLE = \"word_count.pickle\"\n WORD_COUNT = 253855\n\n print(\"Processing dictionary. 
This will take a while.\")\n\n # Trying to load previous word_count (pickle file)\n word_count = None\n if os.path.isfile(WORD_COUNT_PICKLE):\n try:\n with open(WORD_COUNT_PICKLE, 'r') as f:\n word_count = pickle.load(f)\n if len(word_count) != WORD_COUNT:\n os.remove(WORD_COUNT_PICKLE)\n word_count = None\n except:\n raise\n os.remove(WORD_COUNT_PICKLE)\n word_count = None\n\n # count words\n if word_count == None:\n print(\"Pickle file not found. Counting word occurence...\")\n\n # grab all the words\n words = text.split(\" \")\n\n # counting word occurence\n word_count = dict(Counter(words).most_common())\n \n # saving word count for future reuse\n with open(WORD_COUNT_PICKLE, 'w') as f:\n pickle.dump(word_count, f)\n print(\"Word count saved for future reuse.\")\n \n # making sure we have the correct count loaded\n assert(type(word_count) == dict)\n assert(len(word_count) == WORD_COUNT)\n\n # remove the duplicates and single-character words.\n unique_words = [w for w in word_count.keys() if len(w) > 1]\n vocab_size = len(unique_words)\n print(\"Vocab size:\", vocab_size)\n\n # remove words with frequency lower than 1%\n unique_words = [word for word in unique_words if float(word_count[word]) / vocab_size > frequency_threshold]\n print(\"Vocab size (>%.3f%% frequency): %d\" % ((frequency_threshold * 100), len(unique_words)))\n\n unique_words.sort(key=lambda word: len(word), reverse=True)\n unique_words.append('a')\n unique_words.append('i')\n\n # save unique words for future reuse\n with open(UNIQUE_WORDS_PICKLE, 'w') as f:\n pickle.dump(unique_words, f)\n print(\"unique_words saved for future reuse.\")\n\n return unique_words", "def load_cows(filename):\r\n print(\"Loading words from file...\")\r\n # inFile: file\r\n inFile = open(filename, 'r')\r\n # wordlist: list of strings\r\n wordlist = {}\r\n for line in inFile:\r\n cow = line.split(',')\r\n wordlist[cow[0]] = int(cow[1]) # 0: name, 1: weight\r\n inFile.close()\r\n print(\" \", len(wordlist), \"words loaded.\")\r\n return wordlist", "def create_dictionary(filename):\n file = open(filename, 'r')\n text = file.read()\n file.close()\n words = text.split()\n d = {}\n current_word = '$'\n \n for next_word in words:\n if current_word not in d:\n d[current_word] = [next_word]\n else:\n d[current_word] += [next_word]\n if next_word[-1] == '.' or next_word[-1] == '!' 
or next_word[-1] == '?':\n current_word = '$'\n else:\n current_word = next_word\n return d", "def get_word_freq(filein):\n freq = {}\n\n # Open file handles with context manager\n with open(filein) as f:\n\n # Read a single line at a time so as not to crush memory\n for line in f:\n\n # Tokenize and iterate\n for word in line.split():\n\n # Use try/except instead of if/then for performance\n # Likely after the first 1M tweets that the key will be contained\n try:\n freq[word] += 1\n except KeyError:\n freq[word] = 1\n\n return freq", "def word_list():\n\n d = {}\n with open('words.txt') as fin:\n for line in fin.readlines():\n word = line.strip().lower()\n d[word] = True\n return d", "def set_words(data_path):\n w_df = pd.read_csv(data_path, names=['es','gn','syn1','syn2'], encoding='iso-8859-1') # file -i\n gn_df = w_df[['gn','syn1','syn2']].drop_duplicates()\n gn_lst = gn_df['gn'].tolist()+gn_df['syn1'].tolist()+gn_df['syn2'].tolist()\n cleanedList = [x for x in gn_lst if str(x) != 'nan' and len(x)>=3]\n gn_set = set(cleanedList)\n \n print(len(gn_set))\n \n f = open(data_path[:-4]+\".txt\", 'w')\n for w in gn_set:\n f.write('{}\\n'.format(w))\n f.close()\n \n return list(gn_set)", "def fill_words_table(self, statistics, path, filemoving, conn, logg, parser):\n logg.writing_log(conn, 'Starting filling words table')\n c = conn.cursor()\n val1 = statistics.book_name(path, filemoving, parser).replace(' ', '_')\n sql1 = \"CREATE TABLE \" + val1 + \" (word text, count integer, count_uppercase integer)\"\n c.execute(sql1)\n val2 = statistics.frequency(path, filemoving, parser)\n sql2 = \"INSERT INTO \" + val1 + \" VALUES(?,?,?)\"\n for key, value in val2.items():\n if not key.istitle():\n c.execute(sql2, (key, value, (0 if val2.get(key.capitalize()) == None else val2.get(key.capitalize()))))\n logg.writing_log(conn, 'Words table is filled')\n conn.commit()", "def generate_input_with_unknown_words(file_path):\r\n\tseen_tuples = []\r\n\tlabel_matches = dict()\r\n\tfile_lines = []\r\n\twith open(file_path) as f:\r\n\t\tfor line in f:\r\n\t\t\tfile_lines = file_lines + [line.lower().split()]\r\n\t\tword_tuples = zip(file_lines[0::3], file_lines[1::3], file_lines[2::3])\r\n\t\tfor (words, part_of_speech, word_type) in word_tuples:\r\n\t\t\ttype_tuple = zip(words, word_type)\r\n\t\t\tfor word_and_tag in type_tuple:\r\n\t\t\t\tif word_and_tag in seen_tuples:\r\n\t\t\t\t\tlabel_matches.update({word_and_tag : (label_matches.get(word_and_tag, 0) + 1)})\r\n\t\t\t\telse:\r\n\t\t\t\t\ttag = word_and_tag[1]\r\n\t\t\t\t\tunknown_entry = (\"<UNK>\", tag)\r\n\t\t\t\t\tlabel_matches.update({unknown_entry : (label_matches.get(unknown_entry, 0) + 1)})\r\n\t\t\t\t\tseen_tuples.append(word_and_tag)\r\n\treturn label_matches", "def read_dictionary():\n with open(FILE, 'r') as f:\n for line in f:\n words_lst = line.split()\n for word in words_lst:\n dict_list.append(word)", "def uniquewords(self):\n vas = set({})\n file = self.read1()\n for line in file:\n line = line.strip()\n string = re.sub(\"[^0-9a-zA-Z]\", \" \", line).split(\" \")\n for s_i in string:\n if s_i != \"\":\n vas.add(s_i)\n l_i = list(vas)\n self.print(l_i)\n self.write(l_i)\n logging.debug(\"Starting with to\")\n return l_i", "def read_data(file_path):\n words=[]\n dic_word={}\n actual_text=[]\n for line in open(file_path,encoding='utf-8'):\n words_line=line.strip().split(' ')\n for ite in words_line:\n if ite not in dic_word:\n dic_word[ite]=1\n words.extend(words_line)\n actual_text.append(words_line)\n\n\n #with zipfile.ZipFile(file_path) as 
f:\n #words = tf.compat.as_str(f.read(f.namelist()[0])).split()\n\n return words,len(dic_word),actual_text", "def get_words(f, letters):\n # lettrs = []\n # okay = True\n # words = []\n # nline = ''\n # with open(f, 'r') as vocabulary:\n # for line in vocabulary.readlines():\n # nline = line.replace(\"\\n\", \"\").lower()\n # if 4 <= len(nline) <= 9 and letters[4] in nline:\n # lettrs = list(nline)\n # for lettr in lettrs:\n # if lettr not in letters:\n # okay = False\n # break\n # else:\n # okay = True\n # if okay is True:\n # words.append(nline)\n #\n # lettrs = copy.copy(letters)\n # nwords = []\n # okay = True\n # for word in words[::1]:\n # lettrs = copy.copy(letters)\n # for letter in word:\n # if letter in lettrs:\n # lettrs[lettrs.index(letter)] = '0'\n # else:\n # okay = False\n # break\n # if okay is True:\n # nwords.append(word)\n # okay = True\n #\n # unique = True\n # words = []\n # for word in nwords:\n # if nwords.count(word) > 1:\n # nwords.remove(word)\n # nwords.sort()\n # return nwords\n res = []\n cort_letters = []\n our_letters = []\n res = []\n f = open(f, 'r')\n for line in f:\n line = line.replace(\"\\n\", \"\").strip().lower()\n if 4 <= len(line) <= 9:\n if letters[4] in line:\n count = 0\n for each_letter in line:\n if each_letter in letters:\n count += 1\n if count == len(line):\n our_letters.append(line)\n f.close()\n for each_word in our_letters:\n count_let = 0\n for each_letter in each_word:\n if each_word.count(each_letter) <= letters.count(each_letter):\n count_let += 1\n if count_let == len(each_word):\n res.append(each_word)\n for each in res:\n if res.count(each) > 1:\n res.remove(each)\n return sorted(res)", "def add_encryptors(word):\r\n\r\n assert isinstance(word, str), 'Strings only!'\r\n if word == \"\": return None\r\n file = open(r'words.txt', 'r')\r\n for line in file:\r\n first_word = line.split()[0]\r\n if word == first_word:\r\n print('Error, word is already added to the list!')\r\n return 'Error, word is already added to the list!'\r\n file.close()\r\n\r\n\r\n new_encryption = str(create_encryptors())\r\n blank = True\r\n while blank == True:\r\n file = open(r'words.txt', 'r')\r\n blank = False\r\n for line in file:\r\n if new_encryption == line.split()[2]:\r\n new_encryption = str(create_encryptors())\r\n blank = True\r\n file.close()\r\n break\r\n file.close()\r\n\r\n\r\n if len(word) < 4: tabs = 3\r\n elif len(word) <8: tabs = 2\r\n else: tabs = 1\r\n\r\n file = open(r'words.txt', 'a')\r\n file.write(word + '\\t'*tabs + \"= \" + new_encryption + \"\\n\")", "def words_occur():\n\n # Get the file name from keyboard\n # file_name = input(\"Enter the the file name: \")\n file_name = 'Untitled.txt'\n\n # File open, read and save in the word_list\n f = open(file_name, 'r')\n word_list = f.read().split()\n f.close()\n\n # Get the unic words inclusion's number\n occurs_dict = {}\n for word in word_list[:10]: # test on first ten\n if word.isalpha: # why isalpha isn't working?\n print(word)\n # Increment the counter\n occurs_dict[word] = occurs_dict.get(word, 0) + 1\n\n # Present results\n print(\"File %s has %d words (%d are unique)\" \\\n % (file_name, len(word_list), len(occurs_dict)))\n List = [occurs_dict.values].sort(reverse=True) # how to sort counts?\n print(List)\n print(occurs_dict)", "def _read_words(self, path):\r\n\r\n word_file = open(path)\r\n for line in word_file.readlines():\r\n pair = line.split('::')\r\n self.insert(pair[0], pair[1].rstrip())\r\n word_file.close()", "def parse_file(input_lst):\n word_dct = {}\n for line in 
input_lst:\n raw_output = line.split() # these are lists of strings\n for str_ in raw_output: # strings\n str_ = str_.lower()\n str_ = str_.replace(\"-\", \" \")\n str_ = str_.replace(\"?\", \"\")\n str_ = str_.replace(\"!\", \"\")\n str_ = str_.replace(\",\", \"\")\n str_ = str_.replace(\"\\'\", \"\")\n str_ = str_.replace('\\\"', \"\")\n str_ = str_.replace(\".\", \"\")\n if str_ not in word_dct:\n word_dct[str_] = 1\n else:\n word_dct[str_] += 1\n return word_dct", "def __line_parse(index: int, line: list, dictionary: dict, word_list: list):\n\n if index + 2 >= len(line):\n return\n word_1 = line[index + 2]\n word_2 = line[index + 1]\n word_3 = line[index]\n if word_1 == \"\" or word_2 == \"\" or word_3 == \"\":\n return\n\n if word_1 not in dictionary:\n dictionary[word_1] = {\n str(word_1 + \"_1\"): {\n\n },\n str(word_1 + \"_2\"): {\n\n },\n str(word_1 + \"_3\"): {\n\n }\n }\n if word_2 not in dictionary:\n dictionary[word_2] = {\n str(word_2 + \"_1\"): {\n\n },\n str(word_2 + \"_2\"): {\n\n },\n str(word_2 + \"_3\"): {\n\n }\n }\n if word_3 not in dictionary:\n dictionary[word_3] = {\n str(word_3 + \"_1\"): {\n\n },\n str(word_3 + \"_2\"): {\n\n },\n str(word_3 + \"_3\"): {\n\n }\n }\n if word_1 not in word_list:\n word_list.append(word_1)\n if word_2 not in word_list:\n word_list.append(word_2)\n if word_3 not in word_list:\n word_list.append(word_3)\n \"\"\" word_3 word_2 word_1\"\"\"\n if word_2 not in dictionary[word_1][str(word_1 + \"_1\")]:\n dictionary[word_1][str(word_1 + \"_1\")][word_2] = 1\n else:\n dictionary[word_1][str(word_1 + \"_1\")][word_2] = dictionary[word_1][str(word_1 + \"_1\")][word_2] + 1\n if word_3 not in dictionary[word_1][str(word_1 + \"_2\")]:\n dictionary[word_1][str(word_1 + \"_2\")][word_3] = 1\n else:\n dictionary[word_1][str(word_1 + \"_2\")][word_3] = dictionary[word_1][str(word_1 + \"_2\")][word_3] + 1\n if word_3 not in dictionary[word_2][str(word_2 + \"_1\")]:\n dictionary[word_2][str(word_2 + \"_1\")][word_3] = 1\n else:\n dictionary[word_2][str(word_2 + \"_1\")][word_3] = dictionary[word_2][str(word_2 + \"_1\")][word_3] + 1\n if index + 3 >= len(line) or line[index + 3] == \"\":\n return\n word_0 = line[index + 3]\n if word_0 not in dictionary:\n dictionary[word_0] = {\n str(word_0 + \"_1\"): {\n\n },\n str(word_0 + \"_2\"): {\n\n },\n str(word_0 + \"_3\"): {\n\n }\n }\n\n if word_0 not in word_list:\n word_list.append(word_0)\n\n if word_3 not in dictionary[word_0][str(word_0 + \"_3\")]:\n dictionary[word_0][str(word_0 + \"_3\")][word_3] = 1\n else:\n dictionary[word_0][str(word_0 + \"_3\")][word_3] = dictionary[word_0][str(word_0 + \"_3\")][word_3] + 1", "def make_words(self,lm):\n if \" \" in self.corpus[0] and \" \" in self.corpus[1]: \n print \"assuming BLICK\"\n self.corpus = [convert_to_disc(i) for i in self.corpus]\n else:\n self.disc = 1\n print \"assuming Disc\" \n if not os.path.isfile(self.f): ##check if it already exists\n print \"generating 10 million words\"\n outfile = open(self.f, \"w\")\n outfile.write(\"word,blick,ngram,Real,T,disc\\n\")\n for word in self.corpus:\n write_row_of_bigmatch(word, self.disc, outfile, lm, \"Real\", \"1\")\n while len(self.wordlist)<10000000: \n words = lm.generate(100)\n for word in words:\n if word not in self.wordlist and len(word) < 9: #keep only words less than len9\n write_row_of_bigmatch(word, self.disc, outfile, lm, \"Simulated\", \"0\")\n self.wordlist[word] = 0\n return", "def refine_tokens( self, tokens ):\n k = 1.75\n b = 0.75\n stop_words_file = \"stop_words.txt\"\n all_stopwords = 
list()\n refined_tokens_sources = dict()\n \n # collect all the stopwords\n with open( stop_words_file ) as file:\n lines = file.read()\n all_stopwords = lines.split( \"\\n\" )\n \n for source in tokens:\n refined_tokens = dict()\n files = dict()\n inverted_frequency = dict()\n file_id = -1\n total_file_length = 0\n for item in tokens[ source ]:\n file_id += 1\n file_tokens = tokens[ source ][ item ].split(\" \")\n if source in \"name_desc_edam_help\":\n file_tokens = utils._clean_tokens( file_tokens, all_stopwords )\n total_file_length += len( file_tokens )\n term_frequency = dict()\n for token in file_tokens:\n if token is not '':\n file_ids = list()\n if token not in inverted_frequency:\n file_ids.append( file_id )\n else:\n file_ids = inverted_frequency[ token ]\n if file_id not in file_ids:\n file_ids.append( file_id )\n inverted_frequency[ token ] = file_ids\n # for term frequency\n if token not in term_frequency:\n term_frequency[ token ] = 1\n else:\n term_frequency[ token ] += 1\n files[ item ] = term_frequency\n N = len( files )\n average_file_length = float( total_file_length ) / N\n # find BM25 score for each token of each tool. It helps to determine\n # how important each word is with respect to the tool and other tools\n for item in files:\n file_item = files[ item ]\n file_length = len( file_item )\n for token in file_item:\n tf = file_item[ token ]\n # normalize the term freq of token for each document\n tf = float( tf ) / file_length\n idf = np.log2( N / len( inverted_frequency[ token ] ) )\n alpha = ( 1 - b ) + ( float( b * file_length ) / average_file_length )\n tf_star = tf * float( ( k + 1 ) ) / ( k * alpha + tf )\n tf_idf = tf_star * idf\n file_item[ token ] = tf_idf\n # filter tokens based on the BM25 scores and stop words. Not all tokens are important\n for item in files:\n file_tokens = files[ item ]\n tokens_scores = [ ( token, score ) for ( token, score ) in file_tokens.items() ]\n sorted_tokens = sorted( tokens_scores, key=operator.itemgetter( 1 ), reverse=True )\n refined_tokens[ item ] = sorted_tokens\n tokens_file_name = 'tokens_' + source + '.txt'\n token_file_path = os.path.join( os.path.dirname( self.tools_data_path ) + '/' + tokens_file_name )\n with open( token_file_path, 'w' ) as file:\n file.write( json.dumps( refined_tokens ) )\n file.close()\n refined_tokens_sources[ source ] = refined_tokens\n return refined_tokens_sources", "def process_raw_phrases(file_path):", "def load_input_word_list(file_path):\n if not os.path.isfile(file_path):\n return False\n\n word_list = list()\n\n with open(file_path, 'r') as fp:\n while True:\n line = fp.readline()\n if not line:\n break\n\n data = line.split(' ')\n text = data[0].lower().strip(Setting.NONWORD_CHARACTERS)\n\n if not text:\n continue\n\n text = text.replace('_', ' ')\n\n score = float(data[1])\n\n if score < 0:\n kind = WordKindEnum.NEG\n else:\n kind = WordKindEnum.POS\n\n word = Word(text, score, kind)\n word_list.append(word)\n\n return word_list", "def word_dict():\n fin = open('words.txt')\n w_dict = {}\n for line in fin:\n word = line.strip()\n w_dict[word] = word\n return w_dict", "def word_count(filename):\n \n word_counts = {}\n\n with open(filename) as file_:\n for line in file_:\n # strip white space\n words = line.split()\n # iterate over words and strip excess punctutation then add to dict\n for word in words:\n word = word.strip(\",.\\\";:?_!\").lower()\n word_counts[word] = word_counts.get(word, 0) + 1\n\n # print list of words and count\n for word, count in word_counts.iteritems():\n 
print \"{} {}\".format(word, count)", "def read_file(filename):\n print(\"Reading dictionary: \" +filename)\n word_dict = set()\n\n dictionary = open(filename)\n\n # Read each word from the dictionary\n for word in dictionary:\n # Remove the trailing newline character\n word = word.rstrip('\\n')\n\n # Convert to lowercase\n word = word.lower()\n\n word_dict.add(word)\n\n dictionary.close()\n\n return word_dict", "def process_line(line, hist):\n # replace hyphens with spaces before splitting\n line = line.replace('-', ' ')\n wordlist=[]\n\n for word in line.split():\n # remove punctuation and convert to lowercase\n word = word.strip(string.punctuation + string.whitespace)\n word = word.lower()\n\n wordlist.append(word)\n # update the histogram\n #hist[word] = hist.get(word, 0) + 1\n return wordlist", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"infile\", help=\"Text file to be analyzed.\")\n args = parser.parse_args()\n with open(args.infile, encoding=\"utf-8\") as f:\n text = f.read()\n words = text.split()\n unique_words(words)", "def analyze_file(file_contents):\r\n load_dict = [];\r\n for a in file_contents:\r\n load_dict.append((char_score(a)[0][0],a))# should \r\n with open(\"./scores.txt\",'w') as scr:\r\n scr.write(str(load_dict)); \r\n return load_dict;", "def buildCorpus(self, filename, stopwords_file=None):\n with open(filename, 'r') as infile:\n # use pattern.subs\n # doclines = [line.rstrip().lower().split(' ') for line in infile]\n doclines = [self.help_clean(line) for line in infile]\n n_docs = len(doclines)\n self.vocab = list({v for doc in doclines for v in doc})\n if stopwords_file:\n with open(stopwords_file, 'r') as stopfile:\n stops = stopfile.read().split()\n self.vocab = [x for x in self.vocab if x not in stops]\n self.vocab.sort()\n self.documents = []\n for i in range(n_docs):\n self.documents.append({})\n for j in range(len(doclines[i])):\n if doclines[i][j] in self.vocab:\n self.documents[i][j] = self.vocab.index(doclines[i][j])", "def tokenize_and_split(sms_file):\n \n dic = {}\n list1 = []\n list2 = []\n i = -1\n ham = True\n for line in open(sms_file, 'r').readlines():\n w = []\n for word in line.split():\n i = i + 1\n if word == \"ham\":\n ham = True\n i = i - 1\n elif word == \"spam\":\n ham = False\n i = i - 1\n else:\n if word not in dic:\n dic[word] = i\n w.append(dic[word])\n else : \n i = i - 1\n w.append(dic[word])\n if ham and w !=[]:\n list2.append(w)\n elif ham == False and w !=[]:\n list1.append(w)\n \n return dic,list1,list2", "def load_train_word_dict():\n train_dict = {}\n with open(TRANSCRIPTION_PATH) as file:\n for line in file:\n if int(line[0:3]) < 300:\n word_id, transcript = str.split(line, \" \")\n train_dict[word_id] = transcript.rstrip('\\n')\n return train_dict", "def read_data(filename, n_words):\n with open(filename) as f:\n filter_set = set()\n unsorted_res = []\n words = []\n count = []\n for line in f:\n word = line.strip()\n if len(word) == 0:\n continue\n word_idx_list = [int(idx) for idx in word.split(',')]\n filter_set.add(tuple(word_idx_list))\n words.append(tuple(sorted(word_idx_list)))\n words_counter = collections.Counter(words)\n most_common_words = dict()\n most_common_words_counter = words_counter.most_common(n_words)\n for item in most_common_words_counter:\n most_common_words[item[0]] = item[1]\n\n unsorted_res = dict()\n for w in filter_set:\n sorted_tuple = tuple(sorted(list(w)))\n if sorted_tuple in most_common_words:\n unsorted_res[w] = most_common_words[sorted_tuple]\n \n del words\n 
del count\n del filter_set\n del most_common_words\n\n return unsorted_res", "def add_cpd_synonyms_from_datafile(filename = '/Users/wbryant/work/cogzymes/data/SEED/SEED_met_table.csv'):\n \n \n num_syns_added = 0\n num_syns_tested = 0\n \n source = Source.objects.get(name='seed')\n \n met_db_dict = get_model_dictionary(Metabolite, 'id')\n syn_met_dict = get_synonym_met_dict()\n \n counter = loop_counter(count_lines(filename), 'Adding CPD IDs to database')\n \n f_in = open(filename, 'r')\n for line in f_in:\n counter.step()\n add_cpd_to_synonyms(line, source, syn_met_dict, met_db_dict)\n \n counter.stop()", "def build_dict(fname):\n\t\n\twith open(fname) as file:\n\n\t\tword_count_dict = {}\n\n\t\tfor line in file:\n\t\t\tline = line.rstrip()\n\t\t\tline =line.split(' ')\n\t\t\tfor word in line:\n\t\t\t\tword = word.strip('\"!.,?_;():')\n\t\t\t\tword = word.lower()\n\t\t\t\tword_count_dict[word] = word_count_dict.get(word, 0) + 1\n\t\t#return word_count_dict\n\n\t\tfor each in word_count_dict:\n\t\t\tcount = word_count_dict[each]\n\t\t\tprint(each, count)\n\n\t\treturn", "def generate_input(file_path):\r\n\tlabel_matches = dict()\r\n\tfile_lines = []\r\n\twith open(file_path) as f:\r\n\t\tfor line in f:\r\n\t\t\tfile_lines = file_lines + [line.lower().split()]\r\n\t\tword_tuples = zip(file_lines[0::3], file_lines[1::3], file_lines[2::3])\r\n\t\tfor (words, part_of_speech, word_type) in word_tuples:\r\n\t\t\ttype_tuples = zip(words, word_type)\r\n\t\t\tfor word_and_tag in type_tuples:\r\n\t\t\t\tlabel_matches.update({word_and_tag : (label_matches.get(word_and_tag, 0) + 1)})\r\n\treturn label_matches", "def common_words_safe(filename, min_chars):\n wordPattern = re.compile('[a-zA-Z]{' + str(min_chars) + ',}')\n occurance = dict()\n try:\n with open(filename, 'r') as f:\n contents = f.read()\n except IOError as e:\n print \"IOError {0}: {1}\".format(e.errno, e.strerror)\n return\n words = wordPattern.finditer(contents)\n for wordMatch in words:\n word = wordMatch.group(0).lower()\n if word in occurance:\n occurance[word] += 1\n else:\n occurance[word] = 1\n return sorted(occurance.items(), key=lambda item:item[1], reverse=True)" ]
[ "0.70385915", "0.6925145", "0.6899971", "0.6768178", "0.6651934", "0.6420912", "0.6312901", "0.6296871", "0.6231026", "0.6214866", "0.62069726", "0.6178269", "0.616501", "0.615068", "0.61372495", "0.6134684", "0.612722", "0.61159056", "0.6110232", "0.6060298", "0.6052872", "0.6024224", "0.60239846", "0.59918666", "0.5982138", "0.5981788", "0.5977596", "0.5972763", "0.59598434", "0.59594", "0.5954592", "0.5954314", "0.5950341", "0.5945586", "0.5939405", "0.592284", "0.59227294", "0.5921953", "0.5901857", "0.58915174", "0.58891827", "0.588435", "0.58632594", "0.5839867", "0.5836892", "0.58325803", "0.5831235", "0.5831054", "0.58259505", "0.58204377", "0.58100015", "0.5801213", "0.5798646", "0.57879025", "0.5783743", "0.5783632", "0.5777174", "0.5773674", "0.576629", "0.5762148", "0.5750121", "0.57435894", "0.5742887", "0.5738712", "0.573655", "0.57355934", "0.573217", "0.57258767", "0.572451", "0.5724229", "0.572188", "0.57089233", "0.5708431", "0.5706453", "0.5705184", "0.5672568", "0.5665531", "0.5659035", "0.56569177", "0.56409246", "0.5640634", "0.563733", "0.5634866", "0.563245", "0.56282216", "0.56178486", "0.5599717", "0.55991083", "0.55925417", "0.5591636", "0.55891705", "0.558677", "0.55867386", "0.5585658", "0.5582212", "0.557763", "0.55756855", "0.5572795", "0.5568265", "0.5555953" ]
0.65844196
5
Write the concordance entries to the output file (filename). See sample output files for format.
def write_concordance(self, filename):
    all_keys = self.concordance_table.get_all_keys()
    lines = []
    for i in all_keys:
        a = ""
        a += i + ":"
        f = self.concordance_table.get_value(i)
        if f != None:
            for s in f:
                a += " " + str(s)
        a += "\n"
        lines.append(a)
    a = open(filename, "w+")
    for i in lines:
        a.write(i)
    a.close()
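The query above refers to sample output files that are not included in this dump. As a rough sketch only (the words, line numbers, and output filename below are made up for illustration, not taken from the dataset), the entry format written by write_concordance is one "word: n1 n2 ..." line per key:

# Minimal sketch of the concordance entry format produced above.
# Words, line numbers, and filename are hypothetical.
concordance = {"cat": [1, 4], "dog": [2]}  # word -> line numbers where it appears

with open("concordance_sample.txt", "w") as out:
    for word in sorted(concordance):
        out.write(word + ": " + " ".join(str(n) for n in concordance[word]) + "\n")

# concordance_sample.txt then contains:
# cat: 1 4
# dog: 2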
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_concordance(self, filename):\n out = ''\n values = [x for x in self.concordance_table.hash_table if x is not None]\n values.sort(key=lambda x: x[0])\n for v in values:\n out += f'{v[0]}: {\" \".join(str(x) for x in sorted(set(v[1])))}\\n' \n with open(filename, 'w') as f:\n f.write(out.rstrip())", "def write_cando_file(self, file_name):\n cando_writer = CandoWriter(self.dna_structure)\n cando_writer.write(file_name)", "def _write_conductances(self, cond_file_name):\n cond_file_path = os.path.join(OM_STORAGE_DIR, cond_file_name)\n\n #TODO: Check that the file doesn't already exist.\n LOG.info(\"Writing head conductance file: %s\" % cond_file_path)\n file_handle = file(cond_file_path, \"a\")\n\n file_handle.write(\"# Properties Description 1.0 (Conductivities)\\n\\n\")\n file_handle.write(\"Air %4.2f\\n\" % self.conductances[\"air\"])\n file_handle.write(\"Scalp %4.2f\\n\" % self.conductances[\"skin\"])\n file_handle.write(\"Brain %4.2f\\n\" % self.conductances[\"brain\"])\n file_handle.write(\"Skull %4.2f\\n\" % self.conductances[\"skull\"])\n\n file_handle.close()\n LOG.info(\"%s written successfully.\" % cond_file_path)\n\n return cond_file_path", "def write_output(self):\n with open(self.filename, 'a', newline='', encoding='utf-8') as \\\n csv_file:\n csv_writer = csv.writer(csv_file)\n if os.stat(self.filename).st_size == 0:\n # if the csv file needs a headers\n csv_writer.writerow(Configurations.header)\n for quote in self.quotes_objects:\n csv_writer.writerow(quote.info)", "def write_conll(conll_file, sents):\n with codecs.open(conll_file, mode = 'w', errors = 'ignore', encoding = 'utf-8') as ofile:\n for sent in sents:\n if sent:\n for element in sent:\n word = element[0]\n tag = element[1]\n ofile.write(str(tag) + '\\t' + str(word) + '\\n')\n ofile.write('\\n')", "def write_CA_atoms():\n \n import os\n choice = input('Enter the name of the file: ')\n filepath = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Data', choice)\n ca_list = []\n with open(filepath, 'r') as pdb:\n for line in pdb:\n if line[:4] == 'ATOM' and line[12:16] == \" CA \":\n line_split = line.split()[6:9]\n ca_list.append(line_split)\n choice1 = input('Enter name of the outfile: ')\n filepath1 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(filepath1, 'w') as outfile:\n for i in ca_list:\n outfile.writelines(i)\n print('Done!')\n print(i)", "def write_output():\n f = open(OUTPUT_FILE, 'w')\n for case_index, words in get_output():\n f.write('Case #%d: %s\\n' % (case_index, ' '.join(words)))\n f.close()", "def file_output(matches: list, output_file_name: str = 'matches.txt'):\n with open(\"test/Matches/\" + output_file_name, 'w') as f:\n for match in matches:\n for event in match.events:\n f.write(\"%s\\n\" % event.payload)\n f.write(\"\\n\")", "def write_output_file(filename, actions, log):\n f = open(filename, 'w')\n\n for i in range(len(actions)):\n f.write(str(actions[i]))\n if i < len(actions) - 1:\n f.write(',')\n f.write('\\n')\n\n for k in log.keys():\n f.write(str(k) + ' = ' + str(log.get(k)))\n f.write('\\n')\n\n f.close()", "def write_output_file(filename, actions):\n f = open(filename, 'w')\n for i in range(len(actions)):\n f.write(str(actions[i]))\n if i < len(actions) - 1:\n f.write(',')\n f.write('\\n')\n f.close()", "def write_output_file(filename, actions):\n f = open(filename, 'w')\n for i in range(len(actions)):\n f.write(str(actions[i]))\n if i < len(actions) - 1:\n f.write(',')\n f.write('\\n')\n f.close()", "def 
write_output_file(filename, actions):\n f = open(filename, 'w')\n for i in range(len(actions)):\n f.write(str(actions[i]))\n if i < len(actions) - 1:\n f.write(',')\n f.write('\\n')\n f.close()", "def write_corpus_to_file(output_file, corpus): \n \n file = open(output_file, 'w')\n for line in corpus: \n file.write(line)\n print ('Corpus has been writted in file')\n file.close()", "def write_file(self, filename):\n\n with open(filename, 'w', newline = '') as csvfile:\n langwriter = csv.writer(csvfile, delimiter=' ',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n for key in self.features:\n value = self.features[key]\n l = []\n for val in value:\n l.append(str(val))\n langwriter.writerow([l])\n return", "def write_cn_cards(bc_file, bc_class):\n cn = bc_class.constituent_properties\n bc_file.write('! Constituent Properties\\n')\n if not cn.general_constituents.empty:\n # bc_file.write(cn.general_constituents.to_csv(sep=' ', index=False, header=False).replace('\\r\\n', '\\n'))\n for index, row in bc_class.constituent_properties.general_constituents.iterrows():\n bc_file.write(\n 'CN CON {} {}\\n'.format(row['ID'].astype('int'), row['CONC']))\n if not cn.sand.empty:\n # bc_file.write(cn.sand.to_csv(sep=' ', index=False, header=False).replace('\\r\\n', '\\n'))\n for index, row in bc_class.constituent_properties.sand.iterrows():\n bc_file.write(\n 'CN SND {} {} {} {} {}\\n'.format(row['ID'].astype('int'), *row[['C_0', 'C_1', 'C_2', 'C_3']].values))\n if not cn.clay.empty:\n # bc_file.write(cn.clay.to_csv(sep=' ', index=False, header=False).replace('\\r\\n', '\\n'))\n for index, row in bc_class.constituent_properties.clay.iterrows():\n bc_file.write(\n 'CN CLA {} {} {} {} {}\\n'.format(row['ID'].astype('int'), *row[['C_0', 'C_1', 'C_2', 'C_3']].values))\n if cn.salinity:\n bc_file.write('CN SAL {} {}\\n'.format(cn.salinity_id, cn.reference_concentration))\n if cn.temperature:\n bc_file.write('CN TMP {} {}\\n'.format(cn.temperature_id, cn.reference_temperature))\n if cn.vorticity:\n bc_file.write('CN VOR {} {} {} {}\\n'.format(cn.vorticity_id, cn.vorticity_normalization,\n cn.vorticity_as_term, cn.vorticity_ds_term))\n\n bc_file.write('\\n') # blank line at the end of the Constituent Properties", "def write_to_file_ann(self) -> None:\n with open(self.output_file_path, mode='w', newline='') as csv_file:\n tweet = ['id', 'created_time', 'text']\n writer = csv.DictWriter(csv_file, fieldnames=tweet)\n writer.writeheader()\n for tweet in self.unique_tweets:\n try:\n writer.writerow(tweet)\n except:\n pass\n print(\"Tweets written to a file\")", "def write_conformers(self, filename): # ccids):\n cnt = 0\n for confId in range(self.nconf): #ccids:\n w = Chem.SDWriter('%s_c%03d.sdf'%(filename,cnt+1))\n w.write(self.mol, confId=confId)\n w.flush()\n w.close()\n cnt += 1", "def result_file(accession_list):\n with open(\"../accessions_list.txt\", 'w') as file:\n file.write(accession_list)", "def writeCC(self, fileName, allSCC):\n f = open(fileName,'w')\n\n for compNumber in range(0,len(allSCC)):\n f.write(\"Component number %s: \" % (compNumber))\n f.write(\"%s\\n\" % (str(allSCC[compNumber])))\n f.close()", "def write_output(arr, filename):\n print('Started writing the output..')\n f = open(filename, 'w')\n for a in arr:\n f.write(str(a) + '\\n')\n f.close()\n print('Done!, Open the file to see the approved loans.')", "def write_crf_input(out_file, sentences, poss, lemmas, concepts):\n\n print '\\n\\tWrite out data in crf compliant format'\n f = open(out_file, 'w+')\n for position_i in range(len(sentences)):\n 
for position_j in range(len(sentences[position_i])):\n f.write(\n sentences[ position_i ][ position_j ] + '\\t' +\n poss[ position_i ][ position_j ] + '\\t' +\n lemmas[ position_i ][ position_j ] + '\\t' +\n concepts[ position_i ][ position_j ]\n + '\\n'\n )\n f.write('\\n')\n f.close()\n print '\\t--done'", "def write_output_file(ad_models):\n\n with open('output-data-utf8.csv', 'w', newline='', encoding='UTF-8') as output_file:\n csv_writer = csv.writer(output_file, delimiter=',')\n for ad in ad_models:\n csv_writer.writerow((ad.date.strftime('%Y/%m/%d'), ad.country_code, ad.impression, ad.clicks))", "def writeChronListToFile(self):\n ## write header\n for header_line in self.outData['header']:\n self.outFile.write(header_line + '\\n')\n ##loop through each msg list\n for msg_list in self.outData_temp:\n ## create line\n msg_line = reconstructLine(msg_list)\n ## write to file\n self.outFile.write(msg_line + '\\n')", "def write_dialogue_to_file(utterances, dialogue_index, filename):\n with open(filename, 'a') as file:\n for sentence_index in range(len(utterances[dialogue_index][0])):\n file.write('{0} {1}\\n'.format(utterances[dialogue_index][0][sentence_index],\n utterances[dialogue_index][1][sentence_index]))", "def write(self, filename):\n pass", "def write(self, filename):\n pass", "def write_to_file(info, mode='w', file=\"output4.txt\"):\n with open(file, mode, encoding='utf-8') as f:\n for line in info:\n f.write(' '.join(map(str, line)) + '\\n')", "def export(self, fname):\n f = open(fname, 'w')\n for ue in self.ue_list:\n line_components = list()\n line_components.append(ue.expression)\n line_components.append(ue.meaning)\n print >>f, '\\t'.join(line_components).encode('utf-8')", "def write_conll(cls, filename, writer, document_id, sentences):\n with open(filename, 'w') as fd:\n writer.write(fd, document_id, sentences)", "def write_output(basis, filename):\n\n logging.info('Writing output to {}'.format(filename))\n\n basis.to_csv(filename)", "def writeAlltoFile(self):\n with open(self._fname, 'w') as f:\n for elem in self.getAll():\n line = self._writeGratoLine(elem)\n f.write(line + \"\\n\")\n f.close()", "def writeOut(self):\n # import time\n self.outHeader = self.srcHeader\n for line in self.outHeader:\n self.outFile.write(line + '\\n')\n # now = time.asctime(time.localtime(time.time()))\n # self.outFile.write('%% -- %s -- Written to new alog' % now)\n for time_s in sorted(self.outData):\n for sens in self.outData[time_s]:\n for meas in self.outData[time_s][sens]:\n valu = self.outData[time_s][sens][meas]\n msg_list = [str(time_s), meas, sens, str(valu)]\n line_string = reconstructLine(msg_list)\n self.outFile.write(line_string + '\\n')", "def writeToFile(self):\n self.dto.writeToCsv()\n print(\"File written.\")", "def writeCentrality(filename, data):\n\n filePath = os.path.join(CONFIG['CENTRALITIES_PATH'], filename)\n f = open(filePath, \"w\")\n\n data = {k: v for k, v in sorted(\n data.items(), key=lambda x: x[1], reverse=True)}\n\n for k, v in data.items():\n text = f\"{k:<4}\\t{v:.6f}\\n\"\n f.write(text)", "def write(self, taxonomy, output_file):\n\n fout = open(output_file, 'w')\n for genome_id, taxa in taxonomy.items():\n fout.write(genome_id + '\\t' + ';'.join(taxa) + '\\n')\n fout.close()", "def write(filename):\n print(uc.write(filename))", "def write_file(self):\n if self._write_file == None:\n return\n\n try:\n out = file(self._write_file, \"w\")\n except IOError, e:\n print e\n sys.exit(1)\n out.writelines(\"A cases\") \n out.close()", "def write_output(self, 
output_file):\n\t\t# Create csv file header.\n\t\theader = ['Cohort', 'Customers',]\n\t\tfor start, end in self.day_ranges:\n\t\t\tday_range_str = '{}-{} days'.format(start, end)\n\t\t\theader.append(day_range_str)\n\n\t\twith open(output_file, 'wb') as fh:\n\t\t\twriter = csv.writer(fh)\n\t\t\twriter.writerow(header)\n\t\t\tfor cohort, cohort_value in self.output.items():\n\t\t\t\twriter.writerow(\n\t\t\t\t\tself.build_row(cohort, cohort_value)\n\t\t\t\t)", "def write_cif_file(self, file_name):\n cif_writer = CifWriter(self.dna_structure)\n cif_writer.write(file_name, self.infile, self.informat )", "def writeOutputToFile(self, expanded_acronyms, file_path):\n output_file = open(file_path, \"w\")\n if expanded_acronyms:\n for acronym in sorted(expanded_acronyms.keys()):\n output_file.write(\n acronym + \",\" + str(self._getExpansion(expanded_acronyms[acronym])) + \"\\n\")\n else:\n output_file.close(string_error_no_results_to_show)\n output_file.close()", "def write_output(word_dict):\n # create an empty output.txt file\n output = open('output.txt', 'w')\n\n for i in words_dict: \n output.write(i + \" : \" + str(words_dict[i]) + \"\\n\")", "def writexyz(self,fname):\n xyzfile = open(fname + \".xyz\",\"a+\")\n xyzfile.write(str(self.natoms) + \"\\n\\n\")\n for a in self.atoms:\n \tcxyz = a.xyz - np.array(self.pbc_correction(a.xyz))\n\t\t\txyzfile.write(str(a.type) + \"\\t\" + str(cxyz[0]) + \"\\t\" + str(cxyz[1]) + \"\\t\" + str(cxyz[2]) + \"\\n\")\n xyzfile.close()", "def writeIntrons(self, filenameout):\n printed = {}\n with open(filenameout, \"w\") as out:\n for features in self.feature_dictionary:\n intro = self.feature_dictionary[features].getIntrons()\n print(self.feature_dictionary[features].introns)\n for introns in intro:\n if \"-\".join([str(i) for i in introns]) not in printed:\n out.write(self.feature_dictionary[features].chromosome_name + \"\\t\"\n + str(introns[0]) + \"\\t\" + str(introns[1]) + \"\\t\" + self.feature_dictionary[features].strand + \"\\n\")\n printed[\"-\".join([str(i) for i in introns])] = 0", "def write_collected(self, names_file, kb_file, cat_file):\n with open(names_file, 'w') as fp:\n for kb_id, name in self.collected_names.items():\n fp.write('\\t'.join(['name', kb_id, name]) + '\\n')\n with open(kb_file, 'w') as fp:\n for kb_id, tail_set in self.collected_edges.items():\n for (rel, tail_id) in tail_set:\n fp.write('\\t'.join([rel, kb_id, tail_id]) + '\\n')\n with open(cat_file, 'w') as fp:\n for c, ms in self.collected_cat_mems.items():\n fp.write(c + '\\t' + self.kb[c].name + '\\t')\n fp.write('|'.join(ms) + '\\n')", "def write_data_file(output_file: str, companies: list):\n with open(output_file, \"w\") as f:\n # s = \"\\n\".join(companies)\n for i in range(len(companies)):\n for k in range(10):\n for j in range(len(companies[i].data[k])):\n s = f\"{i},{companies[i].data[k][j][0].__str__()},{companies[i].data[k][j][1]}\\n\"\n f.write(s)", "def WriteOutput(self, rows, fileName, access='wb'):\n \n outputFile = open(fileName, access)\n try: \n outputFile.write(self.GetBanner())\n csv.writer(outputFile, dialect='excel-tab').writerows(rows)\n print 'Wrote secondary output to: %s' %(fileName) \n except IOError:\n print 'Error writing output to: %s' %(fileName) \n finally:\n outputFile.close()", "def write_cadnano_file(self, file_name):\n cadnano_writer = CadnanoWriter(self.dna_structure)\n cadnano_writer.write(file_name)", "def export_file(self):\n if self.args.keyfilter:\n self.filter_keys()\n if self.args.datafilter:\n self.filter_values()\n 
json.dump(self.outputdata, self.outfile, indent=self.args.indent)\n self.outfile.write('\\n')", "def write_output(self, failed_genes):\r\n file_prefix = self.file_name.strip('sambamba_output.txt')\r\n fieldnames = ['GeneSymbol;Accession', 'percentage30']\r\n with open (f'../results/{file_prefix}.coverage_output.csv', 'w', newline = '') as output:\r\n csvwriter = csv.DictWriter(output, fieldnames=fieldnames)\r\n csvwriter.writeheader()\r\n csvwriter.writerows(failed_genes)", "def writecontactstocsv(self , contact_entries):\n rx = re.compile('\\W+')\n allcontacts = []\n for entry in contact_entries:\n if entry.name is not None and len(entry.phone_number) > 0 and len(entry.group_membership_info) > 0:\n\n # Clean up characters in contact name; replace all non-alphanumerics with spaces\n fullname = entry.name.full_name.text\n fullname = rx.sub(' ', fullname).strip()\n for rawPhoneNumber in entry.phone_number:\n # Remove non-numeric characters from the phone number\n phone_number = re.sub(\"[^0-9]\", \"\", rawPhoneNumber.text)\n # Save contact for later insert\n allcontacts.append((fullname, phone_number))\n\n allcontacts = tuple(set(allcontacts))\n\n csvfilename = \"Downloads/ContactExport\"+time.strftime(\"%Y%m%d-%H%M%S\")+\".csv\"\n csvfile = open(csvfilename, \"w\")\n for csvFullName, csvPhoneNumber in allcontacts:\n line = \"\\\"%s\\\",%s\\n\" % (csvFullName, csvPhoneNumber)\n csvfile.write(line)\n\n csvfile.close()", "def build_catalog(filename):\n\n write_to_file(filename)", "def write_output(self):", "def write(self, args):\n\t\tnewcsvfile = self.filename[:len(self.filename)-4] + \"NEW.csv\" #clever naming MIGHT NEED TO CHANGE THIS LATER/OVERWRITE OLD FILE?\n\t\twith open(newcsvfile, 'wb') as f:\n\t\t\twriter = csv.writer(f)\n\t\t\twriter.writerows(self.all_likes)", "def write(cls, vas):\n with open(Y, 'w') as f_i:\n for items in vas:\n f_i.write('%s ' % items)\n print(\"File written successfully. 
Check out \\\"output.txt\\\" file\")\n f_i.close()", "def write_output(label1, label2, label3, submission_file):\n with open(submission_file, 'w') as f:\n f.write('Id,Bound'+ '\\n')\n for index, lab in enumerate(label1):\n f.write(str(index) + ',' + str(int(lab)) + '\\n')\n for index, lab in enumerate(label2):\n f.write(str(len(label1) + index) + ',' + str(int(lab)) + '\\n')\n for index, lab in enumerate(label3):\n f.write(str(len(label1) + len(label2) + index) + ',' + str(int(lab)))\n if index < len(label3) - 1:\n f.write('\\n')", "def write(self, outputFile):\n \n try: \n f = open(outputFile + '.py', 'w')\n for trail in self.trails: \n f.write(\"[\")\n for index in trail:\n f.write(\"({0}, {1}), \".format(*index)) \n f.write(\"]\\n\")\n \n except IOError, e:\n msg = \"Exception encountered when attempting \" + \\\n \"to write data to file: {0}.\" + \\\n \"\\n\\t -- Exception was: {1}\" + \\\n \"\\n\\t For help use --help\".format(outputFile, e)\n raise Usage(e)", "def write_to_file(self, filename: str) -> None:", "def write_to_file(self, results):\n with open(self.outputFilename, \"w\") as csvFile:\n csvWriter = csv.writer(csvFile, delimiter=',') \n title_row = ('asset_id', 'component_id', 'latitude', 'longitude', 'installation_date', 'commissioning_date', 'street_name', 'cabinet_id', 'nominal_wattage', 'current_time', 'current_LogValue', 'current_IsLogValueOff') \n csvWriter.writerow(title_row)\n for record in results:\n csvWriter.writerow(record)", "def write(self, outfile):\n outfile.write(\n '\\t'.join(\n [\n str(i) for i in [\n self.chrom, self.start, self.end, self.name,\n self.count, self.fold_change, self.log10p\n ]\n ]\n )\n )\n outfile.write('\\n')", "def write_to_txt(self):\r\n file = open(self.output_path, 'w')\r\n for question_id in self.question_ids:\r\n file.write(self.questions[question_id].question_string+str(self.questions[question_id].answer)+'\\n')\r\n file.close()", "def to_cheetah_file(self, filename):\n translate.write_cheetah(self, filename)\n return", "def writetofile(invertedindex, filename):\n file = open(filename + '.txt', 'w', encoding='utf-8')\n for word in invertedindex.keys():\n file.write(word)\n file.write(' : ')\n for docid in invertedindex[word][0]:\n file.write(str(docid) + ' ')\n file.write('\\n')", "def write_to_file(self, filename):\n self.octree.write(str.encode(filename))\n print(\"Save octomap to \"+filename)", "def write_gct_file(output_file, class_names, class_counts, expression_matrix):\n total_genes = len(expression_matrix)\n first_key = list(expression_matrix.keys())[0]\n total_samples = len(expression_matrix[first_key])\n\n headers = ['NAME', 'DESCRIPTION']\n\n for c_name, c_count in zip(class_names, class_counts):\n for i in range(c_count):\n headers.append('{}_{}'.format(c_name, i + 1))\n\n with open(output_file, 'w') as f:\n f.write('#1.2\\n')\n f.write('{} {}\\n'.format(total_genes, total_samples))\n f.write('\\t'.join(headers))\n f.write('\\n')\n\n for g_name, values in expression_matrix.items():\n f.write(g_name)\n f.write('\\tna\\t')\n f.write('\\t'.join(\n ['{0:.2f}'.format(v) for v in values]\n ))\n f.write('\\n')", "def corpusWriter(self):\n with open('corpus.txt', 'w') as file:\n for quote in self.quotes:\n file.write(quote + '\\n')", "def create_chceckfile(artist_list):\n with open(\"Udemy_Course/Object_Oriented_Programing_and_Classes/OOP_Song_Class/checkfile.txt\", \"w\") as checkfile:\n for new_artist in artist_list:\n for new_album in new_artist.albums:\n for new_song in new_album.tracks:\n 
print(\"{0.name}\\t{1.name}\\t{1.year}\\t{2.title}\".format\n (new_artist, new_album, new_song), file=checkfile)", "def write(read_file):\n princesses = filter_by_status(read(read_file))\n princesses = sort_by_status(princesses)\n princesses = sort_by_place(princesses)\n\n file = open(\"princesses_to_save.txt\", \"w\")\n for word in header:\n for something in word:\n file.write(\"{:20}\".format(something))\n file.write(\"\\n\")\n for i in range(len(princesses)):\n str1 = princesses[i]\n for word in str1:\n file.write(\"{:20}\".format(word))\n if i != len(princesses) - 1:\n file.write(\"\\n\")", "def write_csv(self, outputfile):\n d = csv.writer(outputfile, quoting=csv.QUOTE_ALL)\n for row in self.translations.iteritems():\n d.writerow(row)", "def write_candidates_file(self, min_count, stops, tags, filename):\n filename = os.path.join(filename)\n candidates = self.candidates(min_count, stops, tags)\n with open(filename, \"w\", encoding=\"utf-8\") as file:\n for wordi, wordj in candidates:\n file.write(\"{} {}\\n\".format(wordi, wordj))\n print(\"Success: Candidates written to '{}'\".format(filename))", "def handle(self, *args, **options):\n self.stdout.write('exporting corpus to text file')\n basetext = '\\n'.join([x.text_str for x in BaseText.objects.all() if x.check_age()])\n with open(os.path.join(BASE_DIR, 'corpus.txt'), 'w') as f:\n f.write(basetext)", "def write_to(self, filepath):\n output = self._generate_output()\n with open(filepath, 'wb') as out:\n out.write(output.encode('utf-8'))\n out.write(b'<!-- handrolled for excellence -->\\n')", "def write(self, file):\n\n # Initialize output buffer\n out = ''\n\n # Print specification\n for key, value in self.specification.items():\n out += f'{key} : {value}\\n'\n\n # Print the tour\n if self.tour:\n out += 'TOUR_SECTION\\n'\n for s in self.tour:\n out += str(s) + '\\n'\n out += '-1\\n'\n\n # Append EOF\n out += 'EOF\\n'\n\n # Write to file\n with open(file, 'w') as f:\n f.write(out)", "def output(self, filename):\n with open(filename, 'w') as f:\n op = {}\n layer_res = []\n alphas_res = []\n for layer in self._layers:\n weights = []\n alphas = []\n for neuron in layer._neurons:\n weights.append(neuron._weights)\n alphas.append(neuron._alpha)\n layer_res.append(weights)\n alphas_res.append(alphas)\n op['layers'] = layer_res\n op['alphas'] = alphas_res\n json.dump(op, f, indent='\\t')", "def _write_ce_collector_attributes_file(self, attributes_file):\n attributes_file_contents = (\n \"# Do not edit - file generated by osg-configure\\n\"\n + self.ce_attributes_str + \"\\n\"\n )\n return utilities.atomic_write(attributes_file, attributes_file_contents)", "def write_domains_to_file_by_category(self, file_name, row):\n with open(\"output/\"+file_name+\".csv\", \"a+\") as f:\n f.write(row[self.domain_label] + \",\" + row[self.category_label] + \"\\n\")", "def write_to_files():\n\t# Create output files\n\toutput = [None, \\\n\t\t open(\"priority-1.txt\", \"w\"), \\\n\t\t open(\"priority-2.txt\", \"w\"), \\\n\t\t open(\"priority-3.txt\", \"w\"), \\\n\t\t open(\"priority-4.txt\", \"w\"), \\\n\t\t open(\"priority-5.txt\", \"w\"), ]\n\n\t# Loop over all fields and write them to the correct file\n\tfor field in sorted(reportlog.keys()):\n\t\tpriority = reportlog[field]['priority']\n\t\tlabel = reportlog[field]['label']\n\n\t\toutput[priority].write(\"intphas_%s\\t%s\\n\" % (field, label))\n\t\toutput[priority].flush()\n\n\t# Close files\n\tfor i in [1,2,3,4,5]:\n\t\toutput[i].close()", "def write(self, filename):\n f = open(filename, 'w')\n 
f.write(str(self.m) + \"\\n\")\n f.write(str(self.n) + \"\\n\")\n for i in self.values:\n for j in i:\n f.write(str(j)+\"\\n\")\n f.closed", "def write_analysis_details(self, csvfile):\n #filepath, total words, line count, most common word\n f = open(csvfile, 'w')\n most_common = self.most_common()\n f.write('filepath,total words,line count,most common word\\n')\n f.write(f'{self.filepath},{self.word_count()},{self.sentence_count()},{self.most_common()[0]}')\n f.close()", "def generate_csv(self, output_file):\n try: # We are going to \"try\" something\n csv_file = open(output_file, 'w+') # open \"output_file\" as a writable file and return a handle called \"csv_file\"\n except OSError as err: # If something goes wrong with the open, we catch the exception\n fatal(\"{0}\".format(err), -1) # exit with something other than 0 so the shell knows something went wrong\n \n writer = csv.writer(csv_file) # create a CSV writing object that's pointing at our open file handle\n\n writer.writerow([\"Question\",\"Answers\"]) # Let's write the top row\n for k in self.questions.keys(): # Let's walk down the directory by key\n # write the \"key\" (which is the question) and then let's take the list of answers and create a comma delmited list.\n # this is likely totally wrong since you could have an answer in it that also has a comma...\n writer.writerow([k, \",\".join(self.questions[k].answers)]) # insert a key (which is the question) and then let's take the array of \n\n csv_file.close() # close the csv_file file handle", "def __export_file(self, filename, output):\n outfile = open(filename, \"w\")\n outfile.write(output)\n outfile.close\n print(\"Output written to file: \" + filename + \"\\n\")", "def write_exact_graph_to_file(self, output_file):\n print(\"Writing output file.\")\n with open(output_file, 'w') as f:\n f.write(\"# graph number = 0 name = interval_graph\\n\")\n f.write(str(len(self.vertices)) + \"\\n\")\n for node in self.vertices:\n for arc in self.out_arcs_lists[node]:\n s = self.arc_info[arc]['start']\n t = self.arc_info[arc]['destin']\n w = self.arc_info[arc]['weight']\n f.write(\"{} {} {}\\n\".format(s, t, w))", "def whriteInOuput(finalOutput):\n\n os.chdir(\"D:/IIHT/Python/Project/NRPT all companies scrapper/caches\")\n #open text file, return an object of type io.TextIOWrapper\n with open(\"Companies Website.txt\", \"w\") as writ:\n #write each line in the object op, return an object of type int\n writ.write('\\n'.join(finalOutput) + \"\\n\")", "def WriteFile(self, filename) :\n\n # open file for writing:\n f = open(filename, 'w')\n\n ## loop over key/value pairs:\n #for k,v in self.iteritems():\n # # add line; at least the specified number of characters \n # # is used for the key:\n # f.write( '%-20s:%s\\n' % (k,v) )\n ##endfor\n\n # write processed input:\n f.writelines(self.outfile)\n \n # close file:\n f.close()", "def output(owners, filename):\n\n out = open(filename, 'wb')\n writer = csv.writer(out)\n writer.writerow([\n 'Property Address',\n 'License Type',\n 'House',\n 'Street',\n 'License / Folio number',\n 'Civic address',\n 'Business name 1',\n 'Business name 2',\n 'Mail address 1',\n 'Mail address 2',\n 'Total Assess',\n 'Included Assess',\n 'Ann Chg',\n 'Unit'\n ])\n\n for owner in owners:\n owner.output_to(writer)", "def write_po(self, outputfile):\n raise NotImplementedError(\n \"Writing to this file format is not yet implemented\")", "def write(self, filename=None):\n if filename == None:\n filename = self.ofilename\n\n ofile = open(filename, 'w')\n\n ofile.write('# 
Susceptibility: %E d(susc): %E Coercivity: %E d(coer): %E\\n' % (self.susceptibility_mean, self.susceptibility_std, self.coercivity_mean, self.coercivity_std) )\n ofile.write('# H[] M[] Mfit[]\\n')\n\n #for i in range(len(self.h)):\n # ofile.write(\" %12.10f %12.10f %12.10f\\n\" % ( self.h[i], self.m[i], self.m_fit[i] ) )\n\n ofile.close()", "def write_output(output,fasta,CDR1_pos,CDR2_pos):\n # fasta file is the igblast input file\n with open(output, 'w') as f:\n header = \"\\t\".join(['Name', 'CDRL1_kabat_AA', 'CDRL2_kabat_AA'])\n f.write(header + '\\n')\n for record in SeqIO.parse(fasta, \"fasta\"):\n ID = str(record.id)\n seq = str(record.seq)\n CDR1_aa=''\n CDR2_aa = ''\n CDR1_index = CDR1_pos[ID]\n CDR2_index = CDR2_pos[ID]\n if CDR1_index != []:\n CDR1_start, CDR1_end = fix_aa_pos((int(CDR1_index[0]) - 1), int(CDR1_index[1]))\n CDR1_nuc = seq[CDR1_start:CDR1_end]\n CDR1_aa = translation(CDR1_nuc)\n if CDR2_index != []:\n CDR2_start, CDR2_end = fix_aa_pos((int(CDR2_index[0]) - 1), int(CDR2_index[1]))\n CDR2_nuc = seq[CDR2_start:CDR2_end]\n CDR2_aa = translation(CDR2_nuc)\n f.write(\"\\t\".join([ID, CDR1_aa, CDR2_aa]) + '\\n')", "def storePatternsInFile(self, outFile):\n self.oFile = outFile\n writer = open(self.oFile, 'w+')\n for x, y in self.finalPatterns.items():\n patternsAndSupport = str(x) + \":\" + str(y)\n writer.write(\"%s \\n\" % patternsAndSupport)", "def _write_interactions_to_file(self, results, writer):\n for index_pair, interactions in results.items():\n repetition = 0\n for interaction, results in interactions:\n\n if results is not None:\n (\n scores,\n score_diffs,\n turns,\n score_per_turns,\n score_diffs_per_turns,\n initial_cooperation,\n cooperations,\n state_distribution,\n state_to_action_distributions,\n winner_index,\n ) = results\n for index, player_index in enumerate(index_pair):\n opponent_index = index_pair[index - 1]\n row = [\n self.num_interactions,\n player_index,\n opponent_index,\n repetition,\n str(self.players[player_index]),\n str(self.players[opponent_index]),\n ]\n history = actions_to_str([i[index] for i in interaction])\n row.append(history)\n\n if results is not None:\n row.append(scores[index])\n row.append(score_diffs[index])\n row.append(turns)\n row.append(score_per_turns[index])\n row.append(score_diffs_per_turns[index])\n row.append(int(winner_index is index))\n row.append(initial_cooperation[index])\n row.append(cooperations[index])\n\n states = [(C, C), (C, D), (D, C), (D, D)]\n if index == 1:\n states = [s[::-1] for s in states]\n for state in states:\n row.append(state_distribution[state])\n for state in states:\n row.append(\n state_to_action_distributions[index][(state, C)]\n )\n row.append(\n state_to_action_distributions[index][(state, D)]\n )\n\n row.append(\n int(cooperations[index] >= cooperations[index - 1])\n )\n\n writer.writerow(row)\n repetition += 1\n self.num_interactions += 1", "def write(name, keyword, domain, citation, author, description, species, version, contact, license, values, output):\n write_namespace(\n name, keyword, domain, author, citation, values,\n namespace_description=description,\n namespace_species=species,\n namespace_version=version,\n author_contact=contact,\n author_copyright=license,\n file=output,\n )", "def write_index_to_file(output_file, items): \n \n file = open(output_file, 'w')\n for item in items: \n str0 = str(item[0])\n str1 = ' '.join(str(x) for x in item[1])\n file.write( str0 + ' ' + str1 + '\\n') \n # file.write(item)\n print ('An inverted index has been writted in file')\n 
file.close()", "def write_result(file_name, name, entries, extra_includes, src_file_names):\r\n\r\n with open(file_name, 'w', newline='\\n') as f:\r\n f.write('// Generated by %s\\n' % os.path.basename(__file__))\r\n f.write('// Based on %s: %s\\n' %\r\n ((\"this file\" if len(src_file_names) < 2 else\r\n \"these files\"), \", \".join(src_file_names)))\r\n methods = entries[0]\r\n if len(methods) != 0:\r\n f.write(to_PyMethodDef(name, methods, extra_includes))\r\n f.write('\\n')\r\n\r\n properties = entries[1]\r\n if len(properties) != 0:\r\n f.write('\\n')\r\n f.write(to_PyGetSetDef(name, properties))", "def write_to_file(self):\n\t\twith self.driver.session() as session:\n\t\t\ttry:\n\t\t\t\tfile_name = None\n\t\t\t\tdonor_file_name = None\n\t\t\t\tfull_file_name = None\n\t\t\t\tcomplete_file_name = None\n\t\t\tcyph = \"MATCH (d:Donor) RETURN d.email\"\n\t\t\tresult = session.run(cyph) \n\t\t\t\n\t\t\tfor donor in result:\n\t\t\t\tdonor_file_name = donor['email']\n\t\t\t\tfull_file_name = \"{}.txt\".format(donor_file_name)\n\t\t\t\tcomplete_file_name = os.path.join(os.getcwd(), full_file_name)\n\t\t\t\tletter = self.letter_to_file(donor_full_name,total_donations)\n\t\t\t\twith open(complete_file_name, 'w+') as f:\n\t\t\t\t\tf.write(letter)\n\t\t\t\n\t\t\texcept Exception as e:\n\t\t\t\tprint(\"Error occurred. See below\")\n\t\t\t\tprint(e)\n\t\t\t\tprint('\\n')\n\n\tdef letter_to_file(self, donor):\n\t\tstr_letter = \"Dear {},\\n\\tThank you for your kind donation(s).\\n\\tIt will be put to very good use.\\n\\t\\tSincerely,\\n\\t\\t\\t-The Team\".format(donor)\n\t\treturn str_letter\n\n\tdef show_donors(self):\n\t\t\"\"\"\n\t\tLists the donor names\n\t\t\"\"\"\n\t\twith self.driver.session() as session:\n\t\t\tstr_build = \"\"\n\t\t\ttry:\n\t\t\t\tcyph = \"\"\"\n\t\t\t\tMATCH (d:Donor)\n\t\t\t\tRETURN d.full_name as full_name, d.email as email\n\t\t\t\t\"\"\"\n\t\t\t\tresult = session.run(cyph)\n\t\t\t\tfor record in result:\n\t\t\t\t\tstr_build += record['full_name'] + ' -- ' + record['email'] + '\\n'\n\t\t\texcept Exception as e:\n\t\t\t\tprint(\"Error occurred. 
See below.\")\n\t\t\t\tprint(e)\n\t\treturn str_build", "def write(self, fname):\n pass", "def compDataWriter(sentences, predictions, output_file):\n assert len(sentences) == len(predictions), \"Missing predictions for sentences!\"\n lines = list()\n for k in range(len(sentences)):\n assert len(sentences) == len(predictions), \"Missing tag predictions for words!\"\n sentence = sentences[k]\n tags = predictions[k]\n line_list = [sentence[i]+TAGCHAR+tags[i] for i in range(len(sentence))]\n line = WHITESPACE.join(line_list)\n lines.append(line)\n assert len(lines) == len(sentences), \"Missing tagged sentence!\"\n with open(output_file, 'w') as file:\n file.write(\"\\n\".join(lines))", "def write_annotations(self, output_file):\n logging.info(self._header)\n np.savetxt(output_file, self._zeroes, header=\" \".join(self._header),fmt='%i',comments='')", "def write_to_file(content, filename):\n if not os.path.isfile(filename): # Checking if file already exists, don't append data if it does.\n for j in range(len(content)): # For each dialog in dialogues array.\n with open(filename, 'a') as file: # Open a text file in append mode and write data into it.\n for k in range(len(content[j][0])):\n file.write('{0} {1}\\n'.format(str(content[j][0][k]).lower().split(\"(\")[0],\n str(content[j][1][k])).lower())", "def write(self, outputfile):\n outfile = open(outputfile, 'w')\n if (outputfile.lower().endswith('.po')):\n self.write_po(outfile)\n elif (outputfile.lower().endswith('.json')):\n self.write_json(outfile)\n elif (outputfile.lower().endswith('.xml')):\n self.write_properties(outfile)\n outfile.close()", "def write_file_simple(self,filename):\n\n output = open(filename,\"w\")\n # write header\n output.write(\"# %1s %3s %22s %6s %22s\\n\"%(\"l\",\"n\",\"nu_theo (muHz)\",\"unused\",\"Inertia\"))\n for i in range(self.modes.shape[0]):\n output.write(\" %1d %3d %22.15e 0.0 %22.15e\\n\"%( \\\n self.modes[\"l\"][i], \\\n self.modes[\"n\"][i], \\\n self.modes[\"freq\"][i]*self.glb[ifreq_ref], \\\n self.modes[\"inertia\"][i]))\n output.close()", "def writeCADFile(self, filename):\n valid_filetypes = [\"brep\", \"bstl\", \"egads\", \"egg\", \"iges\", \"igs\", \"sens\", \"step\", \"stl\", \"stp\", \"tess\", \"grid\"]\n file_extension = filename.split(\".\")[-1]\n if file_extension.lower() not in valid_filetypes:\n raise OSError(\n \"CAD filename \"\n + filename\n + \" must have a valid exension. \"\n + \"Consult the EngineeringSketchPad docs for the DUMP function\"\n )\n if self.comm.rank == 0:\n modelCopy = self.espModel.Copy()\n n_branches, _, _ = modelCopy.Info()\n modelCopy.NewBrch(\n n_branches, modelCopy.GetCode(\"dump\"), \"<none>\", 0, filename, \"0\", \"0\", \"0\", \"\", \"\", \"\", \"\", \"\"\n )\n modelCopy.Build(0, 0)" ]
[ "0.7794726", "0.66742295", "0.64932483", "0.64526165", "0.6379942", "0.63655496", "0.63634735", "0.62910575", "0.6240714", "0.6233921", "0.6233921", "0.6233921", "0.61785156", "0.61412483", "0.61257005", "0.610843", "0.6082861", "0.60720426", "0.6064205", "0.60603034", "0.59847915", "0.5953382", "0.5949586", "0.59256744", "0.59232116", "0.59232116", "0.5918855", "0.5918259", "0.591524", "0.59104925", "0.5906709", "0.59009737", "0.58934045", "0.58857846", "0.58828205", "0.5871059", "0.58642375", "0.58520657", "0.5847941", "0.58440024", "0.5836401", "0.5835469", "0.58210695", "0.580742", "0.57888246", "0.5781007", "0.5777897", "0.5768409", "0.5767276", "0.5766805", "0.57632726", "0.5750538", "0.57470477", "0.5733779", "0.573351", "0.572666", "0.5726504", "0.57206035", "0.5718034", "0.57089895", "0.5704777", "0.5703387", "0.56983274", "0.56963384", "0.56868637", "0.5682352", "0.56822103", "0.56810313", "0.5677822", "0.5676366", "0.5673577", "0.56727034", "0.5670391", "0.5669521", "0.5668411", "0.566751", "0.5666728", "0.5647113", "0.56437045", "0.5642565", "0.5636676", "0.56290835", "0.5627325", "0.5618395", "0.5613953", "0.56119215", "0.5611875", "0.5611426", "0.560923", "0.5607486", "0.5606954", "0.560589", "0.560346", "0.5602905", "0.5598933", "0.55940706", "0.5592159", "0.55870605", "0.557992", "0.557991" ]
0.7876976
0
Builds a k-factor circulant matrix (a matrix with the structure of a circulant matrix, but with the entries above the diagonal multiplied by the same factor k). The matrix is stored in memory.
def factor_circulant_matrix(x, k):
    n = len(x)
    return circulant(x) * (tri(n, n, 0) + k*np.transpose(tri(n, n, -1)))
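A minimal usage sketch (not part of the dataset record): it assumes the snippet's helpers come from NumPy/SciPy, i.e. numpy.tri, np.transpose, and scipy.linalg.circulant, and it repeats the record's one-line definition so the sketch runs stand-alone.

import numpy as np
from numpy import tri
from scipy.linalg import circulant

def factor_circulant_matrix(x, k):
    # Elementwise mask: ones on and below the main diagonal, factor k above it,
    # applied to the ordinary circulant matrix built from x.
    n = len(x)
    return circulant(x) * (tri(n, n, 0) + k * np.transpose(tri(n, n, -1)))

x = np.array([1.0, 2.0, 3.0, 4.0])
A = factor_circulant_matrix(x, k=2.0)
# First column of A equals x; every entry above the main diagonal is the
# corresponding circulant entry multiplied by k.
print(A)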
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_k_matrix(self):\r\n K = self.uv_vol + self.Epsilon * self.guv_vol + \\\r\n (self.Epsilon / self.Beta) * self.uv_bound\r\n return K", "def _K(m):\n M = m*(m - 1)/2\n K = np.zeros((M, m**2), dtype=np.int64)\n row = 0\n for j in range(1, m):\n col = (j - 1)*m + j\n s = m - j\n K[row:(row+s), col:(col+s)] = np.eye(s)\n row += s\n return K", "def K(self):\n\n # Calculate and return the stiffness matrix in global coordinates\n return matmul(matmul(inv(self.T()), self.k()), self.T())", "def cofactorMatrix(self):\n returnvalue = Matrix()\n for i in range(self._height):\n newRow = list()\n for j in range(self._width):\n newRow.append(self.cofactor(i, j))\n returnvalue.addRow(*newRow)\n return returnvalue", "def nCk(n, k):\n return factorial(n)//factorial(k)//factorial(n-k)", "def calc_big_K(T, n_factors, tau, var_n, out=None):\n if out is None:\n K = np.zeros((T * n_factors, T * n_factors))\n else:\n K = out\n for delta_t in range(T):\n diag = calc_K(tau, delta_t, var_n)\n diag = np.tile(diag, T - delta_t)\n idxs_0 = np.arange(0, (T - delta_t) * n_factors)\n idxs_1 = np.arange(delta_t * n_factors, T * n_factors)\n K[idxs_0, idxs_1] = diag\n K[idxs_1, idxs_0] = diag\n return K", "def nCr(n, k):\n if n < k:\n return 0\n f = math.factorial\n return f(n) / f(k) / f(n - k)", "def jordan_wigner_ladder_sparse(n_qubits, tensor_factor, ladder_type):\n parities = tensor_factor * [pauli_z_csc]\n identities = [\n scipy.sparse.identity(2**(n_qubits - tensor_factor - 1),\n dtype=complex,\n format='csc')\n ]\n if ladder_type:\n operator = kronecker_operators(parities + [q_raise_csc] + identities)\n else:\n operator = kronecker_operators(parities + [q_lower_csc] + identities)\n return operator", "def ckm(i,j):\n if i >= 1 and i <= 3 and j >= 1 and j <= 3:\n return _ckm_abs[i-1, j-1]\n else:\n raise(ValueError('Wrong generation index in CKM matrix: ({},{}).'.format(i,j)))", "def power_matrix(A, k):\n nrow = np.shape(A)[0]\n A0 = np.identity(nrow) \n for k in range(q):\n A0 = np.dot(A0, A)\n \n return A0", "def factor_circulant_multiplication(u, x, k=1):\n n = len(u) \n D_k = (k**(1/n))**np.arange(0,n)\n Lambda = fft(D_k*x)\n return (1/D_k)*real(ifft(Lambda*fft(D_k*u))) # y", "def Cijkl(C):\n c = np.zeros(shape=(3, 3, 3, 3))\n CC = np.zeros(shape=(9, 9))\n CC[0:6, 0:6] = C[0:6, 0:6]\n CC[6:9, 6:9] = C[3:6, 3:6]\n CC[0:6, 6:9] = C[0:6, 3:6]\n CC[6:9, 0:6] = C[3:6, 0:6]\n\n c[0, 0, 0, 0] = CC[0, 0]\n c[0, 0, 1, 1] = CC[0, 1]\n c[0, 0, 2, 2] = CC[0, 2]\n c[0, 0, 1, 2] = CC[0, 3]\n c[0, 0, 2, 0] = CC[0, 4]\n c[0, 0, 0, 1] = CC[0, 5]\n c[0, 0, 2, 1] = CC[0, 6]\n c[0, 0, 0, 2] = CC[0, 7]\n c[0, 0, 1, 0] = CC[0, 8]\n\n c[1, 1, 0, 0] = CC[1, 0]\n c[1, 1, 1, 1] = CC[1, 1]\n c[1, 1, 2, 2] = CC[1, 2]\n c[1, 1, 1, 2] = CC[1, 3]\n c[1, 1, 2, 0] = CC[1, 4]\n c[1, 1, 0, 1] = CC[1, 5]\n c[1, 1, 2, 1] = CC[1, 6]\n c[1, 1, 0, 2] = CC[1, 7]\n c[1, 1, 1, 0] = CC[1, 8]\n\n c[2, 2, 0, 0] = CC[2, 0]\n c[2, 2, 1, 1] = CC[2, 1]\n c[2, 2, 2, 2] = CC[2, 2]\n c[2, 2, 1, 2] = CC[2, 3]\n c[2, 2, 2, 0] = CC[2, 4]\n c[2, 2, 0, 1] = CC[2, 5]\n c[2, 2, 2, 1] = CC[2, 6]\n c[2, 2, 0, 2] = CC[2, 7]\n c[2, 2, 1, 0] = CC[2, 8]\n\n c[1, 2, 0, 0] = CC[3, 0]\n c[1, 2, 1, 1] = CC[3, 1]\n c[1, 2, 2, 2] = CC[3, 2]\n c[1, 2, 1, 2] = CC[3, 3]\n c[1, 2, 2, 0] = CC[3, 4]\n c[1, 2, 0, 1] = CC[3, 5]\n c[1, 2, 2, 1] = CC[3, 6]\n c[1, 2, 0, 2] = CC[3, 7]\n c[1, 2, 1, 0] = CC[3, 8]\n\n c[2, 0, 0, 0] = CC[4, 0]\n c[2, 0, 1, 1] = CC[4, 1]\n c[2, 0, 2, 2] = CC[4, 2]\n c[2, 0, 1, 2] = CC[4, 3]\n c[2, 0, 2, 0] = CC[4, 4]\n c[2, 0, 0, 1] = CC[4, 5]\n c[2, 0, 2, 
1] = CC[4, 6]\n c[2, 0, 0, 2] = CC[4, 7]\n c[2, 0, 1, 0] = CC[4, 8]\n\n c[0, 1, 0, 0] = CC[5, 0]\n c[0, 1, 1, 1] = CC[5, 1]\n c[0, 1, 2, 2] = CC[5, 2]\n c[0, 1, 1, 2] = CC[5, 3]\n c[0, 1, 2, 0] = CC[5, 4]\n c[0, 1, 0, 1] = CC[5, 5]\n c[0, 1, 2, 1] = CC[5, 6]\n c[0, 1, 0, 2] = CC[5, 7]\n c[0, 1, 1, 0] = CC[5, 8]\n\n c[2, 1, 0, 0] = CC[6, 0]\n c[2, 1, 1, 1] = CC[6, 1]\n c[2, 1, 2, 2] = CC[6, 2]\n c[2, 1, 1, 2] = CC[6, 3]\n c[2, 1, 2, 0] = CC[6, 4]\n c[2, 1, 0, 1] = CC[6, 5]\n c[2, 1, 2, 1] = CC[6, 6]\n c[2, 1, 0, 2] = CC[6, 7]\n c[2, 1, 1, 0] = CC[6, 8]\n\n c[0, 2, 0, 0] = CC[7, 0]\n c[0, 2, 1, 1] = CC[7, 1]\n c[0, 2, 2, 2] = CC[7, 2]\n c[0, 2, 1, 2] = CC[7, 3]\n c[0, 2, 2, 0] = CC[7, 4]\n c[0, 2, 0, 1] = CC[7, 5]\n c[0, 2, 2, 1] = CC[7, 6]\n c[0, 2, 0, 2] = CC[7, 7]\n c[0, 2, 1, 0] = CC[7, 8]\n\n c[1, 0, 0, 0] = CC[8, 0]\n c[1, 0, 1, 1] = CC[8, 1]\n c[1, 0, 2, 2] = CC[8, 2]\n c[1, 0, 1, 2] = CC[8, 3]\n c[1, 0, 2, 0] = CC[8, 4]\n c[1, 0, 0, 1] = CC[8, 5]\n c[1, 0, 2, 1] = CC[8, 6]\n c[1, 0, 0, 2] = CC[8, 7]\n c[1, 0, 1, 0] = CC[8, 8]\n return c", "def expansion_matrix_c(self):\n row = np.zeros(0)\n nnz = 0\n col = np.arange(nnz, dtype=np.int)\n data = np.zeros(nnz)\n return csr_matrix((data, (row, col)), shape=(self.ng, nnz))", "def _Kdiag(self, X):\r\n return self.mapping.f(X).flatten()**2", "def matrix_K1(l, omega, S, cn, csn, rhos, rho):\n zt = omega * S / cn['t']\n xt = omega * S / csn['t']\n row1 = np.array((- d21(l, zt), d23(l, xt)))\n row2 = np.array((- d41(l, zt), d43(l, xt, zt, rhos, rho)))\n return np.array((row1, row2))", "def k(self):\n return add(self.k_b(), self.k_m())", "def _compute_kTable(self, expand=False, factor=False, simplify=False):\n if self._has(\"k\"):\n return\n if self._has(\"p\"):\n k = tuple(self._.p[0, i, i] for i in range(self._.d + 1))\n else:\n if not self._has(\"P\"):\n self.eigenmatrix(expand=expand, factor=factor,\n simplify=simplify)\n k = tuple(integralize(x) for x in self._.P[0])\n assert k[0] == 1, \\\n \"the valency of the first relation is not 1\"\n self._.k = k", "def kronecker_graph(g, k, add_self_edges=True, strip_self_edges=True):\n\n adj = nx.adjacency_matrix(g).todense()\n if add_self_edges:\n for i in range(len(adj)):\n adj[i, i] = 1\n mat = adj\n for i in range(k - 1):\n mat = np.kron(mat, adj)\n if strip_self_edges:\n for i in range(len(mat)):\n mat[i, i] = 0\n name = \"kronecker(%s, %s, %s, %s)\" % (\n g.name if g.name else hash(g), k, add_self_edges, strip_self_edges)\n return nx.Graph(mat, name=name)", "def nCkarray(*k_values):\n result = 1\n for i, j in enumerate((m for k in k_values for m in range(1, k+1)), 1):\n result = (result * i) // j\n return result", "def calc_k(self):\n\t\n\tself.k = -np.array([self.sth*self.cphi, self.sth*self.sphi, self.cth])\n\n\treturn", "def cdf(self, k):\n\n if k < 0 or k > self.n:\n return 0\n\n k = int(k)\n ans = 0\n for i in range(0, k + 1):\n ans += self.pmf(i)\n return ans", "def _knn_matrix(x, k=16, self_loop=True):\n x = x.transpose(2, 1).squeeze(-1)\n batch_size, n_points, n_dims = x.shape\n if self_loop:\n _, nn_idx = torch.topk(-_pairwise_distance(x.detach()), k=k)\n else:\n _, nn_idx = torch.topk(-_pairwise_distance(x.detach()), k=k+1)\n nn_idx = nn_idx[:, :, 1:]\n center_idx = torch.arange(0, n_points).repeat(batch_size, k, 1).transpose(2, 1)\n center_idx = center_idx.to(x.device)\n return torch.stack((nn_idx, center_idx), dim=0)", "def matrices(self):\n # Creating L\n L = scipy.sparse.diags((self.inv_dx2, -2*self.inv_dx2, self.inv_dx2, 1),\n (-(self.N+1), -self.N, -(self.N-1), self.N),\n 
shape=(2*self.N, 2*self.N), dtype=np.complex128)\n self.L = scipy.sparse.csr_matrix(L)\n self.L[-(self.N+1), 0], self.L[-1, -self.N] = 0, 0\n\n # Computing largest eigenvalue of L explicitely:\n self.mu_max = self.inv_dx*np.sqrt(2*(1 + np.cos(np.pi/(self.N+1))))\n\n # Creating K\n self.K = scipy.sparse.diags((-self.inv_dx2, 2*self.inv_dx2, -self.inv_dx2),\n (-1, 0, 1), # Diagonals\n shape=(self.N, self.N), # Size of matrix\n dtype=np.complex128)", "def kronecker(self, value):\n if not (type(self) == type(value)):\n raise TypeError(\"Inappropriate argument type for kronecker product\")\n returnvalue = Matrix()\n for i in range(self._height):\n for j in range(value._height):\n newRow = list()\n for k in range(self._width):\n for l in range(value._width):\n newRow.append(self[i][k] * value[j][l])\n returnvalue.addRow(*newRow)\n return returnvalue", "def __factor_matrix(self, R, K, alpha, steps, beta, error_limit):\n # Transform regular array to numpy array\n R = numpy.array(R)\n\n # Generate P - N x K\n # Use random values to start. Best performance\n N = len(R)\n M = len(R[0])\n P = numpy.random.rand(N, K)\n\n # Generate Q - M x K\n # Use random values to start. Best performance\n Q = numpy.random.rand(M, K)\n Q = Q.T\n\n error = 0\n\n # iterate through max # of steps\n for step in xrange(steps):\n\n # iterate each cell in r\n for i in xrange(len(R)):\n for j in xrange(len(R[i])):\n if R[i][j] > 0:\n\n # get the eij (error) side of the equation\n eij = R[i][j] - numpy.dot(P[i, :], Q[:, j])\n\n for k in xrange(K):\n # (*update_rule) update pik_hat\n P[i][k] = P[i][k] + alpha * (2 * eij * Q[k][j] - beta * P[i][k])\n\n # (*update_rule) update qkj_hat\n Q[k][j] = Q[k][j] + alpha * ( 2 * eij * P[i][k] - beta * Q[k][j] )\n\n # Measure error\n error = self.__error(R, P, Q, K, beta)\n\n # Terminate when we converge\n if error < error_limit:\n break\n\n # track Q, P (learned params)\n # Q = Products x feature strength\n # P = Users x feature strength\n self.Q = Q.T\n self.P = P\n\n self.__print_fit_stats(error, N, M)", "def C(n,k):\n if 0 <= k <= n:\n ntok = 1\n ktok = 1\n for t in xrange(1, min(k, n - k) + 1):\n ntok *= n\n ktok *= t\n n -= 1\n return ntok // ktok\n else:\n return 0", "def make_mat_cp_le(cons_pot_mesh, lin_geo_mesh):\n pot_faces = cons_pot_mesh.get_faces()\n assert pot_faces.shape[0] == lin_geo_mesh.get_faces().shape[0]\n num_faces = pot_faces.shape[0]\n K = np.zeros((3 * num_faces, 3 * num_faces))\n add_cp_le_DL_terms(K, cons_pot_mesh, lin_geo_mesh)\n add_cp_le_RBM_terms(K, cons_pot_mesh, lin_geo_mesh)\n return K", "def fastdiag_solver(KM):\n dim = len(KM)\n n = tuple(K.shape[0] for (K,_) in KM)\n EV = [scipy.linalg.eigh(_asdense(K), _asdense(M)) for (K,M) in KM]\n\n diags = []\n for d in range(dim):\n D = [np.ones(n[j]) for j in range(dim)]\n D[d] = EV[d][0] # eigenvalues\n diags.append(reduce(np.kron, D))\n diag = sum(diags)\n\n l_op = KroneckerOperator(*tuple(U for (_,U) in EV))\n r_op = KroneckerOperator(*tuple(U.T for (_,U) in EV))\n\n return l_op * DiagonalOperator(1.0 / diag) * r_op", "def make_mat_cp_qe(cons_pot_mesh, quad_geo_mesh):\n pot_faces = cons_pot_mesh.get_faces()\n assert pot_faces.shape[0] == quad_geo_mesh.get_faces().shape[0]\n num_faces = pot_faces.shape[0]\n K = np.zeros((3 * num_faces, 3 * num_faces))\n add_cp_qe_DL_terms(K, cons_pot_mesh, quad_geo_mesh)\n add_cp_qe_RBM_terms(K, cons_pot_mesh, quad_geo_mesh)\n return K", "def bc_outgoing_mat(n, h, k):\n \n d = [1.0, 2.0j*k*h]\n i = [n-1, n-1]\n j = [n-2, n-1]\n return scipy.sparse.coo_matrix((d, (i, j)))", 
"def cntd_phi_k_class_coeff( L, m, k ):\n \n s_tkm2 = m + 2*(L - 1)\n if s_tkm2 % (k-2) != 0: return 0 \n s = s_tkm2/(k-2)\n\n if s<0:\n return 0\n\n A = [ [ phi_k_cc(sp, mp, k) for mp in range(m+1) ] for sp in range(s+1) ]\n Alog = lLog( A )\n \n return Alog[s][m]", "def generator_matrix(self):\n self.generator_mat = np.zeros((self.k, self.n), dtype=int)\n A_matrix = np.ones((self.k, self.n-self.k), dtype=int)\n\n identity_i = np.identity(self.k, dtype=int)\n self.generator_mat[:, :self.k] = identity_i\n\n # This loop edits the A_matrix to make the column vectors linearly ind.\n for x in range(self.n-self.k):\n A_matrix[x, x] = 0\n\n self.generator_mat[:, self.k:] = A_matrix\n\n# for i in range(self.k):\n# print(self.generator_mat[i,:])\n\n return self.generator_mat", "def init_tau(k, p, q): # whereas k = k + 4\n tau_matrix = np.zeros((k, k))\n for row in range(2, k - 2):\n tau_matrix[row][row + 1] = 1\n tau_matrix[0][1] = q\n tau_matrix[0][k - 2] = 1 - q\n tau_matrix[1][1] = 1 - p\n tau_matrix[1][2] = p\n tau_matrix[k - 2][k - 2] = 1 - p\n tau_matrix[k - 2][k - 1] = p\n tau_matrix[-1][-1] = 1\n tau_matrix = mf.log_marix(tau_matrix)\n return tau_matrix", "def get_couplingmatrix(self,lmax,nwins):\n #store sqrt of taper power in 'tapers' array:\n if nwins>self.nwins: nwins = self.nwins\n tapers = np.zeros( (self.nl, nwins) )\n for itaper in range(nwins):\n tapers[:,itaper] = np.sqrt(SHPowerSpectrum(self._coeffs(itaper)))\n\n #compute coupling matrix of the first nwins tapers:\n coupling_matrix = SHMTCouplingMatrix(lmax,tapers[:,:nwins])\n return coupling_matrix", "def _canonical_kr(B, C):\n n, p = B.shape\n m, pC = C.shape\n A = np.zeros((n * m, p))\n for k in range(B.shape[-1]):\n A[:, k] = np.kron(B[:, k], C[:, k])\n return A", "def calc_k_dot_r(self):\n\t\n\tself.k_dot_r = self.k[0]*self.rij[0,:,:,:] + self.k[1]*self.rij[1,:,:,:] + self.k[2]*self.rij[2,:,:,:]\n\t\n\treturn", "def _pmatrix(kn_u, kn_d, thickness):\n p = np.zeros((kn_u.size, 4, 4), np.complex128)\n\n p0 = np.exp(complex(0, 1) * kn_u * thickness)\n p1 = np.exp(complex(0, 1) * kn_d * thickness)\n\n p[:, 0, 0] = 1 / p0\n p[:, 1, 1] = p0\n p[:, 2, 2] = 1 / p1\n p[:, 3, 3] = p1\n\n return p", "def build_transition_matrix(k_counter, q, p):\r\n dim = k_counter + NOT_MOTIF_STATES\r\n transition_mat = np.zeros([dim, dim])\r\n # B start\r\n transition_mat[0, 1] = q\r\n transition_mat[0, -2] = 1 - q\r\n # B_1\r\n transition_mat[1, 1:3] = [1 - p, p]\r\n # B_2\r\n transition_mat[-2, -2:] = [1 - p, p]\r\n # S_last\r\n transition_mat[-3, -2] = 1\r\n # all S\r\n transition_mat[2:-3, 3:-2] = np.eye(k_counter - 1)\r\n return wrap_log(transition_mat)", "def jordan_gen(k):\n if k == 1:\n return totient\n @needs_decomp\n def j(decomp):\n factors = [1 - fractions.Fraction(1, p ** k) for p in decomp]\n return int((int(decomp) ** k) * product(factors))", "def _get_kriging_matrix(self, n, exact_values):\n\n xyz = np.concatenate((self.X_ADJUSTED[:, np.newaxis],\n self.Y_ADJUSTED[:, np.newaxis],\n self.Z_ADJUSTED[:, np.newaxis]), axis=1)\n d = cdist(xyz, xyz, 'euclidean')\n a = np.zeros((n+1, n+1))\n a[:n, :n] = - self.variogram_function(self.variogram_model_parameters, d)\n if not exact_values:\n if self.variogram_model == 'linear':\n np.fill_diagonal(a, self.variogram_model_parameters[1])\n elif self.variogram_model != 'custom':\n np.fill_diagonal(a, self.variogram_model_parameters[2])\n else :\n np.fill_diagonal(a, 0.)\n a[n, :-1] = 1.0\n a[:-1, n] = 1.0\n\n return a", "def _block_diagonal(factor_matrices):\n shapes_dict = {}\n for i, matrix_i in 
enumerate(factor_matrices):\n for j, matrix_j in enumerate(factor_matrices):\n shapes_dict[(i, j)] = matrix_i.shape[:-1] + matrix_j.shape[-1:]\n rows = []\n # concacatenate along axis = -2\n for i, matrix_i in enumerate(factor_matrices):\n # concatenate along axis = -1\n blocks_to_concatenate = []\n for j, _ in enumerate(factor_matrices):\n if i == j:\n blocks_to_concatenate.append(matrix_i)\n else:\n blocks_to_concatenate.append(gs.zeros(shapes_dict[(i, j)]))\n row = gs.concatenate(blocks_to_concatenate, axis=-1)\n rows.append(row)\n metric_matrix = gs.concatenate(rows, axis=-2)\n return metric_matrix", "def init_T(self, k):\n if k == 0:\n self.T = np.zeros((self.n, self.m), dtype=int)\n elif k == 1:\n self.T = np.arange(self.n * self.m, dtype=int)\n self.T = np.reshape(self.T, (self.n, self.m))\n elif k == 2:\n self.T = np.array(list(range( (self.n * self.m) // 2)) * 2, dtype=int)\n self.T = np.reshape(self.T, (self.n, self.m))", "def k_map(self):\n\t\tt1 = time.time()\n\t\tmapping_matrix = [] \n\t\tfor index in self.mapping:\n\t\t\tvector = np.zeros(len(self.unique_char),dtype=float)\n\t\t\tvector[index] = 1.0\n\t\t\tmapping_matrix.append(vector)\n\t\tprint(\"Time creating k map {:.3f} sec\".format(time.time()-t1))\n\t\tself.mapping_matrix = mapping_matrix\n\t\treturn mapping_matrix", "def matrix_K2(l, omega, S, cn, csn, rhos, rho):\n zt = omega * S / cn['t']\n xt = omega * S / csn['t']\n row1 = np.array((- w21(l, xt), d23(l, xt)))\n row2 = np.array((- w41(l, xt, zt, rhos, rho), d43(l, xt, zt, rhos, rho)))\n return np.array((row1, row2))", "def Kdiag(self,X,target):\r\n foo = np.zeros((X.shape[0],X.shape[0]))\r\n self.K(X,X,foo)\r\n target += np.diag(foo)", "def factorial(k):\n fact = 1\n for i in range(1, k + 1):\n fact *= i\n return fact", "def test_calc_k_c():\n\n P_x0 = ufloat(1.75789868673e-12, 1.75789868673e-14) * u.nm**2/u.Hz # 1/100\n f_c = ufloat(50000, 0.5) * u.Hz # 1/100000 relative\n Q = ufloat(10000, 100) * u.dimensionless # 1/100\n T = ufloat(300, 3) * u.K # 1/100\n # ex_k_c is no longer a nice number because I switched from a rounded to\n # more exact value for Boltzmann's constant\n ex_k_c = ufloat(2.9999965233852217, 0.05196147267057527) * u.N/u.m\n k_c = calc_k_c(f_c, Q, P_x0, T)\n assert_almost_equal(k_c.magnitude.n, ex_k_c.magnitude.n)\n assert_almost_equal(k_c.magnitude.s, ex_k_c.magnitude.s)", "def timesCroot(self, mat):\r\n print(\"WARNING: timesCroot is not yet tested\")\r\n if self.opts['CMA_diagonal'] is True \\\r\n or self.countiter <= self.opts['CMA_diagonal']:\r\n res = (self._Croot * mat.T).T\r\n else:\r\n res = np.dot(self._Croot, mat)\r\n return res", "def u(k, dim=2):\n return np.zeros(dim)", "def k_c(self, tl):\n\t\treturn self.KC0*exp(self.HKC/(R*self.TO)*(1. 
- self.TO/tl))", "def generate_Lk_by_Ck(fptree, data_set, Ck,dim, min_support, support_data):\n Lk = set()\n item_count = {}\n t_num = float(len(data_set))\n if dim == 1:\n for t in data_set:\n for item in Ck:\n if item.issubset(t):\n if item not in item_count:\n item_count[item] = 1\n else:\n item_count[item] += 1\n t_num = float(len(data_set))\n else:\n for item in Ck:\n array = list(item)\n subData, support = fptree.getSubData(array[len(array) - 1], dim)\n for i in range(0, len(subData)):\n if item.issubset(subData[i]):\n if item not in item_count:\n item_count[item] = support[i]\n else:\n item_count[item] += support[i]\n for item in item_count:\n if (item_count[item] / t_num) >= min_support:\n Lk.add(item)\n support_data[item] = item_count[item] / t_num\n\n return Lk", "def __init__(self, k):\n self.k = k\n self.N = 2**self.k", "def create_K_u(n: int) -> Array:\n return Array([K(i) for i in range(n ** 2 - 1)])", "def Kdiag(self, X):\n return self.inner_kernel.Kdiag(self._get_Z(X))", "def bkMatrix(km1basis, kbasis):\n bk = np.zeros((len(km1basis), len(kbasis)), dtype=int)\n for cell in kbasis:\n for idx in range(len(cell)):\n face = cell[:idx] + cell[idx + 1:]\n row = km1basis.index(face)\n col = kbasis.index(cell)\n bk[row, col] = 1\n return bk", "def jaccard_coeff(self):\n a, c, _, b = self.to_ccw()\n return _div(a, a + b + c)", "def KRC(self, ik, ipd, ipl, t):\n idx = ik - 1\n\n den1 = 1 - self.delta[idx] * self.coca.PK(ik, t)\n num1 = self.delta[idx] * self.thetak[idx]\n ins = num1 / den1\n\n for l in np.arange(0, self.L):\n pl = self.coca.PL(l, t)\n ins += ((self.thetal[l] * self.gamma[l][idx]) / (1 - pl))\n\n ans = ipd * np.exp(t * ipl) * ins\n\n return ans", "def generate_L(data_set, k, min_support):\n fptree = FPtree.fptree(data_set, min_support)\n print(\"pre:\",datetime.datetime.now())\n fptree.getPretable()\n print(\"pre:\",datetime.datetime.now())\n fptree.getRootTree()\n support_data = {}\n\n # L1,L2,support_L1 = adjacencyMatrix(data_set,min_support,support_data)\n # pretable = sorted(support_L1.items(), key=itemgetter(1, 0), reverse=True)\n # fptree = FPtree.fptree(data_set, min_support,pretable)\n # fptree.getRootTree()\n # Lksub1 = L2.copy()\n C1 = create_C1(data_set)\n # print (C1)\n # print (\"=====================\")\n L1 = generate_Lk_by_Ck(fptree,data_set, C1,1, min_support, support_data)\n Lksub1 = L1.copy()\n L = []\n L.append(Lksub1)\n for i in range(2, k+1):\n Ci = create_Ck(Lksub1, i)\n # print (Ci)\n # print (\"=====================\")\n Li = generate_Lk_by_Ck(fptree, data_set, Ci, i, min_support, support_data)\n Lksub1 = Li.copy()\n L.append(Lksub1)\n return L, support_data", "def make_kmer_tree(self, k, nums):\n nodes = [(np.array([]), [])]\n for it in range(k):\n new_nodes = []\n count = 0\n for i, node in enumerate(nodes):\n n, e = node\n if len(n) < it:\n continue\n for a in nums:\n count += 1\n new_node = (np.append(n, a), [])\n new_nodes.append(new_node)\n nodes[i][1].append(len(nodes) + count - 1)\n nodes += new_nodes\n return nodes", "def collatz_sequence_term(seed, k):\n if k == 1:\n return seed\n a = seed\n for i in range(k - 1):\n a = collatz(a)\n if a == 1:\n return None if k > i + 2 else a\n return a", "def _compute_eigenmatrix(self, k, tr, expand=False, factor=False,\n simplify=False):\n if not self._has(\"omega\"):\n self.cosineSequences(expand=expand, factor=factor,\n simplify=simplify)\n return Matrix(SR, [[self._.omega[tr(i, j)] * k[j]\n for j in range(self._.d + 1)]\n for i in range(self._.d + 1)])", "def phi_k_cc( s, m, k ):\n \n l_t2 = m 
+ s * k\n\n if l_t2 % 2 != 0:\n return 0\n \n denom = factorial(s) * factorial(m) * factorial( k ) ** s\n nom = double_factorial( l_t2 - 1 )\n\n return Fraction( nom, denom )", "def Kdiag(self,X,target):\r\n FX = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X)\r\n np.add(target,np.diag(mdot(FX,self.Gi,FX.T)),target)", "def Kdiag(self,X,target):\r\n FX = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X)\r\n np.add(target,np.diag(mdot(FX,self.Gi,FX.T)),target)", "def Kdiag(self,X,target):\r\n FX = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X)\r\n np.add(target,np.diag(mdot(FX,self.Gi,FX.T)),target)", "def factorize_tridiag_matrix(A):\n n = len(b)\n # scratch arrays:\n d = zeros(n, 'd'); c = zeros(n, 'd'); m = zeros(n, 'd')\n\n d[0] = A[0,1]\n c[0] = b[0]\n\n for k in iseq(start=1, stop=n-1, inc=1):\n m[k] = A[k,0]/d[k-1]\n d[k] = A[k,1] - m[k]*A[k-1,2]\n c[k] = b[k] - m[k]*c[k-1]\n return c, d", "def power_jordan(A, k):\n JA, P, jordan_bloques, nilp = canonica_jordan(A)\n nrowA = np.shape(A)[0]\n JK = np.zeros_like(A)\n m0 = 0 # inicializacion de indexador\n\n for m, BJ in zip(nilp, jordan_bloques):\n F = np.array([factorial(k)/(factorial(j)*factorial(k-j))*a**k for j in range(m)])\n BJK = np.zeros_like(m)\n for j in range(m):\n BJK[j, j:] = F[1:m-j+1]\n JK[m0:m, m0:m] = BJK\n m0 = m\n\n invP = gauss_jordan(P)\n invPJK = np.dot(invP, JK)\n AK = np.dot(invPJK, P)\n\n return AK", "def get_dct_matrix(N):\r\n dct_m = np.eye(N)\r\n for k in np.arange(N):\r\n for i in np.arange(N):\r\n w = np.sqrt(2 / N)\r\n if k == 0:\r\n w = np.sqrt(1 / N)\r\n dct_m[k, i] = w * np.cos(np.pi * (i + 1 / 2) * k / N)\r\n idct_m = np.linalg.inv(dct_m)\r\n return dct_m, idct_m", "def dilatation_matrice(kx,ky,M):\n\n # kx,ky entiers\n n = len(M) # Nb de lignes\n p = len(M[0]) # Nb de colonnes\n N = [[0 for j in range(kx*p)] for i in range(ky*n)]\n for i in range(ky*n):\n for j in range(kx*p):\n N[i][j] = M[i//ky][j//kx]\n return N", "def combinations(n, k):\n return factorial(n) / (factorial(k) * factorial(n - k))", "def calc_cmatrix(self):\n tw = self.twiss_df\n res = self._results_df\n\n LOG.debug(\"Calculating CMatrix.\")\n with timeit(lambda t:\n LOG.debug(\" CMatrix calculated in {:f}s\".format(t))):\n\n j = np.array([[0., 1.],\n [-1., 0.]])\n rs = np.reshape(tw.as_matrix(columns=[\"R11\", \"R12\",\n \"R21\", \"R22\"]),\n (len(tw), 2, 2))\n cs = np.einsum(\"ij,kjn,no->kio\",\n -j, np.transpose(rs, axes=(0, 2, 1)), j)\n cs = np.einsum(\"k,kij->kij\", (1 / np.sqrt(1 + np.linalg.det(rs))), cs)\n\n g11a = 1 / np.sqrt(tw.loc[:, \"BETX\"])\n g12a = np.zeros(len(tw))\n g21a = tw.loc[:, \"ALFX\"] / np.sqrt(tw.loc[:, \"BETX\"])\n g22a = np.sqrt(tw.loc[:, \"BETX\"])\n gas = np.reshape(np.array([g11a, g12a,\n g21a, g22a]).T,\n (len(tw), 2, 2))\n\n ig11b = np.sqrt(tw.loc[:, \"BETY\"])\n ig12b = np.zeros(len(tw))\n ig21b = -tw.loc[:, \"ALFY\"] / np.sqrt(tw.loc[:, \"BETY\"])\n ig22b = 1. 
/ np.sqrt(tw.loc[:, \"BETY\"])\n igbs = np.reshape(np.array([ig11b, ig12b,\n ig21b, ig22b]).T,\n (len(tw), 2, 2))\n cs = np.einsum(\"kij,kjl,kln->kin\", gas, cs, igbs)\n gammas = np.sqrt(1 - np.linalg.det(cs))\n\n res.loc[:, \"GAMMA_C\"] = gammas\n\n res.loc[:, \"F1001_C\"] = ((cs[:, 0, 0] + cs[:, 1, 1]) * 1j +\n (cs[:, 0, 1] - cs[:, 1, 0])) / 4 / gammas\n res.loc[:, \"F1010_C\"] = ((cs[:, 0, 0] - cs[:, 1, 1]) * 1j +\n (-cs[:, 0, 1]) - cs[:, 1, 0]) / 4 / gammas\n\n res.loc[:, \"C11\"] = cs[:, 0, 0]\n res.loc[:, \"C12\"] = cs[:, 0, 1]\n res.loc[:, \"C21\"] = cs[:, 1, 0]\n res.loc[:, \"C22\"] = cs[:, 1, 1]\n\n LOG.debug(\" Average coupling amplitude |F1001|: {:g}\".format(np.mean(\n np.abs(res.loc[:, \"F1001_C\"]))))\n LOG.debug(\" Average coupling amplitude |F1010|: {:g}\".format(np.mean(\n np.abs(res.loc[:, \"F1010_C\"]))))\n LOG.debug(\" Average gamma: {:g}\".format(np.mean(\n np.abs(res.loc[:, \"GAMMA_C\"]))))\n\n self._log_added('GAMMA_C', 'F1001_C', 'F1010_C', 'C11', 'C12', 'C21', 'C22')", "def k(self) -> np.ndarray:\n return self._vector[12:18]", "def kuzmin_rotation(R,c,M,G=astronomicalG):\n return np.sqrt(2*G*np.power(10.,M)*R*R*np.power(c*c+R*R,-1.5))", "def _C(self):\n\n # Find the local x and y coordinates at each node\n xi = 0\n yi = 0\n xj = self.width()\n yj = 0\n xm = xj\n ym = self.height()\n xn = 0\n yn = ym\n\n # Calculate the [C] coefficient matrix\n C = array([[1, xi, yi, xi**2, xi*yi, yi**2, xi**3, xi**2*yi, xi*yi**2, yi**3, xi**3*yi, xi*yi**3],\n [0, 0, 1, 0, xi, 2*yi, 0, xi**2, 2*xi*yi, 3*yi**2, xi**3, 3*xi*yi**2],\n [0, -1, 0, -2*xi, -yi, 0, -3*xi**2, -2*xi*yi, -yi**2, 0, -3*xi**2*yi, -yi**3],\n \n [1, xj, yj, xj**2, xj*yj, yj**2, xj**3, xj**2*yj, xj*yj**2, yj**3, xj**3*yj, xj*yj**3],\n [0, 0, 1, 0, xj, 2*yj, 0, xj**2, 2*xj*yj, 3*yj**2, xj**3, 3*xj*yj**2],\n [0, -1, 0, -2*xj, -yj, 0, -3*xj**2, -2*xj*yj, -yj**2, 0, -3*xj**2*yj, -yj**3],\n\n [1, xm, ym, xm**2, xm*ym, ym**2, xm**3, xm**2*ym, xm*ym**2, ym**3, xm**3*ym, xm*ym**3],\n [0, 0, 1, 0, xm, 2*ym, 0, xm**2, 2*xm*ym, 3*ym**2, xm**3, 3*xm*ym**2],\n [0, -1, 0, -2*xm, -ym, 0, -3*xm**2, -2*xm*ym, -ym**2, 0, -3*xm**2*ym, -ym**3],\n\n [1, xn, yn, xn**2, xn*yn, yn**2, xn**3, xn**2*yn, xn*yn**2, yn**3, xn**3*yn, xn*yn**3],\n [0, 0, 1, 0, xn, 2*yn, 0, xn**2, 2*xn*yn, 3*yn**2, xn**3, 3*xn*yn**2],\n [0, -1, 0, -2*xn, -yn, 0, -3*xn**2, -2*xn*yn, -yn**2, 0, -3*xn**2*yn, -yn**3]])\n \n # Return the coefficient matrix\n return C", "def log_factorial(k):\n\tif k ==0:\n\t\treturn 0\n\telse:\n\t\treturn 0.5*np.log(2*TMath.Pi()*k) + k*np.log(k) - k + np.log(1+1./(12*k) + 1/(288.*k**2) -139./(51840*k**3)-571./(2488320*k**4) + 163879./(209018880*k**5))", "def get5x5matrix(self): #modified from nxvasc get3x3matrix()\n try:\n i = na.identity(3)\n \n self.d124 = i.copy()\n self.ds124 = na.zeros(124,na.float64)\n \n for k in range(1,124):\n self.d124 = na.concatenate((self.d124,i))\n# print len(self.d124)\n count = 0\n a = []\n for k in range(-2,3):\n for j in range(-2,3):\n for i in range(-2,3):\n if( i != 0 or j != 0 or k != 0 ):\n self.ds124[count] = math.sqrt(i**2+j**2+k**2)\n count += 1\n a.append(i)\n a.append(j)\n a.append(k)\n# print len(a)\n a = na.reshape(na.array(a),(372,1))\n# print len(self.d124)\n self.d124 = na.concatenate((self.d124,a),axis=1)\n except Exception as error:\n print(\"failed in get5x5matrix(): \", error)", "def relaxation_matrix(self, uphill, downhill):\n world.KK = numpy.zeros((2,2), dtype=numpy.float64)\n Kup = 1.0/float(uphill)\n world.KK[0,0] = -Kup\n world.KK[1,0] = Kup\n Kdn = 1.0/float(downhill)\n 
world.KK[0,1] = Kdn\n world.KK[1,1] = -Kdn", "def expansion_matrix_du(self):\n row = self._base_nlp._upper_d_map\n nnz = len(self._base_nlp._upper_d_map)\n col = np.arange(nnz, dtype=np.int)\n data = np.ones(nnz)\n return csr_matrix((data, (row, col)), shape=(self.nd, nnz))", "def K_matrix(X, Y):\n\n eps = 1e-9\n\n D2 = torch.pow(X[:, :, None, :] - Y[:, None, :, :], 2).sum(-1)\n K = D2 * torch.log(D2 + eps)\n return K", "def dimension_reduction(X, k=10):\n cov = cov_generation(X)\n eig, eigv, _, _ = jacobi_loop(cov)\n sort_args = np.argsort(np.abs(eig))[::-1]\n projection_matrix = eigv[sort_args][:, :k]\n reduce_x = np.dot(X, projection_matrix)\n \n return projection_matrix, reduce_x", "def calc(k):\n n = factorial(4*k) * (1103.0 + 26390.0*k)\n d = factorial(k)**4 * 396.0**(4.0*k)\n z = n/d\n return z", "def laplacian_mat(n):\n data = [1, -2, 1]*n\n i = flatten([[k,k,k] for k in range(n)])\n j = flatten([[k-1, k, k+1] for k in range(n)])\n return scipy.sparse.coo_matrix((data[1:-1], (i[1:-1], j[1:-1])))", "def _initialize_kindiffeq_matrices(self, kdeqs):\n\n if kdeqs:\n if len(self.q) != len(kdeqs):\n raise ValueError('There must be an equal number of kinematic '\n 'differential equations and coordinates.')\n kdeqs = Matrix(kdeqs)\n\n u = self.u\n qdot = self._qdot\n # Dictionaries setting things to zero\n u_zero = dict((i, 0) for i in u)\n uaux_zero = dict((i, 0) for i in self._uaux)\n qdot_zero = dict((i, 0) for i in qdot)\n\n f_k = msubs(kdeqs, u_zero, qdot_zero)\n k_ku = (msubs(kdeqs, qdot_zero) - f_k).jacobian(u)\n k_kqdot = (msubs(kdeqs, u_zero) - f_k).jacobian(qdot)\n\n f_k = k_kqdot.LUsolve(f_k)\n k_ku = k_kqdot.LUsolve(k_ku)\n k_kqdot = eye(len(qdot))\n\n self._qdot_u_map = solve_linear_system_LU(\n Matrix([k_kqdot.T, -(k_ku * u + f_k).T]).T, qdot)\n\n self._f_k = msubs(f_k, uaux_zero)\n self._k_ku = msubs(k_ku, uaux_zero)\n self._k_kqdot = k_kqdot\n else:\n self._qdot_u_map = None\n self._f_k = Matrix()\n self._k_ku = Matrix()\n self._k_kqdot = Matrix()", "def _compute_kl(self, lvl):\n kl = [] # kernal length\n for n in range(lvl):\n fct = self.scaling**n # up-sampling factor\n kl.append(fct*(self.nfreq-1)+1)\n kl.append(kl[-1]) # repeat the value of the coarsest scale for the approximation coefficient\n return kl[::-1]", "def K(r, R):\n u = r / R\n k = np.sqrt(1 - 1. / u ** 2)\n return k", "def c(self,j,i_j):\n \"\"\" The index j of the chains goes from 0 to k-1 (where k is the \n number of chains in our decomposition \"\"\"\n assert j < len(self.lcd), \"j must be the index of a chain\"\n \"\"\" The index i_j goes from 0 to len(lcd[j]) this range is one longer\n than the length of the chain because we go from {} to the full chain. 
\"\"\"\n assert i_j <= self.lcd_dims[j], \"i_j = {}, dims[j] = {}\".format(i_j, self.lcd_dims[j])\n if i_j == 0:\n return None\n else:\n return self.lcd[j][i_j-1]", "def Ham_gen(self,kx,ky):\n temp=np.zeros((self.NL*2,self.NL*2),dtype=complex) # for storage of Hamiltonian matrix\n for i in range(self.NL):\n #Diagonal terms are purely layer specific.\n # DIAG A\n temp[2*i ,2*i ]=self.layers[i].H1(kx,ky) + self.layers[i].Hz(kx,ky)\n # LOWER OFF-DIAG BA\n temp[2*i+1,2*i ]=self.layers[i].Hx(kx,ky) + 1.j*self.layers[i].Hy(kx,ky)\n # UPPER OFF-DIAG AB\n temp[2*i ,2*i+1]=self.layers[i].Hx(kx,ky) - 1.j*self.layers[i].Hy(kx,ky)\n # DIAG B\n temp[2*i+1,2*i+1]=self.layers[i].H1(kx,ky) - self.layers[i].Hz(kx,ky)\n\n # Next update the couplings between the layers.\n if i<self.NL-1:\n temp[2*i ,2*i+2]=self.couplings[i]\n temp[2*i+1,2*i+3]=self.couplings[i]\n temp[2*i+2,2*i ]=self.couplings[i]\n temp[2*i+3,2*i+1]=self.couplings[i]\n\n return temp", "def cartan_matrix(self):\n # as soon as CartanMatrix is implemented we should use it here:\n # from sage.combinat.root_system.cartan_matrix import CartanMatrix\n cmat = copy(self.b_matrix())\n for i,j in cmat.nonzero_positions():\n a = cmat[i,j]\n if a > 0: cmat[i,j] = -a\n for i in range(self._rank):\n cmat[i,i] = 2\n # return CartanMatrix(cmat)\n return cmat", "def generator_matrix(self):\n C = self.code()\n F = C.base_ring()\n Cor = C.original_code()\n G = Cor.generator_matrix()\n k = C.dimension()\n extra_col = [-sum(G.rows()[i]) for i in range(k)]\n extra_col = matrix(F, k, 1, extra_col)\n return G.augment(extra_col)", "def Kdiag(self,X,target):\r\n target1 = np.zeros(X.shape[0])\r\n target2 = np.zeros(X.shape[0])\r\n self.k1.Kdiag(X[:,self.slice1],target1)\r\n self.k2.Kdiag(X[:,self.slice2],target2)\r\n target += target1 * target2", "def get_analytical_stiffness_matrix(self):\n if not self.bearing_type == \"short_bearing\":\n warnings.warn(\n \"Function get_analytical_stiffness_matrix suitable only for short bearings. \"\n \"The ratio between the bearing length and its radius should be less or \"\n \"equal to 0.25. 
Currently we have \"\n + str(self.length / self.radius_stator)\n + \".\"\n )\n # fmt: off\n f = self.get_rotor_load()\n h0 = 1.0 / (((np.pi ** 2) * (1 - self.eccentricity_ratio ** 2) + 16 * self.eccentricity_ratio ** 2) ** 1.5)\n a = f / self.radial_clearance\n kxx = a * h0 * 4 * ((np.pi ** 2) * (2 - self.eccentricity_ratio ** 2) + 16 * self.eccentricity_ratio ** 2)\n kxy = (a * h0 * np.pi * ((np.pi ** 2) * (1 - self.eccentricity_ratio ** 2) ** 2 -\n 16 * self.eccentricity_ratio ** 4) /\n (self.eccentricity_ratio * np.sqrt(1 - self.eccentricity_ratio ** 2)))\n kyx = (-a * h0 * np.pi * ((np.pi ** 2) * (1 - self.eccentricity_ratio ** 2) *\n (1 + 2 * self.eccentricity_ratio ** 2) +\n (32 * self.eccentricity_ratio ** 2) * (1 + self.eccentricity_ratio ** 2)) /\n (self.eccentricity_ratio * np.sqrt(1 - self.eccentricity_ratio ** 2)))\n kyy = (a * h0 * 4 * ((np.pi ** 2) * (1 + 2 * self.eccentricity_ratio ** 2) +\n ((32 * self.eccentricity_ratio ** 2) *\n (1 + self.eccentricity_ratio ** 2)) / (1 - self.eccentricity_ratio ** 2)))\n # fmt: on\n return [kxx, kxy, kyx, kyy]", "def _assemble_kernel_mat_wkr(\n j,\n n_perms,\n tril_perms_lin,\n sig,\n use_E_cstr=False,\n cols_m_limit=None,\n cols_3n_keep_idxs=None,\n):\n\n global glob\n\n R_desc = np.frombuffer(glob['R_desc']).reshape(glob['R_desc_shape'])\n R_d_desc = np.frombuffer(glob['R_d_desc']).reshape(glob['R_d_desc_shape'])\n\n K = np.frombuffer(glob['K']).reshape(glob['K_shape'])\n\n n_train, dim_d, dim_i = R_d_desc.shape\n dim_keep = dim_i if cols_3n_keep_idxs is None else cols_3n_keep_idxs.size\n cols_m_limit = n_train if cols_m_limit is None else cols_m_limit\n\n # TODO: document this exception\n if use_E_cstr and not (cols_m_limit is None or cols_m_limit == n_train):\n raise ValueError(\n '\\'use_E_cstr\\'- and \\'cols_m_limit\\'-parameters are mutually exclusive!'\n )\n\n # TODO: document this exception\n if use_E_cstr and cols_3n_keep_idxs is not None:\n raise ValueError(\n '\\'use_E_cstr\\'- and \\'cols_3n_keep_idxs\\'-parameters are mutually exclusive!'\n )\n\n mat52_base_div = 3 * sig ** 4\n sqrt5 = np.sqrt(5.0)\n sig_pow2 = sig ** 2\n\n if cols_3n_keep_idxs is None:\n cols_3n_keep_idxs = np.arange(dim_i)\n\n base = np.arange(dim_i) # base set of indices\n base_keep = np.arange(dim_keep)\n\n blk_j = base + j * dim_i\n blk_j_keep = base_keep + j * dim_keep # NEW\n\n E_off = dim_i * cols_m_limit\n\n # Create permutated variants of 'rj_desc' and 'rj_d_desc'.\n rj_desc_perms = np.reshape(\n np.tile(R_desc[j, :], n_perms)[tril_perms_lin], (n_perms, -1), order='F'\n )\n rj_d_desc_perms = np.reshape(\n np.tile(R_d_desc[j, :, :].T, n_perms)[:, tril_perms_lin], (-1, dim_d, n_perms)\n )\n\n for i in range(j, n_train):\n blk_i = base[:, None] + i * dim_i\n blk_i_keep = base_keep[:, None] + i * dim_keep\n\n diff_ab_perms = R_desc[i, :] - rj_desc_perms\n norm_ab_perms = sqrt5 * np.linalg.norm(diff_ab_perms, axis=1)\n\n mat52_base_perms = np.exp(-norm_ab_perms / sig) / mat52_base_div * 5\n diff_ab_outer_perms = 5 * np.einsum(\n 'ki,kj->ij',\n diff_ab_perms * mat52_base_perms[:, None],\n np.einsum('ik,jki -> ij', diff_ab_perms, rj_d_desc_perms),\n )\n diff_ab_outer_perms -= np.einsum(\n 'ijk,k->ji',\n rj_d_desc_perms,\n ((sig_pow2 + sig * norm_ab_perms) * mat52_base_perms),\n )\n\n ## K[blk_i, blk_j] = K[blk_j, blk_i] = R_d_desc[i, :, :].T.dot(diff_ab_outer_perms)\n # K[blk_i, blk_j] = R_d_desc[i, :, :].T.dot(diff_ab_outer_perms)\n # if (\n # i < cols_m_limit\n # ): # symmetric extension is not always possible, if a partial kernel is assembled\n # 
K[blk_j, blk_i] = K[blk_i, blk_j]\n\n k = R_d_desc[i, :, :].T.dot(diff_ab_outer_perms)\n K[blk_i, blk_j_keep[None, :]] = k[:, cols_3n_keep_idxs]\n if (\n i < cols_m_limit\n ): # symmetric extension is not always possible, if a partial kernel is assembled\n K[blk_j[:, None], blk_i_keep.T] = k[cols_3n_keep_idxs, :].T\n\n if use_E_cstr:\n for i in range(n_train):\n\n diff_ab_perms = R_desc[i, :] - rj_desc_perms\n norm_ab_perms = sqrt5 * np.linalg.norm(diff_ab_perms, axis=1)\n\n if use_E_cstr:\n K_fe = (\n 5\n * diff_ab_perms\n / (3 * sig ** 3)\n * (norm_ab_perms[:, None] + sig)\n * np.exp(-norm_ab_perms / sig)[:, None]\n )\n K[E_off + i, blk_j] = K[blk_j, E_off + i] = np.einsum(\n 'ik,jki -> j', K_fe, rj_d_desc_perms\n )\n\n K[E_off + i, E_off + j] = K[E_off + j, E_off + i] = (\n 1 + (norm_ab_perms / sig) * (1 + norm_ab_perms / (3 * sig))\n ).dot(np.exp(-norm_ab_perms / sig))\n\n return n_train - j", "def k_kappa_l(self) -> List[complex]:\n return self._k_kappa_l_array", "def mkCoalMatrix(C, npop):\n C = np.array(C).flatten()\n M = np.zeros((npop, npop))\n cnt = 0\n for i in range(npop):\n for j in range(i, npop):\n M[i, j] = C[cnt]\n if i != j:\n M[j, i] = M[i, j]\n cnt += 1\n\n return M", "def gamma_matrix(gamma_coefs):\n matrix = np.ndarray([len(gamma_coefs), 3, 256], dtype=int)\n\n # gamma_coefs contains an [R, G, B] gamma table for each slab\n for i, slab in enumerate(gamma_coefs):\n for j, color in enumerate(slab):\n for k in range(256):\n v = pow(k / 255, color) * 255\n v = int(round(v))\n matrix[i, j, k] = v\n return matrix", "def kTable(self, expand=False, factor=False, simplify=False):\n self._compute_kTable(expand=expand, factor=factor,\n simplify=simplify)\n self._.k = rewriteTuple(self._.k, expand=expand, factor=factor,\n simplify=simplify)\n return self._.k", "def k_b(self):\n\n b = self.width()/2\n c = self.height()/2\n\n Ex = self.E\n Ey = self.E\n nu_xy = self.nu\n nu_yx = self.nu\n G = self.E/(2*(1 + self.nu))\n t = self.t\n\n # Stiffness matrix for plate bending. This matrix was derived using a jupyter notebook. 
The\n # notebook can be found in the `Derivations`` folder of this project.\n k = t**3/12*array([[(-Ex*nu_yx*b**2*c**2/4 - Ex*c**4 - Ey*nu_xy*b**2*c**2/4 - Ey*b**4 + 7*G*nu_xy*nu_yx*b**2*c**2/5 - 7*G*b**2*c**2/5)/(b**3*c**3*(nu_xy*nu_yx - 1)), (-Ex*nu_yx*c**2/2 - Ey*b**2 + G*nu_xy*nu_yx*c**2/5 - G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), (Ex*c**2 + Ey*nu_xy*b**2/2 - G*nu_xy*nu_yx*b**2/5 + G*b**2/5)/(b**2*c*(nu_xy*nu_yx - 1)), (5*Ex*nu_yx*b**2*c**2 + 20*Ex*c**4 + 5*Ey*nu_xy*b**2*c**2 - 10*Ey*b**4 - 28*G*nu_xy*nu_yx*b**2*c**2 + 28*G*b**2*c**2)/(20*b**3*c**3*(nu_xy*nu_yx - 1)), (Ex*nu_yx*c**2/2 - Ey*b**2/2 - G*nu_xy*nu_yx*c**2/5 + G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), (5*Ex*c**2 - G*nu_xy*nu_yx*b**2 + G*b**2)/(5*b**2*c*(nu_xy*nu_yx - 1)), (-5*Ex*nu_yx*b**2*c**2 + 10*Ex*c**4 - 5*Ey*nu_xy*b**2*c**2 + 10*Ey*b**4 + 28*G*nu_xy*nu_yx*b**2*c**2 - 28*G*b**2*c**2)/(20*b**3*c**3*(nu_xy*nu_yx - 1)), (-Ey*b**2/2 - G*nu_xy*nu_yx*c**2/5 + G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), (Ex*c**2/2 + G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1)), (5*Ex*nu_yx*b**2*c**2 - 10*Ex*c**4 + 5*Ey*nu_xy*b**2*c**2 + 20*Ey*b**4 - 28*G*nu_xy*nu_yx*b**2*c**2 + 28*G*b**2*c**2)/(20*b**3*c**3*(nu_xy*nu_yx - 1)), (-5*Ey*b**2 + G*nu_xy*nu_yx*c**2 - G*c**2)/(5*b*c**2*(nu_xy*nu_yx - 1)), (Ex*c**2/2 - Ey*nu_xy*b**2/2 + G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1))],\n [(-Ey*nu_xy*c**2/2 - Ey*b**2 + G*nu_xy*nu_yx*c**2/5 - G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), 4*(-5*Ey*b**2 + 2*G*nu_xy*nu_yx*c**2 - 2*G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), Ey*nu_xy/(nu_xy*nu_yx - 1), (Ey*nu_xy*c**2/2 - Ey*b**2/2 - G*nu_xy*nu_yx*c**2/5 + G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), 2*(-5*Ey*b**2 - 4*G*nu_xy*nu_yx*c**2 + 4*G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), 0, (Ey*b**2/2 + G*nu_xy*nu_yx*c**2/5 - G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), (-5*Ey*b**2 + 2*G*nu_xy*nu_yx*c**2 - 2*G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), 0, (5*Ey*b**2 - G*nu_xy*nu_yx*c**2 + G*c**2)/(5*b*c**2*(nu_xy*nu_yx - 1)), 2*(-5*Ey*b**2 - G*nu_xy*nu_yx*c**2 + G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), 0],\n [(Ex*nu_yx*b**2/2 + Ex*c**2 - G*nu_xy*nu_yx*b**2/5 + G*b**2/5)/(b**2*c*(nu_xy*nu_yx - 1)), Ex*nu_yx/(nu_xy*nu_yx - 1), 4*(-5*Ex*c**2 + 2*G*nu_xy*nu_yx*b**2 - 2*G*b**2)/(15*b*c*(nu_xy*nu_yx - 1)), (-5*Ex*c**2 + G*nu_xy*nu_yx*b**2 - G*b**2)/(5*b**2*c*(nu_xy*nu_yx - 1)), 0, 2*(-5*Ex*c**2 - G*nu_xy*nu_yx*b**2 + G*b**2)/(15*b*c*(nu_xy*nu_yx - 1)), -(Ex*c**2/2 + G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1)), 0, (-5*Ex*c**2 + 2*G*b**2*(nu_xy*nu_yx - 1))/(15*b*c*(nu_xy*nu_yx - 1)), (-Ex*nu_yx*b**2/2 + Ex*c**2/2 + G*nu_xy*nu_yx*b**2/5 - G*b**2/5)/(b**2*c*(nu_xy*nu_yx - 1)), 0, -(10*Ex*c**2 + 8*G*b**2*(nu_xy*nu_yx - 1))/(15*b*c*(nu_xy*nu_yx - 1))],\n [(5*Ex*nu_yx*b**2*c**2 + 20*Ex*c**4 + 5*Ey*nu_xy*b**2*c**2 - 10*Ey*b**4 - 28*G*nu_xy*nu_yx*b**2*c**2 + 28*G*b**2*c**2)/(20*b**3*c**3*(nu_xy*nu_yx - 1)), (Ex*nu_yx*c**2/2 - Ey*b**2/2 - G*nu_xy*nu_yx*c**2/5 + G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), (-5*Ex*c**2 + G*nu_xy*nu_yx*b**2 - G*b**2)/(5*b**2*c*(nu_xy*nu_yx - 1)), (-Ex*nu_yx*b**2*c**2/4 - Ex*c**4 - Ey*nu_xy*b**2*c**2/4 - Ey*b**4 + 7*G*nu_xy*nu_yx*b**2*c**2/5 - 7*G*b**2*c**2/5)/(b**3*c**3*(nu_xy*nu_yx - 1)), (-Ex*nu_yx*c**2/2 - Ey*b**2 + G*nu_xy*nu_yx*c**2/5 - G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), (-Ex*c**2 - Ey*nu_xy*b**2/2 + G*nu_xy*nu_yx*b**2/5 - G*b**2/5)/(b**2*c*(nu_xy*nu_yx - 1)), (5*Ex*nu_yx*b**2*c**2 - 10*Ex*c**4 + 5*Ey*nu_xy*b**2*c**2 + 20*Ey*b**4 - 28*G*nu_xy*nu_yx*b**2*c**2 + 28*G*b**2*c**2)/(20*b**3*c**3*(nu_xy*nu_yx - 1)), (-5*Ey*b**2 + G*nu_xy*nu_yx*c**2 - G*c**2)/(5*b*c**2*(nu_xy*nu_yx - 1)), 
(-Ex*c**2/2 + Ey*nu_xy*b**2/2 - G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1)), (-5*Ex*nu_yx*b**2*c**2 + 10*Ex*c**4 - 5*Ey*nu_xy*b**2*c**2 + 10*Ey*b**4 + 28*G*nu_xy*nu_yx*b**2*c**2 - 28*G*b**2*c**2)/(20*b**3*c**3*(nu_xy*nu_yx - 1)), (-Ey*b**2/2 - G*nu_xy*nu_yx*c**2/5 + G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), -(Ex*c**2/2 + G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1))],\n [(Ey*nu_xy*c**2/2 - Ey*b**2/2 - G*nu_xy*nu_yx*c**2/5 + G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), 2*(-5*Ey*b**2 - 4*G*nu_xy*nu_yx*c**2 + 4*G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), 0, (-Ey*nu_xy*c**2/2 - Ey*b**2 + G*nu_xy*nu_yx*c**2/5 - G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), 4*(-5*Ey*b**2 + 2*G*nu_xy*nu_yx*c**2 - 2*G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), -Ey*nu_xy/(nu_xy*nu_yx - 1), (5*Ey*b**2 - G*nu_xy*nu_yx*c**2 + G*c**2)/(5*b*c**2*(nu_xy*nu_yx - 1)), 2*(-5*Ey*b**2 - G*nu_xy*nu_yx*c**2 + G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), 0, (Ey*b**2/2 + G*nu_xy*nu_yx*c**2/5 - G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), (-5*Ey*b**2 + 2*G*nu_xy*nu_yx*c**2 - 2*G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), 0],\n [(5*Ex*c**2 - G*nu_xy*nu_yx*b**2 + G*b**2)/(5*b**2*c*(nu_xy*nu_yx - 1)), 0, 2*(-5*Ex*c**2 - G*nu_xy*nu_yx*b**2 + G*b**2)/(15*b*c*(nu_xy*nu_yx - 1)), (-Ex*nu_yx*b**2/2 - Ex*c**2 + G*nu_xy*nu_yx*b**2/5 - G*b**2/5)/(b**2*c*(nu_xy*nu_yx - 1)), -Ex*nu_yx/(nu_xy*nu_yx - 1), 4*(-5*Ex*c**2 + 2*G*nu_xy*nu_yx*b**2 - 2*G*b**2)/(15*b*c*(nu_xy*nu_yx - 1)), (Ex*nu_yx*b**2/2 - Ex*c**2/2 - G*nu_xy*nu_yx*b**2/5 + G*b**2/5)/(b**2*c*(nu_xy*nu_yx - 1)), 0, -(10*Ex*c**2 + 8*G*b**2*(nu_xy*nu_yx - 1))/(15*b*c*(nu_xy*nu_yx - 1)), (Ex*c**2/2 + G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1)), 0, (-5*Ex*c**2 + 2*G*b**2*(nu_xy*nu_yx - 1))/(15*b*c*(nu_xy*nu_yx - 1))],\n [(-5*Ex*nu_yx*b**2*c**2 + 10*Ex*c**4 - 5*Ey*nu_xy*b**2*c**2 + 10*Ey*b**4 + 28*G*nu_xy*nu_yx*b**2*c**2 - 28*G*b**2*c**2)/(20*b**3*c**3*(nu_xy*nu_yx - 1)), (Ey*b**2/2 + G*nu_xy*nu_yx*c**2/5 - G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), -(Ex*c**2/2 + G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1)), (5*Ex*nu_yx*b**2*c**2 - 10*Ex*c**4 + 5*Ey*nu_xy*b**2*c**2 + 20*Ey*b**4 - 28*G*nu_xy*nu_yx*b**2*c**2 + 28*G*b**2*c**2)/(20*b**3*c**3*(nu_xy*nu_yx - 1)), (5*Ey*b**2 - G*nu_xy*nu_yx*c**2 + G*c**2)/(5*b*c**2*(nu_xy*nu_yx - 1)), (-5*Ex*c**2 - 25*Ey*nu_xy*b**2 + 2*b**2*(15*Ey*nu_xy - G*nu_xy*nu_yx + G))/(10*b**2*c*(nu_xy*nu_yx - 1)), (-Ex*nu_yx*b**2*c**2/4 - Ex*c**4 - Ey*nu_xy*b**2*c**2/4 - Ey*b**4 + 7*G*nu_xy*nu_yx*b**2*c**2/5 - 7*G*b**2*c**2/5)/(b**3*c**3*(nu_xy*nu_yx - 1)), (Ex*nu_yx*c**2/2 + Ey*b**2 - G*nu_xy*nu_yx*c**2/5 + G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), (-Ex*c**2 - Ey*nu_xy*b**2/2 + G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1)), (5*Ex*nu_yx*b**2*c**2 + 20*Ex*c**4 + 5*Ey*nu_xy*b**2*c**2 - 10*Ey*b**4 - 28*G*nu_xy*nu_yx*b**2*c**2 + 28*G*b**2*c**2)/(20*b**3*c**3*(nu_xy*nu_yx - 1)), (-Ex*nu_yx*c**2/2 + Ey*b**2/2 + G*nu_xy*nu_yx*c**2/5 - G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), (-Ex*c**2 + G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1))],\n [(-Ey*b**2/2 - G*nu_xy*nu_yx*c**2/5 + G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), (-5*Ey*b**2 + 2*G*nu_xy*nu_yx*c**2 - 2*G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), 0, (-5*Ey*b**2 + G*nu_xy*nu_yx*c**2 - G*c**2)/(5*b*c**2*(nu_xy*nu_yx - 1)), 2*(-5*Ey*b**2 - G*nu_xy*nu_yx*c**2 + G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), 0, (Ey*nu_xy*c**2/2 + Ey*b**2 - G*nu_xy*nu_yx*c**2/5 + G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), 4*(-5*Ey*b**2 + 2*G*nu_xy*nu_yx*c**2 - 2*G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), Ey*nu_xy/(nu_xy*nu_yx - 1), (-Ey*nu_xy*c**2/2 + Ey*b**2/2 + G*nu_xy*nu_yx*c**2/5 - 
G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), 2*(-5*Ey*b**2 - 4*G*nu_xy*nu_yx*c**2 + 4*G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), 0],\n [(Ex*c**2/2 + G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1)), 0, (-5*Ex*c**2 + 2*G*b**2*(nu_xy*nu_yx - 1))/(15*b*c*(nu_xy*nu_yx - 1)), (Ex*nu_yx*b**2/2 - Ex*c**2/2 - G*nu_xy*nu_yx*b**2/5 + G*b**2/5)/(b**2*c*(nu_xy*nu_yx - 1)), 0, -(10*Ex*c**2 + 8*G*b**2*(nu_xy*nu_yx - 1))/(15*b*c*(nu_xy*nu_yx - 1)), (-Ex*nu_yx*b**2/2 - Ex*c**2 + G*nu_xy*nu_yx*b**2/5 - G*b**2/5)/(b**2*c*(nu_xy*nu_yx - 1)), Ex*nu_yx/(nu_xy*nu_yx - 1), 4*(-5*Ex*c**2 + 2*G*b**2*(nu_xy*nu_yx - 1))/(15*b*c*(nu_xy*nu_yx - 1)), (Ex*c**2 - G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1)), 0, -(10*Ex*c**2 + 2*G*b**2*(nu_xy*nu_yx - 1))/(15*b*c*(nu_xy*nu_yx - 1))],\n [(5*Ex*nu_yx*b**2*c**2 - 10*Ex*c**4 + 5*Ey*nu_xy*b**2*c**2 + 20*Ey*b**4 - 28*G*nu_xy*nu_yx*b**2*c**2 + 28*G*b**2*c**2)/(20*b**3*c**3*(nu_xy*nu_yx - 1)), (5*Ey*b**2 - G*nu_xy*nu_yx*c**2 + G*c**2)/(5*b*c**2*(nu_xy*nu_yx - 1)), (5*Ex*c**2 + 25*Ey*nu_xy*b**2 - 2*b**2*(15*Ey*nu_xy - G*nu_xy*nu_yx + G))/(10*b**2*c*(nu_xy*nu_yx - 1)), (-5*Ex*nu_yx*b**2*c**2 + 10*Ex*c**4 - 5*Ey*nu_xy*b**2*c**2 + 10*Ey*b**4 + 28*G*nu_xy*nu_yx*b**2*c**2 - 28*G*b**2*c**2)/(20*b**3*c**3*(nu_xy*nu_yx - 1)), (Ey*b**2/2 + G*nu_xy*nu_yx*c**2/5 - G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), (Ex*c**2/2 + G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1)), (5*Ex*nu_yx*b**2*c**2 + 20*Ex*c**4 + 5*Ey*nu_xy*b**2*c**2 - 10*Ey*b**4 - 28*G*nu_xy*nu_yx*b**2*c**2 + 28*G*b**2*c**2)/(20*b**3*c**3*(nu_xy*nu_yx - 1)), (-Ex*nu_yx*c**2/2 + Ey*b**2/2 + G*nu_xy*nu_yx*c**2/5 - G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), (Ex*c**2 - G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1)), (-Ex*nu_yx*b**2*c**2/4 - Ex*c**4 - Ey*nu_xy*b**2*c**2/4 - Ey*b**4 + 7*G*nu_xy*nu_yx*b**2*c**2/5 - 7*G*b**2*c**2/5)/(b**3*c**3*(nu_xy*nu_yx - 1)), (Ex*nu_yx*c**2/2 + Ey*b**2 - G*nu_xy*nu_yx*c**2/5 + G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), (Ex*c**2 + Ey*nu_xy*b**2/2 - G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1))],\n [(-5*Ey*b**2 + G*nu_xy*nu_yx*c**2 - G*c**2)/(5*b*c**2*(nu_xy*nu_yx - 1)), 2*(-5*Ey*b**2 - G*nu_xy*nu_yx*c**2 + G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), 0, (-Ey*b**2/2 - G*nu_xy*nu_yx*c**2/5 + G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), (-5*Ey*b**2 + 2*G*nu_xy*nu_yx*c**2 - 2*G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), 0, (-Ey*nu_xy*c**2/2 + Ey*b**2/2 + G*nu_xy*nu_yx*c**2/5 - G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), 2*(-5*Ey*b**2 - 4*G*nu_xy*nu_yx*c**2 + 4*G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), 0, (Ey*nu_xy*c**2/2 + Ey*b**2 - G*nu_xy*nu_yx*c**2/5 + G*c**2/5)/(b*c**2*(nu_xy*nu_yx - 1)), 4*(-5*Ey*b**2 + 2*G*nu_xy*nu_yx*c**2 - 2*G*c**2)/(15*b*c*(nu_xy*nu_yx - 1)), -Ey*nu_xy/(nu_xy*nu_yx - 1)],\n [(-Ex*nu_yx*b**2/2 + Ex*c**2/2 + G*nu_xy*nu_yx*b**2/5 - G*b**2/5)/(b**2*c*(nu_xy*nu_yx - 1)), 0, -(10*Ex*c**2 + 8*G*b**2*(nu_xy*nu_yx - 1))/(15*b*c*(nu_xy*nu_yx - 1)), -(Ex*c**2/2 + G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1)), 0, (-5*Ex*c**2 + 2*G*b**2*(nu_xy*nu_yx - 1))/(15*b*c*(nu_xy*nu_yx - 1)), (-Ex*c**2 + G*b**2*(nu_xy*nu_yx - 1)/5)/(b**2*c*(nu_xy*nu_yx - 1)), 0, -(10*Ex*c**2 + 2*G*b**2*(nu_xy*nu_yx - 1))/(15*b*c*(nu_xy*nu_yx - 1)), (Ex*nu_yx*b**2/2 + Ex*c**2 - G*nu_xy*nu_yx*b**2/5 + G*b**2/5)/(b**2*c*(nu_xy*nu_yx - 1)), -Ex*nu_yx/(nu_xy*nu_yx - 1), 4*(-5*Ex*c**2 + 2*G*b**2*(nu_xy*nu_yx - 1))/(15*b*c*(nu_xy*nu_yx - 1))]])\n \n # Calculate the stiffness of a weak spring for the drilling degree of freedom (rotation\n # about local z). 
We'll set the weak spring to be 1000 times weaker than any of the other\n # rotational stiffnesses in the matrix.\n k_rz = min(abs(k[1, 1]), abs(k[2, 2]), abs(k[4, 4]), abs(k[5, 5]),\n abs(k[7, 7]), abs(k[8, 8]), abs(k[10, 10]), abs(k[11, 11])\n )/1000\n\n # The matrix currently only holds terms related to bending action. We need to expand it to\n # with placeholders for all the degrees of freedom so it can be directly added to the\n # membrane stiffness matrix later on.\n\n # Initialize the expanded stiffness matrix to all zeros\n k_exp = zeros((24, 24))\n\n # Step through each term in the unexpanded stiffness matrix\n\n # i = Unexpanded matrix row\n for i in range(12):\n\n # j = Unexpanded matrix column\n for j in range(12):\n \n # Find the corresponding term in the expanded stiffness\n # matrix\n\n # m = Expanded matrix row\n if i in [0, 3, 6, 9]: # indices associated with deflection in z\n m = 2*i + 2\n if i in [1, 4, 7, 10]: # indices associated with rotation about x\n m = 2*i + 1\n if i in [2, 5, 8, 11]: # indices associated with rotation about y\n m = 2*i\n\n # n = Expanded matrix column\n if j in [0, 3, 6, 9]: # indices associated with deflection in z\n n = 2*j + 2\n if j in [1, 4, 7, 10]: # indices associated with rotation about x\n n = 2*j + 1\n if j in [2, 5, 8, 11]: # indices associated with rotation about y\n n = 2*j\n \n # Ensure the indices are integers rather than floats\n m, n = round(m), round(n)\n\n # Add the term from the unexpanded matrix into the expanded\n # matrix\n k_exp[m, n] = k[i, j]\n \n # Add the drilling degree of freedom's weak spring\n k_exp[5, 5] = k_rz\n k_exp[11, 11] = k_rz\n k_exp[17, 17] = k_rz\n k_exp[23, 23] = k_rz\n \n # Return the local stiffness matrix\n return k_exp", "def k(self):\n self.kTable()", "def _cache_factors(self, Pibra, Piket, Kbra, Kket, eps):\n q1, p1, Q1, P1 = Pibra\n q2, p2, Q2, P2 = Piket\n\n # If both parameter sets are identical, we are back in the homogeneous case.\n if q1 == q2 and p1 == p2 and Q1 == Q2 and P1 == P2:\n self._Hl = None\n\n # We have k in [0, 1, ..., K-1] where |K| is the basis size\n # hence K-1 is the maximal index.\n L = Kket.get_basis_size()\n\n makl = L\n\n # Factorials\n f = factorial(arange(makl))\n self._f = 1.0 / sqrt(f[:L].reshape(1, -1))\n\n # Note: formula currently fails for non-inhomogeneous case\n # because of divisions by zero in the two args below.\n argl = ((1.0j * conjugate(P1) * (q1 - q2) - 1.0j * conjugate(Q1) * (p1 - p2)) /\n (sqrt(1.0j * conjugate(Q2 * P1) - 1.0j * conjugate(Q1 * P2)) *\n sqrt(1.0j * conjugate(P1) * Q2 - 1.0j * conjugate(Q1) * P2)))\n\n # TODO: Better test for failure?\n if self._doraise and isnan(squeeze(argl)):\n raise InnerProductException(\"Symbolic formula failed due to Q_k = Q_l and P_k = P_l.\")\n\n # The parameter j varies in the range [0, 1, ..., min(K-1,L-1)]\n # hence we have that k-j can be in [K-1, K-2, ..., K-1-min(K-1,L-1)]\n # and similar for l-j we have [L-1, L-2, ..., L-1-min(K-1,L-1)]\n # where both K-1-min(K-1,L-1) and L-1-min(K-1,L-1) are non-negative.\n self._Hl = self._evaluate_hermite(L - 1, 1.0 / eps * argl)\n\n il = arange(L).reshape(-1, 1)\n self._pfl = ((1.0j * conjugate(Q2 * P1) - 1.0j * conjugate(Q1 * P2)) ** (il / 2.0)).reshape(L)\n\n # And the groundstate value\n self._I0 = self.exact_result_ground(Pibra, Piket, eps)", "def _r_matrix_xxz(self, root):\n r_matrix = np.eye(4, dtype=np.complex128)\n if self.delta == 1:\n b = (root - 1j) / (root + 1j)\n c = 2j / (root + 1j)\n\n elif self.delta > 1:\n gamma = np.arccosh(self.delta)\n b = 
np.sin(gamma / 2 * (root - 1j)) / np.sin(gamma / 2 * (root + 1j))\n c = 1j * np.sinh(gamma) / np.sin(gamma / 2 * (root + 1j))\n else:\n gamma = np.arccos(self.delta)\n b = np.sinh(gamma / 2 * (root - 1j)) / np.sinh(gamma / 2 * (root + 1j))\n c = 1j * np.sin(gamma) / np.sinh(gamma / 2 * (root + 1j))\n r_matrix[1, 1] = r_matrix[2, 2] = c\n r_matrix[1, 2] = r_matrix[2, 1] = b\n return r_matrix" ]
[ "0.6495986", "0.6089255", "0.6045119", "0.59890914", "0.5949488", "0.59035623", "0.5859298", "0.58462423", "0.57634705", "0.574443", "0.5730508", "0.5717386", "0.56819576", "0.566873", "0.5568253", "0.55545205", "0.5523086", "0.55172205", "0.5492196", "0.5491694", "0.5478032", "0.545727", "0.54372895", "0.5429208", "0.54242074", "0.54238397", "0.5373548", "0.5370893", "0.5370422", "0.5327783", "0.53144246", "0.53115743", "0.52899116", "0.5285035", "0.52575773", "0.52430975", "0.5240762", "0.5214698", "0.5214358", "0.51990324", "0.5189095", "0.5182801", "0.51823264", "0.51777744", "0.5153844", "0.51438445", "0.51391894", "0.51391655", "0.51356745", "0.51247686", "0.5115958", "0.5112978", "0.5110426", "0.51092285", "0.5081142", "0.5080568", "0.5077727", "0.50771344", "0.5072209", "0.50707847", "0.5063103", "0.5046778", "0.5045834", "0.5045834", "0.5045834", "0.50337565", "0.50334644", "0.50328356", "0.50092936", "0.5004326", "0.4999758", "0.49977154", "0.4992282", "0.49906734", "0.49903756", "0.49842948", "0.49818853", "0.497951", "0.49739745", "0.49675918", "0.49659172", "0.49557117", "0.49531987", "0.49462882", "0.49462014", "0.49415404", "0.49412304", "0.49336082", "0.4928694", "0.4927751", "0.4923733", "0.49224105", "0.49178925", "0.4910802", "0.48991477", "0.48964894", "0.48909009", "0.4889863", "0.488498", "0.4883424" ]
0.78092545
0
Compute the matrix-vector product y = Cu, where C is a k-factor circulant matrix. All matrices are real.
def factor_circulant_multiplication(u, x, k=1): n = len(u) D_k = (k**(1/n))**np.arange(0,n) Lambda = fft(D_k*x) return (1/D_k)*real(ifft(Lambda*fft(D_k*u))) # y
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updateC(A, U, B):\n \n m_dim = A.shape[1] \n q_dim = B.shape[0]\n \n C_tensor = np.zeros((m_dim, m_dim, q_dim), dtype=np.complex)\n \n for k in range(q_dim):\n A_k = A[:, :, k]\n b_k = B[k]\n \n x_hat = U @ b_k\n y_hat = A_k.conj().T @ x_hat\n \n phase_y = np.exp(1j*np.angle(y_hat))\n #phase_y = np.sign(y_hat)\n C_k = np.diag(phase_y)\n C_tensor[:, :, k] = C_k\n \n \n return C_tensor", "def matrix_vector_prod(m,u):\n each_product = []\n for v in m:\n each_product.append(dot_prod(v, u))\n return each_product", "def factor_circulant_matrix(x, k):\n n=len(x)\n return circulant(x) * (tri(n,n, 0) + k*np.transpose(tri(n,n, -1)))", "def covar(fx,cx):\n \n fx = np.array(fx)\n cx = np.array(cx)\n \n shape_fx = fx.shape\n shape_cx = cx.shape\n \n \n if shape_fx[1] != shape_cx[0]:\n print('-----------------------------------------')\n print(\"Shapes of fx and cx cannot be multiplied:\")\n print(shape_fx,\"x\",shape_cx)\n print('-----------------------------------------')\n raise ValueError('Input matrices are not compliant')\n \n cy = np.dot(np.dot(fx,cx),fx.T)\n \n print(\"Size of Cy matrix: \",np.shape(cy))\n \n return cy", "def __matmul__(self, q: np.ndarray) -> np.ndarray:\n return self.product(q)", "def cofactorMatrix(self):\n returnvalue = Matrix()\n for i in range(self._height):\n newRow = list()\n for j in range(self._width):\n newRow.append(self.cofactor(i, j))\n returnvalue.addRow(*newRow)\n return returnvalue", "def c_matrix(x1,x2,x3):\n\tC = np.array([\t[\t2*(x2-x1), \t\t(x2-x1), \t\t\t0\t\t\t], \\\n\t\t\t\t\t[\t(x2-x1), \t\t2*(x3-x1), \t\t(x3-x2)\t\t], \\\n\t\t\t\t\t[\t0,\t\t\t\t(x3-x2),\t\t2*(x3-x2)\t] \t], \\\n\t\t\t\t\tfloat)\n\treturn(C)", "def __matmul__(self, csys):\n self._transform(csys)\n return self", "def method3(self):\n cres=0.\n Ux_aloc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy_aloc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.ALDM[ix ,iy, : , : ]\n mat2=self.ALDM[(ix%self.kS.Nx)+1, iy, : , : ]\n mat3=self.ALDM[ix ,(iy%self.kS.Ny)+1, : , : ]\n \n Ux_aloc[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat2)[self.NL-1:,self.NL-1:])\n Uy_aloc[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat3)[self.NL-1:,self.NL-1:])\n\n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux_aloc[ix,iy]*Uy_aloc[ix+1,iy]/Ux_aloc[ix,iy+1]/Uy_aloc[ix,iy])\n cres+=ftemp/2./pi/1j\n \n return cres.real\n #End of method3", "def circulant_multiplication(u, a):\n \n return real(ifft(fft(a)*fft(u)))", "def toeplitz_multiplication(u, c, r=None):\n n = len(u)\n if r is None:\n r = c\n u1 = zeros((2*n))\n u1[0:n] = u\n \n c = np.concatenate((c, [0], r[-1:0:-1])) \n \n y1 = circulant_multiplication(u1, c)\n \n return y1[0:n]", "def compute_factor(X, v, c1, c2):\n\n assert np.shape(v)[1] == 1,\"v is not a column vector\"\n\n v = normalize_l2(v)\n\n sz_u = np.shape(X)[0]\n sz_v = np.shape(X)[1]\n\n assert sz_v == np.size(v)\n\n u = update_with_delta(X @ v, c1)\n v = update_with_delta(X.T @ u, c2)\n\n delta_u = 1000\n delta_v = 1000\n\n while delta_u > 1e-5 or delta_v > 1e-5:\n oldU = u\n oldV = v\n\n u = update_with_delta(X @ v, c1)\n v = update_with_delta(X.T @ u, c2)\n\n delta_u = npla.norm(u - oldU) / sz_u\n delta_v = npla.norm(v - oldV) / sz_v\n\n d = u.T @ X @ v\n\n return (d,u,v)", "def kronecker_prod(x, y):\n if len(list(x.size())) != 3 or len(list(y.size())) != 3:\n raise ValueError(\"An input is not of the right dimension.\")\n\n z = torch.zeros(\n 2,\n x.size()[1] * 
y.size()[1],\n x.size()[2] * y.size()[2],\n dtype=torch.double,\n device=x.device,\n )\n\n row_count = 0\n\n for i in range(x.size()[1]):\n for k in range(y.size()[1]):\n column_count = 0\n for j in range(x.size()[2]):\n for l in range(y.size()[2]):\n\n z[0][row_count][column_count] = (x[0][i][j] * y[0][k][l]) - (\n x[1][i][j] * y[1][k][l]\n )\n z[1][row_count][column_count] = (x[0][i][j] * y[1][k][l]) + (\n x[1][i][j] * y[0][k][l]\n )\n\n column_count += 1\n row_count += 1\n\n return z", "def simple_doct_product(u, v):\n v = [i / (sum(v)) for i in v]\n\n return np.dot(u, v)", "def cofactor_matrix(self):\n resp = []\n len_b = len(self.take_vec())\n for i in range(self.order):\n _matrix = aux.cofactor(self.take_matrix(),\n (i, self.order-1)\n )\n _resp = math.pow(-1, len_b-1)\n _resp = _resp * np.linalg.det(_matrix)\n _resp = _resp * math.pow(-1, i * (self.order-1))\n resp.append(int(round(_resp)))\n\n return resp", "def _C(self):\n\n # Find the local x and y coordinates at each node\n xi = 0\n yi = 0\n xj = self.width()\n yj = 0\n xm = xj\n ym = self.height()\n xn = 0\n yn = ym\n\n # Calculate the [C] coefficient matrix\n C = array([[1, xi, yi, xi**2, xi*yi, yi**2, xi**3, xi**2*yi, xi*yi**2, yi**3, xi**3*yi, xi*yi**3],\n [0, 0, 1, 0, xi, 2*yi, 0, xi**2, 2*xi*yi, 3*yi**2, xi**3, 3*xi*yi**2],\n [0, -1, 0, -2*xi, -yi, 0, -3*xi**2, -2*xi*yi, -yi**2, 0, -3*xi**2*yi, -yi**3],\n \n [1, xj, yj, xj**2, xj*yj, yj**2, xj**3, xj**2*yj, xj*yj**2, yj**3, xj**3*yj, xj*yj**3],\n [0, 0, 1, 0, xj, 2*yj, 0, xj**2, 2*xj*yj, 3*yj**2, xj**3, 3*xj*yj**2],\n [0, -1, 0, -2*xj, -yj, 0, -3*xj**2, -2*xj*yj, -yj**2, 0, -3*xj**2*yj, -yj**3],\n\n [1, xm, ym, xm**2, xm*ym, ym**2, xm**3, xm**2*ym, xm*ym**2, ym**3, xm**3*ym, xm*ym**3],\n [0, 0, 1, 0, xm, 2*ym, 0, xm**2, 2*xm*ym, 3*ym**2, xm**3, 3*xm*ym**2],\n [0, -1, 0, -2*xm, -ym, 0, -3*xm**2, -2*xm*ym, -ym**2, 0, -3*xm**2*ym, -ym**3],\n\n [1, xn, yn, xn**2, xn*yn, yn**2, xn**3, xn**2*yn, xn*yn**2, yn**3, xn**3*yn, xn*yn**3],\n [0, 0, 1, 0, xn, 2*yn, 0, xn**2, 2*xn*yn, 3*yn**2, xn**3, 3*xn*yn**2],\n [0, -1, 0, -2*xn, -yn, 0, -3*xn**2, -2*xn*yn, -yn**2, 0, -3*xn**2*yn, -yn**3]])\n \n # Return the coefficient matrix\n return C", "def m_c(self) -> np.ndarray:\n assert self._k is not None, \"camera must be calibrated\"\n return forge_projective_matrix(self._k)", "def matmul(x, y):\n return np.matmul(x, y)", "def test_two_qubit_weyl_decomposition_cnot(self):\n for k1l, k1r, k2l, k2r in K1K2S:\n k1 = np.kron(k1l.data, k1r.data)\n k2 = np.kron(k2l.data, k2r.data)\n a = Ud(np.pi / 4, 0, 0)\n self.check_two_qubit_weyl_decomposition(k1 @ a @ k2)", "def cofiCostFunc(self,params, *args):\n\t\tY, R, num_users, num_products, num_features,l = args[0], args[1],args[2], args[3],args[4],args[5]\n\n\t\taux = params.reshape((num_products + num_users, num_features))\n\n\t\tX = aux[0:num_products , :]\n\n\t\tTheta = aux[num_products:, :] \n\n\t\ttest = np.dot(X,Theta.transpose())\n\t\ttest = test - Y\n\t\ttest = np.multiply(test , R)\n\t\ttest = np.power(test,2)\n\t\ttest = test.sum()\n\t\ttest = 0.5 * test\n\n\t\tJ = 0;\n\t\tregularization = (l * 0.5) * np.power(X,2).sum() + np.power(Theta,2).sum()\n\n\t\tJ = test# + regularization\n\n\t\treturn J", "def zzX_mul_term(f, c, k):\n if poly_univariate_p(f):\n return zzx_mul_term(f, c, k)\n elif zzX_zero_p(f):\n return f\n elif zzX_zero_p(c):\n return zzX_zero_of(f)\n else:\n return [ zzX_mul(c, coeff) for coeff in f ] + zzX_zeros_of(f, k, 1)", "def form_matrix_yt(w):\r\n M = np.zeros((len(w),len(w)))\r\n for i in range(len(w)):\r\n for j in 
range(len(w)):\r\n M[i,j] = YoungTableaux(w[i],w[j]).CMNR()\r\n return M", "def method1(self):\n cres=0. # Variable for storing Chern number.\n # The U matrices from Fukui's method; storage...\n Ux=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n \n # ... and calculation of U matrices\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.alleigvecs[:,:,ix ,iy ]\n if ix<self.kS.Nx:\n mat2=self.alleigvecs[:,:,ix+1,iy ]\n else:\n mat2=self.alleigvecs[:,:,1 ,iy ]\n if iy<self.kS.Ny:\n mat3=self.alleigvecs[:,:,ix ,iy+1]\n else:\n mat3=self.alleigvecs[:,:,ix ,1 ]\n Ux[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat2)[:self.NL,:self.NL])\n Uy[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat3)[:self.NL,:self.NL])\n \n # Local estimates of Berry curvature; storage ...\n ftempall=np.zeros((self.kS.Nx,self.kS.Ny),complex)\n # ... and calculation\n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux[ix,iy]*Uy[ix+1,iy]/Ux[ix,iy+1]/Uy[ix,iy])\n ftempall[ix,iy]=ftemp # ... of local Berry curvature ...\n cres+=ftemp/2./pi/1j # ... and of Berry phase (Chern number).\n\n return cres.real, ftempall", "def dot_kf(u, v):\n # TODO: implement the kernel function\n\n counter = 0\n if len(u)==len(v):\n for i in range(len(u)):\n counter = counter + (u[i]*v[i])\n return counter", "def _set_u_matirx(self):\n c_matrix = self.get_c_matrix()\n u_matrix, d_matrix, _ = np.linalg.svd(c_matrix)\n self.u_matrix = np.matrix(u_matrix)", "def reduce_C(self, C_on_basis_vecs):\n self.C_reduced = np.mat(np.array(C_on_basis_vecs, ndmin=2))\n return self.C_reduced", "def reduce_C(self, C_on_basis_vecs):\n self.C_reduced = np.mat(np.array(C_on_basis_vecs, ndmin=2).T)\n return self.C_reduced", "def p_ym_c(pm,px,py,pyx_c,pmx_c):\n pym_c = np.zeros((py.size,pm.size))\n for yi in range(py.size):\n for mi in range(pm.size):\n for xi in range(px.size):\n pym_c[yi,mi] += (1./pm[mi])*pyx_c[yi,xi]*pmx_c[mi,xi]*px[xi]\n return pym_c", "def zzx_mul_term(f, c, k):\n if not c or not f:\n return []\n else:\n return [ c * coeff for coeff in f ] + [INT_ZERO]*k", "def _build_c_phi_matrices(self, t: tf.Tensor) -> tf.Tensor:\n c_phi_matrices = self.kernel.compute_c_phi(t, t)\\\n + tf.expand_dims(tf.eye(self.n_points_int, dtype=tf.float64), 0)\\\n * self.likelihood_variances\n return c_phi_matrices", "def CoTang(M):\n x = [sy.Dummy() for _ in range(nargs(M))]\n fx = [sy.Dummy() for _ in range(len(x))]\n\n y = list(M(*x))\n J = Jac(M)(*x)\n J = sy.Matrix(J).reshape(len(y), len(x))\n\n fy = list(J.T.inv() @ sy.Matrix(fx))\n return sy.lambdify(\n x + fx,\n y + fy,\n 'sympy',\n )", "def matvec(self, x):\n return self * x", "def _mul(*args):\n\treturn functools.reduce(numpy.dot, args)", "def alternate_ls (u_num, Y, P, C, reg):\n\n\t# get # of items/users and # of latent factors\n\t[i_num, f_num] = Y.shape\n\n\t# output buffer\n\tX = np.zeros((u_num, f_num))\n\n\t# precalculate YtY to improve the performance\n\tYtY = Y.T * Y\n\n\t# iterate over each user/item\n\tfor u in range(u_num):\n\n\t\t# store the diagonal elements of the matrix Cu discussed in the paper in a vector\n\t\tCu = C[u,:]\n\n\t\t# store the coresponding row/column of the preference matrix\n\t\tPu = P[u,:]\n\n\t\t# compute Cu-I\n\t\tCu_I = Cu - 1\n\n\t\t# calculate Yt(Cu-I)Y\n\t\tYtCu_IY = np.zeros((f_num, f_num))\n\t\tCuIY = np.multiply(Y, Cu_I.T) # weight each row of Y with Cu-I\n\t\tfor row in range(f_num):\n\t\t\tfor col in range(f_num):\n\t\t\t\tYtCu_IY[row,col] = Y[:,row].T * 
CuIY[:,col]\n\t\t\n\t\t# left term : ((YtCuY + regI)^(-1)) = (YtY + Yt(Cu-I)Y + regI)^(-1)\n\t\tleft_inv = YtY + YtCu_IY + reg*np.eye(f_num)\n\t\tleft = np.linalg.inv(left_inv)\n\n\t\t# right term : YtCuPu\n\t\tright = Y.T * np.multiply(Cu.T, Pu.T)\n\n\t\t# compute the latent factor of the user/item\n\t\tx = left * right\n\t\t\n\t\t# store it in a matrix\n\t\tX[u,:] = x.T\n\n\t# return an MxF or NxF matrix\n\treturn np.matrix(X)", "def matmul(x, y):\n if len(list(y.size())) == 2:\n # if one of them is a vector (i.e. wanting to do MV mult)\n z = torch.zeros(2, x.size()[1], dtype=torch.double, device=x.device)\n z[0] = torch.mv(x[0], y[0]) - torch.mv(x[1], y[1])\n z[1] = torch.mv(x[0], y[1]) + torch.mv(x[1], y[0])\n\n if len(list(y.size())) == 3:\n z = torch.zeros(\n 2, x.size()[1], y.size()[2], dtype=torch.double, device=x.device\n )\n z[0] = torch.matmul(x[0], y[0]) - torch.matmul(x[1], y[1])\n z[1] = torch.matmul(x[0], y[1]) + torch.matmul(x[1], y[0])\n\n return z", "def rhs(t, conc, S_matrix, educt_matrix, kin_par):\n fluxes = calculate_fluxes(conc, S_matrix, educt_matrix, kin_par)\n return np.dot(S_matrix, fluxes)", "def dot_prod(u,v):\n each_product = []\n for i in range(len(u)):\n each_product.append(u[i] * v[i])\n return sum(each_product)", "def ccc_v(y_true, y_pred):\n x = y_true[:, 0]\n y = y_pred[:, 0]\n mx = K.mean(x, axis=0)\n my = K.mean(y, axis=0)\n xm, ym = x - mx, y - my\n rho = K.sum(xm * ym) / (K.sqrt(K.sum(xm ** 2)) * K.sqrt(K.sum(ym ** 2)))\n x_s = K.std(x)\n y_s = K.std(y)\n ccc = 2 * rho * x_s * y_s / (x_s ** 2 + y_s ** 2 + (mx - my) ** 2)\n return ccc", "def get_C(n_c,CV_matrix):\n C = np.zeros((n_c, n_c), dtype=np.float32)\n for i in range(3):\n C += np.asfortranarray(CV_matrix[:, :, i]) @ np.asfortranarray(CV_matrix[:, :, np.mod(i + 2, 3)].T)\n C = (C != 0).astype(np.int32)\n return C", "def matmul():\n\n if RESULT_IN_NVRAM:\n matrix_c = ResultMatrixInDaos()\n else:\n matrix_c = ResultMatrixInMemory()\n\n # This could be trivially optimized by reordering indexes\n # and caching either a_block or b_block (assuming C in-memory).\n # *However* it would result in unfair comparisons with the \n # previous implementation used elsewhere.\n # Using the naive algorithm makes sense for a raw comparison.\n for i in range(MATRIXSIZE):\n for j in range(MATRIXSIZE):\n partial_result_block = np.zeros((BLOCKSIZE, BLOCKSIZE))\n\n for k in range(MATRIXSIZE):\n a_block = np.fromstring(\n DAOS_KV[\"A%02d%02d\" % (i, k)],\n dtype=NP_FROMSTRING_DTYPE\n ).reshape((BLOCKSIZE, BLOCKSIZE))\n\n b_block = np.fromstring(\n DAOS_KV[\"B%02d%02d\" % (k, j)],\n dtype=NP_FROMSTRING_DTYPE\n ).reshape((BLOCKSIZE, BLOCKSIZE))\n\n partial_result_block += a_block @ b_block\n \n matrix_c[i,j] = partial_result_block\n\n return matrix_c", "def make_k_matrix(self):\r\n K = self.uv_vol + self.Epsilon * self.guv_vol + \\\r\n (self.Epsilon / self.Beta) * self.uv_bound\r\n return K", "def _factorsY(self, inputs):\n return tensor.dot(inputs[1], self.wyf)", "def productGaussian(mu1, C1, mu2, C2):\n Cn = C1 + mat(.0001*identity(2))\n K = Cn*linalg.inv(Cn+C2)\n mu = mu1 + K*(mu2-mu1)\n C = Cn - K*Cn\n #denom = linalg.inv(C1+C2)\n #mu = denom*(C1*mu2+C2*mu1)\n #C = C1*denom*C2\n return mu,C", "def _mps_CA(self, C, A):\n return np.tensordot(C, A, axes=(1, 0))", "def build_cooc_matrix(users):\n nprods = constants.N_PRODUCTS\n M = scipy.sparse.dok_matrix((nprods, nprods), dtype=np.int32)\n i = 0\n for user in users:\n order = user.orders[-1]\n for pid in user.sorted_pids:\n focal_ix = pid-1\n prevs = paired_pids(user, 
pid)\n for prev in prevs:\n key = (focal_ix, prev-1)\n #n = M.get(key, 0)\n # further centi-optimization\n n = dict.get(M, key, 0)\n M.update({key:n+1})\n # Above is like 5x faster than below (and this inner loop is current bottleneck)\n #M[focal_ix, prev-1] += 1\n i += 1\n if i % 10000 == 0:\n logging.info('Processed {} users'.format(i))\n\n return M", "def mcc(self):\n tp = self.tp\n tn = self.tn\n fp = self.fp\n fn = self.fn\n return tp * tn / np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))", "def prod_par_scal(self,k):\n return Mat(self.D, lambda i,j: k*self.F(i,j))", "def Cijkl(C):\n c = np.zeros(shape=(3, 3, 3, 3))\n CC = np.zeros(shape=(9, 9))\n CC[0:6, 0:6] = C[0:6, 0:6]\n CC[6:9, 6:9] = C[3:6, 3:6]\n CC[0:6, 6:9] = C[0:6, 3:6]\n CC[6:9, 0:6] = C[3:6, 0:6]\n\n c[0, 0, 0, 0] = CC[0, 0]\n c[0, 0, 1, 1] = CC[0, 1]\n c[0, 0, 2, 2] = CC[0, 2]\n c[0, 0, 1, 2] = CC[0, 3]\n c[0, 0, 2, 0] = CC[0, 4]\n c[0, 0, 0, 1] = CC[0, 5]\n c[0, 0, 2, 1] = CC[0, 6]\n c[0, 0, 0, 2] = CC[0, 7]\n c[0, 0, 1, 0] = CC[0, 8]\n\n c[1, 1, 0, 0] = CC[1, 0]\n c[1, 1, 1, 1] = CC[1, 1]\n c[1, 1, 2, 2] = CC[1, 2]\n c[1, 1, 1, 2] = CC[1, 3]\n c[1, 1, 2, 0] = CC[1, 4]\n c[1, 1, 0, 1] = CC[1, 5]\n c[1, 1, 2, 1] = CC[1, 6]\n c[1, 1, 0, 2] = CC[1, 7]\n c[1, 1, 1, 0] = CC[1, 8]\n\n c[2, 2, 0, 0] = CC[2, 0]\n c[2, 2, 1, 1] = CC[2, 1]\n c[2, 2, 2, 2] = CC[2, 2]\n c[2, 2, 1, 2] = CC[2, 3]\n c[2, 2, 2, 0] = CC[2, 4]\n c[2, 2, 0, 1] = CC[2, 5]\n c[2, 2, 2, 1] = CC[2, 6]\n c[2, 2, 0, 2] = CC[2, 7]\n c[2, 2, 1, 0] = CC[2, 8]\n\n c[1, 2, 0, 0] = CC[3, 0]\n c[1, 2, 1, 1] = CC[3, 1]\n c[1, 2, 2, 2] = CC[3, 2]\n c[1, 2, 1, 2] = CC[3, 3]\n c[1, 2, 2, 0] = CC[3, 4]\n c[1, 2, 0, 1] = CC[3, 5]\n c[1, 2, 2, 1] = CC[3, 6]\n c[1, 2, 0, 2] = CC[3, 7]\n c[1, 2, 1, 0] = CC[3, 8]\n\n c[2, 0, 0, 0] = CC[4, 0]\n c[2, 0, 1, 1] = CC[4, 1]\n c[2, 0, 2, 2] = CC[4, 2]\n c[2, 0, 1, 2] = CC[4, 3]\n c[2, 0, 2, 0] = CC[4, 4]\n c[2, 0, 0, 1] = CC[4, 5]\n c[2, 0, 2, 1] = CC[4, 6]\n c[2, 0, 0, 2] = CC[4, 7]\n c[2, 0, 1, 0] = CC[4, 8]\n\n c[0, 1, 0, 0] = CC[5, 0]\n c[0, 1, 1, 1] = CC[5, 1]\n c[0, 1, 2, 2] = CC[5, 2]\n c[0, 1, 1, 2] = CC[5, 3]\n c[0, 1, 2, 0] = CC[5, 4]\n c[0, 1, 0, 1] = CC[5, 5]\n c[0, 1, 2, 1] = CC[5, 6]\n c[0, 1, 0, 2] = CC[5, 7]\n c[0, 1, 1, 0] = CC[5, 8]\n\n c[2, 1, 0, 0] = CC[6, 0]\n c[2, 1, 1, 1] = CC[6, 1]\n c[2, 1, 2, 2] = CC[6, 2]\n c[2, 1, 1, 2] = CC[6, 3]\n c[2, 1, 2, 0] = CC[6, 4]\n c[2, 1, 0, 1] = CC[6, 5]\n c[2, 1, 2, 1] = CC[6, 6]\n c[2, 1, 0, 2] = CC[6, 7]\n c[2, 1, 1, 0] = CC[6, 8]\n\n c[0, 2, 0, 0] = CC[7, 0]\n c[0, 2, 1, 1] = CC[7, 1]\n c[0, 2, 2, 2] = CC[7, 2]\n c[0, 2, 1, 2] = CC[7, 3]\n c[0, 2, 2, 0] = CC[7, 4]\n c[0, 2, 0, 1] = CC[7, 5]\n c[0, 2, 2, 1] = CC[7, 6]\n c[0, 2, 0, 2] = CC[7, 7]\n c[0, 2, 1, 0] = CC[7, 8]\n\n c[1, 0, 0, 0] = CC[8, 0]\n c[1, 0, 1, 1] = CC[8, 1]\n c[1, 0, 2, 2] = CC[8, 2]\n c[1, 0, 1, 2] = CC[8, 3]\n c[1, 0, 2, 0] = CC[8, 4]\n c[1, 0, 0, 1] = CC[8, 5]\n c[1, 0, 2, 1] = CC[8, 6]\n c[1, 0, 0, 2] = CC[8, 7]\n c[1, 0, 1, 0] = CC[8, 8]\n return c", "def uCen(self, k, m, z):\n result = (1.-self.pOff) + self.pOff * self.uOff(k, m, z)\n result *= self.Ncen(m) / self.nBarGal(1./(1.+z))\n return result", "def classical(m1,m2):\n \n n = m1.shape\n result = np.zeros(n, dtype = int)\n\n for i in range(n[0]):\n for j in range(n[0]):\n for k in range(n[0]):\n result[i][j] += m1[i][k] * m2[k][j]\n return result", "def scalar_mult(v, u):\n return [v[i] * u[i] for i in range(len(v))]", "def get_Cm(self, K0, K1):\n # if(self.weight>=1.5):\n # raise ValueError,\" Error bounds only accurate for k<1.5! 
got k=%s\" % self.weight\n twominusk = mp2 - self._weight\n tmp = mpmath.mpf(len(self.multiplier().weil_module().D()))\n tmp1 = mppi * mp2\n tmp1 = mpmath.power(tmp1, twominusk)\n tmp3 = mpmath.zeta(twominusk)\n if(K0 == 0):\n tmp4 = 1\n else:\n tmp4 = mpmath.power(K0, 1 - self._weight)\n g1 = mpmath.gamma(1 - self._weight)\n g2 = mpmath.gamma(2 - self._weight)\n\n Cm = mp2 / g1 + mp4 * tmp1 / g1 / g2 * tmp * tmp3 * tmp4\n return Cm", "def scalarProduct(self, factor):\n scalar_product = np.dot(self.components(), factor.components())\n return scalar_product", "def muscovite():\n\n rho = 2834.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 181.; C[0,1] = 48.8; C[0,2] = 25.6; C[0,3] = 0.; C[0,4] = -14.2; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 178.4; C[1,2] = 21.2; C[1,3] = 0.; C[1,4] = 1.1; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 58.6; C[2,3] = 0.; C[2,4] = 1.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 16.5; C[3,4] = 0.; C[3,5] = -5.2\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 19.5; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 72.\n\n return C, rho", "def least_squares(Cui, X, Y, regularization, num_threads):\n users, factors = X.shape\n YtY = Y.T.dot(Y)\n\n for u in range(users):\n # accumulate YtCuY + regularization*I in A\n A = YtY + regularization * np.eye(factors)\n\n # accumulate YtCuPu in b\n b = np.zeros(factors)\n\n for i, confidence in nonzeros(Cui, u):\n factor = Y[i]\n A += (confidence - 1) * np.outer(factor, factor)\n b += confidence * factor\n\n # Xu = (YtCuY + regularization * I)^-1 (YtCuPu)\n X[u] = np.linalg.solve(A, b)", "def p_mx_c(pm,px,py,pyx_c,pym_c,beta):\n \n pmx_c = np.zeros((pm.size,px.size)) # P(M|X) matrix to be returned\n for mi in range(pm.size):\n for xi in range(px.size):\n pmx_c[mi,xi] = pm[mi] * np.exp(-beta * entropy(pyx_c[:,xi], pym_c[:,mi], base=2))\n z = pmx_c.sum(axis=0)\n pmx_c /= z #Normalize\n \n \t\n return pmx_c, z", "def K(self, u, v):\n return (self.L(u, v) * self.N(u, v) - np.square(self.M(u, v))) / \\\n (self.E(u, v) * self.G(u, v) - np.square(self.F(u, v)))", "def scalar_multiply(c, v):\n\treturn [c * v_i for v_i in v]", "def calculate_xi(self, postJ):\n # get output of rec model\n self.batch_mu = self.mu_net(postJ)\n self.batch_u = self.u_net(postJ)\n self.batch_unc_d = self.unc_d_net(postJ)\n\n # add extra dim to batch_u, so it gets treated as column vectors when\n # iterated over\n\n self.batch_u = tf.expand_dims(self.batch_u, -1)\n\n def get_cov(acc, inputs):\n # convert output of rec model to rank-1 covariance matrix\n\n # use softplus to get positive constrained d, minimum of -15\n # since softplus will turn low numbers into 0, which become NaNs\n # when inverted\n u, unc_d = inputs\n d = tf.nn.softplus(tf.maximum(unc_d, -15.0))\n D_inv = tf.diag(1.0 / d)\n eta = 1.0 / (tf.matmul(tf.matmul(tf.transpose(u), D_inv), u) + 1.0)\n C = D_inv - eta*tf.matmul(tf.matmul(tf.matmul(D_inv, u),\n tf.transpose(u)), D_inv)\n Tr_C = tf.trace(C)\n ld_C = tf.log(eta) - tf.reduce_sum(tf.log(d)) # eq 20 in DLGM\n # coeff = ((1 - T.sqrt(eta)) / (u.T.dot(D_inv).dot(u)))\n # simplified coefficient below is more stable as u -> 0\n # original coefficient from paper is above\n coeff = eta / (1.0 + tf.sqrt(eta))\n R = (tf.sqrt(D_inv) - coeff * tf.matmul\n (tf.matmul(tf.matmul(D_inv, u), tf.transpose(u)),\n tf.sqrt(D_inv)))\n return Tr_C, ld_C, R\n\n (self.batch_Tr_C, self.batch_ld_C, self.batch_R) = tf.scan(\n 
get_cov, [self.batch_u, self.batch_unc_d],\n initializer=(0.0, tf.zeros([1, 1]), tf.diag(self.batch_unc_d[0])))\n\n self.batch_xi = (self.batch_mu +\n (tf.squeeze(tf.matmul(self.batch_R,\n (tf.expand_dims(tf.random_normal(\n [tf.shape(self.batch_R)[0],\n self.num_units]), -1))))))", "def _mps_AC(self, A, C):\n return np.tensordot(A, C, axes=(2, 0))", "def matrices_TC(l, omega, S, cn, csn, rhos, rho):\n MN = (np.linalg.inv(matrix_M1(l, omega, S, cn, csn, rhos, rho))\n * matrix_N1(l, omega, S, cn)\n )\n KL = (np.linalg.inv(matrix_K1(l, omega, S, cn, csn, rhos, rho))\n * matrix_L1(l, omega, S, cn)\n )\n T = np.zeros((3,3))\n C = np.zeros((3,3))\n T[:2,:2] = MN[:2]\n T[ 3, 3] = KL[0]\n C[:2,:2] = MN[2:]\n C[ 3, 3] = KL[1]\n return T, C", "def Q2C(self, q):\n\n #q = q.squeeze();\n C = np.empty((3,3));\n\tC[0,0] = (q[0]**2.0) + (q[1]**2.0) - (q[2]**2.0) - (q[3]**2.0);\n\tC[0,1] = 2.0 * ((q[1]*q[2]) + (q[0]*q[3]));\n\tC[0,2] = 2.0 * ((q[1]*q[3]) - (q[0]*q[2]));\n\n\tC[1,0] = 2.0 * ((q[1]*q[2]) - (q[0]*q[3]));\n\tC[1,1] = (q[0]**2.0) - (q[1]**2.0) + (q[2]**2.0) - (q[3]**2.0);\n\tC[1,2] = 2.0 * ((q[2]*q[3]) + (q[0]*q[1]));\n\n\tC[2,0] = 2.0 * ((q[1]*q[3]) + (q[0]*q[2]));\n\tC[2,1] = 2.0 * ((q[2]*q[3]) - (q[0]*q[1]));\n\tC[2,2] = (q[0]**2.0) - (q[1]**2.0) - (q[2]**2.0) + (q[3]**2.0);\n\n return C", "def uCen(self, k, m, z):\n result = self.Ncen(m) / self.nBarGal(1./(1.+z))\n return result", "def _calc_C(self, lambdify=True):\n\n C = None\n C_func = None\n # check to see if we have our term saved in file\n C, C_func = self._load_from_file('C', lambdify)\n\n if C is None and C_func is None:\n # if no saved file was loaded, generate function\n print('Generating centrifugal and Coriolis compensation function')\n\n # first get the inertia matrix\n M = self._calc_M(lambdify=False)\n\n # C_{kj} = sum_i c_{ijk}(q) \\dot{q}_i\n # c_{ijk} = 1/2 * sum_i (\\frac{\\partial M_{kj}}{\\partial q_j} +\n # \\frac{\\partial M_{ki}}{\\partial q_j} - \\frac{\\partial M_{ij}}\n # {\\partial q_k})\n C = sp.zeros(self.N_JOINTS, self.N_JOINTS)\n for kk in range(self.N_JOINTS):\n for jj in range(self.N_JOINTS):\n for ii in range(self.N_JOINTS):\n dMkjdqi = M[kk, jj].diff(self.q[ii])\n dMkidqj = M[kk, ii].diff(self.q[jj])\n dMijdqk = M[ii, jj].diff(self.q[kk])\n C[kk, jj] += .5 * (dMkjdqi + dMkidqj - dMijdqk) * self.dq[ii]\n C[kk, jj] = C[kk, jj]\n C = sp.Matrix(C)\n\n # save to file\n abr_control.utils.os_utils.makedirs(\n '%s/C' % self.config_folder)\n cloudpickle.dump(C, open(\n '%s/C/C' % self.config_folder, 'wb'))\n\n if lambdify is False:\n # if should return expression not function\n return C\n\n if C_func is None:\n C_func = self._generate_and_save_function(\n filename='C', expression=C,\n parameters=self.q+self.dq)\n return C_func", "def product1(a, b, c) :\n return a * b * c", "def matrix_deflation(X_curr, Y_curr, X_orig, Y_orig, u, v):\n Xp = X_curr\n Yp = Y_curr\n #u = u / (np.sqrt(np.sum(u**2)+1e-7))\n #v = v / (np.sqrt(np.sum(v**2)+1e-7))\n\n qx = np.dot(Xp.T,np.dot(X_orig,u))\n qx = qx / (np.sqrt(np.sum(qx**2)+1e-7))\n #qx = qx.astype('float16')\n Xp = Xp - np.dot(Xp,qx).dot(qx.T)\n X = Xp.reshape(1,Xp.shape[0],Xp.shape[1])\n\n qy = np.dot(Yp.T,np.dot(Y_orig,v))\n qy = qy / (np.sqrt(np.sum(qy**2)+1e-7))\n #qy = qy.astype('float16')\n Yp = Yp - np.dot(Yp,qy).dot(qy.T)\n Y = Yp.reshape(1,Yp.shape[0],Yp.shape[1])\n\n return X, Y", "def calc_cmatrix(self):\n tw = self.twiss_df\n res = self._results_df\n\n LOG.debug(\"Calculating CMatrix.\")\n with timeit(lambda t:\n LOG.debug(\" CMatrix calculated in 
{:f}s\".format(t))):\n\n j = np.array([[0., 1.],\n [-1., 0.]])\n rs = np.reshape(tw.as_matrix(columns=[\"R11\", \"R12\",\n \"R21\", \"R22\"]),\n (len(tw), 2, 2))\n cs = np.einsum(\"ij,kjn,no->kio\",\n -j, np.transpose(rs, axes=(0, 2, 1)), j)\n cs = np.einsum(\"k,kij->kij\", (1 / np.sqrt(1 + np.linalg.det(rs))), cs)\n\n g11a = 1 / np.sqrt(tw.loc[:, \"BETX\"])\n g12a = np.zeros(len(tw))\n g21a = tw.loc[:, \"ALFX\"] / np.sqrt(tw.loc[:, \"BETX\"])\n g22a = np.sqrt(tw.loc[:, \"BETX\"])\n gas = np.reshape(np.array([g11a, g12a,\n g21a, g22a]).T,\n (len(tw), 2, 2))\n\n ig11b = np.sqrt(tw.loc[:, \"BETY\"])\n ig12b = np.zeros(len(tw))\n ig21b = -tw.loc[:, \"ALFY\"] / np.sqrt(tw.loc[:, \"BETY\"])\n ig22b = 1. / np.sqrt(tw.loc[:, \"BETY\"])\n igbs = np.reshape(np.array([ig11b, ig12b,\n ig21b, ig22b]).T,\n (len(tw), 2, 2))\n cs = np.einsum(\"kij,kjl,kln->kin\", gas, cs, igbs)\n gammas = np.sqrt(1 - np.linalg.det(cs))\n\n res.loc[:, \"GAMMA_C\"] = gammas\n\n res.loc[:, \"F1001_C\"] = ((cs[:, 0, 0] + cs[:, 1, 1]) * 1j +\n (cs[:, 0, 1] - cs[:, 1, 0])) / 4 / gammas\n res.loc[:, \"F1010_C\"] = ((cs[:, 0, 0] - cs[:, 1, 1]) * 1j +\n (-cs[:, 0, 1]) - cs[:, 1, 0]) / 4 / gammas\n\n res.loc[:, \"C11\"] = cs[:, 0, 0]\n res.loc[:, \"C12\"] = cs[:, 0, 1]\n res.loc[:, \"C21\"] = cs[:, 1, 0]\n res.loc[:, \"C22\"] = cs[:, 1, 1]\n\n LOG.debug(\" Average coupling amplitude |F1001|: {:g}\".format(np.mean(\n np.abs(res.loc[:, \"F1001_C\"]))))\n LOG.debug(\" Average coupling amplitude |F1010|: {:g}\".format(np.mean(\n np.abs(res.loc[:, \"F1010_C\"]))))\n LOG.debug(\" Average gamma: {:g}\".format(np.mean(\n np.abs(res.loc[:, \"GAMMA_C\"]))))\n\n self._log_added('GAMMA_C', 'F1001_C', 'F1010_C', 'C11', 'C12', 'C21', 'C22')", "def __updateMatC(self):\n\t\tif self.regScheme == 2:\n\t\t\tfor id1 in range(self.nTemplates):\n\t\t\t\tself.C[id1,id1] = 1.0 / self.w0[id1]**2", "def u(self,c,x):\r\n alpha = self.alpha ; sigma = self.sigma\r\n \r\n ctilde = c - alpha*x\r\n u = ctilde**(1-sigma) / (1-sigma)\r\n \r\n return u", "def calc_k_dot_r(self):\n\t\n\tself.k_dot_r = self.k[0]*self.rij[0,:,:,:] + self.k[1]*self.rij[1,:,:,:] + self.k[2]*self.rij[2,:,:,:]\n\t\n\treturn", "def calc_zchi2_batch(spectra, tdata, weights, flux, wflux, nz, nbasis, solve_matrices_algorithm=\"PCA\", use_gpu=False, fullprecision=True):\n zchi2 = np.zeros(nz)\n if (weights.sum() == 0):\n zchi2[:] = 9e99\n zcoeff = np.zeros((nz, nbasis))\n return (zchi2, zcoeff)\n if (use_gpu):\n global cp\n import cupy as cp\n #On the GPU, all operations are batch operations for all templates\n #in parallel.\n\n #1) batch_dot_product_sparse will compute dot products of all\n #spectra with all templates in batch and return a 3D array of\n #size (nz x ncols x nbasis).\n Tbs = batch_dot_product_sparse(spectra, tdata, nz, use_gpu)\n if (cp_memcheck):\n #Free memory on consumer grade GPUs with low resources\n mpool = cp.get_default_memory_pool()\n mpool.free_all_blocks()\n\n #2) On the GPU, M and y are computed for all templates at once\n #CUPY swapaxes is the equivalent of the transpose in CPU mode\n #and the @ matrix multiplication operator performs a dot\n #product for each template.\n\n ###!!! 
NOTE - there are 3 different options for calculating the\n ### M and y arrays -\n ### A) Straight CUPY, which works well on perlmutter with a\n ### runtime of 6.2s on 1 GPU and 2.0s on 4 GPUs, but is\n ### unusably slow on Volta generation GPUs (16.8s for only\n ### 10 targets on a 1660 Super).\n ### B) calc_M_y_batch, the custom CUDA kernel, which is the\n ### fastest at 2.9s on 1 GPU and 0.7s on 4 GPUs (and 0.7s\n ### for 10 targets on a 1660 Super) but is the most difficult\n ### from a maintenance perspective\n ### C) Use the calc_batch_dot_product_3d3d_gpu kernel to offload\n ### only the matrix multiplication for M (and transpose of\n ### Tbs) but use CUPY for everything else. This strikes a\n ### middle ground that is very maintainable but removes the\n ### bottleneck of the CUPY Volta issue. 5.7s on 1 GPU and\n ### 1.8s on 4 GPUs on Perlmutter; 1.6s for 10 targets on\n ### 1660 Super.\n ###!!! NOTE - uncomment the 2 lines below to run (A)\n #all_M = Tbs.swapaxes(-2, -1) @ (weights[None, :, None] * Tbs)\n #all_y = (Tbs.swapaxes(-2, -1) @ wflux)\n ###!!! NOTE - uncomment the below line to run (B)\n #(all_M, all_y) = calc_M_y_batch(Tbs, weights, wflux, nz, nbasis)\n ###!!! NOTE - uncomment the 2 lines below to run (C)\n all_M = calc_batch_dot_product_3d3d_gpu(Tbs, (weights[None, :, None] * Tbs), transpose_a=True, fullprecision=fullprecision)\n all_y = (Tbs.swapaxes(-2, -1) @ wflux)\n ###!!! NOTE - uncomment the 2 lines below to run an alternative\n ### version of (C) that does the transpose on the CPU - this seems\n ### to needlessly waste time though\n #all_M = calc_batch_dot_product_3d3d_gpu(cp.ascontiguousarray(Tbs.swapaxes(-2, -1)), (weights[None, :, None] * Tbs))\n #all_y = (Tbs.swapaxes(-2, -1) @ wflux)\n\n #3) Use new helper method solve_matrices to use appropriate method\n #for this template to solve for zcoeff in batch for all_M and all_y.\n #There is no Error thrown by cupy's version of linalg.solve so just\n #need to catch NotImplementedError.\n try:\n zcoeff = solve_matrices(all_M, all_y, solve_algorithm=solve_matrices_algorithm, use_gpu=True)\n except NotImplementedError:\n zchi2[:] = 9e99\n zcoeff = np.zeros((nz, nbasis))\n return (zchi2, zcoeff)\n\n #4) calc_batch_dot_product_3d2d will compute the dot product\n #of Tbs and zcoeff for all templates in parallel.\n #It is the same as model[i,:,:] = Tbs[i,:,:].dot(zcoeff[i,:])\n model = calc_batch_dot_product_3d2d(Tbs, zcoeff, use_gpu)\n\n #5) On the GPU, (flux-model)*(flux-model) is faster than\n #(flux-model)**2. The @ matrix multiplication operator performs\n #a dot product for each template. 
get() copies the data back\n #from the GPU to the numpy array allocated for zchi2.\n zchi2[:] = (((flux - model)*(flux-model)) @ weights).get()\n #Copy data from GPU to numpy arrays\n zcoeff = zcoeff.get()\n\n if (cp_memcheck):\n #Free memory on consumer grade GPUs with low resources\n del Tbs\n del all_M\n del all_y\n del model\n mpool = cp.get_default_memory_pool()\n mpool.free_all_blocks()\n else:\n zcoeff = np.zeros((nz, nbasis))\n #On the CPU, the templates are looped over and all operations\n #are performed on one template at a time.\n\n for i in range(nz):\n #1) dot_product_sparse_one will compute dot products of all\n #spectra with ONE template and return a 2D array of size\n #(ncols x nbasis)\n Tb = dot_product_sparse_one(spectra, tdata, i)\n\n #2) On the CPU, M and y are computed for each template\n M = Tb.T.dot(np.multiply(weights[:,None], Tb))\n y = Tb.T.dot(wflux)\n\n #3) Use new helper method solve_matrices to use appropriate method\n #for this template to solve for zcoeff for each M, y.\n #Catch LinAlgError and NotImplementedError\n try:\n zcoeff[i,:] = solve_matrices(M, y, solve_algorithm=solve_matrices_algorithm, use_gpu=False)\n except np.linalg.LinAlgError:\n zchi2[i] = 9e99\n continue\n except NotImplementedError:\n zchi2[i] = 9e99\n continue\n\n #4) Calculate dot products individually for each template\n model = Tb.dot(zcoeff[i,:])\n\n #5) Calculate this zchi2 element individually for each template\n zchi2[i] = np.dot( (flux - model)**2, weights )\n return (zchi2, zcoeff)", "def _compute_mu_factor2(*input_mols):\n mu_factor = 1\n for mol in input_mols:\n mu_factor *= np.prod(fact(mol))\n return mu_factor", "def _pmatrix(kn_u, kn_d, thickness):\n p = np.zeros((kn_u.size, 4, 4), np.complex128)\n\n p0 = np.exp(complex(0, 1) * kn_u * thickness)\n p1 = np.exp(complex(0, 1) * kn_d * thickness)\n\n p[:, 0, 0] = 1 / p0\n p[:, 1, 1] = p0\n p[:, 2, 2] = 1 / p1\n p[:, 3, 3] = p1\n\n return p", "def method2(self):\n cres=np.zeros(self.NL,dtype=float) # List of invariants\n # The U matrices from Fukui's method; storage...\n Ux_loc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy_loc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n \n for il in range(self.NL):\n # ... 
and calculation of U matrices for each layer\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.LDM[il,ix ,iy ,:,:]\n mat2=self.LDM[il,(ix%self.kS.Nx)+1 ,iy ,:,:]\n mat3=self.LDM[il,ix ,(iy%self.kS.Ny)+1 ,:,:]\n \n Ux_loc[ix,iy]=np.dot(np.conj(mat1.T),mat2)[1,1]\n Uy_loc[ix,iy]=np.dot(np.conj(mat1.T),mat3)[1,1]\n \n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux_loc[ix,iy]*Uy_loc[ix+1,iy]/Ux_loc[ix,iy+1]/Uy_loc[ix,iy])\n cres[il]+=(ftemp/2./pi/1j).real # Layer specific topological invariant\n \n return cres", "def trans_cross_coeff(K, J, df, fmax):\n\n # Frequencies to evaluate \n Np = np.floor(fmax/df)\n n = np.arange(-Np, Np+1, dtype=np.int64)\n N = n.size\n\n f = df * n\n\n # Evaluate K and J at grid points\n fxy = np.meshgrid(f, f)\n Jm = J(fxy)\n Km = K(fxy)\n\n # Compute TCC via FFT\n Jmf = np.fft.fft2(Jm)\n Kmf = np.fft.fft2(Km)\n\n i = np.broadcast_to(np.reshape(np.arange(N), (N, 1, 1, 1)), [N]*4)\n j = np.broadcast_to(np.reshape(np.arange(N), (1, N, 1, 1)), [N]*4)\n k = np.broadcast_to(np.reshape(np.arange(N), (1, 1, N, 1)), [N]*4)\n l = np.broadcast_to(np.reshape(np.arange(N), (1, 1, 1, N)), [N]*4)\n\n TCCf = df**2 * (Jmf[np.mod(-i - k, N), np.mod(-j - l, N)]\n * Kmf[np.mod(i, N), np.mod(j, N)]\n * np.conj(Kmf[np.mod(-k, N), np.mod(-l, N)]))\n\n TCC = np.fft.fftshift(np.fft.ifftn(TCCf))\n\n return TCC, f", "def p_m(pmx_c,px):\n pm = np.zeros(pmx_c.shape[0])\n for mi in range(pm.size):\n for xi in range(px.size):\n pm[mi] += pmx_c[mi,xi]*px[xi]\n return pm", "def product_on_basis(self, t1, t2):\n return tensor( (module.monomial(x1)*module.monomial(x2) for (module, x1, x2) in zip(self._sets, t1, t2)) ) #.", "def abc_matrix(a, b, c):\n ax = np.linalg.norm(a)\n a_hat = a/ax\n bx = np.dot(b, a_hat)\n by = np.linalg.norm(np.cross(a_hat, b))\n cx = np.dot(c, a_hat)\n axb = np.cross(a,b)\n axb_hat = axb / np.linalg.norm(axb)\n cy = np.dot(c, np.cross(axb_hat, a_hat))\n cz = np.dot(c, axb_hat)\n return np.array([[ax, bx, cx],[0, by, cy],[0 , 0, cz]])", "def matrix_mult(m1, m2):\n pass", "def mul(Z,X,Y):", "def scalar_multiply(c, v):\n return [c * v_i for v_i in v]", "def scalar_multiply(c, v):\n return [c * v_i for v_i in v]", "def get_C_matrix(ua, ub, ud, shoe=None):\n if shoe is None:\n # default to a single deck shoe\n shoe = {i: 4 for i in range(1, 10)}\n shoe[10] = 16\n #\n shoe[ua] -= 1\n shoe[ub] -= 1\n shoe[ud] -= 1\n\n C = np.ones([10, 10])\n dealer_probabilities = dealer_probs()\n\n total_weight = 0\n for da in range(1, 11):\n da_weight = shoe.get(da, 0)\n C[da - 1] *= da_weight # weighted prob of getting da\n if da_weight == 0:\n continue\n shoe[da] -= 1 # modify shoe\n for db in range(1, 11):\n db_weight = shoe.get(db, 0)\n total_weight += da_weight * db_weight\n C[da - 1][db - 1] *= db_weight # weighted prob of getting db\n if db_weight == 0:\n continue\n shoe[db] -= 1 # modify shoe\n\n bob_hand = (ub + db + 10, False) if (ub == 1 or db == 1) else (ub + db, True)\n dealer_hand = (ud + 10, False) if ud == 1 else (ud, True)\n C[da - 1][db - 1] *= hit_stand_ev_diff(bob_hand, shoe, dealer_hand, dealer_probabilities)\n shoe[db] += 1 # restore shoe\n shoe[da] += 1\n return C / total_weight", "def test_matrix_product(self, use_cache):\n\n key = jrandom.PRNGKey(0)\n dim = 50\n max_power = 25\n\n matrix = jrandom.normal(key, (dim, dim)) / 10\n vector = jnp.ones((dim,), dtype=jnp.float32)\n\n if use_cache:\n mpstate = model_utils.CachedMatrixPowerState.precompute(matrix, max_power)\n else:\n mpstate = 
model_utils.LazyMatrixPowerState(matrix)\n\n for t in range(max_power):\n result = mpstate.matrix_power_multiply(vector, t)\n expected = np.linalg.matrix_power(matrix, t) @ vector\n\n np.testing.assert_array_almost_equal(result, expected, decimal=1)", "def test03(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n if self.rootdir:\n dirc, dird = self.rootdir+'.c', self.rootdir+'.d'\n else:\n dirc, dird = None, None\n c = bcolz.carray(a, rootdir=dirc)\n d = bcolz.carray(b, rootdir=dird)\n cr = bcolz.eval(\"a * d\")\n nr = a * b\n # print \"bcolz.eval ->\", cr\n # print \"numpy ->\", nr\n assert_array_equal(cr[:], nr, \"eval does not work correctly\")", "def naive_matrix_vector_dot(x, y):\n assert len(x.shape) == 2\n assert len(y.shape) == 1\n assert x.shape[1] == y.shape[0]\n\n z = np.zeros(x.shape[0])\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n z[i] += x[i, j] * y[j]\n return z", "def coefficients(k, xi, x):\n\n import pyweno.cnonuniform\n\n x = np.asarray(x, np.float64)\n xi = np.asarray(xi, np.float64)\n\n nc = len(x) - 1\n n = len(xi)\n c = np.zeros((nc, n, k, k), np.float64)\n beta = np.zeros((nc, k, k, k), np.float64)\n varpi = np.zeros((nc, n, k), np.float64)\n\n pyweno.cnonuniform.nonuniform_coeffs(k, xi, x, c, beta, varpi)\n\n return c, beta, varpi", "def __mul__(self, other):\n if isinstance(other, Vector):\n # Matrix vector product\n v = Vector(list())\n for n in range(len(other.vectors)):\n v += scale(other.vectors[n][n], self.vectors[n])\n return v\n elif isinstance(other, Matrix):\n # Matrix matrix product\n if self.n != other.m:\n raise ValueError(\"Wrong fucking sizes, nøøb\")\n\n selfVectors = self.vectors\n selfColVectors = self.transpose()\n otherVectors = other.vectors\n otherColVectors = other.transpose()\n vectors = list()\n for col in range(other.n):\n cordinator = []\n\n for row in range(self.m):\n coord = 0\n\n for k in range(other.m):\n coord += (\n selfVectors[row].coords[k]\n * otherColVectors.vectors[col].coords[k]\n )\n\n cordinator.append(coord)\n\n v = Vector(cordinator)\n vectors.append(v)\n matrix = Matrix(vectors)\n matrix = matrix.transpose()\n return matrix\n elif isinstance(other, int) or isinstance(other, float): # Skalering af matrix\n for i in range(len(self.vectors)):\n self.vectors[i] *= other\n else:\n raise ValueError(\n \"Can only multiply Matrix with Matrix, Vector, Integer or Float\"\n )", "def complex_multiplication(c1,c2,cr):\n cr[0] = c1[0]*c2[0] - c1[1]*c2[1]\n cr[1] = c1[0]*c2[1] + c1[1]*c2[0]\n return cr", "def _canonical_kr(B, C):\n n, p = B.shape\n m, pC = C.shape\n A = np.zeros((n * m, p))\n for k in range(B.shape[-1]):\n A[:, k] = np.kron(B[:, k], C[:, k])\n return A", "def proyeccion(v1, v2):\n prod = np.dot(v1, v2)\n N = np.zeros((len(prod), len(v2)))\n\n for i in range(len(N)):\n N[i, :] = prod[i] * v2\n return N", "def compCGP_C(M):\n # we store the coedd as a lower triangular matrix\n # random polynomial coefficients\n c = 0.5 * np.random.uniform(-1.0, -0.45, size=(M + 1, M + 1)) +\\\n 0.5 * np.random.uniform(0.45, 1.0, size=(M + 1, M + 1))\n for i in np.arange(M + 1):\n c[i, :] /= 2**(np.arange(M + 1) + i)\n c /= 1.5\n c = np.tril(c)\n c[0, 0] = 0\n c[1, 0] = 0\n c[1, 1] = 1\n\n return c", "def kkMul(*args):\n if (None in args):\n return None\n product = 1\n for arg in args:\n product *= arg\n return product", "def conv(A, B, c, i, tipus):\r\n if tipus == 1:\r\n suma = [np.conj(A[k, i]) * B[c - k, i] for k in range(1, c + 1)]\r\n return sum(suma)\r\n elif tipus == 2:\r\n suma = [A[k, i] * B[c - 1 - k, 
i] for k in range(1, c)]\r\n return sum(suma)\r\n elif tipus == 3:\r\n suma = [A[k, i] * np.conj(B[c - k, i]) for k in range(1, c)]\r\n return sum(suma)", "def c_coefficients(x1,x2,x3,y1,y2,y3,initial_slope,final_slope):\n\tC = c_matrix(x1,x2,x3)\n\ty = y_vector(x1,x2,x3,y1,y2,y3,initial_slope,final_slope)\n\tCCoefficients = np.dot(inv(C),y)\n\treturn(CCoefficients)", "def test_calc_k_c():\n\n P_x0 = ufloat(1.75789868673e-12, 1.75789868673e-14) * u.nm**2/u.Hz # 1/100\n f_c = ufloat(50000, 0.5) * u.Hz # 1/100000 relative\n Q = ufloat(10000, 100) * u.dimensionless # 1/100\n T = ufloat(300, 3) * u.K # 1/100\n # ex_k_c is no longer a nice number because I switched from a rounded to\n # more exact value for Boltzmann's constant\n ex_k_c = ufloat(2.9999965233852217, 0.05196147267057527) * u.N/u.m\n k_c = calc_k_c(f_c, Q, P_x0, T)\n assert_almost_equal(k_c.magnitude.n, ex_k_c.magnitude.n)\n assert_almost_equal(k_c.magnitude.s, ex_k_c.magnitude.s)", "def concretize_u(self, uw: torch.Tensor):\n return self.eps * torch.norm(uw, p=self.q, dim=-1)", "def scalar_multiply(c: float, v: Vector) -> Vector:\n return [c * v_i for v_i in v]", "def scalar_multiply(c: float, v: Vector) -> Vector:\n return [c * v_i for v_i in v]", "def __mul__(self, othertr):\n res = self.dot(othertr)\n return res" ]
[ "0.6325033", "0.6273725", "0.6251581", "0.62479377", "0.6177961", "0.6087597", "0.6022537", "0.60215706", "0.6020421", "0.60090333", "0.6000697", "0.5998053", "0.59429264", "0.59204763", "0.58713275", "0.5850264", "0.5813686", "0.57964927", "0.57901424", "0.57262236", "0.57260317", "0.5713855", "0.571201", "0.5704799", "0.57028663", "0.5689596", "0.5675992", "0.56757015", "0.5666318", "0.5655894", "0.5647563", "0.5630625", "0.5625818", "0.56211036", "0.561917", "0.5594783", "0.55804807", "0.556855", "0.5565084", "0.55536", "0.5536789", "0.5518017", "0.55166507", "0.5516216", "0.55042887", "0.5492636", "0.548779", "0.54840076", "0.5481734", "0.5480862", "0.54772455", "0.54694587", "0.5458565", "0.545845", "0.54573977", "0.54430896", "0.5436721", "0.5430599", "0.5422746", "0.542256", "0.5419855", "0.54162866", "0.5415023", "0.5405546", "0.5399005", "0.5394567", "0.53936535", "0.53898394", "0.53891355", "0.5380426", "0.53634554", "0.535642", "0.53524673", "0.5345492", "0.5345291", "0.53447884", "0.53428036", "0.5338491", "0.5337324", "0.532983", "0.5328855", "0.5328855", "0.5324262", "0.5321704", "0.53162116", "0.5313517", "0.53127205", "0.53104323", "0.5309447", "0.53092057", "0.5302118", "0.53016984", "0.5296128", "0.52940315", "0.5292537", "0.52888757", "0.52871686", "0.5287157", "0.5287157", "0.52865314" ]
0.693636
0
Compute the matrix-vector product y = Cu, where C is a circulant matrix. All matrices are real.
def circulant_multiplication(u, a):
    return real(ifft(fft(a)*fft(u)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def covar(fx,cx):\n \n fx = np.array(fx)\n cx = np.array(cx)\n \n shape_fx = fx.shape\n shape_cx = cx.shape\n \n \n if shape_fx[1] != shape_cx[0]:\n print('-----------------------------------------')\n print(\"Shapes of fx and cx cannot be multiplied:\")\n print(shape_fx,\"x\",shape_cx)\n print('-----------------------------------------')\n raise ValueError('Input matrices are not compliant')\n \n cy = np.dot(np.dot(fx,cx),fx.T)\n \n print(\"Size of Cy matrix: \",np.shape(cy))\n \n return cy", "def __matmul__(self, q: np.ndarray) -> np.ndarray:\n return self.product(q)", "def matrix_vector_prod(m,u):\n each_product = []\n for v in m:\n each_product.append(dot_prod(v, u))\n return each_product", "def __matmul__(self, csys):\n self._transform(csys)\n return self", "def factor_circulant_multiplication(u, x, k=1):\n n = len(u) \n D_k = (k**(1/n))**np.arange(0,n)\n Lambda = fft(D_k*x)\n return (1/D_k)*real(ifft(Lambda*fft(D_k*u))) # y", "def updateC(A, U, B):\n \n m_dim = A.shape[1] \n q_dim = B.shape[0]\n \n C_tensor = np.zeros((m_dim, m_dim, q_dim), dtype=np.complex)\n \n for k in range(q_dim):\n A_k = A[:, :, k]\n b_k = B[k]\n \n x_hat = U @ b_k\n y_hat = A_k.conj().T @ x_hat\n \n phase_y = np.exp(1j*np.angle(y_hat))\n #phase_y = np.sign(y_hat)\n C_k = np.diag(phase_y)\n C_tensor[:, :, k] = C_k\n \n \n return C_tensor", "def c_matrix(x1,x2,x3):\n\tC = np.array([\t[\t2*(x2-x1), \t\t(x2-x1), \t\t\t0\t\t\t], \\\n\t\t\t\t\t[\t(x2-x1), \t\t2*(x3-x1), \t\t(x3-x2)\t\t], \\\n\t\t\t\t\t[\t0,\t\t\t\t(x3-x2),\t\t2*(x3-x2)\t] \t], \\\n\t\t\t\t\tfloat)\n\treturn(C)", "def toeplitz_multiplication(u, c, r=None):\n n = len(u)\n if r is None:\n r = c\n u1 = zeros((2*n))\n u1[0:n] = u\n \n c = np.concatenate((c, [0], r[-1:0:-1])) \n \n y1 = circulant_multiplication(u1, c)\n \n return y1[0:n]", "def matmul(x, y):\n return np.matmul(x, y)", "def CoTang(M):\n x = [sy.Dummy() for _ in range(nargs(M))]\n fx = [sy.Dummy() for _ in range(len(x))]\n\n y = list(M(*x))\n J = Jac(M)(*x)\n J = sy.Matrix(J).reshape(len(y), len(x))\n\n fy = list(J.T.inv() @ sy.Matrix(fx))\n return sy.lambdify(\n x + fx,\n y + fy,\n 'sympy',\n )", "def _C(self):\n\n # Find the local x and y coordinates at each node\n xi = 0\n yi = 0\n xj = self.width()\n yj = 0\n xm = xj\n ym = self.height()\n xn = 0\n yn = ym\n\n # Calculate the [C] coefficient matrix\n C = array([[1, xi, yi, xi**2, xi*yi, yi**2, xi**3, xi**2*yi, xi*yi**2, yi**3, xi**3*yi, xi*yi**3],\n [0, 0, 1, 0, xi, 2*yi, 0, xi**2, 2*xi*yi, 3*yi**2, xi**3, 3*xi*yi**2],\n [0, -1, 0, -2*xi, -yi, 0, -3*xi**2, -2*xi*yi, -yi**2, 0, -3*xi**2*yi, -yi**3],\n \n [1, xj, yj, xj**2, xj*yj, yj**2, xj**3, xj**2*yj, xj*yj**2, yj**3, xj**3*yj, xj*yj**3],\n [0, 0, 1, 0, xj, 2*yj, 0, xj**2, 2*xj*yj, 3*yj**2, xj**3, 3*xj*yj**2],\n [0, -1, 0, -2*xj, -yj, 0, -3*xj**2, -2*xj*yj, -yj**2, 0, -3*xj**2*yj, -yj**3],\n\n [1, xm, ym, xm**2, xm*ym, ym**2, xm**3, xm**2*ym, xm*ym**2, ym**3, xm**3*ym, xm*ym**3],\n [0, 0, 1, 0, xm, 2*ym, 0, xm**2, 2*xm*ym, 3*ym**2, xm**3, 3*xm*ym**2],\n [0, -1, 0, -2*xm, -ym, 0, -3*xm**2, -2*xm*ym, -ym**2, 0, -3*xm**2*ym, -ym**3],\n\n [1, xn, yn, xn**2, xn*yn, yn**2, xn**3, xn**2*yn, xn*yn**2, yn**3, xn**3*yn, xn*yn**3],\n [0, 0, 1, 0, xn, 2*yn, 0, xn**2, 2*xn*yn, 3*yn**2, xn**3, 3*xn*yn**2],\n [0, -1, 0, -2*xn, -yn, 0, -3*xn**2, -2*xn*yn, -yn**2, 0, -3*xn**2*yn, -yn**3]])\n \n # Return the coefficient matrix\n return C", "def simple_doct_product(u, v):\n v = [i / (sum(v)) for i in v]\n\n return np.dot(u, v)", "def matvec(self, x):\n return self * x", "def 
p_ym_c(pm,px,py,pyx_c,pmx_c):\n pym_c = np.zeros((py.size,pm.size))\n for yi in range(py.size):\n for mi in range(pm.size):\n for xi in range(px.size):\n pym_c[yi,mi] += (1./pm[mi])*pyx_c[yi,xi]*pmx_c[mi,xi]*px[xi]\n return pym_c", "def cofactorMatrix(self):\n returnvalue = Matrix()\n for i in range(self._height):\n newRow = list()\n for j in range(self._width):\n newRow.append(self.cofactor(i, j))\n returnvalue.addRow(*newRow)\n return returnvalue", "def matmul(x, y):\n if len(list(y.size())) == 2:\n # if one of them is a vector (i.e. wanting to do MV mult)\n z = torch.zeros(2, x.size()[1], dtype=torch.double, device=x.device)\n z[0] = torch.mv(x[0], y[0]) - torch.mv(x[1], y[1])\n z[1] = torch.mv(x[0], y[1]) + torch.mv(x[1], y[0])\n\n if len(list(y.size())) == 3:\n z = torch.zeros(\n 2, x.size()[1], y.size()[2], dtype=torch.double, device=x.device\n )\n z[0] = torch.matmul(x[0], y[0]) - torch.matmul(x[1], y[1])\n z[1] = torch.matmul(x[0], y[1]) + torch.matmul(x[1], y[0])\n\n return z", "def cofactor_matrix(self):\n resp = []\n len_b = len(self.take_vec())\n for i in range(self.order):\n _matrix = aux.cofactor(self.take_matrix(),\n (i, self.order-1)\n )\n _resp = math.pow(-1, len_b-1)\n _resp = _resp * np.linalg.det(_matrix)\n _resp = _resp * math.pow(-1, i * (self.order-1))\n resp.append(int(round(_resp)))\n\n return resp", "def method3(self):\n cres=0.\n Ux_aloc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy_aloc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.ALDM[ix ,iy, : , : ]\n mat2=self.ALDM[(ix%self.kS.Nx)+1, iy, : , : ]\n mat3=self.ALDM[ix ,(iy%self.kS.Ny)+1, : , : ]\n \n Ux_aloc[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat2)[self.NL-1:,self.NL-1:])\n Uy_aloc[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat3)[self.NL-1:,self.NL-1:])\n\n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux_aloc[ix,iy]*Uy_aloc[ix+1,iy]/Ux_aloc[ix,iy+1]/Uy_aloc[ix,iy])\n cres+=ftemp/2./pi/1j\n \n return cres.real\n #End of method3", "def naive_matrix_vector_dot(x, y):\n assert len(x.shape) == 2\n assert len(y.shape) == 1\n assert x.shape[1] == y.shape[0]\n\n z = np.zeros(x.shape[0])\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n z[i] += x[i, j] * y[j]\n return z", "def _mul(*args):\n\treturn functools.reduce(numpy.dot, args)", "def m_c(self) -> np.ndarray:\n assert self._k is not None, \"camera must be calibrated\"\n return forge_projective_matrix(self._k)", "def _mps_CA(self, C, A):\n return np.tensordot(C, A, axes=(1, 0))", "def matmul():\n\n if RESULT_IN_NVRAM:\n matrix_c = ResultMatrixInDaos()\n else:\n matrix_c = ResultMatrixInMemory()\n\n # This could be trivially optimized by reordering indexes\n # and caching either a_block or b_block (assuming C in-memory).\n # *However* it would result in unfair comparisons with the \n # previous implementation used elsewhere.\n # Using the naive algorithm makes sense for a raw comparison.\n for i in range(MATRIXSIZE):\n for j in range(MATRIXSIZE):\n partial_result_block = np.zeros((BLOCKSIZE, BLOCKSIZE))\n\n for k in range(MATRIXSIZE):\n a_block = np.fromstring(\n DAOS_KV[\"A%02d%02d\" % (i, k)],\n dtype=NP_FROMSTRING_DTYPE\n ).reshape((BLOCKSIZE, BLOCKSIZE))\n\n b_block = np.fromstring(\n DAOS_KV[\"B%02d%02d\" % (k, j)],\n dtype=NP_FROMSTRING_DTYPE\n ).reshape((BLOCKSIZE, BLOCKSIZE))\n\n partial_result_block += a_block @ b_block\n \n matrix_c[i,j] = partial_result_block\n\n return matrix_c", "def _rmatvec(self, u: 
np.ndarray) -> np.ndarray:\n return convolve(self.x.conj()[::-1], u, mode='valid', method=self.method)", "def complex_multiplication(c1,c2,cr):\n cr[0] = c1[0]*c2[0] - c1[1]*c2[1]\n cr[1] = c1[0]*c2[1] + c1[1]*c2[0]\n return cr", "def get_C(n_c,CV_matrix):\n C = np.zeros((n_c, n_c), dtype=np.float32)\n for i in range(3):\n C += np.asfortranarray(CV_matrix[:, :, i]) @ np.asfortranarray(CV_matrix[:, :, np.mod(i + 2, 3)].T)\n C = (C != 0).astype(np.int32)\n return C", "def muscovite():\n\n rho = 2834.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 181.; C[0,1] = 48.8; C[0,2] = 25.6; C[0,3] = 0.; C[0,4] = -14.2; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 178.4; C[1,2] = 21.2; C[1,3] = 0.; C[1,4] = 1.1; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 58.6; C[2,3] = 0.; C[2,4] = 1.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 16.5; C[3,4] = 0.; C[3,5] = -5.2\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 19.5; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 72.\n\n return C, rho", "def rhs(t, conc, S_matrix, educt_matrix, kin_par):\n fluxes = calculate_fluxes(conc, S_matrix, educt_matrix, kin_par)\n return np.dot(S_matrix, fluxes)", "def cofiCostFunc(self,params, *args):\n\t\tY, R, num_users, num_products, num_features,l = args[0], args[1],args[2], args[3],args[4],args[5]\n\n\t\taux = params.reshape((num_products + num_users, num_features))\n\n\t\tX = aux[0:num_products , :]\n\n\t\tTheta = aux[num_products:, :] \n\n\t\ttest = np.dot(X,Theta.transpose())\n\t\ttest = test - Y\n\t\ttest = np.multiply(test , R)\n\t\ttest = np.power(test,2)\n\t\ttest = test.sum()\n\t\ttest = 0.5 * test\n\n\t\tJ = 0;\n\t\tregularization = (l * 0.5) * np.power(X,2).sum() + np.power(Theta,2).sum()\n\n\t\tJ = test# + regularization\n\n\t\treturn J", "def c(self, z, y, r, t):\n \n u = np.zeros( self.m ) \n \n return u", "def _set_u_matirx(self):\n c_matrix = self.get_c_matrix()\n u_matrix, d_matrix, _ = np.linalg.svd(c_matrix)\n self.u_matrix = np.matrix(u_matrix)", "def reduce_C(self, C_on_basis_vecs):\n self.C_reduced = np.mat(np.array(C_on_basis_vecs, ndmin=2))\n return self.C_reduced", "def reduce_C(self, C_on_basis_vecs):\n self.C_reduced = np.mat(np.array(C_on_basis_vecs, ndmin=2).T)\n return self.C_reduced", "def scalar_multiply(c, v):\n\treturn [c * v_i for v_i in v]", "def vectordot(a, b):\n return np.sum(a * b, 1)", "def _mps_AC(self, A, C):\n return np.tensordot(A, C, axes=(2, 0))", "def form_matrix_yt(w):\r\n M = np.zeros((len(w),len(w)))\r\n for i in range(len(w)):\r\n for j in range(len(w)):\r\n M[i,j] = YoungTableaux(w[i],w[j]).CMNR()\r\n return M", "def vect_contract(m, c, n):\n a = np.tensordot(m, c, (0, 0))\n mn = np.tensordot(a, n, (2, 0))\n return mn", "def naive_vector_dot(x, y):\n assert len(x.shape) == 1\n assert len(y.shape) == 1\n assert x.shape[0] == y.shape[0]\n\n z = 0\n for i in range(x.shape[0]):\n z += x[i] * y[i]\n return z", "def __mul__(self, othertr):\n res = self.dot(othertr)\n return res", "def _c_correlation(cls, X, y):\n su = np.zeros(X.shape[1])\n for i in np.arange(X.shape[1]):\n su[i] = cls._symmetrical_uncertainty(X[:, i], y)\n return su", "def complex_inverse(c1,cr):", "def scalar_multiply(c: float, v: Vector) -> Vector:\n return [c * v_i for v_i in v]", "def scalar_multiply(c: float, v: Vector) -> Vector:\n return [c * v_i for v_i in v]", "def _build_c_phi_matrices(self, t: tf.Tensor) -> tf.Tensor:\n c_phi_matrices = self.kernel.compute_c_phi(t, 
t)\\\n + tf.expand_dims(tf.eye(self.n_points_int, dtype=tf.float64), 0)\\\n * self.likelihood_variances\n return c_phi_matrices", "def dot_prod(u,v):\n each_product = []\n for i in range(len(u)):\n each_product.append(u[i] * v[i])\n return sum(each_product)", "def matrix_deflation(X_curr, Y_curr, X_orig, Y_orig, u, v):\n Xp = X_curr\n Yp = Y_curr\n #u = u / (np.sqrt(np.sum(u**2)+1e-7))\n #v = v / (np.sqrt(np.sum(v**2)+1e-7))\n\n qx = np.dot(Xp.T,np.dot(X_orig,u))\n qx = qx / (np.sqrt(np.sum(qx**2)+1e-7))\n #qx = qx.astype('float16')\n Xp = Xp - np.dot(Xp,qx).dot(qx.T)\n X = Xp.reshape(1,Xp.shape[0],Xp.shape[1])\n\n qy = np.dot(Yp.T,np.dot(Y_orig,v))\n qy = qy / (np.sqrt(np.sum(qy**2)+1e-7))\n #qy = qy.astype('float16')\n Yp = Yp - np.dot(Yp,qy).dot(qy.T)\n Y = Yp.reshape(1,Yp.shape[0],Yp.shape[1])\n\n return X, Y", "def scalar_multiply(c, v):\n return [c * v_i for v_i in v]", "def scalar_multiply(c, v):\n return [c * v_i for v_i in v]", "def mcc(self):\n tp = self.tp\n tn = self.tn\n fp = self.fp\n fn = self.fn\n return tp * tn / np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))", "def kronecker_prod(x, y):\n if len(list(x.size())) != 3 or len(list(y.size())) != 3:\n raise ValueError(\"An input is not of the right dimension.\")\n\n z = torch.zeros(\n 2,\n x.size()[1] * y.size()[1],\n x.size()[2] * y.size()[2],\n dtype=torch.double,\n device=x.device,\n )\n\n row_count = 0\n\n for i in range(x.size()[1]):\n for k in range(y.size()[1]):\n column_count = 0\n for j in range(x.size()[2]):\n for l in range(y.size()[2]):\n\n z[0][row_count][column_count] = (x[0][i][j] * y[0][k][l]) - (\n x[1][i][j] * y[1][k][l]\n )\n z[1][row_count][column_count] = (x[0][i][j] * y[1][k][l]) + (\n x[1][i][j] * y[0][k][l]\n )\n\n column_count += 1\n row_count += 1\n\n return z", "def Q2C(self, q):\n\n #q = q.squeeze();\n C = np.empty((3,3));\n\tC[0,0] = (q[0]**2.0) + (q[1]**2.0) - (q[2]**2.0) - (q[3]**2.0);\n\tC[0,1] = 2.0 * ((q[1]*q[2]) + (q[0]*q[3]));\n\tC[0,2] = 2.0 * ((q[1]*q[3]) - (q[0]*q[2]));\n\n\tC[1,0] = 2.0 * ((q[1]*q[2]) - (q[0]*q[3]));\n\tC[1,1] = (q[0]**2.0) - (q[1]**2.0) + (q[2]**2.0) - (q[3]**2.0);\n\tC[1,2] = 2.0 * ((q[2]*q[3]) + (q[0]*q[1]));\n\n\tC[2,0] = 2.0 * ((q[1]*q[3]) + (q[0]*q[2]));\n\tC[2,1] = 2.0 * ((q[2]*q[3]) - (q[0]*q[1]));\n\tC[2,2] = (q[0]**2.0) - (q[1]**2.0) - (q[2]**2.0) + (q[3]**2.0);\n\n return C", "def scalar_mult(v, u):\n return [v[i] * u[i] for i in range(len(v))]", "def f(self, x: np.array) -> np.array:\n return self.m * x + self.c", "def p_mx_c(pm,px,py,pyx_c,pym_c,beta):\n \n pmx_c = np.zeros((pm.size,px.size)) # P(M|X) matrix to be returned\n for mi in range(pm.size):\n for xi in range(px.size):\n pmx_c[mi,xi] = pm[mi] * np.exp(-beta * entropy(pyx_c[:,xi], pym_c[:,mi], base=2))\n z = pmx_c.sum(axis=0)\n pmx_c /= z #Normalize\n \n \t\n return pmx_c, z", "def ccc_v(y_true, y_pred):\n x = y_true[:, 0]\n y = y_pred[:, 0]\n mx = K.mean(x, axis=0)\n my = K.mean(y, axis=0)\n xm, ym = x - mx, y - my\n rho = K.sum(xm * ym) / (K.sqrt(K.sum(xm ** 2)) * K.sqrt(K.sum(ym ** 2)))\n x_s = K.std(x)\n y_s = K.std(y)\n ccc = 2 * rho * x_s * y_s / (x_s ** 2 + y_s ** 2 + (mx - my) ** 2)\n return ccc", "def factor_circulant_matrix(x, k):\n n=len(x)\n return circulant(x) * (tri(n,n, 0) + k*np.transpose(tri(n,n, -1)))", "def p_m(pmx_c,px):\n pm = np.zeros(pmx_c.shape[0])\n for mi in range(pm.size):\n for xi in range(px.size):\n pm[mi] += pmx_c[mi,xi]*px[xi]\n return pm", "def alternate_ls (u_num, Y, P, C, reg):\n\n\t# get # of items/users and # of latent factors\n\t[i_num, f_num] = 
Y.shape\n\n\t# output buffer\n\tX = np.zeros((u_num, f_num))\n\n\t# precalculate YtY to improve the performance\n\tYtY = Y.T * Y\n\n\t# iterate over each user/item\n\tfor u in range(u_num):\n\n\t\t# store the diagonal elements of the matrix Cu discussed in the paper in a vector\n\t\tCu = C[u,:]\n\n\t\t# store the coresponding row/column of the preference matrix\n\t\tPu = P[u,:]\n\n\t\t# compute Cu-I\n\t\tCu_I = Cu - 1\n\n\t\t# calculate Yt(Cu-I)Y\n\t\tYtCu_IY = np.zeros((f_num, f_num))\n\t\tCuIY = np.multiply(Y, Cu_I.T) # weight each row of Y with Cu-I\n\t\tfor row in range(f_num):\n\t\t\tfor col in range(f_num):\n\t\t\t\tYtCu_IY[row,col] = Y[:,row].T * CuIY[:,col]\n\t\t\n\t\t# left term : ((YtCuY + regI)^(-1)) = (YtY + Yt(Cu-I)Y + regI)^(-1)\n\t\tleft_inv = YtY + YtCu_IY + reg*np.eye(f_num)\n\t\tleft = np.linalg.inv(left_inv)\n\n\t\t# right term : YtCuPu\n\t\tright = Y.T * np.multiply(Cu.T, Pu.T)\n\n\t\t# compute the latent factor of the user/item\n\t\tx = left * right\n\t\t\n\t\t# store it in a matrix\n\t\tX[u,:] = x.T\n\n\t# return an MxF or NxF matrix\n\treturn np.matrix(X)", "def matrix_multiply(x, y):\r\n\r\n # handle the base case of receiving\r\n # two empty matrices\r\n if x == [] and y == []:\r\n return []\r\n\r\n # determine the number of rows and columns in the result matrix\r\n num_rows = len(x)\r\n num_cols = len(y[0])\r\n\r\n num_cross = len(x[0])\r\n\r\n # initialize the result matrix\r\n result_matrix = [[0] * num_cols for _ in xrange(num_rows)]\r\n\r\n # compute the values for each cell of the result\r\n # matrix\r\n for row_index in xrange(num_rows):\r\n for col_index in xrange(num_cols):\r\n\r\n # sum up the corresponding values from\r\n # x and y\r\n for multiplication_index in xrange(num_cross):\r\n\r\n x_value = x[row_index][multiplication_index]\r\n y_value = y[multiplication_index][col_index]\r\n\r\n result_matrix[row_index][col_index] += x_value * y_value\r\n\r\n return result_matrix", "def matrix_dot(*args):\r\n rval = args[0]\r\n for a in args[1:]:\r\n rval = theano.tensor.dot(rval, a)\r\n return rval", "def product_moment(*args, **kwargs):\n return ConfusionMatrix2.from_ccw(*args, **kwargs).matthews_corr()", "def _calc_C(self, lambdify=True):\n\n C = None\n C_func = None\n # check to see if we have our term saved in file\n C, C_func = self._load_from_file('C', lambdify)\n\n if C is None and C_func is None:\n # if no saved file was loaded, generate function\n print('Generating centrifugal and Coriolis compensation function')\n\n # first get the inertia matrix\n M = self._calc_M(lambdify=False)\n\n # C_{kj} = sum_i c_{ijk}(q) \\dot{q}_i\n # c_{ijk} = 1/2 * sum_i (\\frac{\\partial M_{kj}}{\\partial q_j} +\n # \\frac{\\partial M_{ki}}{\\partial q_j} - \\frac{\\partial M_{ij}}\n # {\\partial q_k})\n C = sp.zeros(self.N_JOINTS, self.N_JOINTS)\n for kk in range(self.N_JOINTS):\n for jj in range(self.N_JOINTS):\n for ii in range(self.N_JOINTS):\n dMkjdqi = M[kk, jj].diff(self.q[ii])\n dMkidqj = M[kk, ii].diff(self.q[jj])\n dMijdqk = M[ii, jj].diff(self.q[kk])\n C[kk, jj] += .5 * (dMkjdqi + dMkidqj - dMijdqk) * self.dq[ii]\n C[kk, jj] = C[kk, jj]\n C = sp.Matrix(C)\n\n # save to file\n abr_control.utils.os_utils.makedirs(\n '%s/C' % self.config_folder)\n cloudpickle.dump(C, open(\n '%s/C/C' % self.config_folder, 'wb'))\n\n if lambdify is False:\n # if should return expression not function\n return C\n\n if C_func is None:\n C_func = self._generate_and_save_function(\n filename='C', expression=C,\n parameters=self.q+self.dq)\n return C_func", "def __mul__(self, other):\n 
if isinstance(other, Vector):\n # Matrix vector product\n v = Vector(list())\n for n in range(len(other.vectors)):\n v += scale(other.vectors[n][n], self.vectors[n])\n return v\n elif isinstance(other, Matrix):\n # Matrix matrix product\n if self.n != other.m:\n raise ValueError(\"Wrong fucking sizes, nøøb\")\n\n selfVectors = self.vectors\n selfColVectors = self.transpose()\n otherVectors = other.vectors\n otherColVectors = other.transpose()\n vectors = list()\n for col in range(other.n):\n cordinator = []\n\n for row in range(self.m):\n coord = 0\n\n for k in range(other.m):\n coord += (\n selfVectors[row].coords[k]\n * otherColVectors.vectors[col].coords[k]\n )\n\n cordinator.append(coord)\n\n v = Vector(cordinator)\n vectors.append(v)\n matrix = Matrix(vectors)\n matrix = matrix.transpose()\n return matrix\n elif isinstance(other, int) or isinstance(other, float): # Skalering af matrix\n for i in range(len(self.vectors)):\n self.vectors[i] *= other\n else:\n raise ValueError(\n \"Can only multiply Matrix with Matrix, Vector, Integer or Float\"\n )", "def c_coefficients(x1,x2,x3,y1,y2,y3,initial_slope,final_slope):\n\tC = c_matrix(x1,x2,x3)\n\ty = y_vector(x1,x2,x3,y1,y2,y3,initial_slope,final_slope)\n\tCCoefficients = np.dot(inv(C),y)\n\treturn(CCoefficients)", "def c( self , y , r , t = 0 ):\n \n u = np.zeros(self.m) # State derivative vector\n \n raise NotImplementedError\n \n return u", "def my_matmul(x, y):\n ##\n cmd = getattr(th, \"matmul\")\n x1, x2 = my_cut(x)\n y1, y2 = my_cut(y)\n x2y1 = cmd(x2, y1)\n x1y2 = cmd(x1, y2)\n x2y2 = cmd(x2, y2)\n ret = (x2y1 + x1y2) % int24field * int24field + x2y2\n ret = int48module(ret)\n return ret", "def outer_product(x,y):\n\n return x[:,0]*y[:,1] -x[:,1]*y[:,0]", "def lazy_matrix_mul(m_a, m_b):\n return np.dot(m_a, m_b)", "def matmul(xs: List[List[float]],\n ys: List[List[float]]) -> List[List[float]]:\n product = []\n for x_row in range(len(xs)):\n row = []\n for y_col in range(len(ys[0])):\n col = [ys[y_row][y_col] for y_row in range(len(ys))]\n row.append(Math.dot(xs[x_row], col))\n product.append(row)\n return product", "def compute_factor(X, v, c1, c2):\n\n assert np.shape(v)[1] == 1,\"v is not a column vector\"\n\n v = normalize_l2(v)\n\n sz_u = np.shape(X)[0]\n sz_v = np.shape(X)[1]\n\n assert sz_v == np.size(v)\n\n u = update_with_delta(X @ v, c1)\n v = update_with_delta(X.T @ u, c2)\n\n delta_u = 1000\n delta_v = 1000\n\n while delta_u > 1e-5 or delta_v > 1e-5:\n oldU = u\n oldV = v\n\n u = update_with_delta(X @ v, c1)\n v = update_with_delta(X.T @ u, c2)\n\n delta_u = npla.norm(u - oldU) / sz_u\n delta_v = npla.norm(v - oldV) / sz_v\n\n d = u.T @ X @ v\n\n return (d,u,v)", "def matrix_multiplication_loop(x_matrix, y_matrix):\n result = []\n for i, row in enumerate(x_matrix):\n row_vector = []\n for j in range(len(y_matrix[0])):\n product = 0\n for k in range(len(row)):\n product += x_matrix[i][k] * y_matrix[k][j]\n row_vector.append(product)\n result.append(row_vector)\n return result", "def mul(Z,X,Y):", "def Cvec(self):\n return vec(self.xc, self.yc)", "def test03(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n if self.rootdir:\n dirc, dird = self.rootdir+'.c', self.rootdir+'.d'\n else:\n dirc, dird = None, None\n c = bcolz.carray(a, rootdir=dirc)\n d = bcolz.carray(b, rootdir=dird)\n cr = bcolz.eval(\"a * d\")\n nr = a * b\n # print \"bcolz.eval ->\", cr\n # print \"numpy ->\", nr\n assert_array_equal(cr[:], nr, \"eval does not work correctly\")", "def alg(c):\n return c[0]*G[0] + c[1]*G[1] + c[2]*G[2]", "def 
calculate_xi(self, postJ):\n # get output of rec model\n self.batch_mu = self.mu_net(postJ)\n self.batch_u = self.u_net(postJ)\n self.batch_unc_d = self.unc_d_net(postJ)\n\n # add extra dim to batch_u, so it gets treated as column vectors when\n # iterated over\n\n self.batch_u = tf.expand_dims(self.batch_u, -1)\n\n def get_cov(acc, inputs):\n # convert output of rec model to rank-1 covariance matrix\n\n # use softplus to get positive constrained d, minimum of -15\n # since softplus will turn low numbers into 0, which become NaNs\n # when inverted\n u, unc_d = inputs\n d = tf.nn.softplus(tf.maximum(unc_d, -15.0))\n D_inv = tf.diag(1.0 / d)\n eta = 1.0 / (tf.matmul(tf.matmul(tf.transpose(u), D_inv), u) + 1.0)\n C = D_inv - eta*tf.matmul(tf.matmul(tf.matmul(D_inv, u),\n tf.transpose(u)), D_inv)\n Tr_C = tf.trace(C)\n ld_C = tf.log(eta) - tf.reduce_sum(tf.log(d)) # eq 20 in DLGM\n # coeff = ((1 - T.sqrt(eta)) / (u.T.dot(D_inv).dot(u)))\n # simplified coefficient below is more stable as u -> 0\n # original coefficient from paper is above\n coeff = eta / (1.0 + tf.sqrt(eta))\n R = (tf.sqrt(D_inv) - coeff * tf.matmul\n (tf.matmul(tf.matmul(D_inv, u), tf.transpose(u)),\n tf.sqrt(D_inv)))\n return Tr_C, ld_C, R\n\n (self.batch_Tr_C, self.batch_ld_C, self.batch_R) = tf.scan(\n get_cov, [self.batch_u, self.batch_unc_d],\n initializer=(0.0, tf.zeros([1, 1]), tf.diag(self.batch_unc_d[0])))\n\n self.batch_xi = (self.batch_mu +\n (tf.squeeze(tf.matmul(self.batch_R,\n (tf.expand_dims(tf.random_normal(\n [tf.shape(self.batch_R)[0],\n self.num_units]), -1))))))", "def circumcenter(C):\n ri, rj, rk = C.transpose(1,2,0)\n ax, ay = ri\n bx, by = rj\n cx, cy = rk\n d = 2 * (ax * (by - cy) + bx * (cy - ay) + cx * (ay - by))\n ux = ((ax * ax + ay * ay) * (by - cy) + (bx * bx + by * by) * (cy - ay) + (cx * cx + cy * cy) * (\n ay - by)) / d\n uy = ((ax * ax + ay * ay) * (cx - bx) + (bx * bx + by * by) * (ax - cx) + (cx * cx + cy * cy) * (\n bx - ax)) / d\n vs = np.empty((ax.size,2),dtype=np.float64)\n vs[:,0],vs[:,1] = ux,uy\n return vs", "def cc_coefficient(x, y):\n cor = np.sum( (x-np.mean(x)) * (y-np.mean(y)) )\n norm = sqrt( np.sum((x-np.mean(x))**2) * np.sum((x-np.mean(x))**2) )\n r = cor/norm\n return r", "def Cijkl(C):\n c = np.zeros(shape=(3, 3, 3, 3))\n CC = np.zeros(shape=(9, 9))\n CC[0:6, 0:6] = C[0:6, 0:6]\n CC[6:9, 6:9] = C[3:6, 3:6]\n CC[0:6, 6:9] = C[0:6, 3:6]\n CC[6:9, 0:6] = C[3:6, 0:6]\n\n c[0, 0, 0, 0] = CC[0, 0]\n c[0, 0, 1, 1] = CC[0, 1]\n c[0, 0, 2, 2] = CC[0, 2]\n c[0, 0, 1, 2] = CC[0, 3]\n c[0, 0, 2, 0] = CC[0, 4]\n c[0, 0, 0, 1] = CC[0, 5]\n c[0, 0, 2, 1] = CC[0, 6]\n c[0, 0, 0, 2] = CC[0, 7]\n c[0, 0, 1, 0] = CC[0, 8]\n\n c[1, 1, 0, 0] = CC[1, 0]\n c[1, 1, 1, 1] = CC[1, 1]\n c[1, 1, 2, 2] = CC[1, 2]\n c[1, 1, 1, 2] = CC[1, 3]\n c[1, 1, 2, 0] = CC[1, 4]\n c[1, 1, 0, 1] = CC[1, 5]\n c[1, 1, 2, 1] = CC[1, 6]\n c[1, 1, 0, 2] = CC[1, 7]\n c[1, 1, 1, 0] = CC[1, 8]\n\n c[2, 2, 0, 0] = CC[2, 0]\n c[2, 2, 1, 1] = CC[2, 1]\n c[2, 2, 2, 2] = CC[2, 2]\n c[2, 2, 1, 2] = CC[2, 3]\n c[2, 2, 2, 0] = CC[2, 4]\n c[2, 2, 0, 1] = CC[2, 5]\n c[2, 2, 2, 1] = CC[2, 6]\n c[2, 2, 0, 2] = CC[2, 7]\n c[2, 2, 1, 0] = CC[2, 8]\n\n c[1, 2, 0, 0] = CC[3, 0]\n c[1, 2, 1, 1] = CC[3, 1]\n c[1, 2, 2, 2] = CC[3, 2]\n c[1, 2, 1, 2] = CC[3, 3]\n c[1, 2, 2, 0] = CC[3, 4]\n c[1, 2, 0, 1] = CC[3, 5]\n c[1, 2, 2, 1] = CC[3, 6]\n c[1, 2, 0, 2] = CC[3, 7]\n c[1, 2, 1, 0] = CC[3, 8]\n\n c[2, 0, 0, 0] = CC[4, 0]\n c[2, 0, 1, 1] = CC[4, 1]\n c[2, 0, 2, 2] = CC[4, 2]\n c[2, 0, 1, 2] = CC[4, 3]\n c[2, 0, 2, 0] = CC[4, 4]\n c[2, 0, 0, 1] 
= CC[4, 5]\n c[2, 0, 2, 1] = CC[4, 6]\n c[2, 0, 0, 2] = CC[4, 7]\n c[2, 0, 1, 0] = CC[4, 8]\n\n c[0, 1, 0, 0] = CC[5, 0]\n c[0, 1, 1, 1] = CC[5, 1]\n c[0, 1, 2, 2] = CC[5, 2]\n c[0, 1, 1, 2] = CC[5, 3]\n c[0, 1, 2, 0] = CC[5, 4]\n c[0, 1, 0, 1] = CC[5, 5]\n c[0, 1, 2, 1] = CC[5, 6]\n c[0, 1, 0, 2] = CC[5, 7]\n c[0, 1, 1, 0] = CC[5, 8]\n\n c[2, 1, 0, 0] = CC[6, 0]\n c[2, 1, 1, 1] = CC[6, 1]\n c[2, 1, 2, 2] = CC[6, 2]\n c[2, 1, 1, 2] = CC[6, 3]\n c[2, 1, 2, 0] = CC[6, 4]\n c[2, 1, 0, 1] = CC[6, 5]\n c[2, 1, 2, 1] = CC[6, 6]\n c[2, 1, 0, 2] = CC[6, 7]\n c[2, 1, 1, 0] = CC[6, 8]\n\n c[0, 2, 0, 0] = CC[7, 0]\n c[0, 2, 1, 1] = CC[7, 1]\n c[0, 2, 2, 2] = CC[7, 2]\n c[0, 2, 1, 2] = CC[7, 3]\n c[0, 2, 2, 0] = CC[7, 4]\n c[0, 2, 0, 1] = CC[7, 5]\n c[0, 2, 2, 1] = CC[7, 6]\n c[0, 2, 0, 2] = CC[7, 7]\n c[0, 2, 1, 0] = CC[7, 8]\n\n c[1, 0, 0, 0] = CC[8, 0]\n c[1, 0, 1, 1] = CC[8, 1]\n c[1, 0, 2, 2] = CC[8, 2]\n c[1, 0, 1, 2] = CC[8, 3]\n c[1, 0, 2, 0] = CC[8, 4]\n c[1, 0, 0, 1] = CC[8, 5]\n c[1, 0, 2, 1] = CC[8, 6]\n c[1, 0, 0, 2] = CC[8, 7]\n c[1, 0, 1, 0] = CC[8, 8]\n return c", "def mvector(B, c):\n # for Sun Mg Potential: c=1.6281689374348\n A = np.zeros(shape=4)\n A[0] = (2 / 3) * B[0]\n A[1] = 0.5 * ((2 / sqrt(3)) * B[1] - A[0])\n A[2] = -A[0] - A[1]\n A[3] = B[2] / c\n return A", "def complex_mul(x1, x2):\n assert x1.size(-1) == 2 and x2.size(-1) == 2\n\n res = torch.stack(\n (x1[..., 0]*x2[..., 0]-x1[..., 1]*x2[..., 1],\n x1[..., 0]*x2[..., 1] + x1[..., 1]*x2[..., 0]), -1)\n\n return res", "def evaluate_c(self, x, out=None, **kwargs):\n return np.zeros(0)", "def build_cooc_matrix(users):\n nprods = constants.N_PRODUCTS\n M = scipy.sparse.dok_matrix((nprods, nprods), dtype=np.int32)\n i = 0\n for user in users:\n order = user.orders[-1]\n for pid in user.sorted_pids:\n focal_ix = pid-1\n prevs = paired_pids(user, pid)\n for prev in prevs:\n key = (focal_ix, prev-1)\n #n = M.get(key, 0)\n # further centi-optimization\n n = dict.get(M, key, 0)\n M.update({key:n+1})\n # Above is like 5x faster than below (and this inner loop is current bottleneck)\n #M[focal_ix, prev-1] += 1\n i += 1\n if i % 10000 == 0:\n logging.info('Processed {} users'.format(i))\n\n return M", "def matTimesVec(M, x):\n return [dot(m, x) for m in M]", "def __mul__(self, other): \n if isinstance(other, Iterable):\n # dot product\n return self.x * other[0] + self.y * other[1]\n else:\n # scalar product\n return Vector(self.x * other, self.y * other)", "def product1(a, b, c) :\n return a * b * c", "def classical(m1,m2):\n \n n = m1.shape\n result = np.zeros(n, dtype = int)\n\n for i in range(n[0]):\n for j in range(n[0]):\n for k in range(n[0]):\n result[i][j] += m1[i][k] * m2[k][j]\n return result", "def abc_matrix(a, b, c):\n ax = np.linalg.norm(a)\n a_hat = a/ax\n bx = np.dot(b, a_hat)\n by = np.linalg.norm(np.cross(a_hat, b))\n cx = np.dot(c, a_hat)\n axb = np.cross(a,b)\n axb_hat = axb / np.linalg.norm(axb)\n cy = np.dot(c, np.cross(axb_hat, a_hat))\n cz = np.dot(c, axb_hat)\n return np.array([[ax, bx, cx],[0, by, cy],[0 , 0, cz]])", "def coproduct(self, element):\n from sage.categories.tensor import tensor\n base = element.lift().parent()\n return self.tensor_square().sum(coeff * tensor([self(base[x]), self(base[y])])\n for ((x,y), coeff) in element.lift().coproduct())", "def mbvector(A, c=sqrt(8 / 3)):\n la = len(A)\n sa = A.size\n if la == sa:\n B = np.array([0.0, 0.0, 0.0])\n a1 = A[0] * np.array([1.0, 0.0])\n a2 = A[1] * np.array([-0.5, 0.5 * sqrt(3)])\n a3 = A[2] * np.array([-0.5, -0.5 * sqrt(3)])\n B[0] = a1[0] + a2[0] 
+ a3[0]\n B[1] = a1[1] + a2[1] + a3[1]\n B[2] = c * A[3]\n else:\n sa = A.shape\n B = np.zeros(shape=(sa[0], 3))\n for i in range(sa[0]):\n B[i, 0] = a1[0] + a2[0] + a3[0]\n B[i, 1] = a1[1] + a2[1] + a3[1]\n B[i, 2] = c * A[i, 3]\n return B", "def compound_dot(self, A, B, C, alpha=1.0, beta=0.0, relu=False, bsum=None):\n\n # checking type and shape\n assert A.dtype == B.dtype == C.dtype\n assert A.shape[0] == C.shape[0]\n assert B.shape[1] == C.shape[1]\n assert A.shape[1] == B.shape[0]\n\n # cleaner implementation, shall be equivalent to the one below\n # if relu:\n # C[:] = self.log(1. + self.exp(alpha * self.dot(A, B))) + beta * C\n # else:\n # C[:] = alpha * self.dot(A, B) + beta * C\n\n if beta == 0:\n if C._tensor.flags['C_CONTIGUOUS'] is not True:\n tmp = np.empty(C.shape, dtype=C.dtype)\n math_cpu.blas_dot(A._tensor, B._tensor, tmp)\n C._tensor[:] = tmp.copy()\n else:\n math_cpu.blas_dot(A._tensor, B._tensor, C._tensor)\n if relu:\n self.Relu(C._tensor, C._tensor)\n else:\n # mfma: change np.multiply to mul\n if beta != 1:\n np.multiply(C._tensor, beta, C._tensor)\n tmp = np.empty(C.shape, dtype=C.dtype)\n np.dot(A._tensor, B._tensor, tmp)\n # mfma: change np.multiply to mul\n if alpha != 1:\n np.multiply(tmp, alpha, tmp)\n if relu:\n self.Relu(tmp, tmp)\n np.add(C._tensor, tmp, C._tensor)\n if bsum is not None:\n bsum[:] = self.sum(C, 1)\n\n return C", "def __matmul__(self, tensor):\n return self.matmul(tensor)", "def __mul__(self, other):\n #\n # TODO - your code here\n #\n final_matrix = []\n for i in range(self.h):\n temp_row = []\n for j in range(other.w):\n # take dot-product of row of\n # matrix in 1st arg with col of\n # matrix in 2nd arg\n temp_row.append(dot_product(get_row(self.g, i), get_col(other.g, j)))\n final_matrix.append(temp_row)\n return Matrix(final_matrix)\n # TODO - your code here", "def least_squares(Cui, X, Y, regularization, num_threads):\n users, factors = X.shape\n YtY = Y.T.dot(Y)\n\n for u in range(users):\n # accumulate YtCuY + regularization*I in A\n A = YtY + regularization * np.eye(factors)\n\n # accumulate YtCuPu in b\n b = np.zeros(factors)\n\n for i, confidence in nonzeros(Cui, u):\n factor = Y[i]\n A += (confidence - 1) * np.outer(factor, factor)\n b += confidence * factor\n\n # Xu = (YtCuY + regularization * I)^-1 (YtCuPu)\n X[u] = np.linalg.solve(A, b)", "def compCoeff_CGP(i, A, c, N):\n Ap = np.copy(A)\n out = c[i, 0] * np.eye(N)\n j = 1\n while j <= i:\n # compute A to the power p\n if j > 1:\n Ap = Ap.dot(A)\n\n # add to the polynome\n out += c[i, j] * Ap\n j += 1\n\n return out", "def compute_coriolis(self):\r\n # compute the Coriolis force\r\n self.coriolis.assign(\r\n project(-2*self.rho*cross(self.omega, self.u), self.V))", "def _generate_mult_process(X, mat, inits):\n M = np.empty_like(X, dtype=float)\n M[..., 0] = inits[X[..., 0]]\n M[..., 1:] = mat[X[..., :-1], X[..., 1:]]\n np.cumprod(M, axis=-1, out=M)\n return M", "def compute_operator(self, Xc, Yc):\n\n U, s, V = self._compute_svd(Xc)\n\n self._Atilde = (np.linalg.multi_dot([U.T.conj(), (Yc), (V)])\n * np.reciprocal(s))\n\n self._compute_eigenquantities()\n self._compute_modes(Yc, U, s, V)\n\n self._slow_modes = (np.abs(old_div(np.log(self.eigenvalues),\n self._eigs_divider))) <= self._rho", "def u(self,c,x):\r\n alpha = self.alpha ; sigma = self.sigma\r\n \r\n ctilde = c - alpha*x\r\n u = ctilde**(1-sigma) / (1-sigma)\r\n \r\n return u" ]
[ "0.650418", "0.650212", "0.6441079", "0.6313763", "0.6310517", "0.62949276", "0.62782884", "0.62631303", "0.61975265", "0.6096459", "0.608041", "0.606508", "0.6038961", "0.6011421", "0.60068315", "0.59920776", "0.59303707", "0.58836865", "0.5879482", "0.58772385", "0.58575416", "0.5838892", "0.58091784", "0.5796622", "0.57843477", "0.57586485", "0.57561576", "0.57366264", "0.5728224", "0.57246524", "0.572282", "0.57148993", "0.57086194", "0.5698373", "0.5695539", "0.5695106", "0.569498", "0.5687259", "0.56838983", "0.567735", "0.566609", "0.5664836", "0.5649978", "0.5649978", "0.5646444", "0.56459975", "0.5616804", "0.5613991", "0.5613991", "0.56069696", "0.5602995", "0.56016886", "0.5601508", "0.5595393", "0.5594796", "0.55833477", "0.5577543", "0.55557126", "0.55539906", "0.5553841", "0.5552608", "0.55387753", "0.55368805", "0.55332106", "0.5529269", "0.5527718", "0.5523153", "0.55210274", "0.5515821", "0.55033433", "0.55023336", "0.5484248", "0.54813796", "0.5480753", "0.5479537", "0.5474091", "0.546962", "0.54678774", "0.54670525", "0.5465292", "0.5459378", "0.5458223", "0.5457664", "0.54558104", "0.54443145", "0.54395616", "0.5437733", "0.543512", "0.54349375", "0.543476", "0.54323757", "0.5431858", "0.5431346", "0.54254824", "0.5424391", "0.54220825", "0.54190916", "0.5415009", "0.5414828", "0.5412295" ]
0.6389226
3
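The record above relies on the fact that a circulant matrix is diagonalised by the DFT, so y = Cu reduces to a circular convolution of the first column a with u, computable in O(n log n) instead of O(n^2). A minimal verification sketch, assuming NumPy and SciPy are available; the dense scipy.linalg.circulant reference and the random test vectors are illustrative additions, not part of the original record:

import numpy as np
from numpy.fft import fft, ifft
from scipy.linalg import circulant

def circulant_multiplication(u, a):
    # y = C u, where C is the circulant matrix whose first column is `a`;
    # the DFT diagonalises C, so the product is a circular convolution.
    return np.real(ifft(fft(a) * fft(u)))

rng = np.random.default_rng(0)      # illustrative test data, not from the record
n = 8
a = rng.standard_normal(n)          # first column of C
u = rng.standard_normal(n)
assert np.allclose(circulant_multiplication(u, a), circulant(a) @ u)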
Compute the matrix-vector product y = Tu, where T is a Toeplitz matrix. All matrices are real.
def toeplitz_multiplication(u, c, r=None):
    n = len(u)
    if r is None:
        r = c
    u1 = zeros((2*n))
    u1[0:n] = u
    c = np.concatenate((c, [0], r[-1:0:-1]))
    y1 = circulant_multiplication(u1, c)
    return y1[0:n]
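A quick numerical check of the circulant-embedding trick above: the n-by-n Toeplitz matrix (first column c, first row r) is embedded into a 2n-by-2n circulant, multiplied with the FFT helper from the previous record, and the first n entries are kept. This is a sketch assuming NumPy and SciPy; the dense scipy.linalg.toeplitz reference and the random test data are illustrative, not part of the record:

import numpy as np
from numpy.fft import fft, ifft
from scipy.linalg import toeplitz

def circulant_multiplication(u, a):
    # same FFT-based helper as in the previous record
    return np.real(ifft(fft(a) * fft(u)))

def toeplitz_multiplication(u, c, r=None):
    # Embed the n x n Toeplitz matrix (first column c, first row r) into a
    # 2n x 2n circulant, multiply via the FFT, and keep the first n entries.
    n = len(u)
    if r is None:
        r = c
    u1 = np.zeros(2 * n)
    u1[:n] = u
    c_ext = np.concatenate((c, [0], r[-1:0:-1]))
    return circulant_multiplication(u1, c_ext)[:n]

rng = np.random.default_rng(1)      # illustrative test data, not from the record
n = 6
c = rng.standard_normal(n)
r = rng.standard_normal(n)
r[0] = c[0]                         # corner element must match the first column
u = rng.standard_normal(n)
assert np.allclose(toeplitz_multiplication(u, c, r), toeplitz(c, r) @ u)
assert np.allclose(toeplitz_multiplication(u, c), toeplitz(c, c) @ u)   # symmetric case r = c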
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def matrix_vector_prod(m,u):\n each_product = []\n for v in m:\n each_product.append(dot_prod(v, u))\n return each_product", "def matmul(x, y):\n if len(list(y.size())) == 2:\n # if one of them is a vector (i.e. wanting to do MV mult)\n z = torch.zeros(2, x.size()[1], dtype=torch.double, device=x.device)\n z[0] = torch.mv(x[0], y[0]) - torch.mv(x[1], y[1])\n z[1] = torch.mv(x[0], y[1]) + torch.mv(x[1], y[0])\n\n if len(list(y.size())) == 3:\n z = torch.zeros(\n 2, x.size()[1], y.size()[2], dtype=torch.double, device=x.device\n )\n z[0] = torch.matmul(x[0], y[0]) - torch.matmul(x[1], y[1])\n z[1] = torch.matmul(x[0], y[1]) + torch.matmul(x[1], y[0])\n\n return z", "def __matmul__(self, q: np.ndarray) -> np.ndarray:\n return self.product(q)", "def toeplitz_inverse_multiplication(u, x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4):\n\n y = fft(D_phi*u)\n a = Lambda_1*fft(D_psi*(1/D_phi)*ifft(Lambda_2*y))\n b = Lambda_3*fft(D_psi*(1/D_phi)*ifft(Lambda_4*y))\n y = (1/D_psi)*real(ifft(a-b))/(x_0*(phi-psi))\n \n return y", "def matmul(x, y):\n return np.matmul(x, y)", "def _mul(*args):\n\treturn functools.reduce(numpy.dot, args)", "def bd_toeplitz_inverse_multiplication(u, *arrs):\n \n y = zeros(shape(u))\n n_start = 0\n n_end = 0\n for t in arrs:\n n_start = n_end\n n_end += len(t[3]) # len(t[3]) is the length of the block\n y[n_start:n_end] = toeplitz_inverse_multiplication(u[n_start:n_end], *t)\n assert len(y) == n_end\n return y", "def test_matrix_product(self, use_cache):\n\n key = jrandom.PRNGKey(0)\n dim = 50\n max_power = 25\n\n matrix = jrandom.normal(key, (dim, dim)) / 10\n vector = jnp.ones((dim,), dtype=jnp.float32)\n\n if use_cache:\n mpstate = model_utils.CachedMatrixPowerState.precompute(matrix, max_power)\n else:\n mpstate = model_utils.LazyMatrixPowerState(matrix)\n\n for t in range(max_power):\n result = mpstate.matrix_power_multiply(vector, t)\n expected = np.linalg.matrix_power(matrix, t) @ vector\n\n np.testing.assert_array_almost_equal(result, expected, decimal=1)", "def mul(Z,X,Y):", "def _dot_product_attention_inner_relative(x, y, z, transpose):\n batch_size, heads, length, _ = x.size()\n\n # xy_matmul is [batch_size, heads, length, length or depth]\n xy_matmul = torch.matmul(x, y if not transpose else y.transpose(-2, -1))\n # x_t is [length, batch_size, heads, length or depth]\n x_t = x.permute(2, 0, 1, 3)\n # x_t_r is [length, batch_size * heads, length or depth]\n x_t_r = x_t.view(length, batch_size * heads, -1)\n # x_tz_matmul is [length, batch_size * heads, length or depth]\n x_tz_matmul = torch.matmul(x_t_r, z if not transpose else z.transpose(-2, -1))\n # x_tz_matmul_r is [length, batch_size, heads, length or depth]\n x_tz_matmul_r = x_tz_matmul.view(length, batch_size, heads, -1)\n # x_tz_matmul_r_t is [batch_size, heads, length, length or depth]\n x_tz_matmul_r_t = x_tz_matmul_r.permute(1, 2, 0, 3)\n\n return xy_matmul + x_tz_matmul_r_t", "def _z2matvecmul(self, mat, vec):\n prod = np.mod(np.dot(mat, vec), 2)\n return prod", "def inner_prod(x, y):\n z = torch.zeros(2, dtype=torch.double, device=x.device)\n\n if len(list(x.size())) == 2 and len(list(y.size())) == 2:\n z[0] = torch.dot(x[0], y[0]) - torch.dot(-x[1], y[1])\n z[1] = torch.dot(x[0], y[1]) + torch.dot(-x[1], y[0])\n\n if len(list(x.size())) == 1 and len(list(y.size())) == 1:\n z[0] = (x[0] * y[0]) - (-x[1] * y[1])\n z[1] = (x[0] * y[1]) + (-x[1] * y[0])\n\n return z", "def matvec(self, x):\n return self * x", "def _z2matmul(self, left, right):\n prod = np.mod(np.dot(left, right), 
2)\n return prod", "def naive_matrix_vector_dot(x, y):\n assert len(x.shape) == 2\n assert len(y.shape) == 1\n assert x.shape[1] == y.shape[0]\n\n z = np.zeros(x.shape[0])\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n z[i] += x[i, j] * y[j]\n return z", "def dot_prod(u,v):\n each_product = []\n for i in range(len(u)):\n each_product.append(u[i] * v[i])\n return sum(each_product)", "def outer_product(x,y):\n\n return x[:,0]*y[:,1] -x[:,1]*y[:,0]", "def matmul(xs: List[List[float]],\n ys: List[List[float]]) -> List[List[float]]:\n product = []\n for x_row in range(len(xs)):\n row = []\n for y_col in range(len(ys[0])):\n col = [ys[y_row][y_col] for y_row in range(len(ys))]\n row.append(Math.dot(xs[x_row], col))\n product.append(row)\n return product", "def toeplitz_inverse_multiplication_prep(T_column):\n \n phi=1\n psi=2\n assert phi != 0\n assert psi != 0\n assert phi != psi\n \n n = len(T_column)\n \n x = levinson(T_column, np.concatenate( (np.array([1]), np.zeros((n-1,))) ) )\n y = levinson(T_column, np.concatenate( (np.zeros((n-1,)), np.array([1])) ) )\n\n \n \n x_0 = x[0]\n \n D_phi = (phi**(1/n))**np.arange(0,n)\n D_psi = (psi**(1/n))**np.arange(0,n)\n\n Lambda_1 = fft(D_psi*x)\n Lambda_2 = fft(D_phi*np.concatenate(([phi*y[-1]], y[0:-1])))\n Lambda_3 = fft(D_psi*np.concatenate(([psi*y[-1]], y[0:-1])))\n Lambda_4 = fft(D_phi*x)\n \n return (x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4)", "def apply(self,v):\n return np.tensordot(self._transform, v, axes=([1],[0])) \\\n + self._translation", "def __mul__(self, othertr):\n res = self.dot(othertr)\n return res", "def vectordot(a, b):\n return np.sum(a * b, 1)", "def __matmul__(self, tensor):\n return self.matmul(tensor)", "def matmul(x, y, _pub):\n if x.shape[-1] != y.shape[-2]:\n pass # TODO: REPORT ERROR\n res = paillier_gpu.matmul_impl(x.flatten(), y.flatten(order='F'), x.shape, y.shape)\n\n return res", "def mat(self) -> np.ndarray:\n Tp = ToeplitzificationOperator(P=self.P, M=self.M, dtype=self.x.dtype)\n return Tp.matvec(self.x)", "def scalar_mult(v, u):\n return [v[i] * u[i] for i in range(len(v))]", "def simple_doct_product(u, v):\n v = [i / (sum(v)) for i in v]\n\n return np.dot(u, v)", "def vdot(x, v, pub):\n x_flatten = x.flatten()\n v_flatten = v.flatten()\n mul_res = paillier_gpu.mul_impl(v_flatten, x_flatten)\n\n return paillier_gpu.sum_impl(mul_res)", "def matrix_dot(*args):\r\n rval = args[0]\r\n for a in args[1:]:\r\n rval = theano.tensor.dot(rval, a)\r\n return rval", "def naive_vector_dot(x, y):\n assert len(x.shape) == 1\n assert len(y.shape) == 1\n assert x.shape[0] == y.shape[0]\n\n z = 0\n for i in range(x.shape[0]):\n z += x[i] * y[i]\n return z", "def __mul__(left, right):\n \n if isinstance(left, Plucker) and isinstance(right, Plucker):\n # reciprocal product\n return np.dot(left.uw, right.v) + np.dot(right.uw, left.v)\n elif isinstance(left, Plucker) and arg.ismatrix(right, (4,None)):\n return left.skew @ right; # postmultiply by 4xN", "def unwhiten(self, U, A, m):\n X = np.matmul(A, U.T).T\n X += m\n\n return X", "def RHS(y,t):\r\n\r\n return np.multiply(A.dot(y),ones-y)-beta*y", "def _multiply_matrix(self, v):\n\n self.inputs.grad.data.zero_()\n\n with torch.no_grad():\n v_features = self.lpips_model.features(self.inputs.detach() +\n self.h * v)\n D_phi_v = (\n normalize_flatten_features(v_features) -\n self.input_features\n ) / self.h\n\n torch.sum(self.input_features * D_phi_v).backward(retain_graph=True)\n\n return self.inputs.grad.data.clone()", "def 
transforms_multiply(t0s, t1s):\r\n \r\n return ut.matrix_multiply(t0s, t1s)", "def matrix_deflation(X_curr, Y_curr, X_orig, Y_orig, u, v):\n Xp = X_curr\n Yp = Y_curr\n #u = u / (np.sqrt(np.sum(u**2)+1e-7))\n #v = v / (np.sqrt(np.sum(v**2)+1e-7))\n\n qx = np.dot(Xp.T,np.dot(X_orig,u))\n qx = qx / (np.sqrt(np.sum(qx**2)+1e-7))\n #qx = qx.astype('float16')\n Xp = Xp - np.dot(Xp,qx).dot(qx.T)\n X = Xp.reshape(1,Xp.shape[0],Xp.shape[1])\n\n qy = np.dot(Yp.T,np.dot(Y_orig,v))\n qy = qy / (np.sqrt(np.sum(qy**2)+1e-7))\n #qy = qy.astype('float16')\n Yp = Yp - np.dot(Yp,qy).dot(qy.T)\n Y = Yp.reshape(1,Yp.shape[0],Yp.shape[1])\n\n return X, Y", "def u_t(self):\n\t\tdim = self.dim \n\t\ttim_all = self.tim_all\n\t\t#ctrl = self.ctrl\n\t\tH0 = self.H0\n\t\tHctrl = self.Hctrl\n\n\t\tu_all = np.zeros((tim_all+1,dim,dim),dtype = complex)\n\t\tu_all[0,:,:] = np.eye(dim)\n\n\t\tfor tim in xrange(tim_all):\n\t\t\tH = H0 + Hctrl[tim]#np.matrix( ctrl[i,tim] * np.array(Hctrl[i]))\n\t\t\tu_all[tim+1,:,:] = np.dot(self.u_dt(H,tim), u_all[tim,:,:])\n\n\n\t\treturn u_all", "def kronecker_prod(x, y):\n if len(list(x.size())) != 3 or len(list(y.size())) != 3:\n raise ValueError(\"An input is not of the right dimension.\")\n\n z = torch.zeros(\n 2,\n x.size()[1] * y.size()[1],\n x.size()[2] * y.size()[2],\n dtype=torch.double,\n device=x.device,\n )\n\n row_count = 0\n\n for i in range(x.size()[1]):\n for k in range(y.size()[1]):\n column_count = 0\n for j in range(x.size()[2]):\n for l in range(y.size()[2]):\n\n z[0][row_count][column_count] = (x[0][i][j] * y[0][k][l]) - (\n x[1][i][j] * y[1][k][l]\n )\n z[1][row_count][column_count] = (x[0][i][j] * y[1][k][l]) + (\n x[1][i][j] * y[0][k][l]\n )\n\n column_count += 1\n row_count += 1\n\n return z", "def product_on_basis(self, t1, t2):\n return tensor( (module.monomial(x1)*module.monomial(x2) for (module, x1, x2) in zip(self._sets, t1, t2)) ) #.", "def outer_prod(x, y):\n if len(list(x.size())) != 2 or len(list(y.size())) != 2:\n raise ValueError(\"An input is not of the right dimension.\")\n\n z = torch.zeros(2, x.size()[1], y.size()[1], dtype=torch.double, device=x.device)\n z[0] = torch.ger(x[0], y[0]) - torch.ger(x[1], -y[1])\n z[1] = torch.ger(x[0], -y[1]) + torch.ger(x[1], y[0])\n\n return z", "def __mul__(self, other):\n if isinstance(other, Vector):\n # Matrix vector product\n v = Vector(list())\n for n in range(len(other.vectors)):\n v += scale(other.vectors[n][n], self.vectors[n])\n return v\n elif isinstance(other, Matrix):\n # Matrix matrix product\n if self.n != other.m:\n raise ValueError(\"Wrong fucking sizes, nøøb\")\n\n selfVectors = self.vectors\n selfColVectors = self.transpose()\n otherVectors = other.vectors\n otherColVectors = other.transpose()\n vectors = list()\n for col in range(other.n):\n cordinator = []\n\n for row in range(self.m):\n coord = 0\n\n for k in range(other.m):\n coord += (\n selfVectors[row].coords[k]\n * otherColVectors.vectors[col].coords[k]\n )\n\n cordinator.append(coord)\n\n v = Vector(cordinator)\n vectors.append(v)\n matrix = Matrix(vectors)\n matrix = matrix.transpose()\n return matrix\n elif isinstance(other, int) or isinstance(other, float): # Skalering af matrix\n for i in range(len(self.vectors)):\n self.vectors[i] *= other\n else:\n raise ValueError(\n \"Can only multiply Matrix with Matrix, Vector, Integer or Float\"\n )", "def mult(p, q):\n if p.ndim == 1 and q.ndim > 1:\n p = np.tile(p,(q.shape[0],1))\n if q.ndim == 1 and p.ndim > 1:\n q = np.tile(q,(p.shape[0],1))\n if q.ndim == 1 and p.ndim == 1:\n p = 
p.reshape((1,4))\n q = q.reshape((1,4))\n\n ps = p[:,3]\n qs = q[:,3]\n pv = p[:,:3]\n qv = q[:,:3]\n\n pq = np.empty_like(p)\n pq[:,3] = ps * qs \n pq[:,3] -= arraylist_dot(pv, qv).flatten()\n pq[:,:3] = ps[:,np.newaxis] * qv \n pq[:,:3] += pv * qs[:,np.newaxis] \n pq[:,:3] += np.cross(pv , qv)\n\n #opposite sign due to different convention on the basis vectors\n #pq *= -1\n return pq", "def dot_vectors(u, v):\n return u[0] * v[0] + u[1] * v[1] + u[2] * v[2]", "def _eval_transpose(self):\n coeff, matrices = self.as_coeff_matrices()\n return MatMul(\n coeff, *[transpose(arg) for arg in matrices[::-1]]).doit()", "def __mul__(self, other): \n if isinstance(other, Iterable):\n # dot product\n return self.x * other[0] + self.y * other[1]\n else:\n # scalar product\n return Vector(self.x * other, self.y * other)", "def my_matmul(x, y):\n ##\n cmd = getattr(th, \"matmul\")\n x1, x2 = my_cut(x)\n y1, y2 = my_cut(y)\n x2y1 = cmd(x2, y1)\n x1y2 = cmd(x1, y2)\n x2y2 = cmd(x2, y2)\n ret = (x2y1 + x1y2) % int24field * int24field + x2y2\n ret = int48module(ret)\n return ret", "def matrix_dot(*args):\n rval = args[0]\n for a in args[1:]:\n rval = tm.dot(rval, a)\n return rval", "def python_nonsquare_matrix_mult(matrix):\n\n transposed_matrix = np.zeros([matrix.shape[1],matrix.shape[0]])\n start = time.time()\n # for i in range(matrix.shape[0]):\n # for j in range(matrix.shape[1]):\n # transposed_matrix[j,i] = matrix[i,j]\n\n transposed_matrix = np.transpose(matrix)\n product = matrix.dot(transposed_matrix)\n\n # transposed_matrix = np.transpose(matrix)\n end = time.time()-start\n\n # print(\"Python Golden Transpose: %s\" % product)\n # print('python transpose time: %.2E' % end)\n return [product, end]", "def test_gemm_with_vector():\r\n X, Y, Z, a, b = XYZab()\r\n v = T.vector()\r\n\r\n def my_just_gemm(o):\r\n i = [X, Y, Z, a, b, v]\r\n ishapes = [(4, 3), (3, 5), (4, 5), (), (), (5, )]\r\n rval = just_gemm(i, o, ishapes=ishapes)\r\n\r\n my_just_gemm([v + T.dot(X, Y) * a + Z * b])\r\n my_just_gemm([v + a * T.dot(X, Y) + b * Z])\r\n my_just_gemm([v + b * Z + a * T.dot(X, Y)])\r\n my_just_gemm([v + T.dot(X, Y) * a - Z * b])\r\n my_just_gemm([v + a * T.dot(X, Y) - b * Z])\r\n my_just_gemm([v + b * Z - a * T.dot(X, Y)])\r\n\r\n #with N multiplications instead of just one\r\n my_just_gemm([v + (b * b) * Z * a + (a * a) * T.dot(X, Y) * b])\r\n my_just_gemm([v + Z + T.dot(X, Y)])\r\n my_just_gemm([v + Z * b + T.dot(X, Y)])\r\n my_just_gemm([v + Z + a * b * a * T.dot(X, Y)])\r\n my_just_gemm([v + (b * b) * Z * a - (a * a) * T.dot(X, Y) * b])\r\n my_just_gemm([Z - T.dot(X, Y) + v])\r\n my_just_gemm([Z * b - T.dot(X, Y) + v])\r\n my_just_gemm([Z - a * b * a * T.dot(X, Y) + v])", "def de_mult(self,z):\n if isinstance(z,np.ndarray) and z.size>1:\n assert np.all(np.diff(z)>0.)\n return (z+1.)**(3.*(1.+self.w))", "def multiply(traj, result_list):\n z=traj.x*traj.y\n result_list[traj.v_idx] = z", "def multiply(t):\n return mul(*t)", "def rhs(t, conc, S_matrix, educt_matrix, kin_par):\n fluxes = calculate_fluxes(conc, S_matrix, educt_matrix, kin_par)\n return np.dot(S_matrix, fluxes)", "def multiply_vector(self, dv, spm):\n product = []\n for a, b in zip(dv, spm):\n product.append(a * b)\n return product", "def multiply(matrix, vector):\n result = []\n for row in matrix:\n assert len(row) == len(vector)\n result.append(sum([a*b for (a, b) in zip(row, vector)]))\n return Vector3D.from_list(result)", "def mat_vec_product(self, psi, t):\n\tx = zeros(self.vib_basis_size * len(self.my_tasks), dtype = complex)\n\n\t#Matrix 
vector product.\n\tfor i, j in enumerate(self.my_tasks):\n\t slice_x = slice(i * self.vib_basis_size, (i + 1) * self.vib_basis_size)\n\t slice_psi = slice(j * self.vib_basis_size, (j + 1) * self.vib_basis_size)\n\t \n\t x[slice_x] = dot(self.h_0[:,:,i], psi[slice_psi])\n\t\n\ty = dot(self.h_1, psi)\n\n\t#Weigh with field strength, and add components.\n\tpsi_final = x + self.time_function(t) * y\n\t\n\treturn psi_final", "def phi_t(self):\n\t\tdim = self.dim\n\t\ttim_all = self.tim_all \n\t\tphi_all = np.zeros((tim_all+1,dim,1),dtype = complex)\n\t\tphi_all[0,:,:] = self.phi_i[:]\n\t\tu_all = self.u_t()\n\n\t\tfor tim in xrange(tim_all):\n\t\t\tphi_all[tim+1,:,:] = np.dot(u_all[tim+1,:,:], phi_all[0,:,:])\n\t\t\n\t\treturn phi_all", "def dot_product(u, v):\n ret = 0.0\n for i in range(len(u)):\n ret += float(float(u[i]) * float(v[i]))\n return ret", "def matrix_multiplication_loop(x_matrix, y_matrix):\n result = []\n for i, row in enumerate(x_matrix):\n row_vector = []\n for j in range(len(y_matrix[0])):\n product = 0\n for k in range(len(row)):\n product += x_matrix[i][k] * y_matrix[k][j]\n row_vector.append(product)\n result.append(row_vector)\n return result", "def transform(self, v):\n #matrix vector multiply, convert from matrix to array type at the end\n return np.array( v * self.M )", "def f(t, x, n, v):\n total = 0\n for i in range(n+1):\n for j in range(n+1):\n for k in range(v):\n total = t[i][j] * x[i][j][k]", "def dot_product_scores(q_vectors: T, ctx_vectors: T) -> T:\n # q_vector: n1 x D, ctx_vectors: n2 x D, result n1 x n2\n r = torch.matmul(q_vectors, torch.transpose(ctx_vectors, 0, 1))\n return r", "def hadamard(u: np.ndarray, v: np.ndarray) -> np.ndarray:\n\n return u * v", "def _compute_t_matrix(self):\n self.t_matrix = self._kronecker_product(\n tf.diag(tf.reshape(self.likelihood_variances, [-1])),\n tf.eye(self.n_points_int, dtype=tf.float64))\n return", "def test_trotter_hamiltonian_scalar_mul(nqubits=3):\n local_ham = hamiltonians.TFIM(nqubits, h=1.0, trotter=True)\n target_ham = 2 * hamiltonians.TFIM(nqubits, h=1.0, numpy=True)\n local_dense = (2 * local_ham).dense\n np.testing.assert_allclose(local_dense.matrix, target_ham.matrix)\n\n local_ham = hamiltonians.TFIM(nqubits, h=1.0, trotter=True)\n local_dense = (local_ham * 2).dense\n np.testing.assert_allclose(local_dense.matrix, target_ham.matrix)", "def scalar_mult(x, y, out=None):\n if out is None:\n out = torch.zeros_like(y)\n else:\n if out is x or out is y:\n raise RuntimeError(\"Can't overwrite an argument!\")\n\n out[0] = (x[0] * y[0]) - (x[1] * y[1])\n out[1] = (x[0] * y[1]) + (x[1] * y[0])\n\n return out", "def monomio(x,datos_x,datos_y):\n matriz=np.zeros([datos_x.shape[0],datos_x.shape[0]])\n for j in range(datos_x.shape[0]): #Se contruye la matriz de vandermonde\n matriz[:,j]= datos_x**(j)\n matriz,datos_y=pivoteo_parcial(matriz,datos_y)\n x1= descompo_LU(matriz,datos_y)# se resulve el sistema de ecuaciones por metodo directo\n\n puntos=[] #se almacenan los valores de y para cada punto de x que se quiera calcular \n\n for p in x: #va a ir tomando los valores de x uno por uno \n prod=np.zeros(x1.shape[0])\n for i in range(x1.shape[0]):\n if i==0:\n prod[i]=1\n else:\n prod[i]=prod[i-1]*p #Se hace el calculo de los polimonios con todos los valores de x \n solucion=x1@prod\n puntos.append(solucion) # se agregan los valores de y a la lista final \n puntos=np.array(puntos)# se convierte la lista en array para mejor manejo\n\n return puntos", "def matrix_mult(m1, m2):\n pass", "def 
calcul_travail_ext(x,modU):\n\tr = np.sqrt(x[:,0]*x[:,0] + x[:,1]*x[:,1])\n\tf = r[:]*modU[:]*modU[:]\n\tW = PointMilieu(r,f)\n\treturn W", "def __matmul__(self, qubit):\n if isinstance(qubit, str):\n qubit = self.get_index(qubit)\n return self.compiled[qubit].y", "def interior_tensor_product(mx, dim_a, dim_b, e=None):\n assert _np.shape(mx) == (dim_a * dim_b, dim_a * dim_b), \"Dimensions do not agree with matrix size\"\n assert _np.shape(e)[0] == _np.shape(e)[1], \"e should be a square matrix\"\n basis_a = matrix_units(dim_a)\n basis_b = matrix_units(dim_b)\n return sum((_np.trace(_np.dot(mx, _np.kron(unit_a, unit_b).T)) * multikron([unit_a, e, unit_b])\n for unit_a in basis_a for unit_b in basis_b))", "def test_mueller_product(self, ):\n mdims = ('mueller_v', 'mueller_h')\n mm_1 = xr.DataArray(np.random.rand(4, 4, ), dims=mdims, )\n mm_2 = xr.DataArray(np.identity(4, ), dims=mdims, )\n sv_1 = xr.DataArray(np.random.rand(4, ), dims=('stokes', ), )\n\n assert_almost_equal(mm_1.values, mueller_product(mm_1, mm_2).values, )\n assert_almost_equal(mm_1.values, mueller_product(mm_2, mm_1).values, )\n assert_almost_equal(sv_1.values, mueller_product(mm_2, sv_1).data, )", "def matrix_mult_vec(matrix_a, x):\n m = len(matrix_a)\n b = [0 for i in xrange(m)]\n for i in xrange(m):\n b[i] = dot_product(matrix_a[i], x)\n return b", "def alternate_ls (u_num, Y, P, C, reg):\n\n\t# get # of items/users and # of latent factors\n\t[i_num, f_num] = Y.shape\n\n\t# output buffer\n\tX = np.zeros((u_num, f_num))\n\n\t# precalculate YtY to improve the performance\n\tYtY = Y.T * Y\n\n\t# iterate over each user/item\n\tfor u in range(u_num):\n\n\t\t# store the diagonal elements of the matrix Cu discussed in the paper in a vector\n\t\tCu = C[u,:]\n\n\t\t# store the coresponding row/column of the preference matrix\n\t\tPu = P[u,:]\n\n\t\t# compute Cu-I\n\t\tCu_I = Cu - 1\n\n\t\t# calculate Yt(Cu-I)Y\n\t\tYtCu_IY = np.zeros((f_num, f_num))\n\t\tCuIY = np.multiply(Y, Cu_I.T) # weight each row of Y with Cu-I\n\t\tfor row in range(f_num):\n\t\t\tfor col in range(f_num):\n\t\t\t\tYtCu_IY[row,col] = Y[:,row].T * CuIY[:,col]\n\t\t\n\t\t# left term : ((YtCuY + regI)^(-1)) = (YtY + Yt(Cu-I)Y + regI)^(-1)\n\t\tleft_inv = YtY + YtCu_IY + reg*np.eye(f_num)\n\t\tleft = np.linalg.inv(left_inv)\n\n\t\t# right term : YtCuPu\n\t\tright = Y.T * np.multiply(Cu.T, Pu.T)\n\n\t\t# compute the latent factor of the user/item\n\t\tx = left * right\n\t\t\n\t\t# store it in a matrix\n\t\tX[u,:] = x.T\n\n\t# return an MxF or NxF matrix\n\treturn np.matrix(X)", "def test_vector_dot_product(self):\n\n # Example 1.2\n vector_p = np.array([0.5, 0.0, 0.5])\n vector_q = np.array([0.5, 0.5, 0.0])\n crystal = crystal_system.Tetragonal(0.5, 1.0)\n magnitude_ref_nm = 5.0/16.0\n\n vector_d = vector_p - vector_q\n magnitude_nm = vector.dot_product(crystal, vector_d, vector_d)\n\n self.assertAlmostEqual(magnitude_ref_nm, magnitude_nm, 5)\n\n # Example 1.3\n vector_p = np.array([1.0, 2.0, 0.0])\n vector_q = np.array([3.0, 1.0, 1.0])\n crystal = crystal_system.Tetragonal(0.5, 1.0)\n magnitude_ref_nm = 5.0/4.0\n\n magnitude_nm = vector.dot_product(crystal, vector_p, vector_q)\n self.assertAlmostEqual(magnitude_ref_nm, magnitude_nm, 5)\n\n magnitude_nm = vector.dot_product(crystal, vector_q, vector_p)\n self.assertAlmostEqual(magnitude_ref_nm, magnitude_nm, 5)\n\n #self.fail(\"Test if the testcase is working.\")", "def _factorsY(self, inputs):\n return tensor.dot(inputs[1], self.wyf)", "def compute_hessian_vector_product(self, function, arguments):", "def 
__mul__(self, other):\n #\n # TODO - your code here\n #\n final_matrix = []\n for i in range(self.h):\n temp_row = []\n for j in range(other.w):\n # take dot-product of row of\n # matrix in 1st arg with col of\n # matrix in 2nd arg\n temp_row.append(dot_product(get_row(self.g, i), get_col(other.g, j)))\n final_matrix.append(temp_row)\n return Matrix(final_matrix)\n # TODO - your code here", "def Pol_Newton_un_punto(x,datos_x,datos_y):\n n = datos_x.shape[0]\n matriz=np.ones([n,n])\n for j in range(n):\n for i in range(n):\n if j>i:\n matriz[i][j]=0\n else:\n producto=1\n for k in range(j):\n producto=producto*(datos_x[i]-datos_x[k])\n matriz[i][j]=producto\n matriz,datos_y1= pivoteo_parcial(matriz,datos_y)\n x1 = descompo_LU(matriz,datos_y1)\n prod=np.zeros(x1.shape[0])\n for i in range(n):\n if i==0:\n prod[i]=1\n else: \n prod[i]=prod[i-1]*(x-datos_x[i-1])\n solucion=x1@prod\n return solucion", "def predict_mat(self):\n return self.u.dot(self.v.T)", "def learned_RHS(t,y,q,x,desc):\n \n \n Ux_mat = create_Ux_mat(x)\n Uxx_mat = create_Uxx_mat(x)\n\n return (q[desc.index('u_{x}')]*Ux_mat.dot(y) + \n q[desc.index('u_{xx}')]*Uxx_mat.dot(y) +\n q[desc.index('u^2')]*y**2 +\n q[desc.index('u')]*y + \n q[desc.index('u^2u_{x}')]*(y**2)*Ux_mat.dot(y) + \n q[desc.index('uu_{x}')]*y*Ux_mat.dot(y) + \n q[desc.index('u^2u_{xx}')]*(y**2)*Uxx_mat.dot(y) + \n q[desc.index('uu_{xx}')]*y*Uxx_mat.dot(y) + \n q[desc.index('u_{x}^2')]*Ux_mat.dot(y)**2)", "def __mul__(self, tensor):\n return self.mul(tensor)", "def inner_product(alpha, F, beta):\n return np.dot(alpha, np.dot(F, beta))", "def test_matmul_vv(self):\n self.check_dot_vv(matmul_usecase, \"'@'\")", "def lazy_matrix_mul(m_a, m_b):\n return np.dot(m_a, m_b)", "def matTimesVec(M, x):\n return [dot(m, x) for m in M]", "def tensdot(polyList,order,trunc):\n\n def reshape(poly,expo):\n\n poly.coef = poly[:][:,expo]\n poly.expo = expo\n return poly\n\n dim = len(polyList)\n expo = indextens(order,dim,trunc)\n nbrPoly = expo.shape[1]\n coef = np.eye(nbrPoly)\n\n # Tensor product of the univariate basis\n\n for i in range(dim): polyList[i] = reshape(polyList[i],expo[i])\n for i in range(nbrPoly): coef[i] = np.prod([polyList[j][expo[j,i]] for j in range(dim)],axis=0)\n\n poly = Polynomial(expo,coef,1)\n return poly", "def __mul__(self,v2):\n\t\tif(isinstance(v2,Vect2D)):\n\t\t\treturn np.dot(self._vec,v2._vec)\n\t\telse:\n\t\t\treturn Vect2D(v2*self._vec)", "def T(self) -> BaseMatrix:", "def T(self) -> BaseMatrix:", "def eval_f(self, u, t):\n f = self.f_init\n f[:] = self.A.dot(u.flatten()).reshape(self.nvars)\n return f", "def __matmul__(self, csys):\n self._transform(csys)\n return self", "def vectorMultiply(v, f):\n return [x * f for x in v]", "def outer_product(x):\n return keras.backend.batch_dot(\n x[0]\n , x[1]\n , axes=[1,1]\n ) / x[0].get_shape().as_list()[1]", "def forward(self, x: torch.Tensor) -> torch.Tensor:\n y = x.matmul(self.melmat)\n return y", "def mul_dense(x, y): # pragma: no cover\n return x * y", "def vec_dot(x, y):\r\n return sum(a * b for a, b in zip(x, y))", "def _parameter_dot_product(x: JaxComplexArray, y: JaxComplexArray, n_axes: int) -> JaxRealArray:\n axes = tuple(range(-n_axes, 0))\n return jnp.sum(x * y, axis=axes).real", "def scalar_product(self, u, v):\n sp = 0.0\n n1 = len(u)\n n2 = len(v)\n i = j = 0\n d = self.dictionary_db\n while (i < n1 and j < n2):\n if u[i].word_info(d).index > v[j].word_info(d).index:\n j += 1\n elif v[j].word_info(d).index > u[i].word_info(d).index:\n i += 1\n else:\n sp += self.tf_idf(u[i]) * 
self.tf_idf(v[j])\n i += 1\n j += 1\n\n return sp", "def apply(self, v):\n u = np.zeros(self.Dimension, dtype=complex)\n for me in self.Elements:\n for index in range(v.Elements.size):\n if index == me.j:\n u[me.i] += me.val * v.Elements[index]\n u = Vector(u) \n return u" ]
[ "0.7003199", "0.6513981", "0.64759356", "0.6454179", "0.6377554", "0.6326698", "0.6245358", "0.620894", "0.6208685", "0.61977005", "0.6195611", "0.61694974", "0.6168602", "0.6134469", "0.6106113", "0.60868716", "0.6082444", "0.60823506", "0.6070701", "0.60688484", "0.6063607", "0.60420716", "0.60411894", "0.6021142", "0.6020375", "0.60189974", "0.5976037", "0.5971896", "0.59694517", "0.5931813", "0.5929974", "0.5898846", "0.5893105", "0.5855257", "0.58545524", "0.58442146", "0.5836224", "0.5832956", "0.5814374", "0.58080685", "0.57904613", "0.57875574", "0.57833064", "0.57730234", "0.5772904", "0.57687515", "0.57664347", "0.57564145", "0.57501626", "0.5744411", "0.5741991", "0.57292855", "0.57247925", "0.5714161", "0.5701248", "0.5698843", "0.56850976", "0.56831443", "0.56521136", "0.56448597", "0.5643709", "0.5640813", "0.5639851", "0.56366783", "0.5632932", "0.5632897", "0.5624747", "0.5620976", "0.5618426", "0.5617202", "0.5615725", "0.5612312", "0.5605161", "0.5604705", "0.5597466", "0.5597071", "0.5594331", "0.5587542", "0.5583664", "0.55818576", "0.5576618", "0.55596435", "0.55546635", "0.55527973", "0.55502707", "0.5547592", "0.5543349", "0.55428696", "0.55401945", "0.55401945", "0.5539136", "0.553875", "0.55359316", "0.55332065", "0.5527199", "0.55229384", "0.5518009", "0.55145466", "0.54998636", "0.5499449" ]
0.63380134
5
Solves Tx=b using the Levinson algorithm, where T is a positive definite symmetric Toeplitz matrix and b is a real vector
def levinson(r, b): n = len(b) y = zeros((n,)) x = zeros((n,)) # normalize the system so that the T matrix has diagonal of ones r_0 = r/r[0] b_0 = b/r[0] if n == 1: return b_0 y[0] = -r_0[1] x[0] = b_0[0] beta = 1 alpha = -r_0[1] for k in range(0,n-1): beta = (1 - alpha*alpha)*beta mu = (b_0[k+1] - dot(r_0[1:k+2], x[k::-1])) /beta x[0:k+1] = x[0:k+1] + mu*y[k::-1] x[k+1] = mu if k < n-2: alpha = -(r_0[k+2] + dot(r_0[1:k+2], y[k::-1]))/beta y[0:k+1] = y[0:k+1] + alpha * y[k::-1] y[k+1] = alpha return x
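A minimal usage sketch for the levinson routine above, with made-up values for the first Toeplitz column r and the right-hand side b; the dense matrix T is assembled here only to check the answer, and numpy is assumed to supply the zeros/dot names used inside levinson.

    import numpy as np
    from numpy import zeros, dot   # names used inside levinson above

    # first column of a symmetric positive definite Toeplitz matrix
    # (diagonally dominant, so the Levinson recursion is well conditioned)
    r = np.array([4.0, 1.0, 0.5, 0.25])
    b = np.array([1.0, 2.0, 3.0, 4.0])

    x = levinson(r, b)             # O(n^2) Toeplitz solve

    # build the dense T only to verify the solution
    n = len(r)
    T = np.array([[r[abs(i - j)] for j in range(n)] for i in range(n)])
    print(np.allclose(T @ x, b))   # expected: True for this well-conditioned system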
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _tridisolve(d, e, b, overwrite_b=True):\n\t\tN = len(b)\n\t\t# work vectors\n\t\tdw = d.copy()\n\t\tew = e.copy()\n\t\tif overwrite_b:\n\t\t\tx = b\n\t\telse:\n\t\t\tx = b.copy()\n\t\tfor k in range(1, N):\n\t\t\t# e^(k-1) = e(k-1) / d(k-1)\n\t\t\t# d(k) = d(k) - e^(k-1)e(k-1) / d(k-1)\n\t\t\tt = ew[ k - 1 ]\n\t\t\tew[ k - 1 ] = t / dw[ k - 1 ]\n\t\t\tdw[ k ] = dw[ k ] - t * ew[ k - 1 ]\n\t\tfor k in range(1, N):\n\t\t\tx[ k ] = x[ k ] - ew[ k - 1 ] * x[ k - 1 ]\n\t\tx[ N - 1 ] = x[ N - 1 ] / dw[ N - 1 ]\n\t\tfor k in range(N - 2, -1, -1):\n\t\t\tx[ k ] = x[ k ] / dw[ k ] - ew[ k ] * x[ k + 1 ]\n\n\t\tif not overwrite_b:\n\t\t\treturn x", "def tridisolve(d, e, b, overwrite_b=True):\r\n N = len(b)\r\n # work vectors\r\n dw = d.copy()\r\n ew = e.copy()\r\n if overwrite_b:\r\n x = b\r\n else:\r\n x = b.copy()\r\n for k in range(1, N):\r\n # e^(k-1) = e(k-1) / d(k-1)\r\n # d(k) = d(k) - e^(k-1)e(k-1) / d(k-1)\r\n t = ew[k - 1]\r\n ew[k - 1] = t / dw[k - 1]\r\n dw[k] = dw[k] - t * ew[k - 1]\r\n for k in range(1, N):\r\n x[k] = x[k] - ew[k - 1] * x[k - 1]\r\n x[N - 1] = x[N - 1] / dw[N - 1]\r\n for k in range(N - 2, -1, -1):\r\n x[k] = x[k] / dw[k] - ew[k] * x[k + 1]\r\n\r\n if not overwrite_b:\r\n return x", "def housetriang_solve(A, b):\n\n n, _ = A.shape\n b = np.reshape(b.copy(), (n, 1))\n R, c = housetriang(A, b)\n x = np.reshape(rbackwardsolve(R, c, n), (n,))\n\n\n return x", "def trisolve(l, u, c, b):\n n = shape(b)[0]\n for k in range(1, n):\n b[k] -= l[k-1]*b[k - 1]\n b[n-1] /= u[n-1]\n for k in range(n-2,-1,-1):\n b[k] -= c[k]*b[k + 1]\n b[k] /= u[k]", "def ftlan_E1c(hop, v0, T, m=50, Min_b=10e-10, Min_m=5, kB=1, norm = np.linalg.norm):\n# def Tri_diag(a1, b1):\n# mat = np.diag(b1, -1) + np.diag(a1, 0) + np.diag(b1, 1)\n# e, w = np.linalg.eigh(mat)\n# return e, w\n\n beta = 1./(T * kB)\n E = 0.\n a, b = [], []\n v0 = v0/norm(v0)\n Hv = hop(v0)\n a.append(v0.dot(Hv))\n v1 = Hv - a[0] * v0\n b.append(norm(v1))\n if b[0] < Min_b:\n return 0\n\n v1 = v1/b[0]\n Hv = hop(v1)\n a.append(v1.dot(Hv))\n\n for i in range(1, m - 1):\n v2 = Hv - b[i - 1] * v0 - a[i] * v1\n b.append(norm(v2))\n if abs(b[i]) < Min_b:\n b.pop()\n break\n\n v2 = v2/b[i]\n Hv = hop(v2)\n a.append(v2.dot(Hv))\n v0 = v1.copy()\n v1 = v2.copy()\n \n a = np.asarray(a)\n b = np.asarray(b)\n\n eps, phi = Tri_diag(a, b)\n l = len(eps)\n# Eo = eps[0]\n# eps = eps-Eo\n exp_eps = np.exp(-beta * eps)\n E = np.sum(exp_eps * eps * phi[0, :]**2.)\n Z = np.sum(exp_eps * phi[0, :]**2.)\n# for i in range(len(eps)):\n# E += exp_eps[i] * eps[i] * phi[0, i]**2\n\n# E = E + Eo\n# de = eps[:, np.newaxis] - eps\n# for i in range(l):\n# E += eps[i] * phi[0, i]**2./np.sum(np.exp(-beta*de[:l, i])*(phi[0, :l]**2.))\n return E, Z", "def SelfDualNewtonSystem(A, b, c, e):\n \n n = A.shape[1]\n m = A.shape[0]\n \n b_bar = b - np.matmul(A,e)\n c_bar = c - e\n alpha = 1 + np.dot(c, e)\n beta = n + 2\n \n A_star = np.c_[A,-b,b_bar]\n C = np.zeros((n+2,n+2))\n C[0:n,n] = c\n C[n,0:n] = -C[0:n,n].T\n C[0:n,n+1] = -c_bar\n C[n+1,0:n] = -C[0:n,n+1].T\n C[n,n+1] = alpha\n C[n+1,n] = -C[n,n+1].T\n \n yA = np.r_[np.zeros((m,m)), -A_star.T, np.zeros((n+2, m))]\n xA = np.r_[A_star, C, np.eye(n+2)]\n sA = np.r_[np.zeros((m, n+2)), -np.eye(n+2), np.eye(n+2)]\n \n return np.c_[yA, xA, sA]", "def stbinv(A, B, C, D, y, t):\n # Description to help the user\n\n # calculate the number of samples of the output\n N = np.shape(y)[\n 1\n ] # the number of samples is the number of columns of the data matrix y\n\n # calculate system's dimensions: number of states and 
number of inputs\n m = B.shape[1] # number of inputs\n n = A.shape[0] # number of states\n\n # initialize the variable v (additional input)\n v = np.zeros((n, N)) # it will be important later\n\n # initializing the flag variable\n flag = 0\n # initializing the flag variable for the vrft method\n flag_vr = 0\n # initializing the counter of reduction steps done by the algorithm\n kround = 0\n\n # starting the loop of the reduction procedure\n while flag == 0:\n # run a step of the reduction order algorithm\n Ahat, Bhat, Chat, Dhat, yhat, vhat, nhat, phat, rhat = invredc(A, B, C, D, y, v)\n # increase the counter of reductions\n kround = kround + 1\n\n # preallocating the state vector of the inverse system\n xhat = np.zeros((nhat, N - kround)) # it must have N-kround samples\n # preallocating the calculated input\n uhat = np.zeros((m, N - kround))\n\n # defining the reduced time vector\n tt = t[:, 0 : N - kround]\n\n # test the conditions of invertibility\n if phat < m:\n # if this condition is true, then the algorithm has failed and it is not possible to find the inverse\n flag = 1\n flag_vr = 1\n # if this is the case, we print a message and end the execution\n # print('The inversion algorithm has failed')\n return uhat, tt, flag_vr\n else:\n if rhat == m:\n # ((rhat==m)&(rhat==phat)):\n # if this condition is true, then the algorithm is done. We can calculate the signal u\n flag = 2\n # calculating the inverse of the feedforward matrix\n # E=np.linalg.inv(Dhat)\n E = np.linalg.pinv(Dhat)\n else:\n # if none of the conditions above is true, then we need to proceed to another round of the reduction step of the algorithm\n A = Ahat\n B = Bhat\n C = Chat\n D = Dhat\n y = yhat\n v = vhat\n # after the reduction procedure is done, then the system can be inverted\n\n # calculating the dynamic matrix of the inverse system\n Ainv = Ahat - Bhat @ E @ Chat\n # eigenvalues of the inverse system's dynamic matrix\n wv, v = np.linalg.eig(Ainv) # w=eigenvalues, v=eigenvectors\n # calculating the input matrix of the inverse system\n Binv = Bhat @ E\n # calculating the output matrix of the inverse system\n Cinv = -E @ Chat\n # calculating the feedforward matrix of the inverse system\n Dinv = E\n\n # test if the inverse dynamic system is stable\n wbool = wv > 1\n wsum = np.sum(wbool)\n # test if wsum is greater than 1\n if wsum > 0:\n # if wsum is greater than 1, then, the inverse system is unstable, so we end the execution of the algorithm\n # print('The inverse system is unstable')\n flag_vr = 2\n return uhat, tt, flag_vr\n else:\n # if wsum=0, then the inverse system is stable, and we can calculate the input signal\n # calculate the first value for the output (t=0)\n uhat[:, 0] = Cinv @ xhat[:, 0] + Dinv @ yhat[:, 0]\n # calculate the states and the output of the inverse system\n for k in range(0, N - 1 - kround):\n xhat[:, k + 1] = Ainv @ xhat[:, k] + Binv @ yhat[:, k] + vhat[:, k]\n uhat[:, k + 1] = Cinv @ xhat[:, k + 1] + Dinv @ yhat[:, k + 1]\n\n return uhat, tt, flag_vr", "def toeplitz_inverse_multiplication_prep(T_column):\n \n phi=1\n psi=2\n assert phi != 0\n assert psi != 0\n assert phi != psi\n \n n = len(T_column)\n \n x = levinson(T_column, np.concatenate( (np.array([1]), np.zeros((n-1,))) ) )\n y = levinson(T_column, np.concatenate( (np.zeros((n-1,)), np.array([1])) ) )\n\n \n \n x_0 = x[0]\n \n D_phi = (phi**(1/n))**np.arange(0,n)\n D_psi = (psi**(1/n))**np.arange(0,n)\n\n Lambda_1 = fft(D_psi*x)\n Lambda_2 = fft(D_phi*np.concatenate(([phi*y[-1]], y[0:-1])))\n Lambda_3 = 
fft(D_psi*np.concatenate(([psi*y[-1]], y[0:-1])))\n Lambda_4 = fft(D_phi*x)\n \n return (x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4)", "def __solve(self, tsnMat, vecB):\n A_d = np.linalg.inv(np.dot(tsnMat.T, tsnMat))\n return np.dot(np.dot(A_d, tsnMat.T), vecB)", "def least_squares(y, tx):\n a = tx.T.dot(tx)\n b = tx.T.dot(y)\n w = np.linalg.solve(a,b)\n loss =compute_loss_LS(y,tx,w)\n return loss, w", "def solveForModeB1(X, M, n, maxInner, epsilon, tol,sita,Y1, lambta2):\n # Pi(n) = [A(N) kr A(N-1) kr ... A(n+1) kr A(n-1) kr .. A(1)]^T\n Pi = tensorTools.calculatePi(X, M, n)\n #print 'Pi size', Pi.shape\n #print 'pi='+str(Pi)\n #print(M.U[n])\n for iter in range(maxInner):\n # Phi = (X(n) elem-div (B Pi)) Pi^T\n #print X.vals.shape,X.shape\n #print X.vals.flatten().shape\n Phi = tensorTools.calculatePhi(X, M.U[n], Pi, n, epsilon=epsilon)\n #print('phi'+str(Phi))\n #print(Phi)\n # check for convergence that min(B(n), E - Phi(n)) = 0 [or close]\n kktModeViolation = np.max(np.abs(np.minimum(M.U[n], 1-Phi).flatten()))\n if (kktModeViolation < tol):\n break\n\n B=M.U[n]\n #print B.shape\n colNorm = np.apply_along_axis(np.linalg.norm, 0, B, 1)\n zeroNorm = np.where(colNorm == 0)[0]\n colNorm[zeroNorm] = 1\n B = B / colNorm[np.newaxis, :]\n tm=np.hstack((np.ones((B.shape[0],1)),B))\n Y1=Y1.reshape((Y1.shape[0],1))\n\n derive=-1.0*lambta2/B.shape[0]*np.dot((Y1-np.dot(tm,sita)),sita.T)\n #print derive.shape\n #print np.multiply(M.U[n],derive[:,1:]).shape\n #print np.multiply(M.U[n],Phi).shape\n M.U[n] = np.array(np.multiply(M.U[n],Phi))-np.array((np.multiply(M.U[n],derive[:,1:])))\n\n #print 'after'\n #print M.U[n][0]\n #print(\" Mode={0}, Inner Iter={1}, KKT violation={2}\".format(n, iter, kktModeViolation))\n return M, Phi, iter, kktModeViolation", "def solve(matrix, b):\n lu_matrix = decompose_to_LU(matrix)\n # get supporting vector y\n y = np.matrix(np.zeros([lu_matrix.shape[0], 1]), dtype=np.float64)\n for i in range(y.shape[0]):\n y[i, 0] = b[i] - lu_matrix[i, :i] * y[:i]\n\n # get vector of answers x\n x = np.matrix(np.zeros([lu_matrix.shape[0], 1]))\n for i in range(1, x.shape[0] + 1):\n x[-i, 0] = (y[-i] - lu_matrix[-i, -i:] * x[-i:, 0]) / lu_matrix[-i, -i]\n\n return np.array(x.transpose()[0], dtype=np.float64)[0]", "def forward_committor_sensitivity(T, A, B, index):\n\n n = len(T)\n set_X = numpy.arange(n) # set(range(n))\n set_A = numpy.unique(A) # set(A)\n set_B = numpy.unique(B) # set(B)\n set_AB = numpy.union1d(set_A, set_B) # set_A | set_B\n notAB = numpy.setdiff1d(set_X, set_AB, True) # list(set_X - set_AB)\n m = len(notAB)\n\n K = T - numpy.diag(numpy.ones(n))\n\n U = K[numpy.ix_(notAB.tolist(), notAB.tolist())]\n\n v = numpy.zeros(m)\n\n # for i in xrange(0, m):\n # for k in xrange(0, len(set_B)):\n # v[i] = v[i] - K[notAB[i], B[k]]\n v[:] = v[:] - K[notAB[:], B[:]]\n\n qI = numpy.linalg.solve(U, v)\n\n q_forward = numpy.zeros(n)\n #q_forward[set_A] = 0 # double assignment.\n q_forward[set_B] = 1\n #for i in range(len(notAB)):\n q_forward[notAB[:]] = qI[:]\n\n target = numpy.eye(1, n, index)\n target = target[0, notAB]\n\n UinvVec = numpy.linalg.solve(U.T, target)\n Siab = numpy.zeros((n, n))\n\n for i in range(m):\n Siab[notAB[i]] = - UinvVec[i] * q_forward\n\n return Siab", "def nnls(A, b, maxiter=None, eps=1e-11):\n m, n = A.shape\n x = np.zeros(n)\n P = []\n Z = list(range(n))\n k = 0\n\n if maxiter is None:\n maxiter = 3 * m\n\n while True:\n if k == maxiter:\n return x\n\n w = np.matmul(A.T, (b - np.matmul(A, x)))\n if Z == [] or np.all(w[Z] <= 
eps):\n return x\n\n while True:\n\n t = np.argmax(ma.masked_array(w, mask=[not i in Z for i in range(n)]))\n P.append(t)\n Z.remove(t)\n Ap = A.copy()\n Ap[:, Z] = 0\n\n z = np.linalg.lstsq(Ap, b, rcond=None)[0]\n\n if np.all(z[P] > 0):\n x = z\n break\n\n alpha = np.min(ma.masked_array(x / (x - z), mask=[not i in P or z[i] > 0 for i in range(n)]))\n x = x + alpha * (z - x)\n\n T = np.where(x == 0.0)[0]\n Z = [z for z in set(Z + P) if z in Z or z in P and z in T]\n P = [p for p in P if not p in T]\n\n k = k + 1", "def least_squares(y, tx):\n a = tx.T.dot(tx)\n b = tx.T.dot(y)\n w = np.linalg.solve(a, b)\n loss = compute_cost(y, tx, w)\n return w, loss", "def ridge_regression(y, tx, lambda_):\n N = tx.shape[0]\n a = tx.T.dot(tx) + 2 * N * lambda_ * np.identity(tx.shape[1])\n b = tx.T.dot(y)\n w = np.linalg.solve(a, b)\n loss = compute_loss_LS(y, tx, w) \n return loss, w", "def solve_fwd_bkwd(matrix_a, b):\n _L = cholesky(matrix_a) \n _U = transpose_matrix(_L) \n \n n = len(b)\n x = [0 for i in xrange(n)] \n y = [0 for i in xrange(n)] \n\n #forward solve _Ly = b\n for i in xrange(n):\n y[i] = b[i]\n for j in xrange(i):\n\t y[i] -= _L[i][j] * y[j]\n\ty[i] /= _L[i][i]\n\n #backward solve _Ux = y\n for i in xrange(n-1, -1, -1):\n\tx[i] = y[i]\n for j in xrange(i+1, n):\n x[i] -= _U[i][j] * x[j]\n x[i] /= _U[i][i]\n\n return x", "def lp_acent(A,b,c,x_0):\n #Parameters\n b = b.flatten()\n c = c.flatten()\n ALPHA = 0.01\n BETA = 0.5\n EPSILON = 1e-6\n MAXITERS = 100\n if (np.min(x_0)<=0) and (np.linalg.norm>1e-3):\n print 'failed' \n return 0\n #m = len(b)\n #n = len(x_0)\n lambda_hist = []\n x = x_0\n for iter in range(MAXITERS):\n # H = np.diag(1/np.power(x,3))\n g = c-np.power(x,-1)\n #print g.shape\n #solving KKT system\n w = np.linalg.solve(np.dot(np.dot(A,np.diag(np.power(x,2))),A.T),\n np.dot(np.dot(-A,np.diag(np.power(x,2))),g))\n dx = np.dot(-np.diag(np.power(x,2)),np.dot(A.T,w)+g)\n lambdasqr = np.dot(-g.T,dx) #dx'*T*dx: newton incremental\n lambda_hist.append(lambdasqr/2)\n if lambdasqr/2 <= EPSILON:\n break\n # backtracking line search\n t = 1\n # brin the point inside the domain\n while np.min(x+t*dx)<=0:\n t =BETA*t\n while np.dot(c.T,np.dot(t,dx))-np.sum(np.log(x+t*dx))+np.sum(np.log(x))-ALPHA*t*np.dot(g.T,dx)>0:\n t = BETA*t\n x = x+t*dx\n if iter == MAXITERS:\n print 'ERROR: MAXITERS reached'\n else:\n #plt.figure()\n #plt.plot(range(len(lambda_hist)),lambda_hist,'b-',range(len(lambda_hist)),lambda_hist,'bo')\n return x,w,lambda_hist", "def least_squares(y, tx):\n a = tx.T.dot(tx)\n b = tx.T.dot(y)\n\n w = np.linalg.solve(a, b)\n loss = compute_loss(y, tx, w)\n return w, loss", "def SOR_Solve_Opt(A,b,tol=1.0e-6,max_iterations=100,LOUD=False):\n [Nrow, Ncol] = A.shape\n assert Nrow == Ncol\n N = Nrow\n converged = False\n iteration = 1\n omega = 1\n l = 5\n p = 2\n x = np.random.rand(N) #random initial guess \n x_new = np.zeros(N)\n while not(converged):\n x = x_new.copy() #replace old value\n for row in range(N):\n x_new[row] = b[row]\n for column in range(N):\n if column != row:\n x_new[row] -= A[row,column]*x_new[column]\n x_new[row] /= A[row,row]\n x_new[row] = (1.0-omega) * x[row] + omega*x_new[row]\n relative_change = np.linalg.norm(x_new-x)/np.linalg.norm(x_new)\n #record change after iteration k\n if (l==iteration):\n dxl = np.linalg.norm(x_new-x)\n if (l + p == iteration):\n dxlp = np.linalg.norm(x_new-x)\n omega = 2.0/(1.0+np.sqrt(1-(dxlp/dxl)**(1.0/p)))\n if (LOUD):\n print(\"Iteration\",iteration,\": Relative Change =\",relative_change)\n if (relative_change < tol) or 
(iteration >= max_iterations):\n converged = True\n iteration += 1\n return x_new", "def _solveX(L, U, b):\n m, n = L.shape\n # Forward Substitution\n y = list()\n y.insert(0, b[0]/L[0][0])\n for i in range(1, m):\n summ = 0\n for k in range(0, i):\n summ += L[i][k]*y[k]\n y.insert(i, (b[i]-summ)/(L[i][i]))\n\n # Backwards Substitution\n x = [0]*m\n x[m-1] = y[m-1] / U[m-1][m-1]\n for i in range(m - 2, -1, -1):\n summ = 0\n for k in range(i+1, n):\n summ += U[i][k]*x[k]\n x[i] = (y[i] - summ)/U[i][i]\n\n return x", "def f(self,un,tn):\n return -self.a(tn)*un + self.b(tn)", "def project_L1_ball(x: \"fasta.linalg.Vector\", t: float) -> \"fasta.linalg.Vector\":\n # By Moreau's identity, we convert to proximal of dual problem (L-inf norm)\n return x - project_Linf_ball(x, t)", "def wasserstein(X,t,p,lam=10,its=10,sq=False,backpropT=False):\n\n it = torch.where(t > 0)[0] # getting the positions\n ic = torch.where(t < 1)[0]\n\n Xt = torch.index_select(X, 0, it) # Getting the nx100 for each value\n Xc = torch.index_select(X, 0, ic)\n\n nc = Xc.shape[0]\n nt = Xt.shape[0]\n\n ''' Compute distance matrix'''\n if sq:\n M = pdist2sq(Xt,Xc)\n else:\n M = safe_sqrt(pdist2sq(Xt,Xc))\n\n ''' Estimate lambda and delta '''\n M_mean = torch.mean(M)\n M_drop = torch.nn.Dropout(10/(nc*nt))(M)\n delta = torch.max(M)\n eff_lam = lam/M_mean\n\n ''' Compute new distance matrix '''\n Mt = M\n row = delta*torch.ones(M.shape[1])\n col = torch.cat((delta*torch.ones(M.shape[0]),torch.zeros((1))),0)\n Mt = torch.cat((M, torch.unsqueeze(row, 0)), 0)\n Mt = torch.cat((Mt, torch.unsqueeze(col, 1)), 1)\n\n ''' Compute marginal vectors '''\n temp = torch.where(t > 0)[0].shape\n a = torch.cat((p * torch.ones((torch.where(t > 0)[0].shape[0],1)) / nt, (1 - p) * torch.ones((1,1))), 0)\n b = torch.cat(((1-p) * torch.ones((torch.where(t < 1)[0].shape[0],1)) / nc, p * torch.ones((1,1))), 0)\n\n ''' Compute kernel matrix'''\n Mlam = eff_lam*Mt\n K = torch.exp(-Mlam) + 1e-6 # added constant to avoid nan\n U = K*Mt\n ainvK = K/a\n\n u = a\n for i in range(0,its):\n temp = torch.transpose(torch.matmul(torch.transpose(u,0,1),K),0,1)\n u = 1.0/(torch.matmul(ainvK,( b / temp)))\n temp = torch.transpose(torch.matmul(torch.transpose(u,0,1),K),0,1)\n v = b/(temp)\n\n T = u*(torch.transpose(v,0,1)*K)\n\n E = T*Mt\n D = 2*torch.sum(E)\n\n return D, Mlam", "def solve_L(L, b):\n n = b.size\n assert L.shape == (n,n)\n x = zeros(n)\n for i in range(n):\n x[i] = (b[i] - dot(x[:i], L[i,:i])) / L[i,i]\n if not numpy.isfinite(x[i]):\n x[i] = 0.0\n return x", "def least_squares(y, tx):\n a = tx.T.dot(tx)\n b = tx.T.dot(y)\n\n w = np.linalg.solve(a, b)\n return w, compute_mse(y, tx, w)", "def rawsolve(self,):\n m = self.m\n n = self.n\n z = self.z\n mark = self.mark\n kAAt = self.kAAt\n iAAt = self.iAAt\n AAt = self.AAt\n diag = self.diag\n consistent = True\n eps = 0.0\n m2 = m+n\n\n if self.ndep:\n eps = self.epssol * np.abs(z).max()\n\n #/*------------------------------------------------------+\n #| |\n #| -1 |\n #| z <- L z |\n #| */\n\n for i in range(m2):\n if mark[i]:\n beta = z[i]\n for k in range(kAAt[i], kAAt[i+1]):\n row = iAAt[k]\n z[row] -= AAt[k]*beta\n elif abs(z[i]) > eps:\n consistent = False\n else:\n z[i] = 0.0\n\n #/*------------------------------------------------------+\n #| |\n #| -1 |\n #| z <- D z |\n #| */\n\n for i in range(m2-1, -1, -1):\n if mark[i]:\n z[i] = z[i]/diag[i]\n elif abs(z[i]) > eps:\n consistent = False\n else:\n z[i] = 0.0\n\n #/*------------------------------------------------------+\n #| |\n #| t -1 |\n #| z 
<- (L ) z |\n #| */\n\n for i in range(m2-1, -1, -1):\n if mark[i]:\n beta = z[i]\n for k in range(kAAt[i], kAAt[i+1]):\n beta -= AAt[k]*z[iAAt[k]]\n z[i] = beta\n elif abs(z[i]) > eps:\n consistent = False\n else:\n z[i] = 0.0\n\n return consistent", "def solve_LF(self):\n self.u = zeros(self.N)\n self.u[0] = self.u0\n self.u[1] = self.u1\n u = self.u\n f= self.f\n dt = self.dt\n t = self.t\n N = self.N\n for n in xrange(1,N-1):\n u[n+1] = 2*dt*f(u[n],t[n]) + u[n-1]\n #return t,u", "def SYR_forward(b, alpha, V, s0, y0, T=100):\n n = len(y0)\n\n du = np.zeros(n+1)\n u0 = np.zeros(n+1)\n u0[0] = s0\n u0[1:] = y0\n \n def f(t,u):\n s = u[0]\n y = u[1:]\n force = np.dot(y,b) # Force of infection\n du[0] = - s*force\n du[1:] = s*force*alpha - np.dot(V,y)\n return du\n\n times = np.linspace(0,T,10000)\n solution = solve_ivp(f,[0,T],u0,t_eval=times,method='RK23',max_step=0.1)\n s = solution.y[0,:]\n y = solution.y[1:,:]\n t = solution.t\n \n return s, y, t", "def newton_decent_directions(function, func_derivative, func_hessian, xk, A, P, b, q, t):\r\n # calculate steepest decent direction\r\n newton_dir = -np.dot(np.linalg.inv(func_hessian(x=xk, A=A, P=P, b=b, q=q, t=t)), func_derivative(x=xk, A=A, P=P, b=b, q=q, t=t))\r\n\r\n return newton_dir", "def least_squares(y, tx):\n\n N = tx.shape[0]\n w = np.linalg.pinv(tx.T @ tx) @ tx.T @ y\n e = y - tx @ w\n loss = 1. / 2 / N * e.T @ e\n return w, loss", "def find_argmin_T(p_s, p_t, A_d,\n A, b):\n def f_error(x):\n A_tmp = np.reshape(x[0:9], newshape=(3,3))\n b_tmp = x[9:12]\n return(find_error(p_s, p_t, A_d,\n A_tmp, b_tmp))\n def flatten(A, b):\n # Flatten out A and b into x_0\n return(np.concatenate((np.reshape(A, newshape=(9,)), b)))\n x_0 = flatten(A, b)\n #sol = optimize.root(f_error, x_0, method='lm')\n print(\"minimizing the function now!!!\")\n sol = optimize.minimize(f_error, x_0)\n def expand(x):\n # Un-flattens x into the tuple of A and b\n return(np.reshape(x[0:9], newshape=(3,3)), x[9:12])\n\n A_tmp, b = expand(sol.x)\n print(\"==============\")\n print(\"A_tmp, before we make it near orthogonal\")\n print(A_tmp)\n print(\"its determinant\")\n print(np.linalg.det(A_tmp))\n print(\"==============\")\n #print(\"\")\n A = near_orthog(A_tmp)\n return(A, b)", "def rforwardsolve(A, b, d):\n \n \n\n n = len(b)\n if np.iscomplexobj(A) or np.iscomplexobj(b):\n A = A.astype('complex128')\n b = b.astype('complex128')\n x = b.copy()\n x[0] = x[0] / A[0, 0]\n for k in range(1, n):\n lk = max(0, k-d)\n x[k] = b[k] - np.dot(A[k, lk : k], x[lk : k])\n x[k] = x[k] / A[k, k] \n return x", "def RHS(y,t):\r\n\r\n return np.multiply(A.dot(y),ones-y)-beta*y", "def householder_ls(A, b):\n m, n = A.shape\n Ahat = np.zeros((m, n+1))\n Ahat[:,:n] = 1.0*A\n Ahat[:, n] = 1.0*b\n\n Rhat = householder(Ahat)\n x = solve_triangular(Rhat[:n,:n], Rhat[:n,n])\n\n return x", "def _solve(self, H, T):\n P = pinv(H)\n Beta = np.dot(P, T)\n return Beta", "def rothesstri(A, b):\n n = shape(A)[0]\n A = hstack([A, b])\n for k in range(n-1):\n r = linalg.norm([ A[k , k] , A[k + 1, k] ])\n if r>0:\n c=A[k, k]/r; s=A[k + 1, k]/r\n A[[k, k + 1],(k + 1):(n + 1)]=[[c, s],[-s, c]]*A[[k, k + 1],(k + 1):(n + 1)]\n A[k, k] = r; A[k+1,k] = 0\n z = A[:, n].copy()\n rbacksolve(A[:, :n], z, n)\n return z", "def SOR_Solve(A,b,tol=1.0e-6,omega=1,max_iterations=100,LOUD=False):\n [Nrow, Ncol] = A.shape\n assert Nrow == Ncol\n N = Nrow\n converged = False\n iteration = 1\n x = np.random.rand(N) #random initial guess \n x_new = np.zeros(N)\n while not(converged):\n x = x_new.copy() #replace old 
value\n for row in range(N):\n x_new[row] = b[row]\n for column in range(N):\n if column != row:\n x_new[row] -= A[row,column]*x_new[column]\n x_new[row] /= A[row,row]\n x_new[row] = (1.0-omega) * x[row] + omega*x_new[row]\n relative_change = np.linalg.norm(x_new-x)/np.linalg.norm(x_new)\n if (LOUD):\n print(\"Iteration\",iteration,\": Relative Change =\",relative_change)\n if (relative_change < tol) or (iteration >= max_iterations):\n converged = True\n iteration += 1\n return x_new", "def project_Linf_ball(x: \"fasta.linalg.Vector\", t: float) -> \"fasta.linalg.Vector\":\n N = len(x)\n xabs = np.abs(x)\n\n # Reverse sort the absolute values of z\n flipped = xabs.copy()\n flipped[::-1].sort()\n\n # Magic\n alpha = np.max((np.cumsum(flipped) - t) / np.arange(1, N+1))\n\n if alpha > 0:\n return np.minimum(xabs, alpha) * np.sign(x)\n else:\n return np.zeros(N)", "def ft_ll(m, t, y, x, x_kernel, x_kernel_params, t_kernel, t_kernel_params):\n # Take copies of everything - this is a function\n m = copy.deepcopy(m)\n t = copy.deepcopy(t)\n y = copy.deepcopy(y)\n x = copy.deepcopy(x)\n\n K_x = x_kernel(x, x, **x_kernel_params)\n N = len(y)\n\n lambd = np.zeros((N, 1))\n gamma = np.zeros((N, 1))\n\n K_t = [None] * N\n\n for n in range(N):\n K_t[n] = t_kernel(t[n], t[n], **t_kernel_params)\n lambd[n] = np.dot(np.ones((1, len(t[n]))), np.linalg.solve(K_t[n], np.ones((len(t[n]), 1))))\n # Making sure y[n] is a column vector\n y[n] = np.array(y[n], ndmin=2)\n if y[n].shape[0] == 1:\n y[n] = y[n].T\n gamma[n] = np.dot(np.ones((1, len(t[n]))), np.linalg.solve(K_t[n], y[n] - m[n] * np.ones(y[n].shape)))\n\n Lambd = np.diag(lambd.ravel())\n\n ll = 0\n\n # Terms relating to individual curves\n for n in range(N):\n ll += - 0.5 * np.dot((y[n] - m[n] * np.ones(y[n].shape)).T,\n np.linalg.solve(K_t[n], y[n] - m[n] * np.ones(y[n].shape)))\n ll += - 0.5 * np.log(np.linalg.det(K_t[n]))\n\n # Terms relating to K_x\n ll += + 0.5 * np.dot(gamma.T, np.linalg.solve(np.linalg.inv(K_x) + Lambd, gamma))\n ll += - 0.5 * np.log(np.linalg.det(np.linalg.inv(K_x) + Lambd))\n ll += - 0.5 * np.log(np.linalg.det(K_x))\n\n return ll", "def solve_cholesky(A, b, debug=False):\n L = cholesky(A, reveal_diagonal=debug)\n if debug:\n Optimizer.stat('L', L)\n x = solve_lower(L, b)\n if debug:\n Optimizer.stat('intermediate', x)\n return solve_upper(L.transpose(), x)", "def solve(self, x, y):\n\t\tx = np.concatenate((np.ones([x.shape[0], 1]), x), axis=1)\n\t\txtx = np.dot(x.T, x)\n\t\txty = np.dot(y, x)\n\t\tself.w = np.dot(np.linalg.inv(xtx), xty.T)", "def viterbi(self):\n # initialisation\n self.phi = zeros((self.noOfEmmittingStates+2, self.T + 1))\n self.phi[0,0] = 1.0\n for i in range(1,self.noOfEmmittingStates+2):\n self.phi[i,0] = 0.0\n for t in range(1,self.T+1):\n self.phi[0,t] = 0.0\n self.traceback = zeros((self.noOfEmmittingStates+1, self.T+1))\n\n # main recursion\n for t in range(1, self.T + 1):\n for j in range(1, self.noOfEmmittingStates + 1):\n phiTemp = zeros((self.noOfEmmittingStates + 1, 1))\n for k in range(self.noOfEmmittingStates+1):\n phiTemp[k,0] = self.phi[k,t-1] * self.transitionMatrix[k, j-1]\n self.traceback[j-1,t-1] = nonzero(phiTemp == phiTemp.max(0))[0][0]\n self.phi[j, t] = phiTemp.max(0) * self.b[j-1, t-1]\n\n # last column - set states which can't reach term to 0, sub for term\n for j in range(1,self.noOfEmmittingStates + 1):\n if self.transitionMatrix[j,-1] == 0:\n self.phi[j,-1] = 0\n phiTemp = zeros((self.noOfEmmittingStates+1, 1))\n for k in range(self.noOfEmmittingStates + 1):\n phiTemp[k,0] = 
self.phi[k,-1] * self.transitionMatrix[k,-1]\n self.traceback[-1,-1] = nonzero(phiTemp == phiTemp.max(0))[0][0]\n self.phi[-1,-1] = phiTemp.max(0)", "def tridiag_solver(b):\n b = np.copy(b)\n v = np.zeros_like(b)\n c = np.zeros_like(b)\n\n for i in range(1, len(v) - 1):\n c[i] = -1. / (2 + c[i - 1])\n b[i] = (b[i] + b[i - 1]) / (2 + c[i - 1])\n\n for i in reversed(range(1, len(v) - 1)):\n v[i] = b[i] - c[i] * v[i + 1]\n\n return v", "def analyticalLinearSol(self, t):\n return self.c*t + self.I", "def newton_body(iterand):\n next_backward_difference = iterand.next_backward_difference\n next_state_vec = iterand.next_state_vec\n\n rhs = newton_coefficient * step_size_cast * ode_fn_vec(\n next_time,\n next_state_vec) - rhs_constant_term - next_backward_difference\n delta = tf.squeeze(\n tf.linalg.triangular_solve(\n upper,\n tf.matmul(tf.transpose(unitary), rhs[:, tf.newaxis]),\n lower=False))\n num_iters = iterand.num_iters + 1\n\n next_backward_difference += delta\n next_state_vec += delta\n\n delta_norm = tf.cast(tf.norm(delta), real_dtype)\n lipschitz_const = delta_norm / iterand.prev_delta_norm\n\n # Stop if method has converged.\n approx_dist_to_sol = lipschitz_const / (1. - lipschitz_const) * delta_norm\n close_to_sol = approx_dist_to_sol < tol\n delta_norm_is_zero = tf.equal(delta_norm, tf.constant(0., dtype=real_dtype))\n converged = close_to_sol | delta_norm_is_zero\n finished = converged\n\n # Stop if any of the following conditions are met:\n # (A) We have hit the maximum number of iterations.\n # (B) The method is converging too slowly.\n # (C) The method is not expected to converge.\n too_slow = lipschitz_const > 1.\n finished = finished | too_slow\n if max_num_iters is not None:\n too_many_iters = tf.equal(num_iters, max_num_iters)\n num_iters_left = max_num_iters - num_iters\n num_iters_left_cast = tf.cast(num_iters_left, real_dtype)\n wont_converge = (\n approx_dist_to_sol * lipschitz_const**num_iters_left_cast > tol)\n finished = finished | too_many_iters | wont_converge\n\n return [\n _NewtonIterand(\n converged=converged,\n finished=finished,\n next_backward_difference=next_backward_difference,\n next_state_vec=next_state_vec,\n num_iters=num_iters,\n prev_delta_norm=delta_norm)\n ]", "def test_lu_forward_sub():\t\n\t# test 1\n\tL = np.array([\n\t\t[ 2, 3,-4, 2],\n\t\t[-2, 1,-2, 1],\n\t\t[ 1,-1, 3,-1],\n\t\t[-3, 2, 2, 2]])\t\n\n\tb = np.array([4, -8, 9, 6])\n\n\ty = lu_forward_sub(L, b) \t\t\n\ty_soln = np.array([4,0,5,8])\t\t\t\t\t\t# correct output of LU_FORWARD_SUB\n\tassert norm(y - y_soln) < 1.e-10\n\n\t# test 2\n\tL2 = np.array([\n\t\t [0.01, 0., 0., 0., 0., 0., 0., 0., 0., 0., 1],\n\t\t [-100., 0.01, 0., 0., 0., 0., 0., 0., 0., 0., 100],\n\t\t [0., -100., 0.01, 0., 0., 0., 0., 0., 0., 0., 10000],\n\t\t [0., 0., -100., 0.01, 0., 0., 0., 0., 0., 0., 1000000],\n\t\t [0., 0., 0., -100., 0.01, 0., 0., 0., 0., 0., 100000000],\n\t\t [0., 0., 0., 0., -100., 0.01, 0., 0., 0., 0., 10000000000],\n\t\t [0., 0., 0., 0., 0., -100., 0.01, 0., 0., 0., 1000000000000],\n\t\t [0., 0., 0., 0., 0., 0., -100., 0.01, 0., 0., 100000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., -100., 0.01, 0., 10000000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., 0., -100, 0.01, 1000000000000000000],\n\t\t [0., 0., 0., 0., 0., 0., 0., 0., 0., -100., 100000000000000000000]])\n\n\tb2 = np.array ([[1.01], [-0.99], [-0.99], [-0.99], [-0.99], [-0.99], [-0.99], [-0.99], [-0.99], [-0.99], [0.]])\n\n\ty2 = lu_forward_sub(L2, b2) \t\t\n\ty_soln2 = np.array([1.01, -101.99, 98.01, 98.01, 98.01, 98.01, 98.01, 98.01, 
98.01, 98.01, 99])\t\t\t\t\t\t# correct output of LU_FORWARD_SUB\n\tassert norm(y2 - y_soln2) < 1.e-10", "def solver(u_init, eta_0, eta, eta_lin, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, ftol = 1e-3, max_iter = 5000, verbose = 0, nnls_max_iter=30):\n\n # Raise('NotImplementedError: only adjusted the arguments.')\n #Need to incorporate L_lhs into stacked and appropriate w_lin updates, u_update and eta_lin increments\n #precompute the expensive operation:\n lin_penalties = 1/np.sqrt(2*eta_lin)\n eta_T_H_L_stacked = scipy.sparse.vstack([T.multiply(1/np.sqrt(2*eta_0))] + [H[i].multiply(1/np.sqrt(2*eta[i])) for i in range(len(H))] + [L_lhs.multiply(lin_penalties[:,None])])\n #!!!!\n# premultiplied_lhs = eta_T_H_stacked.T.dot(eta_T_H_stacked).toarray()\n #!!!!\n u_prev = u_init + 1\n u = u_init\n count = 0\n obj_history = []\n relaxed_obj_history = [-1, 0.1] #just two initial values to enter the loop\n while np.abs((relaxed_obj_history[-2] - relaxed_obj_history[-1])/relaxed_obj_history[-2]) > ftol and count < max_iter:#np.linalg.norm(u - u_prev, np.inf) > 1e-3 and count < max_iter: #Maybe all of them stop changing\n start = time.time()\n \n u_prev = np.copy(u)\n w_0 = w_0_update(eta_0, u, T, alpha, B) \n w = w_update(u, H, gamma, D, C) \n w_lin = w_lin_update(u, L_lhs, L_rhs)\n# u = u_update(eta_0, eta, w_0, w, eta_T_H_stacked, nnls_max_iter=50)\n #!!!!\n # u = u_update(eta_0, eta, w_0, w, eta_T_H_L_stacked, nnls_max_iter=30)\n u = u_update(eta_0, eta, eta_lin, w_0, w, w_lin, eta_T_H_L_stacked, premultiplied_lhs = None, nnls_max_iter=nnls_max_iter)\n #!!!!\n count += 1 \n if count == 10:\n u_inf = np.copy(u)\n w_0_inf = w_0[:]\n w_inf = w[:]\n w_lin_inf = w_lin[:]\n if count > 10 and np.abs(cur_obj) > 1e+15: #HANDLE THIS BETTER!!!\n print('INFINITY! RETURNING u at the 10-th iteration to enter the feasibility loop')\n return u_inf, w_0_inf, w_inf, w_lin_inf, obj_history, relaxed_obj_history\n \n cur_obj = obj_u_opt_N_fixed(u, T, alpha, B)\n obj_history.append(cur_obj)\n cur_relaxed_obj = relaxed_obj_u_opt_N_fixed(u, w_0, w, w_lin, eta_0, eta, eta_lin, T, H, L_lhs, alpha, B)\n # relaxed_obj_u_opt_N_fixed(u, w_0, w, eta_0, eta, T, H, alpha, B)\n relaxed_obj_history.append(cur_relaxed_obj) \n \n stop = time.time()\n duration = stop-start\n \n if count%1 == 0 and verbose: \n stopping_criterion = np.abs((relaxed_obj_history[-2] - relaxed_obj_history[-1])/relaxed_obj_history[-2])\n print(' iter = {}, stopping criterion:{}, OBJ {}'.format(count, stopping_criterion, cur_obj))\n print(' This iteration took: {}'.format(duration))\n return u, w_0, w, w_lin, obj_history, relaxed_obj_history", "def block_levinson(y, L):\n d = L.shape[1] # Block dimension\n N = int(L.shape[0]/d) # Number of blocks\n\n # This gets the bottom block row B from the left block column L\n B = np.reshape(L, [d, N, d], order='F')\n B = B.swapaxes(1, 2)\n B = B[..., ::-1]\n B = np.reshape(B, [d, N*d], order='F')\n\n f = np.linalg.inv(L[:d, :]) # \"Forward\" vector\n b = f # \"Backward\" vector\n x = np.dot(f, y[:d]) # Solution vector\n\n Ai = np.eye(2*d)\n G = np.zeros((d*N, 2*d))\n for n in range(2, N+1):\n ef = np.dot(B[:, (N-n)*d:N*d], np.vstack((f, np.zeros((d, d)))))\n eb = np.dot(L[:n*d, :].T, np.vstack((np.zeros((d, d)), b)))\n ex = np.dot(B[:, (N-n)*d:N*d], np.vstack((x, np.zeros((d, 1)))))\n Ai[:d, d:] = eb\n Ai[d:, :d] = ef\n A = np.linalg.inv(Ai)\n l = d*(n-1)\n G[:l, :d] = f\n G[d:l+d, d:] = b\n fn = np.dot(G[:l+d, :], A[:, :d])\n bn = np.dot(G[:l+d, :], A[:, d:])\n f = fn\n b = bn\n x = np.vstack((x, np.zeros((d, 1)))) + 
np.dot(b, y[(n-1)*d:n*d]-ex)\n\n W = x", "def least_squares(y, tx):\r\n w = np.linalg.solve(tx.T@tx,tx.T@y)\r\n loss = compute_loss_MSE(y, tx, w)\r\n return w,loss", "def rforwardsolve(A, b, d):\n n = len(b)\n b[0] /= A[0, 0]\n for k in range(1,n):\n lk = array([0,k-d]).max()\n b[k] = b[k] - dot(A[k, lk:k],b[lk:k])\n b[k] /= A[k, k]", "def backward_committor_sensitivity(T, A, B, index):\n\n # This is really ugly to compute. The problem is, that changes in T induce changes in\n # the stationary distribution and so we need to add this influence, too\n # I implemented something which is correct, but don't ask me about the derivation\n\n n = len(T)\n\n trT = numpy.transpose(T)\n\n one = numpy.ones(n)\n eq = stationary_distribution(T)\n\n mEQ = numpy.diag(eq)\n mIEQ = numpy.diag(1.0 / eq)\n mSEQ = numpy.diag(1.0 / eq / eq)\n\n backT = numpy.dot(mIEQ, numpy.dot(trT, mEQ))\n\n qMat = forward_committor_sensitivity(backT, A, B, index)\n\n matA = trT - numpy.identity(n)\n matA = numpy.concatenate((matA, [one]))\n\n phiM = numpy.linalg.pinv(matA)\n\n phiM = phiM[:, 0:n]\n\n trQMat = numpy.transpose(qMat)\n\n d1 = numpy.dot(mSEQ, numpy.diagonal(numpy.dot(numpy.dot(trT, mEQ), trQMat), 0))\n d2 = numpy.diagonal(numpy.dot(numpy.dot(trQMat, mIEQ), trT), 0)\n\n psi1 = numpy.dot(d1, phiM)\n psi2 = numpy.dot(-d2, phiM)\n\n v1 = psi1 - one * numpy.dot(psi1, eq)\n v3 = psi2 - one * numpy.dot(psi2, eq)\n\n part1 = numpy.outer(eq, v1)\n part2 = numpy.dot(numpy.dot(mEQ, trQMat), mIEQ)\n part3 = numpy.outer(eq, v3)\n\n sensitivity = part1 + part2 + part3\n\n return sensitivity", "def hexapodZernikeLinearModel():\n Tfile='/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_training.cp'\n b=p.load(open(Tfile))\n nobs = len(b)\n x = b[:,0]\n y = b[:,1]\n z = b[:,2]\n theta = b[:,3]\n phi = b[:,4]\n fwhm = b[:,5]\n e1 = b[:,6]\n e2 = b[:,7]\n thetax = theta*np.cos(np.deg2rad(phi))\n thetay = theta*np.sin(np.deg2rad(phi))\n \n M22realTrefoil2 = b[:,37] # for x decenter\n M22imagTrefoil1 = b[:,54] \n M22TrefoilXshift = 0.5*(M22realTrefoil2+M22imagTrefoil1)\n\n M22realTrefoil1 = b[:,34] # for y decenter\n M22imagTrefoil2 = b[:,57] \n M22TrefoilYshift = 0.5*(M22realTrefoil1 - M22imagTrefoil2)\n\n M20defocus = b[:,12] # for defocus\n\n M22realComa2 = b[:,36] # for x-tilt\n M22imagComa1 = b[:,55]\n M22ComaXtilt = 0.5*(M22realComa2+M22imagComa1)\n\n M22realComa1 = b[:,35] # for y-tilt\n M22imagComa2 = b[:,56]\n M22ComaYtilt = 0.5*(M22realComa1 - M22imagComa2)\n \n pl.figure(figsize=(21,12))\n pl.subplot(2,3,1)\n t=bp.bin_scatter(M22TrefoilXshift,x,nbins=20,fmt='bo',scatter=True)\n res = linefit(M22TrefoilXshift,x)\n pl.plot(M22TrefoilXshift,M22TrefoilXshift*res[1]+res[0],'r,')\n pl.ylabel('x-decenter')\n pl.xlabel('(M22realTrefoil2+M22imagTrefoil1)/2')\n pl.title('slope: '+str(round(res[1],4))+' Intercept: '+str(round(res[0],4)))\n pl.subplot(2,3,2)\n t=bp.bin_scatter(M22TrefoilYshift,y,nbins=20,fmt='bo',scatter=True)\n res = linefit(M22TrefoilYshift,y)\n pl.plot(M22TrefoilYshift,M22TrefoilYshift*res[1]+res[0],'r,')\n pl.ylabel('y-decenter')\n pl.xlabel('(M22realTrefoil1 - M22imagTrefoil2)/2')\n pl.title('slope: '+str(round(res[1],4))+' Intercept: '+str(round(res[0],4)))\n pl.subplot(2,3,3)\n t=bp.bin_scatter(M20defocus,z,nbins=20,fmt='bo',scatter=True)\n res = linefit(M20defocus,z)\n pl.plot(M20defocus,M20defocus*res[1]+res[0],'r,')\n pl.ylabel('z-defocus')\n pl.xlabel('M20defocus')\n pl.title('slope: '+str(round(res[1],4))+' Intercept: '+str(round(res[0],4)))\n pl.subplot(2,3,4)\n 
t=bp.bin_scatter(M22ComaXtilt,thetax,nbins=20,fmt='bo',scatter=True)\n res = linefit(M22ComaXtilt,thetax)\n pl.plot(M22ComaXtilt,M22ComaXtilt*res[1]+res[0],'r,')\n pl.ylabel('x-tilt')\n pl.xlabel('(M22realComa2+M22imagComa1)/2')\n pl.title('slope: '+str(round(res[1],4))+' Intercept: '+str(round(res[0],4)))\n pl.subplot(2,3,5)\n t=bp.bin_scatter(M22ComaYtilt,thetay,nbins=20,fmt='bo',scatter=True)\n res = linefit(M22ComaYtilt,thetay)\n pl.plot(M22ComaYtilt,M22ComaYtilt*res[1]+res[0],'r,')\n pl.ylabel('y-tilt')\n pl.xlabel('(M22realComa1 - M22imagComa2)/2')\n pl.title('slope: '+str(round(res[1],4))+' Intercept: '+str(round(res[0],4)))\n\n pl.close()", "def brentq(x1, b, U, gamma, idens, ixmom, iymom, iener,\n TOL=1.e-6, ITMAX=100):\n\n # initialize variables\n a = x1\n c = 0.0\n d = 0.0\n fa = f(a, U, gamma, idens, ixmom, iymom, iener)\n fb = f(b, U, gamma, idens, ixmom, iymom, iener)\n fc = 0.0\n\n # root found\n if fa * fb >= 0.0:\n return x1\n\n # switch variables\n if abs(fa) < abs(fb):\n a, b = b, a\n fa, fb = fb, fa\n\n c = a\n fc = fa\n\n mflag = True\n\n for _ in range(ITMAX):\n if fa != fc and fb != fc: # pylint: disable=consider-using-in\n s = a*fb*fc / ((fa-fb) * (fa-fc)) + b*fa*fc / ((fb-fa)*(fb-fc)) + \\\n c*fa*fb / ((fc-fa)*(fc-fb))\n else:\n s = b - fb * (b-a) / (fb-fa)\n\n # test conditions and store in con1-con5\n con1 = False\n\n if 0.25 * (3.0 * a + b) < b:\n if s < 0.25 * (3.0 * a + b) or s > b:\n con1 = True\n elif s < b or s > 0.25 * (3.0 * a + b):\n con1 = True\n\n con2 = mflag and abs(s-b) >= 0.5 * abs(b-c)\n\n con3 = (not mflag) and abs(s-b) >= 0.5 * abs(c-d)\n\n con4 = mflag and abs(b-c) < TOL\n\n con5 = (not mflag) and abs(c-d) < TOL\n\n if con1 or con2 or con3 or con4 or con5:\n s = 0.5 * (a + b)\n mflag = True\n else:\n mflag = False\n\n # evaluate at midpoint and set new limits\n fs = f(s, U, gamma, idens, ixmom, iymom, iener)\n\n if abs(fa) < abs(fb):\n a, b = b, a\n fa, fb = fb, fa\n\n d = c\n c = b\n fc = fb\n\n if fa * fs < 0.0:\n b = s\n fb = fs\n else:\n a = s\n fa = fs\n\n # found solution to required tolerance\n if fb == 0.0 or fs == 0.0 or abs(b-a) < TOL:\n return b\n\n return x1", "def solve(self, b):\n raise NotImplementedError", "def newtonJacobian(self,r):\n #x_vec=np.array(r)\n x=r[0]\n y=r[1]\n jacobi=np.zeros([2,2], float)\n \n \n jacobi[0][0]=(4.0*(self.x_0-x)**2.0-2.0)*self.sfunc(x,y)\n jacobi[1][1]=(4.0*(self.y_0-y)**2.0-2.0)*self.sfunc(x,y)\n jacobi[1][0]=4.0*(self.x_0-x)*(self.y_0-y)*self.sfunc(x,y)\n jacobi[0][1]=jacobi[1][0]\n #print \"newton jacobian is \",jacobi\n try:\n return mat.inv(jacobi)\n except:\n print \"singular jacobi not invertable\"\n return 0", "def rbacksolve(A, b, d):\n n = len(b)\n b[n - 1] /= A[n - 1,n - 1]\n for k in range(n-2,-1,-1):\n uk = array([n, k + d + 1]).min()\n b[k] = b[k] - dot(A[k,(k+1):uk], b[(k+1):uk])\n b[k] /= A[k,k]", "def JacobiSolve_Short(A,b,tol=1.0e-6,max_iterations=100,LOUD=False):\n [Nrow, Ncol] = A.shape\n assert Nrow == Ncol\n N = Nrow\n converged = False\n iteration = 1\n x = np.random.rand(N) #random initial guess \n x_new = np.zeros(N)\n while not(converged):\n x = x_new.copy() #replace old value\n x_new *= 0 #reset x_new\n #update is (b - whole row * x + diagonal part * x)/diagonal\n x_new = (b - np.dot(A,x)+ A.diagonal()*x)/A.diagonal()\n relative_change = np.linalg.norm(x_new-x)/np.linalg.norm(x_new)\n if (LOUD):\n print(\"Iteration\",iteration,\": Relative Change =\",relative_change)\n if (relative_change < tol) or (iteration >= max_iterations):\n converged = True\n iteration += 1\n 
return x_new", "def Solver(line1, line2):\n\ta = np.array(line1[0])\n\tb = np.array(line1[1])\n\tu = np.array(line2[0])\n\tv = np.array(line2[1])\n\t#print(a,b,u,v)\n\tc = u[:2]-a[:2]\n\tA = np.vstack((b[:2],-v[:2])).T\n\t#print(A)\n\tx = np.linalg.solve(A,c)\n\t#print(x)\n\tp = a+x[0]*b\n\t#print(p)\n\treturn p", "def Lotka_Volterra(a, b, c, d, init, t0, tf, eps):\n\n \n Y=meth_epsilon(init, t0, tf, eps,\\\n eq_Lotka_Volterra(a, b, c, d) , step_RK4)\n return Y", "def ridge_regression(y, tx, lambda_):\n lambda_prime = lambda_ * 2*tx.shape[0]\n\n a = tx.T.dot(tx) + lambda_prime*np.eye(tx.shape[1])\n b = tx.T.dot(y)\n w_star = np.linalg.solve(a, b)\n\n loss = compute_loss(y, tx, w_star)\n\n return w_star, loss", "def LUsolve(a,b):\n b=float64(b)\n n=len(b)\n LU=LUdecomp(a)\n y=zeros((n,1))\n x=zeros((n,1))\n y[0]=b[0]\n for i in range(1,n):\n sum=b[i]\n for j in range(i):\n sum=sum-LU[i][j]*y[j]\n y[i]=sum\n x[n-1]=float(y[n-1])/LU[n-1][n-1]\n for i in range(n-2,-1,-1):\n sum=y[i]\n for j in range(i+1,n):\n sum=sum-LU[i][j]*x[j]\n x[i]=float(sum)/LU[i][i]\n return x", "def blk_chol_inv(A_Txdxd, B_Tm1xdxd, b_Txd, lower=True, transpose=False):\n # Define a matrix-vector dot product because the tensorflow developers feel\n # this is beneath them.\n tf_dot = lambda M, v : tf.reduce_sum(tf.multiply(M, v), axis=1)\n if transpose:\n A_Txdxd = tf.transpose(A_Txdxd, [0,2,1])\n B_Tm1xdxd = tf.transpose(B_Tm1xdxd, [0,2,1])\n \n # Whether B is lower or upper doesn't matter. The function to be passed to\n # scan is the same.\n def step(x_d, ABb_2x_):\n A_dxd, B_dxd, b_d = ABb_2x_[0], ABb_2x_[1], ABb_2x_[2]\n return tf_dot(tf.matrix_inverse(A_dxd),\n b_d - tf_dot(B_dxd, x_d))\n if lower:\n x0_d = tf_dot(tf.matrix_inverse(A_Txdxd[0]), b_Txd[0])\n result_Tm1xd = tf.scan(fn=step, elems=[A_Txdxd[1:], B_Tm1xdxd, b_Txd[1:]], \n initializer=x0_d)\n result_Txd = tf.concat([tf.expand_dims(x0_d, axis=0), result_Tm1xd], axis=0)\n else:\n xN_d = tf_dot(tf.matrix_inverse(A_Txdxd[-1]), b_Txd[-1])\n result_Tm1xd = tf.scan(fn=step, \n elems=[A_Txdxd[:-1][::-1], B_Tm1xdxd[::-1], b_Txd[:-1][::-1]],\n initializer=xN_d )\n result_Txd = tf.concat([tf.expand_dims(xN_d, axis=0), result_Tm1xd],\n axis=0)[::-1]\n\n return result_Txd", "def tanh_backward(dA, internal_params):\n Z = internal_params\n Zt=tanh(Z)\n dzp=np.power(Zt,2)\n print(dzp.shape)\n dZ=np.multiply(dzp,dA)\n return dZ\n # raise NotImplementedError", "def LU_solve(A, d, b):\n \n\n L, U = L1U(A, d)\n\n y = rforwardsolve(L, b, d)\n x = rbackwardsolve(U, y, d)\n\n return x", "def JacobiSolve(A,b,tol=1.0e-6,max_iterations=100,LOUD=False):\n [Nrow, Ncol] = A.shape\n assert Nrow == Ncol\n N = Nrow\n converged = False\n iteration = 1\n x = np.random.rand(N) #random initial guess \n x_new = np.zeros(N)\n while not(converged):\n x = x_new.copy() #replace old value\n x_new *= 0 #reset x_new\n for row in range(N):\n x_new[row] = b[row]\n for column in range(N):\n if column != row:\n x_new[row] -= A[row,column]*x[column]\n x_new[row] /= A[row,row]\n relative_change = np.linalg.norm(x_new-x)/np.linalg.norm(x_new)\n if (LOUD):\n print(\"Iteration\",iteration,\": Relative Change =\",relative_change)\n if (relative_change < tol) or (iteration >= max_iterations):\n converged = True\n iteration += 1\n return x_new", "def Ax_b(A, b):\n x = Matrix([x1, x2])\n Ax = A*x\n Ax_b = Ax - b\n x = linsolve([Ax_b[0], Ax_b[1]], x1, x2)\n return tuple(*x)", "def ridge_regression(y, tx, lambda_):\n N,D = tx.shape\n\n aI = 2 * N * lambda_ * np.identity(D)\n a = tx.T.dot(tx) + aI\n b = 
tx.T.dot(y)\n\n w = np.linalg.solve(a, b)\n return w, compute_mse(y, tx, w)", "def eq_Lotka_Volterra(a, b, c, d):\n return lambda y, t : np.array([ y[0] * ( a - b * y[1]),\\\n y[1] * ( c * y[0] - d)])", "def solve_step(self, bc_left=0):\n status = 0\n self.t += self.dt\n\n\n ### Construct the RHS vector\n # Implicit terms\n #cff1 = 0. # Fully implicit\n #cff2 = 0.\n cff1 = 0.5*(1. - 2.*self.c_im)*self.dt\n cff2 = 0.5*self.c_im*self.dt\n RHS = cff1*self.L_rhs.dot(self.B) +\\\n cff2*self.L_rhs.dot(self.B_n_m1)\n\n # Nonlinear (explicit) terms\n cff3 = self.dt*(3 + self.b_ex)*0.5\n cff4 = -self.dt*(1+2*self.b_ex)*0.5\n cff5 = self.dt*(self.b_ex)*0.5\n \n RHS += cff3*self.calc_nonlinear_rhs(self.B)\n RHS += cff4*self.calc_nonlinear_rhs(self.B_n_m1)\n RHS += cff5*self.calc_nonlinear_rhs(self.B_n_m2)\n\n # Other terms from the time-derivative\n RHS += self.B\n\n # Add the BCs to the RHS\n cff0 = 0.5*(1 + self.c_im)*self.dt\n self.add_bcs(RHS, bc_left, cff0, cff1, cff2)\n\n # Use the direct banded matrix solver (faster)\n self.B_n_p1[:] = la.solve_banded( (self._j,self._j), self.L_lhs.data[::-1,:], RHS)\n\n # Check solutions\n if np.any( np.isnan(self.B_n_p1)):\n return -1\n\n # Update the terms last\n self.B_n_m2[:] = self.B_n_m1\n self.B_n_m1[:] = self.B\n self.B[:] = self.B_n_p1\n\n ## Update the boundary terms in these equations\n self.bcs[2] = self.bcs[1]\n self.bcs[1] = self.bcs[0]\n self.bcs[0] = bc_left\n\n return status", "def ridge_regression(y, tx, lambda_):\n x_t = tx.T\n lambd = lambda_ * 2 * len(y)\n w = np.linalg.solve (np.dot(x_t, tx) + lambd * np.eye(tx.shape[1]), np.dot(x_t,y)) \n loss = compute_mse(y, tx, w)\n\n return w,loss", "def lorzrk(s,t,param):\n r = param[0]\n sigma = param[1]\n b = param[2]\n # For clarity, unravel input vectors\n x = s[0]; y = s[1]; z = s[2]\n # Return the derivatives [dx/dt dy/dt dz/dt]\n deriv = np.zeros(3)\n deriv[0] = sigma*(y-x)\n deriv[1] = r*x - y - x*z\n deriv[2] = x*y - b*z\n return deriv", "def rbackwardsolve(A, b, d):\n\n n = len(b)\n if np.iscomplexobj(A) or np.iscomplexobj(b):\n A = A.astype('complex128')\n b = b.astype('complex128')\n x = b.copy()\n x[n-1] = b[n-1] / A[n-1, n-1]\n\n for k in range(n-2, -1, -1):\n uk = min(n-1, k+d)\n x[k] = (b[k] - np.dot(A[k, k+1:uk+1], x[k+1:uk+1])) / A[k, k]\n\n return x", "def Pol_Newton_un_punto(x,datos_x,datos_y):\n n = datos_x.shape[0]\n matriz=np.ones([n,n])\n for j in range(n):\n for i in range(n):\n if j>i:\n matriz[i][j]=0\n else:\n producto=1\n for k in range(j):\n producto=producto*(datos_x[i]-datos_x[k])\n matriz[i][j]=producto\n matriz,datos_y1= pivoteo_parcial(matriz,datos_y)\n x1 = descompo_LU(matriz,datos_y1)\n prod=np.zeros(x1.shape[0])\n for i in range(n):\n if i==0:\n prod[i]=1\n else: \n prod[i]=prod[i-1]*(x-datos_x[i-1])\n solucion=x1@prod\n return solucion", "def leastsquares(A,b,qr=qrfact.qri_mgs_piv,alpha=0.5):\n \n\n A = numpy.array(A, dtype=float)\n m,n = A.shape\n z = numpy.zeros( n )\n a = numpy.zeros( n )\n x = numpy.zeros( n )\n b = numpy.transpose(b)[0]\n\n # do the QR factorization\n try:\n Q,R = qr(A)[:2] # Some QR routines return a third permutation P solving AP=QR.\n PA = A\n except TypeError:\n Q,R,P = qr(A,alpha)[:3] # Some QR routines return a third permutation P solving AP=QR.\n AP = numpy.dot( A, P )\n\n # Step 1'': orthogonalization of b against Q\n u = b\n for j in range( 0, n ) :\n # print \"Qj = \", Q[:,j]\n # print \"u = \", u\n # print \"dot = \", numpy.dot( Q[:,j], u )\n z[j] = numpy.dot( Q[:,j], u )\n u = u - z[j] * Q[:,j]\n\n # Step 2'': iterative 
orthogonalization of u\n ul2norm = numpy.linalg.norm( u )\n ii = 0\n while True : # iterate\n for j in range( 0, n ) :\n a[j] = numpy.dot( Q[:,j], u )\n z[j] = z[j] + a[j]\n u = u - a[j] * Q[:,j]\n\n ii = ii + 1\n ulnorm = ul2norm\n ul2norm = numpy.linalg.norm( u )\n\n #print ul2norm, ulnorm\n \n if (ul2norm > alpha * ulnorm) or ul2norm == 0 :\n # print \"used\", ii, \"orthogonalizations\"\n break\n\n #print z\n #print R\n\n # Step 3'': use back substitution to solve Rx = z\n for i in range( n-1, -1, -1 ) :\n x[i] = z[i]\n for j in range( i+1, n ) :\n x[i] = x[i] - R[i,j] * x[j]\n x[i] = x[i] / R[i,i]\n #print x\n\n #need to permute x according to permutation matrix P\n \n return numpy.dot( P, x )", "def least_squares(y, tx, loss_function=rmse):\n w = np.linalg.solve(tx.T @ tx, tx.T @ y)\n loss = loss_function(y, tx, w)\n return w, loss", "def w_0_update(eta_0, u, T, alpha, B):\n return prox(eta_0, T.dot(u) - eta_0*alpha, B)", "def linsolve(A, b, symmetric=True):\n try:\n F = b.asarray()\n except AttributeError:\n F = np.asarray(b)\n\n use_np_solve = not symmetric or flapack is None\n x, info = None, 1\n if not use_np_solve:\n c, x, info = flapack.dposv(A, F, lower=0, overwrite_a=0, overwrite_b=0)\n if info < 0:\n raise ValueError(\n \"ILLEGAL VALUE IN {0}-TH ARGUMENT OF \" \"INTERNAL DPOSV\".format(-info)\n )\n if info != 0:\n use_np_solve = True\n\n if use_np_solve:\n try:\n x = la.solve(A, F)\n info = 0\n except la.LinAlgError:\n raise RuntimeError(\"ATTEMPTING TO SOLVE UNDER CONSTRAINED SYSTEM\")\n\n if info > 0:\n tty.warn(\"LINSOLVE FAILED, USING LEAST SQUARES \" \"TO SOLVE SYSTEM\")\n x = la.lstsq(A, F)[0]\n\n return x", "def householder_solve(A, b):\n m, k = b.shape\n Ahat = np.zeros((m,m+1))\n x = np.zeros((m,k))\n for i in range(k):\n Ahat[:,:m] = 1.0*A\n Ahat[:,m] = 1.0*b[:,i]\n Rhat = householder(Ahat, m)\n x[:,i] = solve_triangular(Rhat[:,:m], Rhat[:,m])\n return x", "def _solve_tikho(matrix, result, t_mat, **kwargs):\n\n # Note that the indexing is applied inside the function\n tikho = atoca_utils.Tikhonov(matrix, result, t_mat)\n\n return tikho.solve(**kwargs)", "def get_1D_Taylors_to_spline_patch_matrix(a, b, deg):\n Taylors_matrix = np.concatenate([\n get_1D_Taylor_matrix(a, deg = 2 * deg, trunc = deg),\n get_1D_Taylor_matrix(b, deg = 2 * deg, trunc = deg),\n ])\n \n #print(Taylors_matrix)\n return la.inv(Taylors_matrix)", "def f(t,y):\n return (lam*y)", "def energy_function(self, x):\n \n return -T.dot(T.transpose(x), T.dot(self.W, x)) -\\\n T.dot(T.transpose(self.b), x)", "def state_eq(t, x, u, tu):\n # u = u[0, :]\n u_interp = np.interp(t, tu, u[0, :])\n # print(f'u: {u}')\n return np.vstack((x[1], -x[1] + u_interp))", "def RHSnet(y,t,a,b0,b1,g,k,w):\n S = y[:N]\n E = y[N:2*N]\n C = y[2*N:3*N]\n b = b0 + b1*(1+np.cos(2*np.pi*t))\n dy = np.zeros(3*N)\n dy[:N]= k*(1-S)-b*C*S+w*np.dot(P,S)-w*S\n dy[N:2*N]= b*C*S-(k+a)*E+w*np.dot(P,E)-w*E\n dy[2*N:3*N]= a*E-(g+k)*C+w*np.dot(P,C)-w*C\n return dy", "def weighted_solve(A, b, w):\n assert len(b) == len(w)\n \n n = len(b)\n W = spdiags(w, [0,], n, n)\n y = A.T.dot(W.dot(b))\n iCov = A.T.dot(W.dot(A))\n x = np.linalg.lstsq(iCov, y)[0]\n return x, iCov", "def test_el_below_lcl():\n p = [902.1554, 897.9034, 893.6506, 889.4047, 883.063, 874.6284, 866.2387, 857.887,\n 849.5506, 841.2686, 833.0042, 824.7891, 812.5049, 796.2104, 776.0027, 751.9025,\n 727.9612, 704.1409, 680.4028, 656.7156, 629.077, 597.4286, 565.6315, 533.5961,\n 501.2452, 468.493, 435.2486, 401.4239, 366.9387, 331.7026, 295.6319, 258.6428,\n 220.9178, 182.9384, 
144.959, 106.9778, 69.00213] * units.hPa\n t = [-3.039381, -3.703779, -4.15996, -4.562574, -5.131827, -5.856229, -6.568434,\n -7.276881, -7.985013, -8.670911, -8.958063, -7.631381, -6.05927, -5.083627,\n -5.11576, -5.687552, -5.453021, -4.981445, -5.236665, -6.324916, -8.434324,\n -11.58795, -14.99297, -18.45947, -21.92021, -25.40522, -28.914, -32.78637,\n -37.7179, -43.56836, -49.61077, -54.24449, -56.16666, -57.03775, -58.28041,\n -60.86264, -64.21677] * units.degC\n td = [-22.08774, -22.18181, -22.2508, -22.31323, -22.4024, -22.51582, -22.62526,\n -22.72919, -22.82095, -22.86173, -22.49489, -21.66936, -21.67332, -21.94054,\n -23.63561, -27.17466, -31.87395, -38.31725, -44.54717, -46.99218, -43.17544,\n -37.40019, -34.3351, -36.42896, -42.1396, -46.95909, -49.36232, -48.94634,\n -47.90178, -49.97902, -55.02753, -63.06276, -72.53742, -88.81377, -93.54573,\n -92.92464, -91.57479] * units.degC\n prof = parcel_profile(p, t[0], td[0]).to('degC')\n el_p, el_t = el(p, t, td, prof)\n assert_nan(el_p, p.units)\n assert_nan(el_t, t.units)", "def TriangleForwardSub(L,b):\n C = solve(L,b)\n return C", "def __inverse_kinematics(self, guess, target_point):\n\n error = 1.0\n tolerance = 0.05\n\n # Initial Guess - Joint Angles\n thetas = np.matrix(guess) # thetas is list which is contain all axes theta angles.\n target_point = np.matrix(target_point) # X, Y, Z list to matrix for Target Position\n # print(target_point.shape)\n # Jacobian\n self.__calc_jacobian_matrix()\n tf_matrix_first_to_last = self.tf_matrices_list[-1]\n\n error_grad = []\n\n theta_dict = {}\n\n lr = 0.2\n while error > tolerance:\n for i in range(len(np.array(thetas)[0])):\n theta_dict[self.q[i]] = np.array(thetas)[0][i]\n\n theta_dict[self.q[-1]] = self.q[-1]\n\n calculated_target_point = np.matrix(self.get_coords_from_forward_kinematics(self.__forward_kinematics(np.array(thetas)[0])[-1]))\n logger.debug(f'calculated target point is \\n{calculated_target_point}')\n\n diff_wanted_calculated = target_point - calculated_target_point\n\n jacob_mat = np.matrix(self.jacobian_matrix.evalf(subs=theta_dict, chop=True, maxn=4)).astype(np.float64).T\n logger.debug(f'jacobian matrix is\\n{jacob_mat} \\n\\n diff is \\n {diff_wanted_calculated}')\n\n thetas = thetas + lr * (jacob_mat * diff_wanted_calculated.T)\n # thetas = np.array(thetas)[0] # this line's purpose is changing Q from matrix level to array level.\n\n prev_error = error\n\n error = linalg.norm(diff_wanted_calculated)\n\n if error > 10 * tolerance:\n lr = 0.3\n elif error < 10 * tolerance:\n lr = 0.2\n error_grad.append((error - prev_error))\n\n # print(error)\n return np.array(thetas)[0]", "def solveVerticalTrajectory(self,t,T_s,T_i,el,v,coord,alt_sp,v_sp):\n\n bal = sphere_balloon.Sphere_Balloon()\n rad = radiation.Radiation()\n\n T_atm = rad.getTemp(el)\n p_atm = rad.getPressure(el)\n rho_atm = rad.getDensity(el)\n\n rho_int = p_atm/(self.Rsp_air*T_i)\n tm_air = rho_int*self.vol*self.Cp_air0\n\n #Numerically integrate change in Surface Temperature\n coord[\"alt\"] = el\n q_rad = rad.get_rad_total(t,coord)\n q_surf = bal.get_sum_q_surf(q_rad, T_s, el, v)\n q_int = bal.get_sum_q_int(T_s, T_i, el)\n dT_sdt = (q_surf-q_int)/self.k\n\n #Numerically integrate change in Surface Temperature\n tm_air = rho_atm*self.vol*self.Cp_air0\n dT_idt = (q_int-self.get_convection_vent(T_i,el))/tm_air\n\n #Add the new surface and internal Temperatures\n T_s_new = T_s+dT_sdt*self.dt\n T_i_new = T_i+dT_idt*self.dt\n\n #solve for accellration, position, and velocity\n dzdotdt = 
self.get_acceleration(v,el,T_s,T_i)\n zdot = v + dzdotdt*self.dt\n z = el+zdot*self.dt\n\n #Add the new velocity and position\n if z < self.min_alt:\n v_new = 0\n el_new = self.min_alt\n else:\n v_new = zdot\n el_new = z\n\n # Venting commands for an altitude setpoint. Vent is either on or off.\n if el_new > alt_sp:\n self.mdot = self.vent\n\n if el_new < alt_sp:\n self.mdot = 0\n\n return [T_s_new,T_i_new,T_atm,el_new,v_new, q_rad, q_surf, q_int]", "def lfun(z, lparams):\n W, b = unpack(lparams)\n return np.tanh(np.dot(z, W) + b)", "def _ig_tsz(self, x, b):\n return self.P(x*self.r500) * (x / np.sqrt(x**2. - b**2.))", "def tt_gmres_leftprecond(AOp, b, nrm_b, eps=1.e-6, maxIter=20, verbose=True, preconOp=None, adaptiveTolerance=True):\n\n def calc_solution():\n x = pitts_py.TensorTrain_double(b.dimensions())\n x.setZero()\n nrm_x = 0\n for i in range(len(y)):\n nrm_x = pitts_py.axpby(y[i], V[i], nrm_x, x, eps)\n return x, nrm_x\n\n def residual_error(x, nrm_x):\n #print(\"TT-GMRES: solution max rank %d\" % np.max(x.getTTranks()))\n # calculate real residual\n r = pitts_py.TensorTrain_double(b.dimensions())\n r_nrm = nrm_x * AOp(x, r, eps/10, maxRank=9999)\n if preconOp is not None:\n r_nrm = pitts_py.axpby(orig_nrm_b, orig_b, -r_nrm, r, eps/10, maxRank=9999)\n #print(\"TT-GMRES: real residual norm %g\" % (r_nrm/orig_nrm_b) )\n else:\n r_nrm = pitts_py.axpby(nrm_b, b, -r_nrm, r, eps/10, maxRank=9999)\n #print(\"TT-GMRES: real residual norm %g\" % (r_nrm/nrm_b) )\n return r_nrm\n\n if verbose:\n if preconOp is None:\n print('# \"iteration\" \"rel LSTQ norm\" \"rel residual norm\" \"new direction rank\" \"new Krylov vector rank\" \"solution rank\"')\n else:\n print('# \"iteration\" \"rel LSTQ norm\" \"rel residual norm\" \"new direction rank\" \"precond direction rank\" \"new Krylov vector rank\" \"solution rank\"')\n\n # assumes b is normalized and nrm_b is the desired rhs norm\n\n # left-preconditioning, transform RHS\n if preconOp is not None:\n orig_b = b\n orig_nrm_b = nrm_b\n b = pitts_py.TensorTrain_double(orig_b.dimensions())\n nrm_b = nrm_b * preconOp.apply(orig_b, b, eps / 10, 9999)\n nrm_b = nrm_b * pitts_py.normalize(b, eps/10, 9999)\n\n # define initial subspace\n beta = nrm_b\n curr_beta = beta\n V = [b]\n m = maxIter\n H = np.zeros((m + 1, m), order='F')\n\n if preconOp is not None:\n z = pitts_py.TensorTrain_double(b.dimensions())\n\n if verbose:\n #print(\"TT-GMRES: initial residual norm: %g, max. rank: %d\" % (beta, np.max(b.getTTranks())))\n if preconOp is None:\n print(0, 1, 1, np.max(b.getTTranks()), np.max(b.getTTranks()), 0)\n #print(\"TT-GMRES: un-preconditioned RHS max. 
rank: %d\" % np.max(orig_b.getTTranks()))\n else:\n print(0, 1, 1, np.max(orig_b.getTTranks()), np.max(b.getTTranks()), np.max(b.getTTranks()), 0)\n\n for j in range(m):\n if adaptiveTolerance:\n delta = eps / (curr_beta / beta) / (1.2 * m)\n else:\n delta = eps\n w = pitts_py.TensorTrain_double(b.dimensions())\n\n if preconOp is not None:\n z_nrm = AOp(V[j], z, delta, 9999)#, (j+1)*rank_b)\n w_nrm = z_nrm * preconOp.apply(z, w, delta, 9999)#, (j+2)*rank_b)\n else:\n w_nrm = AOp(V[j], w, delta, 9999)#, (j+2)*rank_b)\n\n if preconOp is not None:\n rank_z = np.max(z.getTTranks())\n rank_w = np.max(w.getTTranks())\n\n H[:j+2,j] = w_nrm * tt_pivmgs(V, w, delta, maxRank=9999)\n\n rank_vj = np.max(w.getTTranks())\n\n Hj = H[:j+2,:j+1]\n betae = np.zeros(j+2)\n betae[0] = beta\n # solving Hj * y = beta e_1\n y, curr_beta, rank, s = np.linalg.lstsq(Hj, betae, rcond=None)\n curr_beta = np.sqrt(curr_beta[0]) if curr_beta.size > 0 else 0\n if verbose:\n #print(\"TT-GMRES: LSTSQ residual norm: %g \" % (curr_beta / beta) )\n x, nrm_x = calc_solution()\n r_nrm = residual_error(x, nrm_x)\n rank_x = np.max(x.getTTranks())\n if preconOp is None:\n print(j+1, curr_beta/beta, r_nrm / nrm_b, rank_w, rank_vj, rank_x)\n else:\n print(j+1, curr_beta/beta, r_nrm / orig_nrm_b, rank_w, rank_z, rank_vj, rank_x)\n if curr_beta / beta <= eps:\n break\n\n if not verbose:\n x, nrm_x = calc_solution()\n return x, nrm_x", "def least_squares(y, tx):\n\tif len(y.shape)==2:\n\t\ty = y.reshape((max(y.shape)))\n\tA = np.dot(tx.T, tx)\n\tb = np.dot(tx.T, y)\n\n\tw = np.linalg.solve(A, b)\n\n\tloss = compute_loss(y, tx, w)\n\n\treturn w, loss", "def solve_cholesky(matvec: Callable, b: jnp.ndarray) -> jnp.ndarray:\n if len(b.shape) == 0:\n return b / _materialize_array(matvec, b.shape)\n elif len(b.shape) == 1:\n A = _materialize_array(matvec, b.shape)\n return jax.scipy.linalg.solve(A, b, sym_pos=True)\n elif len(b.shape) == 2:\n A = _materialize_array(matvec, b.shape)\n return jax.scipy.linalg.solve(A, b.ravel(), sym_pos=True).reshape(*b.shape)\n else:\n raise NotImplementedError", "def cal_tether_J(self):\n\n self.B_tether_plus = np.zeros((self.point_matrix.shape[0],\n self.point_matrix.shape[1]\n * self.attach_points_tether.shape[0],\n self.attach_points_tether.shape[0]))\n self.J_tether = np.zeros((self.point_matrix.shape[0],\n self.point_matrix.shape[1]\n * self.attach_points_tether.shape[0],\n self.point_matrix.shape[1]))\n for i in range(0, self.point_matrix.shape[0]):\n self.B_tether_plus[i, :, :] = pinv(self.B_tether[i, :, :])\n self.J_tether[i, :, :] = np.dot(self.B_tether_plus[i, :, :],\n self.A_tether[i, :, :])", "def implicit_ftcs(T, A, nt, sigma, qdx):\n\n for t in range(nt):\n Tn = T.copy()\n b = generateRHS(Tn, sigma, qdx)\n # Use numpy.linalg.solve\n T_interior = solve(A,b)\n T[1:-1] = T_interior\n # Enforce Neumann BC (Dirichlet is enforced automatically)\n T[-1] = T[-2] + qdx\n\n return T", "def analyticSol (x):\n\treturn x*(1-x);", "def least_squares(y, tx):\n # ***************************************************\n # INSERT YOUR CODE HERE\n # least squares: TODO\n # returns mse, and optimal weights\n # ***************************************************\n a = tx.T.dot(tx)\n b = tx.T.dot(y)\n w_opt = np.linalg.solve(a, b)\n mse = compute_mse(y, tx, w_opt)\n return mse, w_opt", "def linear_forward_calculation(A, W, b):\n # Your code here\n # print(W.shape, A.shape, b.shape)\n Z=np.dot(W,A)+b\n\n return Z\n # raise NotImplementedError" ]
[ "0.63466734", "0.61827254", "0.61033237", "0.6093494", "0.60769826", "0.5885008", "0.58844715", "0.5877297", "0.58737326", "0.58588946", "0.5838278", "0.5794063", "0.57753825", "0.5773156", "0.5763559", "0.57562786", "0.574674", "0.57452273", "0.57390094", "0.57179475", "0.5697003", "0.5690629", "0.5688454", "0.56821567", "0.567632", "0.56759006", "0.56697446", "0.56637865", "0.5655258", "0.5654495", "0.563661", "0.55948097", "0.5586641", "0.5582456", "0.55767703", "0.5573332", "0.5565758", "0.5558991", "0.5547946", "0.5531439", "0.55293393", "0.5524061", "0.5517735", "0.55066854", "0.54999024", "0.54964954", "0.54848313", "0.5473882", "0.5463196", "0.5450906", "0.5450599", "0.5440231", "0.54359746", "0.54356855", "0.5433286", "0.5433214", "0.5427014", "0.54206467", "0.5415054", "0.5412449", "0.54066366", "0.5405766", "0.53949964", "0.53888977", "0.538255", "0.5381926", "0.5369192", "0.5363795", "0.5363322", "0.53623146", "0.53561246", "0.53550714", "0.5355003", "0.5354456", "0.5350987", "0.53505236", "0.5346201", "0.5343736", "0.53396535", "0.5334981", "0.53245795", "0.53230286", "0.53116053", "0.53087455", "0.52989954", "0.529494", "0.5292728", "0.528351", "0.5281746", "0.52806205", "0.52776754", "0.527536", "0.52748704", "0.526924", "0.5261879", "0.525743", "0.52564824", "0.5252379", "0.5246616", "0.52427936" ]
0.7257071
0
Compute the log determinant of a positive-definite symmetric Toeplitz matrix. The determinant is computed recursively, exploiting the intermediate solutions of the Levinson recursion.
def toeplitz_slogdet(r): n = len(r) r_0 = r[0] r = np.concatenate((r, np.array([r_0]))) r /= r_0 # normalize the system so that the T matrix has diagonal of ones logdet = n*np.log(np.abs(r_0)) sign = np.sign(r_0)**n if n == 1: return (sign, logdet) # now on is a modification of Levinson algorithm y = zeros((n,)) x = zeros((n,)) b = -r[1:n+1] r = r[:n] y[0] = -r[1] x[0] = b[0] beta = 1 alpha = -r[1] d = 1 + dot(-b[0], x[0]) sign *= np.sign(d) logdet += np.log(np.abs(d)) for k in range(0,n-2): beta = (1 - alpha*alpha)*beta mu = (b[k+1] - dot(r[1:k+2], x[k::-1])) /beta x[0:k+1] = x[0:k+1] + mu*y[k::-1] x[k+1] = mu d = 1 + dot(-b[0:k+2], x[0:k+2]) sign *= np.sign(d) logdet += np.log(np.abs(d)) if k < n-2: alpha = -(r[k+2] + dot(r[1:k+2], y[k::-1]))/beta y[0:k+1] = y[0:k+1] + alpha * y[k::-1] y[k+1] = alpha return(sign, logdet)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fast_logdet(matrix):\n sign, ld = np.linalg.slogdet(matrix)\n if not sign > 0:\n return -np.inf\n return ld", "def pddet(A):\r\n L = jitchol(A)\r\n logdetA = 2*sum(np.log(np.diag(L)))\r\n return logdetA", "def log_abs_det_jacobian(self, z):\n pre_u = self.u_ + self.u\n pre_w = self.w_ + self.w\n a = F.softplus(self.a + self.inv)\n w = F.softmax(pre_w, dim=3)\n u = F.softmax(pre_u, dim=3)\n # Perform computation\n pre_sigm = torch.sum(u * a * z, 3) + self.b\n sigm = torch.sigmoid(pre_sigm)\n x_pre = torch.sum(w * sigm, dim=3)\n x_pre_clipped = x_pre * (1 - self.eps) + self.eps * 0.5\n logj = F.log_softmax(pre_w, dim=3) + logsigmoid(pre_sigm) + logsigmoid(-pre_sigm) + torch.log(a)\n # n, d, d2, dh\n logj = logj + F.log_softmax(pre_u, dim=3)\n # n, d, d2, dh, d1\n logj = torch.log(torch.sum(torch.exp(logj),3))\n # n, d, d2, d1\n logdet_ = logj + np.log(1 - self.eps) - (torch.log(x_pre_clipped) + torch.log(-x_pre_clipped + 1))\n return logdet_", "def _compute_log_det_cholesky(matrix_chol, covariance_type, n_features):\n if covariance_type == 'full':\n n_components, _, _ = matrix_chol.shape\n log_det_chol = (np.sum(np.log(\n matrix_chol.reshape(\n n_components, -1)[:, ::n_features + 1]), 1))\n\n elif covariance_type == 'tied':\n log_det_chol = (np.sum(np.log(np.diag(matrix_chol))))\n\n elif covariance_type == 'diag':\n log_det_chol = (np.sum(np.log(matrix_chol), axis=1))\n\n else:\n log_det_chol = n_features * (np.log(matrix_chol))\n\n return log_det_chol", "def log_abs_det_jacobian(self, z):\n self.a = F.softplus(self.a)\n self.w = F.softmax(self.w, dim=1)\n pre_sigm = self.a * z + self.b\n sigm = torch.sigmoid(pre_sigm)\n x_pre = self.w * sigm\n if (len(z.shape) > 2):\n x_pre = torch.sum(self.w * sigm, dim=1)\n x_pre_clipped = x_pre * (1 - self.eps) + self.eps * 0.5\n logj = F.log_softmax(self.w, dim=1) + logsigmoid(pre_sigm) + logsigmoid(-pre_sigm) + torch.log(self.a)\n logj = torch.log(torch.sum(torch.exp(logj)))#,2).sum(2)\n logdet = logj + np.log(1 - self.eps) - (torch.log(x_pre_clipped) + torch.log(-x_pre_clipped + 1))\n return sum_dims(logdet)", "def log_abs_det_jacobian(self, x, y, intermediates=None):\n if intermediates is None:\n logdet = self.bn_arn(x)[1]\n return logdet.sum(-1)\n else:\n logdet = intermediates\n return logdet.sum(-1)", "def _forward_log_det_jacobian(self, x):\n d = self._compute_shared(x=x)\n relx = (x - d.x_k) / d.w_k\n relx = relx # tf.where(d.out_of_bounds, 0.5*tf.ones_like(x), relx)\n grad = (\n 2 * tf.math.log(d.s_k) +\n tf.math.log(d.d_kp1 * relx**2 + 2 * d.s_k * relx * (1 - relx) + # newln\n d.d_k * (1 - relx)**2) -\n 2 * tf.math.log((d.d_kp1 + d.d_k - 2 * d.s_k) * relx *\n (1 - relx) + d.s_k))\n return grad # tf.where(d.out_of_bounds, tf.zeros_like(grad), grad)", "def determinant(self):\n if self.cols != self.rows:\n raise Exception ('Matrix is not square!')\n for i in range(self.rows):\n if self.values[i][i] == 0:\n raise Exception ('There is zero on the main diagonal')\n #TODO: Rearrange the lines, that the main diagonal don't have a zero values \n\n arr = self.values[:]\n for i in range(self.rows):\n for j in range(self.cols):\n diag = [arr[l][p] for p in range(self.cols) for l in range(self.rows) if l == p ]\n if i > j :\n arr2 = arr[i][j]/diag[j]\n arr1 = [round(x * arr2, 4) for x in arr[i-i+j]]\n arr[i] = map(lambda x,y: round(x - y, 4) , arr[i], arr1 )\n\n diag = [arr[l][p] for p in range(self.cols) for l in range(self.rows) if l == p ]\n det = 1\n for i in range(len(diag)):\n det *= diag[i]\n if det != 0 :\n return True\n else:\n return 
False", "def _inverse_log_det_jacobian(self, x):\n alpha, beta = self._get_alpha_beta()\n diff = x - self.x0\n r = tf.linalg.norm(diff, axis=-1, keepdims=True)\n h = 1. / (alpha + r)\n h_prime = -(h ** 2)\n beta_h = beta * h\n log_det_jacobian = tf.reduce_sum(\n (self.dim - 1) * tf.math.log1p(beta_h)\n + tf.math.log1p(beta_h + beta * h_prime * r), axis=-1)\n return log_det_jacobian", "def determinant(self):\n if self.n_rows != self.n_cols:\n raise Exception('Matrix is not square')\n if self.n_rows == 2:\n return (self.data[0][0] * self.data[1][1]) - (self.data[1][0] * self.data[0][1])\n else:\n echelon, ops = reduce_to_echelon(self.data.copy(), True)\n swaps = sum([1 if row[0] == 'swap' else 0 for row in ops])\n return math.prod([echelon[i][i] for i in range(len(echelon))]) * (-1) ** swaps", "def log_abs_det_jacobian(self, x, y, intermediates=None):\n if intermediates is None:\n log_scale = self.arn(x)[1]\n log_scale = _clamp_preserve_gradients(\n log_scale, self.log_scale_min_clip, self.log_scale_max_clip\n )\n return log_scale.sum(-1)\n else:\n log_scale = intermediates\n return log_scale.sum(-1)", "def determinant_fast(A):\n # Section 1: Establish n parameter and copy A\n n = len(A)\n AM = copy_matrix(A)\n\n # Section 2: Row manipulate A into an upper triangle matrix\n for fd in range(n): # fd stands for focus diagonal\n if AM[fd][fd] == 0: \n AM[fd][fd] = 1.0e-18 # Cheating by adding zero + ~zero\n for i in range(fd+1,n): # skip row with fd in it.\n crScaler = AM[i][fd] / AM[fd][fd] # cr stands for \"current row\".\n for j in range(n): # cr - crScaler * fdRow, but one element at a time.\n AM[i][j] = AM[i][j] - crScaler * AM[fd][j]\n \n # Section 3: Once AM is in upper triangle form ...\n product = 1.0\n for i in range(n):\n product *= AM[i][i] # ... 
product of diagonals is determinant\n\n return product", "def determinant(A):\n \n total = 0\n\n if len(A) == 1:\n return A[0][0]\n\n for col in range(len(A)):\n Asub = A[1:]\n for j in range(len(A)-1):\n Asub[j] = Asub[j][:col] + Asub[j][col+1:]\n subdet = determinant(Asub)\n sign = (-1) ** (col % 2)\n total += sign * A[0][col] * subdet\n\n return total", "def determinant(self) -> float:\n num_R, num_C = self.shape()\n assert num_R == num_C, f\"Determinant must be for a square matrix; this one is {self.shape()}.\"\n # -------------------------------------------------------\n # TODO: You write this one.\n # Note: this one should be recursive....\n if num_R == 1:\n return self.mat[0][0]\n det =0\n for i in range(num_R):\n det += self.mat[0][i] * self.get_minor(0,i).determinant() * (-1)**i\n return det\n pass # remove this when you add your code.\n # -------------------------------------------------------", "def compute_det(self, log_progress=False):\n if not self.is_square():\n raise Exception(u\"Not a square matrix\")\n\n mat = clone_matrix(self.coefficients)\n size = self.get_size()[0]\n\n for i in range(size - 1):\n for j in range(i + 1, size):\n for k in range(i + 1, size):\n mat[j][k] = (mat[j][k] * mat[i][i]) - (mat[j][i] * mat[i][k])\n if i > 0:\n mat[j][k] //= mat[i - 1][i - 1]\n if log_progress:\n print(i)\n if i > 0:\n for j in range(size):\n mat[j][i - 1] = 0\n mat[i - 1][j] = 0\n\n return mat[size - 1][size - 1]", "def determinant(matrix):\n if type(matrix) is not list or len(matrix) == 0:\n raise TypeError(\"matrix must be a list of lists\")\n\n if len(matrix) == 1 and len(matrix[0]) == 0:\n return 1\n\n for i in matrix:\n if type(i) is not list:\n raise TypeError(\"matrix must be a list of lists\")\n\n if len(i) != len(matrix):\n raise ValueError(\"matrix must be a square matrix\")\n\n if len(matrix) == 1:\n return matrix[0][0]\n\n if len(matrix) == 2:\n return (matrix[0][0] * matrix[1][1]) - (matrix[0][1]\n * matrix[1][0])\n deter = 0\n\n for j, k in enumerate(matrix[0]):\n rows = [r for r in matrix[1:]]\n sub = []\n for r in rows:\n sub.append([r[a] for a in range(len(matrix)) if a != j])\n deter += k * (-1) ** j * determinant(sub)\n return deter", "def det(A):\n # Section 1: Establish n parameter and copy A\n n = len(A)\n AM = A[:]\n\n # Section 2: Row manipulate A into an upper triangle matrix\n for fd in range(n): # fd stands for focus diagonal\n if AM[fd][fd] == 0:\n AM[fd][fd] = 1.0e-18 # Cheating by adding zero + ~zero\n for i in range(fd+1, n): # skip row with fd in it.\n crScaler = AM[i][fd] / AM[fd][fd] # cr stands for \"current row\".\n for j in range(n): # cr - crScaler * fdRow, one element at a time.\n AM[i][j] = AM[i][j] - crScaler * AM[fd][j]\n\n # Section 3: Once AM is in upper triangle form ...\n product = 1.0\n for i in range(n):\n product *= AM[i][i] # ... 
product of diagonals is determinant\n\n return product", "def local_det_chol(node):\r\n if node.op == det:\r\n x, = node.inputs\r\n for (cl, xpos) in x.clients:\r\n if isinstance(cl.op, Cholesky):\r\n L = cl.outputs[0]\r\n return [tensor.prod(extract_diag(L) ** 2)]", "def logp(value, mu, rowchol, colchol):\n\n if value.ndim != 2:\n raise ValueError(\"Value must be two dimensional.\")\n\n # Compute Tr[colcov^-1 @ (x - mu).T @ rowcov^-1 @ (x - mu)] and\n # the logdet of colcov and rowcov.\n delta = value - mu\n\n # Find exponent piece by piece\n right_quaddist = solve_lower(rowchol, delta)\n quaddist = pt.nlinalg.matrix_dot(right_quaddist.T, right_quaddist)\n quaddist = solve_lower(colchol, quaddist)\n quaddist = solve_upper(colchol.T, quaddist)\n trquaddist = pt.nlinalg.trace(quaddist)\n\n coldiag = pt.diag(colchol)\n rowdiag = pt.diag(rowchol)\n half_collogdet = pt.sum(pt.log(coldiag)) # logdet(M) = 2*Tr(log(L))\n half_rowlogdet = pt.sum(pt.log(rowdiag)) # Using Cholesky: M = L L^T\n\n m = rowchol.shape[0]\n n = colchol.shape[0]\n\n norm = -0.5 * m * n * pm.floatX(np.log(2 * np.pi))\n return norm - 0.5 * trquaddist - m * half_collogdet - n * half_rowlogdet", "def determinant(self):\n if self.m != self.n:\n raise exc.LinearAlgebraError(\"cannot calculate the determinant of\"\n \"a non-square matrix\")\n if self.m == 1:\n return self[0, 0]\n # TODO: can we choose a better row/column to improve efficiency\n return functools.reduce(\n lambda x, y: x ^ y,\n [self[0, j] and\n self.subset([i for i in range(1, self.m)],\n [k for k in range(self.n) if k != j]).determinant\n for j in range(self.n)],\n )", "def Determinant(matrix, mul):\r\n width = len(matrix)\r\n # Stop Conditions\r\n if width == 1:\r\n return mul * matrix[0][0]\r\n else:\r\n sign = -1\r\n det = 0\r\n for i in range(width):\r\n m = []\r\n for j in range(1, width):\r\n buff = []\r\n for k in range(width):\r\n if k != i:\r\n buff.append(matrix[j][k])\r\n m.append(buff)\r\n # Change the sign of the multiply number\r\n sign *= -1\r\n # Recursive call for determinant calculation\r\n det = det + mul * Determinant(m, sign * matrix[0][i])\r\n return det", "def MvNormalLogp():\n cov = pt.matrix(\"cov\")\n cov.tag.test_value = floatX(np.eye(3))\n delta = pt.matrix(\"delta\")\n delta.tag.test_value = floatX(np.zeros((2, 3)))\n\n cholesky = Cholesky(lower=True, on_error=\"nan\")\n\n n, k = delta.shape\n n, k = f(n), f(k)\n chol_cov = cholesky(cov)\n diag = pt.diag(chol_cov)\n ok = pt.all(diag > 0)\n\n chol_cov = pt.switch(ok, chol_cov, pt.fill(chol_cov, 1))\n delta_trans = solve_lower(chol_cov, delta.T).T\n\n result = n * k * pt.log(f(2) * np.pi)\n result += f(2) * n * pt.sum(pt.log(diag))\n result += (delta_trans ** f(2)).sum()\n result = f(-0.5) * result\n logp = pt.switch(ok, result, -np.inf)\n\n def dlogp(inputs, gradients):\n (g_logp,) = gradients\n cov, delta = inputs\n\n g_logp.tag.test_value = floatX(1.0)\n n, k = delta.shape\n\n chol_cov = cholesky(cov)\n diag = pt.diag(chol_cov)\n ok = pt.all(diag > 0)\n\n chol_cov = pt.switch(ok, chol_cov, pt.fill(chol_cov, 1))\n delta_trans = solve_lower(chol_cov, delta.T).T\n\n inner = n * pt.eye(k) - pt.dot(delta_trans.T, delta_trans)\n g_cov = solve_upper(chol_cov.T, inner)\n g_cov = solve_upper(chol_cov.T, g_cov.T)\n\n tau_delta = solve_upper(chol_cov.T, delta_trans.T)\n g_delta = tau_delta.T\n\n g_cov = pt.switch(ok, g_cov, -np.nan)\n g_delta = pt.switch(ok, g_delta, -np.nan)\n\n return [-0.5 * g_cov * g_logp, -g_delta * g_logp]\n\n return OpFromGraph([cov, delta], [logp], grad_overrides=dlogp, 
inline=True)", "def determinant(self):\n if not self.is_square():\n raise(ValueError, \"Cannot calculate determinant of non-square matrix.\")\n if self.h > 2:\n raise(NotImplementedError, \"Calculating determinant not implemented for matrices largerer than 2x2.\")\n\n # TODO - your code here\n if self.h == 1:\n return self.g[0][0] # a 1x1 matrix\n else:\n return ((self.g[0][0] * self.g[1][1]) - (self.g[0][1] * self.g[1][0])) # a 2x2 matrix\n # TODO - your code here", "def det(a):\n a = copy.deepcopy(a)\n n = len(a)\n det = 1\n com_k = 1\n for k in range(n-1):\n step = 1\n\n while a[k][k] == 0:\n a[k+step], a[k] = a[k], a[k+step]\n det = -det\n step += 1\n mul = a[k][k]\n\n for i in range(k+1, n):\n for j in range(k+1, n):\n a[i][j] *= mul\n a[i][j] -= a[i][k] * a[k][j]\n a[i][j] /= com_k\n\n com_k = mul\n\n det = det * a[-1][-1]\n\n return det", "def determinant(self):\n if self.m != self.n:\n raise exc.LinearAlgebraError(\"cannot calculate the determinant of\"\n \"a non-square matrix\")\n if self.m == 1:\n return self[0, 0]\n # TODO: can we choose a better row/column to improve efficiency\n return sum([self[0, j] * (-1 if j % 2 else 1) *\n self.subset([i for i in range(1, self.m)],\n [k for k in range(self.n) if k != j]).determinant\n for j in range(self.n)])", "def log_det_K(self, Ks=None):\n log_det = 0.\n for K in self.Ks:\n rank_d = self.n / K.shape[0]\n det = np.linalg.slogdet(K)[1]\n log_det += rank_d * det\n return log_det", "def logit_deriv(y):\n# if y.any() < 0.0 or y.any() > 1.0:\n# raise Exception\n\n return y*(1-y)", "def log_det_K(self, Ks=None):\n Ks = self.Ks if Ks is None else Ks\n log_det = 0.\n for K in Ks:\n rank_d = self.m / K.shape[0]\n det = np.linalg.slogdet(K)[1]\n log_det += rank_d * det\n return log_det", "def det(self):\n\n if self.rows != self.columns:\n raise ValueError(\"Matrix must be square\")\n\n if self.rows == 1:\n return self.row(1)[0]\n\n if self.rows == 2:\n return self.entry(1,1) * self.entry(2,2) - self.entry(1,2) * self.entry(2,1)\n\n det = 0\n row_to_expand = 1\n\n for i in range(1, self.columns + 1):\n det += self.entry(row_to_expand, i) * self._cofactor(row_to_expand, i)\n\n return det", "def determinant(matrix):\n if matrix == [[]]:\n return 1\n if type(matrix) is not list or len(matrix) < 1 or\\\n not all(isinstance(x, list) for x in matrix):\n raise TypeError(\"matrix must be a list of lists\")\n if not all(len(matrix) == len(x) for x in matrix):\n raise ValueError(\"matrix must be a square matrix\")\n copy = list(map(list, matrix))\n dim = len(matrix)\n if dim == 1:\n return matrix[0][0]\n elif dim == 2:\n return matrix[0][0] * matrix[1][1] - matrix[1][0] * matrix[0][1]\n else:\n for cur in range(dim):\n for i in range(cur + 1, dim):\n if copy[cur][cur] == 0:\n copy[cur][cur] = 1.0e-10\n curScaler = copy[i][cur] / copy[cur][cur]\n for j in range(dim):\n copy[i][j] = copy[i][j] - curScaler * copy[cur][j]\n det = 1\n for i in range(dim):\n det *= copy[i][i]\n return round(det)", "def det_matrix(self):\n return np.linalg.det(self.take_matrix())", "def determinant(self):\n if self.L is None or self.U is None:\n self.decomposeLU()\n\n retval = 1.0\n for i in range(self.rows):\n retval *= self.L[i, i] * self.U[i, i]\n return retval", "def fisher_diag(\n negative_log_likelihood: LossFun,\n params: Any,\n inputs: jnp.ndarray,\n targets: jnp.ndarray,\n) -> jnp.DeviceArray:\n return jnp.square(\n ravel(jax.grad(negative_log_likelihood)(params, inputs, targets)))", "def det(mtx):\n if not is_square(mtx):\n raise ValueError(\"Matrix should be 
square\")\n if len(mtx) == 2:\n return mtx[0][0] * mtx[1][1] - mtx[0][1] * mtx[1][0]\n\n result = 0\n sign = 1\n for inx in range(len(mtx)):\n next_mtx = get_minor_mtx(mtx, 0, inx)\n result += sign * (mtx[0][inx] * det(next_mtx))\n sign *= -1\n return result", "def log_abs_det_jacobian(self, x, y):\n if self.training:\n var = torch.var(y, dim=0, keepdim=True)\n else:\n # NOTE: You wouldn't typically run this function in eval mode, but included for gradient tests\n var = self.moving_variance\n return (-self.constrained_gamma.log() + 0.5 * torch.log(var + self.epsilon))", "def _calculate_log_det(self, var):\n log_det = []\n\n for k in range(self.n_components):\n evals, evecs = tf.linalg.eig(var[0, k])\n\n log_det.append(tf.reduce_sum(tf.math.log(tf.math.real(evals))))\n log_det = tf.convert_to_tensor(log_det)\n return tf.expand_dims(log_det, -1)", "def determinant(x):\n if len(x) == len(x[0]):\n if len(x) == 2:\n return cross_multiply(x)\n else:\n val = 0\n alt = False\n for i in range(len(x)):\n tmp = x[1:]\n t1, t2 = tmp[0][:], tmp[1][:]\n _ = t1.pop(i), t2.pop(i)\n new_t = [t1, t2]\n print(new_t)\n x_multiply = cross_multiply(new_t)\n if val == 0:\n val = x[0][i] * x_multiply\n else:\n if alt:\n val = val + (x[0][i] * x_multiply)\n alt = False\n else:\n val = val - (x[0][i] * x_multiply)\n alt = True\n return val\n else:\n return 'matrix is not a square matrix.'", "def invwish_logpdf(X, S, df):\n d = X.shape[0]\n if df < d:\n raise ValueError('df must be greater than or equal to the number of '\n ' dimensions of S')\n if d != X.shape[1]:\n raise ValueError('X must be square.')\n if S.shape[0] != d or S.shape[1] != d:\n raise ValueError('S must be the same shape as X.')\n\n _, logdet_S = slogdet(S)\n _, logdet_X = slogdet(X)\n\n logpdf = (df/2)*logdet_S - ((df*d/2)*log(2) + multigammaln(df/2, d))\n logpdf += (-(d+df+1)/2)*logdet_X - (1/2)*trace(solve(X.T, S.T))\n\n return logpdf", "def log_det_precisions(self):\n return tf.math.log(-2 * self.nat2)", "def determinant (self):\n if self.is_square:\n det = 1\n for idx, row in enumerate(echelon_form(self).rows()):\n det *= row[idx]\n return det\n else:\n raise NotImplementedError(\n \"Determinant only defined for square matrices.\")", "def _det(mat):\n\n return (mat[0][0] * (mat[1][1] * mat[2][2] - mat[1][2] * mat[2][1])\n + mat[0][1] * (mat[1][2] * mat[2][0] - mat[1][0] *\n mat[2][2]) + mat[0][2] * (mat[1][0] * mat[2][1] -\n mat[1][1] * mat[2][0]))", "def determinant(self):\n if not self.is_square():\n raise(ValueError, \"Cannot calculate determinant of non-square matrix.\")\n if self.h > 2:\n raise(NotImplementedError, \"Calculating determinant not implemented for matrices largerer than 2x2.\")\n \n # TODO - your code here\n if self.h == 1:\n return self.g[0][0];\n else:\n return self.g[0][0]*self.g[1][1]-self.g[0][1]*self.g[1][0];", "def _smw_logdet(s, A, AtA, B, BI, B_logdet):\n\n p = A.shape[0]\n\n if _no_smw or BI is None:\n mat = np.dot(A, np.dot(B, A.T))\n # Add constant to diagonal\n mat.flat[::p+1] += s\n _, ld = np.linalg.slogdet(mat)\n return ld\n\n ld = p * np.log(s)\n\n qmat = BI + AtA / s\n _, ld1 = np.linalg.slogdet(qmat)\n\n return B_logdet + ld + ld1", "def test_cloglog_transform_deriv_v(self):\n # Note the index has a value that is <= -40 to test whether or not\n # the function correctly uses L'Hopital's rule to deal with underflow\n # and calculating the derivative. 
When the index is <= -40, the\n # derivative should be 1.\n test_index = np.array([-40, 1, 7])\n # Note we use a compressed sparse-row matrix so that we can easily\n # convert the output matrix to a numpy array using the '.A' attribute.\n test_output = diags(np.ones(test_index.shape[0]),\n 0, format='csr')\n\n # Bundle the arguments needed for the function\n # Not all elements except for test_index are completely fake and only\n # needed because the function requires a given number of arguments.\n # This is for api compatibility with other models.\n args = [test_index,\n np.ones(3),\n diags(np.ones(3), 0, format='csr'),\n None]\n\n # Get the derivative using the function defined in clog_log.py.\n derivative = clog._cloglog_transform_deriv_v(*args,\n output_array=test_output)\n\n # Calculate, 'by hand' what the results should be\n correct_derivatives = np.diag(np.array([1,\n 2.910328703250801,\n 1096.6331584284585]))\n\n self.assertIsInstance(derivative, type(test_output))\n self.assertEqual(len(derivative.shape), 2)\n self.assertEqual(derivative.shape, (3, 3))\n npt.assert_allclose(correct_derivatives, derivative.A)\n\n return None", "def log_det_Jzx(self):\n #return self.log_det_zx.output\n log_det_Jzxs = []\n for l in self.layers:\n if hasattr(l, 'log_det_Jzx'):\n log_det_Jzxs.append(l.log_det_Jzx)\n if len(log_det_Jzxs) == 0:\n return tf.ones((self.output_x.shape[0],))\n if len(log_det_Jzxs) == 1:\n return log_det_Jzxs[0]\n return tf.reduce_sum(log_det_Jzxs, axis=0, keepdims=False)", "def log_likelihood(self):\n return -0.5*self.D*self.K_logdet + self._model_fit_term() + self.likelihood.Z", "def coulog(Te,nev):\n return np.log(12.0*np.pi*nev*(Debye(Te,nev))**3)", "def log_entropy(dm):\n size = len(dm)\n entropy = 0\n w, v = np.linalg.eig(dm)\n for n in range(size):\n if w[n] != 0:\n entropy = entropy - w[n] * np.log2(w[n])\n return entropy", "def determinant(self):\n d1 = self._row_1[0] * (self._row_2[1] * self._row_3[2] - self._row_2[2] * self._row_3[1])\n d2 = self._row_1[1] * (self._row_2[0] * self._row_3[2] - self._row_2[2] * self._row_3[0])\n d3 = self._row_1[2] * (self._row_2[0] * self._row_3[1] - self._row_2[1] * self._row_3[0])\n return d1 - d2 + d3", "def det(self):\n if self.x == 0 or self.y == 0:\n return None\n elif self.x == 1 or self.y == 1:\n return self.retrieve(0,0)\n else:\n out = 0.0\n for x in xrange(0, self.x):\n out += self.retrieve(0,x)*self.C(0,x)\n return out", "def det(self,mat):\n if(len(mat[0])==len(mat)):\n result = np.linalg.det(mat)\n self.determinant = result\n return self.determinant\n else:\n print(\"Not a square Matrix\")", "def compute_determinant(matrix):\n det = np.linalg.det(matrix)\n #if det == 0.:\n # The det = 0 could be related to the third feature\n # det = np.linalg.det(matrix[:2, :2])\n if det == 0.:\n # Singular covariance matrix, should not be taken into account\n det = np.nan\n if np.isclose(det, 0):\n det = np.abs(det)\n return det", "def det_nth_root(X, method='lu'):\n N = float(X.shape[0])\n if method == 'lu':\n P, L, U = scipy.linalg.lu(X)\n diags = (np.diag(L) ** (1 / N) * (np.diag(U) ** (1 / N)))\n determinant = np.product(diags)\n elif method == 'eig':\n L = np.linalg.eigvalsh(X)\n determinant = np.product(L ** (1 / float(L.size)))\n elif method == 'qr':\n (R,) = scipy.linalg.qr(X, mode='r')\n determinant = np.product(np.abs(np.diag(R)) ** (1 / N))\n else:\n raise Exception('method not understood')\n\n return np.nan_to_num(determinant)", "def matrix_det(A):\n\tx = A[0,0]*A[1,1]*A[2,2] + A[0,1]*A[1,2]*A[2,0] + 
A[0,2]*A[1,0]*A[2,1]\n\ty = A[0,0]*A[1,2]*A[2,1] + A[0,1]*A[1,0]*A[2,2] + A[0,2]*A[1,1]*A[2,0]\n\treturn x - y", "def calc_metric3(K_tilda):\n trace = np.trace(K_tilda)\n # determinant = np.linalg.det(K_tilda)\n _, log_determinant = np.linalg.slogdet(K_tilda)\n diff = trace - log_determinant\n print(trace, log_determinant, diff)\n return diff", "def det(self, colBy = 0):\n try:\n if not 0 <= colBy < self.getColCount(): raise self.matrixBadDimension('Podano niewłaściwy numer kolumny macierzy.\\nPodano: %s' % (colBy,))\n if self.getColCount() != self.getRowCount() or not self.matrix: return None\n if self.getColCount() == 1: return self[0,0]\n except self.matrixException as e:\n print \"Wyjątek w A.det(colBy = %d)!\\nA = \\n%s\\n\" % (colBy, indent(self))\n return None\n else:\n return reduce(lambda x,y: x+y, [(-1)**(i+colBy) * self[i,colBy] * self.minor(i,colBy).det() for i in range(self.getColCount())])", "def log(self) -> np.ndarray:\n S = 0.5*(self.A-self.A.T) # Skew-symmetric matrix\n y = np.array([S[2, 1], -S[2, 0], S[1, 0]]) # Axis\n if np.allclose(np.zeros(3), y):\n return np.zeros(3)\n y2 = np.linalg.norm(y)\n return np.arcsin(y2)*y/y2", "def td_const_ldet(a, b, T):\n d = np.sqrt(np.square(a) - 4.0 * np.square(b))\n # Make this split because we are raising something to a potentially large power and\n # need to be ensure that only happens for something < 1.\n return(-np.log(d) + (T+1) * (np.log(0.5) + np.log(a+d)) + np.log(1 - pow((a-d)/(a+d), T+1)))\n #return(-np.log(d) + (pow((a + d)/2,T+1) - pow((a - d) / 2, T+1)))\n # Next line is probably never needed:\n #return(-np.log(d) + (T+1) * (np.log(0.5) + np.log(a-d)) + np.log(pow((a+d) / (a-d), T+1) - 1))", "def calculate_negative_log_likelihood(self):\n data = self.played_points_hist[:self.t]\n kernel_matrix = self.kernel_fn(data, data, self.best_ard_params)\n c_matrix = kernel_matrix + (self.noise_sigma ** 2) * np.eye(data.shape[0])\n c_matrix_inv = np.linalg.inv(c_matrix)\n first_term = np.matmul(self.rews_hist[:self.t].T, np.matmul(c_matrix_inv, self.rews_hist[:self.t]))\n second_term = np.log(np.linalg.det(c_matrix))\n return first_term + second_term", "def Determinant_3x3(A, step_by_step=True ,row=True, n=1):\n \n if A.shape!=(3,3):\n raise ValueError('Dimension of matrix A should be 3x3. 
The input A must be a sp.Matrix of shape (3,3).')\n if n<1 or n>3 or not isinstance(n, int):\n raise ValueError('n should be an integer between 1 and 3.')\n \n # Construct string for determinant of matrix A\n detA_s = sp.latex(A).replace('[','|').replace(']','|')\n \n # To print all the steps\n if step_by_step:\n\n # If we compute the determinant with row n \n if row:\n # Matrix with row i and col j removed (red_matrix(A, i, j))\n A1 = red_matrix(A, n, 1)\n A2 = red_matrix(A, n, 2)\n A3 = red_matrix(A, n, 3)\n detA1_s = sp.latex(A1).replace('[','|').replace(']','|')\n\n detA2_s = sp.latex(A2).replace('[','|').replace(']','|')\n detA3_s = sp.latex(A3).replace('[','|').replace(']','|')\n\n line1 = \"$\" + detA_s + ' = ' + pl_mi(n,1, True) + sp.latex(A[n-1, 0]) + detA1_s + pl_mi(n,2) + \\\n sp.latex(A[n-1, 1]) + detA2_s + pl_mi(n,3) + sp.latex(A[n-1, 2]) + detA3_s + '$'\n\n line2 = '$' + detA_s + ' = ' + pl_mi(n,1, True) + sp.latex(A[n-1, 0]) + \"\\cdot (\" + sp.latex(sp.det(A1)) \\\n +\")\" + pl_mi(n,2) + sp.latex(A[n-1, 1]) + \"\\cdot (\" + sp.latex(sp.det(A2)) + \")\"+ \\\n pl_mi(n,3) + sp.latex(A[n-1, 2]) + \"\\cdot (\" + sp.latex(sp.det(A3)) + ')$'\n line3 = '$' + detA_s + ' = ' + sp.latex(sp.simplify(sp.det(A))) + '$'\n\n # If we compute the determinant with col n \n else:\n # Matrix with row i and col j removed (red_matrix(A, i, j))\n A1 = red_matrix(A, 1, n)\n A2 = red_matrix(A, 2, n)\n A3 = red_matrix(A, 3, n)\n detA1_s = sp.latex(A1).replace('[','|').replace(']','|')\n detA2_s = sp.latex(A2).replace('[','|').replace(']','|')\n detA3_s = sp.latex(A3).replace('[','|').replace(']','|')\n\n line1 = \"$\" + detA_s + ' = ' + pl_mi(n,1, True) + brackets(A[0, n-1]) + detA1_s + pl_mi(n,2) + \\\n brackets(A[1, n-1]) + detA2_s + pl_mi(n,3) + brackets(A[2, n-1]) + detA3_s + '$'\n\n line2 = '$' + detA_s + ' = ' + pl_mi(n,1, True) + brackets(A[0, n-1]) + \"\\cdot (\" + sp.latex(sp.det(A1))\\\n +\")\" + pl_mi(n,2) + brackets(A[1, n-1]) + \"\\cdot (\" + sp.latex(sp.det(A2)) + \")\"+ \\\n pl_mi(n,3) + brackets(A[2, n-1]) + \"\\cdot (\" + sp.latex(sp.det(A3)) + ')$'\n\n line3 = '$' + detA_s + ' = ' + sp.latex(sp.simplify(sp.det(A))) + '$'\n\n # Display step by step computation of determinant\n display(Latex(line1))\n display(Latex(line2))\n display(Latex(line3))\n # Only print the determinant without any step\n else:\n display(Latex(\"$\" + detA_s + \"=\" + sp.latex(sp.det(A)) + \"$\"))", "def det(self):\n\t\t\n\t\trows = self._rows\n\t\tsign = +1\n\t\tsumm = 0\n\n\t\tfor perm in permutations(range(rows), rows):\n\t\t\tmul = 1\n\t\t\tsign = SquareMatrix.__parity_of_permutation(perm)\n\n\t\t\tfor i in range(rows):\n\t\t\t\tmul *= self[i][perm[i]]\n\n\t\t\tsumm += sign * mul\n\t\treturn summ", "def log_det_Jxz(self):\n return self.log_det_xz", "def log_det_Jxz(self):\n return self.log_det_xz", "def log_det_Jxz(self):\n return self.log_det_xz", "def log_det_Jxz(self):\n return self.log_det_xz", "def log_det_Jzx(self):\n return self.log_det_zx", "def log_det_Jzx(self):\n return self.log_det_zx", "def log_det_Jzx(self):\n return self.log_det_zx", "def log_det_Jzx(self):\n return self.log_det_zx", "def neg_log_L1(alpha):\n Psi_alpha = Psi(alpha)\n logdet = np.linalg.slogdet(Psi_alpha)\n if logdet[0] < 0:\n # Remove any dodgy inversions\n return np.inf\n return np.trace(np.matmul(Psi_alpha, this_mock_cov)) - logdet[1]", "def log_likelihood(self):\r\n if self.likelihood.is_heteroscedastic:\r\n A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 
0.5 * np.sum(self.likelihood.V * self.likelihood.Y)\r\n B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self._A))\r\n else:\r\n A = -0.5 * self.num_data * self.output_dim * (np.log(2.*np.pi) - np.log(self.likelihood.precision)) - 0.5 * self.likelihood.precision * self.likelihood.trYYT\r\n B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision * self.psi0) - np.trace(self._A))\r\n C = -self.output_dim * (np.sum(np.log(np.diag(self.LB)))) # + 0.5 * self.num_inducing * np.log(sf2))\r\n D = 0.5 * self.data_fit\r\n self._A_part, self._B_part, self._C_part, self._D_part = A, B, C, D\r\n return A + B + C + D + self.likelihood.Z", "def calc_det_dzh(theta):\n return 919.49 - 27.018 * theta + 0.26209 * theta ** 2 - 0.00083803 * theta ** 3", "def der_log(self, xr, xc=None, out=None):\n if xc is None:\n return self._pder_log(xr, out)\n else:\n return self._pder_log(_np.hstack((xr, xc)), out)", "def calculate_log_perplexity(self, output, flat_labels): #completed, expensive, should be compiled\n return -np.sum(np.log2(np.clip(output, a_min=1E-12, a_max=1.0))[np.arange(flat_labels.shape[0]), flat_labels[:,1]])", "def logp_nojac(self, *args, **kwargs):\n return self.logp(*args, **kwargs)", "def log_lhood(X, Z, Y, a, ep, lamb):\n \n K = Z.shape[1]\n N, T = X.shape\n \n # p(X)\n ZY = np.dot(Z,Y) \n log_pX = 0\n log_pX = log_pX + np.sum(X * np.log(1 - ((1 - lamb) ** ZY) * (1 - ep)))\n log_pX = log_pX + np.sum((1 - X) * np.log(((1 - lamb) ** ZY) * (1 - ep))) \n \n # p(Z)\n HN = 0\n for n in range(1, N+1):\n HN += 1.0/n\n m = Z.sum(axis=0)\n log_pZ = (K * np.log(a) - (a * HN)) + np.sum(gammaln(m) +\n gammaln(N - m + 1) - gammaln(N + 1)) \n\n return log_pZ + log_pX", "def logp(X, nu, V):\n\n p = V.shape[0]\n\n IVI = det(V)\n IXI = det(X)\n\n return check_parameters(\n (\n (nu - p - 1) * pt.log(IXI)\n - trace(matrix_inverse(V).dot(X))\n - nu * p * pt.log(2)\n - nu * pt.log(IVI)\n - 2 * multigammaln(nu / 2.0, p)\n )\n / 2,\n matrix_pos_def(X),\n pt.eq(X, X.T),\n nu > (p - 1),\n )", "def determinant(self):\n det = 0\n # Check if is square\n # 检验其是否是方形矩阵\n if not self.is_square():\n raise(ValueError, \"Cannot calculate determinant of non-square matrix.\")\n if self.h > 2:\n raise(ValueError, \"Calculating determinant not implement for matrices largerer than 2x2\")\n\n # TODO - your code here\n\n # 这里仅实现了获取1x1 2x2 矩阵的det值\n # For Matrix 1x1\n if (self.h * self.w) == 1:\n det = self.grid[0][0]\n # For Matrix 2x2\n elif self.h == 2 & self.w == 2:\n det = self.g[1][1] * self.g[0][0] - self.g[0][1] * self.g[1][0]\n # In the future could implement determinant for matrix bigger\n else:\n raise(NotImplementedError, \"Calculating determinant not implement for matrices largerer than 2x2.\")\n return det", "def log_det_S(self, Rs = None):\n if Rs is None:\n Rs = self.Rs\n return np.sum([self.n/R.shape[0]*\n np.linalg.slogdet(R.T.dot(R))[1]\n for R in Rs])", "def test_loglike(dlm,Cl,noise,beam):\n lmax = Cl.shape[0]\n tt_exp = -1./2 * np.real(np.vdot(dlm.T,hp.almxfl(dlm,1/(beam[:lmax]**2*Cl[:,1]+noise[:lmax]))))\n #plt.plot(Cl[:,1])\n tt_det = - 1./2 *(np.arange(1,lmax+1)*np.log((noise[:lmax]+Cl[:,1]*beam[:lmax]**2))).sum() \n tt_f = tt_exp + tt_det\n return tt_exp,tt_det,tt_f#,Cl[:,1]", "def log_den(self, X):\n raise NotImplementedError()", "def chol_inv(L):\r\n\r\n return lapack.dtrtri(L, lower=True)[0]", "def backward_p(self, x):\n log_det_jacob, z = x.new_zeros(x.shape[0]), x\n for i in reversed(range(len(self.t))):\n z_ = self.mask[i] * z\n s = self.s[i](z_) * (1 - 
self.mask[i])\n t = self.t[i](z_) * (1 - self.mask[i])\n z = (1 - self.mask[i]) * (z - t) * torch.exp(-s) + z_\n log_det_jacob -= s.sum(dim=1)\n return z, log_det_jacob", "def logtrace(m: np.ndarray) -> np.ndarray:\n\n \"\"\" note: performance cannot easily be improve by numba.\n `np.diagonal` not supported by numba 0.52.0\n \"\"\"\n\n return np.sum(np.log(np.diagonal(m, axis1=-2, axis2=-1)), axis=-1)", "def det(self):\n self.matrix() # forces the update of the matrix in the module's default\n # basis, to make sure that the dictionary self._matrices\n # is not empty\n return self._matrices.values()[0].det() # pick a random value in the\n # dictionary self._matrices\n # and compute the determinant", "def cayley_menger_det_no_linalg(x2, y2, z2, xb2, yb2, zb2):\n xs = x2 + xb2\n ys = y2 + yb2\n zs = z2 + zb2\n buf1 = ys + zs\n buf1 -= xs\n buf2 = x2 * xb2\n buf1 *= buf2 # buf1 has first term, halved\n np.multiply(y2, yb2, out=buf2)\n buf3 = xs + zs\n buf3 -= ys\n buf2 *= buf3 # buf2 has second term\n buf1 += buf2 # buf1 is sum of two terms, halved\n np.multiply(z2, zb2, out=buf3)\n np.add(xs, ys, out=buf2) # reuse buf2\n buf2 -= zs\n buf3 *= buf2 # buf3 has third term\n buf1 += buf3 # buf1 is sum of 3 first terms, halved\n buf1 *= 2\n np.subtract(x2, xb2, out=buf2)\n np.subtract(y2, yb2, out=buf3)\n buf2 *= buf3\n np.subtract(z2, zb2, out=buf3)\n buf2 *= buf3\n buf1 += buf2 # buf1 is sum of 4 first terms\n np.multiply(xs, ys, out=buf3)\n buf3 *= zs\n buf1 -= buf3\n return buf1", "def determinant(self):\n return np.linalg.det(self._data)", "def dlogZ(self, T, pos, psi, phi):\n msgs = belief_propagation(T, pos, psi, phi, True)\n dpsi = calculate_gradient(msgs, T, pos, psi, True, True)\n return dpsi", "def log_deriv(error):\n return logistic(error) * (1 - logistic(error))", "def _vertical_log(self, X: np.ndarray) -> (np.ndarray, np.ndarray):\n ret_p = np.zeros_like(X)\n ret_n = np.zeros_like(X)\n log_p = self.manifold.log(X[:-1], X[1:])\n log_n = self.manifold.log(X[1:], X[:-1])\n ret_p[:-1] = log_p\n ret_n[1:] = log_n\n return ret_p, ret_n", "def log_likelihood(self):\r\n return (-0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) -\r\n 0.5 * self.output_dim * self.K_logdet + self._model_fit_term() + self.likelihood.Z)", "def logm(self, x):\n\n if K.backend() == 'theano':\n # construct theano tensor operation\n from theano.tensor.nlinalg import svd, diag\n from theano.tensor.elemwise import Elemwise\n from theano.scalar import log\n import theano.tensor as T\n # This implementation would be extremely slow. 
but efficient?\n u, d, v = svd(x)\n d += self.eps\n inner = diag(T.log(d))\n res = T.dot(u, T.dot(inner, v))\n return res\n else:\n from kyu.tensorflow.ops.svd_gradients import batch_matrix_log\n return batch_matrix_log(x, self.eps)", "def inverse_3by3_int64(M, return_determinant=True):\n if len(M.shape) > 1:\n M = M.flatten()\n\n determinant = np.int64(0)\n adj_M = np.zeros((9,), 'int64')\n\n # First row of adjugate matrix\n adj_M[0] = (M[4] * M[8] - M[7] * M[5]) # Det #0\n adj_M[1] = -(M[1] * M[8] - M[7] * M[2])\n adj_M[2] = (M[1] * M[5] - M[4] * M[2])\n\n # Second row of adjugate matrix\n adj_M[3] = -(M[3] * M[8] - M[6] * M[5]) # Det #1\n adj_M[4] = (M[0] * M[8] - M[6] * M[2])\n adj_M[5] = -(M[0] * M[5] - M[3] * M[2])\n\n # Third row of adjugate matrix\n adj_M[6] = (M[3] * M[7] - M[6] * M[4]) # Det #2\n adj_M[7] = -(M[0] * M[7] - M[6] * M[1])\n adj_M[8] = (M[0] * M[4] - M[3] * M[1])\n\n if return_determinant:\n if ((np.log2(np.abs(M[0])) + np.log2(np.abs(adj_M[0]))) > 63 or\n (np.log2(np.abs(M[1])) + np.log2(np.abs(adj_M[1]))) > 63 or\n (np.log2(np.abs(M[2])) + np.log2(np.abs(adj_M[6]))) > 63):\n print(\"inverse_3by3_int64: Overflow in determinant calculation!\")\n determinant += int(M[0]) * int(adj_M[0])\n determinant += int(M[1]) * int(adj_M[3]) # Using addition since minus is integrated in adjugate matrix.\n determinant += int(M[2]) * int(adj_M[6])\n else:\n determinant += np.int64(M[0]) * np.int64(adj_M[0])\n determinant += np.int64(M[1]) * np.int64(adj_M[3]) # Using addition since minus is integrated in adjugate matrix.\n determinant += np.int64(M[2]) * np.int64(adj_M[6])\n return adj_M, determinant\n else:\n return adj_M", "def log_likelihood(self):\r\n assert not self.likelihood.is_heteroscedastic\r\n A = -0.5*self.batchsize*self.output_dim*(np.log(2.*np.pi) - np.log(self.likelihood.precision))\r\n B = -0.5*self.likelihood.precision*self.output_dim*self.trace_K\r\n Kmm_logdet = 2.*np.sum(np.log(np.diag(self.Lm)))\r\n C = -0.5*self.output_dim*self.data_prop*(Kmm_logdet-self.q_u_logdet - self.num_inducing)\r\n C += -0.5*np.sum(self.LQL * self.B)\r\n D = -0.5*self.likelihood.precision*self.likelihood.trYYT\r\n E = np.sum(self.V*self.projected_mean)\r\n return (A+B+C+D+E)/self.data_prop", "def inverse_diff_log(y,log0):\n\n return np.exp(inverse_diff(y,log0))", "def inverse_diff_log(y,log0):\n \n return np.exp(inverse_diff(y,log0))", "def dirichlet_prob(alpha_matrix, p_matrix):\n logL = 0\n n_col = len(p_matrix[0, :])\n for i in range(n_col):\n logL += dirichlet.logpdf(p_matrix[:, i], alpha_matrix[:, i])\n\n return logL", "def log_det_Jxz(self):\n #return self.log_det_xz.output\n log_det_Jxzs = []\n for l in self.layers:\n if hasattr(l, 'log_det_Jxz'):\n log_det_Jxzs.append(l.log_det_Jxz)\n if len(log_det_Jxzs) == 0:\n return tf.ones((self.output_z.shape[0],))\n if len(log_det_Jxzs) == 1:\n return log_det_Jxzs[0]\n return tf.reduce_sum(log_det_Jxzs, axis=0, keepdims=False)", "def log_det_precisions(self):\n raise NotImplementedError", "def determinant(v,w):\n return v[0] * w[1] - v[1] * w[0]" ]
[ "0.7205463", "0.69225436", "0.6803772", "0.6577487", "0.65662503", "0.6258033", "0.6235449", "0.6192166", "0.61640286", "0.60718197", "0.602648", "0.5906651", "0.5904567", "0.58784807", "0.58522433", "0.5850299", "0.58452636", "0.5838441", "0.5796368", "0.57808894", "0.5778876", "0.57782644", "0.5773432", "0.5766508", "0.57584566", "0.5755279", "0.5697666", "0.5686144", "0.5684613", "0.56827873", "0.5681702", "0.5676155", "0.56693953", "0.5655353", "0.56515867", "0.5636375", "0.5634286", "0.5608274", "0.5581664", "0.5578775", "0.5569277", "0.5557574", "0.55358094", "0.5525894", "0.5501086", "0.55005795", "0.5470511", "0.5460774", "0.5459628", "0.54307926", "0.5423552", "0.5412509", "0.5385789", "0.537432", "0.5365599", "0.5361522", "0.5360463", "0.53602827", "0.5353577", "0.53510153", "0.5348857", "0.5340906", "0.5340906", "0.5340906", "0.5340906", "0.5337384", "0.5337384", "0.5337384", "0.5337384", "0.5328081", "0.5323513", "0.53179085", "0.5317563", "0.5308881", "0.53042513", "0.52956426", "0.52950627", "0.52794397", "0.527809", "0.5259378", "0.52393776", "0.52379465", "0.523735", "0.52351797", "0.52265364", "0.52226853", "0.5218632", "0.52171093", "0.5203746", "0.5200604", "0.5197188", "0.5196838", "0.5196499", "0.5191926", "0.5190889", "0.5189467", "0.51738554", "0.5172164", "0.51641923", "0.51619965" ]
0.6977162
1
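A minimal usage sketch for the record above, under stated assumptions: it presumes toeplitz_slogdet is defined exactly as in that record's document field, builds a small symmetric positive-definite Toeplitz matrix from an illustrative AR(1)-style first column (rho**arange(n) is a choice made here, not taken from the dataset), and compares the result against the dense numpy.linalg.slogdet reference.

import numpy as np
from scipy.linalg import toeplitz

n, rho = 64, 0.6                      # illustrative size and decay rate (assumed values)
r = rho ** np.arange(n)               # first column of a symmetric positive-definite Toeplitz matrix
T = toeplitz(r)                       # dense n-by-n matrix built from that column

sign_fast, logdet_fast = toeplitz_slogdet(r)   # Levinson-based routine from the record above
sign_ref, logdet_ref = np.linalg.slogdet(T)    # dense O(n^3) reference

print(sign_fast == sign_ref, np.isclose(logdet_fast, logdet_ref))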
Preprocessing needed for toeplitz_inverse_multiplication()
def toeplitz_inverse_multiplication_prep(T_column): phi=1 psi=2 assert phi != 0 assert psi != 0 assert phi != psi n = len(T_column) x = levinson(T_column, np.concatenate( (np.array([1]), np.zeros((n-1,))) ) ) y = levinson(T_column, np.concatenate( (np.zeros((n-1,)), np.array([1])) ) ) x_0 = x[0] D_phi = (phi**(1/n))**np.arange(0,n) D_psi = (psi**(1/n))**np.arange(0,n) Lambda_1 = fft(D_psi*x) Lambda_2 = fft(D_phi*np.concatenate(([phi*y[-1]], y[0:-1]))) Lambda_3 = fft(D_psi*np.concatenate(([psi*y[-1]], y[0:-1]))) Lambda_4 = fft(D_phi*x) return (x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bd_toeplitz_inverse_multiplication_prep(*arrs):\n \n t = []\n for c in arrs: # loop over each block\n t.append(toeplitz_inverse_multiplication_prep(c))\n return tuple(t)", "def toeplitz_inverse_multiplication(u, x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4):\n\n y = fft(D_phi*u)\n a = Lambda_1*fft(D_psi*(1/D_phi)*ifft(Lambda_2*y))\n b = Lambda_3*fft(D_psi*(1/D_phi)*ifft(Lambda_4*y))\n y = (1/D_psi)*real(ifft(a-b))/(x_0*(phi-psi))\n \n return y", "def bd_toeplitz_inverse_multiplication(u, *arrs):\n \n y = zeros(shape(u))\n n_start = 0\n n_end = 0\n for t in arrs:\n n_start = n_end\n n_end += len(t[3]) # len(t[3]) is the length of the block\n y[n_start:n_end] = toeplitz_inverse_multiplication(u[n_start:n_end], *t)\n assert len(y) == n_end\n return y", "def toeplitz_multiplication(u, c, r=None):\n n = len(u)\n if r is None:\n r = c\n u1 = zeros((2*n))\n u1[0:n] = u\n \n c = np.concatenate((c, [0], r[-1:0:-1])) \n \n y1 = circulant_multiplication(u1, c)\n \n return y1[0:n]", "def de_mult(self,z):\n if isinstance(z,np.ndarray) and z.size>1:\n assert np.all(np.diff(z)>0.)\n return (z+1.)**(3.*(1.+self.w))", "def back_substitution(U, z):\n n = len(U[0])\n x = [0] * n\n for i in range(n - 1, -1, -1):\n if U[i][i] != 0:\n accum = 0\n for j in range(i, n):\n accum += U[i][j] * x[j]\n x[i] = (z[i] - accum) / U[i][i]\n return x", "def mul(Z,X,Y):", "def reconstruct(A, B, z):\n f = factorint(igcd(A, B))\n for p, e in f.items():\n if e != 1:\n raise ValueError('a and b should be square-free')\n z *= p\n return z", "def preprocessing(ct):\n return value_preprocessing(ct, False)", "def test_inverse_transform(self):", "def _z2matmul(self, left, right):\n prod = np.mod(np.dot(left, right), 2)\n return prod", "def test__inverse_transform_continuous(self):", "def complex_inverse(c1,cr):", "def preprocess_features(features):\r\n rowsum = np.array(features.sum(1))\r\n r_inv = np.power(rowsum, -1).flatten()\r\n r_inv[np.isinf(r_inv)] = 0.\r\n r_mat_inv = np.diag(r_inv)\r\n features = r_mat_inv.dot(features)\r\n return features", "def inverse_fisher_z_transform(z):\r\n return ((e ** (2 * z)) - 1.) / ((e ** (2 * z)) + 1.)", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features", "def mul_inplace(a, b):", "def modular_inverse(self):\n i = gmpy2.invert(self.c2, self.n)\n mx = pow(self.c1, self.a, self.n)\n my = pow(i, int(-self.b), self.n)\n self.m= mx * my % self.n", "def multInverse(a, m):\n x0 = 1\n x1 = 0\n y0 = 0\n y1 = 1\n\n while m != 0:\n p = a // m\n z = a % m\n a = m\n m = z\n\n w = x1\n x1 = x0 - p * x1\n x0 = w\n \n v = y1\n y1 = y0 - p * y1\n y0 = v\n if(x0):\n return(x0)\n else:\n print(\"multiplicative inverse does not exist\")\n return 0", "def inv_inplace(a):", "def de_mult(self,z):\n z = np.asanyarray(z)\n if not (np.any(z<0) or np.any(z>=9.)):\n return self.de_true_interp(z)\n result = np.zeros_like(z)\n result[z<0.] = (z[z<0.]+1.)**(3.*(1.+self.w))\n result[(z>=0.)*(z<9.)] = self.de_true_interp(z[(z>=0.)*(z<9.)])\n result[z>=9.] 
= np.exp(3.*(_de_exp_const_w(z[z>=9.],self.w)-_de_exp_const_w(9.,self.w)+np.log(self.de_true_interp(9.))/3.))\n if isinstance(z,np.ndarray) and z.size>1:\n assert np.all(np.diff(z)>0.)\n return result", "def __invert__(self):\n return Factorization([(p,-e) for p,e in reversed(self)],\n cr=self._cr(), unit=self.unit()**(-1))", "def exp2_inplace(a):", "def inverse_cayley_transform(z: torch.Tensor) -> torch.Tensor:\n identity = identity_like(z)\n i_identity = multiply_by_i(identity)\n\n z_minus_id = z - i_identity\n inv_z_plus_id = inverse(z + i_identity)\n return z_minus_id @ inv_z_plus_id", "def local_mul_specialize(node):\r\n # here, we are past the point of canonicalization, so we don't\r\n # want to put in un-necessary fills.\r\n #\r\n # at this point [post canonicalize], mul() may have many inputs.\r\n if node.op == T.mul:\r\n #the idea here is that we have pow(x, y)\r\n neg = False\r\n new_inputs = []\r\n nb_neg_node = 0\r\n nb_cst = 0\r\n for input in node.inputs:\r\n # remove any neg arguments\r\n while input.owner and input.owner.op == T.neg:\r\n neg ^= True\r\n input = input.owner.inputs[0]\r\n nb_neg_node += 1\r\n\r\n # remove special case arguments of 1, -1 or 0\r\n y = local_mul_canonizer.get_constant(input)\r\n if y == 1.0:\r\n nb_cst += 1\r\n elif y == -1.0:\r\n nb_cst += 1\r\n neg ^= True # toggles\r\n elif y == 0.0:\r\n # if we find any zero, we just return right away\r\n return [broadcast_like(0, node.outputs[0], node.fgraph)]\r\n else:\r\n new_inputs.append(input)\r\n\r\n if new_inputs != node.inputs:\r\n if new_inputs:\r\n if len(new_inputs) == 1:\r\n if neg:\r\n rval = -new_inputs[0]\r\n else:\r\n rval = new_inputs[0]\r\n else:\r\n # The next case would cause a replace by an equivalent case.\r\n if (neg and\r\n nb_neg_node == 0 and\r\n nb_cst == 1):\r\n return\r\n elif neg:\r\n # Don't add an extra neg node as we can't\r\n # fully replace this mul by a neg.\r\n m1 = numpy.asarray(-1, dtype=node.outputs[0].dtype)\r\n new_inputs = [m1] + new_inputs\r\n rval = T.mul(*new_inputs)\r\n\r\n return [broadcast_like(rval, node.outputs[0], node.fgraph)]\r\n else:\r\n # there are no variable inputs to mul\r\n # N.B. 
this could have been constant-folded...\r\n if neg:\r\n return [broadcast_like(-1, node.outputs[0], node.fgraph)]\r\n else:\r\n return [broadcast_like(1, node.outputs[0], node.fgraph)]", "def inv(z: int) -> int:\n # Adapted from curve25519_athlon.c in djb's Curve25519.\n z2 = z * z % q # 2\n z9 = pow2(z2, 2) * z % q # 9\n z11 = z9 * z2 % q # 11\n z2_5_0 = (z11 * z11) % q * z9 % q # 31 == 2^5 - 2^0\n z2_10_0 = pow2(z2_5_0, 5) * z2_5_0 % q # 2^10 - 2^0\n z2_20_0 = pow2(z2_10_0, 10) * z2_10_0 % q # ...\n z2_40_0 = pow2(z2_20_0, 20) * z2_20_0 % q\n z2_50_0 = pow2(z2_40_0, 10) * z2_10_0 % q\n z2_100_0 = pow2(z2_50_0, 50) * z2_50_0 % q\n z2_200_0 = pow2(z2_100_0, 100) * z2_100_0 % q\n z2_250_0 = pow2(z2_200_0, 50) * z2_50_0 % q # 2^250 - 2^0\n return pow2(z2_250_0, 5) * z11 % q # 2^255 - 2^5 + 11 = q - 2", "def multiply_by_i(z: torch.Tensor):\n return to_complex(-z.imag, z.real)", "def preprocess_features(features):\r\n rowsum = np.array(features.sum(1),dtype='float')\r\n r_inv = np.power(rowsum, -1).flatten()\r\n r_inv[np.isinf(r_inv)] = 0.\r\n r_mat_inv = sp.diags(r_inv)\r\n features = r_mat_inv.dot(features)\r\n # return sparse_to_tuple(features)\r\n return features\r\n # print(features)\r\n # rowsum = np.array(features.sum(1),dtype='float')\r\n #\r\n # r_inv = np.power(rowsum, -1).flatten()\r\n # r_inv[np.isinf(r_inv)] = 0.\r\n # r_mat_inv = np.diag(r_inv)\r\n # features = r_mat_inv.dot(features)\r\n # # return sparse_to_tuple(features)\r\n # return features\r", "def calculate_compressibility_factor(p_in, p_out, temp_in, temp_out):\n temp = np.transpose([200, 300, 400, 500, 600, 800, 1000, 2000])\n\n p = [1, 10, 20, 40, 60, 80, 100, 200, 400, 600, 800, 1000]\n\n z = [\n [1.0007, 1.0066, 1.0134, 1.0275, 1.0422, 1.0575, 1.0734, 1.163, 1.355, 1.555, 1.753, 1.936],\n [1.0005, 1.0059, 1.0117, 1.0236, 1.0357, 1.0479, 1.0603, 1.124, 1.253, 1.383, 1.510, 1.636],\n [1.0004, 1.0048, 1.0096, 1.0192, 1.0289, 1.0386, 1.0484, 1.098, 1.196, 1.293, 1.388, 1.481],\n [1.0004, 1.0040, 1.0080, 1.0160, 1.0240, 1.0320, 1.0400, 1.080, 1.159, 1.236, 1.311, 1.385],\n [1.0003, 1.0034, 1.0068, 1.0136, 1.0204, 1.0272, 1.0340, 1.068, 1.133, 1.197, 1.259, 1.320],\n [1.0002, 1.0026, 1.0052, 1.0104, 1.0156, 1.0208, 1.0259, 1.051, 1.100, 1.147, 1.193, 1.237],\n [1.0002, 1.0021, 1.0042, 1.0084, 1.0126, 1.0168, 1.0209, 1.041, 1.080, 1.117, 1.153, 1.187],\n [1.0009, 1.0013, 1.0023, 1.0044, 1.0065, 1.0086, 1.0107, 1.021, 1.040, 1.057, 1.073, 1.088],\n ]\n\n interp_func = interpolate.interp2d(p, temp, z)\n\n z_in = interp_func(p_in, temp_in)\n z_out = interp_func(p_out, temp_out)\n\n return [z_in, z_out]", "def exp_inplace(a):", "def nontuple_preprocess_features(features):\n rowsum = np.array(features.sum(1))\n ep = 1e-10\n r_inv = np.power(rowsum + ep, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features.toarray() # densify -- these are tiny and we don't care", "def preprocess_img_inv(img):\n img = img.data.numpy().copy()\n\n img[0] = img[0] * TORCH_IMG_STD[0] + TORCH_IMG_MEAN[0]\n img[1] = img[1] * TORCH_IMG_STD[1] + TORCH_IMG_MEAN[1]\n img[2] = img[2] * TORCH_IMG_STD[2] + TORCH_IMG_MEAN[2]\n img = img.transpose(1, 2, 0) * 255.0\n\n return img.round().astype('uint8')", "def preprocessing_fn(inputs):\n outputs = {}\n\n # This 
function is the entry point for your feature engineering with\n # TensorFlow Transform, using the TFX Transform component. In this example\n # the feature engineering is very simple, only applying z-score scaling.\n for key in Features.FEATURE_KEYS:\n outputs[transformed_name(key)] = tft.scale_to_z_score(inputs[key])\n\n # inputs[key]\n\n # tft.scale_to_z_score(inputs[key])\n\n # Do not apply label transformation as it will result in wrong evaluation.\n outputs[transformed_name(\n Features.LABEL_KEY)] = inputs[Features.LABEL_KEY]\n\n return outputs", "def imag(z):", "def _call(self, z): \n self.a = F.softplus(self.a)\n self.w = F.softmax(self.w, dim=1)\n # Compute\n pre_sigm = self.a * z + self.b\n sigm = torch.sigmoid(pre_sigm)\n x_pre = self.w * sigm\n if (len(z.shape) > 2):\n x_pre = torch.sum(self.w * sigm, dim=1)\n x_pre_clipped = x_pre * (1 - self.eps) + self.eps * 0.5\n zp = torch.log(x_pre_clipped) - torch.log(1 - x_pre_clipped)\n return zp", "def mXZ(nxz,P_dot_Dj,P_dot_ej):\n return np.divide(np.multiply(P_dot_ej, np.sum(nxz, axis=0)), P_dot_Dj)", "def preprocess(self, resized_inputs):\n return (2.0 / 255.0) * resized_inputs - 1.0", "def _pseudo_inv22sym_vectorized(M):\n assert M.ndim == 3\n assert M.shape[-2:] == (2, 2)\n M_inv = np.empty_like(M)\n prod1 = M[:, 0, 0]*M[:, 1, 1]\n delta = prod1 - M[:, 0, 1]*M[:, 1, 0]\n rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))\n\n if np.all(rank2):\n # Normal 'optimized' flow.\n M_inv[:, 0, 0] = M[:, 1, 1] / delta\n M_inv[:, 0, 1] = -M[:, 0, 1] / delta\n M_inv[:, 1, 0] = -M[:, 1, 0] / delta\n M_inv[:, 1, 1] = M[:, 0, 0] / delta\n else:\n # 'Pathologic' flow.\n # Here we have to deal with 2 sub-cases\n # 1) First sub-case: matrices of rank 2:\n delta = delta[rank2]\n M_inv[rank2, 0, 0] = M[rank2, 1, 1] / delta\n M_inv[rank2, 0, 1] = -M[rank2, 0, 1] / delta\n M_inv[rank2, 1, 0] = -M[rank2, 1, 0] / delta\n M_inv[rank2, 1, 1] = M[rank2, 0, 0] / delta\n # 2) Second sub-case: rank-deficient matrices of rank 0 and 1:\n rank01 = ~rank2\n tr = M[rank01, 0, 0] + M[rank01, 1, 1]\n tr_zeros = (np.abs(tr) < 1.e-8)\n sq_tr_inv = (1.-tr_zeros) / (tr**2+tr_zeros)\n #sq_tr_inv = 1. 
/ tr**2\n M_inv[rank01, 0, 0] = M[rank01, 0, 0] * sq_tr_inv\n M_inv[rank01, 0, 1] = M[rank01, 0, 1] * sq_tr_inv\n M_inv[rank01, 1, 0] = M[rank01, 1, 0] * sq_tr_inv\n M_inv[rank01, 1, 1] = M[rank01, 1, 1] * sq_tr_inv\n\n return M_inv", "def preprocess(self):", "def erfcinv(a):", "def pulp_smash():", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features.todense()", "def fixedZMPchi2(self, pars):\n\t\tif not self.hasZMP and self.nZero > 0:\n\t\t\traise RuntimeError(\"No zero mode parameters set\")\n\t\tif pars is not None:\n\t\t\tself.setShapeParameters(pars)\n\t\ta,b,c = self.getOwnTheoryABC()\n\t\tA = np.zeros((2*self.nFunc, 2*self.nFunc))\n\t\tB = np.zeros((2*self.nFunc))\n\t\tC = c\n\t\tfor i in range(2*self.nZero):\n\t\t\tC += b[i]*self.zeroModeParameters[i]\n\t\t\tfor j in range(2*self.nZero):\n\t\t\t\tC += self.zeroModeParameters[i]*self.zeroModeParameters[j]*a[i,j]\n\t\tfor i in range(2*self.nFunc):\n\t\t\tB[i] += b[2*self.nZero+i]\n\t\t\tfor j in range(2*self.nZero):\n\t\t\t\tB[i] += (a[2*self.nZero+i,j]+a[j,2*self.nZero+i])*self.zeroModeParameters[j]\n\t\t\tfor j in range(2*self.nFunc):\n\t\t\t\tA[i,j] += a[2*self.nZero + i, 2*self.nZero+j]\n\t\tif self.ownPinv:\n\t\t\tcouplings = -np.dot(B, utils.pinv(np.transpose(A) + A, numLim = self.numLim))\n\t\telse:\n\t\t\tcouplings = -np.dot(B, la.pinv(np.transpose(A) + A))\n\t\treturn np.dot(couplings, np.dot(A,couplings)) + np.dot(B,couplings) + C", "def forward(self, z):\n return self.mul(torch.inverse(self.weights), (z - self.shift).unsqueeze(-1)).squeeze(-1)\n #return self.mul(self.weights, z.unsqueeze(-1)).squeeze(-1) + self.shift", "def apply_Lorentz_correction(qz, scale):\n for i in xrange(len(scale)):\n scale[i] = scale[i] * qz[i]", "def de_mult(self,z):\n raise NotImplementedError('de mult must be implemented in subclass')", "def decompose(self):\r\n dummy = self.ortho()\r\n dummy.setRow(3,_vec4(0.0, 0.0, 0.0, 1.0))\r\n\r\n x = dummy.getColumn(0)\r\n y = dummy.getColumn(1)\r\n z = dummy.getColumn(2)\r\n xl = x.length()\r\n yl = y.length()\r\n zl = z.length()\r\n scale = _vec3(xl,yl,zl)\r\n \r\n x/=xl\r\n y/=yl\r\n z/=zl\r\n dummy.setColumn(0,x)\r\n dummy.setColumn(1,y)\r\n dummy.setColumn(2,z)\r\n if dummy.determinant()<0.0:\r\n dummy.setColumn(0,-x)\r\n scale.x=-scale.x\r\n\r\n return (_vec3(self.mlist[3], self.mlist[7], self.mlist[11]),\r\n dummy,\r\n scale)", "def prelu(input, weight):\n return FunctionLib.apply('PRelu', input.device, [input, weight])", "def expm1_inplace(a):", "def _vzlerchphi(self, z: np.ndarray, a: int) -> np.ndarray:\n return np.array([self._zlerchphi(z_, a) for z_ in z])", "def multiple_inverse(p_final, n):\r\n return one_minus_exp(log_one_minus(p_final) / n)", "def compute_static_zp_renormalization(self):\n self.zero_point_renormalization = (\n self.sum_qpt_function('get_zpr_static'))\n self.renormalization_is_dynamical = False", "def mxz(mXZ,P_dot_Dj,P_times_Dj):\n return np.divide(np.multiply(P_times_Dj, mXZ), P_dot_Dj)", "def forward_substitution(L, b):\n n = len(L[0])\n z = [0] * n\n for i in range(0, n):\n if L[i][i] != 0:\n accum = 0\n for j in range(0, i):\n accum += L[i][j] * z[j]\n z[i] = (b[i] - accum) / L[i][i]\n return z", "def preprocess(image):\n return (image / 255) * 2 - 1", "def _vlerchphi(self, z: np.ndarray, a: int) -> np.ndarray:\n return np.array([self._lerchphi(z_, a) for z_ in z])", "def preprocess(image):\n 
return image - MEAN_PIXEL", "def magmablas_zsymmetrize(uplo, n, A, lda):\n\n uplo = _uplo_conversion[uplo]\n status = _libmagma.magmablas_zsymmetrize(uplo, n, int(A), lda)\n magmaCheckStatus(status)", "def factorize(x):\n pass", "def _eval_transpose(self):\n coeff, matrices = self.as_coeff_matrices()\n return MatMul(\n coeff, *[transpose(arg) for arg in matrices[::-1]]).doit()", "def _z2matvecmul(self, mat, vec):\n prod = np.mod(np.dot(mat, vec), 2)\n return prod", "def dct(self, z, out=None):\n z = np.asfarray(z)\n out = np.multiply(z, z, out)\n out *= -0.5\n np.exp(out, out)\n return out", "def real(z):", "def transform_to_magic_basis(U: np.ndarray, reverse: bool = False) -> np.ndarray:\n if reverse:\n return _B_nonnormalized_dagger @ U @ _B_nonnormalized\n return _B_nonnormalized @ U @ _B_nonnormalized_dagger", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def zzx_neg(f):\n return [ -coeff for coeff in f ]", "def inverse(self, x):\n x = np.asarray(x)\n def r(vec):\n return utils.recycled(vec, as_=x)\n if self.zero is not None and self.multiplier is not None:\n x = x / r(self.multiplier) + r(self.zero)\n elif self.zero is not None:\n x = x + r(self.zero)\n elif self.multiplier is not None:\n x = x / r(self.multiplier)\n return x", "def scale_invert(self):", "def pow_inplace(a, b):", "def _u_ellipk(z):\n ellipk = _ellipk_z(np.asarray(z, dtype=complex))\n try:\n ellipk = ellipk.astype(complex)\n except AttributeError: # complex not np.ndarray\n pass\n return ellipk", "def inverse_cubic ( a , b , c , d ) :\n\n x0 , f0 = a.x , a.fx\n x1 , f1 = b.x , b.fx\n x2 , f2 = c.x , c.fx\n x3 , f3 = d.x , d.fx\n\n ## switch to inverse parabolic if some function values coincide \n if f0 == f1 or isequal ( f0 , f1 ) : return inverse_parabolic ( a , c , d ) \n if f0 == f2 or isequal ( f0 , f2 ) : return inverse_parabolic ( a , b , d )\n if f0 == f3 or isequal ( f0 , f3 ) : return inverse_parabolic ( a , b , c )\n if f1 == f2 or isequal ( f1 , f2 ) : return inverse_parabolic ( a , b , d ) \n if f1 == f3 or isequal ( f1 , f3 ) : return inverse_parabolic ( a , b , c ) \n if f2 == f3 or isequal ( f2 , f3 ) : return inverse_parabolic ( a , b , c ) \n\n f01 = 1.0 / ( f0 - f1 ) ; f10 = - f01\n f02 = 1.0 / ( f0 - f2 ) ; f20 = - f02\n f03 = 1.0 / ( f0 - f3 ) ; f30 = - f03\n f12 = 1.0 / ( f1 - f2 ) ; f21 = - f12\n f13 = 1.0 / ( f1 - f3 ) ; f31 = - f13\n f23 = 1.0 / ( f2 - f3 ) ; f32 = - f23\n\n xx = -x0 * f1 * f2 * f3 * f01 * f02 * f03 \n xx += -x1 * f0 * f2 * f3 * f10 * f12 * f13 \n xx += -x2 * f0 * f2 * f3 * f20 * f21 * f23 \n xx += -x3 * f0 * f1 * f2 * f30 * f31 * f32\n \n return xx", "def __invert(self, args):", "def machin(coefs, prec, hyperbolic=False):\n extraprec = 10\n s = MPZ_ZERO\n for a, b in coefs:\n s += MPZ(a) * acot_fixed(MPZ(b), prec+extraprec, hyperbolic)\n return (s >> extraprec)", "def tan_inplace(a):", "def preprocess_input(img):\n img /= 255.\n img -= 0.5\n img *= 2.\n return img", "def test_functional_inverse(self, dim):\n M = np.random.rand(dim, dim)\n assert np.all(M == symplectic.xxpp_to_xpxp(symplectic.xpxp_to_xxpp(M)))\n assert np.all(M == symplectic.xpxp_to_xxpp(symplectic.xxpp_to_xpxp(M)))\n\n v = np.random.rand(dim)\n assert np.all(v == symplectic.xxpp_to_xpxp(symplectic.xpxp_to_xxpp(v)))\n assert np.all(v == symplectic.xpxp_to_xxpp(symplectic.xxpp_to_xpxp(v)))", "def _build_preprocessing(self):\n\n # For now, do nothing\n 
pass", "def multiplier(self) -> global___Expression:", "def preprocess_3d(im_stack):\n im_stack /= 127.5\n im_stack -= 1.0\n return im_stack", "def inverse_transform(v):\n v, k = divmod(v - 1, N)\n v, j = divmod(v, N)\n v, i = divmod(v, N)\n return i, j, k", "def mag_postprocessing(variables):\n return variables", "def transforms0(self):\n if self.tokenize:\n return torchvision.transforms.Lambda(\n lambda x: x + 1 + torch.arange(3) * 256\n )\n else:\n # return torchvision.transforms.Normalize(mean=127.5, std=127.5)\n return torchvision.transforms.Lambda(lambda x: (x.float() - 127.5) / 127.5)", "def skip_mul(n):\n if n == 0:\n return 0\n else:\n return n * skip_mul(n - 2)", "def pseudoInversa(J):\n\tJinv = np.linalg.pinv(J)\n\treturn Jinv", "def compute_hessian_vector_product(self, function, arguments):", "def tanh_inplace(a):", "def __det3x3__(a):\r\n # val = +a[0,0] * ( a[1,1] * a[2,2] - a[2,1] * a[1,2] )\r\n # val += -a[0,1] * ( a[1,0] * a[2,2] - a[2,0] * a[1,2] )\r\n # val += +a[0,2] * ( a[1,0] * a[2,1] - a[2,0] * a[1,1] )\r\n val = +a[0] * (a[4] * a[8] - a[7] * a[5])\r\n val += -a[1] * (a[3] * a[8] - a[6] * a[5])\r\n val += +a[2] * (a[3] * a[7] - a[6] * a[4])\r\n return val", "def multiplicative_inverse(self, a: 'PFElement') -> 'PFElement':\n return self(self._pf_multiplicative_inverse(a.value, self.multiplicative_group))", "def inverse_transform(self, data: np.ndarray) -> np.ndarray:\n for i in range(self.n_layers):\n j = self.n_layers-1-i\n new_data = np.nan_to_num(data)\n new_data = self.pca_list[j].inverse_transform(X=new_data)\n if i != self.n_layers - 1:\n new_data = self.power_list[j-1].transform(new_data)\n data = new_data\n return data", "def zzx_primitive(f):\n cont = zzx_content(f)\n\n if cont == 1:\n return 1, f\n else:\n return cont, [ coeff // cont for coeff in f ]", "def inv_cross_ration(z1, z2, z3, a):\n return ((1-a)*z1*z3 + a*z2*z3 - z1*z2)/((a-1)*z2 - a*z1 + z3)", "def mobius(decomp): #fix 1 value\n return 0 if any([decomp[p] >= 2 for p in decomp]) else (-1) ** (breadth(decomp) % 2)", "def test_inverse_c(self):\n for q in self.all:\n self.assertTrue((q * q.inverse()).almost_equal(q.inverse()*q))", "def postprocess(self, U):\n # de-normalize so to say\n U = self.scalarU.inverse_transform(U.reshape(1, -1))\n U = U.ravel()\n return np.array(U)" ]
[ "0.65743506", "0.63173485", "0.60780877", "0.60345995", "0.5920918", "0.5710167", "0.5684219", "0.56176597", "0.56087387", "0.5590726", "0.5568226", "0.556281", "0.5558012", "0.5548983", "0.5540906", "0.5426001", "0.5426001", "0.5406237", "0.53970987", "0.5395093", "0.53894615", "0.53726643", "0.53536415", "0.5352041", "0.5330332", "0.53212094", "0.5295059", "0.52926826", "0.5283007", "0.5263422", "0.5261125", "0.5258962", "0.5256952", "0.52559406", "0.52538943", "0.5237146", "0.5233899", "0.5230244", "0.5228722", "0.5213071", "0.5212425", "0.5204553", "0.52042365", "0.52010965", "0.51986367", "0.5186596", "0.5183978", "0.5181363", "0.5179526", "0.5177803", "0.51690584", "0.5168878", "0.5166747", "0.5165713", "0.51469177", "0.5146547", "0.5135948", "0.5133284", "0.51120025", "0.5098808", "0.5083644", "0.50828636", "0.5082575", "0.5079303", "0.50749373", "0.506287", "0.505595", "0.505595", "0.505595", "0.505595", "0.505595", "0.5050844", "0.5045652", "0.50450176", "0.5028977", "0.5028479", "0.5025465", "0.50249165", "0.5021041", "0.50129855", "0.50045526", "0.5004226", "0.5001851", "0.5001187", "0.49984795", "0.49983868", "0.49926275", "0.4990848", "0.4990536", "0.498987", "0.4989519", "0.49857777", "0.49853414", "0.49755695", "0.49712405", "0.49638525", "0.49585238", "0.49575883", "0.49552712", "0.49498633" ]
0.65871215
0
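Note on the prep routine in the record above: it assumes a levinson(T_column, b) helper (plus numpy and a 1-D fft in scope) that solves T x = b for a symmetric Toeplitz matrix given only its first column, and it is run once per matrix so the returned tuple can be reused for every right-hand side. A minimal stand-in for that helper — my assumption, not something defined in this dataset — can delegate to SciPy's Levinson-based solver:

import numpy as np
from scipy.linalg import solve_toeplitz

def levinson(T_column, b):
    # solve_toeplitz runs the Levinson recursion; for a symmetric Toeplitz
    # matrix the first column doubles as the first row, so passing only the
    # column is enough.
    return solve_toeplitz(np.asarray(T_column), np.asarray(b))

With such a helper available, the prep call returns the tuple (x_0, phi, psi, D_phi, D_psi, Lambda_1..Lambda_4) that the multiplication routine in the next record consumes.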
Compute y = inv(T) u, where T is a symmetric Toeplitz matrix. Requires preprocessing with toeplitz_inverse_multiplication_prep(). See Gohberg, I. and V. Olshevsky (1994)
def toeplitz_inverse_multiplication(u, x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4):

    y = fft(D_phi*u)
    a = Lambda_1*fft(D_psi*(1/D_phi)*ifft(Lambda_2*y))
    b = Lambda_3*fft(D_psi*(1/D_phi)*ifft(Lambda_4*y))
    y = (1/D_psi)*real(ifft(a-b))/(x_0*(phi-psi))

    return y
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def toeplitz_inverse_multiplication_prep(T_column):\n \n phi=1\n psi=2\n assert phi != 0\n assert psi != 0\n assert phi != psi\n \n n = len(T_column)\n \n x = levinson(T_column, np.concatenate( (np.array([1]), np.zeros((n-1,))) ) )\n y = levinson(T_column, np.concatenate( (np.zeros((n-1,)), np.array([1])) ) )\n\n \n \n x_0 = x[0]\n \n D_phi = (phi**(1/n))**np.arange(0,n)\n D_psi = (psi**(1/n))**np.arange(0,n)\n\n Lambda_1 = fft(D_psi*x)\n Lambda_2 = fft(D_phi*np.concatenate(([phi*y[-1]], y[0:-1])))\n Lambda_3 = fft(D_psi*np.concatenate(([psi*y[-1]], y[0:-1])))\n Lambda_4 = fft(D_phi*x)\n \n return (x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4)", "def inv(T):\n K, L = T.shape[1:3]\n squ_matrix = np.einsum('ijkl->ikjl', T).reshape((K*L, K*L),order='F')\n t = np.linalg.inv(squ_matrix)\n return np.einsum('ijkl->ikjl', t.reshape((K,L,K,L), order='F'))", "def inv(X):\n R, t = Rt(X)\n Ri = R.T\n return np.concatenate((\n np.concatenate((Ri, -Ri.dot(t)[:,np.newaxis]), axis=1),\n np.array([[0, 0, 1]])))", "def invert(x):\n return linalg.inv(x)", "def T_inv(T):\n R, xyz = rigmech.T2Rxyz(T)\n R_inv = R.T\n xyz_inv = -R_inv * xyz\n T_inv = R_inv.row_join(xyz_inv).col_join(sp.Matrix([[0, 0, 0, 1]]))\n return T_inv", "def inverse_matrice(T):\n a,b,c,d = T[0][0],T[0][1],T[1][0],T[1][1]\n det = a*d-b*c\n aa,bb,cc,dd = d/det,-b/det,-c/det,a/det\n Tinv = [[aa,bb],[cc,dd]]\n return Tinv", "def inv(self, y):\n pass", "def inv(transform_matrix):\n\n r = transform_matrix[0:3, 0:3]\n t = transform_matrix[0:3, 3]\n t_inv = -1 * r.T.dot(t)\n transform_inv = np.eye(4)\n transform_inv[0:3, 0:3] = r.T\n transform_inv[0:3, 3] = t_inv\n\n return transform_inv", "def bd_toeplitz_inverse_multiplication(u, *arrs):\n \n y = zeros(shape(u))\n n_start = 0\n n_end = 0\n for t in arrs:\n n_start = n_end\n n_end += len(t[3]) # len(t[3]) is the length of the block\n y[n_start:n_end] = toeplitz_inverse_multiplication(u[n_start:n_end], *t)\n assert len(y) == n_end\n return y", "def inverse_basis(T, dimensions, t):\n B = basis(T, dimensions, t)\n return inv(B.T.dot(B)).dot(B.T)", "def inverse(self, y):\n device = y.device\n return t.einsum('ij,k,kj->ik', y, 1. / t.sqrt(self.eig).to(device), self.rot.to(device))", "def _inverse(self, y):\n d = self._compute_shared(y=y)\n rely = y - d.y_k # tf.where(d.out_of_bounds, tf.zeros_like(y), y - d.y_k)\n term2 = rely * (d.d_kp1 + d.d_k - 2 * d.s_k)\n # These terms are the a, b, c terms of the quadratic formula.\n a = d.h_k * (d.s_k - d.d_k) + term2\n b = d.h_k * d.d_k - term2\n c = -d.s_k * rely\n # The expression used here has better numerical behavior for small 4*a*c.\n relx = tf.where(\n tf.equal(rely, 0), tf.zeros_like(a),\n (2 * c) / (-b - tf.sqrt(b**2 - 4 * a * c)))\n return relx * d.w_k + d.x_k #tf.where(d.out_of_bounds, y, relx * d.w_k + d.x_k)", "def _get_inv(self):\n m,d = self.B.shape\n Im = np.eye(m)\n Id = np.eye(d)\n BBt = self.B@self.B.T\n I_BBt_inv = np.linalg.pinv(Im + BBt)\n \n return (1/self.alpha)*(Id - self.B.T@( I_BBt_inv@self.B/self.alpha))", "def inv(self):\n\n self.x, self.y = self.y, self.x\n self._x_, self._y_ = self._y_, self._x_\n self.xfac, self.yfac = 1 / self.yfac, 1 / self.xfac\n self._xfac_, self._yfac_ = 1 / self._yfac_, 1 / self._xfac_\n self._u = 1 / self._u.conj()", "def tinv(A,*kargs,**kwargs):\n U,s,VT = tsvd(A,*kargs,**kwargs)\n return (VT.T * s**(-1.0)) @ U.T", "def _inverse(self, x):\n alpha, beta = self._get_alpha_beta()\n diff = x - self.x0\n r = tf.linalg.norm(diff, axis=-1, keepdims=True)\n h = 1. 
/ (alpha + r)\n beta_h = beta * h\n return x + beta_h * diff", "def ssc.inverse (x_ij):\n\n Hij = xyzrph2matrix (x_ij)\n Rji = Hij[0:3, 0:3]\n tij = Hij[0:3,3]\n Rij = Rji.transpose ()\n tji = -Rij.dot (tij)\n Hji = zeros ((4,4))\n Hji[0:3,0:3] = Rij\n Hji[0:3,3] = tji\n Hji[3,3] = 1\n return matrix2xyzrph (Hji)", "def inverse (x_ij):\n\n Hij = xyzrph2matrix (x_ij)\n Rji = Hij[0:3, 0:3]\n tij = Hij[0:3,3]\n Rij = Rji.transpose ()\n tji = -Rij.dot (tij)\n Hji = numpy.zeros ((4,4))\n Hji[0:3,0:3] = Rij\n Hji[0:3,3] = tji\n Hji[3,3] = 1\n return matrix2xyzrph (Hji)", "def inverse_rigid_trans(Tr): \n inv_Tr = np.zeros_like(Tr) # 3x4\n inv_Tr[0:3,0:3] = np.transpose(Tr[0:3,0:3])\n inv_Tr[0:3,3] = np.dot(-np.transpose(Tr[0:3,0:3]), Tr[0:3,3])\n return inv_Tr", "def inverse(self):\n invr = np.linalg.inv(self.affine_matrix)\n return SymmOp(invr)", "def inv(self):\n inv = np.linalg.inv(self._mat)\n return MoebTr(inv[0][0], inv[0][1], inv[1][0], inv[1][1])", "def inv(a):\n a, cv, isMatrix = get_computation_matrix(a)\n t_dtype = TypeUtil.to_numpy_dtype(a.get_dtype())\n if a.numRows() != a.numCols():\n raise ValueError(\"inv: input a is not a square matrix!\")\n #compute LU using getrf\n (lu, piv, _) = getrf(a, overwrite_a=1, dtype=t_dtype)\n (ainv, _) = getri(lu, piv, lwork=0, overwrite_lu=1, dtype=t_dtype)\n if cv:\n if isMatrix:\n return ainv.to_numpy_matrix()\n else:\n return ainv.to_numpy_array()\n else:\n return ainv", "def inverse_transform(self, matrix):\n\n x = matrix.shape[0]\n y = matrix.shape[1]\n N = x\n\n # Inverse Fourier Transform matrix:\n ift = np.zeros([x, y], complex)\n\n for i in range(0, x):\n for j in range(0, y):\n sum_ift = 0\n for u in range(0, x):\n for v in range(0, y):\n sum_ift = sum_ift + matrix[u, v] * (np.cos(((2 * np.pi) / N) * (u * i + v * j)) + 1j * np.sin(((2 * np.pi) / N) * (u * i + v * j)))\n\n ift[i, j] = sum_ift\n\n\n return ift/(x*x)", "def inverse_affine_transformation_matrix(A):\n n, n = A.shape\n\n # extract components R, an n-1xn-1 linear transformation matrix, and T, an nx1 translation matrix\n R = A[:n-1, :n-1]\n T = A[:n-1, n-1]\n\n # find R^-1\n R_inv = np.linalg.inv(R)\n\n # Find A^-1/A_inv\n A_inv = np.copy(A).astype(float) # copy A for base of A^-1 matrix and ensure it is of data type float\n A_inv[:n-1, :n-1] = R_inv # set top left nxn sub matrix equal to R^-1\n A_inv[:n-1, n-1] = np.negative(R_inv.dot(T)) # place -R^-1*T in top right corner\n\n return A_inv", "def _inv(self) -> None:\n\n self.inv(inplace=True)", "def posdef_inv_matrix_inverse(tensor, identity, damping):\n return tf.matrix_inverse(tensor + damping * identity)", "def L_pseudo_inverse_tf(self) -> tf.Tensor:\n return tf.py_func(np.linalg.pinv, [self.L_tf], tf.float32)", "def fit_pseudo_inverse(self, X, y):\r\n self.pseudo_inv = np.linalg.pinv(X)\r\n self.weights = np.dot(y,self.pseudo_inv)", "def inverse_cayley_transform(z: torch.Tensor) -> torch.Tensor:\n identity = identity_like(z)\n i_identity = multiply_by_i(identity)\n\n z_minus_id = z - i_identity\n inv_z_plus_id = inverse(z + i_identity)\n return z_minus_id @ inv_z_plus_id", "def inverse(self, x, y):", "def inverse(self,mat):\n result = np.linalg.inv(mat)\n self.out = result\n return self.out", "def inverse_transform(self, y: Array2D) -> Array2D:", "def inverse(self):\n self.check_square()\n\n\n N = self.rows\n\n inverse = make_matrix(N, N)\n\n # Solve on a per-column basis using Ax = b formalism\n for j in range(N):\n b = make_matrix(N, 1)\n b[j, 0] = 1\n\n x = self.solve_linear_system(b)\n\n for i in range(N):\n inverse[i, j] 
= x[i, 0]\n\n return inverse", "def get_fc_inv(fc):\n return scipy.linalg.pinvh(fc.T @ fc) @ fc.T", "def tensorinv(a, ind=2):\n return TensorInv(ind)(a)", "def inverse(self, z, y):\n y_summary = self.summary_net(y)\n return self.invertible_net(z, y_summary, inverse=True)", "def inverse(self, u: Tensor, covariates: Tensor) -> Tensor:\n return self.real_nvp.inverse(u, covariates)", "def inverse(self):\n data = np.linalg.inv(self._data)\n return self.create(self.rows, self.cols, data)", "def inv(self):\n\t\tdeterminant = self.det()\n\t\tif determinant:\n\t\t\treturn self.adj() / determinant\n\t\telse:\n\t\t\traise ValueError(\"Not Invertible\")", "def _calc_T_inv(self, name, x, lambdify=True):\n\n T_inv = None\n T_inv_func = None\n filename = name + '[0,0,0]' if np.allclose(x, 0) else name\n filename += '_Tinv'\n # check to see if we have our transformation saved in file\n T_inv, T_inv_func = self._load_from_file(filename, lambdify)\n\n if T_inv is None and T_inv_func is None:\n print('Generating inverse transform function for %s' % filename)\n T = self._calc_T(name=name)\n rotation_inv = T[:3, :3].T\n translation_inv = -rotation_inv * T[:3, 3]\n T_inv = rotation_inv.row_join(translation_inv).col_join(\n sp.Matrix([[0, 0, 0, 1]]))\n T_inv = sp.Matrix(T_inv)\n\n # save to file\n abr_control.utils.os_utils.makedirs(\n '%s/%s' % (self.config_folder, filename))\n cloudpickle.dump(T_inv, open(\n '%s/%s/%s' % (self.config_folder, filename, filename), 'wb'))\n\n if lambdify is False:\n # if should return expression not function\n return T_inv\n\n if T_inv_func is None:\n T_inv_func = self._generate_and_save_function(\n filename=filename, expression=T_inv,\n parameters=self.q+self.x)\n return T_inv_func", "def inverse(self):\n if not self.is_square():\n raise(ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n # TODO - your code here\n inverse = []\n if self.h == 1:\n temp = []\n temp.append(1/self.g[0][0])\n inverse.append(temp)\n else:\n identity_matrix = identity(self.h)\n det_term = 1/self.determinant()\n trace_term = self.trace()\n # implement intermediate scaling step locally\n # trace_x_I = trace_term * identity_matrix\n trace_x_I = []\n for i in range(len(self.g)):\n temp_row = []\n for j in range(len(self.g[i])):\n temp_row.append(trace_term * identity_matrix[i][j])\n trace_x_I.append(temp_row)\n # implement sub-traction locally\n # sub_term = trace_x_I - self.g\n sub_term = []\n for i in range(len(trace_x_I)):\n temp_row = []\n for j in range(len(trace_x_I[i])):\n temp_row.append(trace_x_I[i][j] - self.g[i][j])\n sub_term.append(temp_row)\n # implement final scaling step locally\n # inverse = det_term * sub_term\n inverse = []\n for i in range(len(sub_term)):\n temp_row = []\n for j in range(len(sub_term[i])):\n temp_row.append(det_term * sub_term[i][j])\n inverse.append(temp_row)\n return Matrix(inverse)\n # TODO - your code here", "def inv(P):\n L = cho_factor(P)\n return cho_solve(L, np.eye(P.shape[0]))", "def inv(in_A):\n Q,R = qr(in_A)\n QT = Q.T\n N = shape(in_A)[0]\n \n for n in range(N-1,-1,-1):\n Rnn = R[n,n]\n R[n,:] /= Rnn\n QT[n,:] /= Rnn\n for m in range(n+1,N):\n Rnm = R[n,m]\n R[n,m] = 0\n QT[n,:] -= QT[m,:]*Rnm\n\n return QT", "def inv(self):\n self.inverse = not self._inverse\n return self", "def intrinsic_matrix_inv(self) -> np.ndarray:\n\n # determinant of top left of intrinsic matrix\n tldet = self.kx * self.ky\n\n return np.array([[1 / self.kx, -self.kxy 
/ tldet, (self.py * self.kxy - self.ky * self.px) / tldet],\n [0, 1 / self.ky, -self.py / self.ky]])", "def compute_inverse(in1, in2):\n aL = [in1]\n bL = [in2]\n tL = [0]\n t = 1\n sL = [1]\n s = 0\n q = math.floor((aL[0] / bL[0]))\n r = (aL[0] - (q * bL[0]))\n\n while r > 0:\n temp = (tL[0] - (q * bL[0]))\n tL[0] = t\n t = temp\n temp = (sL[0] - (q * s))\n sL[0] = s\n s = temp\n aL[0] = bL[0]\n bL[0] = r\n q = math.floor(aL[0] / bL[0])\n r = (aL[0] - (q * bL[0]))\n\n inverse = s % in2\n return inverse", "def test_functional_inverse(self, dim):\n M = np.random.rand(dim, dim)\n assert np.all(M == symplectic.xxpp_to_xpxp(symplectic.xpxp_to_xxpp(M)))\n assert np.all(M == symplectic.xpxp_to_xxpp(symplectic.xxpp_to_xpxp(M)))\n\n v = np.random.rand(dim)\n assert np.all(v == symplectic.xxpp_to_xpxp(symplectic.xpxp_to_xxpp(v)))\n assert np.all(v == symplectic.xpxp_to_xxpp(symplectic.xxpp_to_xpxp(v)))", "def inverse(self, x):\n return self.mul(self.weights, x.unsqueeze(-1)).squeeze(-1) + self.shift\n #return self.mul(torch.inverse(self.weights), (x - self.shift).unsqueeze(-1)).squeeze(-1)", "def inverse(self, x):\n x = np.asarray(x)\n def r(vec):\n return utils.recycled(vec, as_=x)\n if self.zero is not None and self.multiplier is not None:\n x = x / r(self.multiplier) + r(self.zero)\n elif self.zero is not None:\n x = x + r(self.zero)\n elif self.multiplier is not None:\n x = x / r(self.multiplier)\n return x", "def ComputeInverseInnerOrientation(self):\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n mat = np.array([[a1[0], a2[0]], [b1[0], b2[0]]])\n mat = la.inv(mat)\n\n return np.array([a0[0], b0[0], mat[0, 0], mat[0, 1], mat[1, 0], mat[1, 1]]).T", "def _inv(M):\n ll, mm = M.shape\n M2 = M + 1e-10 * np.eye(ll)\n L = np.linalg.cholesky(M2)\n inv_L = np.linalg.inv(L)\n inv_M = inv_L.T @ inv_L\n return inv_M", "def inv(siso):\n num = siso.num[0][0]\n den = siso.den[0][0]\n return ctrl.tf(den,num,siso.dt)", "def inv(q):\n return q * np.array([-1,-1,-1,1]) / amplitude(q) ** 2", "def inverse(self):\n return self.solve(Matrix.I(self.nrows))", "def inv_signal(self, y):\n spect = self.inv_spectrogram(y)\n if self.use_complex:\n return self.stft_fn.inverse(spect, complex=self.use_complex)\n return self.stft_fn.inverse(spect, torch.rand(*spect.shape)*2*np.pi)\n # return self.stft_fn.inverse(spect, torch.zeros(*spect.shape))", "def inverse_transform(self, y_enc):\n return self.le_.inverse_transform(\n self.bsidx_lexsorted_[np.require(y_enc, dtype=int)])", "def fit_pseudo_inverse(self, X, y):\n X_inverse = np.linalg.pinv(X)\n self.weights = np.dot(y,X_inverse)", "def invert(self,el):\n return el^(self.q-2)", "def invert (y):\n\n if eq(y,pos) : return y\n elif lt(y,nil) : return neg(invert(neg(y)))\n elif eq(y,nil) : raise ZeroDivisionError()\n yl,yr = split(y)\n il = nil\n ir = None\n r = None,None\n iyr,iyl = None,None\n width = 0\n while (il or ir) and width < 3:\n width += 1\n nl = nr = None\n if il is not None:\n r = (il,r[1])\n if yr is not None:\n if iyr is None:\n #iyr = ~yr\n iyr = invert(yr)\n left = mul(mul(add(pos,sub(yr,y)),il),iyr)\n if r[0] is None or gt(left,r[0]):\n nl = left\n if yl is not None and not le(yl,nil):\n if iyl is None:\n #iyl = ~yl\n iyl = invert(yl)\n right = mul(mul(add(pos,sub(yl,y)),il),iyl)\n if r[1] is None or lt(right,r[1]):\n nr = right\n if ir:\n r = (r[0],ir)\n if 
yl is not None and not le(yl,nil):\n if iyl is None:\n #iyl = ~yl\n iyl = invert(yl)\n left = mul(mul(add(pos,sub(yl,y)),ir),iyl)\n if r[0] is None or (gt(left,r[0]) and (not nl or gt(left,nl))):\n nl = left\n if yr is not None:\n if iyr is None:\n #iyr = ~yr\n iyr = invert(yr)\n right = mul(mul(add(pos,sub(yr,y)),ir),iyr)\n if r[1] is None or (lt(right,r[1]) and (not nr or lt(right,nr))):\n nr = right\n il,ir = nl,nr\n #print(r)\n if r[0] is None: r = (0,r[1])\n if r[1] is None: r = (r[0],0)\n return join(*r)", "def _inverse_transform(self, X, y=None):\n return clone(self.transformer).fit(X=X, y=y).inverse_transform(X=X, y=y)", "def __invert__(self):\n return self.inverse()", "def inv(self,output,*args,**kwargs):\n raise ModelTypeError('Model is not invertible')", "def inv_inplace(a):", "def inv(self):\n return MoebGen(self._d / self._det, - self._b / self._det, - self._c / self._det, self._a / self._det)", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def ginverse(p, x):\n x_ = x.data\n sinu = torch.sin(p.narrow(-1,0,1)).pow(2)\n x_.narrow(-1,1,1).mul_(sinu.pow(-1))\n return x_", "def inverse(self: T) -> T:", "def _r_inv(self):\n raise NotImplementedError", "def inverse(self):\n def inv(v):\n v[0], v[1] = v[1] , v[0]\n for v in [self.point1 , self.pointN , self.unitv, self.normalv]:\n inv(v)\n\n self.points = numpy.roll(self.points,1,axis=1)\n self.a, self.b = self.b, self.a\n self.angle = numpy.arccos( self.unitv[0] )*numpy.sign(self.unitv[1] )\n return", "def inverse(self):\r\n \r\n Mi=mat4()\r\n d=self.determinant()\r\n for i in range(4):\r\n for j in range(4):\r\n sign=1-((i+j)%2)*2\r\n m3=self._submat(i,j)\r\n Mi[j,i]=sign*m3.determinant()/d\r\n return Mi", "def inverse( m, context = FloatContext, copy_m=True ):\n n,n_ = shape_mat(m)\n assert (n==n_) #matris should be square\n\n return solve( m, eye(n), context=context, copy_b=False, copy_a=copy_m )", "def my_inv(A):\n cho = scipy.linalg.flapack.dpotrf(A)\n choinv = scipy.linalg.flapack.dtrtri(cho[0])\n upper = scipy.linalg.flapack.dlauum(choinv[0])[0]\n\n # upper is the upper triangular entries of A^{-1}, so need to fill in the\n # lower triangular ones; unfortunately this has nontrivial overhead\n temp = np.diag(upper)\n return upper + upper.T - np.diag(temp)", "def pseudoInversa(J):\n\tJinv = np.linalg.pinv(J)\n\treturn Jinv", "def inverse_transform(self, Xt):\n return self.transformer.inverse_transform(Xt)", "def _inv22_vectorized(M):\n assert (M.ndim == 3)\n assert (M.shape[-2:] == (2, 2))\n M_inv = np.empty_like(M)\n delta_inv = np.reciprocal(M[:, 0, 0]*M[:, 1, 1] - M[:, 0, 1]*M[:, 1, 0])\n M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv\n M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv\n M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv\n M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv\n return M_inv", "def inv(matrix):\n if sp.sparse.issparse(matrix):\n result = spla.inv(matrix)\n else:\n result = sp.linalg.inv(matrix)\n return result", "def fit_transform_inverse(self, X_t):\r\n X_t = to_2dnp_array(X_t)\r\n mini_batch_size, n_dim = X_t.shape\r\n\r\n X_before = self.ts_data[-mini_batch_size:]\r\n X_after = self._get_prev_transform()[-mini_batch_size:]\r\n X_o = X_t - X_after + X_before\r\n return X_o", "def pinv(self, x: np.ndarray):\n return self.rmatvec(x) / self.gram", "def get_inverse_2x2(u, v):\n if not is_linearly_independent_2x2(u, v):\n return\n uv = get_uv(u, v)\n iden = get_uv([1, 0],[0, 1])\n a = 
np.zeros((2, 4))\n for i in range(2):\n for j in range(2):\n a[i][j] = uv[i][j]\n a[i][j+2] = iden[i][j]\n\n q = a[0][1] / a[1][1]\n a[0] = a[0] - q * a[1]\n\n q = a[1][0] / a[0][0]\n a[1] = a[1] - q * a[0]\n\n a[0] /= a[0][0]\n\n a[1] /= a[1][1]\n\n for i in range(2):\n for j in range(2):\n uv[i][j] = a[i][j+2]\n return uv", "def inverse(self):\n if self.determinant() != 0:\n ops = reduce_to_red_echelon(self.data.copy(), True)[1]\n matrix = identity_matrix(self.n_rows).data\n \n if ops:\n if isinstance(ops[0], str):\n ops = [ops]\n \n for op in ops:\n if op[0] == 'swap':\n matrix = row_swap(matrix, op[1], op[2])\n elif op[0] == 'multiplication':\n matrix = row_multiply(matrix, op[1], op[2])\n elif op[0] == 'subtract':\n matrix = row_subtract(matrix, op[1], op[2], op[3])\n else:\n raise ValueError('Row operation not recognized')\n else:\n raise ValueError('Matrix has a determinant of 0 and is not invertible')\n return Matrix(matrix)", "def invert_technosphere_matrix(self):\n assert hasattr(self, \"inventory\"), \"Must do lci first\"\n assert PYPARDISO, \"pardiso solver needed for efficient matrix inversion\"\n\n MESSAGE = \"\"\"Technosphere matrix inversion is often not the most efficient approach.\n See https://github.com/brightway-lca/brightway2-calc/issues/35\"\"\"\n warnings.warn(MESSAGE)\n\n self.inverted_technosphere_matrix = spsolve(\n self.technosphere_matrix, np.eye(*self.technosphere_matrix.shape)\n )\n return self.inverted_technosphere_matrix", "def inverse(self):\n rotation_matrix = self.pose_mat[:3, :3]\n translation_vector = self.pose_mat[:3, 3]\n\n rot = np.transpose(rotation_matrix)\n trans = - np.matmul(np.transpose(rotation_matrix), translation_vector)\n return Transformation(rot, trans)", "def affine_transform_inverse(np_transform):\n rotation = np_transform[:3, :3]\n translation = np_transform[:3, 3]\n rotation_inv = numpy.linalg.inv(rotation)\n translation_inv = -1 * numpy.dot(rotation_inv, translation)\n result = numpy.identity(4)\n result[:3, :3] = rotation_inv\n result[:3, 3] = translation_inv.flatten()\n return result", "def erfcinv(a):", "def invert(self):\n\n if self.rows != self.columns:\n raise ValueError(\"Matrix must be square to invert\")\n\n A, operations = self.to_reduced_row_echelon()\n if not A.is_identity():\n return 0\n\n # If A was reduced to the identity matrix, then the same set of operations will take I to the inverse of A.\n # [A I] -> [I A^(-1)]\n\n I = IdentityMatrix(size = self.rows)\n for operation in operations:\n func = I.__getattribute__(operation[0])\n args = operation[1:]\n func(*args)\n\n return I", "def inverse(self):\n if not self.is_square():\n raise(ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n\n # TODO - your code here\n if self.h == 1:\n inverse = [[1/self.g[0][0]]];\n else:\n a = self.g[0][0];\n b = self.g[0][1];\n c = self.g[1][0];\n d = self.g[1][1];\n if(a*d==b*c):\n raise ValueError('matrix does not have a inverse!');\n else:\n weigh = 1/(a*d-b*c);\n inverse = [[weigh*d,weigh*-1*b],[weigh*-1*c,weigh*a]];\n return Matrix(inverse);", "def inv_heaviside(n, axis=0, normalized=True):\n w = jnp.sqrt(jnp.arange(n, 0, -1))\n\n times_u = lambda x: jnp.diff(x, prepend=0)\n trans_u = lambda x: -jnp.diff(x, append=0)\n\n times_n = lambda x: jnp.diff(x, prepend=0) * w\n trans_n = lambda x: -jnp.diff(x * w, append=0)\n\n times, trans = (times_n, trans_n) if normalized else (times_u, trans_u) \n times, trans = 
apply_along_axis(times, trans, axis)\n return Operator(times=times, trans=trans, shape=(n, n))", "def inv_m(self):\n self.m = -self.m", "def invert(M):\r\n M = isMatrix(M)\r\n return M.I", "def unwhiten(self, U, A, m):\n X = np.matmul(A, U.T).T\n X += m\n\n return X", "def inverse(self):\n tfft_inv = np.zeros(\n self.tfft.shape, dtype=np.complex)\n tfft_inv[self.tfft != 0] = 1. / self.tfft[self.tfft != 0]\n efft_inv = np.zeros(\n self.efft.shape, dtype=np.complex)\n efft_inv[self.efft != 0] = 1. / self.efft[self.efft != 0]\n bfft_inv = np.zeros(\n self.bfft.shape, dtype=np.complex)\n bfft_inv[self.bfft != 0] = 1. / self.bfft[self.bfft != 0]\n\n ret = tebfft(\n self.nx,\n self.dx, [tfft_inv, efft_inv, bfft_inv],\n ny=self.ny,\n dy=self.dy)\n\n return ret", "def _pseudo_inv22sym_vectorized(M):\n assert M.ndim == 3\n assert M.shape[-2:] == (2, 2)\n M_inv = np.empty_like(M)\n prod1 = M[:, 0, 0]*M[:, 1, 1]\n delta = prod1 - M[:, 0, 1]*M[:, 1, 0]\n rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))\n\n if np.all(rank2):\n # Normal 'optimized' flow.\n M_inv[:, 0, 0] = M[:, 1, 1] / delta\n M_inv[:, 0, 1] = -M[:, 0, 1] / delta\n M_inv[:, 1, 0] = -M[:, 1, 0] / delta\n M_inv[:, 1, 1] = M[:, 0, 0] / delta\n else:\n # 'Pathologic' flow.\n # Here we have to deal with 2 sub-cases\n # 1) First sub-case: matrices of rank 2:\n delta = delta[rank2]\n M_inv[rank2, 0, 0] = M[rank2, 1, 1] / delta\n M_inv[rank2, 0, 1] = -M[rank2, 0, 1] / delta\n M_inv[rank2, 1, 0] = -M[rank2, 1, 0] / delta\n M_inv[rank2, 1, 1] = M[rank2, 0, 0] / delta\n # 2) Second sub-case: rank-deficient matrices of rank 0 and 1:\n rank01 = ~rank2\n tr = M[rank01, 0, 0] + M[rank01, 1, 1]\n tr_zeros = (np.abs(tr) < 1.e-8)\n sq_tr_inv = (1.-tr_zeros) / (tr**2+tr_zeros)\n #sq_tr_inv = 1. / tr**2\n M_inv[rank01, 0, 0] = M[rank01, 0, 0] * sq_tr_inv\n M_inv[rank01, 0, 1] = M[rank01, 0, 1] * sq_tr_inv\n M_inv[rank01, 1, 0] = M[rank01, 1, 0] * sq_tr_inv\n M_inv[rank01, 1, 1] = M[rank01, 1, 1] * sq_tr_inv\n\n return M_inv", "def modular_inverse(self):\n i = gmpy2.invert(self.c2, self.n)\n mx = pow(self.c1, self.a, self.n)\n my = pow(i, int(-self.b), self.n)\n self.m= mx * my % self.n", "def invert(s):\n return s.translate(INVERT_TBL)", "def _inverse_transform(self, x):\n if x.atleast_2d().shape[1] != self.w.size:\n raise ValueError(\"array to revert must have {:} \"\n \"features (columns).\".format(self.w.size))\n\n v = (x - self.b).atleast_2d()\n\n v[:, self.w != 0] /= self.w[self.w != 0] # avoids division by zero\n\n return v.ravel() if x.ndim <= 1 else v", "def inv(self):\n return self.conjugate()" ]
[ "0.7413214", "0.7380314", "0.7238792", "0.711224", "0.7110274", "0.70157236", "0.6988002", "0.69570976", "0.69044274", "0.68427217", "0.6829249", "0.68215156", "0.6778755", "0.6680136", "0.6677371", "0.66401356", "0.66035974", "0.6586728", "0.65792894", "0.6540509", "0.6510561", "0.6458543", "0.64023006", "0.6383236", "0.63668734", "0.63501394", "0.6348877", "0.63381463", "0.6337291", "0.6323735", "0.63211423", "0.6303579", "0.6302793", "0.63025147", "0.629576", "0.62885225", "0.6280153", "0.6259124", "0.6242787", "0.62317103", "0.6224502", "0.62198204", "0.621688", "0.6187253", "0.6147371", "0.6140895", "0.6124544", "0.612342", "0.611726", "0.611566", "0.6107041", "0.61049426", "0.6094766", "0.60905343", "0.6085933", "0.6053689", "0.6053515", "0.60370743", "0.60106", "0.6008633", "0.59970593", "0.59954876", "0.5994363", "0.59895164", "0.59874713", "0.59874713", "0.59874713", "0.59874713", "0.59874713", "0.59798855", "0.597849", "0.597286", "0.59563607", "0.5943037", "0.5935474", "0.59311086", "0.59225774", "0.5916523", "0.59075594", "0.59074485", "0.5903496", "0.59025884", "0.58977413", "0.58975035", "0.5895193", "0.58949536", "0.5864283", "0.58609235", "0.5860922", "0.5849665", "0.5846727", "0.58418727", "0.584029", "0.58334106", "0.5832786", "0.5831347", "0.58287823", "0.5826621", "0.58234465", "0.582153" ]
0.7070988
5
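A hypothetical end-to-end check of the two records above — a sketch under the assumption that toeplitz_inverse_multiplication_prep, toeplitz_inverse_multiplication, and a levinson helper (such as the stand-in sketched earlier) are importable, with fft, ifft, and real coming from numpy. The prep cost is paid once per matrix; each subsequent product inv(T) u then needs only a handful of FFTs, and the result can be compared against a dense solve:

import numpy as np
from scipy.linalg import toeplitz

n = 16
c = 0.5 ** np.arange(n)        # first column of a symmetric Toeplitz matrix T
c[0] = 2.0                     # keep T well conditioned (diagonally dominant)
u = np.random.default_rng(1).standard_normal(n)

params = toeplitz_inverse_multiplication_prep(c)      # Levinson-based prep, once per T
y_fast = toeplitz_inverse_multiplication(u, *params)  # FFT-based application of inv(T)
y_dense = np.linalg.solve(toeplitz(c), u)             # dense O(n^3) reference

print(np.max(np.abs(y_fast - y_dense)))               # should be near machine precision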
Determinant of a block-diagonal matrix having Toeplitz blocks
def bd_toeplitz_slogdet(*arrs):
    sign = 1
    logdet = 0
    for c in arrs:  # loop over each block
        (t_sign, t_logdet) = toeplitz_slogdet(c)
        sign *= t_sign
        logdet += t_logdet
    return (sign, logdet)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _block_diagonal(factor_matrices):\n shapes_dict = {}\n for i, matrix_i in enumerate(factor_matrices):\n for j, matrix_j in enumerate(factor_matrices):\n shapes_dict[(i, j)] = matrix_i.shape[:-1] + matrix_j.shape[-1:]\n rows = []\n # concacatenate along axis = -2\n for i, matrix_i in enumerate(factor_matrices):\n # concatenate along axis = -1\n blocks_to_concatenate = []\n for j, _ in enumerate(factor_matrices):\n if i == j:\n blocks_to_concatenate.append(matrix_i)\n else:\n blocks_to_concatenate.append(gs.zeros(shapes_dict[(i, j)]))\n row = gs.concatenate(blocks_to_concatenate, axis=-1)\n rows.append(row)\n metric_matrix = gs.concatenate(rows, axis=-2)\n return metric_matrix", "def block_diag_full(W_):\n assert(W_.ndim == 3)\n bsize = W_.shape[0]\n full = np.concatenate([\n np.concatenate([ np.diag(W_[:,i,j]) for j in range(W_.shape[2]) ], axis=1)\n for i in range(W_.shape[1]) ], axis=0)\n return full", "def det(self):\n\t\t\n\t\trows = self._rows\n\t\tsign = +1\n\t\tsumm = 0\n\n\t\tfor perm in permutations(range(rows), rows):\n\t\t\tmul = 1\n\t\t\tsign = SquareMatrix.__parity_of_permutation(perm)\n\n\t\t\tfor i in range(rows):\n\t\t\t\tmul *= self[i][perm[i]]\n\n\t\t\tsumm += sign * mul\n\t\treturn summ", "def determinant(self):\n if self.cols != self.rows:\n raise Exception ('Matrix is not square!')\n for i in range(self.rows):\n if self.values[i][i] == 0:\n raise Exception ('There is zero on the main diagonal')\n #TODO: Rearrange the lines, that the main diagonal don't have a zero values \n\n arr = self.values[:]\n for i in range(self.rows):\n for j in range(self.cols):\n diag = [arr[l][p] for p in range(self.cols) for l in range(self.rows) if l == p ]\n if i > j :\n arr2 = arr[i][j]/diag[j]\n arr1 = [round(x * arr2, 4) for x in arr[i-i+j]]\n arr[i] = map(lambda x,y: round(x - y, 4) , arr[i], arr1 )\n\n diag = [arr[l][p] for p in range(self.cols) for l in range(self.rows) if l == p ]\n det = 1\n for i in range(len(diag)):\n det *= diag[i]\n if det != 0 :\n return True\n else:\n return False", "def determinant(A):\n \n total = 0\n\n if len(A) == 1:\n return A[0][0]\n\n for col in range(len(A)):\n Asub = A[1:]\n for j in range(len(A)-1):\n Asub[j] = Asub[j][:col] + Asub[j][col+1:]\n subdet = determinant(Asub)\n sign = (-1) ** (col % 2)\n total += sign * A[0][col] * subdet\n\n return total", "def blockDiag(matrixList):\n \n # Check if all input matrices are square matrices\n dimension = 0\n for block in matrixList:\n if block.shape[0] != block.shape[1]:\n raise Error(\"Non-square input matrix.\")\n dimension += block.shape[0]\n \n # Construct diagonal block matrix\n index = 0\n blockMatrix = np.zeros((dimension, dimension))\n for block in matrixList:\n matSize = block.shape[0]\n blockMatrix[index:index+matSize,index:index+matSize] = block\n index += matSize\n \n return blockMatrix", "def det(A):\n # Section 1: Establish n parameter and copy A\n n = len(A)\n AM = A[:]\n\n # Section 2: Row manipulate A into an upper triangle matrix\n for fd in range(n): # fd stands for focus diagonal\n if AM[fd][fd] == 0:\n AM[fd][fd] = 1.0e-18 # Cheating by adding zero + ~zero\n for i in range(fd+1, n): # skip row with fd in it.\n crScaler = AM[i][fd] / AM[fd][fd] # cr stands for \"current row\".\n for j in range(n): # cr - crScaler * fdRow, one element at a time.\n AM[i][j] = AM[i][j] - crScaler * AM[fd][j]\n\n # Section 3: Once AM is in upper triangle form ...\n product = 1.0\n for i in range(n):\n product *= AM[i][i] # ... 
product of diagonals is determinant\n\n return product", "def determinant_fast(A):\n # Section 1: Establish n parameter and copy A\n n = len(A)\n AM = copy_matrix(A)\n\n # Section 2: Row manipulate A into an upper triangle matrix\n for fd in range(n): # fd stands for focus diagonal\n if AM[fd][fd] == 0: \n AM[fd][fd] = 1.0e-18 # Cheating by adding zero + ~zero\n for i in range(fd+1,n): # skip row with fd in it.\n crScaler = AM[i][fd] / AM[fd][fd] # cr stands for \"current row\".\n for j in range(n): # cr - crScaler * fdRow, but one element at a time.\n AM[i][j] = AM[i][j] - crScaler * AM[fd][j]\n \n # Section 3: Once AM is in upper triangle form ...\n product = 1.0\n for i in range(n):\n product *= AM[i][i] # ... product of diagonals is determinant\n\n return product", "def get_diagonal(matrix):\n\tdegree_vector = tf.reduce_sum(matrix, 1)\n\tdiagonal = tf.diag(degree_vector, name = 'diagonal')\n\treturn diagonal", "def determinant(self) -> float:\n num_R, num_C = self.shape()\n assert num_R == num_C, f\"Determinant must be for a square matrix; this one is {self.shape()}.\"\n # -------------------------------------------------------\n # TODO: You write this one.\n # Note: this one should be recursive....\n if num_R == 1:\n return self.mat[0][0]\n det =0\n for i in range(num_R):\n det += self.mat[0][i] * self.get_minor(0,i).determinant() * (-1)**i\n return det\n pass # remove this when you add your code.\n # -------------------------------------------------------", "def det(self):\n\n if self.rows != self.columns:\n raise ValueError(\"Matrix must be square\")\n\n if self.rows == 1:\n return self.row(1)[0]\n\n if self.rows == 2:\n return self.entry(1,1) * self.entry(2,2) - self.entry(1,2) * self.entry(2,1)\n\n det = 0\n row_to_expand = 1\n\n for i in range(1, self.columns + 1):\n det += self.entry(row_to_expand, i) * self._cofactor(row_to_expand, i)\n\n return det", "def determinant(self):\n if self.n_rows != self.n_cols:\n raise Exception('Matrix is not square')\n if self.n_rows == 2:\n return (self.data[0][0] * self.data[1][1]) - (self.data[1][0] * self.data[0][1])\n else:\n echelon, ops = reduce_to_echelon(self.data.copy(), True)\n swaps = sum([1 if row[0] == 'swap' else 0 for row in ops])\n return math.prod([echelon[i][i] for i in range(len(echelon))]) * (-1) ** swaps", "def determinant(self):\n if self.m != self.n:\n raise exc.LinearAlgebraError(\"cannot calculate the determinant of\"\n \"a non-square matrix\")\n if self.m == 1:\n return self[0, 0]\n # TODO: can we choose a better row/column to improve efficiency\n return functools.reduce(\n lambda x, y: x ^ y,\n [self[0, j] and\n self.subset([i for i in range(1, self.m)],\n [k for k in range(self.n) if k != j]).determinant\n for j in range(self.n)],\n )", "def diagonalsNeg (matrix, cols, rows):\r\n for diagonal in ([(j, i - cols + j + 1) for j in range(cols)] for i in range(cols + rows - 1)):\r\n yield [matrix[i][j] for i, j in diagonal if i >= 0 and j >= 0 and i < cols and j < rows]", "def determinant(self):\n if self.m != self.n:\n raise exc.LinearAlgebraError(\"cannot calculate the determinant of\"\n \"a non-square matrix\")\n if self.m == 1:\n return self[0, 0]\n # TODO: can we choose a better row/column to improve efficiency\n return sum([self[0, j] * (-1 if j % 2 else 1) *\n self.subset([i for i in range(1, self.m)],\n [k for k in range(self.n) if k != j]).determinant\n for j in range(self.n)])", "def total_electronic_hamiltonian(self):\n return block_diag(*[self.electronic_manifold(n) for n in range(3)])", "def 
block_diagonal(matrices, dtype=tf.float32):\n matrices = [tf.convert_to_tensor(matrix, dtype=dtype) for matrix in matrices]\n blocked_rows = tf.Dimension(0)\n blocked_cols = tf.Dimension(0)\n batch_shape = tf.TensorShape(None)\n for matrix in matrices:\n full_matrix_shape = matrix.get_shape().with_rank_at_least(2)\n batch_shape = batch_shape.merge_with(full_matrix_shape[:-2])\n blocked_rows += full_matrix_shape[-2]\n blocked_cols += full_matrix_shape[-1]\n ret_columns_list = []\n for matrix in matrices:\n matrix_shape = tf.shape(matrix)\n ret_columns_list.append(matrix_shape[-1])\n ret_columns = tf.add_n(ret_columns_list)\n row_blocks = []\n current_column = 0\n for matrix in matrices:\n matrix_shape = tf.shape(matrix)\n row_before_length = current_column\n current_column += matrix_shape[-1]\n row_after_length = ret_columns - current_column\n row_blocks.append(tf.pad(\n tensor=matrix,\n paddings=tf.concat(\n [tf.zeros([tf.rank(matrix) - 1, 2], dtype=tf.int32),\n [(row_before_length, row_after_length)]],\n axis=0)))\n blocked = tf.concat(row_blocks, -2)\n blocked.set_shape(batch_shape.concatenate((blocked_rows, blocked_cols)))\n return blocked", "def block_diagonal(matrices, dtype=tf.float32):\n matrices = [tf.convert_to_tensor(matrix, dtype=dtype)\n for matrix in matrices]\n blocked_rows = tf.Dimension(0)\n blocked_cols = tf.Dimension(0)\n batch_shape = tf.TensorShape(None)\n for matrix in matrices:\n full_matrix_shape = matrix.get_shape().with_rank_at_least(2)\n batch_shape = batch_shape.merge_with(full_matrix_shape[:-2])\n blocked_rows += full_matrix_shape[-2]\n blocked_cols += full_matrix_shape[-1]\n ret_columns_list = []\n for matrix in matrices:\n matrix_shape = tf.shape(matrix)\n ret_columns_list.append(matrix_shape[-1])\n ret_columns = tf.add_n(ret_columns_list)\n row_blocks = []\n current_column = 0\n for matrix in matrices:\n matrix_shape = tf.shape(matrix)\n row_before_length = current_column\n current_column += matrix_shape[-1]\n row_after_length = ret_columns - current_column\n row_blocks.append(tf.pad(\n tensor=matrix,\n paddings=tf.concat(\n [tf.zeros([tf.rank(matrix) - 1, 2], dtype=tf.int32),\n [(row_before_length, row_after_length)]],\n axis=0)))\n blocked = tf.concat(row_blocks, -2)\n blocked.set_shape(batch_shape.concatenate((blocked_rows, blocked_cols)))\n return blocked", "def is_diagonal(row, col):\n return 1 if row == col else 0", "def finite_difference(n):\n B=sparse.diags([1,-4,1],[-1,0,1],shape=(n,n))\n A=sparse.block_diag([B for i in xrange(0,n)])\n A.setdiag(1,k=-n)\n A.setdiag(1,k=n) \n b=[-100]+[0 for i in xrange(1,n-1)]+[-100]\n b=b*n\n return A,b", "def mounting_matrix(self):\n # fmt: off\n count = 0\n for x in range(self.ntheta):\n self.M[count][count] = 1\n self.f[count][0] = self.p_in\n count = count + self.nz - 1\n self.M[count][count] = 1\n self.f[count][0] = self.p_out\n count = count + 1\n count = 0\n for x in range(self.nz - 2):\n self.M[self.ntotal - self.nz + 1 + count][1 + count] = 1\n self.M[self.ntotal - self.nz + 1 + count][self.ntotal - self.nz + 1 + count] = -1\n count = count + 1\n count = 1\n j = 0\n for i in range(1, self.nz - 1):\n a = (1 / self.dtheta ** 2) * (self.c1[i][self.ntheta - 1])\n self.M[count][self.ntotal - 2 * self.nz + count] = a\n b = (1 / self.dz ** 2) * (self.c2[i - 1, j])\n self.M[count][count - 1] = b\n c = -((1 / self.dtheta ** 2) * ((self.c1[i][j]) + self.c1[i][self.ntheta - 1])\n + (1 / self.dz ** 2) * (self.c2[i][j] + self.c2[i - 1][j]))\n self.M[count, count] = c\n d = (1 / self.dz ** 2) * (self.c2[i][j])\n 
self.M[count][count + 1] = d\n e = (1 / self.dtheta ** 2) * (self.c1[i][j])\n self.M[count][count + self.nz] = e\n count = count + 1\n count = self.nz + 1\n for j in range(1, self.ntheta - 1):\n for i in range(1, self.nz - 1):\n a = (1 / self.dtheta ** 2) * (self.c1[i, j - 1])\n self.M[count][count - self.nz] = a\n b = (1 / self.dz ** 2) * (self.c2[i - 1][j])\n self.M[count][count - 1] = b\n c = -((1 / self.dtheta ** 2) * ((self.c1[i][j]) + self.c1[i][j - 1])\n + (1 / self.dz ** 2) * (self.c2[i][j] + self.c2[i - 1][j]))\n self.M[count, count] = c\n d = (1 / self.dz ** 2) * (self.c2[i][j])\n self.M[count][count + 1] = d\n e = (1 / self.dtheta ** 2) * (self.c1[i][j])\n self.M[count][count + self.nz] = e\n count = count + 1\n count = count + 2\n count = 1\n for j in range(self.ntheta - 1):\n for i in range(1, self.nz - 1):\n if j == 0:\n self.f[count][0] = (self.c0w[i][j] - self.c0w[i][self.ntheta - 1]) / self.dtheta\n else:\n self.f[count][0] = (self.c0w[i, j] - self.c0w[i, j - 1]) / self.dtheta\n count = count + 1\n count = count + 2\n # fmt: on", "def diagonal(self):\n return self.rep.diagonal()", "def det(a):\n a = copy.deepcopy(a)\n n = len(a)\n det = 1\n com_k = 1\n for k in range(n-1):\n step = 1\n\n while a[k][k] == 0:\n a[k+step], a[k] = a[k], a[k+step]\n det = -det\n step += 1\n mul = a[k][k]\n\n for i in range(k+1, n):\n for j in range(k+1, n):\n a[i][j] *= mul\n a[i][j] -= a[i][k] * a[k][j]\n a[i][j] /= com_k\n\n com_k = mul\n\n det = det * a[-1][-1]\n\n return det", "def diagonal(self):\r\n return math.sqrt((self.width ** 2) + (self.height ** 2))", "def determinant(self):\n d1 = self._row_1[0] * (self._row_2[1] * self._row_3[2] - self._row_2[2] * self._row_3[1])\n d2 = self._row_1[1] * (self._row_2[0] * self._row_3[2] - self._row_2[2] * self._row_3[0])\n d3 = self._row_1[2] * (self._row_2[0] * self._row_3[1] - self._row_2[1] * self._row_3[0])\n return d1 - d2 + d3", "def get_off_diagonal(matrix):\n\toff_diag = scipy.array(matrix, dtype=matrix.dtype)\n\toff_diag[scipy.diag_indices_from(matrix)] = 0\n\treturn off_diag", "def determinant (self):\n if self.is_square:\n det = 1\n for idx, row in enumerate(echelon_form(self).rows()):\n det *= row[idx]\n return det\n else:\n raise NotImplementedError(\n \"Determinant only defined for square matrices.\")", "def flow_det(z, params):\n lparams, mparams = np.split(params, 2)\n diag = (1-mask)*lfun(mask*z,lparams)\n if len(z.shape) > 1:\n return np.sum(diag, axis=1)\n else:\n return np.sum(diag)", "def det(mtx):\n if not is_square(mtx):\n raise ValueError(\"Matrix should be square\")\n if len(mtx) == 2:\n return mtx[0][0] * mtx[1][1] - mtx[0][1] * mtx[1][0]\n\n result = 0\n sign = 1\n for inx in range(len(mtx)):\n next_mtx = get_minor_mtx(mtx, 0, inx)\n result += sign * (mtx[0][inx] * det(next_mtx))\n sign *= -1\n return result", "def determinant(self):\n if not self.is_square():\n raise(ValueError, \"Cannot calculate determinant of non-square matrix.\")\n if self.h > 2:\n raise(NotImplementedError, \"Calculating determinant not implemented for matrices largerer than 2x2.\")\n\n # TODO - your code here\n if self.h == 1:\n return self.g[0][0] # a 1x1 matrix\n else:\n return ((self.g[0][0] * self.g[1][1]) - (self.g[0][1] * self.g[1][0])) # a 2x2 matrix\n # TODO - your code here", "def is_diagonal(i, j):\n return 1 if i == j else 0", "def is_diagonal(i, j):\n return 1 if i == j else 0", "def isToeplitz(mat):\n for j in range(row):\n if not checkDiag(mat, 0, j):\n return False\n for i in range(1, col):\n if not checkDiag(mat, i, 0):\n return 
False\n return True", "def obtain_ones_in_the_main_diagonal(self):\r\n for row in range(self.SIZE):\r\n self.check_solvability(self.matrix[row][row], self.matrix[row][-1])\r\n self.matrix[row][-1] = self.divide(self.matrix[row][-1], self.matrix[row][row])\r\n self.matrix[row][row] = self.divide(self.matrix[row][row], self.matrix[row][row])", "def test_diagonal_gate_wrapper(self):\n shots = 100\n lsts = [\n [1, -1],\n [1, -1, -1, 1],\n [1.0, -1.0, -1.0, 1.0]]\n circuits = [ ref_diagonal_gate.diagonal_gate_circuits_deterministic_w(state)\n for state in [ np.array(lst, dtype=t) \n for t in (None, float, np.float32, complex, np.complex64)\n for lst in lsts ] ]\n result = execute(circuits, self.SIMULATOR, shots=shots).result()\n self.assertTrue(getattr(result, 'success', False))", "def offDiagPairs(self):\n return np.transpose(np.nonzero(np.triu(self.LaplacianMatrix,k=2)))", "def get_diagonal(self, parameters=None, space_group='default'):\n operators = self.get_ops(parameters, space_group)\n D = assembly.BlockedOperator(self.N * 2, self.N * 2)\n for (row, col), ops_dict in operators.items():\n if row == col:\n ops = list(ops_dict.values())\n op_sum = sum(ops[1:], ops[0])\n assign_in_place_subblock(D, op_sum, row, col)\n return D", "def get_reverse_diagonal(self):\n start = 0\n end = 9\n step = 4\n\n return self.grid[start:end:step] # array[start:end:step]", "def test_diagonal(self, nqubits):\n dim = 2**nqubits\n mat = np.diag(np.exp(1j * np.random.normal(size=dim)))\n circ = self.qsd(mat, opt_a1=True, opt_a2=False)\n ccirc = transpile(circ, basis_gates=[\"u\", \"cx\"], optimization_level=0)\n self.assertTrue(np.allclose(mat, Operator(ccirc).data))\n if nqubits > 1:\n expected_cx = self._qsd_l2_cx_count(nqubits) - self._qsd_l2_a1_mod(nqubits)\n self.assertLessEqual(ccirc.count_ops().get(\"cx\"), expected_cx)", "def det_matrix(self):\n return np.linalg.det(self.take_matrix())", "def main():\n diagonals_in_hd()", "def determinant(matrix):\n if type(matrix) is not list or len(matrix) == 0:\n raise TypeError(\"matrix must be a list of lists\")\n\n if len(matrix) == 1 and len(matrix[0]) == 0:\n return 1\n\n for i in matrix:\n if type(i) is not list:\n raise TypeError(\"matrix must be a list of lists\")\n\n if len(i) != len(matrix):\n raise ValueError(\"matrix must be a square matrix\")\n\n if len(matrix) == 1:\n return matrix[0][0]\n\n if len(matrix) == 2:\n return (matrix[0][0] * matrix[1][1]) - (matrix[0][1]\n * matrix[1][0])\n deter = 0\n\n for j, k in enumerate(matrix[0]):\n rows = [r for r in matrix[1:]]\n sub = []\n for r in rows:\n sub.append([r[a] for a in range(len(matrix)) if a != j])\n deter += k * (-1) ** j * determinant(sub)\n return deter", "def diagonalsPos (matrix, cols, rows):\r\n for diagonal in ([(j, i - j) for j in range(cols)] for i in range(cols + rows -1)):\r\n yield [matrix[i][j] for i, j in diagonal if i >= 0 and j >= 0 and i < cols and j < rows]", "def _det(mat):\n\n return (mat[0][0] * (mat[1][1] * mat[2][2] - mat[1][2] * mat[2][1])\n + mat[0][1] * (mat[1][2] * mat[2][0] - mat[1][0] *\n mat[2][2]) + mat[0][2] * (mat[1][0] * mat[2][1] -\n mat[1][1] * mat[2][0]))", "def Determinant(matrix, mul):\r\n width = len(matrix)\r\n # Stop Conditions\r\n if width == 1:\r\n return mul * matrix[0][0]\r\n else:\r\n sign = -1\r\n det = 0\r\n for i in range(width):\r\n m = []\r\n for j in range(1, width):\r\n buff = []\r\n for k in range(width):\r\n if k != i:\r\n buff.append(matrix[j][k])\r\n m.append(buff)\r\n # Change the sign of the multiply number\r\n sign *= -1\r\n # Recursive call 
for determinant calculation\r\n det = det + mul * Determinant(m, sign * matrix[0][i])\r\n return det", "def block_diagonals(board: Board, pawn_value: int, x: int, y: int) -> None:\n\n for row_index, row in enumerate(board):\n for col_index, _ in enumerate(row):\n if abs(row_index-y) is abs(col_index-x):\n block(board, pawn_value, col_index, row_index)", "def __fcc_transition_diagonals(self) -> cq.cq.Workplane:\n corner_points = self.unit_cell_size * np.array(\n [(0, 0),\n (1, 0),\n (1, 0),\n (1, 1),\n (1, 1),\n (0, 1)]\n )\n result = (\n cq.Workplane(\"XY\")\n .pushPoints(corner_points)\n .eachpointAdaptive(\n create_fcc_diagonal_strut,\n callback_extra_args = [\n {\"unit_cell_size\": self.unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": 0,\n \"angle_y\": 45},\n {\"unit_cell_size\": self.unit_cell_size * 0.5,\n \"radius\": self.strut_radius,\n \"angle_x\": 0,\n \"angle_y\": - 45},\n {\"unit_cell_size\": self.unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": -45,\n \"angle_y\": 0},\n {\"unit_cell_size\": self.unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": 45,\n \"angle_y\": 0},\n {\"unit_cell_size\": self.unit_cell_size * 0.5,\n \"radius\": self.strut_radius,\n \"angle_x\": 0,\n \"angle_y\": - 45},\n {\"unit_cell_size\": self.unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": 0,\n \"angle_y\": 45}\n ],\n useLocalCoords = True\n )\n )\n return result", "def diagonal(self):\n M = self.rep\n m, n = self.shape\n return [M[i, i] for i in range(min(m, n))]", "def __diagonal(self,rows,cols):\n diag1 = [row + col for row,col in zip(rows,cols)]\n\n #reverse column elements\n diag2 = [row + col for row,col in zip(rows,cols[::-1])]\n\n return [diag1,diag2]", "def test_evaluate_block_diag_metric_tensor(self, sample_circuit, tol):\r\n dev, circuit, non_parametrized_layer, a, b, c = sample_circuit\r\n\r\n params = [-0.282203, 0.145554, 0.331624, -0.163907, 0.57662, 0.081272]\r\n x, y, z, h, g, f = params\r\n\r\n G = circuit.metric_tensor(*params)\r\n\r\n # ============================================\r\n # Test block diag metric tensor of first layer is correct.\r\n # We do this by comparing against the known analytic result.\r\n # First layer includes the non_parametrized_layer,\r\n # followed by observables corresponding to generators of:\r\n # qml.RX(x, wires=0)\r\n # qml.RY(y, wires=1)\r\n # qml.RZ(z, wires=2)\r\n\r\n G1 = np.zeros([3, 3])\r\n\r\n # diag elements\r\n G1[0, 0] = np.sin(a) ** 2 / 4\r\n G1[1, 1] = (\r\n 16 * np.cos(a) ** 2 * np.sin(b) ** 3 * np.cos(b) * np.sin(2 * c)\r\n + np.cos(2 * b) * (2 - 8 * np.cos(a) ** 2 * np.sin(b) ** 2 * np.cos(2 * c))\r\n + np.cos(2 * (a - b))\r\n + np.cos(2 * (a + b))\r\n - 2 * np.cos(2 * a)\r\n + 14\r\n ) / 64\r\n G1[2, 2] = (3 - np.cos(2 * a) - 2 * np.cos(a) ** 2 * np.cos(2 * (b + c))) / 16\r\n\r\n # off diag elements\r\n G1[0, 1] = np.sin(a) ** 2 * np.sin(b) * np.cos(b + c) / 4\r\n G1[0, 2] = np.sin(a) ** 2 * np.cos(b + c) / 4\r\n G1[1, 2] = (\r\n -np.sin(b)\r\n * (\r\n np.cos(2 * (a - b - c))\r\n + np.cos(2 * (a + b + c))\r\n + 2 * np.cos(2 * a)\r\n + 2 * np.cos(2 * (b + c))\r\n - 6\r\n )\r\n / 32\r\n )\r\n\r\n G1[1, 0] = G1[0, 1]\r\n G1[2, 0] = G1[0, 2]\r\n G1[2, 1] = G1[1, 2]\r\n\r\n assert np.allclose(G[:3, :3], G1, atol=tol, rtol=0)\r\n\r\n # =============================================\r\n # Test block diag metric tensor of second layer is correct.\r\n # We do this by computing the required expectation values\r\n # numerically using multiple circuits.\r\n # The second layer includes the 
non_parametrized_layer,\r\n # RX, RY, RZ gates (x, y, z params), and a 2nd non_parametrized_layer.\r\n #\r\n # Observables are the generators of:\r\n # qml.RY(f, wires=1)\r\n # qml.RZ(g, wires=2)\r\n G2 = np.zeros([2, 2])\r\n\r\n def layer2_diag(x, y, z, h, g, f):\r\n non_parametrized_layer(a, b, c)\r\n qml.RX(x, wires=0)\r\n qml.RY(y, wires=1)\r\n qml.RZ(z, wires=2)\r\n non_parametrized_layer(a, b, c)\r\n return qml.var(qml.PauliZ(2)), qml.var(qml.PauliY(1))\r\n\r\n layer2_diag = qml.QNode(layer2_diag, dev)\r\n\r\n def layer2_off_diag_first_order(x, y, z, h, g, f):\r\n non_parametrized_layer(a, b, c)\r\n qml.RX(x, wires=0)\r\n qml.RY(y, wires=1)\r\n qml.RZ(z, wires=2)\r\n non_parametrized_layer(a, b, c)\r\n return qml.expval(qml.PauliZ(2)), qml.expval(qml.PauliY(1))\r\n\r\n layer2_off_diag_first_order = qml.QNode(layer2_off_diag_first_order, dev)\r\n\r\n def layer2_off_diag_second_order(x, y, z, h, g, f):\r\n non_parametrized_layer(a, b, c)\r\n qml.RX(x, wires=0)\r\n qml.RY(y, wires=1)\r\n qml.RZ(z, wires=2)\r\n non_parametrized_layer(a, b, c)\r\n return qml.expval(qml.Hermitian(np.kron(Z, Y), wires=[2, 1]))\r\n\r\n layer2_off_diag_second_order = qml.QNode(layer2_off_diag_second_order, dev)\r\n\r\n # calculate the diagonal terms\r\n varK0, varK1 = layer2_diag(x, y, z, h, g, f)\r\n G2[0, 0] = varK0 / 4\r\n G2[1, 1] = varK1 / 4\r\n\r\n # calculate the off-diagonal terms\r\n exK0, exK1 = layer2_off_diag_first_order(x, y, z, h, g, f)\r\n exK01 = layer2_off_diag_second_order(x, y, z, h, g, f)\r\n\r\n G2[0, 1] = (exK01 - exK0 * exK1) / 4\r\n G2[1, 0] = (exK01 - exK0 * exK1) / 4\r\n\r\n assert np.allclose(G[4:6, 4:6], G2, atol=tol, rtol=0)\r\n\r\n # =============================================\r\n # Test block diag metric tensor of third layer is correct.\r\n # We do this by computing the required expectation values\r\n # numerically.\r\n # The third layer includes the non_parametrized_layer,\r\n # RX, RY, RZ gates (x, y, z params), a 2nd non_parametrized_layer,\r\n # followed by the qml.RY(f, wires=2) operation.\r\n #\r\n # Observable is simply generator of:\r\n # qml.RY(f, wires=2)\r\n #\r\n # Note: since this layer only consists of a single parameter,\r\n # only need to compute a single diagonal element.\r\n\r\n def layer3_diag(x, y, z, h, g, f):\r\n non_parametrized_layer(a, b, c)\r\n qml.RX(x, wires=0)\r\n qml.RY(y, wires=1)\r\n qml.RZ(z, wires=2)\r\n non_parametrized_layer(a, b, c)\r\n qml.RY(f, wires=2)\r\n return qml.var(qml.PauliX(1))\r\n\r\n layer3_diag = qml.QNode(layer3_diag, dev)\r\n G3 = layer3_diag(x, y, z, h, g, f) / 4\r\n assert np.allclose(G[3:4, 3:4], G3, atol=tol, rtol=0)\r\n\r\n # ============================================\r\n # Finally, double check that the entire metric\r\n # tensor is as computed.\r\n\r\n G_expected = block_diag(G1, G3, G2)\r\n assert np.allclose(G, G_expected, atol=tol, rtol=0)", "def Dmat(numpts, delta=1):\n a = 0.5 / delta * ones(numpts)\n a[0] = 0\n a[-2] = 0\n #b=-2./delta**2*ones(numpts); b[0]=0;b[-1]=0\n c = -0.5 / delta * ones(numpts)\n c[1] = 0\n c[-1] = 0\n return sparse.spdiags([a, c], [-1, 1], numpts, numpts)", "def gstate(N, periodic):\n\n # Create Hamiltonian matrix\n H = kronH(N, periodic)\n \n # Diagonalize\n print('Diagonalizing...', end=' ', flush=True)\n w, v = eigsh(H, k=1, which='SA')\n print('Done')\n\n return w[0]", "def Determinant_3x3(A, step_by_step=True ,row=True, n=1):\n \n if A.shape!=(3,3):\n raise ValueError('Dimension of matrix A should be 3x3. 
The input A must be a sp.Matrix of shape (3,3).')\n if n<1 or n>3 or not isinstance(n, int):\n raise ValueError('n should be an integer between 1 and 3.')\n \n # Construct string for determinant of matrix A\n detA_s = sp.latex(A).replace('[','|').replace(']','|')\n \n # To print all the steps\n if step_by_step:\n\n # If we compute the determinant with row n \n if row:\n # Matrix with row i and col j removed (red_matrix(A, i, j))\n A1 = red_matrix(A, n, 1)\n A2 = red_matrix(A, n, 2)\n A3 = red_matrix(A, n, 3)\n detA1_s = sp.latex(A1).replace('[','|').replace(']','|')\n\n detA2_s = sp.latex(A2).replace('[','|').replace(']','|')\n detA3_s = sp.latex(A3).replace('[','|').replace(']','|')\n\n line1 = \"$\" + detA_s + ' = ' + pl_mi(n,1, True) + sp.latex(A[n-1, 0]) + detA1_s + pl_mi(n,2) + \\\n sp.latex(A[n-1, 1]) + detA2_s + pl_mi(n,3) + sp.latex(A[n-1, 2]) + detA3_s + '$'\n\n line2 = '$' + detA_s + ' = ' + pl_mi(n,1, True) + sp.latex(A[n-1, 0]) + \"\\cdot (\" + sp.latex(sp.det(A1)) \\\n +\")\" + pl_mi(n,2) + sp.latex(A[n-1, 1]) + \"\\cdot (\" + sp.latex(sp.det(A2)) + \")\"+ \\\n pl_mi(n,3) + sp.latex(A[n-1, 2]) + \"\\cdot (\" + sp.latex(sp.det(A3)) + ')$'\n line3 = '$' + detA_s + ' = ' + sp.latex(sp.simplify(sp.det(A))) + '$'\n\n # If we compute the determinant with col n \n else:\n # Matrix with row i and col j removed (red_matrix(A, i, j))\n A1 = red_matrix(A, 1, n)\n A2 = red_matrix(A, 2, n)\n A3 = red_matrix(A, 3, n)\n detA1_s = sp.latex(A1).replace('[','|').replace(']','|')\n detA2_s = sp.latex(A2).replace('[','|').replace(']','|')\n detA3_s = sp.latex(A3).replace('[','|').replace(']','|')\n\n line1 = \"$\" + detA_s + ' = ' + pl_mi(n,1, True) + brackets(A[0, n-1]) + detA1_s + pl_mi(n,2) + \\\n brackets(A[1, n-1]) + detA2_s + pl_mi(n,3) + brackets(A[2, n-1]) + detA3_s + '$'\n\n line2 = '$' + detA_s + ' = ' + pl_mi(n,1, True) + brackets(A[0, n-1]) + \"\\cdot (\" + sp.latex(sp.det(A1))\\\n +\")\" + pl_mi(n,2) + brackets(A[1, n-1]) + \"\\cdot (\" + sp.latex(sp.det(A2)) + \")\"+ \\\n pl_mi(n,3) + brackets(A[2, n-1]) + \"\\cdot (\" + sp.latex(sp.det(A3)) + ')$'\n\n line3 = '$' + detA_s + ' = ' + sp.latex(sp.simplify(sp.det(A))) + '$'\n\n # Display step by step computation of determinant\n display(Latex(line1))\n display(Latex(line2))\n display(Latex(line3))\n # Only print the determinant without any step\n else:\n display(Latex(\"$\" + detA_s + \"=\" + sp.latex(sp.det(A)) + \"$\"))", "def test_take_fifth_diagonal_below_main(self):\n offset = -5\n matrix = [['.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.'],\n ['.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.'],\n ['.', '.', '.', '.', '.', 'x', '.', '.', '.', '.', '.', '.', '.', '.', '.'],\n ['.', '.', '.', '.', '.', '.', 'o', '.', '.', '.', '.', '.', '.', '.', '.'],\n ['.', '.', '.', '.', '.', '.', '.', 'o', '.', '.', '.', '.', '.', '.', '.'],\n ['.', '.', '.', '.', '.', '.', '.', '.', 'x', '.', '.', '.', '.', '.', '.'],\n ['.', 'o', '.', '.', '.', '.', '.', '.', '.', 'x', '.', '.', '.', '.', '.'],\n ['.', '.', 'o', '.', '.', '.', '.', '.', '.', '.', 'o', '.', '.', '.', '.'],\n ['.', '.', '.', 'o', '.', '.', '.', '.', '.', '.', '.', 'x', '.', '.', '.'],\n ['.', '.', '.', '.', 'o', '.', '.', '.', '.', '.', '.', '.', 'o', '.', '.'],\n ['.', '.', '.', '.', '.', 'o', '.', '.', '.', '.', '.', '.', '.', '.', '.'],\n ['.', '.', '.', '.', '.', '.', 'o', '.', '.', '.', '.', '.', '.', '.', '.'],\n ['.', '.', '.', '.', '.', '.', '.', 'o', '.', '.', '.', '.', '.', '.', '.'],\n ['.', '.', '.', '.', '.', '.', 
'.', '.', 'o', '.', '.', '.', '.', '.', '.'],\n ['.', '.', '.', '.', '.', '.', '.', '.', '.', 'o', '.', '.', '.', '.', '.']]\n table = self._create_table(matrix)\n\n expected = ['.', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o']\n current_list = utils.get_diagonal(table, offset)\n\n self.assertListEqual(expected, current_list)", "def determinant(self):\n if not self.isSquare():\n raise ValueError(\"Determinant is not defined for non-square matrix\")\n if (self._height == 1 and self._width == 1):\n return self._value[0][0]\n returnvalue = 0\n for i in range(self._width):\n returnvalue += self._value[0][i] * self.cofactor(0, i)\n return returnvalue", "def create_diags(self):\n\n num_diags = self.rows + self.cols - 2\n diag_counts = [0 for i in range(num_diags)]\n for diag_index in range(num_diags):\n first = (0,0)\n second = (0,0)\n if diag_index < self.rows - 1:\n first = (diag_index+1,0)\n elif diag_index == self.rows - 1:\n first = (diag_index,0)\n else:\n first = (self.rows-1,diag_index-self.rows+1)\n if diag_index < self.cols - 1:\n second = (0,diag_index+1)\n elif diag_index == self.cols - 1:\n second = (0,diag_index)\n else:\n second = (diag_index-self.cols+1,self.cols-1)\n #print str(first) + \" \" + str(second)\n diag_counts[diag_index] = dist_points(first,second) \n \n \"\"\"holds the sum of edges in diagonals previous to a given edge\"\"\"\n diag_full = [0 for i in range(num_diags + 1)]\n for i in range(1,num_diags+1):\n diag_full[i] = diag_full[i-1] + diag_counts[i-1]\n\n #print diag_counts\n #print diag_full\n return diag_full", "def diago_triangle(self,inv):\n [r,c] = self.D\n assert c == 2*r, \"Le tableau doit être un rectangle L x (2L)\"\n m = r - 1\n S = self\n T = zeros(r,c)\n while m >= 0:\n pivot = S[m,m]\n assert pivot !=0, \"matrice non inversible\"\n for k in range(m-1,-1,-1):\n if S[k,m] != 0:\n S = S.comb_lignes(pivot, -S[k,m],k,m)\n T = T.remplace_ligneg(m,S.F)\n S = S.decoupe_bas()\n m -= 1\n for k in range(r):\n T = T.mult_ligne(inv(T[k,r-1]),k)\n return T", "def hyperdiagonal(coords):\n \n mini = coords.min(axis=0)\n maxi = coords.max(axis=0)\n dist = (maxi - mini)**2\n dist = np.sqrt(dist.sum())\n return dist", "def diag(B,s,H,ia,ib,ic,chia,chic):\n # Get a guess for the ground state based on the old MPS\n d = B[0].shape[0]\n theta0 = np.tensordot(np.diag(s[ia]),np.tensordot(B[ib],B[ic],axes=(2,1)),axes=(1,1))\n theta0 = np.reshape(np.transpose(theta0,(1,0,2,3)),((chia*chic)*(d**2)))\n\n # Diagonalize Hamiltonian\n e0,v0 = arp.eigsh(H,k=1,which='SA',return_eigenvectors=True,v0=theta0,ncv=20)\n \n return np.reshape(v0.squeeze(),(d*chia,d*chic)),e0", "def get_A3(n):\n # Create a matrix B\n Bdiag = -60 * np.eye(n)\n Bupper1 = np.diag([16] * (n - 1), 1)\n Bupper2 = np.diag([-1] * (n - 2), 2)\n Blower1 = np.diag([16] * (n - 1), -1)\n Blower2 = np.diag([-1] * (n - 2), -2)\n B = Bdiag + Bupper1 + Blower1 + Bupper2 + Blower2\n\n # Creat a list [B,B,B,...,B] with n Bs\n blst = [B] * n\n\n # Unpack and rearrange list of Bs into diagonal of matrix A\n A = sp.linalg.block_diag(*blst)\n\n # Upper diagonal array offset by n: we've got (n-1) I blocks\n # each containing n ones\n Dupper1 = np.diag(16*np.ones(n * (n - 1)), n)\n Dupper2 = np.diag(-1*np.ones(n * (n - 2)), 2*n)\n\n # Lower diagonal array offset by -n\n Dlower1 = np.diag(16*np.ones(n * (n - 1)), -n)\n Dlower2 = np.diag(-1*np.ones(n * (n - 2)), -2*n)\n A += Dupper1 + Dlower1 + Dupper2 + Dlower2\n\n # Print the A matrix\n # print A.astype(int) \n return A", "def __bcc_diagonals(self) -> cq.cq.Workplane:\n # In a cuboid 
ABCDA1B1C1D1 this is the angle C1AD\n #angle_C1AD = 90 - degrees(acos(2**-.5))\n angle_C1AD = 90 - degrees(acos(0.5))\n angle_CAD = 90 - degrees(acos(sqrt(2/3)))\n pseudo_unit_cell_size = sqrt(2/3)*self.unit_cell_size\n corner_points = np.array(\n [(0, 0),\n (self.bcc_unit_cell_size, 0),\n (self.bcc_unit_cell_size, self.unit_cell_size),\n (0, self.unit_cell_size)]\n )\n result = (\n cq.Workplane(\"XY\")\n .pushPoints(corner_points)\n .eachpointAdaptive(\n create_bcc_diagonal_strut,\n callback_extra_args = [\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": angle_C1AD}\n ],\n useLocalCoords = True\n )\n )\n return result", "def diagonalizing_gates(self):\n raise NotImplementedError", "def diagonal(self) -> Float[Array, \" N\"]:\n return self.value * jnp.ones(self.size)", "def determinant(matrix):\n if matrix == [[]]:\n return 1\n if type(matrix) is not list or len(matrix) < 1 or\\\n not all(isinstance(x, list) for x in matrix):\n raise TypeError(\"matrix must be a list of lists\")\n if not all(len(matrix) == len(x) for x in matrix):\n raise ValueError(\"matrix must be a square matrix\")\n copy = list(map(list, matrix))\n dim = len(matrix)\n if dim == 1:\n return matrix[0][0]\n elif dim == 2:\n return matrix[0][0] * matrix[1][1] - matrix[1][0] * matrix[0][1]\n else:\n for cur in range(dim):\n for i in range(cur + 1, dim):\n if copy[cur][cur] == 0:\n copy[cur][cur] = 1.0e-10\n curScaler = copy[i][cur] / copy[cur][cur]\n for j in range(dim):\n copy[i][j] = copy[i][j] - curScaler * copy[cur][j]\n det = 1\n for i in range(dim):\n det *= copy[i][i]\n return round(det)", "def determinant(self):\n if not self.is_square():\n raise(ValueError, \"Cannot calculate determinant of non-square matrix.\")\n if self.h > 2:\n raise(NotImplementedError, \"Calculating determinant not implemented for matrices largerer than 2x2.\")\n \n # TODO - your code here\n if self.h == 1:\n return self.g[0][0];\n else:\n return self.g[0][0]*self.g[1][1]-self.g[0][1]*self.g[1][0];", "def make_block_diag(M, num_reps, out=None):\n if out is None:\n big_M = np.zeros((M.shape[0] * num_reps, M.shape[1] * num_reps))\n else:\n big_M = out\n for i in range(num_reps):\n big_M[i * M.shape[0]:(i + 1) * M.shape[0], i * M.shape[1]:(i + 1) * M.shape[1]] = M\n return big_M", "def build_cumulative_downhill_matrix(self):\n\n import time\n from scipy import sparse as sparse\n\n\n walltime = time.clock()\n\n downHillaccuMat = self.downhillMat.copy() \n accuM = self.downhillMat.copy() # work matrix\n\n DX = np.ones(self.tri.npoints) # measure when all the info has been propagated out.\n previous_nonzero = 0\n it = 0\n\n while np.count_nonzero(DX) != previous_nonzero:\n accuM = accuM.dot(self.downhillMat)\n downHillaccuMat = downHillaccuMat + accuM \n previous_nonzero = np.count_nonzero(DX)\n\n DX = self.downhillMat.dot(DX) \n\n it += 1\n \n\n print \" - Dense downhill matrix storage time \", time.clock() - walltime\n print \" - Maximum path length \",it\n\n walltime = time.clock()\n\n\n # Turn this into a loop !\n\n A1 = self.downhillMat.tocsr()\n A2 = A1.dot(A1)\n A2a = A1 + 
A2\n A4 = A2.dot(A2)\n A4a = A2a + A2.dot(A2a)\n A8 = A4.dot(A4)\n A8a = A4a + A4.dot(A4a)\n A16 = A8.dot(A8)\n A16a = A8a + A8.dot(A8a)\n A32 = A16.dot(A16)\n A32a = A16a + A16.dot(A16a)\n A64 = A32.dot(A32)\n A64a = A32a + A32.dot(A32a)\n A128 = A64.dot(A64)\n A128a = A64a + A64.dot(A64a)\n\n print \"A32.nnz = \", A32.nnz\n print \"A64.nnz = \", A64.nnz\n print \"A128.nnz = \", A128.nnz\n\n\n print \" - Dense downhill matrix storage time v2\", time.clock() - walltime\n print \" - Maximum path length \", 128\n\n\n downHillaccuMat = downHillaccuMat + sparse.identity(self.tri.npoints, format='csr')\n\n downHillaccuMat2 = A128a + sparse.identity(self.tri.npoints, format='csr')\n\n\n return downHillaccuMat, downHillaccuMat2", "def hermitian(matrix):\n return sp.allclose(matrix, sp.conj(matrix.T))", "def matrix_neumann2D(Omega,Nx,Ny):\r\n \r\n hx = (Omega[1]-Omega[0])/Nx\r\n hy = (Omega[3]-Omega[2])/Ny\r\n hx2 = hx*hx\r\n hy2 = hy*hy\r\n\r\n # Les inconnues sont numérotés de 0 à Nx suivant x et 0 à Ny\r\n # suivant y. La taille du problème est donc (Nx+1)*(Ny+1).\r\n\r\n # Pour -Laplacien(u), la matrice est constituée de (Ny+1)x(Ny+1)\r\n # blocs de taille (Nx+1)x(Nx+1), de la forme\r\n #\r\n # A = [ A0 B ]\r\n # [ B A1 B ]\r\n # [ B A1 B ]\r\n # [ . . . ]\r\n # [ B A1 B ]\r\n # [ B A0 ]\r\n #\r\n # Au final, on peut commencer à remplir avec des diagonales\r\n N = (1+Nx)*(1+Ny)\r\n diags = np.zeros((5,N))\r\n # La diagonale est constante\r\n diags[2,:] = 2./hx2+2./hy2\r\n # Diagonale -1\r\n diags[1,:] = -1./hx2 # en général\r\n diags[1,np.arange(Nx,N,Nx+1)] = 0. # bord gauche\r\n diags[1,np.arange(Nx-1,N,Nx+1)] = -2./hx2 # bord droit\r\n # Diagonale +1\r\n diags[3,:] = -1./hx2 # en général\r\n diags[3,np.arange(0,N,Nx+1)] = 0. # bord droit\r\n diags[3,np.arange(1,N,Nx+1)] = -2./hx2 # bord gauche\r\n # Diagonale -(Nx+1)\r\n diags[0,:] = -1./hy2 # en général\r\n diags[0,(Nx+1)*(Ny-1):(Nx+1)*Ny] = -2./hy2 # bord bas\r\n # Diagonale +(Nx+1)\r\n diags[4,:] = -1./hy2 # en général\r\n diags[4,Nx+1:2*(Nx+1)] = -2./hy2 # bord haut\r\n\r\n # Construction de la matrice creuse de u --> -Laplacien(u)\r\n A = sp.spdiags(diags,[-(Nx+1),-1,0,1,(Nx+1)], (Nx+1)*(Ny+1),\r\n (Nx+1)*(Ny+1), format=\"csc\")\r\n\r\n return A", "def diagonal(matrix):\n if sp.sparse.issparse(matrix):\n diag = np.array(matrix.diagonal())\n else:\n diag = np.diagonal(matrix).copy()\n return diag", "def diagonal_size(self):\n b = self.GetBounds()\n return np.sqrt((b[1] - b[0]) ** 2 + (b[3] - b[2]) ** 2)", "def diagonal(step='Metropolis', iters=5000):\n X = mc.Uniform('X', lower=-1., upper=1., value=[0., 0.])\n\n @mc.potential\n def near_diag(X=X):\n if abs(X[0] - X[1]) < .1:\n return 0\n else:\n return -inf\n\n mod = setup_and_sample(vars(), step, iters)\n mod.shape = pl.array([[-1,-1], [-1,-.9], [.9,1], [1,1], [1,.9], [-.9,-1], [-1,-1]])\n mod.true_mean = [0,0]\n mod.true_iqr = ['(-.5,.5)', '(-.5,5)']\n return mod", "def is_diagonal(self):\n return self.rep.is_diagonal()", "def __bcc_top_diagonals(self) -> cq.cq.Workplane:\n # In a cuboid ABCDA1B1C1D1 this is the angle C1AD\n #angle_C1AD = 90 - degrees(acos(2**-.5))\n angle_C1AD = 90 - degrees(acos(0.5))\n angle_CAD = 90 - degrees(acos(sqrt(2/3)))\n pseudo_unit_cell_size = sqrt(2/3)*self.unit_cell_size\n corner_points = np.array(\n [(0, 0),\n (self.bcc_unit_cell_size, 0),\n (self.bcc_unit_cell_size, self.unit_cell_size),\n (0, self.unit_cell_size)]\n )\n result = (\n cq.Workplane(\"XY\")\n .pushPoints(corner_points)\n .eachpointAdaptive(\n create_bcc_diagonal_strut,\n 
callback_extra_args = [\n {\"unit_cell_size\": 0.5 * pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": 0.5 * pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": angle_C1AD}\n ],\n useLocalCoords = True\n )\n )\n return result", "def __bcc_left_diagonals(self) -> cq.cq.Workplane:\n # In a cuboid ABCDA1B1C1D1 this is the angle C1AD\n #angle_C1AD = 90 - degrees(acos(2**-.5))\n angle_C1AD = 90 - degrees(acos(0.5))\n angle_CAD = 90 - degrees(acos(sqrt(2/3)))\n pseudo_unit_cell_size = sqrt(2/3)*self.unit_cell_size\n corner_points = np.array(\n [(0, 0),\n (self.bcc_unit_cell_size, 0),\n (self.bcc_unit_cell_size, self.unit_cell_size),\n (0, self.unit_cell_size)]\n )\n result = (\n cq.Workplane(\"XY\")\n .pushPoints(corner_points)\n .eachpointAdaptive(\n create_bcc_diagonal_strut,\n callback_extra_args = [\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": angle_C1AD},\n {\"unit_cell_size\": 0.5 * pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": 0.5 * pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": angle_C1AD}\n ],\n useLocalCoords = True\n )\n )\n return result", "def tridiag_det(A):\n if (type(A) is not np.ndarray):\n raise ValueError(\"A must be a np.ndarray\")\n if (A.shape[0] is not A.shape[1]):\n raise ValueError(\"A must be square.\")\n N = A.shape[0]\n f = np.empty(N+2)\n f[0] = 0\n f[1] = 1\n f[2] = A[1,1]\n for n in range(3,N+2):\n f[n] = A[n-2,n-2] * f[n-1] - A[n-2,n-3] * A[n-3,n-2] * f[n-2]\n\n return(f[-1])", "def matrix_det(A):\n\tx = A[0,0]*A[1,1]*A[2,2] + A[0,1]*A[1,2]*A[2,0] + A[0,2]*A[1,0]*A[2,1]\n\ty = A[0,0]*A[1,2]*A[2,1] + A[0,1]*A[1,0]*A[2,2] + A[0,2]*A[1,1]*A[2,0]\n\treturn x - y", "def diag_inv(A):\n return diag(1. 
/ diag(A))", "def test_take_eighth_opposite_diagonal_below_main(self):\n offset = -8\n matrix = [['.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', 'o', '.', '.'],\n ['.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.'],\n ['.', '.', '.', '.', '.', 'x', '.', '.', '.', '.', 'x', '.', '.', '.', '.'],\n ['.', '.', '.', '.', '.', '.', 'o', '.', '.', 'x', '.', '.', '.', '.', '.'],\n ['.', '.', '.', '.', '.', '.', '.', 'o', 'o', '.', '.', '.', '.', '.', '.'],\n ['.', '.', '.', '.', '.', '.', '.', 'x', 'x', '.', '.', '.', '.', '.', '.'],\n ['.', 'o', '.', '.', '.', '.', '.', '.', '.', 'x', '.', '.', '.', '.', '.'],\n ['.', '.', 'o', '.', '.', '.', '.', '.', '.', '.', 'o', '.', '.', '.', '.'],\n ['.', '.', '.', 'o', '.', '.', '.', '.', '.', '.', '.', 'x', '.', '.', 'o'],\n ['.', '.', '.', 'x', 'o', '.', '.', '.', '.', '.', '.', '.', 'o', '.', '.'],\n ['.', '.', '.', '.', '.', 'o', '.', '.', '.', '.', '.', '.', 'x', '.', '.'],\n ['.', '.', '.', '.', '.', '.', 'o', '.', '.', '.', '.', 'x', '.', '.', '.'],\n ['.', '.', '.', '.', '.', '.', '.', 'o', '.', '.', 'x', '.', '.', '.', '.'],\n ['.', '.', '.', '.', '.', '.', '.', '.', 'o', 'x', '.', '.', '.', '.', '.'],\n ['.', '.', '.', '.', '.', '.', '.', '.', 'x', 'o', '.', '.', '.', '.', '.']]\n table = self._create_table(matrix)\n\n expected = ['x', 'x', 'x', 'x', 'x', '.', 'o']\n current_list = utils.get_opposite_diagonal(table, offset)\n\n self.assertListEqual(expected, current_list)", "def determinant(self):\n if self.L is None or self.U is None:\n self.decomposeLU()\n\n retval = 1.0\n for i in range(self.rows):\n retval *= self.L[i, i] * self.U[i, i]\n return retval", "def diagonalize(operator):\n eig_values, eig_vecs = la.eigh(operator)\n # eig_values -= np.amin(eig_values)\n return eig_values, eig_vecs", "def diagonals(self):\n left_top_shifts = map(lambda i: (-(i + 1), -(i + 1)), range(min(\n self.left_distance, self.top_distance)))\n left_bottom_shifts = map(lambda i: (-(i + 1), +(i + 1)), range(min(\n self.left_distance, self.bottom_distance)))\n right_top_shifts = map(lambda i: (+(i + 1), -(i + 1)), range(min(\n self.right_distance, self.top_distance)))\n right_bottom_shifts = map(lambda i: (+(i + 1), +(i + 1)), range(min(\n self.right_distance, self.bottom_distance)))\n return set(chain(\n left_top_shifts, left_bottom_shifts,\n right_top_shifts, right_bottom_shifts))", "def getdiag(self):\n out = []\n for x in xrange(0, self.lendiag()):\n out.append(self.retrieve(x))\n return out", "def diagonal(t, x, y):\n from math import atan2, sqrt, pi\n angle = atan2(y, x) * 180 / pi\n dist = sqrt(x**2 + y**2)\n lt(t, angle)\n fdbk(t, dist)\n rt(t, angle)", "def cayley_menger_det_no_linalg(x2, y2, z2, xb2, yb2, zb2):\n xs = x2 + xb2\n ys = y2 + yb2\n zs = z2 + zb2\n buf1 = ys + zs\n buf1 -= xs\n buf2 = x2 * xb2\n buf1 *= buf2 # buf1 has first term, halved\n np.multiply(y2, yb2, out=buf2)\n buf3 = xs + zs\n buf3 -= ys\n buf2 *= buf3 # buf2 has second term\n buf1 += buf2 # buf1 is sum of two terms, halved\n np.multiply(z2, zb2, out=buf3)\n np.add(xs, ys, out=buf2) # reuse buf2\n buf2 -= zs\n buf3 *= buf2 # buf3 has third term\n buf1 += buf3 # buf1 is sum of 3 first terms, halved\n buf1 *= 2\n np.subtract(x2, xb2, out=buf2)\n np.subtract(y2, yb2, out=buf3)\n buf2 *= buf3\n np.subtract(z2, zb2, out=buf3)\n buf2 *= buf3\n buf1 += buf2 # buf1 is sum of 4 first terms\n np.multiply(xs, ys, out=buf3)\n buf3 *= zs\n buf1 -= buf3\n return buf1", "def generate_diagonal(n, l):\n res = []\n arr = [1] * l\n l = l+1\n for diag in 
range(n):\n res = []\n for index in range(1, l):\n summed = sum(arr[:index]) # sum is really slow for large numbers\n res.append(summed)\n arr = res\n return (arr)", "def det(self,mat):\n if(len(mat[0])==len(mat)):\n result = np.linalg.det(mat)\n self.determinant = result\n return self.determinant\n else:\n print(\"Not a square Matrix\")", "def modalDiffMatrix(n):\n k = np.arange(n)\n a = (-1)**k\n A = sp.triu(1-np.outer(a,a))\n D = np.dot(A,np.diag(k))\n D[0,:] = D[0,:]/2\n return D", "def get_diagonal(self, parameters, space_group='preconditioner'):\n return super(RWGDominantSystem, self).get_diagonal(parameters, space_group).weak_form()", "def det(self):\n if self.x == 0 or self.y == 0:\n return None\n elif self.x == 1 or self.y == 1:\n return self.retrieve(0,0)\n else:\n out = 0.0\n for x in xrange(0, self.x):\n out += self.retrieve(0,x)*self.C(0,x)\n return out", "def test_take_third_diagonal_above_main(self):\n offset = 3\n matrix = [['.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.'],\n ['.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.'],\n ['.', '.', '.', '.', '.', 'x', '.', '.', '.', '.', '.', '.', '.', '.', '.'],\n ['.', '.', '.', '.', '.', '.', 'o', '.', '.', '.', '.', '.', '.', '.', '.'],\n ['.', '.', '.', '.', '.', '.', '.', 'o', '.', '.', '.', '.', '.', '.', '.'],\n ['.', '.', '.', '.', '.', '.', '.', '.', 'x', '.', '.', '.', '.', '.', '.'],\n ['.', '.', '.', '.', '.', '.', '.', '.', '.', 'x', '.', '.', '.', '.', '.'],\n ['.', '.', '.', '.', '.', '.', '.', '.', '.', '.', 'o', '.', '.', '.', '.'],\n ['.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', 'x', '.', '.', '.'],\n ['.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', 'o', '.', '.'],\n ['.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.'],\n ['.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.'],\n ['.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.'],\n ['.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.'],\n ['.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.']]\n table = self._create_table(matrix)\n\n expected = ['.', '.', 'x', 'o', 'o', 'x', 'x', 'o', 'x', 'o', '.', '.']\n current_list = utils.get_diagonal(table, offset)\n\n self.assertListEqual(expected, current_list)", "def check_diagonal_dominant(self):\r\n\r\n for i in range(self.SIZE):\r\n row_sum = 0\r\n for j in range(self.SIZE):\r\n if i != j:\r\n row_sum += abs(self.matrix[i][j])\r\n if abs(self.matrix[i][i]) < row_sum:\r\n if not self.has_error:\r\n self.error += NOT_DIAGONALLY_DOMINANT + ', '\r\n return", "def get_diagonals(board, row, column):\n fdiagonal, bdiagonal = get_diagonal_points(board, row, column)\n diag1 = [board[record[0]][record[1]] for record in fdiagonal]\n diag2 = [board[record[0]][record[1]] for record in bdiagonal]\n return diag1, diag2", "def laplacian(A):\n #calculate D by creating a diagonal matrix with the column sum of A\n D = np.diag(A.sum(axis=0))\n return D - A", "def det_matrix_2x2(m: list):\n return m[0][0]*m[1][1] - m[0][1]*m[1][0]", "def smith_nf(matrix):\n\n A=np.copy(matrix)\n if (np.around(A) != A).any():\n raise Exception('This function requires integer input.')\n\n # This looks much like an SVD algorithm that first bidiagonalizes\n # A by Givens rotations and then chases zeros, except for\n # the construction of the 2 by 2 elementary transformation.\n\n m, n = A.shape\n\n S = A\n U = np.eye(m)\n V = np.eye(n)\n\n # Bidiagonalize S with elementary Hermite 
transforms.\n for j in range(min(m, n)):\n # Zero column j below the diagonal.\n for i in range(j+1, m):\n if S[i, j]:\n # Construct an elementary Hermite transformation E\n # to zero S(i,j) by combining rows i and j.\n E = ehermite(S[j, j], S[i, j])\n # Apply the transform to S and U.\n S[[j, i], :] = np.dot(E, S[[j, i], :])\n # U[:, [j, i]] = U[:, [j, i]] / E\n U[:, [j, i]] = left_matrix_division(U[:, [j, i]], E) # solving the left matrix division\n\n # % Zero row j after the superdiagonal.\n for i in range(j+2, n):\n if S[j, i]:\n # Construct an elementary Hermite transformation E\n # to zero S(j,i) by combining columns j+1 and i.\n E = ehermite(S[j, j+1], S[j, i])\n # Apply the transform to S and V.\n S[:, [j+1, i]] = np.dot(S[:, [j+1, i]], E.T)\n # V[:, [j+1, i]] = V[:, [j+1, i]] / E\n V[:, [j+1, i]] = left_matrix_division(V[:, [j+1, i]], E) # solving the left matrix division\n\n # Now S is upper bidiagonal.\n # Chase the superdiagonal nonzeros away.\n\n D = np.diag(S, 1)\n while any(D):\n b = min(np.where(D))[0]\n # Start chasing bulge at first nonzero superdiagonal element.\n # To guarantee reduction in S(b,b), first make S(b,b) positive\n # and make S(b,b+1) nonnegative and less than S(b,b).\n if S[b, b] < 0:\n S[b, :] = -S[b, :]\n U[:, b] = -U[:, b]\n\n q = np.floor(S[b, b+1] / S[b, b])\n E = np.array([[1, 0], [-q, 1]])\n S[:, [b, b+1]] = np.dot(S[:, [b, b+1]], E.T)\n # V[:, [b, b+1]] = V[:, [b, b+1]] / E\n V[:, [b, b+1]] = left_matrix_division(V[:, [b, b+1]], E) # solving the left matrix division\n\n if S[b, b+1]:\n # Zero the first nonzero superdiagonal element\n # using columns b and b+1, to start the bulge at S(b+1,b).\n E = ehermite(S[b, b], S[b, b+1])\n S[:, [b, b+1]] = np.dot(S[:, [b, b+1]], E.T)\n # V[:, [b, b+1]] = V[:, [b, b+1]] / E\n V[:, [b, b+1]] = left_matrix_division(V[:, [b, b+1]], E)\n\n for j in range(min(m, n)):\n if j+1 < m:\n # Zero S(j+1,j) using rows j and j+1.\n E = ehermite(S[j, j], S[j+1, j])\n S[[j, j+1], :] = np.dot(E, S[[j, j+1], :])\n # U[:, [j, j+1]] = U[:, [j, j+1]] / E\n U[:, [j, j+1]] = left_matrix_division(U[:, [j, j+1]], E)\n if j+2 < n:\n # Zero S(j,j+2) using columns j+1 and j+2.\n E = ehermite(S[j, j+1], S[j, j+2])\n S[:, [j+1, j+2]] = np.dot(S[:, [j+1, j+2]], E.T)\n # V[:, [j+1, j+2]] = V[:, [j+1, j+2]] / E\n V[:, [j+1, j+2]] = left_matrix_division(V[:, [j+1, j+2]], E)\n D = np.diag(S, 1)\n\n # Now S is diagonal. 
Make it nonnegative.\n\n for j in range(min(m, n)):\n if S[j, j] < 0:\n S[j, :] = -S[j, :]\n U[:, j] = -U[:, j]\n\n # Squeeze factors to lower right to enforce divisibility condition.\n\n for i in range(min(m, n)):\n for j in range(i+1, min(m, n)):\n # Replace S(i,i), S(j,j) by their gcd and lcm respectively.\n a = S[i, i]\n b = S[j, j]\n [c, d, g] = extgcd(a, b)\n E = np.array([[1, d], [-b/g, a*c/g]])\n F = np.array([[c, 1], [-b*d/g, a/g]])\n S[np.ix_([i, j], [i, j])] = np.dot(np.dot(E, S[:, [i, j]][[i, j], :]), F.T)\n # S[i, i] = tmp_arr[0, 0]\n # S[i, j] = tmp_arr[0, 1]\n # S[j, i] = tmp_arr[1, 0]\n # S[j, j] = tmp_arr[1, 1]\n U[:, [i, j]] = left_matrix_division(U[:, [i, j]], E)\n V[:, [i, j]] = left_matrix_division(V[:, [i, j]], F)\n\n U = np.around(U)\n V = np.around(V)\n return U, S, V", "def det(self, colBy = 0):\n try:\n if not 0 <= colBy < self.getColCount(): raise self.matrixBadDimension('Podano niewłaściwy numer kolumny macierzy.\\nPodano: %s' % (colBy,))\n if self.getColCount() != self.getRowCount() or not self.matrix: return None\n if self.getColCount() == 1: return self[0,0]\n except self.matrixException as e:\n print \"Wyjątek w A.det(colBy = %d)!\\nA = \\n%s\\n\" % (colBy, indent(self))\n return None\n else:\n return reduce(lambda x,y: x+y, [(-1)**(i+colBy) * self[i,colBy] * self.minor(i,colBy).det() for i in range(self.getColCount())])", "def laplacian(A):\n D = np.diag(np.sum(A, axis=1))\n return D - A", "def generate_diagonal_factors(n):\n\tfactors = np.array([1, -1]) # Initialize the diag terms with the diagonal of the Z Pauli matrix\n\tfor _ in range(n - 1): # Iterate n - 1 times\n\t\tfactors = np.hstack([factors, factors * -1]) # Append the same array multiplied by -1\n\treturn factors", "def tridiag_matrix(bc_surface_type, upsilon, space_divisions, dx, k, T, h, hc, emissivity, sigma):\n # create tri-diagonal matrix\n A = np.diagflat([-upsilon for i in range(space_divisions - 1)], -1) +\\\n np.diagflat([1 + 2 * upsilon for i in range(space_divisions)]) +\\\n np.diagflat([-upsilon for i in range(space_divisions - 1)], 1)\n\n # adjust matrix depending on the boundary condition at the exposed surface\n if bc_surface_type == \"linear\":\n A[0,0] = 1 + 2*upsilon + 2*upsilon*dx*h/k\n A[0,1] = -2*upsilon\n \n elif bc_surface_type == \"non-linear\":\n A[0,0] = 1 + 2*upsilon + 2*dx*hc*upsilon/k+ 8*emissivity*sigma*dx*upsilon*T[0]**3/k\n A[0,1] = -2*upsilon\n \n # adjust matrix for the back boundary conditions\n A[-1, -2] = - 2 * upsilon\n A[-1, -1] = 1 + 2 * upsilon\n\n return A", "def Dinvmatrix(N):\r\n import numpy as np\r\n D = np.zeros((N,N,2))\r\n D[:,:,0] = np.diag((np.append(np.ones((1,int(N/2))),np.zeros((1,int(N/2))))))\r\n D[:,:,1] = np.diag((np.append(np.zeros((1,int(N/2))),np.ones((1,int(N/2))))))\r\n return D" ]
[ "0.6306205", "0.62420654", "0.6225076", "0.62029904", "0.61590856", "0.6147025", "0.61394644", "0.60699403", "0.6061244", "0.60523754", "0.6031275", "0.6016392", "0.6009104", "0.60022414", "0.59936255", "0.59645134", "0.5953049", "0.59515077", "0.5947138", "0.59100145", "0.5891339", "0.5849779", "0.58210385", "0.57936865", "0.5786145", "0.57740945", "0.5772036", "0.57656825", "0.5763999", "0.57528675", "0.5745271", "0.5745271", "0.5696057", "0.569127", "0.5676369", "0.5673311", "0.5669219", "0.56605184", "0.5648961", "0.5642543", "0.56242204", "0.56123865", "0.5599719", "0.55577964", "0.5551745", "0.5551161", "0.55287266", "0.55269414", "0.552329", "0.55170655", "0.55119336", "0.55043274", "0.5495334", "0.54948884", "0.54942584", "0.5480105", "0.54766387", "0.54761344", "0.5475872", "0.5462892", "0.545733", "0.5456869", "0.5454809", "0.5453696", "0.5446089", "0.54451275", "0.54418826", "0.5440337", "0.54377544", "0.5436878", "0.54098475", "0.54018307", "0.53980434", "0.5380846", "0.53669924", "0.53617704", "0.5348683", "0.53479594", "0.5346391", "0.53323597", "0.532229", "0.5314632", "0.5311922", "0.5309539", "0.5301656", "0.5293438", "0.5290621", "0.52875924", "0.5283585", "0.5277649", "0.5274783", "0.5262039", "0.5259575", "0.5259302", "0.52544224", "0.52486444", "0.5244765", "0.52397376", "0.5239018", "0.523808", "0.5235997" ]
0.0
-1
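Several of the snippets above assemble dense block-diagonal matrices by hand (the TensorFlow block_diagonal padding approach, the blockDiag and make_block_diag loops). For plain unbatched 2-D blocks they lay the result out the same way as scipy.linalg.block_diag, which the get_A3 snippet already calls. A small illustrative check follows; the blocks A, B, C are made-up examples, not data taken from any record here.

# Illustrative only: confirms the block-diagonal layout produced by the
# hand-rolled helpers above matches SciPy's built-in for dense 2-D blocks.
import numpy as np
from scipy.linalg import block_diag

A = np.array([[1.0, 2.0], [3.0, 4.0]])   # 2 x 2 block
B = np.array([[5.0]])                    # 1 x 1 block
C = np.array([[6.0, 7.0, 8.0]])          # 1 x 3 block

D = block_diag(A, B, C)                  # shape (4, 6): blocks on the diagonal, zeros elsewhere
assert D.shape == (4, 6)
assert np.allclose(D[:2, :2], A)
assert D[2, 2] == 5.0
assert np.allclose(D[3, 3:], C[0])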
Preprocessing for block diagonal matrices analogous to toeplitz_inverse_multiplication_prep()
def bd_toeplitz_inverse_multiplication_prep(*arrs):
    t = []
    for c in arrs:  # loop over each block
        t.append(toeplitz_inverse_multiplication_prep(c))
    return tuple(t)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _block_diagonal(factor_matrices):\n shapes_dict = {}\n for i, matrix_i in enumerate(factor_matrices):\n for j, matrix_j in enumerate(factor_matrices):\n shapes_dict[(i, j)] = matrix_i.shape[:-1] + matrix_j.shape[-1:]\n rows = []\n # concacatenate along axis = -2\n for i, matrix_i in enumerate(factor_matrices):\n # concatenate along axis = -1\n blocks_to_concatenate = []\n for j, _ in enumerate(factor_matrices):\n if i == j:\n blocks_to_concatenate.append(matrix_i)\n else:\n blocks_to_concatenate.append(gs.zeros(shapes_dict[(i, j)]))\n row = gs.concatenate(blocks_to_concatenate, axis=-1)\n rows.append(row)\n metric_matrix = gs.concatenate(rows, axis=-2)\n return metric_matrix", "def block_diag_full(W_):\n assert(W_.ndim == 3)\n bsize = W_.shape[0]\n full = np.concatenate([\n np.concatenate([ np.diag(W_[:,i,j]) for j in range(W_.shape[2]) ], axis=1)\n for i in range(W_.shape[1]) ], axis=0)\n return full", "def make_block_diag(M, num_reps, out=None):\n if out is None:\n big_M = np.zeros((M.shape[0] * num_reps, M.shape[1] * num_reps))\n else:\n big_M = out\n for i in range(num_reps):\n big_M[i * M.shape[0]:(i + 1) * M.shape[0], i * M.shape[1]:(i + 1) * M.shape[1]] = M\n return big_M", "def Controlled(U):\n shp = U.shape\n new_ten = scipy.linalg.block_diag(np.eye(*shp), U)\n return new_ten.reshape(2, shp[0], 2, shp[1])", "def mounting_matrix(self):\n # fmt: off\n count = 0\n for x in range(self.ntheta):\n self.M[count][count] = 1\n self.f[count][0] = self.p_in\n count = count + self.nz - 1\n self.M[count][count] = 1\n self.f[count][0] = self.p_out\n count = count + 1\n count = 0\n for x in range(self.nz - 2):\n self.M[self.ntotal - self.nz + 1 + count][1 + count] = 1\n self.M[self.ntotal - self.nz + 1 + count][self.ntotal - self.nz + 1 + count] = -1\n count = count + 1\n count = 1\n j = 0\n for i in range(1, self.nz - 1):\n a = (1 / self.dtheta ** 2) * (self.c1[i][self.ntheta - 1])\n self.M[count][self.ntotal - 2 * self.nz + count] = a\n b = (1 / self.dz ** 2) * (self.c2[i - 1, j])\n self.M[count][count - 1] = b\n c = -((1 / self.dtheta ** 2) * ((self.c1[i][j]) + self.c1[i][self.ntheta - 1])\n + (1 / self.dz ** 2) * (self.c2[i][j] + self.c2[i - 1][j]))\n self.M[count, count] = c\n d = (1 / self.dz ** 2) * (self.c2[i][j])\n self.M[count][count + 1] = d\n e = (1 / self.dtheta ** 2) * (self.c1[i][j])\n self.M[count][count + self.nz] = e\n count = count + 1\n count = self.nz + 1\n for j in range(1, self.ntheta - 1):\n for i in range(1, self.nz - 1):\n a = (1 / self.dtheta ** 2) * (self.c1[i, j - 1])\n self.M[count][count - self.nz] = a\n b = (1 / self.dz ** 2) * (self.c2[i - 1][j])\n self.M[count][count - 1] = b\n c = -((1 / self.dtheta ** 2) * ((self.c1[i][j]) + self.c1[i][j - 1])\n + (1 / self.dz ** 2) * (self.c2[i][j] + self.c2[i - 1][j]))\n self.M[count, count] = c\n d = (1 / self.dz ** 2) * (self.c2[i][j])\n self.M[count][count + 1] = d\n e = (1 / self.dtheta ** 2) * (self.c1[i][j])\n self.M[count][count + self.nz] = e\n count = count + 1\n count = count + 2\n count = 1\n for j in range(self.ntheta - 1):\n for i in range(1, self.nz - 1):\n if j == 0:\n self.f[count][0] = (self.c0w[i][j] - self.c0w[i][self.ntheta - 1]) / self.dtheta\n else:\n self.f[count][0] = (self.c0w[i, j] - self.c0w[i, j - 1]) / self.dtheta\n count = count + 1\n count = count + 2\n # fmt: on", "def smith_nf(matrix):\n\n A=np.copy(matrix)\n if (np.around(A) != A).any():\n raise Exception('This function requires integer input.')\n\n # This looks much like an SVD algorithm that first bidiagonalizes\n # A by Givens 
rotations and then chases zeros, except for\n # the construction of the 2 by 2 elementary transformation.\n\n m, n = A.shape\n\n S = A\n U = np.eye(m)\n V = np.eye(n)\n\n # Bidiagonalize S with elementary Hermite transforms.\n for j in range(min(m, n)):\n # Zero column j below the diagonal.\n for i in range(j+1, m):\n if S[i, j]:\n # Construct an elementary Hermite transformation E\n # to zero S(i,j) by combining rows i and j.\n E = ehermite(S[j, j], S[i, j])\n # Apply the transform to S and U.\n S[[j, i], :] = np.dot(E, S[[j, i], :])\n # U[:, [j, i]] = U[:, [j, i]] / E\n U[:, [j, i]] = left_matrix_division(U[:, [j, i]], E) # solving the left matrix division\n\n # % Zero row j after the superdiagonal.\n for i in range(j+2, n):\n if S[j, i]:\n # Construct an elementary Hermite transformation E\n # to zero S(j,i) by combining columns j+1 and i.\n E = ehermite(S[j, j+1], S[j, i])\n # Apply the transform to S and V.\n S[:, [j+1, i]] = np.dot(S[:, [j+1, i]], E.T)\n # V[:, [j+1, i]] = V[:, [j+1, i]] / E\n V[:, [j+1, i]] = left_matrix_division(V[:, [j+1, i]], E) # solving the left matrix division\n\n # Now S is upper bidiagonal.\n # Chase the superdiagonal nonzeros away.\n\n D = np.diag(S, 1)\n while any(D):\n b = min(np.where(D))[0]\n # Start chasing bulge at first nonzero superdiagonal element.\n # To guarantee reduction in S(b,b), first make S(b,b) positive\n # and make S(b,b+1) nonnegative and less than S(b,b).\n if S[b, b] < 0:\n S[b, :] = -S[b, :]\n U[:, b] = -U[:, b]\n\n q = np.floor(S[b, b+1] / S[b, b])\n E = np.array([[1, 0], [-q, 1]])\n S[:, [b, b+1]] = np.dot(S[:, [b, b+1]], E.T)\n # V[:, [b, b+1]] = V[:, [b, b+1]] / E\n V[:, [b, b+1]] = left_matrix_division(V[:, [b, b+1]], E) # solving the left matrix division\n\n if S[b, b+1]:\n # Zero the first nonzero superdiagonal element\n # using columns b and b+1, to start the bulge at S(b+1,b).\n E = ehermite(S[b, b], S[b, b+1])\n S[:, [b, b+1]] = np.dot(S[:, [b, b+1]], E.T)\n # V[:, [b, b+1]] = V[:, [b, b+1]] / E\n V[:, [b, b+1]] = left_matrix_division(V[:, [b, b+1]], E)\n\n for j in range(min(m, n)):\n if j+1 < m:\n # Zero S(j+1,j) using rows j and j+1.\n E = ehermite(S[j, j], S[j+1, j])\n S[[j, j+1], :] = np.dot(E, S[[j, j+1], :])\n # U[:, [j, j+1]] = U[:, [j, j+1]] / E\n U[:, [j, j+1]] = left_matrix_division(U[:, [j, j+1]], E)\n if j+2 < n:\n # Zero S(j,j+2) using columns j+1 and j+2.\n E = ehermite(S[j, j+1], S[j, j+2])\n S[:, [j+1, j+2]] = np.dot(S[:, [j+1, j+2]], E.T)\n # V[:, [j+1, j+2]] = V[:, [j+1, j+2]] / E\n V[:, [j+1, j+2]] = left_matrix_division(V[:, [j+1, j+2]], E)\n D = np.diag(S, 1)\n\n # Now S is diagonal. 
Make it nonnegative.\n\n for j in range(min(m, n)):\n if S[j, j] < 0:\n S[j, :] = -S[j, :]\n U[:, j] = -U[:, j]\n\n # Squeeze factors to lower right to enforce divisibility condition.\n\n for i in range(min(m, n)):\n for j in range(i+1, min(m, n)):\n # Replace S(i,i), S(j,j) by their gcd and lcm respectively.\n a = S[i, i]\n b = S[j, j]\n [c, d, g] = extgcd(a, b)\n E = np.array([[1, d], [-b/g, a*c/g]])\n F = np.array([[c, 1], [-b*d/g, a/g]])\n S[np.ix_([i, j], [i, j])] = np.dot(np.dot(E, S[:, [i, j]][[i, j], :]), F.T)\n # S[i, i] = tmp_arr[0, 0]\n # S[i, j] = tmp_arr[0, 1]\n # S[j, i] = tmp_arr[1, 0]\n # S[j, j] = tmp_arr[1, 1]\n U[:, [i, j]] = left_matrix_division(U[:, [i, j]], E)\n V[:, [i, j]] = left_matrix_division(V[:, [i, j]], F)\n\n U = np.around(U)\n V = np.around(V)\n return U, S, V", "def _block_lower_triangular_dense(expected_shape, blocks):\n rows = []\n num_cols = 0\n for row_blocks in blocks:\n\n # Get the batch shape for the block.\n batch_row_shape = array_ops.shape(row_blocks[0])[:-1]\n\n num_cols += array_ops.shape(row_blocks[-1])[-1]\n zeros_to_pad_after_shape = array_ops.concat(\n [batch_row_shape, [expected_shape[-2] - num_cols]], axis=-1)\n zeros_to_pad_after = array_ops.zeros(\n zeros_to_pad_after_shape, dtype=row_blocks[-1].dtype)\n\n row_blocks.append(zeros_to_pad_after)\n rows.append(array_ops.concat(row_blocks, axis=-1))\n\n return array_ops.concat(rows, axis=-2)", "def block_diagonal(matrices, dtype=tf.float32):\n matrices = [tf.convert_to_tensor(matrix, dtype=dtype)\n for matrix in matrices]\n blocked_rows = tf.Dimension(0)\n blocked_cols = tf.Dimension(0)\n batch_shape = tf.TensorShape(None)\n for matrix in matrices:\n full_matrix_shape = matrix.get_shape().with_rank_at_least(2)\n batch_shape = batch_shape.merge_with(full_matrix_shape[:-2])\n blocked_rows += full_matrix_shape[-2]\n blocked_cols += full_matrix_shape[-1]\n ret_columns_list = []\n for matrix in matrices:\n matrix_shape = tf.shape(matrix)\n ret_columns_list.append(matrix_shape[-1])\n ret_columns = tf.add_n(ret_columns_list)\n row_blocks = []\n current_column = 0\n for matrix in matrices:\n matrix_shape = tf.shape(matrix)\n row_before_length = current_column\n current_column += matrix_shape[-1]\n row_after_length = ret_columns - current_column\n row_blocks.append(tf.pad(\n tensor=matrix,\n paddings=tf.concat(\n [tf.zeros([tf.rank(matrix) - 1, 2], dtype=tf.int32),\n [(row_before_length, row_after_length)]],\n axis=0)))\n blocked = tf.concat(row_blocks, -2)\n blocked.set_shape(batch_shape.concatenate((blocked_rows, blocked_cols)))\n return blocked", "def block_diagonal(matrices, dtype=tf.float32):\n matrices = [tf.convert_to_tensor(matrix, dtype=dtype) for matrix in matrices]\n blocked_rows = tf.Dimension(0)\n blocked_cols = tf.Dimension(0)\n batch_shape = tf.TensorShape(None)\n for matrix in matrices:\n full_matrix_shape = matrix.get_shape().with_rank_at_least(2)\n batch_shape = batch_shape.merge_with(full_matrix_shape[:-2])\n blocked_rows += full_matrix_shape[-2]\n blocked_cols += full_matrix_shape[-1]\n ret_columns_list = []\n for matrix in matrices:\n matrix_shape = tf.shape(matrix)\n ret_columns_list.append(matrix_shape[-1])\n ret_columns = tf.add_n(ret_columns_list)\n row_blocks = []\n current_column = 0\n for matrix in matrices:\n matrix_shape = tf.shape(matrix)\n row_before_length = current_column\n current_column += matrix_shape[-1]\n row_after_length = ret_columns - current_column\n row_blocks.append(tf.pad(\n tensor=matrix,\n paddings=tf.concat(\n [tf.zeros([tf.rank(matrix) - 1, 2], 
dtype=tf.int32),\n [(row_before_length, row_after_length)]],\n axis=0)))\n blocked = tf.concat(row_blocks, -2)\n blocked.set_shape(batch_shape.concatenate((blocked_rows, blocked_cols)))\n return blocked", "def BlockToMatrix(self):\n for h in range(height):\n for w in range(width):\n if self.matrix[h][w] == 2:\n self.matrix[h][w] = 0\n for i in self.coords:\n self.matrix[i[1]][i[0]] = 2", "def num_47():\n\n def block_reshape(a, rows, cols, nodata=-1, as_masked=True):\n \"\"\" \"\"\"\n s = np.array(a.shape)\n w = np.array([rows, cols])\n m = divmod(s, w)\n new_shape = w*m[0] + w*(m[1]!=0)\n ypad, xpad = new_shape - a.shape\n pad = ((0, ypad), (0, xpad))\n p_with =((nodata, nodata), (nodata, nodata))\n b = np.pad(a, pad_width=pad, mode='constant', constant_values=p_with)\n w_y, w_x = w # Blocksize\n y, x = b.shape # padded array\n c = b.reshape((y//w_y, w_y, x//w_x, w_x))\n c = c.swapaxes(1, 2).reshape(-1, w_y, w_x)\n if as_masked:\n mask_val = nodata\n c = np.ma.masked_equal(c, mask_val)\n c.set_fill_value(mask_val)\n return b, c\n y, x = 5, 6\n rows, cols = [3, 4]\n nodata = -1\n a = np.arange(x*y).reshape(y,x)\n b, c = block_reshape(a, rows, cols, nodata)\n print(\"\\n{}\".format(num_47.__doc__))\n print(\"a\\n{}\\nb\\n{}\\nc\\n{}\".format(a, b, c))\n return a, b, c", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features", "def invert_L1_svd():", "def _unroll_block_matrix(mat1: tf.Tensor) -> tf.Tensor:\n n_dim, m1, n1 = mat1.get_shape().as_list()\n mat1_rsh = tf.reshape(mat1, [n_dim, m1, 1, n1])\n mat2 = tf.eye(n_dim, dtype=tf.float64)\n mat2_rsh = tf.reshape(mat2, [n_dim, 1, n_dim, 1])\n return tf.reshape(mat1_rsh * mat2_rsh, [n_dim * m1, n_dim * n1])", "def mult_diag(d, mtx, left=True):\n if left:\n return (d*mtx.T).T\n else:\n return d*mtx", "def mult_diag(d, mtx, left=True):\n if left:\n return (d*mtx.T).T\n else:\n return d*mtx", "def cal_B(self):\n self.B = np.zeros((self.point_matrix.shape[0],\n self.attach_points.shape[0],\n self.point_matrix.shape[1]\n * self.attach_points.shape[0]))\n for i in range(0, self.point_matrix.shape[0]):\n self.B[i, :, :] = block_diag(* self.L[i, :, :])\n self.L_tether = self.L[:, self.attach_points[:, 3] == 0, :]\n self.L_tube = self.L[:, self.attach_points[:, 3] == 1, :]\n\n self.B_tether = np.zeros((self.point_matrix.shape[0],\n self.attach_points_tether.shape[0],\n self.point_matrix.shape[1]\n * self.attach_points_tether.shape[0]))\n for i in range(0, self.point_matrix.shape[0]):\n self.B_tether[i, :, :] = block_diag(* self.L_tether[i, :, :])\n\n self.B_tube = np.zeros((self.point_matrix.shape[0],\n self.attach_points_tube.shape[0],\n self.point_matrix.shape[1]\n * self.attach_points_tube.shape[0]))\n for i in range(0, self.point_matrix.shape[0]):\n self.B_tube[i, :, :] = block_diag(* self.L_tube[i, :, :])", "def _prepare_outer_matrix(self):\n self._mat_plane = numpy.array([\n self._scaling[0], 0, 0, 0,\n 0, self._scaling[1], 0, 0,\n 0, 0, 1, 0,\n self.i_border[0], -self.i_border[1], 0, 1\n ], dtype=numpy.float32)", "def bd_toeplitz_inverse_multiplication(u, *arrs):\n \n y = zeros(shape(u))\n n_start = 0\n n_end = 0\n for t in arrs:\n n_start = 
n_end\n n_end += len(t[3]) # len(t[3]) is the length of the block\n y[n_start:n_end] = toeplitz_inverse_multiplication(u[n_start:n_end], *t)\n assert len(y) == n_end\n return y", "def form_square_block_matrix(mat1,mat2):\n if mat1.cols==1:\n mat3 = mp.matrix(mat1.rows+mat2.rows,1)\n mat3[:mat1.rows] = mat1[:]\n mat3[mat1.rows:mat3.rows] = mat2[:]\n else:\n mat3 = mp.matrix(mat1.rows+mat2.rows, mat1.rows+mat2.rows)\n mat3[:mat1.rows,:mat1.rows] = mat1[:,:]\n mat3[mat1.rows:mat3.rows,mat1.rows:mat3.rows] = mat2[:,:]\n return mat3", "def block_diag_matmul(X, W_, bsize):\n assert(W_.ndim == 3)\n I, M, N = X.shape[0], X.shape[1], W_.shape[2]*bsize\n assert(W_.shape[0] == bsize)\n assert(W_.shape[1]*bsize == M)\n\n X_ = X.reshape(I, M/bsize, bsize)\n X_ = np.swapaxes(np.swapaxes(X_, 1,2), 0,1)\n Y_ = np.matmul(X_, W_)\n Y_ = np.swapaxes(np.swapaxes(Y_, 0,1), 1,2)\n Y = Y_.reshape(I, N)\n return Y", "def inv(self, Am):\r\n # Section 1: MAmke sure Am cAmn be inverted.\r\n self.check_squareness(Am)\r\n self.check_non_singular(Am)\r\n \r\n # Section 2: MAmke copies of Am & I, AmM & IM, to use for row ops\r\n n = len(Am)\r\n AmM = self.copy_matrix(Am)\r\n I = self.identity_matrix(n)\r\n IM = self.copy_matrix(I)\r\n \r\n # Section 3: Perform row operAmtions\r\n indices = list(range(n)) # to Amllow flexible row referencing ***\r\n for fd in range(n): # fd stAmnds for focus diAmgonAml\r\n fdScAmler = 1.0 / AmM[fd][fd]\r\n # FIRST: scAmle fd row with fd inverse. \r\n for j in range(n): # Use j to indicAmte column looping.\r\n AmM[fd][j] *= fdScAmler\r\n IM[fd][j] *= fdScAmler\r\n # SECOND: operAmte on Amll rows except fd row Ams follows:\r\n for i in indices[0:fd] + indices[fd+1:]: \r\n # *** skip row with fd in it.\r\n crScAmler = AmM[i][fd] # cr stAmnds for \"current row\".\r\n for j in range(n): \r\n # cr - crScAmler * fdRow, but one element Amt Am time.\r\n AmM[i][j] = AmM[i][j] - crScAmler * AmM[fd][j]\r\n IM[i][j] = IM[i][j] - crScAmler * IM[fd][j]\r\n \r\n return IM", "def _dmatrix(kn_u, kn_d):\n d = np.zeros((kn_u.size, 4, 4), np.complex128)\n d_inv = np.zeros_like(d)\n\n d[:, 0, 0] = 1\n d[:, 0, 1] = 1\n d[:, 1, 0] = kn_u\n d[:, 1, 1] = -kn_u\n\n d[:, 2, 2] = 1\n d[:, 2, 3] = 1\n d[:, 3, 2] = kn_d\n d[:, 3, 3] = -kn_d\n\n # an analytic matrix inverse saves time\n inv_kn_u = 0.5 / kn_u\n inv_kn_d = 0.5 / kn_d\n\n d_inv[:, 0, 0] = 0.5\n d_inv[:, 0, 1] = inv_kn_u\n d_inv[:, 1, 0] = 0.5\n d_inv[:, 1, 1] = -inv_kn_u\n\n d_inv[:, 2, 2] = 0.5\n d_inv[:, 2, 3] = inv_kn_d\n d_inv[:, 3, 2] = 0.5\n d_inv[:, 3, 3] = -inv_kn_d\n\n return d, d_inv", "def diagonalizing_gates(self):\n raise NotImplementedError", "def _preprocess(self):\n # Size of each micro tree: B = 1/4 logn.\n self._block_size = int(1/4 * math.log2(self._size))\n\n # Build a list of ladders and a sparse table for the jump nodes.\n super()._preprocess()\n\n # Decompose the tree into macro tree and micro trees.\n self._micro_macro_decomposition()\n\n # Build simple tables for the micro trees.\n self._build_micro_tree_tables()", "def blockDiag(matrixList):\n \n # Check if all input matrices are square matrices\n dimension = 0\n for block in matrixList:\n if block.shape[0] != block.shape[1]:\n raise Error(\"Non-square input matrix.\")\n dimension += block.shape[0]\n \n # Construct diagonal block matrix\n index = 0\n blockMatrix = np.zeros((dimension, dimension))\n for block in matrixList:\n matSize = block.shape[0]\n blockMatrix[index:index+matSize,index:index+matSize] = block\n index += matSize\n \n return blockMatrix", "def 
preprocess_features(features):\r\n rowsum = np.array(features.sum(1))\r\n r_inv = np.power(rowsum, -1).flatten()\r\n r_inv[np.isinf(r_inv)] = 0.\r\n r_mat_inv = np.diag(r_inv)\r\n features = r_mat_inv.dot(features)\r\n return features", "def soti_block_slab(size, p , q, nu, zu, t = -1, M = 2.3, D1 = 0.8, D2 = 0.5):\n # put unit_blocks into diag\n \n # make blocks array with dims (size,4q,4q)\n blocks = np.zeros((size,4*q,4*q),dtype=complex) \n \n # fill up\n #xs = linspace(0,size,num=size) # for completeness\n for i in range(size):\n #x = xs[i] # doesn't actually do anything\n blocks[i,:,:] = unit_block_slab(p=p,q=q,nu=nu,zu=zu,t=t,M=M,D1=D1,D2=D2)\n \n # put in diagonal\n M_diags = ss.block_diag(blocks)\n \n # off diagonals x -> x+1 & h.c.\n hop_x = 1/2 * (t * pms.s0_tz() + 1j * D1 * pms.sx_tx() + D2 * pms.s0_ty())\n hop_x_dag = hop_x.conj().T\n \n # fill up to identity\n hop_x_mat = np.kron(np.eye(N=size), hop_x)\n hop_x_mat_dag = np.kron(np.eye(N=size), hop_x_dag)\n \n # put these \"identity\" matrices on the off-diagonals\n ### double check the math for this section please\n M_top_diag = np.kron(np.diag(np.ones(size-1), k=1), hop_x_mat)\n M_bot_diag = np.kron(np.diag(np.ones(size-1), k=-1), hop_x_mat_dag)\n \n M_off_diags = M_top_diag + M_bot_diag\n \n MAT = M_diags + M_off_diags\n \n return MAT", "def to_compound_symmetric(z: torch.Tensor) -> torch.Tensor:\n a, b = z.real, z.imag\n return block_matrix([[a, b], [b, -a]])", "def fold_diag(pixels):\n copy = blank_image(len(pixels), len(pixels[0])) \n for r in range(len(pixels)):\n for c in range(len(pixels[0])):\n copy[r][c] = pixels[r][c]\n for r in range(len(pixels)):\n for c in range(r):\n copy[r][c] = [255, 255, 255]\n return copy", "def build_Qdiagnol_block(self, Q, P):\n \n N = self.N # number of MPC steps\n num_input = self.num_input\n \n row_list = [] # reocrd the every row in B_hat\n zero = Variable(torch.zeros(num_input, num_input*(N-1)))\n zero = self.vari_gpu(zero)\n row_long = torch.cat([zero, Q, zero],1) # [0 0 ... 
Q 0 0 ...]\n for i in range(N, 1, -1):\n row_list.append(row_long[:, (i-1)*num_input : (i+N-1)*num_input])\n \n row = torch.cat([zero, P],1) # last line by [0 P]\n row_list.append(row)\n \n return torch.cat(row_list,0)", "def preprocess_features(features):\r\n rowsum = np.array(features.sum(1),dtype='float')\r\n r_inv = np.power(rowsum, -1).flatten()\r\n r_inv[np.isinf(r_inv)] = 0.\r\n r_mat_inv = sp.diags(r_inv)\r\n features = r_mat_inv.dot(features)\r\n # return sparse_to_tuple(features)\r\n return features\r\n # print(features)\r\n # rowsum = np.array(features.sum(1),dtype='float')\r\n #\r\n # r_inv = np.power(rowsum, -1).flatten()\r\n # r_inv[np.isinf(r_inv)] = 0.\r\n # r_mat_inv = np.diag(r_inv)\r\n # features = r_mat_inv.dot(features)\r\n # # return sparse_to_tuple(features)\r\n # return features\r", "def jordan_wigner_ladder_sparse(n_qubits, tensor_factor, ladder_type):\n parities = tensor_factor * [pauli_z_csc]\n identities = [\n scipy.sparse.identity(2**(n_qubits - tensor_factor - 1),\n dtype=complex,\n format='csc')\n ]\n if ladder_type:\n operator = kronecker_operators(parities + [q_raise_csc] + identities)\n else:\n operator = kronecker_operators(parities + [q_lower_csc] + identities)\n return operator", "def getInverseMatrix(self) -> CMatrix4:\n ...", "def block_diag_zip(W, bsize):\n assert(W.ndim == 2)\n assert(W.shape[0] % bsize == 0)\n assert(W.shape[1] % bsize == 0)\n zipped = np.array([\n [ np.diag(W[i:i+bsize,j:j+bsize]) for j in range(0,W.shape[1],bsize) ]\n for i in range(0,W.shape[0],bsize) ])\n zipped = np.swapaxes(np.swapaxes(zipped, 1,2), 0,1)\n return zipped", "def inv_shift_rows(state):\n state[1][0], state[1][1], state[1][2], state[1][3] = state[1][3], state[1][0], state[1][1], state[1][2]\n state[2][0], state[2][1], state[2][2], state[2][3] = state[2][2], state[2][3], state[2][0], state[2][1]\n state[3][0], state[3][1], state[3][2], state[3][3] = state[3][1], state[3][2], state[3][3], state[3][0]", "def _derive_transformation_matrices(self):\n\n if hasattr(self, '_primaries') and hasattr(self, '_whitepoint'):\n if self._primaries is not None and self._whitepoint is not None:\n npm = normalised_primary_matrix(self._primaries,\n self._whitepoint)\n\n self._derived_RGB_to_XYZ_matrix = npm\n self._derived_XYZ_to_RGB_matrix = np.linalg.inv(npm)", "def toeplitz_inverse_multiplication_prep(T_column):\n \n phi=1\n psi=2\n assert phi != 0\n assert psi != 0\n assert phi != psi\n \n n = len(T_column)\n \n x = levinson(T_column, np.concatenate( (np.array([1]), np.zeros((n-1,))) ) )\n y = levinson(T_column, np.concatenate( (np.zeros((n-1,)), np.array([1])) ) )\n\n \n \n x_0 = x[0]\n \n D_phi = (phi**(1/n))**np.arange(0,n)\n D_psi = (psi**(1/n))**np.arange(0,n)\n\n Lambda_1 = fft(D_psi*x)\n Lambda_2 = fft(D_phi*np.concatenate(([phi*y[-1]], y[0:-1])))\n Lambda_3 = fft(D_psi*np.concatenate(([psi*y[-1]], y[0:-1])))\n Lambda_4 = fft(D_phi*x)\n \n return (x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4)", "def lu(matrix):\n SIZE = matrix.shape[0]\n BS = np.BLOCKSIZE\n\n if matrix.shape[0] != matrix.shape[0]:\n raise Exception(\"LU only supports squared matricis\")\n if not matrix.dist():\n raise Exception(\"The matrix is not distributed\")\n\n if(SIZE % np.BLOCKSIZE != 0):\n raise Exception(\"The matrix dimensions must be divisible \"\\\n \"with np.BLOCKSIZE(%d)\"%np.BLOCKSIZE)\n\n (prow,pcol) = matrix.pgrid()\n A = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True);A += matrix\n L = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True)\n U = 
np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True)\n tmpL = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True)\n tmpU = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True)\n for k in xrange(0,SIZE,BS):\n bs = min(BS,SIZE - k) #Current block size\n kb = k / BS # k as block index\n\n #Compute vertical multiplier\n slice = ((kb,kb+1),(kb,kb+1))\n for a,l,u in zip(A.blocks(slice), L.blocks(slice), U.blocks(slice)):\n (p,tl,tu) = linalg.lu(a)\n if not (np.diag(p) == 1).all():#We do not support pivoting\n raise Exception(\"Pivoting was needed!\")\n #There seems to be a transpose bug in SciPy's LU\n l[:] = tl.T\n u[:] = tu.T\n\n #Replicate diagonal block horizontal and vertical\n for tk in xrange(k+bs,SIZE,BS):\n tbs = min(BS,SIZE - tk) #Current block size\n L[tk:tk+tbs,k:k+bs] = U[k:k+tbs,k:k+bs]\n U[k:k+bs,tk:tk+tbs] = L[k:k+bs,k:k+tbs]\n\n if k+bs < SIZE:\n #Compute horizontal multiplier\n slice = ((kb,kb+1),(kb+1,SIZE/BS))\n for a,u in zip(A.blocks(slice), U.blocks(slice)):\n u[:] = np.linalg.solve(u.T,a.T).T\n\n #Compute vertical multiplier\n slice = ((kb+1,SIZE/BS),(kb,kb+1))\n for a,l in zip(A.blocks(slice), L.blocks(slice)):\n l[:] = np.linalg.solve(l,a)\n\n #Apply to remaining submatrix\n A -= pyHPC.summa(L[:,:k+bs],U[:k+bs,:], ao=(k+bs,k),\n bo=(k,k+bs), co=(k+bs,k+bs))\n\n return (L, U)", "def blocks_to_matrix(blocks, frame, orbs):\n\n io_base, _ = orbs_base(orbs)\n norbs = 0\n for el in list(frame.symbols):\n norbs+= len(orbs[el])\n nat = len(list(frame.symbols))\n unfock = np.zeros((norbs, norbs))\n\n bidx = {}\n for k in blocks.keys():\n bidx[k] = {}\n for bk in blocks[k].keys():\n bidx[k][bk] = 0\n cur_a = ()\n ki = 0\n nat = len(frame.numbers)\n for i in range(nat):\n el_a = frame.symbols[i]\n cur_a = ()\n for ia, oa in enumerate(orbs[el_a]):\n na, la, ma = oa\n na += io_base[el_a]\n # we read the Hamiltonian in blocks\n if (cur_a == (na,la)): continue\n cur_a = (na,la)\n kj = 0\n for j in range(nat):\n el_b = frame.symbols[j]\n cur_b = ()\n for ib, ob in enumerate(orbs[el_b]):\n nb, lb, mb = ob\n nb += io_base[el_b] # adds element offset\n if (cur_b == (nb,lb)): continue # only read at the beginning of each m block\n cur_b = (nb,lb)\n if (nb<na or (nb==na and lb<la)): continue\n orb = (na,la,nb,lb)\n if (i==j):\n blockij = blocks['diag'][orb][bidx['diag'][orb]]\n unfock[ki+ia:ki+ia+2*la+1, kj+ib:kj+ib+2*lb+1] = blockij\n unfock[ki+ib:ki+ib+2*lb+1, kj+ia:kj+ia+2*la+1] = blockij.T\n bidx['diag'][orb] += 1\n elif (el_a == el_b and i<j):\n blockij = ( ( blocks['offd_p'][orb][bidx['offd_p'][orb]] if orb in blocks['offd_p'] else 0)\n + ( blocks['offd_m'][orb][bidx['offd_m'][orb]] if orb in blocks['offd_m'] else 0)\n )/np.sqrt(2)\n blockji = ( ( blocks['offd_p'][orb][bidx['offd_p'][orb]] if orb in blocks['offd_p'] else 0)\n - ( blocks['offd_m'][orb][bidx['offd_m'][orb]] if orb in blocks['offd_m'] else 0)\n )/np.sqrt(2)\n unfock[ki+ia:ki+ia+2*la+1, kj+ib:kj+ib+2*lb+1] = blockij\n unfock[kj+ib:kj+ib+2*lb+1, ki+ia:ki+ia+2*la+1] = blockij.T\n unfock[kj+ia:kj+ia+2*la+1, ki+ib:ki+ib+2*lb+1] = blockji\n unfock[ki+ib:ki+ib+2*lb+1, kj+ia:kj+ia+2*la+1] = blockji.T\n if orb in bidx['offd_p']:\n bidx['offd_p'][orb] += 1\n if orb in bidx['offd_m']:\n bidx['offd_m'][orb] += 1\n elif (el_a != el_b):\n blockij = blocks['hete'][orb][bidx['hete'][orb]]\n unfock[ki+ia:ki+ia+2*la+1, kj+ib:kj+ib+2*lb+1] = blockij\n unfock[kj+ib:kj+ib+2*lb+1, ki+ia:ki+ia+2*la+1] = blockij.T\n bidx['hete'][orb] += 1\n kj += len(orbs[el_b])\n ki += len(orbs[el_a])\n return unfock", "def post_heatdiag(self,ds):\n #\n 
self.drmid=self.rmid*0 # mem allocation\n self.drmid[1:-1]=(self.rmid[2:]-self.rmid[0:-2])*0.5\n self.drmid[0]=self.drmid[1]\n self.drmid[-1]=self.drmid[-2]\n\n dt = np.zeros_like(self.time)\n dt[1:] = self.time[1:] - self.time[0:-1]\n dt[0] = dt[1]\n rst=np.nonzero(dt<0) #index when restat happen\n dt[rst]=dt[rst[0]+1]\n self.dt = dt\n\n #get separatrix r\n self.rs=np.interp([1],self.psin,self.rmid)\n \n self.rmidsepmm=(self.rmid-self.rs)*1E3 # dist from sep in mm\n\n #get heat\n self.qe=np.transpose(self.e_perp_energy_psi + self.e_para_energy_psi)/dt/ds\n self.qi=np.transpose(self.i_perp_energy_psi + self.i_para_energy_psi)/dt/ds\n self.ge=np.transpose(self.e_number_psi)/dt/ds\n self.gi=np.transpose(self.i_number_psi)/dt/ds\n\n self.qe = np.transpose(self.qe)\n self.qi = np.transpose(self.qi)\n self.ge = np.transpose(self.ge)\n self.gi = np.transpose(self.gi)\n\n self.qt=self.qe+self.qi\n #imx=self.qt.argmax(axis=1)\n mx=np.amax(self.qt,axis=1)\n self.lq_int=mx*0 #mem allocation\n\n for i in range(mx.shape[0]):\n self.lq_int[i]=np.sum(self.qt[i,:]*self.drmid)/mx[i]", "def diagonalize(operator):\n eig_values, eig_vecs = la.eigh(operator)\n # eig_values -= np.amin(eig_values)\n return eig_values, eig_vecs", "def _block_to_full(block_mat, inverse, shape):\n # block_map = cartprod(inverse[0], inverse[1]).T\n block_map = cartprod(inverse, inverse).T\n mat_by_edge = block_mat[block_map[0], block_map[1]]\n full_mat = mat_by_edge.reshape(shape)\n return full_mat", "def _block_to_full(\n block_mat: np.ndarray, inverse: np.ndarray, shape: Tuple[int, ...]\n) -> np.ndarray:\n block_map = cartesian_product(inverse, inverse).T\n mat_by_edge = block_mat[block_map[0], block_map[1]]\n full_mat = mat_by_edge.reshape(shape)\n return full_mat", "def preprocess_adj(adj):\n adj = adj + sp.eye(adj.shape[0])\n adj = sp.coo_matrix(adj)\n row_sum = np.array(adj.sum(1))\n d_inv_sqrt = np.power(row_sum, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n return d_mat_inv_sqrt.dot(adj).dot(d_mat_inv_sqrt)", "def convolution_as_maultiplication(I, F, print_ir=False):\n # number of columns and rows of the input \n I_row_num, I_col_num = I.shape \n\n # number of columns and rows of the filter\n F_row_num, F_col_num = F.shape\n\n # calculate the output dimensions\n output_row_num = I_row_num + F_row_num - 1\n output_col_num = I_col_num + F_col_num - 1\n if print_ir: print('output dimension:', output_row_num, output_col_num)\n\n # zero pad the filter\n F_zero_padded = np.pad(F, ((output_row_num - F_row_num, 0),\n (0, output_col_num - F_col_num)),\n 'constant', constant_values=0)\n if print_ir: print('F_zero_padded: ', F_zero_padded)\n\n # use each row of the zero-padded F to creat a toeplitz matrix. 
\n # Number of columns in this matrices are same as numbe of columns of input signal\n toeplitz_list = []\n for i in range(F_zero_padded.shape[0]-1, -1, -1): # iterate from last row to the first row\n c = F_zero_padded[i, :] # i th row of the F \n r = np.r_[c[0], np.zeros(I_col_num-1)] # first row for the toeplitz fuction should be defined otherwise\n # the result is wrong\n toeplitz_m = toeplitz(c,r) # this function is in scipy.linalg library\n toeplitz_list.append(toeplitz_m)\n if print_ir: print('F '+ str(i)+'\\n', toeplitz_m)\n\n # doubly blocked toeplitz indices: \n # this matrix defines which toeplitz matrix from toeplitz_list goes to which part of the doubly blocked\n c = range(1, F_zero_padded.shape[0]+1)\n r = np.r_[c[0], np.zeros(I_row_num-1, dtype=int)]\n doubly_indices = toeplitz(c, r)\n if print_ir: print('doubly indices \\n', doubly_indices)\n\n ## creat doubly blocked matrix with zero values\n toeplitz_shape = toeplitz_list[0].shape # shape of one toeplitz matrix\n h = toeplitz_shape[0]*doubly_indices.shape[0]\n w = toeplitz_shape[1]*doubly_indices.shape[1]\n doubly_blocked_shape = [h, w]\n doubly_blocked = np.zeros(doubly_blocked_shape)\n\n # tile toeplitz matrices for each row in the doubly blocked matrix\n b_h, b_w = toeplitz_shape # hight and withs of each block\n for i in range(doubly_indices.shape[0]):\n for j in range(doubly_indices.shape[1]):\n start_i = i * b_h\n start_j = j * b_w\n end_i = start_i + b_h\n end_j = start_j + b_w\n doubly_blocked[start_i: end_i, start_j:end_j] = toeplitz_list[doubly_indices[i,j]-1]\n\n if print_ir: print('doubly_blocked: ', doubly_blocked)\n\n # convert I to a vector\n vectorized_I = matrix_to_vector(I)\n if print_ir: print('vectorized_I: ', vectorized_I)\n \n # get result of the convolution by matrix mupltiplication\n result_vector = np.matmul(doubly_blocked, vectorized_I)\n if print_ir: print('result_vector: ', result_vector)\n\n # reshape the raw rsult to desired matrix form\n out_shape = [output_row_num, output_col_num]\n output = vector_to_matrix(result_vector, out_shape)\n if print_ir: print('Result of implemented method: \\n', output)\n \n return output", "def tridiagonal_matrix_algorithm(l, d, u, b): # noqa:E741\n n = l.size\n cp = np.zeros(n)\n dp = np.zeros(n)\n x = np.zeros(n)\n cp[0] = u[0]/d[0]\n dp[0] = b[0]/d[0]\n for k in range(1, n):\n cp[k] = u[k] / (d[k]-l[k]*cp[k-1])\n dp[k] = (b[k]-l[k]*dp[k-1]) / (d[k]-l[k]*cp[k-1])\n x[-1] = dp[-1]\n for k in range(n-2, -1, -1):\n x[k] = dp[k] - cp[k]*x[k+1]\n return x", "def _embed44(matrix):\n result = np.eye(4)\n r, c = matrix.shape\n result[:r, :c] = matrix\n return result", "def block_prior(m):\n n = 2 * m\n d = np.zeros((m, m, n, n))\n for i in range(m):\n for j in range(m):\n ii = 2 * i\n jj = 2 * j\n d[i, j, ii:ii + 2, jj:jj + 2] = 1\n return d.reshape(m * m, n * n)", "def symmetrize(a):\n return a + a.T - np.diag(a.diagonal());", "def invertMatrixZN(M, N):\n n = M.shape[0] # shape = (nzeilen, nspalten), also shape[0] = nzeilen\n M = M.copy() # nicht an der Originalmatrix rumspielen\n I = np.identity(n, int) # Einheitsmatrix -> wird später das Ergebnis\n for row in range(n):\n if not invertierbar(M[row, row], N):\n # müssen Zeilen tauschen\n for j in range(row+1, n):\n if invertierbar(M[j, row], N):\n tmp = M[row, :].copy()\n M[row, :] = M[j, :]\n M[j, :] = tmp\n tmp = I[row, :].copy()\n I[row, :] = I[j, :]\n I[j, :] = tmp\n break\n else:\n # hier kommen wir hin wenn die for-Schleife nicht durch ein\n # break beendet wurde, also keine geeignete Zeile zum Tauschen\n # 
existiert\n raise ValueError(\"Matrix nicht invertierbar\")\n # Zeile mit dem Inversen des Pivot-Elements multiplizieren, um eine 1\n # auf der Diagonalen zu erreichen\n faktor = invertZN(M[row, row], N)\n M[row, :] = (M[row, :] * faktor) % N\n I[row, :] = (I[row, :] * faktor) % N\n \n # Nullen unterhalb des aktuellen Pivots erzeugen\n for j in range(row + 1, n):\n if invertierbar(M[j, row], N):\n faktor = invertZN(M[j, row], N)\n M[j, :] = (M[j, :] * faktor - M[row, :]) % N\n I[j, :] = (I[j, :] * faktor - I[row, :]) % N\n elif M[j, row] != 0:\n # In Z_N können Nullteiler auftreten, z.B. die 8 in Z_{12}.\n # Um dort eine 0 zu erzeugen, müssen wir mit dem kgV der beiden\n # Zahlen multiplizieren. Da ggt*kgv = mn gilt, können wir dazu\n # den bereits implementierten ggt-Algorithmus nehmen.\n faktor = N * M[j, row] // krypto1.ggT(N, M[j, row])\n M[j, :] = (M[j, :] * faktor) % N\n I[j, :] = (I[j, :] * faktor) % N\n # jetzt haben wir eine obere Dreiecksmatrix. Um daraus eine Diagonalmatrix\n # zu machen, müssen wir nun noch einmal von unten nach oben durchgehen\n # um die Einträge oberhalb der Diagonalen zu Nullen zu machen.\n for row in range(n-1, -1, -1):\n for j in range(row + 1, n):\n faktor = M[row, j]\n M[row, :] = (M[row, :] - faktor*M[j, :]) % N\n I[row, :] = (I[row, :] - faktor*I[j, :]) % N\n return I", "def _pmatrix(kn_u, kn_d, thickness):\n p = np.zeros((kn_u.size, 4, 4), np.complex128)\n\n p0 = np.exp(complex(0, 1) * kn_u * thickness)\n p1 = np.exp(complex(0, 1) * kn_d * thickness)\n\n p[:, 0, 0] = 1 / p0\n p[:, 1, 1] = p0\n p[:, 2, 2] = 1 / p1\n p[:, 3, 3] = p1\n\n return p", "def diagonalsNeg (matrix, cols, rows):\r\n for diagonal in ([(j, i - cols + j + 1) for j in range(cols)] for i in range(cols + rows - 1)):\r\n yield [matrix[i][j] for i, j in diagonal if i >= 0 and j >= 0 and i < cols and j < rows]", "def inverse(self):\n if not self.is_square():\n raise(ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n # TODO - your code here\n inverse = []\n if self.h == 1:\n temp = []\n temp.append(1/self.g[0][0])\n inverse.append(temp)\n else:\n identity_matrix = identity(self.h)\n det_term = 1/self.determinant()\n trace_term = self.trace()\n # implement intermediate scaling step locally\n # trace_x_I = trace_term * identity_matrix\n trace_x_I = []\n for i in range(len(self.g)):\n temp_row = []\n for j in range(len(self.g[i])):\n temp_row.append(trace_term * identity_matrix[i][j])\n trace_x_I.append(temp_row)\n # implement sub-traction locally\n # sub_term = trace_x_I - self.g\n sub_term = []\n for i in range(len(trace_x_I)):\n temp_row = []\n for j in range(len(trace_x_I[i])):\n temp_row.append(trace_x_I[i][j] - self.g[i][j])\n sub_term.append(temp_row)\n # implement final scaling step locally\n # inverse = det_term * sub_term\n inverse = []\n for i in range(len(sub_term)):\n temp_row = []\n for j in range(len(sub_term[i])):\n temp_row.append(det_term * sub_term[i][j])\n inverse.append(temp_row)\n return Matrix(inverse)\n # TODO - your code here", "def inverse(self):\r\n \r\n Mi=mat4()\r\n d=self.determinant()\r\n for i in range(4):\r\n for j in range(4):\r\n sign=1-((i+j)%2)*2\r\n m3=self._submat(i,j)\r\n Mi[j,i]=sign*m3.determinant()/d\r\n return Mi", "def Controlled2(U):\n '''Generalized controlled unitary tensor construction\n Parameters:\n -----------\n U: input tensor which is assumed to be a square Matrix\n\n Returns:\n --------\n Controlled unitary\n\n '''\n shp 
= U.shape\n new_ten = scipy.linalg.block_diag(np.eye(*shp), U)\n return new_ten.reshape(2, shp[0], 2, shp[1], 2, shp[2])", "def boundary_cond_dirichtlet(matriz,Tx1,Tx2,Ty1,Ty2):\n matriz[-1,:] = Tx2\n matriz[:,0] = Ty1\n matriz[:,-1] = Ty2\n matriz[0,:] = Tx1\n return matriz", "def Dmat(numpts, delta=1):\n a = 0.5 / delta * ones(numpts)\n a[0] = 0\n a[-2] = 0\n #b=-2./delta**2*ones(numpts); b[0]=0;b[-1]=0\n c = -0.5 / delta * ones(numpts)\n c[1] = 0\n c[-1] = 0\n return sparse.spdiags([a, c], [-1, 1], numpts, numpts)", "def main():\n diagonals_in_hd()", "def tridiag_matrix(bc_surface_type, upsilon, space_divisions, dx, k, T, h, hc, emissivity, sigma):\n # create tri-diagonal matrix\n A = np.diagflat([-upsilon for i in range(space_divisions - 1)], -1) +\\\n np.diagflat([1 + 2 * upsilon for i in range(space_divisions)]) +\\\n np.diagflat([-upsilon for i in range(space_divisions - 1)], 1)\n\n # adjust matrix depending on the boundary condition at the exposed surface\n if bc_surface_type == \"linear\":\n A[0,0] = 1 + 2*upsilon + 2*upsilon*dx*h/k\n A[0,1] = -2*upsilon\n \n elif bc_surface_type == \"non-linear\":\n A[0,0] = 1 + 2*upsilon + 2*dx*hc*upsilon/k+ 8*emissivity*sigma*dx*upsilon*T[0]**3/k\n A[0,1] = -2*upsilon\n \n # adjust matrix for the back boundary conditions\n A[-1, -2] = - 2 * upsilon\n A[-1, -1] = 1 + 2 * upsilon\n\n return A", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features.todense()", "def _fp32_1_0_2_mc_on_1_last_dim_lt_one_block(loop_cnt, left_data):\n\n # two case:\n # 1. axis_0 * axis_2 > alloc_ub_size // 2, max_core_axis_size = 1\n # 2. axis_0 * axis_2 <= alloc_ub_size // 2, axis_0_loop_cnt = 0, max_core_axis_size != 1\n\n def _fp32_mte_process_lt_one_block(axis_1_lp_index, sub_axis_1):\n \"\"\"\n do transpose for last dim less than one block\n \"\"\"\n\n def _fp32_inner_last_dim_lt_one_block(axis_0_lp_index, sub_axis_0):\n \"\"\"\n inner process of last dim less than one block\n \"\"\"\n\n # move data in\n in_offset = (block_idx * per_core_col_size +\n axis_1_lp_index * max_core_axis_size +\n axis_0_lp_index * max_no_core_axis_size * axis_1) * axis_2\n data_pos_info = (sub_axis_1, sub_axis_0, axis_0, axis_1, axis_2, in_offset)\n _data_move_in_last_dim_lt_one_block(tik_inst, ub_input, data_in, data_pos_info)\n\n # do transpose\n with tik_inst.new_stmt_scope():\n temp_sub_axis_1 = tik_inst.Scalar(\"int64\")\n temp_sub_axis_0 = tik_inst.Scalar(\"int64\")\n data_size_one_block = _get_elment_cnt_one_block(data_in.dtype)\n axis_1_0_2_size = axis_0 * axis_1 * axis_2\n sub_axis_1_0_2_size = sub_axis_1 * sub_axis_0 * axis_2\n\n # to avoid multiple core dirty data\n with tik_inst.if_scope(tik.all(sub_axis_1_0_2_size < data_size_one_block,\n axis_1_0_2_size > data_size_one_block)):\n with tik_inst.if_scope(sub_axis_1 == 1):\n temp_sub_axis_0.set_as(_ceil_div(data_size_one_block, axis_2))\n temp_sub_axis_1.set_as(sub_axis_1)\n with tik_inst.else_scope():\n temp_sub_axis_0.set_as(sub_axis_0)\n temp_sub_axis_1.set_as(_ceil_div(data_size_one_block,\n axis_0 * axis_2))\n with tik_inst.else_scope():\n temp_sub_axis_1.set_as(sub_axis_1)\n temp_sub_axis_0.set_as(sub_axis_0)\n\n sub_dim_size = (temp_sub_axis_1, temp_sub_axis_0, axis_2)\n _transpose_by_2_vnchwconv_not_last_dim(tik_inst, ub_input[ub_offset],\n ub_input, sub_dim_size)\n\n # move data out\n out_offset = ((block_idx * per_core_col_size +\n 
axis_1_lp_index * max_core_axis_size) * axis_0 +\n axis_0_lp_index * max_no_core_axis_size) * axis_2\n data_pos_info = (sub_axis_1, sub_axis_0, axis_0, axis_1, axis_2, out_offset)\n _data_move_out_last_dim_lt_one_block(tik_inst, data_out, ub_input[ub_offset],\n data_pos_info)\n\n with tik_inst.for_range(0, no_core_loop_cnt) as axis_0_lp_idx:\n _fp32_inner_last_dim_lt_one_block(axis_0_lp_idx, max_no_core_axis_size)\n with tik_inst.if_scope(no_core_left > 0):\n _fp32_inner_last_dim_lt_one_block(no_core_loop_cnt, no_core_left)\n\n with tik_inst.for_range(0, loop_cnt) as axis_1_lp_idx:\n _fp32_mte_process_lt_one_block(axis_1_lp_idx, max_core_axis_size)\n with tik_inst.if_scope(left_data > 0):\n _fp32_mte_process_lt_one_block(loop_cnt, left_data)", "def matrix_inversion_identity(R_inv, K, C, T):\n K_inv = np.linalg.inv(K)\n R_invC = np.dot(R_inv, C)\n sub = K_inv + block_dot_AB(C.T, R_invC, T)\n term1 = block_dot_A(R_invC, np.linalg.solve(sub, make_block_diag(R_invC.T, T)), T)\n return make_block_diag(R_inv, T) - term1", "def dense_block(x, filters: int, a=0.01, dr=0.05, depth=4):\n for _ in range(depth):\n xn = conv_block(x, filters, a, dr)\n x = Concatenate(axis=-1)([x, xn])\n\n return x", "def _fp32_mte_process_lt_one_block(axis_1_lp_index, sub_axis_1):\n\n def _fp32_inner_last_dim_lt_one_block(axis_0_lp_index, sub_axis_0):\n \"\"\"\n inner process of last dim less than one block\n \"\"\"\n\n # move data in\n in_offset = (block_idx * per_core_col_size +\n axis_1_lp_index * max_core_axis_size +\n axis_0_lp_index * max_no_core_axis_size * axis_1) * axis_2\n data_pos_info = (sub_axis_1, sub_axis_0, axis_0, axis_1, axis_2, in_offset)\n _data_move_in_last_dim_lt_one_block(tik_inst, ub_input, data_in, data_pos_info)\n\n # do transpose\n with tik_inst.new_stmt_scope():\n temp_sub_axis_1 = tik_inst.Scalar(\"int64\")\n temp_sub_axis_0 = tik_inst.Scalar(\"int64\")\n data_size_one_block = _get_elment_cnt_one_block(data_in.dtype)\n axis_1_0_2_size = axis_0 * axis_1 * axis_2\n sub_axis_1_0_2_size = sub_axis_1 * sub_axis_0 * axis_2\n\n # to avoid multiple core dirty data\n with tik_inst.if_scope(tik.all(sub_axis_1_0_2_size < data_size_one_block,\n axis_1_0_2_size > data_size_one_block)):\n with tik_inst.if_scope(sub_axis_1 == 1):\n temp_sub_axis_0.set_as(_ceil_div(data_size_one_block, axis_2))\n temp_sub_axis_1.set_as(sub_axis_1)\n with tik_inst.else_scope():\n temp_sub_axis_0.set_as(sub_axis_0)\n temp_sub_axis_1.set_as(_ceil_div(data_size_one_block,\n axis_0 * axis_2))\n with tik_inst.else_scope():\n temp_sub_axis_1.set_as(sub_axis_1)\n temp_sub_axis_0.set_as(sub_axis_0)\n\n sub_dim_size = (temp_sub_axis_1, temp_sub_axis_0, axis_2)\n _transpose_by_2_vnchwconv_not_last_dim(tik_inst, ub_input[ub_offset],\n ub_input, sub_dim_size)\n\n # move data out\n out_offset = ((block_idx * per_core_col_size +\n axis_1_lp_index * max_core_axis_size) * axis_0 +\n axis_0_lp_index * max_no_core_axis_size) * axis_2\n data_pos_info = (sub_axis_1, sub_axis_0, axis_0, axis_1, axis_2, out_offset)\n _data_move_out_last_dim_lt_one_block(tik_inst, data_out, ub_input[ub_offset],\n data_pos_info)\n\n with tik_inst.for_range(0, no_core_loop_cnt) as axis_0_lp_idx:\n _fp32_inner_last_dim_lt_one_block(axis_0_lp_idx, max_no_core_axis_size)\n with tik_inst.if_scope(no_core_left > 0):\n _fp32_inner_last_dim_lt_one_block(no_core_loop_cnt, no_core_left)", "def blockshaped(input_suduko_2d):\n h = input_suduko_2d.shape[0]\n return (input_suduko_2d.reshape(h // 3, 3, -1, 3)\n .swapaxes(1, 2)\n .reshape(-1, 3, 3))", "def 
preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features.toarray() # densify -- these are tiny and we don't care", "def __neg__(self):\n #\n # TODO - your code here\n #\n matrix_neg = []\n for i in range(self.h):\n row = []\n for j in range(self.w):\n row.append(0-self.g[i][j])\n matrix_neg.append(row)\n return Matrix(matrix_neg)\n # TODO - your code here", "def make_full_layout(d: AttentionMask) -> np.ndarray:\n\n if not d.is_head_specific:\n u = np.reshape(d.global_layout, [d.n_query_block, d.n_key_block, 1, 1])\n r = product(range(d.n_query_block), range(d.n_key_block))\n v = np.array([d.block_layout(None, 0, i, j, 0) for i, j in r])\n v = np.reshape(v, [d.n_query_block, d.n_key_block, d.block_size, d.block_size])\n\n w = u * v\n w = np.transpose(w, [0, 2, 1, 3])\n w = np.reshape(w, [d.query_context_size, d.key_context_size])\n return w\n else:\n if len(d.global_layout.shape) == 2:\n u = np.reshape(d.global_layout, [1, d.n_query_block, d.n_key_block, 1, 1])\n u = np.tile(u, [d.n_head, 1, 1, 1, 1])\n elif len(d.global_layout.shape) == 3:\n u = np.reshape(d.global_layout, [d.n_head, d.n_query_block, d.n_key_block, 1, 1])\n else:\n raise RuntimeError()\n\n s = product(range(d.n_head), range(d.n_query_block), range(d.n_key_block))\n v = np.array([d.block_layout(None, i, j, k, 0) for i, j, k in s])\n v = np.reshape(v, [d.n_head, d.n_query_block, d.n_key_block, d.block_size, d.block_size])\n\n w = u * v\n w = np.transpose(w, [0, 1, 3, 2, 4])\n w = np.reshape(w, [d.n_head, d.query_context_size, d.key_context_size])\n return w", "def preprocess_adj(adj):\r\n adj_add_diag=adj + sp.eye(adj.shape[0])\r\n adj_normalized = normalize_adj(adj_add_diag)\r\n return adj_normalized.astype(np.float32) #sp.coo_matrix(adj_unnorm)\r", "def _K_diag_computations(self, X):\r\n if self.ARD:\r\n pass\r\n else:\r\n self._K_diag_numer = (X*X).sum(1)*self.weight_variance + self.bias_variance\r\n self._K_diag_denom = self._K_diag_numer+1.\r\n self._K_diag_asin_arg = self._K_diag_numer/self._K_diag_denom\r\n self._K_diag_dvar = four_over_tau*np.arcsin(self._K_diag_asin_arg)", "def undevide_block(res_data, K):\n block_num = res_data.shape[0]\n row = int(np.sqrt(block_num*K*K))\n col = row\n block_row = int(row/K)\n block_col = int(col/K)\n res_img = np.zeros((row, col), dtype='uint8')\n for i in range(block_row):\n for j in range(block_col):\n block_img = unzip_block(res_data[i*block_row+j], K)\n res_img[i*K: (i+1)*K, j*K: (j+1)*K] = block_img\n return res_img", "def init_needleman_wunsch_matrix(self):\r\n empty_matrix = self.empty_matrix() # Building on the previous definition, this will give you an empty matrix\r\n for i in range(len(self.s2)+1):\r\n for j in range(len(self.s1)+1):\r\n empty_matrix[0][i] = -i\r\n empty_matrix[j][0] = -j\r\n return empty_matrix", "def pder_h(z,w,r):\n\n # (B1, B2, .., B_N, 2, 2)\n z_w_matrix = torch.matmul((z-w).unsqueeze(-1), (z-w).unsqueeze(-2))\n\n # (B1, B2, .., B_N)\n factor_two = (r ** 2 - torch.norm(w, dim=-1) ** 2) / torch.norm(z-w, dim=-1) ** 2\n factor_one = - 2 * factor_two / torch.norm(z-w, dim=-1) ** 2\n \n # (B1, B2, .., B_N, 1, 1)\n factor_one = factor_one.unsqueeze(-1).unsqueeze(-1)\n factor_two = factor_two.unsqueeze(-1).unsqueeze(-1)\n \n # list [B1, B2, .., B_N]\n batch_dims = z.shape[:-1]\n\n # (2, 2)\n I = torch.eye(2).to(z.device) # send I to same device as z\n # (B1, B2, .., B_N, 2, 2)\n I = 
I[(None,)*len(batch_dims)].repeat(*batch_dims, 1, 1)\n \n return factor_one * z_w_matrix + factor_two * I", "def smooth_pinv(B, L):\n L = diag(L)\n inv = pinv(concatenate((B, L)))\n return inv[:, :len(B)]", "def slidekernelthroughdiagonal(kernel, matrix):\n size_kernel = kernel.shape[0]\n size_matrix = matrix.shape[0]\n result = np.zeros([size_matrix])\n for i in range(size_matrix):\n # Calculate zero padding needed\n padding_b = -min(i - int(size_kernel/2), 0)\n padding_a = -min(size_matrix - int(i + size_kernel/2), 0)\n matrix_selection = matrix[max(0, i-int(size_kernel/2)):min(size_matrix, i+int(size_kernel/2)),max(0, i-int(size_kernel/2)):min(size_matrix, i+int(size_kernel/2))]\n matrix_padded = np.pad(matrix_selection, [(padding_b, padding_a), (padding_b, padding_a)])\n result[i] = np.sum(matrix_padded*kernel)\n return result", "def transform_lattice(lattice):\n\n for i in range(N+1):\n for j in range(N+1):\n if lattice[i, j] >= 0.5:\n lattice[i, j] = 1/2\n else:\n lattice[i, j] = -1/2\n\n for i in range(N+1):\n lattice[0, i] = lattice[N, i]\n lattice[N+1, i] = lattice[1, i]\n lattice[i, 0] = lattice[i, N]\n lattice[i, N + 1] = lattice[i, 1]\n\n lattice[0, 0] = lattice[N, N]\n lattice[0, N+1] = lattice[N, 1]\n lattice[N+1, 0] = lattice[1, N]\n lattice[N+1, N+1] = lattice[1, 1]\n\n return lattice", "def obtain_ones_in_the_main_diagonal(self):\r\n for row in range(self.SIZE):\r\n self.check_solvability(self.matrix[row][row], self.matrix[row][-1])\r\n self.matrix[row][-1] = self.divide(self.matrix[row][-1], self.matrix[row][row])\r\n self.matrix[row][row] = self.divide(self.matrix[row][row], self.matrix[row][row])", "def diagonal_matrix_tiling(start, stop, bandwidth, edge=0, verbose=False):\n size = stop - start\n tiles = size // bandwidth + bool(size % bandwidth)\n \n # matrix parameters before chunking:\n if verbose:\n print(\"matrix of size {}X{} to be split so that\\n\".format(size,size)+\n \" diagonal region of size {} would be completely\\n\".format(bandwidth)+\n \" covered by the tiling, additionally keeping\\n\"+\n \" a small 'edge' of size w={}, to allow for\\n\".format(edge)+\n \" meaningfull convolution around boundaries.\\n\"+\n \" Resulting number of tiles is {}\".format(tiles-1)+\n \" Non-edge case size of each tile is {}X{}\".format(2*(bandwidth+edge), \n 2*(bandwidth+edge)))\n\n # actual number of tiles is tiles-1\n # by doing range(1, tiles) we are making sure we are processing the \n # upper-left chunk only once:\n for t in range(1, tiles):\n # l = max(0,M*t-M)\n # r = min(L,M*t+M)\n lw = max(0 , bandwidth*(t-1) - edge)\n rw = min(size , bandwidth*(t+1) + edge)\n # don't forget about the 'start' origin:\n yield lw+start, rw+start", "def inv(transform_matrix):\n\n r = transform_matrix[0:3, 0:3]\n t = transform_matrix[0:3, 3]\n t_inv = -1 * r.T.dot(t)\n transform_inv = np.eye(4)\n transform_inv[0:3, 0:3] = r.T\n transform_inv[0:3, 3] = t_inv\n\n return transform_inv", "def diag_inv(A):\n return diag(1. 
/ diag(A))", "def __init__(\n self,\n in_channels=128,\n aux_channels=80,\n channels=64,\n out_channels=1,\n kernel_size=9,\n dilation=2,\n bias=True,\n noise_upsample_scales=[11, 2, 2, 2],\n noise_upsample_activation=\"LeakyReLU\",\n noise_upsample_activation_params={\"negative_slope\": 0.2},\n upsample_scales=[2, 2, 2, 2, 2, 2, 2, 2, 1],\n upsample_mode=\"nearest\",\n gated_function=\"softmax\",\n use_weight_norm=True,\n ):\n super().__init__()\n\n self.in_channels = in_channels\n\n noise_upsample = []\n in_chs = in_channels\n for noise_upsample_scale in noise_upsample_scales:\n # NOTE(kan-bayashi): How should we design noise upsampling part?\n noise_upsample += [\n torch.nn.ConvTranspose1d(\n in_chs,\n channels,\n noise_upsample_scale * 2,\n stride=noise_upsample_scale,\n padding=noise_upsample_scale // 2 + noise_upsample_scale % 2,\n output_padding=noise_upsample_scale % 2,\n bias=bias,\n )\n ]\n noise_upsample += [\n getattr(torch.nn, noise_upsample_activation)(\n **noise_upsample_activation_params\n )\n ]\n in_chs = channels\n self.noise_upsample = torch.nn.Sequential(*noise_upsample)\n self.noise_upsample_factor = np.prod(noise_upsample_scales)\n\n self.blocks = torch.nn.ModuleList()\n aux_chs = aux_channels\n for upsample_scale in upsample_scales:\n self.blocks += [\n TADEResBlock(\n in_channels=channels,\n aux_channels=aux_chs,\n kernel_size=kernel_size,\n dilation=dilation,\n bias=bias,\n upsample_factor=upsample_scale,\n upsample_mode=upsample_mode,\n gated_function=gated_function,\n ),\n ]\n aux_chs = channels\n self.upsample_factor = np.prod(upsample_scales)\n\n self.output_conv = torch.nn.Sequential(\n torch.nn.Conv1d(\n channels,\n out_channels,\n kernel_size,\n 1,\n bias=bias,\n padding=(kernel_size - 1) // 2,\n ),\n torch.nn.Tanh(),\n )\n\n # apply weight norm\n if use_weight_norm:\n self.apply_weight_norm()\n\n # reset parameters\n self.reset_parameters()", "def woodbury_inv(A_diag, U, V, k):\n # Helps with numerics. If A_diag[i, j] == 0, then 1 / 0 == inf.\n SMALL = 1e-12\n A_inv_diag = 1. / (A_diag + SMALL)\n\n I = torch.eye(k, device=cuda.device())\n B_inv = inv(I + ((V * A_inv_diag) @ U))\n\n # We want to perform the operation `U @ B_inv @ V` but need to optimize it:\n # - Computing `tmp1` is fast because it is (p, k) * (k, k).\n # - Computing `tmp2` is slow because it is (p, k) * (k, p).\n tmp1 = U @ B_inv\n tmp2 = torch.einsum('ab,bc->ac', (tmp1, V))\n\n # Use `view` rather than `reshape`. 
The former guarantees that a new tensor\n # is returned.\n tmp3 = A_inv_diag.view(-1, 1) * tmp2\n right = tmp3 * A_inv_diag\n\n # This is a fast version of `diag(A_inv_diag) - right`.\n right = -1 * right\n idx = torch.arange(0, A_diag.size(0), device=cuda.device())\n right[idx, idx] = A_inv_diag + right[idx, idx]\n\n return right", "def _fp32_inner_last_dim_lt_one_block(axis_0_lp_index, sub_axis_0):\n\n # move data in\n in_offset = (block_idx * per_core_col_size +\n axis_1_lp_index * max_core_axis_size +\n axis_0_lp_index * max_no_core_axis_size * axis_1) * axis_2\n data_pos_info = (sub_axis_1, sub_axis_0, axis_0, axis_1, axis_2, in_offset)\n _data_move_in_last_dim_lt_one_block(tik_inst, ub_input, data_in, data_pos_info)\n\n # do transpose\n with tik_inst.new_stmt_scope():\n temp_sub_axis_1 = tik_inst.Scalar(\"int64\")\n temp_sub_axis_0 = tik_inst.Scalar(\"int64\")\n data_size_one_block = _get_elment_cnt_one_block(data_in.dtype)\n axis_1_0_2_size = axis_0 * axis_1 * axis_2\n sub_axis_1_0_2_size = sub_axis_1 * sub_axis_0 * axis_2\n\n # to avoid multiple core dirty data\n with tik_inst.if_scope(tik.all(sub_axis_1_0_2_size < data_size_one_block,\n axis_1_0_2_size > data_size_one_block)):\n with tik_inst.if_scope(sub_axis_1 == 1):\n temp_sub_axis_0.set_as(_ceil_div(data_size_one_block, axis_2))\n temp_sub_axis_1.set_as(sub_axis_1)\n with tik_inst.else_scope():\n temp_sub_axis_0.set_as(sub_axis_0)\n temp_sub_axis_1.set_as(_ceil_div(data_size_one_block,\n axis_0 * axis_2))\n with tik_inst.else_scope():\n temp_sub_axis_1.set_as(sub_axis_1)\n temp_sub_axis_0.set_as(sub_axis_0)\n\n sub_dim_size = (temp_sub_axis_1, temp_sub_axis_0, axis_2)\n _transpose_by_2_vnchwconv_not_last_dim(tik_inst, ub_input[ub_offset],\n ub_input, sub_dim_size)\n\n # move data out\n out_offset = ((block_idx * per_core_col_size +\n axis_1_lp_index * max_core_axis_size) * axis_0 +\n axis_0_lp_index * max_no_core_axis_size) * axis_2\n data_pos_info = (sub_axis_1, sub_axis_0, axis_0, axis_1, axis_2, out_offset)\n _data_move_out_last_dim_lt_one_block(tik_inst, data_out, ub_input[ub_offset],\n data_pos_info)", "def diagonal(step='Metropolis', iters=5000):\n X = mc.Uniform('X', lower=-1., upper=1., value=[0., 0.])\n\n @mc.potential\n def near_diag(X=X):\n if abs(X[0] - X[1]) < .1:\n return 0\n else:\n return -inf\n\n mod = setup_and_sample(vars(), step, iters)\n mod.shape = pl.array([[-1,-1], [-1,-.9], [.9,1], [1,1], [1,.9], [-.9,-1], [-1,-1]])\n mod.true_mean = [0,0]\n mod.true_iqr = ['(-.5,.5)', '(-.5,5)']\n return mod", "def doubleMatrix(mat):\n\n dims = mat.shape\n myZero = np.zeros(dims)\n ans = np.block([[mat, myZero], [myZero, mat]])\n return ans", "def reverseDCT(self, components):\n for cp in components.values():\n for i in range(cp.nr_blocks_ver):\n for j in range(cp.nr_blocks_hor):\n cp.blocks[i][j] = IDCT_matrix(cp.blocks[i][j])", "def Cijkl(C):\n c = np.zeros(shape=(3, 3, 3, 3))\n CC = np.zeros(shape=(9, 9))\n CC[0:6, 0:6] = C[0:6, 0:6]\n CC[6:9, 6:9] = C[3:6, 3:6]\n CC[0:6, 6:9] = C[0:6, 3:6]\n CC[6:9, 0:6] = C[3:6, 0:6]\n\n c[0, 0, 0, 0] = CC[0, 0]\n c[0, 0, 1, 1] = CC[0, 1]\n c[0, 0, 2, 2] = CC[0, 2]\n c[0, 0, 1, 2] = CC[0, 3]\n c[0, 0, 2, 0] = CC[0, 4]\n c[0, 0, 0, 1] = CC[0, 5]\n c[0, 0, 2, 1] = CC[0, 6]\n c[0, 0, 0, 2] = CC[0, 7]\n c[0, 0, 1, 0] = CC[0, 8]\n\n c[1, 1, 0, 0] = CC[1, 0]\n c[1, 1, 1, 1] = CC[1, 1]\n c[1, 1, 2, 2] = CC[1, 2]\n c[1, 1, 1, 2] = CC[1, 3]\n c[1, 1, 2, 0] = CC[1, 4]\n c[1, 1, 0, 1] = CC[1, 5]\n c[1, 1, 2, 1] = CC[1, 6]\n c[1, 1, 0, 2] = CC[1, 7]\n c[1, 1, 1, 0] = CC[1, 8]\n\n c[2, 2, 0, 
0] = CC[2, 0]\n c[2, 2, 1, 1] = CC[2, 1]\n c[2, 2, 2, 2] = CC[2, 2]\n c[2, 2, 1, 2] = CC[2, 3]\n c[2, 2, 2, 0] = CC[2, 4]\n c[2, 2, 0, 1] = CC[2, 5]\n c[2, 2, 2, 1] = CC[2, 6]\n c[2, 2, 0, 2] = CC[2, 7]\n c[2, 2, 1, 0] = CC[2, 8]\n\n c[1, 2, 0, 0] = CC[3, 0]\n c[1, 2, 1, 1] = CC[3, 1]\n c[1, 2, 2, 2] = CC[3, 2]\n c[1, 2, 1, 2] = CC[3, 3]\n c[1, 2, 2, 0] = CC[3, 4]\n c[1, 2, 0, 1] = CC[3, 5]\n c[1, 2, 2, 1] = CC[3, 6]\n c[1, 2, 0, 2] = CC[3, 7]\n c[1, 2, 1, 0] = CC[3, 8]\n\n c[2, 0, 0, 0] = CC[4, 0]\n c[2, 0, 1, 1] = CC[4, 1]\n c[2, 0, 2, 2] = CC[4, 2]\n c[2, 0, 1, 2] = CC[4, 3]\n c[2, 0, 2, 0] = CC[4, 4]\n c[2, 0, 0, 1] = CC[4, 5]\n c[2, 0, 2, 1] = CC[4, 6]\n c[2, 0, 0, 2] = CC[4, 7]\n c[2, 0, 1, 0] = CC[4, 8]\n\n c[0, 1, 0, 0] = CC[5, 0]\n c[0, 1, 1, 1] = CC[5, 1]\n c[0, 1, 2, 2] = CC[5, 2]\n c[0, 1, 1, 2] = CC[5, 3]\n c[0, 1, 2, 0] = CC[5, 4]\n c[0, 1, 0, 1] = CC[5, 5]\n c[0, 1, 2, 1] = CC[5, 6]\n c[0, 1, 0, 2] = CC[5, 7]\n c[0, 1, 1, 0] = CC[5, 8]\n\n c[2, 1, 0, 0] = CC[6, 0]\n c[2, 1, 1, 1] = CC[6, 1]\n c[2, 1, 2, 2] = CC[6, 2]\n c[2, 1, 1, 2] = CC[6, 3]\n c[2, 1, 2, 0] = CC[6, 4]\n c[2, 1, 0, 1] = CC[6, 5]\n c[2, 1, 2, 1] = CC[6, 6]\n c[2, 1, 0, 2] = CC[6, 7]\n c[2, 1, 1, 0] = CC[6, 8]\n\n c[0, 2, 0, 0] = CC[7, 0]\n c[0, 2, 1, 1] = CC[7, 1]\n c[0, 2, 2, 2] = CC[7, 2]\n c[0, 2, 1, 2] = CC[7, 3]\n c[0, 2, 2, 0] = CC[7, 4]\n c[0, 2, 0, 1] = CC[7, 5]\n c[0, 2, 2, 1] = CC[7, 6]\n c[0, 2, 0, 2] = CC[7, 7]\n c[0, 2, 1, 0] = CC[7, 8]\n\n c[1, 0, 0, 0] = CC[8, 0]\n c[1, 0, 1, 1] = CC[8, 1]\n c[1, 0, 2, 2] = CC[8, 2]\n c[1, 0, 1, 2] = CC[8, 3]\n c[1, 0, 2, 0] = CC[8, 4]\n c[1, 0, 0, 1] = CC[8, 5]\n c[1, 0, 2, 1] = CC[8, 6]\n c[1, 0, 0, 2] = CC[8, 7]\n c[1, 0, 1, 0] = CC[8, 8]\n return c", "def _pseudo_inv22sym_vectorized(M):\n assert M.ndim == 3\n assert M.shape[-2:] == (2, 2)\n M_inv = np.empty_like(M)\n prod1 = M[:, 0, 0]*M[:, 1, 1]\n delta = prod1 - M[:, 0, 1]*M[:, 1, 0]\n rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))\n\n if np.all(rank2):\n # Normal 'optimized' flow.\n M_inv[:, 0, 0] = M[:, 1, 1] / delta\n M_inv[:, 0, 1] = -M[:, 0, 1] / delta\n M_inv[:, 1, 0] = -M[:, 1, 0] / delta\n M_inv[:, 1, 1] = M[:, 0, 0] / delta\n else:\n # 'Pathologic' flow.\n # Here we have to deal with 2 sub-cases\n # 1) First sub-case: matrices of rank 2:\n delta = delta[rank2]\n M_inv[rank2, 0, 0] = M[rank2, 1, 1] / delta\n M_inv[rank2, 0, 1] = -M[rank2, 0, 1] / delta\n M_inv[rank2, 1, 0] = -M[rank2, 1, 0] / delta\n M_inv[rank2, 1, 1] = M[rank2, 0, 0] / delta\n # 2) Second sub-case: rank-deficient matrices of rank 0 and 1:\n rank01 = ~rank2\n tr = M[rank01, 0, 0] + M[rank01, 1, 1]\n tr_zeros = (np.abs(tr) < 1.e-8)\n sq_tr_inv = (1.-tr_zeros) / (tr**2+tr_zeros)\n #sq_tr_inv = 1. 
/ tr**2\n M_inv[rank01, 0, 0] = M[rank01, 0, 0] * sq_tr_inv\n M_inv[rank01, 0, 1] = M[rank01, 0, 1] * sq_tr_inv\n M_inv[rank01, 1, 0] = M[rank01, 1, 0] * sq_tr_inv\n M_inv[rank01, 1, 1] = M[rank01, 1, 1] * sq_tr_inv\n\n return M_inv", "def unblock(arr: np.ndarray, n1: int, n2: int, axis1: int = -1, axis2: int = -2, blocksize: bool = False) -> np.ndarray:\n\n \"\"\" test (stackoverflow): Ok, so considering I have N block matrices with bm x bn dimension and want to stack them in a m x n matrix, provided N = m x n, I would then have x.reshape(m,n,bm,bn).swapaxes(1,2).reshape(bm*m,-1)\n \"\"\"\n\n s = np.array(arr.shape)\n if s[axis1] % n1 != 0 or s[axis2] % n2 != 0:\n raise ValueError(f\"{s[axis1]}x{s[axis2]} does not divide by {n1}x{n2}\")\n\n if blocksize:\n n1 = s[axis1] // n1\n n2 = s[axis2] // n2\n\n # this first .split adds a new dimensions on the outside, so if a absolute index\n # is given for the second axis it must be moved one to the right\n if axis2 >= 0:\n _axis2 = axis2 + 1\n else:\n _axis2 = axis2\n\n arr = np.array(np.split(arr, n1, axis1))\n arr = np.array(np.split(arr, n2, _axis2))\n\n inv_blocksize = n1 * n2\n total = s[axis1] * s[axis2]\n s[axis2] = inv_blocksize\n s[axis1] = total // inv_blocksize\n\n return np.reshape(arr, s)", "def preprocess_multicluster(adj, parts, features, y_train, train_mask, num_clusters, block_size, diag_lambda=1):\n features_batches = []\n support_batches = []\n y_train_batches = []\n train_mask_batches = []\n total_nnz = 0\n np.random.shuffle(parts)\n\n for _, st in enumerate(range(0, num_clusters, block_size)):\n pt = parts[st]\n for pt_idx in range(st + 1, min(st + block_size, num_clusters)):\n pt = np.concatenate((pt, parts[pt_idx]), axis=0)\n features_batches.append(features[pt, :])\n y_train_batches.append(y_train[pt, :])\n support_now = adj[pt, :][:, pt]\n support_batches.append(sparse_to_tuple(normalize_adj_diag_enhance(support_now, diag_lambda=diag_lambda)))\n total_nnz += support_now.count_nonzero()\n\n train_pt = []\n for newidx, idx in enumerate(pt):\n if train_mask[idx]:\n train_pt.append(newidx)\n train_mask_batches.append(sample_mask(train_pt, len(pt)))\n\n return features_batches, support_batches, y_train_batches, train_mask_batches", "def zero_diag(mat):\n\n return replace_diag(mat, np.zeros(mat.shape[0]))", "def factorize_tridiag_matrix(A):\n n = len(b)\n # scratch arrays:\n d = zeros(n, 'd'); c = zeros(n, 'd'); m = zeros(n, 'd')\n\n d[0] = A[0,1]\n c[0] = b[0]\n\n for k in iseq(start=1, stop=n-1, inc=1):\n m[k] = A[k,0]/d[k-1]\n d[k] = A[k,1] - m[k]*A[k-1,2]\n c[k] = b[k] - m[k]*c[k-1]\n return c, d", "def row_diag(self):\n rows = self._row_type_2d()\n for i, m in iter_items(self.row_totals):\n rows[i][i] = m\n return self.__class__(rows=rows)", "def symmetric_cubature_fifth_order(dim=1):\n if dim == 1:\n weights = np.array([0.6667, 0.1667, 0.1667])\n sigma_pts = np.array([0., 1.7321, -1.7321])\n elif dim == 2:\n weights = np.array([0.4444, 0.1111, 0.1111, 0.1111, 0.1111, 0.0278, 0.0278, 0.0278, 0.0278])\n sigma_pts = np.block([[0., 1.7321, -1.7321, 0., 0., 1.7321, -1.7321, 1.7321, -1.7321],\n [0., 0., 0., 1.7321, -1.7321, 1.7321, -1.7321, -1.7321, 1.7321]])\n elif dim == 3:\n weights = np.array([0.3333, 0.0556, 0.0556, 0.0556, 0.0556, 0.0556, 0.0556, 0.0278, 0.0278, 0.0278,\n 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278])\n sigma_pts = np.block([[0., 1.7321, -1.7321, 0., 0., 0., 0., 1.7321, -1.7321, 1.7321, -1.7321, 1.7321,\n -1.7321, 1.7321, -1.7321, 0., 0., 0., 0.],\n [0., 0., 0., 1.7321, -1.7321, 
0., 0., 1.7321, -1.7321, -1.7321, 1.7321, 0., 0., 0.,\n 0., 1.7321, -1.7321, 1.7321, -1.7321],\n [0., 0., 0., 0., 0., 1.7321, -1.7321, 0., 0., 0., 0., 1.7321, -1.7321, -1.7321,\n 1.7321, 1.7321, -1.7321, -1.7321, 1.7321]])\n elif dim == 6:\n weights = np.array([0.6667, -0.1111, -0.1111, -0.1111, -0.1111, -0.1111, -0.1111, -0.1111, -0.1111, -0.1111,\n -0.1111, -0.1111, -0.1111, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278,\n 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278,\n 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278,\n 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278,\n 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278,\n 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278, 0.0278])\n sigma_pts = np.block([[\n 0., 1.7321, -1.7321, 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 1.7321, -1.7321, 1.7321, -1.7321, 1.7321, -1.7321, 1.7321, -1.7321, 1.7321, -1.7321, 1.7321,\n -1.7321, 1.7321, -1.7321, 1.7321, -1.7321, 1.7321, -1.7321, 1.7321, -1.7321, 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0.],\n [0., 0., 0., 1.7321, -1.7321, 0., 0., 0., 0., 0., 0., 0.,\n 0., 1.7321, -1.7321, -1.7321, 1.7321, 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.7321, -1.7321, 1.7321,\n -1.7321, 1.7321, -1.7321, 1.7321, -1.7321, 1.7321, -1.7321, 1.7321, -1.7321, 1.7321, -1.7321, 1.7321,\n -1.7321, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0.],\n [0., 0., 0., 0., 0., 1.7321, -1.7321, 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 1.7321, -1.7321, -1.7321, 1.7321, 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.7321, -1.7321, 1.7321,\n 1.7321, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 1.7321, -1.7321, 1.7321, -1.7321, 1.7321, -1.7321, 1.7321, -1.7321, 1.7321, -1.7321, 1.7321,\n -1.7321, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0.],\n [0., 0., 0., 0., 0., 0., 0., 1.7321, -1.7321, 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.7321, -1.7321, 1.7321,\n 1.7321, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 1.7321, -1.7321, -1.7321, 1.7321, 0., 0., 0., 0., 0., 0., 0.,\n 0., 1.7321, -1.7321, -1.7321, 1.7321, 0., 0., 0., 0., 0., 0., 0.,\n 0., 1.7321, -1.7321, -1.7321, 1.7321, 1.7321, -1.7321, 1.7321, -1.7321, 0., 0., 0.,\n 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.7321, -1.7321, 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 1.7321, -1.7321, -1.7321, 1.7321, 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 1.7321, -1.7321, -1.7321, 1.7321, 0., 0., 0.,\n 0., 0., 0., 0., 0., 1.7321, -1.7321, -1.7321, 1.7321, 0., 0., 0.,\n 0., 1.7321, -1.7321, -1.7321, 1.7321, 0., 0., 0., 0., 1.7321, -1.7321, 1.7321,\n -1.7321],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.7321,\n -1.7321, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 1.7321, -1.7321, -1.7321, 1.7321, 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.7321, -1.7321, -1.7321,\n 1.7321, 0., 0., 0., 0., 0., 0., 0., 0., 1.7321, -1.7321, -1.7321,\n 1.7321, 0., 0., 0., 0., 1.7321, -1.7321, -1.7321, 1.7321, 1.7321, -1.7321, -1.7321,\n 1.7321]\n ])\n\n # else:\n # # The weights and sigma-points from McNamee & Stenger\n # I0 = 1.\n # I2 = 1.\n # I4 = 3.\n # I22 = 1.\n # u = np.array(np.sqrt(I4 / I2))\n # A0 = I0 - dim * (I2 / I4) ** 2 * (I4 - 0.5 * (dim - 1) * 
I22)\n # A1 = 0.5 * (I2 / I4) ** 2 * (I4 - (dim - 1) * I22)\n # A11 = 0.25 * (I2 / I4) ** 2 * I22\n # U0 = sym_set(dim)\n # U1 = sym_set(dim, u)\n # U2 = sym_set(dim, np.block([u, u]))\n # sigma_pts = np.block([U0, U1, U2])\n # weights = np.block([A0 * np.ones([1, U0.shape[1]]),\n # A1 * np.ones([1, U1.shape[1]]),\n # A11 * np.ones([1, U2.shape[1]])])\n return sigma_pts, weights", "def diagonalize(width,height):\r\n A = createBoard(height, width) \r\n \r\n for row in range(height):\r\n for col in range(width):\r\n if row == col:\r\n A[row][col] = 1\r\n else:\r\n A[row][col] = 0 \r\n\r\n return A", "def matI(a):\n shape=matShape(a)\n if shape[0]!=shape[1]: raise ValueError\n n=shape[0]\n ret=matZeros((n,n*2))\n for i in range(n):\n for j in range(n):\n matSet(ret,i,j,matGet(a,i,j))\n for i in range(n):\n matSet(ret,i,i+n,1)\n for row in range(n):\n rm=row\n ap=abs(matGet(ret,rm,row))\n for rint in range(row+1,n):\n p=abs(matGet(ret,rint,row))\n if ap<p:\n ap=p\n rm=rint\n if 0.000000001 > ap:\n return matCopy(a) # Not invertible\n di=matGet(ret,rm,row)\n if rm!=row:\n for i in range(n*2):\n t=matGet(ret,rm,i)\n matSet(ret,rm,i,matGet(ret,row,i))\n matSet(ret,row,i,t)\n idi=1.0/di\n for rint in range(row+1,n):\n f=idi*matGet(ret,rint,row)\n if f!=0:\n for co in range(row,n*2):\n matSet(ret,rint,co,matGet(ret,rint,co)-f*matGet(ret,row,co))\n row=n-1\n while row>=0:\n ic=1.0/matGet(ret,row,row)\n for rint in range(row):\n icx=ic*matGet(ret,rint,row)\n if icx!=0:\n for co in range(row, n*2):\n matSet(ret,rint,co,matGet(ret,rint,co)-icx*matGet(ret,row,co))\n matSet(ret,row,row,ic*matGet(ret,row,row))\n for co in range(n,n*2):\n matSet(ret,row,co,ic*matGet(ret,row,co))\n row-=1\n return matPart(ret,0,n,n,n*2)", "def _init_transformation_matrix(self):\n # Set up basic transformation matrix\n c_transform = np.zeros((self.n_beads, self.n_beads))\n\n # Get auxiliary array with bead indices\n n = np.arange(1, self.n_beads + 1)\n\n # for k = 0\n c_transform[0, :] = 1.0\n\n for k in range(1, self.n_beads // 2 + 1):\n c_transform[k, :] = np.sqrt(2) * np.cos(2 * np.pi * k * n / self.n_beads)\n\n for k in range(self.n_beads // 2 + 1, self.n_beads):\n c_transform[k, :] = np.sqrt(2) * np.sin(2 * np.pi * k * n / self.n_beads)\n\n if self.n_beads % 2 == 0:\n c_transform[self.n_beads // 2, :] = (-1) ** n\n\n # Since matrix is initialized as C(k,n) does not need to be transposed\n c_transform /= np.sqrt(self.n_beads)\n c_transform = torch.from_numpy(c_transform)\n\n return c_transform", "def integration_matrix(grid):\n I_blocks = []\n\n for iseg in range(grid.num_segments):\n i1, i2 = grid.subset_segment_indices['all'][iseg, :]\n indices = grid.subset_node_indices['all'][i1:i2]\n nodes_given = grid.node_stau[indices]\n\n i1, i2 = grid.subset_segment_indices['all'][iseg, :]\n indices = grid.subset_node_indices['all'][i1:i2]\n nodes_eval = grid.node_stau[indices][1:]\n\n _, D_block = lagrange_matrices(nodes_given, nodes_eval)\n I_block = np.linalg.inv(D_block[:, 1:])\n I_blocks.append(I_block)\n\n I = block_diag(*I_blocks)\n\n return I" ]
[ "0.6073989", "0.5890842", "0.57211554", "0.5672026", "0.5595262", "0.5567827", "0.5543956", "0.5539411", "0.5534139", "0.5419887", "0.537693", "0.5354077", "0.5354077", "0.53476894", "0.53476274", "0.52978414", "0.52978414", "0.5297459", "0.5286128", "0.52755713", "0.5268031", "0.5267875", "0.52625495", "0.5254548", "0.52498716", "0.5246239", "0.5237058", "0.5230206", "0.5227142", "0.5221388", "0.5206486", "0.5205415", "0.51688254", "0.5165119", "0.5157382", "0.5154611", "0.5143529", "0.51368636", "0.5116382", "0.51098496", "0.51093656", "0.51078176", "0.5106161", "0.50986594", "0.50838494", "0.5079178", "0.5078867", "0.5077355", "0.50745153", "0.5064583", "0.50530475", "0.50485355", "0.5045189", "0.50416046", "0.5019293", "0.5017328", "0.50140357", "0.5012782", "0.50108826", "0.5000392", "0.49944755", "0.49935907", "0.49930122", "0.49875373", "0.49858287", "0.49767476", "0.49719182", "0.49700746", "0.4966093", "0.4961556", "0.49604088", "0.4959202", "0.49423248", "0.49388304", "0.49318805", "0.49262622", "0.4921118", "0.49197397", "0.49195343", "0.49131888", "0.49116904", "0.49077418", "0.4904504", "0.48998493", "0.48949963", "0.4892632", "0.48862797", "0.4886148", "0.48684984", "0.48661005", "0.48639297", "0.48606998", "0.4859198", "0.48547086", "0.4851797", "0.48501384", "0.48458752", "0.4843963", "0.4842749", "0.4842339" ]
0.55643
6
matrix multiplication with the inverse of a block-diagonal matrix having Toeplitz blocks. y = T u. Analogous to toeplitz_inverse_multiplication()
def bd_toeplitz_inverse_multiplication(u, *arrs): 

    y = zeros(shape(u))
    n_start = 0
    n_end = 0
    for t in arrs:
        n_start = n_end
        n_end += len(t[3]) # len(t[3]) is the length of the block
        y[n_start:n_end] = toeplitz_inverse_multiplication(u[n_start:n_end], *t)
    assert len(y) == n_end
    return y
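A minimal usage sketch for the function above, not part of the dataset record itself: it builds one prep tuple per Toeplitz block with toeplitz_inverse_multiplication_prep (quoted among the negatives of this record), whose fourth element D_phi has the block's length — which is exactly what the loop reads via len(t[3]) — and then applies the block-diagonal inverse to a stacked right-hand side. It assumes both quoted functions and their numpy-style helpers are importable in the same scope; the block sizes and values are illustrative only.

import numpy as np

# Assumes bd_toeplitz_inverse_multiplication and
# toeplitz_inverse_multiplication_prep (both quoted in this record)
# are defined in the enclosing module.

# First columns of two symmetric Toeplitz blocks, of sizes 3 and 4.
c1 = np.array([2.0, 0.5, 0.1])
c2 = np.array([3.0, 1.0, 0.2, 0.05])

# One prep tuple per block; t[3] is D_phi, whose length equals the block size.
arrs = [toeplitz_inverse_multiplication_prep(c) for c in (c1, c2)]

# Stacked right-hand side of length 3 + 4; y holds T^{-1} u computed block by block.
u = np.arange(7, dtype=float)
y = bd_toeplitz_inverse_multiplication(u, *arrs)
print(y.shape)  # (7,)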
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def toeplitz_inverse_multiplication_prep(T_column):\n \n phi=1\n psi=2\n assert phi != 0\n assert psi != 0\n assert phi != psi\n \n n = len(T_column)\n \n x = levinson(T_column, np.concatenate( (np.array([1]), np.zeros((n-1,))) ) )\n y = levinson(T_column, np.concatenate( (np.zeros((n-1,)), np.array([1])) ) )\n\n \n \n x_0 = x[0]\n \n D_phi = (phi**(1/n))**np.arange(0,n)\n D_psi = (psi**(1/n))**np.arange(0,n)\n\n Lambda_1 = fft(D_psi*x)\n Lambda_2 = fft(D_phi*np.concatenate(([phi*y[-1]], y[0:-1])))\n Lambda_3 = fft(D_psi*np.concatenate(([psi*y[-1]], y[0:-1])))\n Lambda_4 = fft(D_phi*x)\n \n return (x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4)", "def inv(T):\n K, L = T.shape[1:3]\n squ_matrix = np.einsum('ijkl->ikjl', T).reshape((K*L, K*L),order='F')\n t = np.linalg.inv(squ_matrix)\n return np.einsum('ijkl->ikjl', t.reshape((K,L,K,L), order='F'))", "def inverse_matrice(T):\n a,b,c,d = T[0][0],T[0][1],T[1][0],T[1][1]\n det = a*d-b*c\n aa,bb,cc,dd = d/det,-b/det,-c/det,a/det\n Tinv = [[aa,bb],[cc,dd]]\n return Tinv", "def inverse(self):\n if not self.is_square():\n raise(ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n # TODO - your code here\n inverse = []\n if self.h == 1:\n temp = []\n temp.append(1/self.g[0][0])\n inverse.append(temp)\n else:\n identity_matrix = identity(self.h)\n det_term = 1/self.determinant()\n trace_term = self.trace()\n # implement intermediate scaling step locally\n # trace_x_I = trace_term * identity_matrix\n trace_x_I = []\n for i in range(len(self.g)):\n temp_row = []\n for j in range(len(self.g[i])):\n temp_row.append(trace_term * identity_matrix[i][j])\n trace_x_I.append(temp_row)\n # implement sub-traction locally\n # sub_term = trace_x_I - self.g\n sub_term = []\n for i in range(len(trace_x_I)):\n temp_row = []\n for j in range(len(trace_x_I[i])):\n temp_row.append(trace_x_I[i][j] - self.g[i][j])\n sub_term.append(temp_row)\n # implement final scaling step locally\n # inverse = det_term * sub_term\n inverse = []\n for i in range(len(sub_term)):\n temp_row = []\n for j in range(len(sub_term[i])):\n temp_row.append(det_term * sub_term[i][j])\n inverse.append(temp_row)\n return Matrix(inverse)\n # TODO - your code here", "def getInverseMatrix(self) -> CMatrix4:\n ...", "def inverse(self):\r\n \r\n Mi=mat4()\r\n d=self.determinant()\r\n for i in range(4):\r\n for j in range(4):\r\n sign=1-((i+j)%2)*2\r\n m3=self._submat(i,j)\r\n Mi[j,i]=sign*m3.determinant()/d\r\n return Mi", "def inverse_basis(T, dimensions, t):\n B = basis(T, dimensions, t)\n return inv(B.T.dot(B)).dot(B.T)", "def toeplitz_inverse_multiplication(u, x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4):\n\n y = fft(D_phi*u)\n a = Lambda_1*fft(D_psi*(1/D_phi)*ifft(Lambda_2*y))\n b = Lambda_3*fft(D_psi*(1/D_phi)*ifft(Lambda_4*y))\n y = (1/D_psi)*real(ifft(a-b))/(x_0*(phi-psi))\n \n return y", "def inverse(self, y):\n device = y.device\n return t.einsum('ij,k,kj->ik', y, 1. / t.sqrt(self.eig).to(device), self.rot.to(device))", "def inverse(self) -> 'Matrix':\n num_R, num_C = self.shape()\n assert num_R == num_C, f\"Must be a square matrix. This one is {self.shape()}.\"\n # -------------------------------------------------------\n # TODO: You write this one.\n\n # 1) Construct the minor_matrix. 
Feel free to make this a separate method.\n minor_matrix_times_cofactor = Matrix.zeros(self.shape())\n\n for i in range (num_R):\n for j in range(num_C):\n minor_matrix_times_cofactor.mat[i][j] = self.get_minor(i,j).determinant() * (-1)**(i+j)\n\n minor_matrix_times_cofactor.display(message=\"minor\")\n # 2) Calculate the determinant, either by calling the determinant() method or by using the minor_matrix (faster)\n det = 0\n for i in range (num_R):\n det += self.mat[i][0] * minor_matrix_times_cofactor.mat[i][0]\n #print (f\"determinant: {self.determinant()}\")\n # 3) The inverse is the transpose of the minor matrix, divided by the determinant. Make sure that the determinant\n # isn't zero!\n if det == 0:\n return None\n return minor_matrix_times_cofactor.transpose().times(1/det)\n\n return Matrix([[\"Not yet written\"]]) # remove this when you add your code.\n # -------------------------------------------------------", "def _get_inv(self):\n m,d = self.B.shape\n Im = np.eye(m)\n Id = np.eye(d)\n BBt = self.B@self.B.T\n I_BBt_inv = np.linalg.pinv(Im + BBt)\n \n return (1/self.alpha)*(Id - self.B.T@( I_BBt_inv@self.B/self.alpha))", "def invert(self):\n if self.m != self.n:\n raise exc.LinearAlgebraError(\"cannot invert a non-square matrix\")\n if self.determinant == 0:\n raise exc.LinearAlgebraError(\"cannot invert a singular matrix\")\n # TODO: implement block matrices in their own method\n block_rows = [r1 + r2 for r1, r2 in\n zip(self.data, self.makeIdentity(self.m).data)]\n inverse_block = Matrix.fromRows(block_rows).row_reduce()\n return inverse_block.subset([i for i in range(self.m)],\n [j + self.n for j in range(self.n)])", "def _inverse(self, y):\n d = self._compute_shared(y=y)\n rely = y - d.y_k # tf.where(d.out_of_bounds, tf.zeros_like(y), y - d.y_k)\n term2 = rely * (d.d_kp1 + d.d_k - 2 * d.s_k)\n # These terms are the a, b, c terms of the quadratic formula.\n a = d.h_k * (d.s_k - d.d_k) + term2\n b = d.h_k * d.d_k - term2\n c = -d.s_k * rely\n # The expression used here has better numerical behavior for small 4*a*c.\n relx = tf.where(\n tf.equal(rely, 0), tf.zeros_like(a),\n (2 * c) / (-b - tf.sqrt(b**2 - 4 * a * c)))\n return relx * d.w_k + d.x_k #tf.where(d.out_of_bounds, y, relx * d.w_k + d.x_k)", "def inverse(self):\n self.check_square()\n\n\n N = self.rows\n\n inverse = make_matrix(N, N)\n\n # Solve on a per-column basis using Ax = b formalism\n for j in range(N):\n b = make_matrix(N, 1)\n b[j, 0] = 1\n\n x = self.solve_linear_system(b)\n\n for i in range(N):\n inverse[i, j] = x[i, 0]\n\n return inverse", "def inv(transform_matrix):\n\n r = transform_matrix[0:3, 0:3]\n t = transform_matrix[0:3, 3]\n t_inv = -1 * r.T.dot(t)\n transform_inv = np.eye(4)\n transform_inv[0:3, 0:3] = r.T\n transform_inv[0:3, 3] = t_inv\n\n return transform_inv", "def invertMatrixZN(M, N):\n n = M.shape[0] # shape = (nzeilen, nspalten), also shape[0] = nzeilen\n M = M.copy() # nicht an der Originalmatrix rumspielen\n I = np.identity(n, int) # Einheitsmatrix -> wird später das Ergebnis\n for row in range(n):\n if not invertierbar(M[row, row], N):\n # müssen Zeilen tauschen\n for j in range(row+1, n):\n if invertierbar(M[j, row], N):\n tmp = M[row, :].copy()\n M[row, :] = M[j, :]\n M[j, :] = tmp\n tmp = I[row, :].copy()\n I[row, :] = I[j, :]\n I[j, :] = tmp\n break\n else:\n # hier kommen wir hin wenn die for-Schleife nicht durch ein\n # break beendet wurde, also keine geeignete Zeile zum Tauschen\n # existiert\n raise ValueError(\"Matrix nicht invertierbar\")\n # Zeile mit dem Inversen des 
Pivot-Elements multiplizieren, um eine 1\n # auf der Diagonalen zu erreichen\n faktor = invertZN(M[row, row], N)\n M[row, :] = (M[row, :] * faktor) % N\n I[row, :] = (I[row, :] * faktor) % N\n \n # Nullen unterhalb des aktuellen Pivots erzeugen\n for j in range(row + 1, n):\n if invertierbar(M[j, row], N):\n faktor = invertZN(M[j, row], N)\n M[j, :] = (M[j, :] * faktor - M[row, :]) % N\n I[j, :] = (I[j, :] * faktor - I[row, :]) % N\n elif M[j, row] != 0:\n # In Z_N können Nullteiler auftreten, z.B. die 8 in Z_{12}.\n # Um dort eine 0 zu erzeugen, müssen wir mit dem kgV der beiden\n # Zahlen multiplizieren. Da ggt*kgv = mn gilt, können wir dazu\n # den bereits implementierten ggt-Algorithmus nehmen.\n faktor = N * M[j, row] // krypto1.ggT(N, M[j, row])\n M[j, :] = (M[j, :] * faktor) % N\n I[j, :] = (I[j, :] * faktor) % N\n # jetzt haben wir eine obere Dreiecksmatrix. Um daraus eine Diagonalmatrix\n # zu machen, müssen wir nun noch einmal von unten nach oben durchgehen\n # um die Einträge oberhalb der Diagonalen zu Nullen zu machen.\n for row in range(n-1, -1, -1):\n for j in range(row + 1, n):\n faktor = M[row, j]\n M[row, :] = (M[row, :] - faktor*M[j, :]) % N\n I[row, :] = (I[row, :] - faktor*I[j, :]) % N\n return I", "def _z2matmul(self, left, right):\n prod = np.mod(np.dot(left, right), 2)\n return prod", "def inverse(self):\n if self.determinant() != 0:\n ops = reduce_to_red_echelon(self.data.copy(), True)[1]\n matrix = identity_matrix(self.n_rows).data\n \n if ops:\n if isinstance(ops[0], str):\n ops = [ops]\n \n for op in ops:\n if op[0] == 'swap':\n matrix = row_swap(matrix, op[1], op[2])\n elif op[0] == 'multiplication':\n matrix = row_multiply(matrix, op[1], op[2])\n elif op[0] == 'subtract':\n matrix = row_subtract(matrix, op[1], op[2], op[3])\n else:\n raise ValueError('Row operation not recognized')\n else:\n raise ValueError('Matrix has a determinant of 0 and is not invertible')\n return Matrix(matrix)", "def inverse_cayley_transform(z: torch.Tensor) -> torch.Tensor:\n identity = identity_like(z)\n i_identity = multiply_by_i(identity)\n\n z_minus_id = z - i_identity\n inv_z_plus_id = inverse(z + i_identity)\n return z_minus_id @ inv_z_plus_id", "def inverse(self, ys):\n with torch.no_grad():\n xs = torch.matmul(ys, torch.diag(torch.reciprocal(torch.exp(self.scaling_diag))))\n xs = self.layer4.inverse(xs)\n xs = self.layer3.inverse(xs)\n xs = self.layer2.inverse(xs)\n xs = self.layer1.inverse(xs)\n return xs", "def inverse(self):\n # TODO\n # detA\n if not self.is_square():\n raise(\n ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(\n NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n\n mD = self.determinant()\n if self.h == 1:\n if self.g[0][0] = 0:\n raise(NotImplementedError,\n \"The 1x1 Matrix contains 0 can't inverse\")\n else:\n return [[1 / self.g[0][0]]] \n for i in range(self.h): # Calculates the inverse of a 2x2 Matrix.\n my_Matrix = zeroes(2, 2)\n my_Matrix.g[1][1] = self.g[0][0] / mD\n my_Matrix.g[0][0] = self.g[1][1] / mD\n my_Matrix.g[0][1] = - self.g[0][1] / mD\n my_Matrix.g[1][0] = - self.g[1][0] / mD\n return my_Matrix\n\n # trace A\n # 与矩阵TraceA * I identity 单位矩阵", "def ComputeInverseInnerOrientation(self):\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n mat = np.array([[a1[0], a2[0]], 
[b1[0], b2[0]]])\n mat = la.inv(mat)\n\n return np.array([a0[0], b0[0], mat[0, 0], mat[0, 1], mat[1, 0], mat[1, 1]]).T", "def inv(self, Am):\r\n # Section 1: MAmke sure Am cAmn be inverted.\r\n self.check_squareness(Am)\r\n self.check_non_singular(Am)\r\n \r\n # Section 2: MAmke copies of Am & I, AmM & IM, to use for row ops\r\n n = len(Am)\r\n AmM = self.copy_matrix(Am)\r\n I = self.identity_matrix(n)\r\n IM = self.copy_matrix(I)\r\n \r\n # Section 3: Perform row operAmtions\r\n indices = list(range(n)) # to Amllow flexible row referencing ***\r\n for fd in range(n): # fd stAmnds for focus diAmgonAml\r\n fdScAmler = 1.0 / AmM[fd][fd]\r\n # FIRST: scAmle fd row with fd inverse. \r\n for j in range(n): # Use j to indicAmte column looping.\r\n AmM[fd][j] *= fdScAmler\r\n IM[fd][j] *= fdScAmler\r\n # SECOND: operAmte on Amll rows except fd row Ams follows:\r\n for i in indices[0:fd] + indices[fd+1:]: \r\n # *** skip row with fd in it.\r\n crScAmler = AmM[i][fd] # cr stAmnds for \"current row\".\r\n for j in range(n): \r\n # cr - crScAmler * fdRow, but one element Amt Am time.\r\n AmM[i][j] = AmM[i][j] - crScAmler * AmM[fd][j]\r\n IM[i][j] = IM[i][j] - crScAmler * IM[fd][j]\r\n \r\n return IM", "def inv(self):\n inv = np.linalg.inv(self._mat)\n return MoebTr(inv[0][0], inv[0][1], inv[1][0], inv[1][1])", "def modular_inverse(self):\n i = gmpy2.invert(self.c2, self.n)\n mx = pow(self.c1, self.a, self.n)\n my = pow(i, int(-self.b), self.n)\n self.m= mx * my % self.n", "def get_invntt_operator(self):\n\n\n Operator = []\n invntt_qubic = self.qubic.get_invntt_operator()\n R_qubic = ReshapeOperator(invntt_qubic.shapeout, invntt_qubic.shape[0])\n Operator.append(R_qubic(invntt_qubic(R_qubic.T)))\n\n invntt_planck = self.planck.get_invntt_operator()\n R_planck = ReshapeOperator(invntt_planck.shapeout, invntt_planck.shape[0])\n Operator.append(R_planck(invntt_planck(R_planck.T)))\n\n return BlockDiagonalOperator(Operator, axisout=0)", "def inverse(self):\n if not self.is_square():\n raise(ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n\n # TODO - your code here\n if self.h == 1:\n inverse = [[1/self.g[0][0]]];\n else:\n a = self.g[0][0];\n b = self.g[0][1];\n c = self.g[1][0];\n d = self.g[1][1];\n if(a*d==b*c):\n raise ValueError('matrix does not have a inverse!');\n else:\n weigh = 1/(a*d-b*c);\n inverse = [[weigh*d,weigh*-1*b],[weigh*-1*c,weigh*a]];\n return Matrix(inverse);", "def inverse(self: Float[LinearOperator, \"*batch N N\"]) -> Float[LinearOperator, \"*batch N N\"]:\n return self.__class__(self._diag.reciprocal())", "def invert(self):\n\n if self.rows != self.columns:\n raise ValueError(\"Matrix must be square to invert\")\n\n A, operations = self.to_reduced_row_echelon()\n if not A.is_identity():\n return 0\n\n # If A was reduced to the identity matrix, then the same set of operations will take I to the inverse of A.\n # [A I] -> [I A^(-1)]\n\n I = IdentityMatrix(size = self.rows)\n for operation in operations:\n func = I.__getattribute__(operation[0])\n args = operation[1:]\n func(*args)\n\n return I", "def inverse(self: Float[LinearOperator, \"*batch N N\"]) -> Float[LinearOperator, \"*batch N N\"]:\n return ConstantDiagLinearOperator(self.diag_values.reciprocal(), diag_shape=self.diag_shape)", "def inverse_rigid_trans(Tr): \n inv_Tr = np.zeros_like(Tr) # 3x4\n inv_Tr[0:3,0:3] = np.transpose(Tr[0:3,0:3])\n inv_Tr[0:3,3] = np.dot(-np.transpose(Tr[0:3,0:3]), 
Tr[0:3,3])\n return inv_Tr", "def intrinsic_matrix_inv(self) -> np.ndarray:\n\n # determinant of top left of intrinsic matrix\n tldet = self.kx * self.ky\n\n return np.array([[1 / self.kx, -self.kxy / tldet, (self.py * self.kxy - self.ky * self.px) / tldet],\n [0, 1 / self.ky, -self.py / self.ky]])", "def inv(in_A):\n Q,R = qr(in_A)\n QT = Q.T\n N = shape(in_A)[0]\n \n for n in range(N-1,-1,-1):\n Rnn = R[n,n]\n R[n,:] /= Rnn\n QT[n,:] /= Rnn\n for m in range(n+1,N):\n Rnm = R[n,m]\n R[n,m] = 0\n QT[n,:] -= QT[m,:]*Rnm\n\n return QT", "def python_nonsquare_matrix_mult(matrix):\n\n transposed_matrix = np.zeros([matrix.shape[1],matrix.shape[0]])\n start = time.time()\n # for i in range(matrix.shape[0]):\n # for j in range(matrix.shape[1]):\n # transposed_matrix[j,i] = matrix[i,j]\n\n transposed_matrix = np.transpose(matrix)\n product = matrix.dot(transposed_matrix)\n\n # transposed_matrix = np.transpose(matrix)\n end = time.time()-start\n\n # print(\"Python Golden Transpose: %s\" % product)\n # print('python transpose time: %.2E' % end)\n return [product, end]", "def toeplitz_multiplication(u, c, r=None):\n n = len(u)\n if r is None:\n r = c\n u1 = zeros((2*n))\n u1[0:n] = u\n \n c = np.concatenate((c, [0], r[-1:0:-1])) \n \n y1 = circulant_multiplication(u1, c)\n \n return y1[0:n]", "def bd_toeplitz_inverse_multiplication_prep(*arrs):\n \n t = []\n for c in arrs: # loop over each block\n t.append(toeplitz_inverse_multiplication_prep(c))\n return tuple(t)", "def inverse(self):\n # find the determinant of the matrix\n determinant = self.determinant()\n # find the matrix of minors of the matrix\n matrix_of_minors = self.matrix_of_minors()\n # find the cofactor of the matrix of minors\n cofactor_matrix = self.cofactor_matrix(matrix_of_minors)\n # find the transpose of the cofactor matrix\n transpose_cofactor_matrix = self.transpose(cofactor_matrix)\n # find the adjugate (inverse) matrix\n inverse_matrix = self.adjugate_matrix(determinant, transpose_cofactor_matrix)\n\n return inverse_matrix", "def inverseN(self):\r\n result = Matrix(self.rows, self.columns)\r\n for r in range(self.rows):\r\n for c in range(self.columns):\r\n result.mat[r][c] = self.cofactor(r, c)\r\n result.out()\r\n result = result.transpose()\r\n det = self.determinant()\r\n print(\"1/(\" + str(det) + \")\")\r\n result.out()\r\n return result", "def unwhiten(self, U, A, m):\n X = np.matmul(A, U.T).T\n X += m\n\n return X", "def to_compound_symmetric(z: torch.Tensor) -> torch.Tensor:\n a, b = z.real, z.imag\n return block_matrix([[a, b], [b, -a]])", "def matrix_inversion_identity(R_inv, K, C, T):\n K_inv = np.linalg.inv(K)\n R_invC = np.dot(R_inv, C)\n sub = K_inv + block_dot_AB(C.T, R_invC, T)\n term1 = block_dot_A(R_invC, np.linalg.solve(sub, make_block_diag(R_invC.T, T)), T)\n return make_block_diag(R_inv, T) - term1", "def new_basis(abc, lattice):\n return np.dot(abc.T, lattice.inv_matrix.T)", "def lu(matrix):\n SIZE = matrix.shape[0]\n BS = np.BLOCKSIZE\n\n if matrix.shape[0] != matrix.shape[0]:\n raise Exception(\"LU only supports squared matricis\")\n if not matrix.dist():\n raise Exception(\"The matrix is not distributed\")\n\n if(SIZE % np.BLOCKSIZE != 0):\n raise Exception(\"The matrix dimensions must be divisible \"\\\n \"with np.BLOCKSIZE(%d)\"%np.BLOCKSIZE)\n\n (prow,pcol) = matrix.pgrid()\n A = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True);A += matrix\n L = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True)\n U = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True)\n tmpL = np.zeros((SIZE,SIZE), 
dtype=matrix.dtype, dist=True)\n tmpU = np.zeros((SIZE,SIZE), dtype=matrix.dtype, dist=True)\n for k in xrange(0,SIZE,BS):\n bs = min(BS,SIZE - k) #Current block size\n kb = k / BS # k as block index\n\n #Compute vertical multiplier\n slice = ((kb,kb+1),(kb,kb+1))\n for a,l,u in zip(A.blocks(slice), L.blocks(slice), U.blocks(slice)):\n (p,tl,tu) = linalg.lu(a)\n if not (np.diag(p) == 1).all():#We do not support pivoting\n raise Exception(\"Pivoting was needed!\")\n #There seems to be a transpose bug in SciPy's LU\n l[:] = tl.T\n u[:] = tu.T\n\n #Replicate diagonal block horizontal and vertical\n for tk in xrange(k+bs,SIZE,BS):\n tbs = min(BS,SIZE - tk) #Current block size\n L[tk:tk+tbs,k:k+bs] = U[k:k+tbs,k:k+bs]\n U[k:k+bs,tk:tk+tbs] = L[k:k+bs,k:k+tbs]\n\n if k+bs < SIZE:\n #Compute horizontal multiplier\n slice = ((kb,kb+1),(kb+1,SIZE/BS))\n for a,u in zip(A.blocks(slice), U.blocks(slice)):\n u[:] = np.linalg.solve(u.T,a.T).T\n\n #Compute vertical multiplier\n slice = ((kb+1,SIZE/BS),(kb,kb+1))\n for a,l in zip(A.blocks(slice), L.blocks(slice)):\n l[:] = np.linalg.solve(l,a)\n\n #Apply to remaining submatrix\n A -= pyHPC.summa(L[:,:k+bs],U[:k+bs,:], ao=(k+bs,k),\n bo=(k,k+bs), co=(k+bs,k+bs))\n\n return (L, U)", "def __mul__(left, right):\n \n if isinstance(left, Plucker) and isinstance(right, Plucker):\n # reciprocal product\n return np.dot(left.uw, right.v) + np.dot(right.uw, left.v)\n elif isinstance(left, Plucker) and arg.ismatrix(right, (4,None)):\n return left.skew @ right; # postmultiply by 4xN", "def inverse_transform(self, y: Array2D) -> Array2D:", "def InverseMatrix(matrix,vector):\r\n # Unveri reversible matrix\r\n if Determinant(matrix, 1) == 0:\r\n print(\"Error,Singular Matrix\\n\")\r\n return\r\n # result matrix initialized as singularity matrix\r\n result = MakeIMatrix(len(matrix), len(matrix))\r\n # loop for each row\r\n for i in range(len(matrix[0])):\r\n # turn the pivot into 1 (make elementary matrix and multiply with the result matrix )\r\n # pivoting process\r\n matrix, vector = RowXchange(matrix, vector)\r\n elementary = MakeIMatrix(len(matrix[0]), len(matrix))\r\n elementary[i][i] = 1/matrix[i][i]\r\n result = MultiplyMatrix(elementary, result)\r\n matrix = MultiplyMatrix(elementary, matrix)\r\n # make elementary loop to iterate for each row and subtracrt the number below (specific) pivot to zero (make\r\n # elementary matrix and multiply with the result matrix )\r\n for j in range(i+1, len(matrix)):\r\n elementary = MakeIMatrix(len(matrix[0]), len(matrix))\r\n elementary[j][i] = -(matrix[j][i])\r\n matrix = MultiplyMatrix(elementary, matrix)\r\n result = MultiplyMatrix(elementary, result)\r\n\r\n\r\n # after finishing with the lower part of the matrix subtract the numbers above the pivot with elementary for loop\r\n # (make elementary matrix and multiply with the result matrix )\r\n for i in range(len(matrix[0])-1, 0, -1):\r\n for j in range(i-1, -1, -1):\r\n elementary = MakeIMatrix(len(matrix[0]), len(matrix))\r\n elementary[j][i] = -(matrix[j][i])\r\n matrix = MultiplyMatrix(elementary, matrix)\r\n result = MultiplyMatrix(elementary, result)\r\n\r\n return result", "def woodbury_inv(A_diag, U, V, k):\n # Helps with numerics. If A_diag[i, j] == 0, then 1 / 0 == inf.\n SMALL = 1e-12\n A_inv_diag = 1. 
/ (A_diag + SMALL)\n\n I = torch.eye(k, device=cuda.device())\n B_inv = inv(I + ((V * A_inv_diag) @ U))\n\n # We want to perform the operation `U @ B_inv @ V` but need to optimize it:\n # - Computing `tmp1` is fast because it is (p, k) * (k, k).\n # - Computing `tmp2` is slow because it is (p, k) * (k, p).\n tmp1 = U @ B_inv\n tmp2 = torch.einsum('ab,bc->ac', (tmp1, V))\n\n # Use `view` rather than `reshape`. The former guarantees that a new tensor\n # is returned.\n tmp3 = A_inv_diag.view(-1, 1) * tmp2\n right = tmp3 * A_inv_diag\n\n # This is a fast version of `diag(A_inv_diag) - right`.\n right = -1 * right\n idx = torch.arange(0, A_diag.size(0), device=cuda.device())\n right[idx, idx] = A_inv_diag + right[idx, idx]\n\n return right", "def _dot_product_attention_inner_relative(x, y, z, transpose):\n batch_size, heads, length, _ = x.size()\n\n # xy_matmul is [batch_size, heads, length, length or depth]\n xy_matmul = torch.matmul(x, y if not transpose else y.transpose(-2, -1))\n # x_t is [length, batch_size, heads, length or depth]\n x_t = x.permute(2, 0, 1, 3)\n # x_t_r is [length, batch_size * heads, length or depth]\n x_t_r = x_t.view(length, batch_size * heads, -1)\n # x_tz_matmul is [length, batch_size * heads, length or depth]\n x_tz_matmul = torch.matmul(x_t_r, z if not transpose else z.transpose(-2, -1))\n # x_tz_matmul_r is [length, batch_size, heads, length or depth]\n x_tz_matmul_r = x_tz_matmul.view(length, batch_size, heads, -1)\n # x_tz_matmul_r_t is [batch_size, heads, length, length or depth]\n x_tz_matmul_r_t = x_tz_matmul_r.permute(1, 2, 0, 3)\n\n return xy_matmul + x_tz_matmul_r_t", "def Controlled(U):\n shp = U.shape\n new_ten = scipy.linalg.block_diag(np.eye(*shp), U)\n return new_ten.reshape(2, shp[0], 2, shp[1])", "def posdef_inv_matrix_inverse(tensor, identity, damping):\n return tf.matrix_inverse(tensor + damping * identity)", "def inverse(self, z, y):\n y_summary = self.summary_net(y)\n return self.invertible_net(z, y_summary, inverse=True)", "def __invert__(self):\n try:\n B = ~(self.matrix())\n except ZeroDivisionError:\n raise ZeroDivisionError(\"matrix morphism not invertible\")\n try:\n return self.parent().reversed()(B)\n except TypeError:\n raise ZeroDivisionError(\"matrix morphism not invertible\")", "def inverse(self):\n return self.solve(Matrix.I(self.nrows))", "def test_inverse( centering='SYMMETRIC'):\n\n\n npupil = 300 #156\n pctr = int(npupil/2)\n npix = 100 #1024\n u = 20 #100 # of lam/D\n\n npix, u = 2000, 200\n s = (npupil,npupil)\n\n\n\n\n mft1 = matrixDFT.MatrixFourierTransform(centering=centering)\n\n ctr = (float(npupil)/2.0, float(npupil)/2.0 )\n #print ctr\n pupil = makedisk(s=s, c=ctr, r=float(npupil)/2.0001, t=np.float64, grey=0)\n pupil /= np.sqrt(pupil.sum())\n\n pupil[100:200, 30:50] = 0\n pupil[0:50, 140:160] = 0\n\n plt.subplot(141)\n plt.imshow(pupil)\n\n print \"Pupil 1 total:\", pupil.sum() \n\n a = mft1.perform(pupil, u, npix)\n\n asf = a.real.copy()\n cpsf = a * a.conjugate()\n psf = cpsf.real.copy()\n print \"PSF total\", psf.sum()\n \n plt.subplot(142)\n plt.imshow(psf, norm=matplotlib.colors.LogNorm(1e-8, 1.0))\n\n plt.subplot(143)\n\n pupil2 = mft1.inverse(a, u, npupil)\n pupil2r = (pupil2 * pupil2.conjugate()).real\n plt.imshow( pupil2r)\n\n print \"Pupil 2 total:\", pupil2r.sum() \n\n\n\n a2 = mft1.perform(pupil2r, u, npix)\n psf2 = (a2*a2.conjugate()).real.copy()\n print \"PSF total\", psf2.sum()\n plt.subplot(144)\n plt.imshow(psf2, norm=matplotlib.colors.LogNorm(1e-8, 1.0))", "def matrix_inv(mat):\n\ta = mat[0,0]\n\tb = 
mat[0,1]\n\tc = mat[0,2]\n\td = mat[1,0]\n\te = mat[1,1]\n\tf = mat[1,2]\n\tg = mat[2,0]\n\th = mat[2,1]\n\ti = mat[2,2]\n\n\tdet = b*f*g + c*d*h + a*e*i - a*f*h - b*d*i - c*e*g\n\n\tinvmat = np.zeros((3,3))\n\tinvmat[0,0] = (e*i - f*h) / det\n\tinvmat[0,1] = (c*h - b*i) / det\n\tinvmat[0,2] = (b*f - c*e) / det\n\tinvmat[1,0] = (f*g - d*i) / det\n\tinvmat[1,1] = (a*i - c*g) / det\n\tinvmat[1,2] = (c*d - a*f) / det\n\tinvmat[2,0] = (d*h - e*g) / det\n\tinvmat[2,1] = (b*g - a*h) / det\n\tinvmat[2,2] = (a*e - b*d) / det\n\treturn invmat", "def inverse_transform(self, matrix):\n\n x = matrix.shape[0]\n y = matrix.shape[1]\n N = x\n\n # Inverse Fourier Transform matrix:\n ift = np.zeros([x, y], complex)\n\n for i in range(0, x):\n for j in range(0, y):\n sum_ift = 0\n for u in range(0, x):\n for v in range(0, y):\n sum_ift = sum_ift + matrix[u, v] * (np.cos(((2 * np.pi) / N) * (u * i + v * j)) + 1j * np.sin(((2 * np.pi) / N) * (u * i + v * j)))\n\n ift[i, j] = sum_ift\n\n\n return ift/(x*x)", "def block_diag_matmul(X, W_, bsize):\n assert(W_.ndim == 3)\n I, M, N = X.shape[0], X.shape[1], W_.shape[2]*bsize\n assert(W_.shape[0] == bsize)\n assert(W_.shape[1]*bsize == M)\n\n X_ = X.reshape(I, M/bsize, bsize)\n X_ = np.swapaxes(np.swapaxes(X_, 1,2), 0,1)\n Y_ = np.matmul(X_, W_)\n Y_ = np.swapaxes(np.swapaxes(Y_, 0,1), 1,2)\n Y = Y_.reshape(I, N)\n return Y", "def my_matmul(x, y):\n ##\n cmd = getattr(th, \"matmul\")\n x1, x2 = my_cut(x)\n y1, y2 = my_cut(y)\n x2y1 = cmd(x2, y1)\n x1y2 = cmd(x1, y2)\n x2y2 = cmd(x2, y2)\n ret = (x2y1 + x1y2) % int24field * int24field + x2y2\n ret = int48module(ret)\n return ret", "def inverse(self):\n data = np.linalg.inv(self._data)\n return self.create(self.rows, self.cols, data)", "def inv(self, y):\n pass", "def inv_m(self):\n self.m = -self.m", "def inverse(self,mat):\n result = np.linalg.inv(mat)\n self.out = result\n return self.out", "def symmetrize(a):\n return a + a.T - np.diag(a.diagonal());", "def reverse_matrix(self):\n return SWAP.matrix @ self.matrix @ SWAP.matrix", "def _inv(self) -> None:\n\n self.inv(inplace=True)", "def inverse (x_ij):\n\n Hij = xyzrph2matrix (x_ij)\n Rji = Hij[0:3, 0:3]\n tij = Hij[0:3,3]\n Rij = Rji.transpose ()\n tji = -Rij.dot (tij)\n Hji = numpy.zeros ((4,4))\n Hji[0:3,0:3] = Rij\n Hji[0:3,3] = tji\n Hji[3,3] = 1\n return matrix2xyzrph (Hji)", "def de_mult(self,z):\n if isinstance(z,np.ndarray) and z.size>1:\n assert np.all(np.diff(z)>0.)\n return (z+1.)**(3.*(1.+self.w))", "def inv_inplace(a):", "def inverse_affine_transformation_matrix(A):\n n, n = A.shape\n\n # extract components R, an n-1xn-1 linear transformation matrix, and T, an nx1 translation matrix\n R = A[:n-1, :n-1]\n T = A[:n-1, n-1]\n\n # find R^-1\n R_inv = np.linalg.inv(R)\n\n # Find A^-1/A_inv\n A_inv = np.copy(A).astype(float) # copy A for base of A^-1 matrix and ensure it is of data type float\n A_inv[:n-1, :n-1] = R_inv # set top left nxn sub matrix equal to R^-1\n A_inv[:n-1, n-1] = np.negative(R_inv.dot(T)) # place -R^-1*T in top right corner\n\n return A_inv", "def matI(a):\n shape=matShape(a)\n if shape[0]!=shape[1]: raise ValueError\n n=shape[0]\n ret=matZeros((n,n*2))\n for i in range(n):\n for j in range(n):\n matSet(ret,i,j,matGet(a,i,j))\n for i in range(n):\n matSet(ret,i,i+n,1)\n for row in range(n):\n rm=row\n ap=abs(matGet(ret,rm,row))\n for rint in range(row+1,n):\n p=abs(matGet(ret,rint,row))\n if ap<p:\n ap=p\n rm=rint\n if 0.000000001 > ap:\n return matCopy(a) # Not invertible\n di=matGet(ret,rm,row)\n if rm!=row:\n for i in range(n*2):\n 
t=matGet(ret,rm,i)\n matSet(ret,rm,i,matGet(ret,row,i))\n matSet(ret,row,i,t)\n idi=1.0/di\n for rint in range(row+1,n):\n f=idi*matGet(ret,rint,row)\n if f!=0:\n for co in range(row,n*2):\n matSet(ret,rint,co,matGet(ret,rint,co)-f*matGet(ret,row,co))\n row=n-1\n while row>=0:\n ic=1.0/matGet(ret,row,row)\n for rint in range(row):\n icx=ic*matGet(ret,rint,row)\n if icx!=0:\n for co in range(row, n*2):\n matSet(ret,rint,co,matGet(ret,rint,co)-icx*matGet(ret,row,co))\n matSet(ret,row,row,ic*matGet(ret,row,row))\n for co in range(n,n*2):\n matSet(ret,row,co,ic*matGet(ret,row,co))\n row-=1\n return matPart(ret,0,n,n,n*2)", "def inverse( m, context = FloatContext, copy_m=True ):\n n,n_ = shape_mat(m)\n assert (n==n_) #matris should be square\n\n return solve( m, eye(n), context=context, copy_b=False, copy_a=copy_m )", "def _multiply_matrix(self, v):\n\n self.inputs.grad.data.zero_()\n\n with torch.no_grad():\n v_features = self.lpips_model.features(self.inputs.detach() +\n self.h * v)\n D_phi_v = (\n normalize_flatten_features(v_features) -\n self.input_features\n ) / self.h\n\n torch.sum(self.input_features * D_phi_v).backward(retain_graph=True)\n\n return self.inputs.grad.data.clone()", "def inv(self):\n return MoebGen(self._d / self._det, - self._b / self._det, - self._c / self._det, self._a / self._det)", "def inv(X):\n R, t = Rt(X)\n Ri = R.T\n return np.concatenate((\n np.concatenate((Ri, -Ri.dot(t)[:,np.newaxis]), axis=1),\n np.array([[0, 0, 1]])))", "def matrixMul(self, matrix, matrix2):\n matrix0 = matrix[:]\n matrix[0] = matrix0[0] * matrix2[0] + matrix0[2]*matrix2[1] # + matrix0[4]*0\n matrix[1] = matrix0[1] * matrix2[0] + matrix0[3]*matrix2[1] # + matrix0[5]*0\n matrix[2] = matrix0[0] * matrix2[2] + matrix0[2]*matrix2[3] # + matrix0[4]*0\n matrix[3] = matrix0[1] * matrix2[2] + matrix0[3]*matrix2[3] # + matrix0[5]*0\n matrix[4] = matrix0[0] * matrix2[4] + matrix0[2]*matrix2[5] + matrix0[4]\n matrix[5] = matrix0[1] * matrix2[4] + matrix0[3]*matrix2[5] + matrix0[5]", "def L_pseudo_inverse_tf(self) -> tf.Tensor:\n return tf.py_func(np.linalg.pinv, [self.L_tf], tf.float32)", "def invupdatered(A, c):\n n, m = A.shape\n indn = np.arange(n)\n q = A[c, c]\n c1 = np.hstack((indn[:c], indn[c+1:]))\n Ax = np.atleast_2d(A[c1, c])\n yA = np.atleast_2d(A[c, c1])\n return A[c1][:,c1] - np.dot(Ax.T, yA)/q", "def complex_inverse(c1,cr):", "def inv(self):\n\n self.x, self.y = self.y, self.x\n self._x_, self._y_ = self._y_, self._x_\n self.xfac, self.yfac = 1 / self.yfac, 1 / self.xfac\n self._xfac_, self._yfac_ = 1 / self._yfac_, 1 / self._xfac_\n self._u = 1 / self._u.conj()", "def inverse(self, x, y):", "def ssc.inverse (x_ij):\n\n Hij = xyzrph2matrix (x_ij)\n Rji = Hij[0:3, 0:3]\n tij = Hij[0:3,3]\n Rij = Rji.transpose ()\n tji = -Rij.dot (tij)\n Hji = zeros ((4,4))\n Hji[0:3,0:3] = Rij\n Hji[0:3,3] = tji\n Hji[3,3] = 1\n return matrix2xyzrph (Hji)", "def inverse(self):\n invr = np.linalg.inv(self.affine_matrix)\n return SymmOp(invr)", "def matmul():\n\n if RESULT_IN_NVRAM:\n matrix_c = ResultMatrixInDaos()\n else:\n matrix_c = ResultMatrixInMemory()\n\n # This could be trivially optimized by reordering indexes\n # and caching either a_block or b_block (assuming C in-memory).\n # *However* it would result in unfair comparisons with the \n # previous implementation used elsewhere.\n # Using the naive algorithm makes sense for a raw comparison.\n for i in range(MATRIXSIZE):\n for j in range(MATRIXSIZE):\n partial_result_block = np.zeros((BLOCKSIZE, BLOCKSIZE))\n\n for k in range(MATRIXSIZE):\n 
a_block = np.fromstring(\n DAOS_KV[\"A%02d%02d\" % (i, k)],\n dtype=NP_FROMSTRING_DTYPE\n ).reshape((BLOCKSIZE, BLOCKSIZE))\n\n b_block = np.fromstring(\n DAOS_KV[\"B%02d%02d\" % (k, j)],\n dtype=NP_FROMSTRING_DTYPE\n ).reshape((BLOCKSIZE, BLOCKSIZE))\n\n partial_result_block += a_block @ b_block\n \n matrix_c[i,j] = partial_result_block\n\n return matrix_c", "def T_inv(T):\n R, xyz = rigmech.T2Rxyz(T)\n R_inv = R.T\n xyz_inv = -R_inv * xyz\n T_inv = R_inv.row_join(xyz_inv).col_join(sp.Matrix([[0, 0, 0, 1]]))\n return T_inv", "def invert(self,el):\n return el^(self.q-2)", "def _inv(M):\n ll, mm = M.shape\n M2 = M + 1e-10 * np.eye(ll)\n L = np.linalg.cholesky(M2)\n inv_L = np.linalg.inv(L)\n inv_M = inv_L.T @ inv_L\n return inv_M", "def multInverse(a, m):\n x0 = 1\n x1 = 0\n y0 = 0\n y1 = 1\n\n while m != 0:\n p = a // m\n z = a % m\n a = m\n m = z\n\n w = x1\n x1 = x0 - p * x1\n x0 = w\n \n v = y1\n y1 = y0 - p * y1\n y0 = v\n if(x0):\n return(x0)\n else:\n print(\"multiplicative inverse does not exist\")\n return 0", "def _pseudo_inv22sym_vectorized(M):\n assert M.ndim == 3\n assert M.shape[-2:] == (2, 2)\n M_inv = np.empty_like(M)\n prod1 = M[:, 0, 0]*M[:, 1, 1]\n delta = prod1 - M[:, 0, 1]*M[:, 1, 0]\n rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))\n\n if np.all(rank2):\n # Normal 'optimized' flow.\n M_inv[:, 0, 0] = M[:, 1, 1] / delta\n M_inv[:, 0, 1] = -M[:, 0, 1] / delta\n M_inv[:, 1, 0] = -M[:, 1, 0] / delta\n M_inv[:, 1, 1] = M[:, 0, 0] / delta\n else:\n # 'Pathologic' flow.\n # Here we have to deal with 2 sub-cases\n # 1) First sub-case: matrices of rank 2:\n delta = delta[rank2]\n M_inv[rank2, 0, 0] = M[rank2, 1, 1] / delta\n M_inv[rank2, 0, 1] = -M[rank2, 0, 1] / delta\n M_inv[rank2, 1, 0] = -M[rank2, 1, 0] / delta\n M_inv[rank2, 1, 1] = M[rank2, 0, 0] / delta\n # 2) Second sub-case: rank-deficient matrices of rank 0 and 1:\n rank01 = ~rank2\n tr = M[rank01, 0, 0] + M[rank01, 1, 1]\n tr_zeros = (np.abs(tr) < 1.e-8)\n sq_tr_inv = (1.-tr_zeros) / (tr**2+tr_zeros)\n #sq_tr_inv = 1. / tr**2\n M_inv[rank01, 0, 0] = M[rank01, 0, 0] * sq_tr_inv\n M_inv[rank01, 0, 1] = M[rank01, 0, 1] * sq_tr_inv\n M_inv[rank01, 1, 0] = M[rank01, 1, 0] * sq_tr_inv\n M_inv[rank01, 1, 1] = M[rank01, 1, 1] * sq_tr_inv\n\n return M_inv", "def __mul__(self, other):\n #\n # TODO - your code here\n #\n final_matrix = []\n for i in range(self.h):\n temp_row = []\n for j in range(other.w):\n # take dot-product of row of\n # matrix in 1st arg with col of\n # matrix in 2nd arg\n temp_row.append(dot_product(get_row(self.g, i), get_col(other.g, j)))\n final_matrix.append(temp_row)\n return Matrix(final_matrix)\n # TODO - your code here", "def blk_chol_inv(A_Txdxd, B_Tm1xdxd, b_Txd, lower=True, transpose=False):\n # Define a matrix-vector dot product because the tensorflow developers feel\n # this is beneath them.\n tf_dot = lambda M, v : tf.reduce_sum(tf.multiply(M, v), axis=1)\n if transpose:\n A_Txdxd = tf.transpose(A_Txdxd, [0,2,1])\n B_Tm1xdxd = tf.transpose(B_Tm1xdxd, [0,2,1])\n \n # Whether B is lower or upper doesn't matter. 
The function to be passed to\n # scan is the same.\n def step(x_d, ABb_2x_):\n A_dxd, B_dxd, b_d = ABb_2x_[0], ABb_2x_[1], ABb_2x_[2]\n return tf_dot(tf.matrix_inverse(A_dxd),\n b_d - tf_dot(B_dxd, x_d))\n if lower:\n x0_d = tf_dot(tf.matrix_inverse(A_Txdxd[0]), b_Txd[0])\n result_Tm1xd = tf.scan(fn=step, elems=[A_Txdxd[1:], B_Tm1xdxd, b_Txd[1:]], \n initializer=x0_d)\n result_Txd = tf.concat([tf.expand_dims(x0_d, axis=0), result_Tm1xd], axis=0)\n else:\n xN_d = tf_dot(tf.matrix_inverse(A_Txdxd[-1]), b_Txd[-1])\n result_Tm1xd = tf.scan(fn=step, \n elems=[A_Txdxd[:-1][::-1], B_Tm1xdxd[::-1], b_Txd[:-1][::-1]],\n initializer=xN_d )\n result_Txd = tf.concat([tf.expand_dims(xN_d, axis=0), result_Tm1xd],\n axis=0)[::-1]\n\n return result_Txd", "def _inv22_vectorized(M):\n assert (M.ndim == 3)\n assert (M.shape[-2:] == (2, 2))\n M_inv = np.empty_like(M)\n delta_inv = np.reciprocal(M[:, 0, 0]*M[:, 1, 1] - M[:, 0, 1]*M[:, 1, 0])\n M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv\n M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv\n M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv\n M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv\n return M_inv", "def __neg__(self):\r\n return mat4(map(lambda x: -x, self.mlist))", "def inv_IpXY(X, Y):\r\n d1 = X.shape[0]\r\n d2 = X.shape[1]\r\n if d1 > d2:\r\n M = np.eye(d1) - np.dot(np.dot(X, LA.inv(np.eye(d2) + np.dot(Y, X))), Y)\r\n else:\r\n M = LA.inv(np.eye(d1) + np.dot(X, Y))\r\n return M", "def Controlled2(U):\n '''Generalized controlled unitary tensor construction\n Parameters:\n -----------\n U: input tensor which is assumed to be a square Matrix\n\n Returns:\n --------\n Controlled unitary\n\n '''\n shp = U.shape\n new_ten = scipy.linalg.block_diag(np.eye(*shp), U)\n return new_ten.reshape(2, shp[0], 2, shp[1], 2, shp[2])", "def inverse(self):\n return Rotation(self.matrix.transposed())", "def _outer(a, b):\n a_flat = torch.reshape(a, [-1])\n b_flat = torch.reshape(b, [-1])\n a_mul = torch.unsqueeze(a_flat, dim=-1)\n b_mul = torch.unsqueeze(b_flat, dim=0)\n return a_mul * b_mul", "def __neg__(self):\n # \n # TODO - your code here\n #\n result = [];\n for row in self.g:\n result.append([-1*n for n in row]);\n \n return Matrix(result);", "def inverseTransformationMatrix(self,index=None):\n if self.method == 'pca':\n if index is not None:\n coordinateIndex = distribution1D.vectori_cxx(len(index))\n for i in range(len(index)):\n coordinateIndex[i] = index[i]\n matrixDim = self._distribution.getInverseTransformationMatrixDimensions(coordinateIndex)\n inverseTransformation = self._distribution.getInverseTransformationMatrix(coordinateIndex)\n else:\n matrixDim = self._distribution.getInverseTransformationMatrixDimensions()\n inverseTransformation = self._distribution.getInverseTransformationMatrix()\n row = matrixDim[0]\n column = matrixDim[1]\n # convert 1D vector to 2D array\n L = np.atleast_1d(inverseTransformation).reshape(row,column)\n else:\n self.raiseAnError(NotImplementedError,' inverse transformationMatrix is not yet implemented for ' + self.method + ' method')\n return L", "def inv_mix_columns(state):\n mix_columns(state, m=inv_ax)", "def lazy_matrix_mul(m_a, m_b):\n return np.dot(m_a, m_b)" ]
[ "0.65371925", "0.6473114", "0.639856", "0.6361315", "0.6302969", "0.6292023", "0.6192051", "0.61344135", "0.61059606", "0.60929507", "0.6069136", "0.6021487", "0.60205114", "0.6011188", "0.5997013", "0.5966648", "0.5926399", "0.5926365", "0.5916658", "0.5888663", "0.5883227", "0.5874907", "0.5866973", "0.58164996", "0.5813204", "0.5803478", "0.58029234", "0.5792404", "0.5783036", "0.57659465", "0.57347953", "0.573438", "0.57320714", "0.57297236", "0.57234615", "0.57023937", "0.5676741", "0.5667271", "0.5650198", "0.56423396", "0.56274515", "0.56067425", "0.5605958", "0.55912346", "0.55879545", "0.55862856", "0.5573533", "0.5567818", "0.55630285", "0.55520743", "0.5542488", "0.55335736", "0.5524717", "0.5524206", "0.5520179", "0.5517848", "0.5516924", "0.55104244", "0.5501446", "0.55014014", "0.54888505", "0.5481267", "0.5472314", "0.5472173", "0.54717505", "0.5467171", "0.54639393", "0.54543144", "0.54351276", "0.54329956", "0.5432344", "0.5429956", "0.54229677", "0.54179525", "0.54136", "0.5404831", "0.53976727", "0.53916246", "0.53888553", "0.53869003", "0.5379431", "0.53757465", "0.5354698", "0.5354128", "0.53499", "0.5345045", "0.5342773", "0.53404176", "0.53290564", "0.53234637", "0.5314762", "0.5310652", "0.53060216", "0.52966475", "0.5291388", "0.528643", "0.5281297", "0.52443004", "0.52339405", "0.52272296" ]
0.7164876
0
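A minimal sketch, assuming NumPy and SciPy are available, of the operation the `toeplitz_inverse_multiplication*` snippets above implement: applying the inverse of a Toeplitz matrix, given only its first column, to a vector. Those snippets build FFT-based factor matrices (a Gohberg–Semencul-style identity); `scipy.linalg.solve_toeplitz` reaches the same result directly via Levinson recursion, so it is a convenient reference when checking such code. The column `c` and right-hand side `u` below are made-up illustrative values, not taken from the rows above.

```python
import numpy as np
from scipy.linalg import solve_toeplitz, toeplitz

c = np.array([4.0, 1.0, 0.5, 0.25])    # first column (and row) of a symmetric Toeplitz matrix T
u = np.array([1.0, 2.0, 3.0, 4.0])     # vector to multiply by T^{-1}

x = solve_toeplitz(c, u)                # solves T x = u without ever forming T explicitly
assert np.allclose(toeplitz(c) @ x, u)  # sanity check against the dense matrix
```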
Parse a single line of csvtoarrow output. Raise RuntimeError if a line cannot be parsed. (We can't recover from that because we don't know what's happening.)
def _parse_csv_to_arrow_warning(line: str) -> I18nMessage: for pattern, builder in _ERROR_PATTERNS: match = pattern.match(line) if match: return builder(**match.groupdict()) raise RuntimeError("Could not parse csv-to-arrow output line: %r" % line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_line(self, line):\n raise NotImplementedError", "def test_parseLine2(mocker):\n \n # given: setup test framework\n worker = Worker()\n testString = \"11/11/19,Brighter Futures,12000\"\n \n # when:\n result = worker.parseLineCSV(testString)\n \n # then: (Using PyTruth assertions)\n AssertThat(result).IsNone()", "def test_parseLine1(mocker):\n \n # given: setup test framework\n worker = Worker()\n testString = \"12Nov2019,Teacher,Brighter Futures,12000\"\n expectedResult = {\n 'date': '2019-11-12',\n 'job_title': 'Teacher',\n 'company_name': 'Brighter Futures',\n 'salary': 12000\n }\n \n # when:\n result = worker.parseLineCSV(testString)\n \n # then:\n assert result == expectedResult", "def _parse_csv(\n path: Path,\n *,\n settings: Settings = DEFAULT_SETTINGS,\n encoding: Optional[str],\n delimiter: Optional[str],\n has_header: bool,\n autoconvert_text_to_numbers: bool,\n) -> ParseCsvResult:\n warnings = []\n\n with contextlib.ExitStack() as ctx:\n n_bytes = path.stat().st_size\n if n_bytes > settings.MAX_CSV_BYTES:\n # We can't simply os.truncate() the input file, because sandboxed code\n # can't modify input files.\n truncated_path = ctx.enter_context(tempfile_context(prefix=\"truncated-\"))\n with path.open(\"rb\") as src, truncated_path.open(\"wb\") as dest:\n os.sendfile(dest.fileno(), src.fileno(), 0, settings.MAX_CSV_BYTES)\n path = truncated_path\n warnings.append(\n _trans_cjwparse(\n \"csv.truncated_file\",\n \"{n_bytes_truncated, one{Truncated # byte} other{Truncated # bytes}} from file (maximum is {max_n_bytes} bytes)\",\n dict(\n n_bytes_truncated=(n_bytes - settings.MAX_CSV_BYTES),\n max_n_bytes=settings.MAX_CSV_BYTES,\n ),\n )\n )\n\n utf8_path = ctx.enter_context(tempfile_context(prefix=\"utf8-\", suffix=\".txt\"))\n # raises LookupError, UnicodeError\n warnings.extend(\n transcode_to_utf8_and_warn(path, utf8_path, encoding, settings=settings)\n )\n\n # Sniff delimiter\n if not delimiter:\n delimiter = detect_delimiter(utf8_path, settings)\n\n with tempfile_context(suffix=\".arrow\") as arrow_path:\n # raise subprocess.CalledProcessError on error ... 
but there is no\n # error csv-to-arrow will throw that we can recover from.\n child = subprocess.run(\n [\n \"/usr/bin/csv-to-arrow\",\n \"--delimiter\",\n delimiter,\n \"--max-rows\",\n str(settings.MAX_ROWS_PER_TABLE),\n \"--max-columns\",\n str(settings.MAX_COLUMNS_PER_TABLE),\n \"--max-bytes-per-value\",\n str(settings.MAX_BYTES_PER_VALUE),\n utf8_path.as_posix(),\n arrow_path.as_posix(),\n ],\n capture_output=True,\n check=True,\n )\n warnings.extend(_parse_csv_to_arrow_warnings(child.stdout.decode(\"utf-8\")))\n\n reader = pyarrow.ipc.open_file(arrow_path.as_posix())\n raw_table = reader.read_all() # efficient -- RAM is mmapped\n\n table, more_warnings = _postprocess_table(\n raw_table, has_header, autoconvert_text_to_numbers, settings\n )\n return ParseCsvResult(table, warnings + more_warnings)", "def _parse_tuple(self, line):\n elements = line[1:-1].split(\",\\t\")\n if len(elements) == len(self.description):\n return tuple(\n [\n pythonize.convert(element.strip(), description[1])\n for (element, description) in zip(elements, self.description)\n ]\n )\n else:\n self._exception_handler(\n InterfaceError, \"length of row doesn't match header\"\n )", "def doomed_parser(line):\n raise exceptions.LineParseException('Error occurred')", "def parse(cls, line):\r\n raise NotImplementedError", "def from_csv_line(line):\r\n return line.strip().split(',')", "def csv_line(value_parser):\n def convert(string):\n return list(map(value_parser, string.split(',')))\n return convert", "def ParseRow(self, parser_mediator, row_offset, row):\n try:\n date_time = self._CreateDateTime(row['date'], row['time'])\n except errors.ParseError as exception:\n parser_mediator.ProduceExtractionWarning(\n 'Unable to create date time with error: {0!s}'.format(exception))\n date_time = None\n\n status = row['status']\n if status:\n status = status.rstrip()\n\n event_data = McafeeAVEventData()\n event_data.action = row['action']\n event_data.filename = row['filename']\n event_data.offset = row_offset\n event_data.rule = row['rule']\n event_data.status = status\n event_data.trigger_location = row['trigger_location']\n event_data.username = row['username']\n event_data.written_time = date_time\n\n parser_mediator.ProduceEventData(event_data)", "def parse_line(line: str) -> str:\n return line", "def parse_line(line: str) -> str:\n return line", "def parse_line(line: str) -> str:\n return line", "def parse_line(line: str) -> str:\n return line", "def csv_readline(line):\n for row in csv.reader([line]):\n return row", "def csv_readline(line):\n for row in csv.reader([line]):\n return row", "def parse_line(self, line):\n if self.signal_eof:\n return \"\"\n\n match = re.search(\"^([\\w\\s]+from) ([^:]+):(\\d+)(:|,)$\", line)\n if match:\n return self.parse_line_from(match)\n\n match = re.search(\"^([^:]+):(?:((?:\\d+:)?\\d+):)?(?:(error|warning|note):)?(.+)$\", line)\n if match:\n return self.parse_line_err(match)\n\n return line", "def process_line(line):\n\n name_comp_list = []\n givenname_comp_list = []\n surname_comp_list = []\n geocode_comp_list = []\n locality_comp_list = []\n date1_comp_list = []\n date2_comp_list = []\n\n # Split the line into the basic fields - - - - - - - - - - - - - - - - - - -\n #\n if (config.in_file_type in ['CSV','CSVQ','TAB','TABQ']):\n # Comma or tabulator separated\n try:\n line_list = config.line_parser.parse(line)\n except:\n log_message('CSV line parsing failed with inout: '+line,'err')\n\n if (len(line_list) < config.input_len):\n log_message('Input line does not contain enough fields,' 
+\\\n 'fill up with empty fields','warn')\n while (len(line_list) < config.input_len):\n line_list.append('')\n\n config.curr_line_list = line_list # Save current line list\n\n # Extract fields into different component lists - - - - - - - - - - - - - -\n #\n if (config.input_component['name'] != []): # Extract name fields\n for i in config.input_component['name']:\n name_comp_list.append(line_list[i])\n\n else: # Extract givenname and surname into separate components - - - - - -\n if (config.input_component['givenname'] != []): # Extract g-name fields\n for i in config.input_component['givenname']:\n givenname_comp_list.append(line_list[i])\n\n if (config.input_component['surname'] != []): # Extract surname fields\n for i in config.input_component['surname']:\n surname_comp_list.append(line_list[i])\n\n if (config.input_component['geocode'] != []): # Extract geocode fields\n for i in config.input_component['geocode']:\n geocode_comp_list.append(line_list[i])\n\n if (config.input_component['locality'] != []): # Extract locality fields\n for i in config.input_component['locality']:\n locality_comp_list.append(line_list[i])\n\n if (config.input_component['date1'] != []): # Extract date1 fields\n for i in config.input_component['date1']:\n date1_comp_list.append(line_list[i])\n\n if (config.input_component['date2'] != []): # Extract date2 fields\n for i in config.input_component['date2']:\n date2_comp_list.append(line_list[i])\n\n elif (config.in_file_type == 'COL'): # Column based input file - - - - - - -\n\n if (len(line) < config.input_len):\n log_message('Input line is not long enough, fill up with spaces','warn')\n line += ' '*(config.input_len-len(line))\n\n if (config.input_component['name'] != []): # Extract name fields\n for (col_start,length) in config.input_component['name']:\n name_comp_list.append(line[col_start,col_start+length])\n\n else: # Extract givenname and surname into separate components - - - - - -\n if (config.input_component['givenname'] != []): # Extract g-name fields\n for (col_start,length) in config.input_component['givenname']:\n givenname_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['surname'] != []): # Extract surname fields\n for (col_start,length) in config.input_component['surname']:\n surname_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['geocode'] != []): # Extract geocode fields\n for (col_start,length) in config.input_component['geocode']:\n geocode_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['locality'] != []): # Extract locality fields\n for (col_start,length) in config.input_component['locality']:\n locality_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['date1'] != []): # Extract date1 fields\n for (col_start,length) in config.input_component['date1']:\n date1_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['date2'] != []): # Extract date2 fields\n for (col_start,length) in config.input_component['date2']:\n date2_comp_list.append(line[col_start,col_start+length])\n\n # elif (config.in_file_type == 'SQL'): # - - - - - - - - - - - - - - - - - -\n\n ################################\n # Add later: SQL database access\n ################################\n\n msg = [' Component basic field lists:', \\\n ' Name: '+str(name_comp_list), \\\n ' Given name: '+str(givenname_comp_list), \\\n ' Surname: '+str(surname_comp_list), \\\n ' Geocode: '+str(geocode_comp_list), \\\n ' Locality: 
'+str(locality_comp_list), \\\n ' Date1: '+str(date1_comp_list), \\\n ' Date2: '+str(date2_comp_list)]\n log_message(msg,'v2')\n\n name_comp = ''\n givenname_comp = ''\n surname_comp = ''\n geocode_comp = ''\n locality_comp = ''\n date1_comp = ''\n date2_comp = ''\n\n # Now clean and then concatenate component lists into strings - - - - - - - -\n #\n if (name_comp_list != []): # Name component\n name_comp = name_comp_list[0] # Start with first field in list\n\n for f in name_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['name'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['name'] == 1):\n sep = check_field_spill(name_comp, f)\n\n name_comp = name_comp+sep+f # Append separator and field\n\n if (givenname_comp_list != []): # Givenname component - - - - - - - - - - -\n givenname_comp = givenname_comp_list[0] # Start with first field in list\n\n for f in givenname_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['givenname'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['givenname'] == 1):\n sep = check_field_spill(givenname_comp, f)\n\n givenname_comp = givenname_comp+sep+f # Append separator and field\n\n if (surname_comp_list != []): # Surname component - - - - - - - - - - - - -\n surname_comp = surname_comp_list[0] # Start with first field in list\n\n for f in surname_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['surname'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['surname'] == 1):\n sep = check_field_spill(surname_comp, f)\n\n surname_comp = surname_comp+sep+f # Append separator and field\n\n if (geocode_comp_list != []): # Geocode component - - - - - - - - - - - - -\n geocode_comp = geocode_comp_list[0] # Start with first field in list\n\n for f in geocode_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['geocode'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['geocode'] == 1):\n sep = check_field_spill(geocode_comp, f)\n\n geocode_comp = geocode_comp+sep+f # Append separator and field\n\n if (locality_comp_list != []): # Locality component - - - - - - - - - - - -\n locality_comp = locality_comp_list[0] # Start with first field in list\n\n for f in locality_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['locality'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['locality'] == 1):\n sep = check_field_spill(locality_comp, f)\n\n locality_comp = locality_comp+sep+f # Append separator and field\n\n if (date1_comp_list != []): # Date1 component - - - - - - - - - 
- - - - - -\n date1_comp = date1_comp_list[0] # Start with first field in list\n\n for f in date1_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['date1'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['date1'] == 1):\n if (date1_comp[-1] != ' ') and (f[0] != ' '):\n tmp_list0 = date1_comp.split()\n tmp_list1 = f.split()\n check_word = tmp_list0[-1]+tmp_list1[0]\n\n if (check_word in ['jan','feb','mar','apr','may','jun','jul','aug', \\\n 'sep','oct','nov','dec','january','february','march','april', \\\n 'may','june','july','august','september','october','november', \\\n 'december']):\n\n sep = '' # Set separator to no space\n msg = ' Correct date1 word spilling: \"'+date1_comp+'\",\"'+f+'\"'\n log_message(msg,'v1')\n\n date1_comp = date1_comp+sep+f # Append separator and field\n\n if (date2_comp_list != []): # Date2 component - - - - - - - - - - - - - - -\n date2_comp = date2_comp_list[0] # Start with first field in list\n\n for f in date2_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['date2'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['date2'] == 1):\n if (date2_comp[-1] != ' ') and (f[0] != ' '):\n tmp_list0 = date1_comp.split()\n tmp_list1 = f.split()\n check_word = tmp_list0[-1]+tmp_list1[0]\n\n if (check_word in ['jan','feb','mar','apr','may','jun','jul','aug', \\\n 'sep','oct','nov','dec','january','february','march','april', \\\n 'may','june','july','august','september','october','november', \\\n 'december']):\n\n sep = '' # Set separator to no space\n msg = ' Correct date1 word spilling: \"'+date1_comp+'\",\"'+f+'\"'\n log_message(msg,'v1')\n\n date2_comp = date2_comp+sep+f # Append separator and field\n\n # Check if name component is given or givenname and surname separately - - -\n #\n if (config.input_component['givenname'] != []) or \\\n (config.input_component['surname'] != []):\n name_comp = [givenname_comp, surname_comp]\n\n msg = [' Components:', \\\n ' Name: \"'+str(name_comp)+'\"', \\\n ' Geocode: \"'+geocode_comp+'\"', \\\n ' Locality: \"'+locality_comp+'\"', \\\n ' Date1: \"'+date1_comp+'\"', \\\n ' Date2: \"'+date2_comp+'\"']\n log_message(msg,'v1')\n\n return [name_comp, geocode_comp, locality_comp, date1_comp, date2_comp]", "def parse(self, line):\n try:\n (year, month, day, hour, minute, second, microseconds, offset_hour, offset_minute, source, process, logentry) = re.match('^(\\d\\d\\d\\d)-(\\d\\d)-(\\d\\d)T(\\d\\d):(\\d\\d):(\\d\\d)\\.([\\d]+)\\+(\\d\\d):(\\d\\d) ([a-z]+)\\[([a-zA-Z0-9_.]+)\\]: ([0-9a-z-A-Z\\-_\\.\\[\\]:\\?\\#\\\",/\\ ={}\\'\\(\\)<>]+)$', line).groups()\n except:\n pass\n \n try:\n parsed_data = dict()\n parsed_data['timestamp'] = \" \".join([\"-\".join([year, month, day]), \":\".join([hour, minute, second])])\n parsed_data['log_time'] = datetime.datetime(int(year), int(month), int(day), int(hour), int(minute), int(second))\n parsed_data['log_source'] = source\n parsed_data['log_type'] = process\n except (AttributeError, UnboundLocalError):\n PARSE_ERRORS.append(line)\n return False\n\n #TODO: This still needs work on spaces in values surrounded by \" \" \n if parsed_data['log_source'] == 
\"heroku\":\n if logentry.__len__() > 1:\n logentry = re.sub(', ', ',', logentry)\n line_chunks = re.split(' ', logentry)\n for chunk in line_chunks:\n line_chunks = re.split('=', chunk)\n if line_chunks.__len__() > 2:\n #fwd and path are a little clunky to parse\n pass\n elif line_chunks.__len__() > 1:\n parsed_data[line_chunks[0]] = line_chunks[1]\n else:\n pass\n else:\n return False\n else:\n # TODO: [app] \n # Needs parsing. Do that here.\n return False\n\n return parsed_data", "def next_line(self, context, line):\n if \"\\t\" in line:\n next_index = line.find(\"\\t\", 0)\n while next_index != -1:\n extra_data = f\"Column: {next_index + 1}\"\n self.report_next_line_error(\n context, next_index + 1, extra_error_information=extra_data\n )\n next_index = line.find(\"\\t\", next_index + 1)", "def process_line(line: str):\n \n comment_start = line.find(';')\n\n # Remove comments, one comment per line allowed\n if comment_start != -1:\n line = line[:comment_start]\n\n line = line.strip()\n \n # Splits commands such that the command and all details are seperated\n # \"command ...\" -> [command, ...]\n try:\n command, contents = line.split(maxsplit = 1)\n # Deals with function names, two special commands, and empty lines\n except ValueError:\n if line == '':\n return None\n elif line[-1] == ':' or line == 'end' or line == 'ret':\n return (line,)\n\n # Splits depending on command type, some requiring one argument, others two\n try:\n one, two = contents.split(',')\n return command, one.strip(), two.strip()\n except ValueError:\n return command, contents.strip()", "def read(self, line):\n data = []\n if six.PY3 and type(line) == six.binary_type:\n line = line.decode('utf-8')\n\n csv_reader = csv.reader(six.StringIO(line),\n delimiter=self.delimiter,\n quotechar=self.quotechar,\n skipinitialspace=True)\n for cr in csv_reader:\n data = [decode_string(f).strip() for f in cr]\n break\n\n return None, data", "def parse_row(self, response, row):\n raise NotImplementedError", "def __read_csv(self) -> tuple:\n with open(self.csv_file) as f:\n reader = csv.reader(f)\n for row in reader:\n if row[0].isspace():\n raise StopIteration\n yield row", "def parse_csv(csv_file):\n if os.path.isfile(csv_file) == True:\n num_lines = sum(1 for line in open(csv_file))\n if num_lines > 1:\n try:\n data = pd.read_csv(csv_file, index_col=False)\n data.insert(0, 'id', range(1, 1 + len(data)))\n return(data)\n except pd.parser.CParserError, err:\n message = \"Can't parse REDCap data. 
Check CSV file: \" + csv_file\n print(message)\n logging.critical(message)\n exit(3)\n else:\n message = \"CSV file does not contain data: \" + csv_file\n print(message)\n logging.warning(message)\n return(None)\n else:\n message = \"Can't read CSV file: \" + csv_file\n print(message)\n logging.critical(message)\n exit(4)", "def process_line(self, line):\n columns = line.split('|')\n\n if len(line) == 0 or len(columns) < 16:\n return None # empty line or malformed line\n\n cmte_id, name, zip_code = columns[0], columns[7], columns[10][:5]\n transaction_dt, transaction_amt = columns[13], columns[14]\n other_id = columns[15]\n\n if len(other_id) > 0 or len(transaction_amt) == 0 or len(cmte_id) == 0 or len(name) == 0 or len(zip_code) < 5:\n return None # malformed data fields, ignore this line\n transaction_date = string_to_date(transaction_dt)\n if transaction_date is None:\n return None # 'TRANSACTION_DT' is an invalid date\n\n try:\n if self.repeat_donor(name, zip_code, transaction_date.year):\n # this record is from a repeat donor in any prior calendar year\n amount = float(transaction_amt)\n key = RecipientZipYear(cmte_id, zip_code, transaction_date.year)\n if key not in self.running_percentile:\n self.running_percentile[key] = RunningPercentile(self.percentile)\n self.running_percentile[key].add(amount)\n return self.print_record(key)\n else:\n return None # this record is not from a repeat donor\n except:\n return None # exception may comes from malformed line, so just ignore this line", "def parse_line(self, line):\n success = self.parser.handle_line(line)\n if success:\n self.data.update()\n else:\n self.bot.log(\"didn't handle line: '{}'\".format(line))", "def _process_text_line(self, line, columns, format, lower_case, num_line,\n fill_missing=0, filter_case=None,\n strict_separator=False):\n if not isinstance(line, list) and not isinstance(\n line, tuple) and not isinstance(line, numpy.ndarray):\n if format != \"tsv\":\n raise Exception(\"unable to process format \" + format)\n line = line.strip(\"\\r\\n \").replace(\"\\n\", \" \")\n line = DatabaseCore2._split_expr.split(line)\n\n if filter_case is not None:\n line = [filter_case(s) for s in line]\n\n try:\n if fill_missing > 0:\n m = max(columns.keys())\n if m >= len(line):\n line = copy.copy(line)\n add = 0\n while m >= len(line) and add < fill_missing:\n a, b = columns[len(line)]\n if b is int:\n line.append(\"0\")\n elif b is float:\n line.append(\"0.0\")\n elif b is decimal.Decimal:\n line.append(\"0\")\n elif b is str:\n line.append(\"\")\n else:\n line.append(\"\")\n add += 1\n\n res = {}\n for c, v in columns.items():\n if \"AUTOFILL\" in v:\n res[v[0]] = \"NULL\"\n elif \"AUTOINCREMENT\" in v:\n continue\n else:\n if c >= len(line):\n self.LOG(\n \"(a)line number \",\n num_line,\n \"*unable to process a line columns \",\n c,\n \"#\",\n line,\n \" columns \",\n columns)\n return None\n\n val = line[c]\n if len(v) > 2 and v[2].lower() not in [\n \"primarykey\", \"autofill\"]:\n val = v[2](val)\n\n try:\n if isinstance(v[1], tuple):\n val = v[1][0](val)\n elif v[1] is datetime.datetime:\n if isinstance(val, datetime.datetime):\n pass\n elif isinstance(val, str):\n val = datetime.datetime.parse(val)\n else:\n raise TypeError(\n \"unable to convert %s into datetime\" % str(\n type(val)))\n else:\n val = v[1](val)\n except ValueError: # as e :\n self.LOG(\n \"(b)line number \",\n num_line,\n \"**unable to process a line columns \",\n c,\n \"#\",\n v[0],\n \" type \",\n v[1],\n \" value \",\n repr(\n line[c]))\n return None\n\n 
if isinstance(val, str):\n val = val.replace(\"'\", \"''\")\n if lower_case:\n val = val.lower()\n res[v[0]] = val\n\n return res\n except Exception:\n self.LOG(\"(c)line number\", num_line,\n \"***unable to process a line columns:\", line)\n return None", "def parse_row(self, row):\n \n self.metadata = row", "def mapper(self, line_no, line):\n cell = csv_readline(line)\n if cell[0] == 'V':\n yield cell[4],1", "def _ParseOutput(output_text):\n if _CSV_PREFIX not in output_text:\n raise ValueError('{0} not found in\\n{1}'.format(_CSV_PREFIX, output_text))\n csv_fp = six.StringIO(str(output_text).rsplit(_CSV_PREFIX, 1)[-1])\n reader = csv.DictReader(csv_fp)\n if (frozenset(reader.fieldnames) !=\n frozenset(['variable', 'value', 'unit'])):\n raise ValueError('Unexpected fields: {}'.format(reader.fieldnames))\n for row in reader:\n yield row['variable'], float(row['value']), row['unit']", "def lineToList(self, line):\n l = [item for item in next(csv.reader(StringIO.StringIO(line), self.CSVDialect))]\n if self.firstLine is None:\n self.firstLine = l\n return None\n return l", "def interpret_line(self, line, source=None, lineno=None):\n\n pline = self.parser.parse_line(line, source=source, lineno=lineno)\n return self.execute(pline)", "def mapper_data_cleaning(self, l, line):\n lineitems = line.split(\",\")\n yield (lineitems[0], lineitems[2])", "def parse_line(self, line):\n\t\tif line[0] == \"#\":\n\t\t\treturn False\n\t\tparts = [x.strip() for x in line.strip().split(\",\")]\n\t\tself.unix_time = int(parts[0])\n\t\tself.cycles_done = int(parts[1])\n\t\tself.cur_path = int(parts[2])\n\t\tself.paths_total = int(parts[3])\n\t\tself.pending_total = int(parts[4])\n\t\tself.pending_favs = int(parts[5])\n\t\tself.map_size = float(parts[6].replace(\"%\",\"\"))\n\t\tself.unique_crashes = int(parts[7])\n\t\tself.unique_hangs = int(parts[8])\n\t\tself.max_depth = int(parts[9])\n\t\tself.execs_per_sec = float(parts[10])\n\t\treturn True", "def parse_row(row, download_to=\"../data/raw/\"):\n\n row_template = utils.get_row_template(row)\n\n url = row_template[\"link\"]\n link_format = url.split(\".\")[-1].lower()\n file_name = url.split(\"/\")[-1]\n local_file_path = download_to + file_name\n\n if not os.path.exists(local_file_path):\n utils.download_file(url, local_file_path)\n\n if link_format == \"csv\":\n table_df = csv_cleaner.try_to_parse_csv(local_file_path)\n elif link_format == \"pdf\":\n table_df = pdf_cleaner.try_parse_pdf(local_file_path)\n elif link_format == \"odt\":\n table_df = odt_cleaner.try_parse_odt(local_file_path)\n elif link_format == \"doc\":\n table_df = doc_cleaner.try_parse_doc(local_file_path)\n elif link_format == \"xlsx\":\n table_df = xlxs_cleaner.try_parse_xlsx(local_file_path)\n elif link_format == \"ods\":\n table_df = ods_cleaner.try_parse_ods(local_file_path)\n else:\n raise Exception(\"Not sure how to parse {}...\".format(local_file_path))\n\n if table_df is None:\n return None\n\n table_df[\"department\"] = row_template[\"department\"]\n table_df[\"period\"] = row_template[\"period\"]\n table_df[\"link\"] = row_template[\"link\"]\n\n return table_df", "def csv_parser(lines): \n\n data_points = []\n for line in lines:\n items = line.strip().split(\",\")\n try: #will fail on header line in file\n data_points.append(map(float, items[1:])) #first item is the label\n except ValueError: #must be the header\n continue\n return data_points", "def handle_csv(self):\n try:\n reader = csv.reader(open(self.options.datafile, 'r'))\n except IOError:\n errormsg(_('Cannot read 
\"{}\"'.format(self.options.datafile)))\n raise Exception(_('Cannot read \"{}\"'.format(self.options.datafile)))\n if self.options.var_type == 'name':\n try:\n self.header = reader.next()\n except StopIteration:\n errormsg(_('Data file \"{}\" contains no data'.format(\n self.options.datafile)))\n raise Exception(_('Data file \"{}\" contains no data'.format(\n self.options.datafile)))\n self.data = []\n for row in reader:\n self.data.append(row)", "def __obtain_data_from_csv__(self, csvfile):\n data = csvfile.readlines()\n data = self.__parse_string_for_delimiter__(data)\n return data", "def line_to_row(line):\n m = line_re.match(line)\n if m:\n # TODO \n return m.group(1), m.group(2)\n else:\n return None", "def parseChain(self, line):\n\n\t\tcols = string.split(line, \",\")\n\n\t\tfor col in cols:\n\t\t\tself.chain.append(col)", "def parse_line(line):\n log_line = LogLine(line)\n dt = datetime.datetime.strptime(log_line.line[0], \"%Y-%m-%d %H:%M:%S\")\n # make a tuple with dt and the rest (splatted)\n return (dt, *log_line.line[1:])", "def __parseCsvRow(row):\r\n \r\n resultRow = []\r\n for item in row:\r\n if type(item) is str:\r\n if \".\" in item:\r\n try:\r\n f = float(item)\r\n resultRow.append(f)\r\n except ValueError:\r\n resultRow.append(item)\r\n else:\r\n try:\r\n i = int(item)\r\n resultRow.append(i)\r\n except ValueError:\r\n resultRow.append(item)\r\n else:\r\n resultRow.append(item)\r\n return resultRow", "def from_line(self, line: str):\n raise NotImplementedError()", "def parse(stream, idx):\n\n # Skip comment lines\n stream = filter(lambda x: not x.startswith('#'), stream)\n\n # Ignore empty lines.\n stream = filter(lambda x: x.strip(), stream)\n\n # Format the stream.\n stream = csv.reader(stream, delimiter=delimiter)\n\n # Generate empty values on missing columns.\n for row in stream:\n try:\n yield (row[idx], None)\n except IndexError as exc:\n yield ('', None)", "def csvread(infile):\n out_csv = []\n errors = []\n index = -1\n p = LineParser(csv=True)\n for line in infile:\n index += 1\n try:\n values = p.feed(line)\n except ListQuoteError as e:\n values = []\n e.line = line\n e.index = index\n errors.append(e)\n #\n out_csv.append(values)\n #\n if errors:\n e = CSVError(\"Parsing CSV failed. 
See 'errors' attribute.\")\n e.csv = out_csv\n e.errors = errors\n raise e\n return out_csv", "def parse_line(self, line: str) -> Flight:\n split_line = line.rstrip(\"\\n\").split(\",\")\n departure = datetime.datetime.strptime(split_line[2], self.time_format)\n arrival = datetime.datetime.strptime(split_line[3], self.time_format)\n new_flight = Flight(split_line[0], split_line[1], departure, arrival, split_line[4])\n return new_flight", "def test_csv_row_bug(script_runner, tmpdir, test_dir):\n csv_file = tmpdir / 'csv_file.csv'\n\n ret = script_runner.run(\n 'mwcp-tool', '-p', 'foo', '-c', str(csv_file), str(test_dir), cwd=str(tmpdir))\n print(ret.stdout)\n print(ret.stderr, file=sys.stderr)\n assert ret.success\n\n assert csv_file.exists()\n\n with csv_file.open('r') as fo:\n reader = csv.reader(fo)\n rows = list(reader)\n assert len(rows) == len(test_dir.listdir()) + 1\n assert rows[0] == ['scan_date', 'inputfilename', 'outputfile.name',\n 'outputfile.description', 'outputfile.md5', 'address', 'debug', 'url']\n for i, row in enumerate(rows[1:]):\n assert row[0] and row[1]\n # Test entries except the timestamp and full file path.\n assert row[2:] == [\n 'fooconfigtest.txt',\n 'example output file',\n '5eb63bbbe01eeed093cb22bb8f5acdc3',\n '127.0.0.1',\n ('[+] File test_{0}.txt identified as Foo.\\n'\n '[+] size of inputfile is 23 bytes\\n'\n '[+] operating on inputfile test_{0}.txt').format(i),\n 'http://127.0.0.1',\n ]", "def parse_vcf_line(line):\n\n if isinstance(line, str):\n if line.startswith('#'):\n return None\n\n elem = line.strip().split('\\t')\n elif isinstance(line, list):\n elem = line\n\n try:\n quality = int(elem[5])\n except ValueError:\n try:\n quality = float(elem[5])\n except ValueError:\n quality = None\n\n filter_field = None\n\n if elem[6] != '.':\n filter_field = elem[6].split(';')\n\n info = elem[7]\n\n try:\n fmt = elem[8]\n except IndexError:\n fmt = None\n else:\n if fmt == '.':\n fmt = None\n\n return VCFRecord(elem[0], int(elem[1]), None if elem[2] == '.' 
else elem[2], elem[3],\n elem[4].split(','), quality, filter_field, info, fmt, elem[9:])", "def test_multiple_lines():\n\n # Multi-line file\n test_file = StringIO(\n u'fri,wed\\n1,1\\n2,2'\n )\n\n csv_parser = CSVParser(test_file)\n\n expected = [\n {'day': 'wed', 'description': 'N/A 1', 'square': 1, 'value': 1},\n {'day': 'fri', 'description': 'N/A 2', 'double': 2, 'value': 1},\n ]\n\n assert csv_parser.parse() == expected", "def _parsecsv(x):\n for line in x:\n # decode as utf-8, whitespace-strip and split on delimiter\n yield line.decode('utf-8').strip().split(config.DELIMITER)", "def _parseLine(self, line, delimiter = \":\"):\r\n\t\tsplt = line.split(delimiter)\r\n\t\tinVec = self._parseVec(splt[0])\r\n\t\toutVec = self._parseVec(splt[1])\r\n\t\tif (len(splt) == 2):\r\n\t\t\tlabel = \"\"\r\n\t\telse:\r\n\t\t\tlabel = splt[2]\r\n\t\tself.data.append({'in':inVec, 'out':outVec, 'label':label})", "def run(self, row, **kwargs):\n self.source = row\n kwargs['output'] = self.__graph__()\n super(CSVRowProcessor, self).run(**kwargs)\n return kwargs['output']", "def parse_csv_row(self, row):\n\n for key in self.field_map:\n if self.field_map[key] is not None:\n if key == 'marking':\n self.obstacle_data[key] = self.get_marking_value(row[self.field_map[key]].strip())\n elif key == 'lighting':\n self.obstacle_data[key] = self.get_lighting_value(row[self.field_map[key]].strip())\n elif key == 'obst_type':\n self.obstacle_data['obst_type_id'] = self.get_obstacle_type_id(row[self.field_map[key]].strip())\n else:\n self.obstacle_data[key] = row[self.field_map[key]].strip()", "def GetLine(line):\r\n pass", "def _process_line(self, line):\n i = 0\n while True:\n i = self._process_token(line)\n if i == 0:\n break\n line = line[i:]\n if line:\n util.debug('Lexer state:\\n{}'.format(self._debug_lexer_state()))\n raise LexerError('Syntax error (remaining:%r)' % (line,),\n self._cur_lineno + 1,\n self._cur_charno + 1)", "def dp_rows(self, csv_reader, ATA_line):\n rows = []\n # Compile a regular expression pattern matching class ID's\n class_id_re = re.compile(\"[A-Z]+&* [0-9]+\")\n for row in csv_reader:\n # Exit when we encounter the next ATA row after first\n if row[0].startswith(\"ATA\"):\n return (rows, row)\n elif class_id_re.fullmatch(row[0].strip()):\n rows.append(row)\n elif row[0].startswith(\"Generic\"):\n rows.append(row)\n # This exit point occurs when we run out of rows to read\n return (rows, None)", "def _parse_record(line):\n fields = line.split()\n if len(fields) < 6:\n raise ParseError(\"Less than six fields found in PED/FAM file\")\n individual_id, paternal_id, maternal_id = fields[1:4]\n if paternal_id == \"0\":\n paternal_id = None\n if maternal_id == \"0\":\n maternal_id = None\n return Trio(child=individual_id, father=paternal_id, mother=maternal_id)", "def parse (self, line):\n result = self.program.parseString (line)\n return TranQL_AST (result.asList (), self.backplane)", "def __next__(\n self, make_invalid_measurement_missing: bool = False\n ) -> Measurement: # pragma: no mutate\n try:\n line = self._fpointer.readline()\n parts = next(csv.reader([line]))\n if not parts:\n raise StopIteration\n return self._line_to_measurement(parts, make_invalid_measurement_missing)\n except StopIteration:\n raise StopIteration", "def make_generators_from_csv(csv_file):\n with open(csv_file, mode='r', encoding='utf-8',\n errors='ignore') as source_data:\n source_data.readline() # skip header\n\n for line in source_data.readlines():\n try:\n line = line.rstrip().split(',')\n if len(line) == 8:\n 
yield line\n else:\n logger.error(\n f\"Import failure on line split.\"\n \" Expected 8 columns, but got {len(line)}. {line}\"\n )\n except IndexError as e:\n logger.error(f\"Could not import customer data for {line}: {e}\")", "def _translate_line_to_handle(self, line):", "def csv_parser_test():\n data = csv_parser(myspreadsheet)\n print 'Your data object:'\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(data) \n # Did your parser work?\n for row_num, row in enumerate(data):\n try:\n assert len(row) == 3\n except AssertionError:\n print \"Row %s seems to be misparsed; its length is %s\" % (row_num, len(row))\n # Check on one of the values:\n try:\n assert data[4][2] == 'Linguist'\n except AssertionError:\n print \"Error: data[4][2] should equal 'Linguist'; actual value is %s\" % data[4][2]\n # Did you remember your int conversions?\n try:\n assert isinstance(data[0][0], int)\n except AssertionError:\n print \"Error: data[0][0] should be an int\"\n # Did you remember your float conversions?\n try:\n assert isinstance(data[6][1], float)\n except AssertionError:\n print \"Error: data[6][1] should be a float\"", "def process_line(self, line, data):\n return data", "def decodeline(self, line):\n result = ApacheLogLine()\n result.full_line = line\n linepatternmatch = self._linepattern.match(line)\n if linepatternmatch:\n result.hostname = linepatternmatch.group(1)\n result.user = linepatternmatch.group(2)\n if result.user == '-':\n result.user = ''\n (result.accesstime_seconds, result.serveroffset) = self.parsedate(linepatternmatch.group(3))\n result.accesstime_string = stringdate(result.accesstime_seconds, offset=result.serveroffset)\n result.file = linepatternmatch.group(4)\n result.code = linepatternmatch.group(5)\n result.code_description = self._codetranslator.get_description(result.code)\n result.size = linepatternmatch.group(6)\n if result.size == '-':\n result.size = 0\n result.referer = linepatternmatch.group(7)\n if result.referer == '-':\n result.referer = ''\n result.browser = linepatternmatch.group(8)\n else:\n self._notparsable += 1\n warn(\"The line '%s' could not be parsed\" % line)\n return None\n if self._line_fits_pattern(result):\n self._acceptedlines += 1\n return result\n else:\n self._rejectedlines += 1\n return None", "def parse_line(self, line: str) -> None:\n self._count += 1", "def csv_parser_test():\r\n data = csv_parser(myspreadsheet)\r\n print('Your data object:')\r\n pp = pprint.PrettyPrinter(indent=4)\r\n pp.pprint(data) \r\n # Did your parser work?\r\n for row_num, row in enumerate(data):\r\n try:\r\n assert len(row) == 3\r\n except AssertionError:\r\n print ((\"Row %s seems to be misparsed; its length is %s\") % (row_num, len(row)))\r\n # Check on one of the values:\r\n try:\r\n assert data[4][2] == 'Linguist'\r\n except AssertionError:\r\n print ((\"Error: data[4][2] should equal 'Linguist'; actual value is %s\") % data[4][2])\r\n # Did you remember your int conversions?\r\n try:\r\n assert isinstance(data[0][0], int)\r\n except AssertionError:\r\n print (\"Error: data[0][0] should be an int\")\r\n # Did you remember your float conversions?\r\n try:\r\n assert isinstance(data[6][1], float)\r\n except AssertionError:\r\n print (\"Error: data[6][1] should be a float\")", "def _assemble(self, line):\n self.counter += 1\n if line.strip() == \"\":\n return self.__next__()\n elif line.startswith(\"browser\"):\n return self.__next__()\n elif line.startswith(\"track\"):\n # reset metadata\n self._parse_track_line(line[5:])\n return self.__next__()\n elif 
line.startswith(\"#\"):\n return self.__next__()\n else:\n try:\n return self.return_type.from_bed(line, extra_columns=self.extra_columns)\n except:\n self.rejected.append(line)\n msg = \"Cannot parse BED line number %s. \" % self.counter\n if self.metadata.get(\"type\", None) is not None:\n msg += (\n \"Are you sure this is a %s BED file with extra columns (%s)?\" %\n (self.metadata.get(\"type\"), self._get_extra_column_names())\n )\n elif self.extra_columns != 0:\n msg += (\n \"Are you sure this BED file has extra columns (%s)?\" %\n self._get_extra_column_names()\n )\n else:\n msg += \"Maybe this BED has extra columns (i.e. is an extended BED file)?\"\n\n msg += (\"\\n %s\" % line)\n warn(msg, FileFormatWarning)\n return self.__next__()", "def parse_line(self, line):\n tokens, permanent, comment = OrdersParser.tokenize(line)\n \n if not tokens:\n if comment:\n self._consumer.comment(permanent=permanent, comment=comment)\n return\n \n order = tokens.popleft().lower()\n \n if order == '#atlantis':\n if not tokens:\n raise SyntaxError('{}: missing faction'.format(line))\n faction = OrdersParser._value(tokens.popleft())\n try:\n password = tokens.popleft()\n except IndexError:\n self._consumer.atlantis(faction=faction)\n else:\n self._consumer.atlantis(faction=faction,\n password=password)\n \n elif order == '#end':\n self._consumer.atlantis_end()\n \n elif order == 'unit':\n if not tokens:\n raise SyntaxError('{}: missing unit'.format(line))\n else:\n unit = OrdersParser._value(tokens.popleft())\n if not unit:\n raise SyntaxError('{}: invalid unit'.format(line))\n self._consumer.unit(unit=unit)\n \n elif order == 'form':\n if not tokens:\n raise SyntaxError('{}: missing alias'.format(line))\n else:\n alias = OrdersParser._value(tokens.popleft())\n if not alias:\n raise SyntaxError('{}: invalid alias'.format(line))\n self._consumer.order_form(alias=alias, permanent=permanent,\n comment=comment)\n elif order == 'end':\n self._consumer.order_end()\n \n elif order == 'turn':\n self._consumer.order_turn(permanent=permanent, comment=comment)\n \n elif order == 'endturn':\n self._consumer.order_endturn()\n \n elif order == 'address':\n if not tokens:\n raise SyntaxError('{}: missing address'.format(line))\n else:\n self._consumer.order_address(address=tokens.popleft(),\n permanent=permanent,\n comment=comment)\n \n elif order == 'advance':\n try:\n dirs = [OrdersParser._parse_dir(d.lower()) for d in tokens]\n except SyntaxError as e:\n raise SyntaxError('{}: {}'.format(line, e))\n else:\n self._consumer.order_advance(dirs=dirs, permanent=permanent,\n comment=comment)\n \n elif order == 'assassinate':\n try:\n unit = OrdersParser._parse_unit(tokens)\n except SyntaxError as e:\n raise SyntaxError('{}: {}'.format(line, e))\n else:\n self._consumer.order_assassinate(unit=unit,\n permanent=permanent,\n comment=comment)\n \n elif order == 'attack':\n targets = []\n try:\n while tokens:\n targets.append(OrdersParser._parse_unit(tokens))\n except SyntaxError as e:\n self._consumer.order_attack(targets=targets,\n permanent=permanent,\n comment=comment)\n raise SyntaxError('{}: {}'.format(line, e))\n else:\n self._consumer.order_attack(targets=targets,\n permanent=permanent,\n comment=comment)\n \n elif order == 'autotax':\n if not tokens:\n raise SyntaxError('{}: missing value'.format(line))\n try:\n self._consumer.order_autotax(\n flag=OrdersParser._parse_TF(tokens.popleft()),\n permanent=permanent, comment=comment)\n except SyntaxError as e:\n raise SyntaxError('{}: {}'.format(line, e))\n \n elif order 
== 'avoid':\n if not tokens:\n raise SyntaxError('{}: missing value'.format(line))\n try:\n self._consumer.order_avoid(\n flag=OrdersParser._parse_TF(tokens.popleft()),\n permanent=permanent, comment=comment)\n except SyntaxError as e:\n raise SyntaxError('{}: {}'.format(line, e))\n \n elif order == 'idle':\n self._consumer.order_idle(permanent=permanent, comment=comment)\n \n elif order == 'behind':\n if not tokens:\n raise SyntaxError('{}: missing value'.format(line))\n try:\n self._consumer.order_behind(\n flag=OrdersParser._parse_TF(tokens.popleft()),\n permanent=permanent, comment=comment)\n except SyntaxError as e:\n raise SyntaxError('{}: {}'.format(line, e))\n \n elif order == 'build':\n if not tokens:\n self._consumer.order_build(permanent=permanent, comment=comment)\n else:\n tok = tokens.popleft().lower()\n if tok == 'help':\n try:\n target = OrdersParser._parse_unit(tokens)\n except SyntaxError as e:\n raise SyntaxError('{}: {}'.format(line, e))\n else:\n self._consumer.order_build(target=target,\n permanent=permanent,\n comment=comment)\n else:\n self._consumer.order_build(structure=tok,\n permanent=permanent,\n comment=comment)\n \n elif order == 'buy':\n if not tokens:\n raise SyntaxError('{}: missing amount'.format(line))\n num = tokens.popleft().lower()\n if num == 'all':\n num = OrdersParser.AMT_ALL\n else:\n num = OrdersParser._value(num)\n if not num:\n raise SyntaxError('{}: missing amount'.format(line))\n if not tokens:\n raise SyntaxError('{}: missing item'.format(line))\n self._consumer.order_buy(num=num, item=tokens.popleft().lower(),\n permanent=permanent, comment=comment)\n \n elif order == 'cast':\n if not tokens:\n raise SyntaxError('{}: missing skill'.format(line))\n skill = tokens.popleft().lower()\n params = [p.lower() for p in tokens]\n self._consumer.order_cast(skill=skill, params=params,\n permanent=permanent, comment=comment)\n \n elif order == 'claim':\n if not tokens:\n raise SyntaxError('{}: missing amount'.format(line))\n value = OrdersParser._value(tokens.popleft())\n if not value:\n raise SyntaxError('{}: missing amount'.format(line))\n self._consumer.order_claim(num=value, permanent=permanent,\n comment=comment)\n \n elif order == 'combat':\n if not tokens:\n combat = OrdersParser.IT_NONE\n else:\n combat = tokens.popleft().lower()\n self._consumer.order_combat(skill=combat, permanent=permanent,\n comment=comment)\n \n elif order == 'consume':\n if not tokens:\n consuming = 'none'\n else:\n consuming = tokens.popleft().lower()\n if consuming not in ('unit', 'faction', 'none'):\n raise SyntaxError('{}: invalid value'.format(line))\n self._consumer.order_consume(consuming=consuming,\n permanent=permanent, comment=comment)\n \n elif order == 'declare':\n if not tokens:\n raise SyntaxError('{}: missing faction'.format(line))\n fac = tokens.popleft().lower()\n if fac != 'default':\n fac = OrdersParser._value(fac)\n if not fac:\n raise SyntaxError('{}: missing faction'.format(line))\n if not tokens:\n self._consumer.order_declare(faction=fac, permanent=permanent,\n comment=comment)\n else:\n attitude = tokens.popleft().lower()\n if attitude in ('hostile', 'unfriendly', 'neutral',\n 'friendly', 'ally'):\n self._consumer.order_declare(faction=fac,\n attitude=attitude,\n permanent=permanent,\n comment=comment)\n else:\n raise SyntaxError('{}: invalid attitude'.format(line))\n \n elif order == 'describe':\n if tokens:\n target = tokens.popleft().lower()\n else:\n raise SyntaxError('{}: missing target'.format(line))\n if tokens:\n description = 
tokens.popleft()\n else:\n description = None\n if target == 'unit':\n self._consumer.order_describe(unit=description,\n permanent=permanent,\n comment=comment)\n elif target in ('ship', 'building', 'object', 'structure'):\n self._consumer.order_describe(structure=description,\n permanent=permanent,\n comment=comment)\n else:\n raise SyntaxError('{}: invalid target'.format(line))\n \n elif order == 'destroy':\n self._consumer.order_destroy(permanent=permanent, comment=comment)\n \n elif order == 'enter':\n if tokens:\n structure = OrdersParser._value(tokens.popleft())\n if dir:\n self._consumer.order_enter(structure=structure,\n permanent=permanent,\n comment=comment)\n else:\n raise SyntaxError('{}: invalid structure'.format(line))\n else:\n raise SyntaxError('{}: missing structure'.format(line))\n \n elif order == 'entertain':\n self._consumer.order_entertain(permanent=permanent, comment=comment)\n \n elif order == 'evict':\n targets = []\n try:\n while tokens:\n targets.append(OrdersParser._parse_unit(tokens))\n except SyntaxError as e:\n self._consumer.order_evict(targets=targets,\n permanent=permanent,\n comment=comment)\n raise SyntaxError('{}: {}'.format(line, e))\n else:\n self._consumer.order_evict(targets=targets,\n permanent=permanent,\n comment=comment)\n \n elif order == 'exchange':\n try:\n target = OrdersParser._parse_unit(tokens, allow_any=True)\n except SyntaxError as e:\n raise SyntaxError('{}: {}'.format(line, e))\n if not tokens:\n raise SyntaxError('{}: missing given amount'.format(line))\n amtGive = OrdersParser._value(tokens.popleft())\n if not tokens:\n raise SyntaxError('{}: missing given item'.format(line))\n itemGive = tokens.popleft().lower()\n if not tokens:\n raise SyntaxError('{}: missing expected amount'.format(line))\n amtExpected = OrdersParser._value(tokens.popleft())\n if not tokens:\n raise SyntaxError('{}: missing expected item'.format(line))\n itemExpected = tokens.popleft().lower()\n self._consumer.order_exchange(\n target=target, give={'amt': amtGive, 'item': itemGive},\n expected={'amt': amtExpected, 'item': itemExpected},\n permanent=permanent, comment=comment)\n \n elif order == 'faction':\n if not tokens:\n raise SyntaxError('{}: missing faction type'.format(line))\n ftype = {}\n while tokens:\n t = tokens.popleft().lower()\n if not tokens:\n raise SyntaxError('{}: invalid value'.format(line))\n ftype[t] = OrdersParser._value(tokens.popleft())\n self._consumer.order_faction(permanent=permanent, comment=comment,\n **ftype)\n \n elif order == 'find':\n if not tokens:\n raise SyntaxError('{}: missing faction'.format(line))\n fac = tokens.popleft().lower()\n if fac != 'all':\n fac = OrdersParser._value(fac)\n if not fac:\n raise SyntaxError('{}: invalid faction'.format(line))\n self._consumer.order_find(permanent=permanent, comment=comment,\n faction=fac)\n \n elif order == 'forget':\n if not tokens:\n raise SyntaxError('{}: missing skill'.format(line))\n self._consumer.order_forget(permanent=permanent, comment=comment,\n skill=tokens.popleft().lower())\n \n elif order == 'withdraw':\n if not tokens:\n raise SyntaxError('{}: missing amount'.format(line))\n tok = tokens.popleft().lower()\n amt = OrdersParser._value(tok)\n if amt < 1:\n amt = 1\n item = tok\n elif tokens:\n item = tokens.popleft().lower()\n else:\n raise SyntaxError('{}: missing item'.format(line))\n self._consumer.order_withdraw(permanent=permanent, comment=comment,\n amt=amt, item=item)\n \n elif order == 'give':\n try:\n target = OrdersParser._parse_unit(tokens)\n except 
SyntaxError as e:\n raise SyntaxError('{}: {}'.format(line, e))\n if not tokens:\n raise SyntaxError('{}: missing amount'.format(line))\n amt = tokens.popleft().lower()\n if amt == 'unit':\n self._consumer.order_give(permanent=permanent, comment=comment,\n target=target, give=amt)\n else:\n if amt != 'all':\n amt = OrdersParser._value(amt)\n if not amt:\n raise SyntaxError('{}: invalid amount'.format(line))\n try:\n item = tokens.popleft().lower()\n if item == 'unfinished':\n unfinished = True\n item = tokens.popleft().lower()\n else:\n unfinished = False\n except:\n raise SyntaxError('{}: missing item'.format(line))\n \n if tokens and tokens[0].lower() == 'except':\n tok = tokens.popleft().lower()\n if amt != 'all':\n raise SyntaxError(\n '{}: except only valid with all'. format(line))\n if not tokens:\n raise SyntaxError(\n '{}: missing except value'.format(line))\n excpt = OrdersParser._value(tokens.popleft())\n if not excpt:\n raise SyntaxError(\n '{}: invalid except value'.format(line))\n self._consumer.order_give(permanent=permanent,\n comment=comment,\n target=target,\n give={'amt': amt, 'item': item,\n 'unfinished': unfinished,\n 'excpt': excpt})\n else:\n self._consumer.order_give(permanent=permanent,\n comment=comment,\n target=target,\n give={'amt': amt, 'item': item,\n 'unfinished': unfinished})\n \n elif order == 'guard':\n if not tokens:\n raise SyntaxError('{}: invalid value'.format(line))\n try:\n guard = OrdersParser._parse_TF(tokens.popleft())\n except SyntaxError as e:\n raise SyntaxError('{}: {}'.format(line, e))\n self._consumer.order_guard(flag=guard, permanent=permanent,\n comment=comment)\n \n elif order == 'hold':\n if not tokens:\n raise SyntaxError('{}: invalid value'.format(line))\n try:\n hold = OrdersParser._parse_TF(tokens.popleft())\n except SyntaxError as e:\n raise SyntaxError('{}: {}'.format(line, e))\n self._consumer.order_hold(flag=hold, permanent=permanent,\n comment=comment)\n \n elif order == 'join':\n try:\n target = OrdersParser._parse_unit(tokens)\n except SyntaxError as e:\n raise SyntaxError('{}: {}'.format(line, e))\n if tokens:\n tok = tokens.popleft().lower()\n if tok == 'nooverload':\n self._consumer.order_join(target=target, nooverload=True,\n permanent=permanent,\n comment=comment)\n elif tok == 'merge':\n self._consumer.order_join(target=target, merge=True,\n permanent=permanent,\n comment=comment)\n else:\n self._consumer.order_join(target=target,\n permanent=permanent,\n comment=comment)\n else:\n self._consumer.order_join(target=target,\n permanent=permanent,\n comment=comment)\n \n elif order == 'leave':\n self._consumer.order_leave(permanent=permanent, comment=comment)\n \n elif order == 'move':\n try:\n dirs = [OrdersParser._parse_dir(d.lower()) for d in tokens]\n except SyntaxError as e:\n raise SyntaxError('{}: {}'.format(line, e))\n else:\n self._consumer.order_move(dirs=dirs, permanent=permanent,\n comment=comment)\n \n elif order == 'name':\n if len(tokens) < 2:\n raise SyntaxError('{}: missing name'.format(line))\n what, name = tokens.popleft().lower(), tokens.popleft()\n if what == 'faction':\n self._consumer.order_name(permanent=permanent, comment=comment,\n faction=name)\n elif what == 'unit':\n self._consumer.order_name(permanent=permanent, comment=comment,\n unit=name)\n elif what in ('building', 'ship', 'object', 'structure'):\n self._consumer.order_name(permanent=permanent, comment=comment,\n structure=name)\n elif what in ('village', 'town', 'city') and \\\n OrdersParser._get_legal(name):\n 
self._consumer.order_name(permanent=permanent, comment=comment,\n city=OrdersParser._get_legal(name))\n else:\n raise SyntaxError('{}: invalid argument'.format(line))\n \n elif order == 'noaid':\n if not tokens:\n raise SyntaxError('{}: invalid value'.format(line))\n try:\n noaid = OrdersParser._parse_TF(tokens.popleft())\n except SyntaxError as e:\n raise SyntaxError('{}: {}'.format(line, e))\n self._consumer.order_noaid(flag=noaid, permanent=permanent,\n comment=comment)\n \n elif order == 'nocross':\n if not tokens:\n raise SyntaxError('{}: invalid value'.format(line))\n try:\n nocross = OrdersParser._parse_TF(tokens.popleft())\n except SyntaxError as e:\n raise SyntaxError('{}: {}'.format(line, e))\n self._consumer.order_nocross(flag=nocross, permanent=permanent,\n comment=comment)\n \n elif order == 'nospoils':\n if not tokens:\n raise SyntaxError('{}: invalid value'.format(line))\n try:\n spoils_none = OrdersParser._parse_TF(tokens.popleft())\n except SyntaxError as e:\n raise SyntaxError('{}: {}'.format(line, e))\n if spoils_none:\n self._consumer.order_spoils(spoils='none', permanent=permanent,\n comment=comment)\n else:\n self._consumer.order_spoils(spoils='all', permanent=permanent,\n comment=comment)\n raise DeprecationWarning(\n '{}: deprecated. Use SPOILS instead'.format(line))\n \n elif order == 'option':\n if not tokens:\n raise SyntaxError('{}: missing option'.format(line))\n option = tokens.popleft().lower()\n if option == 'times':\n self._consumer.order_option(times=True, permanent=permanent,\n comment=comment)\n elif option == 'notimes':\n self._consumer.order_option(times=False, permanent=permanent,\n comment=comment)\n elif option == 'showattitudes':\n self._consumer.order_option(showunitattitudes=True,\n permanent=permanent,\n comment=comment)\n elif option == 'dontshowattitudes':\n self._consumer.order_option(showunitattitudes=False,\n permanent=permanent,\n comment=comment)\n elif option == 'template':\n if not tokens:\n raise SyntaxError('{}: missing template type'.format(line))\n temformat = tokens.popleft().lower()\n if temformat in ('off', 'short', 'long', 'map'):\n self._consumer.order_option(temformat=temformat,\n permanent=permanent,\n comment=comment)\n else:\n raise SyntaxError('{}: invalid template type'.format(line))\n else:\n raise SyntaxError('{}: invalid option'.format(line))\n \n elif order == 'password':\n if not tokens:\n self._consumer.order_password(password='none',\n permanent=permanent,\n comment=comment)\n else:\n self._consumer.order_password(password=tokens.popleft(),\n permanent=permanent,\n comment=comment)\n \n elif order == 'pillage':\n self._consumer.order_pillage(permanent=permanent, comment=comment)\n \n elif order == 'prepare':\n if not tokens:\n self._consumer.order_prepare(item=None,\n permanent=permanent,\n comment=comment)\n else:\n self._consumer.order_prepare(item=tokens.popleft().lower(),\n permanent=permanent,\n comment=comment)\n \n elif order == 'weapon':\n self._consumer.order_weapon(permanent=permanent, comment=comment,\n items=[w.lower() for w in tokens])\n \n elif order == 'armor':\n self._consumer.order_armor(permanent=permanent, comment=comment,\n items=[w.lower() for w in tokens])\n \n elif order == 'produce':\n if not tokens:\n raise SyntaxError('{}: missing item'.format(line))\n item = tokens.popleft().lower()\n if OrdersParser._value(item):\n if not tokens:\n raise SyntaxError('{}: missing item'.format(line))\n self._consumer.order_produce(target=OrdersParser._value(item),\n item=tokens.popleft().lower(),\n 
permanent=permanent,\n comment=comment)\n else:\n self._consumer.order_produce(item=item,\n permanent=permanent,\n comment=comment)\n \n elif order == 'promote':\n try:\n unit = OrdersParser._parse_unit(tokens)\n except SyntaxError as e:\n raise SyntaxError('{}: {}'.format(line, e))\n else:\n self._consumer.order_promote(unit=unit, permanent=permanent,\n comment=comment)\n \n elif order == 'quit':\n if not tokens:\n self._consumer.order_quit(permanent=permanent, comment=comment)\n else:\n self._consumer.order_quit(password=tokens.popleft(),\n permanent=permanent, comment=comment)\n \n elif order == 'restart':\n if not tokens:\n self._consumer.order_restart(permanent=permanent,\n comment=comment)\n else:\n self._consumer.order_restart(password=tokens.popleft(),\n permanent=permanent,\n comment=comment)\n \n elif order == 'reveal':\n if not tokens:\n self._consumer.order_reveal(reveal=None, permanent=permanent,\n comment=comment)\n else:\n tok = tokens.popleft().lower()\n if tok == 'none':\n self._consumer.order_reveal(reveal=None, comment=comment,\n permanent=permanent)\n elif tok in ('unit', 'faction'):\n self._consumer.order_reveal(reveal=tok, comment=comment,\n permanent=permanent)\n else:\n raise SyntaxError('{}: invalid value'.format(line))\n \n elif order == 'sail':\n try:\n dirs = [OrdersParser._parse_dir(d.lower(), allow_enter=False) \\\n for d in tokens]\n except SyntaxError as e:\n raise SyntaxError('{}: {}'.format(line, e))\n else:\n self._consumer.order_sail(dirs=dirs, permanent=permanent,\n comment=comment)\n \n elif order == 'sell':\n if not tokens:\n raise SyntaxError('{}: missing amount'.format(line))\n num = tokens.popleft().lower()\n if num == 'all':\n num = OrdersParser.AMT_ALL\n else:\n num = OrdersParser._value(num)\n if not num:\n raise SyntaxError('{}: missing amount'.format(line))\n if not tokens:\n raise SyntaxError('{}: missing item'.format(line))\n self._consumer.order_sell(num=num, item=tokens.popleft().lower(),\n permanent=permanent, comment=comment)\n \n elif order == 'share':\n if not tokens:\n raise SyntaxError('{}: invalid value'.format(line))\n try:\n share = OrdersParser._parse_TF(tokens.popleft())\n except SyntaxError as e:\n raise SyntaxError('{}: {}'.format(line, e))\n self._consumer.order_share(flag=share, permanent=permanent,\n comment=comment)\n \n elif order == 'show':\n try:\n what, item = tokens.popleft().lower(), tokens.popleft().lower()\n except IndexError:\n raise SyntaxError('{}: missing target'.format(line))\n if what == 'skill':\n self._consumer.order_show(permanent=permanent, comment=comment,\n skill=item)\n elif what == 'item':\n self._consumer.order_show(permanent=permanent, comment=comment,\n item=item)\n elif what == 'object':\n self._consumer.order_show(permanent=permanent, comment=comment,\n structure=item)\n else:\n raise SyntaxError('{}: invalid target'.format(line))\n \n elif order == 'spoils':\n if not tokens:\n tok = 'all'\n else:\n tok = tokens.popleft().lower()\n if tok in ('none', 'walk', 'fly', 'swim', 'sail', 'all'):\n self._consumer.order_spoils(spoils=tok, permanent=permanent,\n comment=comment)\n else:\n raise SyntaxError('{}: invalid option'.format(line))\n \n elif order == 'steal':\n try:\n unit = OrdersParser._parse_unit(tokens)\n except SyntaxError as e:\n raise SyntaxError('{}: {}'.format(line, e))\n if not tokens:\n raise SyntaxError('{}: missing item'.format(line))\n else:\n item = tokens.popleft().lower()\n self._consumer.order_steal(target=unit, item=item,\n permanent=permanent, comment=comment)\n \n elif order 
== 'study':\n if not tokens:\n raise SyntaxError('{}: missing skill'.format(line))\n sk = tokens.popleft().lower()\n if tokens:\n self._consumer.order_study(\n skill=sk, level=OrdersParser._value(tokens.popleft()),\n permanent=permanent, comment=comment)\n else:\n self._consumer.order_study(skill=sk, permanent=permanent,\n comment=comment)\n \n elif order == 'take':\n if not tokens or tokens.popleft().lower() != 'from':\n raise SyntaxError('{}: missing from'.format(line))\n if not tokens:\n raise SyntaxError('{}: missing unit'.format(line))\n unit = OrdersParser._value(tokens.popleft())\n if not unit:\n raise SyntaxError('{}: invalid unit'.format(line))\n target = {'unitnum': unit}\n if not tokens:\n raise SyntaxError('{}: missing amount'.format(line))\n amt = tokens.popleft().lower()\n if amt != 'all':\n amt = OrdersParser._value(amt)\n if not amt:\n raise SyntaxError('{}: invalid amount'.format(line))\n try:\n item = tokens.popleft().lower()\n if item == 'unfinished':\n unfinished = True\n item = tokens.popleft().lower()\n else:\n unfinished = False\n except:\n raise SyntaxError('{}: missing item'.format(line))\n \n if tokens and tokens[0].lower() == 'except':\n tok = tokens.popleft().lower()\n if amt != 'all':\n raise SyntaxError(\n '{}: except only valid with all'. format(line))\n if not tokens:\n raise SyntaxError(\n '{}: missing except value'.format(line))\n excpt = OrdersParser._value(tokens.popleft())\n if not excpt:\n raise SyntaxError(\n '{}: invalid except value'.format(line))\n self._consumer.order_takefrom(permanent=permanent,\n comment=comment,\n target=target,\n give={'amt': amt, 'item': item,\n 'unfinished': unfinished,\n 'excpt': excpt})\n else:\n self._consumer.order_takefrom(permanent=permanent,\n comment=comment,\n target=target,\n give={'amt': amt, 'item': item,\n 'unfinished': unfinished})\n \n elif order == 'tax':\n self._consumer.order_tax(permanent=permanent, comment=comment)\n \n elif order == 'teach':\n if not tokens:\n raise SyntaxError('{}: missing target'.format(line))\n targets = []\n try:\n while tokens:\n targets.append(OrdersParser._parse_unit(tokens))\n except SyntaxError as e:\n self._consumer.order_teach(targets=targets,\n permanent=permanent,\n comment=comment)\n raise SyntaxError('{}: {}'.format(line, e))\n else:\n self._consumer.order_teach(targets=targets,\n permanent=permanent,\n comment=comment)\n \n elif order == 'work':\n self._consumer.order_work(permanent=permanent, comment=comment)\n \n elif order == 'transport':\n try:\n target = OrdersParser._parse_unit(tokens)\n except SyntaxError as e:\n raise SyntaxError('{}: {}'.format(line, e))\n if not tokens:\n raise SyntaxError('{}: missing amount'.format(line))\n amt = tokens.popleft().lower()\n if amt != 'all':\n amt = OrdersParser._value(amt)\n if not amt:\n raise SyntaxError('{}: invalid amount'.format(line))\n try:\n item = tokens.popleft().lower()\n if item == 'unfinished':\n unfinished = True\n item = tokens.popleft().lower()\n else:\n unfinished = False\n except:\n raise SyntaxError('{}: missing item'.format(line))\n\n if tokens and tokens[0].lower() == 'except':\n tok = tokens.popleft().lower()\n if amt != 'all':\n raise SyntaxError(\n '{}: except only valid with all'. 
format(line))\n if not tokens:\n raise SyntaxError(\n '{}: missing except value'.format(line))\n excpt = OrdersParser._value(tokens.popleft())\n if not excpt:\n raise SyntaxError(\n '{}: invalid except value'.format(line))\n self._consumer.order_transport(permanent=permanent,\n comment=comment,\n target=target,\n give={'amt': amt, 'item': item,\n 'unfinished': unfinished,\n 'excpt': excpt})\n else:\n self._consumer.order_transport(permanent=permanent,\n comment=comment,\n target=target,\n give={'amt': amt, 'item': item,\n 'unfinished': unfinished})\n \n elif order == 'distribute':\n try:\n target = OrdersParser._parse_unit(tokens)\n except SyntaxError as e:\n raise SyntaxError('{}: {}'.format(line, e))\n if not tokens:\n raise SyntaxError('{}: missing amount'.format(line))\n amt = tokens.popleft().lower()\n if amt != 'all':\n amt = OrdersParser._value(amt)\n if not amt:\n raise SyntaxError('{}: invalid amount'.format(line))\n try:\n item = tokens.popleft().lower()\n if item == 'unfinished':\n unfinished = True\n item = tokens.popleft().lower()\n else:\n unfinished = False\n except:\n raise SyntaxError('{}: missing item'.format(line))\n \n if tokens and tokens[0].lower() == 'except':\n tok = tokens.popleft().lower()\n if amt != 'all':\n raise SyntaxError(\n '{}: except only valid with all'. format(line))\n if not tokens:\n raise SyntaxError(\n '{}: missing except value'.format(line))\n excpt = OrdersParser._value(tokens.popleft())\n if not excpt:\n raise SyntaxError(\n '{}: invalid except value'.format(line))\n self._consumer.order_distribute(permanent=permanent,\n comment=comment,\n target=target,\n give={'amt': amt, 'item': item,\n 'unfinished': unfinished,\n 'excpt': excpt})\n else:\n self._consumer.order_distribute(permanent=permanent,\n comment=comment,\n target=target,\n give={'amt': amt, 'item': item,\n 'unfinished': unfinished})", "def parse(self, line):\n\t\n\t# remove trailing newline a-la Perl CHOMP\n\tline = line.rstrip(\"\\n\")\n\n\t\n # correctly formatted PDB files\n # TODO - assuming 80 chars means well formatted is\n # perhaps risky. 
Need a more robust way to asses\n # formatting validity\n\n if len(line) == 80:\n self.record_name = line[0:6].strip()\n self.atom_id = int(line[6:11].strip())\n self.atom_name = line[12:16].strip()\n self.alt_location = line[16]\n self.res_name = line[17:20].strip()\n self.chain = line[21]\n self.res_id = line[22:26].strip()\n self.res_ins_code = line[26]\n self.coord_X = float(line[30:38].strip())\n self.coord_Y = float(line[38:46].strip())\n self.coord_Z = float(line[46:54].strip())\n self.occupancy = float(line[54:60].strip())\n self.beta = float(line[60:66].strip())\n self.seg_ID = line[72:76].strip()\n self.element = line[76:78].strip()\n\t if line[78:80].strip() == \"\":\n\t\t self.charge=0.0\n\t else:\n\t\t self.charge = float(line[78:80].strip())\n self.chain_local_id = -1\n self.formatted_ok = True\n\n # Heuristic section - split by space and then use\n # errors in casting as flags for things being issues\n # Note this may need to be expanded as malformed edge-cases\n # are identified...\n else:\n rawsplitline = filter(None, line.split(\" \"))\n\t \n\t \n\n splitline = []\n for i in rawsplitline:\n if i == \"\\n\" or i == \"\\t\":\n pass\n else:\n splitline.append(i)\n \n num_cols = len(splitline)\n\n\t print num_cols\n \n try:\n if num_cols == 10:\n self.record_name = splitline[0] \n self.atom_id = int(splitline[1])\n self.atom_name = splitline[2] \n self.alt_location = \"\"\n self.res_name = splitline[3] \n self.chain = \"\"\n self.res_id = int(splitline[4])\n self.res_ins_code = \"\"\n self.coord_X = float(splitline[5]) \n self.coord_Y = float(splitline[6]) \n self.coord_Z = float(splitline[7])\n self.occupancy = float(splitline[8])\n self.beta = float(splitline[9])\n self.seg_ID = \" \"\n self.element = \" \" \n self.charge = \" \"\n self.chain_local_id = -1\n self.formatted_ok = False\n\n elif num_cols == 11:\n self.record_name = splitline[0] \n self.atom_id = int(splitline[1])\n self.atom_name = splitline[2] \n self.alt_location = \" \"\n self.res_name = splitline[3] \n self.chain = splitline[4]\n self.res_id = int(splitline[5])\n self.res_ins_code = \" \"\n self.coord_X = float(splitline[6]) \n self.coord_Y = float(splitline[7]) \n self.coord_Z = float(splitline[8]) \n self.occupancy = float(splitline[9]) \n self.beta = float(splitline[10])\n self.seg_ID = \" \"\n self.element = \" \" \n self.charge = \" \"\n self.chain_local_id = -1\n self.formatted_ok = False\n\n elif num_cols == 12:\n self.record_name = splitline[0] \n self.atom_id = int(splitline[1])\n self.atom_name = splitline[2] \n self.alt_location = \" \"\n self.res_name = splitline[3] \n self.chain = splitline[4]\n self.res_id = int(splitline[5])\n self.res_ins_code = \" \"\n self.coord_X = float(splitline[6]) \n self.coord_Y = float(splitline[7]) \n self.coord_Z = float(splitline[8]) \n self.occupancy = float(splitline[9]) \n self.beta = float(splitline[10])\n self.seg_ID = \" \"\n self.element = splitline[11] \n self.charge = \" \"\n self.chain_local_id = -1\n self.formatted_ok = False\n else:\n raise PDB_atomException(\"Did not match number of columns\")\n except ValueError,e:\n print \"Error with columns (using \" + str(num_cols) + \") columns\"\n print \"Tried to cast string to int/float\"\n raise e", "def csv_parser(s):\n\n # Data is our output. It will be a list of lists.\n\n # Split csv into lines and store them in a list called 'lines'.\n \n # Remove the first element from lines, so that you have only the data lines left.\n \n # At this stage, we loop through the list called lines.\n # As you loop\n # i. 
split each line on the commas;\n # ii. convert the Subject variable to int.\n # iii. convert the Height variable to float.\n # iv. add to data a list consisting of this line's Subject, Height, and Occupation values ", "def from_csv(self, user, row):\n if len(row) != 4:\n raise BadRequest(_(\"Invalid line\"))\n self.name = row[1].strip()\n try:\n self.target = RelayDomain.objects.get(name=row[2].strip())\n except RelayDomain.DoesNotExist:\n raise NotFound(_(\"Relay domain %s does not exist\" % row[2].strip()))\n self.enabled = (row[3].strip() == 'True')\n self.save(creator=user)", "def parseAtom(self, line):\n\n\t\tcols = string.split(line, \",\")\n\n\t\tfor col in cols:\n\t\t\tself.atom.append(col)", "def parse_line(line):\n vals = []\n pos = comma = openq = closeq = 0\n while True:\n comma = line.find(',', pos)\n openq = line.find('\"', pos)\n if comma < 1:\n vals.append(line[pos:])\n break\n elif openq == -1 or comma < openq:\n vals.append(line[pos:comma])\n pos = comma + 1\n continue\n else:\n closeq = line.find('\"', openq + 1)\n vals.append(line[openq:closeq + 1])\n pos = closeq + 2\n continue\n return vals", "def _next_record(self, next_line):\n record = self.loader.parse_record_stream(self.reader,\n next_line,\n self.known_format)\n\n self.member_info = None\n\n # Track known format for faster parsing of other records\n self.known_format = record.format\n\n return record", "def processLine(self,line,logger=None):\n line=line.rstrip()\n # see where we are in the execution\n if self.stage == 'initialise':\n if AthenaLogChecker._startOfExecuteRE.match(line):\n if logger: logger.info(\"Athena execute()...\")\n self.stage = 'execute'\n return None\n elif self.stage == 'execute':\n if AthenaLogChecker._startOfFinaliseRE.match(line):\n if logger: logger.info(\"Athena finalise()...\")\n self.stage = 'finalise'\n self.event = None\n return None\n match = AthenaLogChecker._eventNumberRE.match(line)\n if match:\n self.event = match.group('event')\n if logger: logger.debug( \"Athena event %s\" , self.event )\n return None\n if AthenaLogChecker._noMoreEventNumberRE.match(line):\n oldEvent = self.event\n self.event = None\n if logger and oldEvent is not None:\n logger.debug( \"No more event numbers available\" )\n return None\n # match ignore patterns\n ignore = AtlasErrorCodes.matchIgnorePattern(line,self.release)\n if ignore:\n if ignore.re.pattern == r'.*?\\s+?INFO .+':\n return None\n self.ignoreCount += 1\n if logger:\n logger.debug(\"ignoring error in line: \\\"%s\\\"\", line)\n logger.debug(\" because it matched: \\\"%s\\\"\", ignore.re.pattern)\n return None\n # then match known error patterns\n match, err = AtlasErrorCodes.matchErrorPattern(line,self.release)\n if err:\n self.processError(err)\n if logger:\n logger.debug(\"matched error category %s in line: %s\", err.category.acronym, line)\n logger.debug(\" because it matched: \\\"%s\\\"\", match.re.pattern)\n return err\n # finally, perform generic error match\n err = self.extractError(line)\n if err:\n self.processError(err)\n if logger:\n logger.verbose(\"non-matched error in line: %s\", line)\n return err\n return None", "def import_csv_gen(csv_filename):\n with open(csv_filename, 'r') as csv_fd:\n line_num = 0\n line = 'foo'\n while line:\n line_num += 1\n try:\n line = csv_fd.readline()\n # generator 'yield' statement for each\n # line of the CSV file below. 
Python CSV\n # support does not allow per-line parsing\n yield line.rstrip('\\n').split(',')\n except EOFError:\n return", "def split_line(line):\n try:\n strings = line.split(',', 1)\n return str(strings[1])\n except:\n pass", "def read_csv(self, csv_input):\n # https://stackoverflow.com/a/45063514\n dtypes = {\n 'lat': 'U',\n 'long': 'U'\n }\n csv_data = pd.read_csv(csv_input, encoding='UTF-8', sep=',', na_values=[''], dtype=dtypes)\n\n self.table = csv_data.fillna('').applymap(lambda x: x.strip() if type(x) == str else x)\n self.log.info('Data read from CSV %s' % csv_input)\n #print('Data read from CSV %s' % csv_input)", "def test_parse_invalid_file(self):\n with pytest.raises(ParserError):\n self.parser.parse(\"invalid csv\")", "def parseType(self, line):\n\n\t\tcols = string.split(line, \",\")\n\n\t\tfor col in cols:\n\t\t\tself.type.append(col)", "def test_line_class_parse(logger):\n raw_bytes = b''\n point_data_fragment = [\n # line:\n #\n # brush type\n (\n struct.pack(lines.BrushType.fmt, lines.BrushType.REVERSE['pen']),\n lines.BrushType.REVERSE['pen'],\n ),\n # colour\n (\n struct.pack(lines.Colour.fmt, lines.Colour.REVERSE['black']),\n lines.Colour.REVERSE['black']\n ),\n # magical unknown line attribute 1\n (\n struct.pack(lines.LineAttribute1.fmt, 0),\n 0\n ),\n # base brush size\n (\n struct.pack(\n lines.BrushBaseSize.fmt, lines.BrushBaseSize.REVERSE['small']\n ),\n lines.BrushBaseSize.REVERSE['small']\n ),\n # one point:\n (struct.pack(lines.Points.fmt, 1), 1),\n # the single point's data:\n (struct.pack(lines.X.fmt, 12.341), 12.341),\n (struct.pack(lines.Y.fmt, 107.301), 107.301),\n (struct.pack(lines.Pressure.fmt, 0.351), 0.351),\n (struct.pack(lines.RotX.fmt, 0.03), 0.03),\n (struct.pack(lines.RotY.fmt, 0.216), 0.216),\n ]\n for data in point_data_fragment:\n raw_bytes += data[0]\n\n # Set up the generator with the raw bytes:\n position = recover(raw_bytes)\n data = next(position)\n assert data == ''\n\n result = lines.Line.load(position)\n assert result.brush_type.name == 'pen'\n assert result.colour.name == 'black'\n assert result.line_attribute1.value == 0\n assert result.brush_base_size.name == 'small'\n assert result.points.count == 1\n result = result.points.points[0]\n assert round(result.x, 3) == 12.341\n assert round(result.y, 3) == 107.301\n assert round(result.pressure, 3) == 0.351\n assert round(result.rot_x, 3) == 0.03\n assert round(result.rot_y, 3) == 0.216", "def parse_log_line(line: str) -> ('value', ):\n return tuple(SEP_RE.split(line))", "def _load_crontab_line(self, rownum, crontab_line, job_func_func=std_launch_func, stdin=None):\n pieces = crontab_line.split()\n\n if pieces[0] in ALIASES.keys():\n try:\n # CASE 1 - pattern using alias\n job = self.cron(pieces[0], job_func_func(pieces[1:]))\n return job\n except ValueError as e:\n # shouldn't happen\n logger.error((\"Error at line %d, cannot parse pattern, the line will be ignored.\\r\\n\" +\n \"Inner Exception: %s\") % (rownum, str(e)))\n return None\n if len(pieces) < 6:\n logger.error(\"Error at line %d, expected at least 6 tokens\" % rownum)\n return None\n if len(pieces) >= 7:\n try:\n # CASE 2 - pattern including year\n job = self.cron(\" \".join(pieces[0:6]), job_func_func(pieces[6:]))\n return job\n except ValueError:\n pass\n try:\n # CASE 3 - pattern not including year\n job = self.cron(\" \".join(pieces[0:5]), job_func_func(pieces[5:]))\n return job\n except ValueError as e:\n logger.error((\"Error at line %d, cannot parse pattern, the line will be ignored.\\r\\n\" +\n \"Inner Exception: 
%s\") % (rownum, str(e)))\n return None", "def next_line(self, context, line):", "def do_Promo_line_parse (Promo_line, line_number, filehash) :\n result = [filehash,\n line_number,\n Promo_line[0:8].strip(),\n Promo_line[9:13].strip(),\n Promo_line[14:19].strip(),\n Promo_line[20:26].strip().lstrip('0'), # Spec indicates numerical field\n Promo_line[27:30].strip().lstrip('0'), # Spec indicates numerical field\n Promo_line[31:40].strip(),\n Promo_line[41:49].strip(),\n Promo_line[50:].strip()\n ]\n return result\n # Having the line number passed in is ugly, but kind of works :/\n # Having all the field extraction explicit is kind of ugly too...\n # We're using the hash here to link? Yeah, that's because Python\n # doesn't know what the autonumbered load table is up to in the\n # DB when it starts to coalesce the raw files together.", "def main():\r\n\r\n #open the file\r\n with open('csvfile1.csv', 'r') as csvfile1:\r\n #read the file\r\n csv_reader = csv.reader(csvfile1)\r\n #jummp the first line\r\n next(csv_reader)\r\n #loop through the file\r\n for line in csv_reader:\r\n print(line)", "def from_csv_entry(entry: dict) -> \"Lineage\":\n return Lineage(\n species=entry.get(\"species\", \"\"),\n lineage=entry.get(\"lineage\", \"\"),\n sublineage=entry.get(\"sublineage\", \"\"),\n name=entry.get(\"id\", \"\"),\n )", "def line_to_data( line ):\n data = [ ]\n if '>>>' in line:\n print(line)\n return data\n\n secs = filter( None, line.split(',') )\n for i, x in enumerate( secs ):\n try:\n data.append( float(x.strip()) )\n except Exception as e:\n data = None\n # print( data )\n return data", "def __init__(self, line):\n # Throw an exception if we don't see the parenthesis that mark a history entry\n if not line[108] == '(':\n raise ParsingException\n if not line[138:139] == ')':\n raise ParsingException\n\n self.status = line[109:122].strip()\n self.time_stamp = datetime.strptime(line[122:138], '%m/%d/%Y %H:%M')", "def from_csv_row(\n cls,\n irow: typing.Iterator[str],\n aux_names: list[str],\n line_number: typing.Optional[int] = None,\n offset: typing.Optional[int] = None,\n ) -> \"ConversionRuleSpec\":\n\n n_aux = len(aux_names)\n row = list(irow)\n\n auxiliary_categories = {}\n factors_a = cls._parse_formula(row[0])\n for i in range(n_aux):\n aux_codes = cls._parse_aux_codes(row[i + 1])\n auxiliary_categories[aux_names[i]] = set(aux_codes)\n factors_b = cls._parse_formula(row[n_aux + 1])\n\n try:\n comment = row[n_aux + 2]\n except IndexError:\n comment = \"\"\n\n return cls(\n factors_categories_a=factors_a,\n factors_categories_b=factors_b,\n auxiliary_categories=auxiliary_categories,\n comment=comment,\n csv_line_number=line_number,\n csv_original_text=\",\".join(row),\n )", "def process(self, record):\n is_data = True\n if self.file_path.split('.')[-1] == 'csv':\n if self.header_skip:\n logging.info('Skipping header data... 
{}'.format(record))\n self.header_skip = False\n is_data = False\n return [(record, None, None, is_data)]\n record_attributes = list(csv.reader([record]))[0]\n if len(record_attributes) != len(self.schema[FIELDS_KEY]):\n if len(record_attributes) > 1 or not record_attributes[0].strip().isdigit():\n IS_VALID_FILE = False\n is_data = None\n return [(record, None, None, is_data)]\n for record_attribute, attribute_schema in zip(\n record_attributes, self.schema[FIELDS_KEY]):\n is_valid_datatype_check = self.__datatype_check(record_attribute, attribute_schema)\n is_valid_null_check = self.__null_check(record_attribute, attribute_schema)\n return [(record, is_valid_datatype_check, is_valid_null_check, is_data)]", "def parse_line(self, line, time_shift=0.0):\n raise NotImplementedError(\"must be defined by subclass\")", "def _parse_contents(self, contents):\n pattern = r'(,)(?=(?:[^\"]|\"[^\"]*\")*$)'\n # Convert string object from CSV to DSV format. This prevents\n # certain columns (e.g. `TaxationAddress`), from being split.\n contents = re.sub(pattern, ';', contents)\n # Split each row via newline escape sequence.\n contents = contents.split('\\n')\n # Remove double quotes from string objects and split the\n # contents by semicolon.\n contents = [content.strip().replace('\\\"', '').split(';')\n for content in contents]\n headers = contents[0]\n return (headers, contents)", "def parse_record(self, record):\n raise NotImplementedError()", "def parse_csvfile(self, csvfile):\n\n logging.info(\"Parseing csvfile: %s\" % basename(csvfile))\n fields = []\n data = {}\n try:\n with open(csvfile) as f:\n for line in f:\n line = line.strip()\n # Skip empty or commented line\n if not line or line[0] == \"#\":\n continue\n if not fields:\n # The first valid line defines fields.\n fields = [x.strip() for x in line.split(\",\")]\n for f in self.REQUIRED_FIELDS:\n if f not in fields:\n logging.error(\"Failed to find %s field. \"\n \"Aborted.\" % f)\n sys.exit(1)\n else:\n # The rest lines are data\n values = [x.strip() for x in line.split(\",\")]\n record = {}\n for k, v in zip(fields, values):\n record[k] = v\n # Convert date time string to epoch seconds\n record[\"time_h\"] = self.parse_timestr(record[\"time_h\"])\n node = record[\"name\"]\n if data.get(node, None):\n data[node].append(record)\n else:\n data[node] = [record]\n except Exception as e:\n logging.exception(\"Failed to parsing the csvfile. 
\"\n \"See stack trace below:\")\n sys.exit(1)\n\n # While it didn't occur often, I observed that data in CSV files\n # generated by cbtool monextrac command were not in time order.\n # So sort them.\n logging.debug(\"Sorting the data\")\n for node in data.keys():\n data[node].sort(lambda x, y: cmp(int(x[\"time\"]), int(y[\"time\"])))\n\n return data, fields", "def _parse_ignore(line):\n # mimic multiple lines w/ list\n parser = csv.reader([line])\n return tuple(item.strip() for row in parser for item in row)", "def test_csvfile_single_row_of_data(fs: FakeFilesystem) -> None:\n contents = \"\"\"\"a\",\"b\"\n1,2\"\"\"\n fs.create_file(\"test.csv\", contents=contents)\n\n adapter = CSVFile(\"test.csv\")\n\n assert adapter.get_columns() == {\n \"a\": Float(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.NONE,\n exact=True,\n ),\n \"b\": Float(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.NONE,\n exact=True,\n ),\n }\n assert list(adapter.get_data({}, [])) == [{\"a\": 1.0, \"b\": 2.0, \"rowid\": 0}]", "def __parse_move_line(self, line):\n parts = re.sub('\\(.*?\\)', '', line).split()\n x, y = None, None\n for part in parts[:0:-1]:\n axis = part.upper()[0]\n value = float(part[1:])\n if axis in ['Z', 'F']:\n parts.remove(part)\n elif axis == 'X':\n x = value\n parts.remove(part)\n elif axis == 'Y':\n y = value\n parts.remove(part)\n if x is None or y is None:\n return None\n template = parts[0] + ' X{:.6f} Y{:.6f} ' + ' '.join(parts[1:])\n return [template, x, y]", "def csv_to_trophy(csv_stream, filename='output', outpath='', outline=False, logopoints=False, validpoints=(0,1,2,3,4,5,6,7,8,9),debugstring='', simulate=False, do_corners=False):\n has_content = False\n if logopoints:\n print(\"LP is now depreciated! plz dont use\")\n current_drawing = dxf.drawing()\n current_drawing.header[\"$ACADVER\"] = \"DUMMY VALUE\"\n if do_corners:\n draw_4_corners(current_drawing)\n\n if outline:\n generate_template_trophy(h1, h2, w, current_drawing)\n\n for iteration, valid in enumerate(validpoints):\n try:\n line = next(csv_stream)\n except StopIteration:\n if has_content and not simulate:\n save_file(current_drawing, filename, outpath)\n raise\n\n\n # get next item from stream\n if len(line) != 2: # invalid line\n print(\"Line %d is invalid [%s]\" % (iteration + 1, line))\n if line[0].startswith('##') or line[1].startswith('##'): # comment line\n continue\n else:\n print(line)\n name, year = line\n if len(name) == 0 and len(year) == 0: # empty line,created by EXCLE WHEN YOU PRESS ARROW DOWN COMMAND\n continue\n if len(name) == 0 or len(year) == 0:\n print(\"Length of line %d, is invalid (one entry is 0 length)\" % (iteration + 1))\n continue\n has_content = True\n if valid <= 3:\n add_school_trophy(REF_POINTS[valid], current_drawing, name, year, 'down')\n elif valid <= 7:\n add_school_trophy(REF_POINTS[valid], current_drawing, name, year, 'up')\n elif valid == 8:\n add_school_trophy(REF_POINTS[valid], current_drawing, name, year, 'right')\n elif valid == 9:\n add_school_trophy(REF_POINTS[valid], current_drawing, name, year, 'left')\n if not simulate:\n save_file(current_drawing, filename, outpath)" ]
[ "0.6595832", "0.6529445", "0.62704617", "0.61401874", "0.61335003", "0.61316746", "0.61252147", "0.61061907", "0.5982218", "0.5961737", "0.5809438", "0.5809438", "0.5809438", "0.5809438", "0.5806658", "0.5806658", "0.5729117", "0.5704075", "0.5667828", "0.56519485", "0.5627262", "0.5607163", "0.55858445", "0.5569104", "0.55405027", "0.552808", "0.5510118", "0.5474966", "0.5470403", "0.54466015", "0.54352725", "0.5412309", "0.53933454", "0.5387015", "0.5384669", "0.5377176", "0.5370356", "0.53684646", "0.5365959", "0.5360174", "0.53505", "0.5344733", "0.53426194", "0.5342244", "0.53414285", "0.5329676", "0.53278595", "0.52921903", "0.5286978", "0.528332", "0.5274603", "0.5271105", "0.5268118", "0.5267756", "0.52561855", "0.5254896", "0.5252997", "0.5250147", "0.52459663", "0.52423763", "0.5224957", "0.52086097", "0.52041066", "0.51833004", "0.51673937", "0.51504743", "0.5150248", "0.5146722", "0.5137327", "0.513205", "0.5100271", "0.5083593", "0.50800645", "0.5074651", "0.50697905", "0.50666064", "0.5053576", "0.5045257", "0.50426364", "0.5034947", "0.50346965", "0.50339484", "0.50330913", "0.5032923", "0.502973", "0.5020634", "0.5018737", "0.50172883", "0.4997192", "0.49909803", "0.4988535", "0.49870023", "0.49869356", "0.4976962", "0.49717924", "0.49703914", "0.49700055", "0.49680373", "0.49670652", "0.49650007" ]
0.7278119
0
Return `table`, with final column names but still String values.
def _postprocess_name_columns(
    table: pyarrow.Table, has_header: bool, settings: Settings
) -> Tuple[pyarrow.Table, List[I18nMessage]]:
    if has_header and table.num_rows > 0:
        names, warnings = gen_unique_clean_colnames_and_warn(
            list((c[0].as_py() if c[0].is_valid else "") for c in table.columns),
            settings=settings,
        )

        # Remove header (zero-copy: builds new pa.Table with same backing data)
        table = table.slice(1)
    else:
        names = [f"Column {i + 1}" for i in range(len(table.columns))]
        warnings = []

    return (
        pyarrow.table(dict(zip(names, table.columns))),
        warnings,
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n\n table_list = [self.headers]\n\n for row in self.data:\n table_list.append([row[col] or \"\" for col in self.headers])\n\n return create_table_string(table_list)", "def _fix_query_table(table):\n for i in table.columns:\n tdtype = table[i].dtype.char\n if tdtype in ('b', 'B', 'S', 'a', 'O'):\n row = process_list(string_fix, table[i])\n table[i] = np.array(row, dtype=str)\n return table", "def get_columns(self, table):\n if table not in self.columns:\n self.columns[table] = [\n row[0] for row in self.db.iter('describe ' + table)]\n return self.columns[table]", "def get_table_column_name(self, table):\n c = self.conn.cursor()\n c.execute(\"SELECT * FROM %s\" % table)\n names = list(map(lambda x: x[0], c.description))\n return names", "def trim_column_names(self, table: Table):\n self._requires_table(table)\n table.columns = [\n column.strip() if isinstance(column, str) else column\n for column in table.columns\n ]", "def colNames_string(self):\n # SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'some_table';\n return \"SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = \"", "def construct_table(self):\n table_str = self.header_row\n row_lbls, col_lbls = self.get_idxvals()\n for r,rlbl in enumerate(row_lbls):\n row_data = [self.data[rlbl,clbl] for clbl in col_lbls]\n table_str += self.construct_row(r, row_data)\n \n return table_str", "def get_normalized_data_table(table_metadata, debug=False):\n suffix = table_metadata.get('suffix', '')\n data_table = table_metadata['table_class'](\n file_path=table_metadata['csv_filename'], suffix=suffix)\n drop_headers(table_metadata['document_label'], data_table.data)\n rename_headers(table_metadata['document_label'], data_table.data)\n print_data_table_length(table_metadata['document_label'],\n data_table.data,\n debug=debug)\n return data_table", "def _get_fields(self, table):\n fields = list()\n for column in table.columns:\n fields.append({'id': column.name, 'type': str(column.type)})\n return fields", "def column_names(\n self,\n table: exp.Table | str,\n only_visible: bool = False,\n dialect: DialectType = None,\n normalize: t.Optional[bool] = None,\n ) -> t.List[str]:", "def _stringify_table(df: Table) -> pd.DataFrame:\n columns = [\n 'reimbursee',\n 'cost',\n 'currency',\n 'reimbursers',\n ]\n\n if 'notes' in df.columns:\n columns.append('notes')\n\n side_reimbs = df['reimbursers'].notna()\n def add_space(string):\n return ', '.join(map(str.strip, string.split(',')))\n spaced_reimbursers = df['reimbursers'][side_reimbs].apply(add_space)\n\n stringified_df = df[columns].fillna(value={'reimbursers': 'everyone'})\n stringified_df.loc[side_reimbs, 'reimbursers'] = spaced_reimbursers\n\n stringified_df.loc[:, 'cost'] = stringified_df['cost'].apply(_add_decimals)\n\n return stringified_df", "def parse_db_cols(cur, table):\n cur.execute('PRAGMA table_info({})'.format(table))\n d = cur.fetchall()\n\n cols = []\n for col in d:\n cols.append(str(col[1]))\n\n return cols", "def _str_colnames(self):\n return ', '.join(self.galcat.colnames)", "def rawtable(self):\n return self.__rawtable", "def fields_from_table(table):\r\n\r\n fields = []\r\n\r\n for column in table.columns:\r\n field = brewery.metadata.Field(name=column.name)\r\n field.concrete_storage_type = column.type\r\n\r\n for conv in _sql_to_brewery_types:\r\n if issubclass(column.type.__class__, conv[0]):\r\n field.storage_type = conv[1]\r\n field.analytical_type = conv[2]\r\n break\r\n\r\n if not field.storage_type:\r\n 
field.storaget_tpye = \"unknown\"\r\n\r\n if not field.analytical_type:\r\n field.analytical_type = \"unknown\"\r\n\r\n fields.append(field)\r\n\r\n return brewery.metadata.FieldList(fields)", "def return_astropy_table(self):\n table = Table()\n for name in self.hdfile.root.Data.Fibers.colnames:\n if hasattr(self, name):\n table[name] = getattr(self, name)\n\n return table", "def return_astropy_table(self):\n table = Table()\n for name in self.hdfile.root.Data.Fibers.colnames:\n if hasattr(self, name):\n table[name] = getattr(self, name)\n\n return table", "def get_string(self):\n if not self._flip:\n this_column_specifier = TABLE_NUMROWS_SEP + TABLE_NUMROWS_SEP.join(\n [\"l\" for col in xrange(self._num_cols)]) + TABLE_NUMROWS_SEP\n this_table_header = TABLE_HEADER_TEMPLATE.substitute(\n column_specifier = this_column_specifier,\n caption = str(self._caption),\n tag = str(self._tag))\n this_table_column_headers = COLUMN_HEADERS_TEMPLATE.substitute(\n column_headers = TABLE_COLSEP.join(\n [str(header_elt) for header_elt in self._header]))\n this_table_content = (TABLE_ROWSEP + os.linesep).join(\n [TABLE_COLSEP.join([str(row_elt) for row_elt in row])\n for row in self._rows])\n return os.linesep.join([this_table_header,\n this_table_column_headers,\n ENDHEADER,\n this_table_content,\n TABLE_FOOTER])\n else:\n this_column_specifier = (\n TABLE_NUMROWS_SEP + \"l\" + TABLE_NUMROWS_SEP +\n TABLE_NUMROWS_SEP + TABLE_NUMROWS_SEP.join(\n [\"l\" for col in xrange(self._num_cols - 1)]) +\n TABLE_NUMROWS_SEP)\n this_table_header = TABLE_HEADER_TEMPLATE.substitute(\n column_specifier = this_column_specifier,\n caption = str(self._caption),\n tag = str(self._tag))\n this_table_content = (TABLE_ROWSEP + os.linesep).join(\n [TABLE_COLSEP.join([header_elt] + [str(elt) for elt in row])\n for (header_elt, row) in zip(self._header, self._rows)])\n return os.linesep.join([this_table_header,\n ENDHEADER,\n this_table_content,\n TABLE_FOOTER])", "def constructSavedTable(table, records):\n columns = []\n colNames = []\n for column in table.tableColumns:\n col = {}\n for k,v in column.iteritems():\n if k == \"sizeCalculated\" or k == \"sizeCorrected\" or k == 'min':\n continue\n elif k == \"field\":\n colNames.append(v)\n col[str(k)] = str(v)\n columns.append(col)\n return constructTable(table, records, columns, colNames)", "def ddl_table(self, tabela):\r\n sql = \"\"\"SELECT\r\n RF.RDB$FIELD_NAME FIELD_NAME,\r\n CASE F.RDB$FIELD_TYPE\r\n WHEN 7 THEN\r\n CASE F.RDB$FIELD_SUB_TYPE\r\n WHEN 0 THEN 'INT'\r\n WHEN 1 THEN 'NUMERIC(' || F.RDB$FIELD_PRECISION || ', ' || (-F.RDB$FIELD_SCALE) || ')'\r\n WHEN 2 THEN 'DECIMAL'\r\n END\r\n WHEN 8 THEN\r\n CASE F.RDB$FIELD_SUB_TYPE\r\n WHEN 0 THEN 'INTEGER'\r\n WHEN 1 THEN 'NUMERIC(' || F.RDB$FIELD_PRECISION || ', ' || (-F.RDB$FIELD_SCALE) || ')'\r\n WHEN 2 THEN 'DECIMAL'\r\n END\r\n WHEN 9 THEN 'QUAD'\r\n WHEN 10 THEN 'FLOAT'\r\n WHEN 12 THEN 'DATE'\r\n WHEN 13 THEN 'TIME'\r\n WHEN 14 THEN 'CHAR(' || (TRUNC(F.RDB$FIELD_LENGTH / COALESCE(CH.RDB$BYTES_PER_CHARACTER,1))) || ') '\r\n WHEN 16 THEN\r\n CASE F.RDB$FIELD_SUB_TYPE\r\n WHEN 0 THEN 'BIGINT'\r\n WHEN 1 THEN 'NUMERIC(' || F.RDB$FIELD_PRECISION || ', ' || (-F.RDB$FIELD_SCALE) || ')'\r\n WHEN 2 THEN 'DECIMAL'\r\n END\r\n WHEN 27 THEN 'NUMERIC'\r\n WHEN 35 THEN 'TIMESTAMP'\r\n WHEN 37 THEN 'VARCHAR(' || (TRUNC(F.RDB$FIELD_LENGTH / COALESCE(CH.RDB$BYTES_PER_CHARACTER,1))) || ')'\r\n WHEN 40 THEN 'CSTRING' || (TRUNC(F.RDB$FIELD_LENGTH / COALESCE(CH.RDB$BYTES_PER_CHARACTER,1))) || ')'\r\n WHEN 45 THEN 'BLOB_ID'\r\n WHEN 261 
THEN 'TEXT'\r\n ELSE 'RDB$FIELD_TYPE: ' || F.RDB$FIELD_TYPE || '?'\r\n END FIELD_TYPE\r\n FROM RDB$RELATION_FIELDS RF\r\n JOIN RDB$FIELDS F ON (F.RDB$FIELD_NAME = RF.RDB$FIELD_SOURCE)\r\n LEFT OUTER JOIN RDB$CHARACTER_SETS CH ON (CH.RDB$CHARACTER_SET_ID = F.RDB$CHARACTER_SET_ID)\r\n LEFT OUTER JOIN RDB$COLLATIONS DCO ON ((DCO.RDB$COLLATION_ID = F.RDB$COLLATION_ID) AND (DCO.RDB$CHARACTER_SET_ID = F.RDB$CHARACTER_SET_ID))\r\n WHERE (RF.RDB$RELATION_NAME = '%s') AND (COALESCE(RF.RDB$SYSTEM_FLAG, 0) = 0)\r\n ORDER BY RF.RDB$FIELD_POSITION;\"\"\" % (tabela)\r\n res = self.cur_origem.execute(sql)\r\n table = \"CREATE TABLE IF NOT EXISTS %s (\" % tabela\r\n tipos = {}\r\n for coluna, tipo, in res.fetchall():\r\n table += \"%s %s,\" % (coluna.strip(), tipo.strip())\r\n tipos[coluna.strip()] = tipo\r\n table = table[:-1]+\");\"\r\n return table, tipos", "def basic_table_eject():\n tbl: pa.Table = pa.Table.from_pylist([\n {'first_name': 'Kakashi', 'last_name': 'Hatake', },\n {'first_name': 'Itachi', 'last_name': 'Uchiha', },\n {'first_name': 'Shisui', 'last_name': 'Uchiha', },\n ])\n\n results = {\n # NOTE: Requires pandas installation (to_pandas)\n 'to_pandas > to_dict': tbl.to_pandas().to_dict(orient='records'),\n 'to_pydict': tbl.to_pydict(), # Dict[str, list]\n 'to_pylist': tbl.to_pylist(), # List[dict]\n 'to_string': tbl.to_string(), # str\n }\n\n pretty_print_result_map(results)", "def _str_colnames(self):\n return ', '.join(self.colnames)", "def get_table_columns_list(self, table, dictionary=False):\n if \".\" in table:\n prefix = table.split(\".\")[0] + \".\"\n table = table.split(\".\")[1]\n else:\n # table = table\n prefix = \"\"\n cur = self._connection.cursor()\n\n if self.isMSSQL(): # pragma: no cover\n prf = \"\" if len(prefix) == 0 else prefix + \".\"\n sql = \"\"\"SELECT * FROM (SELECT OBJECT_NAME(c.OBJECT_ID) TableName,c.name AS ColumnName,t.name AS TypeName\n FROM sys.columns AS c\n JOIN sys.types AS t ON c.user_type_id=t.user_type_id\n ) AS ttt\n WHERE ttt.TableName = '%s%s'\"\"\" % (prf, table)\n cur.execute(sql)\n else:\n cur.execute(\"PRAGMA %stable_info(%s)\" % (prefix, table) + \";\")\n\n res = cur.fetchall()\n cur.close()\n res = [(r[1], DatabaseCore._SQL_conversion_types[r[2]]) for r in res]\n if dictionary:\n dic = {}\n for i in range(0, len(res)):\n dic[i] = res[i]\n return dic\n else:\n return res", "def table(self):\n return self.t", "def call_table(conn, table):\r\n cursor = conn.cursor()\r\n values_list = []\r\n header_list = get_header(conn, table) # list with table header values\r\n sql = f\"SELECT * FROM {table}\"\r\n cursor.execute(sql)\r\n for value in cursor.fetchall(): # iterates over list of tuples\r\n value_dict = dict() # dictionary to store each row values. keys = column headers, value = respective row value\r\n for index, c_header in enumerate(header_list):\r\n value_dict[f\"{c_header}\"] = value[index]\r\n values_list.append(value_dict)\r\n return values_list", "def get_column_names(self, table):\n try:\n logging.info(f'Getting column names of table `{table}`')\n return list(self.execute(f'SELECT * FROM `{table}`'))\n except:\n logging.exception('Something went wrong getting column names. 
Check trace.')\n return", "def _format_sql(self, trade, table):\n\n trade = copy(trade)\n for key, value in trade.items():\n\n if value is None:\n trade[key] = 'NULL'\n elif key == 'date':\n value = tb.DateConvert(value).date\n\n if isinstance(value, str):\n trade[key] = f\"'{value}'\"\n\n return {k:v for k,v in trade.items() if k in self.fields[table]}", "def column_values(table: list[dict[str, str]], column_name: str) -> list[str]:\n column_values: list[str] = []\n for row in table:\n item: str = row[column_name]\n column_values.append(item)\n return column_values", "def get_table(self):\n return copy.deepcopy(self._table)", "def __repr__(self):\n return \"[\" + \", \".join([str(member) for member in self.table]).rstrip(\",\") + \"]\"", "def table_columns(auth, table_name):\n return [row[0] for row in DBMySQL.csfr(auth, \"describe \" +table_name)]", "def __str__(self):\r\n tmp = \"\"\r\n for (name, value) in self.__table__.items():\r\n tmp += str(name) + \"\\n\" + str(value) + \"\\n\"\r\n return(tmp)", "def ddl_table(self, tabela):\r\n sql = \"\"\"SELECT col.column_name AS coluna,\r\n CASE\r\n WHEN col.data_type LIKE 'NUMBER%%' THEN 'NUMERIC'\r\n WHEN col.data_type LIKE 'NCHAR%%' THEN 'CHAR'\r\n WHEN col.data_type LIKE 'VARCHAR2%%' THEN 'TEXT'\r\n WHEN col.data_type LIKE 'NVARCHAR2%%' THEN 'TEXT'\r\n WHEN col.data_type LIKE 'VARCHAR%%' THEN col.DATA_TYPE||'('||col.DATA_LENGTH||')'\r\n WHEN col.data_type LIKE 'BLOB%%' THEN 'TEXT'\r\n WHEN col.data_type LIKE 'CLOB%%' THEN 'TEXT'\r\n WHEN col.data_type LIKE 'NCLOB%%' THEN 'TEXT'\r\n WHEN col.data_type LIKE 'LONG%%' THEN 'TEXT'\r\n WHEN col.data_type LIKE 'RAW%%' THEN 'TEXT'\r\n WHEN col.data_type LIKE 'BFILE%%' THEN 'TEXT'\r\n WHEN col.data_type LIKE 'LONG RAW%%' THEN 'TEXT'\r\n WHEN col.data_type LIKE 'FLOAT%%' THEN 'NUMERIC'\r\n WHEN col.data_type LIKE 'BINARY_FLOAT%%' THEN 'NUMERIC'\r\n WHEN col.data_type LIKE 'BINARY_DOUBLE%%' THEN 'NUMERIC'\r\n WHEN col.data_type LIKE 'TIMESTAMP%%' THEN 'TIMESTAMP'\r\n WHEN col.data_type LIKE 'INTERVAL%%' THEN 'TEXT'\r\n ELSE col.DATA_TYPE\r\n END AS tipo,\r\n col.column_id,\r\n col.data_type\r\n FROM\r\n all_tab_columns col\r\n WHERE\r\n upper(table_name) = '%s'\r\n ORDER BY col.column_id\"\"\" % (tabela)\r\n res = self.cur_origem.execute(sql)\r\n table = \"CREATE TABLE IF NOT EXISTS %s (\" % tabela\r\n tipos = {}\r\n for coluna, tipo, id, data_type, in res.fetchall():\r\n # EXECOES (palavras reservadas no postgres)\r\n if coluna.strip() == \"NATURAL\":\r\n col = \"NATURALDE\"\r\n elif coluna.strip() == \"SIMILAR\":\r\n col = \"SIMILARR\"\r\n else:\r\n col = coluna\r\n table += \"%s %s,\" % (col.strip(), tipo.strip())\r\n tipos[id] = [coluna.strip(), data_type]\r\n table = table[:-1]+\");\"\r\n return table, tipos", "def get_table_data(table):\n pattern_body = re.compile(r'(?ims)\\<tbody\\>(.*?)\\</tbody\\>')\n pattern_rows = re.compile(r'(?ims)\\<tr\\>(.*?)\\</tr\\>')\n pattern_cols = re.compile(r'(?ims)\\<td.*?\\>([^<]+?)\\<.*?/td\\>')\n\n body = pattern_body.findall(table)[0]\n return [\n list(map(lambda x: html.unescape(x), pattern_cols.findall(row)[:3]))\n for row in pattern_rows.findall(body)]", "def fromTable(cls, table):\n cls.__attrmap__ = {}\n cls.__colmap__ = {}\n allColumns = list(table)\n for column in allColumns:\n attrname = cls.namingConvention(column.model.name)\n cls.__attrmap__[attrname] = column\n cls.__colmap__[column] = attrname", "def tables_columns(sql):\n td = OrderedDict()\n for table, columns in findall('create table ([a-zA-Z_]+) \\((.*?)\\);',\n sql, flags=DOTALL):\n 
td[table] = [c.split(' ') for c in [ct.replace(',', '').strip()\n for ct in\n columns.split('\\n')] if c]\n return td", "def get_colnames(cur, table):\n cur.execute(\"\"\"DESCRIBE {}\"\"\".format(table))\n cols = cur.fetchall()\n return [col[0] for col in cols]", "def table(self):\r\n return self._table", "def _create_TableDescriptor(self):\n\n self.conn.cursor.execute(\"PRAGMA table_info(\" + self.table_name + \")\")\n descriptions = self.conn.cursor.fetchall()\n column_map = {}\n for description in descriptions:\n column_map[description[1]] = description[2]\n td = TD(self.table_name, column_map) \n\n# self.conn.cursor.execute(\"SELECT sql FROM sqlite_master WHERE name='{tb}'\"\\\n# .format(tb=self.table_name))\n# aa = str(self.conn.cursor.fetchone()[0])\n# sindx = aa.find(\"(\")\n# eindx = aa.find(\")\")\n# aa = aa[sindx+1:eindx]\n# aa = aa.split(\",\")\n# column_map = {kyval.split()[0]:kyval.split()[1] for kyval in aa}\n# td = TD(self.table_name, column_map) \n\n return td", "def get_string(self):\n this_column_specifier = \"l\" * self._num_cols\n this_column_headers = TABLE_COLSEP.join(\n [str(header_elt) for header_elt in self._header])\n this_table_header = TABLE_HEADER_TEMPLATE.substitute(\n column_specifier = this_column_specifier,\n caption = str(self._caption),\n tag = str(self._tag))\n if self._flip:\n this_table_content = (TABLE_ROWSEP_NOLINE + os.linesep).join(\n [TABLE_COLSEP.join(\n [self._header[row_num]] + [\n str(row_elt) for row_elt in self._rows[row_num]])\n for row_num in xrange(self._num_rows)])\n else:\n this_table_content = (TABLE_ROWSEP_NOLINE + os.linesep).join(\n [TABLE_COLSEP.join(self._header)] + [TABLE_COLSEP.join(\n [str(row_elt) for row_elt in row]) for row in self._rows])\n return os.linesep.join([this_table_header,\n ENDHEADER,\n this_table_content,\n TABLE_FOOTER])", "def makeCreateTable(self, tbl, columns, rows):\n\n lines = []\n lines.append('CREATE TABLE \"{}\" ('.format(tbl))\n\n m = []\n for name, typ, prec, scale in columns:\n if typ == 'VARCHAR':\n typ = 'VARCHAR2'\n\n if scale:\n prec = '({},{})'.format(prec,scale)\n elif prec:\n prec = '({})'.format(prec)\n else:\n prec=''\n\n m.append(' \"{}\" {}{}'.format(name, typ, prec))\n \n lines.append(\",\\n\".join(m))\n lines.append(\")\")\n\n return \"\\n\".join(lines)", "def table(self):\n if self._table is None:\n self._table = list(self._iter_rows())\n\n return self._table", "def empty_table(self, table_name, extra_columns=None):\n table_definition = self._table_definitions[table_name]\n df = pd.DataFrame({\n k: pd.Series(dtype=v.type.python_type) for k, v in table_definition.c.items()\n })\n if extra_columns:\n extras = pd.DataFrame({k: pd.Series() for k in extra_columns})\n df = pd.concat([df, extras], axis=1)\n return df", "def basic_table_details():\n tbl: pa.table = pa.Table.from_pylist([\n {'first_name': 'Kakashi', 'last_name': 'Hatake', },\n {'first_name': 'Itachi', 'last_name': 'Uchiha', },\n {'first_name': 'Shisui', 'last_name': 'Uchiha', },\n ])\n\n results = {\n 'column_names': tbl.column_names,\n 'columns > map > combine_chunks > to_pylist': [col.combine_chunks().to_pylist() for col in tbl.columns],\n 'nbytes': tbl.nbytes,\n 'num_columns': tbl.num_columns,\n 'num_rows': tbl.num_rows,\n 'schema': tbl.schema,\n 'shape': tbl.shape,\n }\n\n print(results)", "def raw_table_data(*args, **kwargs):\n # pylint: disable=unused-argument\n return {\n \"columns\": [\n {\"title\": \"id\"},\n {\"title\": \"name\"},\n {\"title\": \"type\"},\n ],\n \"data\": [\n [18371164, \"All\", \"CUSTOM\"],\n 
[18371166, \"None\", \"CUSTOM\"],\n [18371168, \"Localhost\", \"CUSTOM\"],\n [18371170, \"Localnetworks\", \"CUSTOM\"],\n ],\n }", "def table(self):\n return self._table", "def table(self):\n return self._table", "def tabledict(self):\n return dict(table=self.tablename,\n dffeld=self.name_dateifuehrungsschluessel,\n statusfeld=self.name_status,\n schluesselfeld=self.name_schluessel)", "def get_table_columns(self):\n raise NotImplementedError(\"Please implement this method\")", "def columnar(table: list[dict[str, str]]) -> dict[str, list[str]]:\n result: dict[str, list[str]] = {}\n keys = table[0].keys()\n for key in keys:\n result[key] = column_values(table, key)\n return result", "def columns(self, table):\n cur = self.connection.cursor()\n res = cur.execute(\"PRAGMA TABLE_INFO(%s)\" % table)\n columns = {}\n for row in res:\n columns[row[1]] = row[2]\n return columns", "def simple_format_table(table):\n s = [[str(e) for e in row] for row in table]\n lens = [max(map(len, col)) for col in zip(*s)]\n fmt = '\\t'.join('{{:{}}}'.format(x) for x in lens)\n table = [fmt.format(*row) for row in s]\n return '\\n'.join(table)", "def _qualify(table, cols):\n return ', '.join(['{}.{}'.format(table, c) for c in cols])", "def read_table_data(self, table):\n data = []\n index = 0\n for row in table.rows:\n data.append([])\n for cell in row.cells:\n text_data = ''\n for para in cell.paragraphs:\n text_data += para.text.strip(' ')\n data[index].append(text_data)\n index += 1\n\n # trim unneeded rows in old & new reports\n if all('CAPA' in x for x in data[0]):\n self.table_data = data[2:]\n else:\n self.table_data = data[1:]\n # trim end of list\n self.table_data = [row[:5] for row in self.table_data]", "def get_schema(self):\n return ', '.join('%s:%s' % (col, self.schema[col]) for col in self.schema)", "def _get_table_sql_columns(columns=[]):\n\n\tif len(columns) == 0:\n\t\tsql_columns = '*'\n\n\telse: \n\t\tsql_columns = \",\".join(columns)\n\n\treturn sql_columns", "def _get_table(self):\n\t\treturn self._table", "def getColumnHeads(self, table=None):\n if table == None:\n table = self.tableName\n self._db._c.execute(\"PRAGMA table_info(\" + table + \")\")\n return [ col[1] for col in self._db._c.fetchall() ]", "def _get_sql_create_table(self, table_attr):\n template = 'CREATE TABLE IF NOT EXISTS \"%s\" (\\n %s );'\n columns_pri, columns_ref, columns, columns_ignore = \\\n PsqlParser._get_categorized_columns(table_attr['columns'])\n v2_columns = []\n for columnName, columnAttr in merge_dicts(columns_pri, columns_ref, columns).iteritems():\n v2_columns.append(PsqlParser._get_sql_column(columnAttr))\n return template % (table_attr['name'], \", \\n \".join(v2_columns))", "def get_schema(self):\n return ', '.join(\n '%s:%s' % (col, self.schema[col]) for col in self.schema)", "def table_col(file_name='tpch'):\n \n path = './data/' + file_name + \"/sql/{}-create.sql\".format(\"tpch\")\n regex = re.compile(';\\($')\n \n tbl_name = {}\n tbl = \"\"\n with open(path, 'r') as f:\n for line in f.readlines():\n if \"CREATE TABLE\" in line:\n tbl = line.split()[2]\n tbl_name[tbl.lower()] = []\n elif line != \"\\n\" and ');' not in line and regex.search(line) == None:\n col = line.split()[0]\n tbl_name[tbl.lower()].append(col.lower())\n return tbl_name", "def get_table_query_string(self) -> str:\n if self.database and self.table:\n return f'\"{self.database}\".\"{self.schema}\".\"{self.table}\"'\n elif self.table:\n return f'\"{self.table}\"'\n else:\n return f\"({self.query})\"", "def output_columns(self) -> 
List[str]:", "def column_values(table: list[dict[str, str]], column: str) -> list[str]:\n values: list[str] = []\n for row in table:\n values.append(row[column])\n return values", "def get_headers (self, table, schema = 'TABLES'):\n get_headers = (\"SELECT * FROM information_schema.columns WHERE \"\n \"table_schema = \" + schema + \" AND \"\n \"table_name = \" + table + \"\")\n b_sql, b_table, self.sql = self.sql, self.table, get_headers \n self.run()\n self.sql = b_sql\n headers = self.as_DataFrame()[3].tolist()\n self.table = b_table\n\n return headers", "def _get_fields(self):\n fields = self.table[0]\n fields = filter(None.__ne__, fields)\n return list(map(str.lower, fields))", "def _getStrValues(self):\n res = {}\n for colname in self._iterName():\n res[colname] = str(self._values[colname])\n return res", "def makeCreateTable(self, tbl, columns, rows):\n\n lines = []\n lines.append('CREATE TABLE \"{}\" ('.format(tbl))\n\n m = []\n for name, typ, prec, scale in columns:\n if scale:\n prec = '({},{})'.format(prec,scale)\n elif prec:\n prec = '({})'.format(prec)\n else:\n prec=''\n\n m.append(' \"{}\" {}{}'.format(name, typ, prec))\n \n lines.append(\",\\n\".join(m))\n lines.append(\")\")\n\n return \"\\n\".join(lines)", "def StringTableFromMatrix(matrix):\n m, n = matrix.shape\n tbl = StringTable(m, n, \"0.0\")\n for i in range(m):\n for j in range(n):\n tbl[i,j] = matrix[i,j]\n return tbl", "def _init_table(self, table: \"Table\"):\n if not self.columns:\n self.columns = table.columns\n self._data = table.data", "def select(table: dict[str, list[str]], cols: list[str]) -> dict[str, list[str]]:\n result: dict[str, list[str]] = {}\n for col in cols:\n result[col] = table[col]\n return result", "def column_values(table: list[dict[str, str]], column: str) -> list[str]:\n result: list[str] = []\n for row in table:\n item: str = row[column]\n result.append(item)\n\n return result", "def column_values(table: list[dict[str, str]], column: str) -> list[str]:\n result: list[str] = []\n for row in table:\n item: str = row[column]\n result.append(item)\n\n return result", "def getTable(table):\n\n return session.query(table).all()", "def start_table(self):\n self.col_widths = []\n self.result = \"\"", "def searchColumnHeadings(self, table: Table):\n lista = []\n if table:\n for col in table.columns:\n lista.append(col.name)\n return lista\n return None", "def tableify(table):\n num_cols = 0\n maxes = []\n\n for row in table:\n num_cols = max(num_cols, len(row))\n if len(maxes) < len(row):\n maxes.extend([0] * (len(row) - len(maxes)))\n for i, cell in enumerate(row):\n maxes[i] = max(maxes[i], len(str(cell)))\n\n def fix_row(maxes, row):\n return ' '.join([\n str(cell) + (' ' * (maxes[i] - len(str(cell))))\n for i, cell in enumerate(row)\n ])\n\n return '\\n'.join(\n [\n fix_row(maxes, row)\n for row in table\n ]\n )", "def table_name() -> str:\n pass", "def basic_table_creation():\n results = {\n 'From pyarrow arrays': pa.table([\n pa.array(['Kakashi', 'Itachi', 'Shisui'], type=pa.string()),\n pa.array(['Hatake', 'Uchiha', 'Uchiha'], type=pa.string())\n ], names=['first_name', 'last_name']),\n 'From List[dict]': pa.Table.from_pylist([\n {'first_name': 'Kakashi', 'last_name': 'Hatake', },\n {'first_name': 'Itachi', 'last_name': 'Uchiha', },\n {'first_name': 'Shisui', 'last_name': 'Uchiha', },\n ]),\n 'From Dict[str, list]': pa.Table.from_pydict({\n 'first_name': ['Kakashi', 'Itachi', 'Shisui'],\n 'last_name': ['Hatake', 'Uchiha', 'Uchiha'],\n }),\n 'From pandas df': 
pa.Table.from_pandas(pd.DataFrame([\n {'first_name': 'Kakashi', 'last_name': 'Hatake', },\n {'first_name': 'Itachi', 'last_name': 'Uchiha', },\n {'first_name': 'Shisui', 'last_name': 'Uchiha', },\n ])),\n }\n pretty_print_result_map(results)", "def _get_table_columns(self):\n try:\n table_header = parse_table_head(self.table.value, version=self.version)\n merged_data = self.table.value[table_header.tdef_header_end:]\n if table_header.TDEF_header.next_page_ptr:\n merged_data = merged_data + self._merge_table_data(table_header.TDEF_header.next_page_ptr)\n\n parsed_data = parse_table_data(merged_data, table_header.real_index_count,\n table_header.column_count, version=self.version)\n\n # Merge Data back to table_header\n table_header['column'] = parsed_data['column']\n table_header['column_names'] = parsed_data['column_names']\n\n except ConstructError:\n logging.error(f\"Failed to parse table header {self.table.value}\")\n return\n col_names = table_header.column_names\n columns = table_header.column\n\n # Add names to columns metadata so we can use only columns for parsing\n for i, c in enumerate(columns):\n c.col_name_str = col_names[i].col_name_str\n\n # column_index is more accurate(id is always incremented so it is wrong when a column is deleted).\n # Some tables like the catalog don't have index, so if indexes are 0 use id.\n\n # create a dict of index to column to make it easier to access. offset is used to make this zero based\n offset = min(x.column_index for x in columns)\n column_dict = {x.column_index - offset: x for x in columns}\n # If column index is not unique try best effort\n if len(column_dict) != len(columns):\n # create a dict of id to column to make it easier to access\n column_dict = {x.column_id: x for x in columns}\n\n if len(column_dict) != table_header.column_count:\n logging.debug(f\"expected {table_header.column_count} columns got {len(column_dict)}\")\n return column_dict, table_header", "def set_row_as_column_names(self, table: Table, row: Index):\n values = self.pop_table_row(table, row, as_list=True)\n table.columns = values", "def ddl_table(self, tabela):\r\n sql = \"\"\"SELECT COLUMN_NAME as coluna, \r\n\t CASE DATA_TYPE\r\n\t WHEN 'uniqueidentifier' THEN 'varchar'\r\n WHEN 'datetime' THEN 'timestamp' \r\n WHEN 'varbinary' THEN 'bytea'\r\n WHEN 'char' THEN 'varchar'\r\n WHEN 'nvarchar' THEN 'varchar'\r\n WHEN 'image'THEN 'bytea'\r\n WHEN 'bit'THEN 'boolean'\r\n\t ELSE DATA_TYPE END AS tipo, \r\n\t ORDINAL_POSITION as column_id, \r\n\t DATA_TYPE\r\n FROM INFORMATION_SCHEMA.COLUMNS\r\n WHERE TABLE_NAME = '%s'\r\n ORDER BY ORDINAL_POSITION\"\"\" % (tabela)\r\n res = self.cur_origem.execute(sql)\r\n table = \"CREATE TABLE IF NOT EXISTS %s (\" % tabela\r\n tipos = {}\r\n for coluna, tipo, id, data_type, in res.fetchall():\r\n # print('\"%s,%s\"'%(coluna,tipo))\r\n if coluna == 'ROW_VERSION' or coluna == 'ROWVERSION':\r\n continue\r\n else:\r\n col = coluna\r\n table += \"%s %s,\" % (col.strip(), tipo.strip())\r\n if tipo == 'bytea':\r\n if data_type == 'image':\r\n coluna = 'CONVERT(VARCHAR(1000), cast(%s as varbinary(max)), 2)' % coluna\r\n else: \r\n coluna = 'CONVERT(VARCHAR(1000), %s, 2)' % coluna\r\n tipos[id] = [coluna.strip(), data_type]\r\n table = table[:-1]+\");\"\r\n return table, tipos", "def dump(self):\n # This is pretty, but we could just return the ddl_string\n outputs = [\"Table : %s\\n\" % self.name]\n # We show the columns in sequence order, using DSU\n # DSU = Decorate, Sort, Undecorate - a.k.a Schwartzian transform\n deco_cols = [ 
(x['sequence'], x) for x in list(self.columns.values()) ]\n deco_cols.sort()\n cols = [ col for seq, col in deco_cols ]\n for column in cols:\n outputs.append(\" %-30s\" % column['name'])\n if 'length' in column and column['length'] != None:\n if 'precision' in column and column['precision'] != None:\n # This column is a numeric data type\n column_defn = column['type']+self.__class__.calc_precision(column['type'], column['length'], column['precision'], column['scale'])\n else:\n # This column is a text data type\n column_defn = '%s(%d)' % (column['type'], column['length'])\n else:\n # This column is a simple data type such as date or boolean\n column_defn = column['type']\n outputs.append(\" %-15s \" % column_defn)\n if not column['nullable']:\n outputs.append(\" NOT NULL\")\n if 'special' in column:\n # Special case for e.g. 'enum' in MySQL\n outputs.append(' %s' % column['special'])\n outputs.append(\"\\n\")\n # Constraints please\n if len(self.constraints) != 0:\n outputs.append(\" Constraints;\\n\")\n for constraint_name, constraint in list(self.constraints.items()):\n outputs.append(\" %s, \" % constraint_name)\n outputs.append(\"%s \" % (constraint['type']))\n if 'columns' in constraint:\n outputs.append(\": \")\n outputs.append(', '.join(constraint['columns']))\n outputs.append(\"\\n\")\n # Indexes\n if len(self.indexes) > 0:\n outputs.append(\" Indexes:\\n\")\n for index_name, index in list(self.indexes.items()):\n outputs.append(\" %s, \" % index_name)\n outputs.append(\"%s\\n\" % index['type'])\n # Don't check number of columns because there must be at least 1\n outputs.append(\" Columns: \")\n outputs.append(\", \".join(index['columns']))\n outputs.append(\"\\n\")\n # LOG.debug(\"Table Dump output: \" + \"\".join(outputs))\n return \"\".join(outputs)", "def _create_table_html(self, table):\n if table != {} and table is not None:\n html_output = [['<hr>']]\n else:\n html_output = []\n\n for t in self._util_func.dict_key_list(table.keys()):\n html_output.append(table[t])\n\n return html_output", "def getTableColumnDefs(self, schema, table):\r\n src_columns = self.fetchSqlRecords(\r\n \"select c.column_name, data_type, character_maximum_length, numeric_precision, numeric_scale from information_schema.columns c where c.table_schema = '{}' and c.table_name='{}'\".format(schema, table))\r\n return [dict(zip(('name', 'type', 'max_length', 'precision', 'scale'), c)) for c in src_columns]", "def prettytable(self):\r\n table = PrettyTable(self.columns)\r\n if self.sortby:\r\n table.sortby = self.sortby\r\n for a_col, alignment in self.align.items():\r\n table.align[a_col] = alignment\r\n\r\n # Adding rows\r\n for row in self.rows:\r\n table.add_row(row)\r\n return table", "def _get_tabletype(cls) -> str:\n raise NotImplementedError", "def _remake_table(self, table_name, renames={}, deleted=[], altered={}):\r\n # Dry runs get skipped completely\r\n if self.dry_run:\r\n return\r\n # Temporary table's name\r\n temp_name = \"_south_new_\" + table_name\r\n # Work out the (possibly new) definitions of each column\r\n definitions = {}\r\n cursor = self._get_connection().cursor()\r\n for column_info in self._get_connection().introspection.get_table_description(cursor, table_name):\r\n name = column_info[0]\r\n type = column_info[1]\r\n # Deal with an alter (these happen before renames)\r\n if name in altered:\r\n type = altered[name]\r\n # Deal with a rename\r\n if name in renames:\r\n name = renames[name]\r\n # Add to the defs\r\n definitions[name] = type\r\n # Alright, Make the table\r\n 
self.execute(\"CREATE TABLE %s (%s)\" % (\r\n self.quote_name(temp_name),\r\n \", \".join([\"%s %s\" % (self.quote_name(cname), ctype) for cname, ctype in definitions.items()]),\r\n ))\r\n # Copy over the data\r\n self._copy_data(table_name, temp_name, renames)\r\n # Delete the old table, move our new one over it\r\n self.delete_table(table_name)\r\n self.rename_table(temp_name, table_name)", "def get_table_data(self):\n return self.table_data", "def convert_quick_table(result):\n headline = result.split('\\n',1)[0]\n names, converters = MastCasJobs.get_converters(headline, delimiter=',')\n tab = ascii.read(MastCasJobs.replacenull(result,delimiter=','),\n guess=False,fast_reader=False,format='csv',\n names=names,converters=converters)\n return tab", "def table_names(self, cursor=None):\r\n return [kind.key().name() for kind in Query(kind='__kind__').Run()]", "def populate_table(self, table: Table, name=None) -> None:\n new_table = Table()\n\n if name is None:\n name = self.returns_tms.name\n\n new_table.set_column_names([\"Statistic\", name])\n for item in self._get_results_list():\n row_name = item[1] + \" [\" + item[3] + \"]\"\n if item[3] == '':\n row_name = item[1]\n\n new_table.add_row([row_name, Table.Cell(item[2])])\n\n if len(table.rows) != 0:\n new_table = table.combine(new_table)\n\n table.set_column_names(new_table.get_column_names())\n table.rows = new_table.rows", "def _html_table(self):\n return '</i>'.join(APtable._repr_html_(self).split('</i>')[1:])", "def columns_type(self,table):\n with self.conn.cursor() as cur:\n #_logger.debug('Columns Query. sql: %r', self.table_columns_query)\n cur.execute(self.columns_info_query % (self.dbname,table))\n for row in cur:\n yield row", "def get_infostring(table):\n infostring = \"\"\n for entries in table:\n for entry in entries.values():\n infostring += entry+\", \"\n return infostring", "def translation():\r\n class TranslationTable(tables.Table):\r\n normal = tables.Column(verbose_name=ugettext(\"Normal\"))\r\n lazy = tables.Column(verbose_name=ugettext(\"Lazy\"))\r\n\r\n table = TranslationTable([])\r\n assert \"Normal\" == table.columns[\"normal\"].header\r\n assert \"Lazy\" == table.columns[\"lazy\"].header", "def table_fields() -> Dict[str, TableFieldDetails]:\n return {\n \"REPEATS\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=0,\n bit_high=15,\n description=\"Number of times the line will repeat\",\n labels=None,\n ),\n \"TRIGGER\": TableFieldDetails(\n subtype=\"enum\",\n bit_low=16,\n bit_high=19,\n description=\"The trigger condition to start the phases\",\n labels=[\n \"Immediate\",\n \"BITA=0\",\n \"BITA=1\",\n \"BITB=0\",\n \"BITB=1\",\n \"BITC=0\",\n \"BITC=1\",\n \"POSA>=POSITION\",\n \"POSA<=POSITION\",\n \"POSB>=POSITION\",\n \"POSB<=POSITION\",\n \"POSC>=POSITION\",\n \"POSC<=POSITION\",\n ],\n ),\n \"POSITION\": TableFieldDetails(\n subtype=\"int\",\n bit_low=32,\n bit_high=63,\n description=\"The position that can be used in trigger condition\",\n labels=None,\n ),\n \"TIME1\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=64,\n bit_high=95,\n description=\"The time the optional phase 1 should take\",\n labels=None,\n ),\n \"OUTA1\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=20,\n bit_high=20,\n description=\"Output A value during phase 1\",\n labels=None,\n ),\n \"OUTB1\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=21,\n bit_high=21,\n description=\"Output B value during phase 1\",\n labels=None,\n ),\n \"OUTC1\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=22,\n bit_high=22,\n 
description=\"Output C value during phase 1\",\n labels=None,\n ),\n \"OUTD1\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=23,\n bit_high=23,\n description=\"Output D value during phase 1\",\n labels=None,\n ),\n \"OUTE1\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=24,\n bit_high=24,\n description=\"Output E value during phase 1\",\n labels=None,\n ),\n \"OUTF1\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=25,\n bit_high=25,\n description=\"Output F value during phase 1\",\n labels=None,\n ),\n \"TIME2\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=96,\n bit_high=127,\n description=\"The time the mandatory phase 2 should take\",\n labels=None,\n ),\n \"OUTA2\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=26,\n bit_high=26,\n description=\"Output A value during phase 2\",\n labels=None,\n ),\n \"OUTB2\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=27,\n bit_high=27,\n description=\"Output B value during phase 2\",\n labels=None,\n ),\n \"OUTC2\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=28,\n bit_high=28,\n description=\"Output C value during phase 2\",\n labels=None,\n ),\n \"OUTD2\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=29,\n bit_high=29,\n description=\"Output D value during phase 2\",\n labels=None,\n ),\n \"OUTE2\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=30,\n bit_high=30,\n description=\"Output E value during phase 2\",\n labels=None,\n ),\n \"OUTF2\": TableFieldDetails(\n subtype=\"uint\",\n bit_low=31,\n bit_high=31,\n description=\"Output F value during phase 2\",\n labels=None,\n ),\n }", "def __str__(self):\n return str([self.fields[col] for col in self.columns])", "def columns(self, table_name):\n table = self._create_table(table_name)\n return [c.name for c in table.c]", "def _get_text_rendering(self, table):\n text_table = ''\n base_variable = table.get_variable().replace(\"'\", '')\n\n if base_variable == self._system.get_settings().user_input:\n text_table += '\\n[user]\\t'\n elif base_variable == self._system.get_settings().system_output:\n text_table += '[system]\\t'\n else:\n text_table += '[' + base_variable + ']\\t'\n\n for value in table.get_values():\n if not isinstance(value, NoneVal):\n content = str(value)\n if table.get_prob(value) < 0.98:\n content += ' (' + StringUtils.get_short_form(table.get_prob(value)) + ')'\n\n text_table += content + '\\n\\t\\t'\n\n if base_variable == self._system.get_settings().user_input:\n text_table += '\\n'\n\n text_table = text_table[0:-3]\n return text_table" ]
[ "0.6711792", "0.64184517", "0.64011616", "0.62590617", "0.6193587", "0.6136157", "0.6058402", "0.6052583", "0.60350543", "0.5984028", "0.5966508", "0.59490246", "0.5942764", "0.5938039", "0.59240717", "0.5916883", "0.5916883", "0.5908916", "0.59033936", "0.5902068", "0.58906996", "0.58857995", "0.5881167", "0.585046", "0.58457214", "0.58248", "0.57768303", "0.5774298", "0.575645", "0.5750014", "0.5740206", "0.57180566", "0.5717021", "0.5705656", "0.57028073", "0.57025576", "0.5676367", "0.56744456", "0.56715137", "0.56680113", "0.5666424", "0.566493", "0.56556326", "0.5646041", "0.564303", "0.5632032", "0.5632032", "0.5621606", "0.56100935", "0.5608373", "0.5597093", "0.5595542", "0.5592626", "0.5585679", "0.55762255", "0.5574234", "0.5574142", "0.55672544", "0.55562943", "0.55550754", "0.5545922", "0.55358034", "0.5530604", "0.55228454", "0.5521555", "0.55149996", "0.5504911", "0.54923433", "0.5478348", "0.5478132", "0.5477789", "0.5471548", "0.5471548", "0.54642296", "0.54583305", "0.5447119", "0.544488", "0.5443342", "0.5441885", "0.54330146", "0.5426496", "0.53883976", "0.53727037", "0.53650755", "0.53623736", "0.5362335", "0.5336104", "0.5331195", "0.533115", "0.5327475", "0.5325674", "0.53239626", "0.53167653", "0.53131354", "0.53130627", "0.5308", "0.5307006", "0.5301841", "0.5296276", "0.52958494" ]
0.602494
9
Return a pa.Array that replaces "" with null. Assume `arr` is of type `utf8` or a dictionary of `utf8`.
def _nix_utf8_chunk_empty_strings(chunk: pyarrow.Array) -> pyarrow.Array:
    # pyarrow's cast() can't handle empty string. Create a new Array with
    # "" changed to null.
    _, offsets_buf, data_buf = chunk.buffers()

    # Build a new validity buffer, based on offsets. Empty string = null.
    # Assume `data` has no padding bytes in the already-null values. That way
    # we can ignore the _original_ validity buffer and assume all original
    # values are not-null. (Null values are stored as "" plus "invalid".)
    #
    # Validity-bitmap spec:
    # https://arrow.apache.org/docs/format/Columnar.html#validity-bitmaps

    # first offset must be 0. Next offsets are used to calculate lengths
    offsets = array.array("i")
    assert offsets.itemsize == 4
    offsets.frombytes(offsets_buf)
    if sys.byteorder != "little":
        offsets.byteswap()  # pyarrow is little-endian

    validity = bytearray()
    null_count = 0
    last_offset = offsets[0]
    assert last_offset == 0
    pos = 1
    while True:
        # Travel offsets in strides of 8: one per char in the validity bitmap.
        # Pad with an extra 1 bit -- [2020-02-20, adamhooper] I think I read
        # this is needed somewhere.
        valid_byte = 0x00
        block = offsets[pos : pos + 8]
        try:
            if block[0] > last_offset:
                valid_byte |= 0x1
            else:
                null_count += 1
            if block[1] > block[0]:
                valid_byte |= 0x2
            else:
                null_count += 1
            if block[2] > block[1]:
                valid_byte |= 0x4
            else:
                null_count += 1
            if block[3] > block[2]:
                valid_byte |= 0x8
            else:
                null_count += 1
            if block[4] > block[3]:
                valid_byte |= 0x10
            else:
                null_count += 1
            if block[5] > block[4]:
                valid_byte |= 0x20
            else:
                null_count += 1
            if block[6] > block[5]:
                valid_byte |= 0x40
            else:
                null_count += 1
            if block[7] > block[6]:
                valid_byte |= 0x80
            else:
                null_count += 1
            validity.append(valid_byte)
            last_offset = block[7]
            pos += 8
        except IndexError:
            validity.append(valid_byte)
            break  # end of offsets

    validity_buf = pyarrow.py_buffer(validity)

    # We may have over-counted in null_count: anything before `chunk.offset`
    # should not count.
    #
    # It's less work to "undo" the counting we did before -- otherwise we'd
    # riddle the above loop with if-statements.
    for i in range(chunk.offset):
        if offsets[i + 1] == offsets[i]:
            null_count -= 1

    return pyarrow.StringArray.from_buffers(
        length=len(chunk),
        value_offsets=offsets_buf,
        data=data_buf,
        null_bitmap=validity_buf,
        null_count=null_count,
        offset=chunk.offset,
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _unicode(arr):\n try:\n return unicode(arr)\n except UnicodeEncodeError:\n dt = arr.dtype.newbyteorder('S')\n return unicode(arr.view(dt))", "def strip_array(arr):\n\n return [word.strip(' ') for word in arr]", "def _strip_all(dic):\n for k, v in dic.items():\n\n if len(v) == 0:\n dic[k] = 'NULL'\n if isinstance(v, str):\n v = v.strip().replace('\\t', '').replace('\\n', '').encode('utf-8', 'ignore')\n dic[k] = v\n\n return dic", "def filterNull(self, result):\n\t\treturn [_ for _ in result if _]", "def pyarrow_array(arr, nan_to_null=False):\n import numpy as np\n import pyarrow as pa\n if nan_to_null and issubclass(arr.dtype.type,\n (np.floating, np.complexfloating)):\n isnan = np.isnan(arr)\n if isnan.any():\n pa_nul = pa.py_buffer(get_bitmap(isnan))\n return pa.Array.from_buffers(pa.from_numpy_dtype(arr.dtype),\n arr.size,\n [pa_nul, pa.py_buffer(arr)])\n return pa.Array.from_buffers(pa.from_numpy_dtype(arr.dtype),\n arr.size,\n [None, pa.py_buffer(arr)])", "def remove_handles(text_array):\n\n handle_pattern = re.compile(r'([@])\\w+')\n\n return np.array([re.sub(handle_pattern, \"\", str(string)) for string in text_array])", "def remove_unc(array):\r\n\tnew_arr = []\r\n\r\n\tdef checkForNewLineAndSemiColon(string):\r\n\t\t\"\"\"delete the new-line character and semi-colon from the string\"\"\"\r\n\t\tnew_string = \"\"\r\n\t\tfor i in string:\r\n\t\t\tif i != \"\\n\" and i != \";\":\r\n\t\t\t\tnew_string += i\r\n\t\treturn new_string\r\n\r\n\tfor i in range(len(array)):\r\n\t\tif array[i] != '' and array[i] != \"package\":\r\n\t\t\tnew_arr.append(checkForNewLineAndSemiColon(array[i]))\r\n\r\n\treturn new_arr[0]", "def arrayify(possible_array):\n if isinstance(possible_array, basestring):\n return [possible_array]\n return possible_array", "def strip(self, chars=None):\n return asarray(strip(self, chars))", "def clean_data(array):\n ret = np.zeros(len(array))\n for i in range(len(array)):\n drop_id = len(str(i+1)) + 1\n array[i, 0] = array[i, 0][int(drop_id):]\n return array", "def replace_empty_value(data: list) -> list:\n for book in data:\n for attribute in book:\n if book[attribute] == \"\":\n book[attribute] = \"None\"\n return data", "def clear_empty_strings(data):\n if isinstance(data, dict):\n for k, v in data.items():\n if v == \"\":\n del data[k]\n else:\n data[k] = clear_empty_strings(v)\n elif isinstance(data, (list, set, tuple)):\n # use list comprehension to filter out \"\" and modify items,\n # then reconstruct as original data type\n data = type(data)([clear_empty_strings(x) for x in data if x != \"\"])\n elif data == \"\":\n return None\n return data", "def _convert_ascii(self, column, field):\n format = column.format\n recformat = getattr(format, \"recformat\", ASCII2NUMPY[format[0]])\n # if the string = TNULL, return ASCIITNULL\n nullval = str(column.null).strip().encode(\"ascii\")\n if len(nullval) > format.width:\n nullval = nullval[: format.width]\n\n # Before using .replace make sure that any trailing bytes in each\n # column are filled with spaces, and *not*, say, nulls; this causes\n # functions like replace to potentially leave gibberish bytes in the\n # array buffer.\n dummy = np.char.ljust(field, format.width)\n dummy = np.char.replace(dummy, encode_ascii(\"D\"), encode_ascii(\"E\"))\n null_fill = encode_ascii(str(ASCIITNULL).rjust(format.width))\n\n # Convert all fields equal to the TNULL value (nullval) to empty fields.\n # TODO: These fields really should be converted to NaN or something else undefined.\n # Currently they are converted to empty fields, 
which are then set to zero.\n dummy = np.where(np.char.strip(dummy) == nullval, null_fill, dummy)\n\n # always replace empty fields, see https://github.com/astropy/astropy/pull/5394\n if nullval != b\"\":\n dummy = np.where(np.char.strip(dummy) == b\"\", null_fill, dummy)\n\n try:\n dummy = np.array(dummy, dtype=recformat)\n except ValueError as exc:\n indx = self.names.index(column.name)\n raise ValueError(\n \"{}; the header may be missing the necessary TNULL{} \"\n \"keyword or the table contains invalid data\".format(exc, indx + 1)\n )\n\n return dummy", "def fill_str_array(data, size, push_back=True):\n\n string_array_size = len(data)\n nan_array_size = size - string_array_size\n num_chars = sdc.str_arr_ext.num_total_chars(data)\n\n result_data = sdc.str_arr_ext.pre_alloc_string_array(size, num_chars)\n\n # Keep NaN values of initial array\n arr_is_na_mask = numpy.array([sdc.hiframes.api.isna(data, i) for i in range(string_array_size)])\n data_str_list = sdc.str_arr_ext.to_string_list(data)\n nan_list = [''] * nan_array_size\n\n result_list = data_str_list + nan_list if push_back else nan_list + data_str_list\n cp_str_list_to_array(result_data, result_list)\n\n # Batch=64 iteration to avoid threads competition\n batch_size = 64\n if push_back:\n for i in numba.prange(size//batch_size + 1):\n for j in range(i*batch_size, min((i+1)*batch_size, size)):\n if j < string_array_size:\n if arr_is_na_mask[j]:\n str_arr_set_na(result_data, j)\n else:\n str_arr_set_na(result_data, j)\n\n else:\n for i in numba.prange(size//batch_size + 1):\n for j in range(i*batch_size, min((i+1)*batch_size, size)):\n if j < nan_array_size:\n str_arr_set_na(result_data, j)\n else:\n str_arr_j = j - nan_array_size\n if arr_is_na_mask[str_arr_j]:\n str_arr_set_na(result_data, j)\n\n return result_data", "def remove_empty(data):\n out = []\n for item in data:\n if item == '':\n continue\n out.append(item)\n return out", "def array2anyscript(arr):\n def tostr(v):\n if np.isreal(v):\n return '{:.12g}'.format(v)\n elif isinstance(v, (string_types, np.str_)):\n return '\"{}\"'.format(v)\n\n def createsubarr(arr):\n outstr = \"\"\n if isinstance(arr, np.ndarray):\n if len(arr) == 1 and not isinstance(arr[0], np.ndarray):\n return '{'+tostr(arr[0]) + '},'\n outstr += '{'\n for row in arr:\n outstr += createsubarr(row)\n outstr = outstr.strip(',') + '},'\n return outstr\n else:\n return outstr + tostr(arr)+','\n if isinstance(arr, np.ndarray) and not arr.shape:\n return tostr(arr.tolist())\n elif isinstance(arr, np.ndarray) :\n return createsubarr(arr).strip(',')\n elif isinstance( arr, float):\n return tostr(arr)\n else:\n return str(arr)", "def replace_nones(strings: List[Optional[str]]) -> List[str]:\n return list(map(lambda s: '' if s is None else s, strings))", "def none_to_empty(data):\n return data if data is not None else ''", "def clean_values(values_to_clean: np.ndarray):\n char_rem = \"!@#$%^*()[]{};:.,/<>?|`~-=_+'\\\\\"\n for j in range(values_to_clean.shape[0]):\n for k in range(2, 4):\n for c in char_rem:\n values_to_clean[j, k] = re.sub(' +', ' ', values_to_clean[j, k].replace(c, \" \").strip())\n return values_to_clean", "def lstrip(self, chars=None):\n return asarray(lstrip(self, chars))", "def convert_null(values: Iterable) -> list:\n\n return [x\n if x is not None\n else NULL\n for x in values]", "def str_to_numpy(string_array):\n if pd.isnull(string_array):\n return(np.NaN)\n else:\n return np.array(ast.literal_eval(string_array))", "def strip(a, chars=None):\n a_arr = numpy.asarray(a)\n return 
_vec_string(a_arr, a_arr.dtype, 'strip', _clean_args(chars))", "def normalize_array(var):\n if np.issubdtype(var.dtype, 'S1'):\n if var.dtype == str:\n # Python 2 on netCDF4 'string' variables needs this.\n # Python 3 returns false for np.issubdtype(var.dtype, 'S1')\n return var[:]\n\n def decoder(x):\n return str(x.decode('utf-8'))\n vfunc = np.vectorize(decoder)\n return vfunc(nc4.chartostring(var[:]))\n else:\n return var[:]", "def _replace_none(lst, repl=\"\"):\n return ['' if v is None else v for v in lst]", "def array_to_string(array: list, separator: str) -> str:\n string = ''\n for value in array:\n if type(value) == str:\n value = value.replace(\"'\", \"''\")\n string += \"'\" + value + \"'\"\n elif value is None:\n string += 'null'\n else:\n string += str(value)\n string += separator\n\n string = string[:-len(separator)]\n\n return string", "def testtojsonempty(self):\n self.assertEqual(\"[]\", Base.to_json_string([]))", "def nulls_to_empty(dic, *keys):\n if not keys:\n keys = dic.keys()\n for key in keys:\n if dic[key] is None:\n dic[key] = ''\n return None", "def _strip_nul(text):\n return text.replace('\\x00', '<NUL>')", "def strip_blanklines(blob):\n lines = blob.split('\\n')\n return '\\n'.join([line for line in lines if line.strip() != ''])", "def get_strip_strings_array(strings):\n string_array = strings.strip()\n string_array = string_array.split(',')\n result = []\n for string in string_array:\n string = string.strip()\n if string:\n result.append(string)\n return result", "def checkio(element):\n return [\"\", u\"\", \"\"]", "def _maybe_fill(arr, fill_value=np.nan):\n if _is_na_compat(arr, fill_value):\n arr.fill(fill_value)\n return arr", "def clearArray(ls=('AnyPin', [], {PinSpecifires.CONSTRAINT: '1', PinSpecifires.ENABLED_OPTIONS: PinOptions.ArraySupported | PinOptions.AllowAny})):\n return clearList(ls)", "def escaped(array):\n\n return list(map(re.escape, array))", "def stringToList(txt):\r\n txt1 = toTypeOrNone(txt)\r\n if txt1 is None:\r\n return []\r\n list_ = [item.strip() for item in txt.split(\",\")]\r\n listWithNones = map(toTypeOrNone, list_)\r\n if any(True for x in listWithNones if x is not None):\r\n return list_\r\n return []", "def test_value_empty_string(self):\n raw = [\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n ]\n string = \"\"\n self.assertEqual(DPTString.to_knx(string), raw)\n self.assertEqual(DPTString.from_knx(raw), string)", "def remove_empty_string(str_list):\n return list(filter(None, str_list))", "def replacenull(results, delimiter=','):\n if results.startswith('null{}'.format(delimiter)):\n # rare special case\n results = results[4:]\n pat = '(?<=[{}\\n])null(?=[{}\\n])'.format(delimiter,delimiter)\n results = re.sub(pat, '', results, flags=re.IGNORECASE | re.DOTALL)\n return results", "def remove_blanks_list(src):\n return [el for el in src if el]", "def netcdf_compatible_array(arry):\n arry = strip_array_wrappers(arry)\n\n if arry.ndim > 0:\n for _ in range(3):\n if arry.dtype.char != \"O\" or arry.ndim == 0:\n break\n\n if arry.shape[0] == 1:\n arry = np.array(arry[0])\n else:\n arry = np.array(tuple(arry))\n\n if \"S\" in arry.dtype.char:\n return np.char.decode(arry, \"ascii\")\n # TODO: ensure no float16, ...\n return arry", "def without_empty(s):\n return {i for i in s if not i.is_empty()}", "def decode (self, s):\n if s == \"null\": return []\n return s.split(chr(257))", "def remove_halo(field):\n if not isinstance(field, np.ndarray):\n field = 
np.asarray(field)\n if len(field) <= 2:\n return np.array([])\n return field[1:-1, :-1]", "def format_array(self, name, value):\n\n if value is not None and isinstance(value, list):\n array = '%s = [\\n%s]\\n' % (name, self.parse_array(value, 1))\n else:\n array = '%s = None\\n' % name\n return array", "def _always_array(value: str | list | None) -> list:\n if value is None:\n value = []\n elif not isinstance(value, list):\n value = [value]\n return value", "def getAttributeParamNullValues(self, sAttr):\n sPrefix = self.getHungarianPrefix(sAttr);\n if sPrefix in ['id', 'uid', 'i', 'off', 'pct']:\n return [-1, '', '-1',];\n elif sPrefix in ['l', 'c',]:\n return [long(-1), '', '-1',];\n elif sPrefix == 'f':\n return ['',];\n elif sPrefix in ['enm', 'ip', 's', 'ts', 'uuid']:\n return ['',];\n elif sPrefix in ['ai', 'aid', 'al', 'as']:\n return [[], '', None]; ## @todo ??\n elif sPrefix == 'bm':\n return ['', [],]; ## @todo bitmaps.\n raise TMExceptionBase('Unable to classify \"%s\" (prefix %s)' % (sAttr, sPrefix));", "def list_of_strings_to_c_string_array(l):\n c_strings = (ctypes.c_char_p*(len(l)))()\n for i, s in enumerate(l):\n if s == None:\n c_strings[i] = None\n else:\n # create_string_buffer() handles conversion\n c_strings[i] = ctypes.create_string_buffer(strtobytes(s)).value\n return c_strings", "def get_fixed_array():\n return np.array([[[np.nan, np.nan],\n [np.nan, np.nan],\n [np.nan, np.nan]],\n\n [[3., 7.],\n [2., 4.],\n [1., 16.]],\n\n [[17., 5.],\n [10., 0.],\n [14., 12.]],\n\n [[8., 6.],\n [13., 11.],\n [9., 15.]]])", "def parse(arr_str):\n return arr_str.rstrip().replace(' ', '').split(',')[:-1]", "def strip_array_wrappers(arry):\n curr = arry\n if curr.ndim == 0:\n if isinstance(curr[...], np.ndarray):\n return strip_array_wrappers(curr[...])\n return curr\n\n # there is a possibility for infinite looping\n # e.g. 
[np.ndarray, str, dict] would stay object array\n # impossible if homogeneous (implied by 1-element wrappers)\n while isinstance(curr[0], np.ndarray):\n if curr.shape[0] == 1:\n curr = curr[0]\n else:\n curr = np.array(tuple(curr))\n\n return curr", "def basic_array_ejection():\n arr: pa.Array = pa.array([1, 2, 3, 4, 5], type=pa.int8())\n\n srs: pd.Series = arr.to_pandas() # NOTE: Requires pandas installation\n nparr: np.ndarray = arr.to_numpy()\n list_: List[dict] = arr.to_pylist()\n str_: str = arr.to_string()\n\n results = {\n 'to_pandas > to_list': srs.to_list(),\n 'to_numpy > tolist': nparr.tolist(),\n 'to_pylist': list_,\n 'to_string': str_,\n }\n\n pretty_print_result_map(results)", "def encode_byte_array(value: bytes) -> bytes:\n return bytes([]) if isinstance(value, type(None)) else value", "def no_null(x):\n return \"\\0\" not in x", "def test_getArray_success_contain_blanks(self):\n # prepare\n fileName = \"10ContBlanks\"\n expectedResult = [2.0, 3.4, 5.9, 6.5, 12.0, 13.0]\n\n # execute\n actuatlResponse = PSPQuickSortInput.getArray(fileName)\n\n # assert\n self.assertTrue(expectedResult, actuatlResponse)", "def convert_nil(self, v, t):\n return relay.Tuple([])", "def remove_NA(d):\r\n if d['label'] == None:\r\n d['label'] = np.array('NA')\r\n if ' ' in d['label']:\r\n d['label'] = \",\".join(sorted(d['label'].split()))\r\n return d", "def decodeUtf8(self, arrayBuffer):", "def decodeUtf8(self, arrayBuffer):", "def remove_emojis(text_array):\n # emoji regular expression #\n\n emoji_pattern = re.compile(\n \"[\"\n \"\\U0001F1E0-\\U0001F1FF\" # flags (iOS)\n \"\\U0001F300-\\U0001F5FF\" # symbols & pictographs\n \"\\U0001F600-\\U0001F64F\" # emoticons\n \"\\U0001F680-\\U0001F6FF\" # transport & map symbols\n \"\\U0001F700-\\U0001F77F\" # alchemical symbols\n \"\\U0001F780-\\U0001F7FF\" # Geometric Shapes Extended\n \"\\U0001F800-\\U0001F8FF\" # Supplemental Arrows-C\n \"\\U0001F900-\\U0001F9FF\" # Supplemental Symbols and Pictographs\n \"\\U0001FA00-\\U0001FA6F\" # Chess Symbols\n \"\\U0001FA70-\\U0001FAFF\" # Symbols and Pictographs Extended-A\n \"\\U00002702-\\U000027B0\" # Dingbats\n \"\\U000024C2-\\U0001F251\"\n \"]+\"\n )\n\n # assert(isinstance(text,np.ndarray)) # always feed a numpy array\n\n return np.array([re.sub(emoji_pattern, \"\", str(string)) for string in text_array])", "def sanitise_array(data):\n array = np.array(data)\n\n if array.ndim == 0:\n array = array[np.newaxis, np.newaxis]\n elif array.ndim == 1:\n array = array[:, np.newaxis]\n elif array.ndim != 2:\n raise ValueError(f'Only 1/2 dimensional data can be saved to text files, data.shape = {array.shape}')\n\n return array", "def test_string_to_list_none(self):\n assert_equals(\n str_to_list(None),\n None\n )", "def decode_xarray_bytes(xdf):\n for col in list(xdf):\n if xdf[col].dtype == 'O':\n try:\n xdf[col] = xdf[col].astype(str)\n except:\n xdf[col] = xdf[col].str.decode('cp1252').str.strip()\n return xdf", "def _remove_nulls(self, params):\n\n if params is not None:\n return {key:value for key, value in params.items() if value is not None}\n\n return {}", "def set_nan_as_string(data, replace_str='0'):\n for i, x in enumerate(data):\n for key, value in x.items():\n if value == '':\n x[key] = replace_str\n data[i] = x", "def rstrip(self, chars=None):\n return asarray(rstrip(self, chars))", "def filter_none(elems):\n return [x for x in elems if x is not None]", "def test_string_to_list_none(self):\n\n assert_equals(\n str_to_list(None),\n None\n )", "def string_to_array(s):\n\n if isinstance(s, str):\n out = 
s.split(\"|\")\n elif math.isnan(s):\n out = []\n else:\n raise ValueError(\"Value must be either string of nan\")\n return out", "def type_array():\n return []", "def strip_null(arg,null=None):\n if null is None:\n null = NULL\n\n if type(arg) is types.ListType:\n return [i for i in arg if i not in null]\n elif type(arg) is types.TupleType:\n return tuple([i for i in arg if i not in null])\n elif type(arg) is type(set()):\n return arg.difference(set(null))\n elif type(arg) is types.DictType:\n return {key:value for key,value in arg.items() if value not in null}\n\n return arg", "def _is_unicode(arr):\n if (isinstance(arr, str) or\n issubclass(numpy.asarray(arr).dtype.type, str)):\n return True\n return False", "def remove_none_from_arrays(self):\r\n\r\n is_nan = numpy.isnan(self.y_values) # array of booleans, element is True if the corresponding element in\r\n # self.y_values is None\r\n\r\n self.x_values = self.x_values[numpy.logical_not(is_nan)]\r\n self.y_values = self.y_values[numpy.logical_not(is_nan)] # replace all None elements\r", "def _asarray(v):\n try:\n return np.asarray(v)\n except ValueError:\n return np.asarray(v, dtype=object)", "def force_utf8(data):\n if isinstance(data, unicode):\n return data.encode(\"utf-8\")\n elif isinstance(data, list):\n return [force_utf8(i) for i in data]\n elif isinstance(data, dict):\n return {force_utf8(i): force_utf8(data[i]) for i in data}\n return data", "def _parse_array(node, key):\r\n element = node.find(key)\r\n if element is not None:\r\n return element.text.split(',')\r\n else:\r\n return None", "def sanitized(self) -> list:\n\n return [np.NaN if x is NULL else x\n for x in self.parent.values]", "def sanitize_array(array):\n a = np.ravel(array)\n maxi = np.nanmax(a[np.isfinite(a)])\n mini = np.nanmin(a[np.isfinite(a)])\n array[array == float('inf')] = maxi\n array[array == float('-inf')] = mini\n mid = (maxi + mini) / 2\n array[np.isnan(array)] = mid\n return array", "def astype(self, dtype):\n return NoneArray", "def clean_all(text):\n # anticipate Null values in columns that will be cleaned\n if text is not None and type(text) is not float:\n text = \"\".join(text)\n no_ucode = clean_unicode(text)\n no_space = \"\".join(clean_whitespaces(no_ucode.strip()))\n text = no_space.strip()\n\n return text", "def to_xarray(self, searchString, remove_grib=True):\n print('nothing here yet')\n pass", "def pandas_series(arr, nan_to_null=False):\n import pandas as pd\n return pd.Series(arr, copy=False)", "def clean_ext_entry(entry, dtype):\n clean_str = entry[:-1].strip()\n split_str = clean_str.split(';')\n to_type = np.array(split_str).astype(dtype)\n return to_type", "def getStringArray2D(self) -> typing.List[typing.List[str]]:\n ...", "def _strip(obj):\n return obj.translate(None, STRIP_CHARS)", "def nullspace(A):\r\n u = null_space(A)\r\n if (u.size == 0):\r\n return [np.zeros((A.shape[1],), dtype = int)]\r\n else:\r\n return u", "def __build_array(string):\n ar = []\n tmp = string.split('.')\n\n for item in tmp:\n ar.append( item.strip().strip('[').strip(']').strip() )\n\n return ar", "def cleanup_raw_data(buf):\n raw = str(buf, encoding='iso-8859-1').strip()\n records = raw.splitlines()\n return records", "def replace_nan(arr, value):\n arr[np.isnan(arr)] = value\n return arr", "def _curate_data(data):\n new_data = []\n for row in data:\n flag = True\n new_row = []\n for col in row:\n if col == 'NA':\n flag = False\n break\n else:\n new_row.append(ast.literal_eval(col))\n\n if flag:\n new_data.append(new_row)\n\n return 
np.asarray(new_data)", "def clean_text(lines: np.ndarray) -> np.ndarray:\n clean_lines = np.empty(len(lines), dtype=object)\n for i, line in enumerate(lines):\n tokens = re.sub('[^a-z0-9ąčęėįšųūž ]+', ' ', line.lower()).split(' ')\n stemmed_tokens = [w for w in tokens if len(w) > 2]\n clean_lines[i] = ' '.join(stemmed_tokens)\n return clean_lines", "def sanitized(self) -> list:\n\n return [None if x is NULL else x\n for x in self.parent.values]", "def test_getArray_error_with_empty_value(self):\n\n # prepare\n fileName = \"empty\"\n expectedResult = \"\\n The file is empty \\n\"\n\n # execute\n actuatlResponse = PSPQuickSortInput.getArray(fileName)\n\n # assert\n self.assertTrue(actuatlResponse)", "def _get_blank_value_19(field):\n if field.null:\n return None\n else:\n return ''", "def test_None_to_json_String(self):\n json_string = Base.to_json_string(None)\n self.assertTrue(type(json_string) is str)\n self.assertEqual(json_string, \"[]\")", "def rstrip(a, chars=None):\n a_arr = numpy.asarray(a)\n return _vec_string(a_arr, a_arr.dtype, 'rstrip', (chars,))", "def lstrip(a, chars=None):\n a_arr = numpy.asarray(a)\n return _vec_string(a_arr, a_arr.dtype, 'lstrip', (chars,))", "def DISABLEDtest_datatype_sarscapedataarray_string(self):\n input = [ 'foo.bar', 'tea.leaves', 'sar' ]\n\n result = arcpy.qa_envitaskengine_datatype_sarscapedataarray_TEST(input)\n self.assertEqual(input, result[0].split(';'))", "def convert_nulls(dic, null_value):\n for key in dic.iterkeys():\n if dic[key] is None:\n dic[key] = null_value", "def convert_char_array_to_string(char_array):\n str = \"\"\n try:\n return str.join(char_array)\n except:\n return \"Error occured: variable non string\"" ]
[ "0.6056737", "0.60276425", "0.5736321", "0.56652236", "0.5625485", "0.55626565", "0.5436401", "0.5387418", "0.53866494", "0.53788817", "0.53714484", "0.5352443", "0.53399795", "0.53159595", "0.52804863", "0.5268372", "0.52583706", "0.52422833", "0.5236339", "0.5233802", "0.521342", "0.5211816", "0.5208082", "0.52009547", "0.5153881", "0.51484585", "0.51208675", "0.51208156", "0.50835496", "0.5041734", "0.5028336", "0.50153226", "0.4986129", "0.49598753", "0.49550098", "0.49544084", "0.49415466", "0.49132925", "0.49047405", "0.49043187", "0.48941618", "0.4893017", "0.4891233", "0.48842552", "0.48801535", "0.48453894", "0.48440298", "0.48388678", "0.4836567", "0.4825744", "0.4812652", "0.48105982", "0.48103884", "0.48051235", "0.48035115", "0.47763646", "0.47723383", "0.4764799", "0.4764799", "0.47642276", "0.47554228", "0.47505903", "0.47496614", "0.47404933", "0.47404003", "0.47295707", "0.47293895", "0.47210318", "0.47184095", "0.4714266", "0.47086623", "0.4699486", "0.46994343", "0.46945667", "0.4693783", "0.46825644", "0.46736062", "0.46699578", "0.46611744", "0.46354914", "0.46329907", "0.46278542", "0.46242446", "0.4624099", "0.4623975", "0.46153814", "0.46098316", "0.46050698", "0.45962685", "0.45934755", "0.459343", "0.4589849", "0.45878667", "0.45877802", "0.4582678", "0.45790452", "0.45762488", "0.4576194", "0.45737377", "0.45558482" ]
0.5096824
28
Return true if we should fast-skip converting a pa.Array. The _true_ reason for this function is to test whether an Array contains "Inf" or "NaN". A number-conversion library will parse those. But _this_ library is for Workbench, and Workbench doesn't support NaN/Inf. So this function helps us decide _not_ to autoconvert a column when the intent isn't perfectly clear. Assume `arr` is of type `utf8` or a dictionary of `utf8`. Assume there are no gaps hidden in null values in the buffer. (It's up to the caller to prove this.)
def _utf8_chunk_may_contain_inf_or_nan(chunk: pyarrow.Array) -> bool: _, offsets_buf, data_buf = chunk.buffers() offsets = array.array("i") assert offsets.itemsize == 4 offsets.frombytes(offsets_buf) if sys.byteorder != "little": offsets.byteswap() # pyarrow is little-endian offset0 = offsets[chunk.offset] offsetN = offsets[chunk.offset + len(chunk)] # len(offsets) == 1 + len(chunk) b = data_buf[offset0:offsetN].to_pybytes() return SCARY_BYTE_REGEX.search(b) is not None
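The function above never decodes individual values: it reads the chunk's offsets buffer to find the byte range the chunk covers, then runs one regex search over the raw UTF-8 data buffer. Below is a minimal, self-contained sketch of the same buffer-level check; the regex here is only a stand-in for the module's SCARY_BYTE_REGEX, which is not included in this record.

import array
import re
import sys

import pyarrow

# Assumption: loose stand-in for SCARY_BYTE_REGEX (case-insensitive "nan"/"inf")
SCARY_BYTE_REGEX = re.compile(rb"(?i)(nan|inf)")

def utf8_chunk_may_contain_inf_or_nan(chunk: pyarrow.Array) -> bool:
    _, offsets_buf, data_buf = chunk.buffers()  # validity, offsets, data
    offsets = array.array("i")  # int32 offsets for pyarrow.utf8()
    assert offsets.itemsize == 4
    offsets.frombytes(offsets_buf)
    if sys.byteorder != "little":
        offsets.byteswap()  # pyarrow buffers are little-endian
    start = offsets[chunk.offset]
    stop = offsets[chunk.offset + len(chunk)]
    return SCARY_BYTE_REGEX.search(data_buf[start:stop].to_pybytes()) is not None

print(utf8_chunk_may_contain_inf_or_nan(pyarrow.array(["1", "2.5", "3"])))  # False
print(utf8_chunk_may_contain_inf_or_nan(pyarrow.array(["1", "NaN", "3"])))  # True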
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def asarray_chkfinite(a):\n a = asarray(a)\n if (a.dtype.char in typecodes['AllFloat']) \\\n and (_nx.isnan(a).any() or _nx.isinf(a).any()):\n raise ValueError, \"array must not contain infs or NaNs\"\n return a", "def is_array(self, arr):\n return isinstance(arr, np.ndarray)", "def pyarrow_array(arr, nan_to_null=False):\n import numpy as np\n import pyarrow as pa\n if nan_to_null and issubclass(arr.dtype.type,\n (np.floating, np.complexfloating)):\n isnan = np.isnan(arr)\n if isnan.any():\n pa_nul = pa.py_buffer(get_bitmap(isnan))\n return pa.Array.from_buffers(pa.from_numpy_dtype(arr.dtype),\n arr.size,\n [pa_nul, pa.py_buffer(arr)])\n return pa.Array.from_buffers(pa.from_numpy_dtype(arr.dtype),\n arr.size,\n [None, pa.py_buffer(arr)])", "def is_array(self):\n return False", "def isfloatarray(cell):\n try:\n cell.astype(float)\n return True\n except ValueError:\n return False", "def sanitize_array(array):\n a = np.ravel(array)\n maxi = np.nanmax(a[np.isfinite(a)])\n mini = np.nanmin(a[np.isfinite(a)])\n array[array == float('inf')] = maxi\n array[array == float('-inf')] = mini\n mid = (maxi + mini) / 2\n array[np.isnan(array)] = mid\n return array", "def nonans(array):\n return array[~np.isnan(array)]", "def check_array(arr: Arrayable) -> np.ndarray:\n if isinstance(arr, np.ndarray):\n return arr\n return np.array(arr)", "def _is_double(arr):\n\n # Figure out which dtype for data\n if arr.dtype == np.float32:\n return False\n elif arr.dtype == np.float64:\n return True\n else:\n raise ValueError(\"Only float32 or float64 dtypes are supported\")", "def isscalar(array):\n arr = ma.array(array)\n if not hasattr(arr, '__len__') or arr.shape == () or len(arr) == 1:\n return True\n return False", "def isfillvalue(a):\n a = numpy.asarray(a)\n if a.dtype.kind == 'i':\n mask = a == -999999999\n elif a.dtype.kind == 'f':\n mask = numpy.isnan(a)\n elif a.dtype.kind == 'S':\n mask = a == ''\n else:\n raise ValueError('Fill value not known for dtype %s' % a.dtype)\n return mask", "def _is_unicode(arr):\n if (isinstance(arr, str) or\n issubclass(numpy.asarray(arr).dtype.type, str)):\n return True\n return False", "def filter_nans(seq):\n return np.array([x for x in seq if not isinstance(x, float)])", "def isscalar(x):\n arrayed_x = asarray(x)\n return asarray(x).ndim == 0 and arrayed_x.dtype != 'object'", "def is_sorted_array(arr, increasing=True):\n # If only 1\n if len(arr) == 0:\n return True\n # If multiple values\n if increasing:\n return np.all(np.diff(arr) >= 0)\n else:\n return np.all(np.diff(arr) <= 0)", "def is_array(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_array)", "def __isZeroEverywhere(self, array):\n epsilon = numpy.finfo( type(array[0]) ).eps\n boolList = numpy.less_equal(numpy.abs(array), epsilon)\n\n for b in boolList:\n if not b:\n return False\n return True", "def isna(self):\n # type: () -> np.ndarray\n return extract_isnull_bytemap(self.data)", "def isna(self):\n # type: () -> np.ndarray\n return extract_isnull_bytemap(self.data)", "def test_dtype_None(self):\n array = np.array([[0, 1, 2], [2, 1, 0]]).T\n self.assertTrue(to_ndarray(array, None, safe=True).flags.contiguous,\n msg='to_ndarray: Non contiguous arrays are not being consolidated when dtype is None')", "def is_array(t):\n return isinstance(t, ast.Array)", "def is_array(self):\n return len(self.descriptor) > 1", "def _is_number(data):\n return len(data) and np.issubdtype(_to_ndarray(data).dtype, np.number)", "def convert_non_monotonic_to_nan(array):\n keep = np.arange(0, len(array))\n is_monotonic = 
False\n while not is_monotonic:\n is_monotonic_array = np.hstack(\n (array[keep][1:] >= array[keep][:-1], np.array(True))\n )\n is_monotonic = is_monotonic_array.all()\n keep = keep[is_monotonic_array]\n out_array = np.full_like(array.astype(np.float), np.nan)\n out_array[keep] = array[keep]\n return out_array", "def __check_flat_array__(self):\n if self.flat_array is not None:\n return True\n else:\n return False", "def _autocast_column(data: pyarrow.ChunkedArray) -> pyarrow.ChunkedArray:\n # All-empty (and all-null) columns stay text\n for chunk in data.iterchunks():\n # https://arrow.apache.org/docs/format/Columnar.html#variable-size-binary-layout\n _, offsets_buf, _ = chunk.buffers()\n # If data has an offset, ignore what comes before\n #\n # We don't need to grab the _int_ offset: we can just look at the\n # byte-representation of it.\n offset_0_buf = offsets_buf[chunk.offset * 4 : (chunk.offset + 1) * 4]\n # last offset isn't always the last 4 bytes: there can be padding\n offset_n_buf = offsets_buf[\n (chunk.offset + len(chunk)) * 4 : (chunk.offset + len(chunk) + 1) * 4\n ]\n if offset_0_buf.to_pybytes() != offset_n_buf.to_pybytes():\n # there's at least 1 byte of text. (Assumes the CSV reader doesn't\n # pad the buffer with gibberish.)\n break\n else:\n # there are 0 bytes of text\n return data\n\n # Convert \"\" => null, so pyarrow cast() won't balk at it.\n sane = pyarrow.chunked_array(\n [_nix_utf8_chunk_empty_strings(chunk) for chunk in data.iterchunks()]\n )\n\n for chunk in sane.iterchunks():\n # pyarrow cast() uses double-conversion, so it parses \"NaN\" and \"Inf\"\n # as doubles. Workbench doesn't support NaN or Inf, so don't convert to\n # them.\n if _utf8_chunk_may_contain_inf_or_nan(chunk):\n return data\n\n try:\n numbers = sane.cast(pyarrow.float64())\n except pyarrow.ArrowInvalid:\n # Some string somewhere wasn't a number\n return data\n\n # Test that there's no infinity. We'll use numpy. .to_numpy() with\n # zero_copy_only=False will convert nulls to NaN. That's fine, since we\n # know `numbers` has no NaN values (because `cast()` would have raised\n # rather than return a NaN.)\n for chunk in numbers.iterchunks():\n npchunk = chunk.to_numpy(zero_copy_only=False)\n if np.inf in npchunk or -np.inf in npchunk:\n # Numbers too large\n return data\n\n # Downcast integers, when possible.\n #\n # We even downcast float to int. 
Workbench semantics say a Number is a\n # Number; so we might as well store it efficiently.\n try:\n # Shrink as far as we can, until pyarrow complains.\n #\n # pyarrow will error \"Floating point value truncated\" if a conversion\n # from float to int would be lossy.\n #\n # We'll return the last _successful_ `numbers` result.\n numbers = numbers.cast(pyarrow.int32())\n numbers = numbers.cast(pyarrow.int16())\n numbers = numbers.cast(pyarrow.int8())\n except pyarrow.ArrowInvalid:\n pass\n\n return numbers", "def _numba_not_in_array(vector: np.ndarray, array: np.ndarray, delta: float = 1e-4) -> bool:\n diff = np.abs(array - vector)\n for idx in range(array.shape[0]):\n localdiff = np.max(diff[idx, :])\n if localdiff < delta:\n return False\n\n return True", "def remove_nans(arr):\n not_nan = [i for i in range(len(arr)) if not np.isnan(arr[i])]\n\n return not_nan, arr[not_nan]", "def is_array_type(an_array, atype):\n tmp = [i for i in an_array if not isinstance(i, atype)]\n return len(tmp) == 0", "def isinf(data):\n return _make.isinf(data)", "def __skip_null_values(self, row, index):\n # If there is no value at the row index,\n # Return True\n # Return False if there is a value\n if row[index] == \"\":\n return True\n else:\n return False", "def is_arrayexpress_array(val):\n return arrayexpress_array_regexp.match(val)", "def _convert_data(data, inf_ok=False, min_len=1):\n # If it's scalar, convert to array\n if np.isscalar(data):\n data = np.array([data], dtype=float)\n\n # Convert data to NumPy array\n data = np.array(data, dtype=float)\n\n # Make sure it is 1D\n if len(data.shape) != 1:\n raise RuntimeError(\"Input must be a 1D array or Pandas series.\")\n\n # Remove NaNs\n data = data[~np.isnan(data)]\n\n # Check for infinite entries\n if not inf_ok and np.isinf(data).any():\n raise RuntimeError(\"All entries must be finite.\")\n\n # Check to minimal length\n if len(data) < min_len:\n raise RuntimeError(\n \"Array must have at least {0:d} non-NaN entries.\".format(min_len)\n )\n\n return data", "def isfinite(self):\n return not self.isAny( (lambda x: not np.isfinite(x)) )", "def not_in_array(vector: np.ndarray, array: np.ndarray, delta: float = 1e-4) -> bool:\n\n if len(array) == 0 or len(vector) == 0:\n return False\n\n try:\n return _numba_not_in_array(vector, array, delta)\n except TypeError:\n diff = np.min(np.max(np.abs(vector - array), axis=1))\n return (diff > delta)", "def _check_nan_array(array):\n # count nan\n mask = np.isnan(array)\n x = mask.sum()\n\n # check the NaN values of the array\n if x > 0:\n raise ValueError(\"Array has {0} NaN values.\".format(x))", "def is_pointless(self):\n return self._format() in ('[0, ]', '[, ]', '(-inf, +inf)')", "def isarray(a):\n try:\n validity=isinstance(a,ndarray)\n except:\n validity=False\n\n return validity", "def is_array(a):\n try:\n shape = a.shape\n return len(shape) >= 1\n except AttributeError:\n return False", "def _has_unicode_fields(array):\n dtypes = (d[0] for d in array.dtype.fields.values())\n return any(d.kind == \"U\" for d in dtypes)", "def really1d(arr):\n if np.ndim(arr) != 1:\n return False\n # Empty list or array\n if len(arr) == 0:\n return True\n if np.any(np.vectorize(np.ndim)(arr)):\n return False\n return True", "def canMakeArithmeticProgression(arr): \n new_arr = sorted(arr)\n diff = new_arr[1] - new_arr[0]\n for idx, num in enumerate(new_arr):\n if idx == 0:\n pass\n elif num - new_arr[idx - 1] != diff:\n return False\n return True", "def not_a_num(val):\n if math.isnan(val):\n return False\n else:\n return 
True", "def any_allowed_skipna_inferred_dtype(request):\n inferred_dtype, values = request.param\n values = np.array(values, dtype=object) # object dtype to avoid casting\n\n # correctness of inference tested in tests/dtypes/test_inference.py\n return inferred_dtype, values", "def _is_key_value_array(self, data):\n for d in data:\n if not self._is_key_value(d):\n return False\n return True", "def _mixed_precision_enabled_for_buffers(self) -> bool:\n return self.mixed_precision.buffer_dtype is not None", "def isarray(a):\r\n try:\r\n validity = isinstance(a, ndarray)\r\n except:\r\n validity = False\r\n\r\n return validity", "def mono(arr, type='g', axis=-1):\n arr = np.atleast_1d(arr)\n if arr.size == 1: return True\n good_type = ['g', 'ge', 'l', 'le', 'e']\n assert type in good_type, \"Type '%s' Unrecognized.\" % (type)\n # Retrieve the numpy comparison function (e.g. np.greater) for the given `type` (e.g. 'g')\n func = _comparisonFunction(type)\n delta = np.diff(arr, axis=axis)\n retval = np.all(func(delta, 0.0))\n return retval", "def IsMissingField(field, arr):\n\tfor a in arr:\n\t\ttemp = a[0:3]\n\t\tif temp == field:\n\t\t\treturn False\n\treturn True", "def can_insert(data):\n if not isinstance(data, np.ndarray):\n return False\n if data.dtype.char in UNSUPPORTED_NUMERIC_TYPE_CODES:\n return False\n return np.issubdtype(data.dtype, np.number)", "def on_disk(self):\n return isinstance(self._subarray, FileArray)", "def backfill(arr, arr1):\n \n arr = np.where(arr < 0.01, np.NaN, arr)\n # FIXME:\n # RuntimeWarning: invalid value encountered in less\n # arr = np.where(arr < 0.01, np.NaN, arr)\n\n x = np.isnan(arr1)\n arr1[x] = arr[x]\n return arr1", "def enforce_dtype(arr, dtype, msg=\"\"):\n if isinstance(arr, np.ndarray):\n if arr.dtype is not np.dtype(dtype):\n log_debug(\"enforcing dtype for array %s %s\" % (str(arr.dtype), msg))\n return np.array(arr, dtype)\n return arr", "def is_integer(value: Union[float, np.ndarray]) -> bool:\n if type(value) == np.ndarray:\n for entry in value:\n result = Comparator.is_integer(entry)\n if not result:\n return False\n return True\n else:\n value = abs(value)\n value -= int(value)\n if value > 0.5:\n return Comparator.is_close_to_zero(1 - value)\n return Comparator.is_close_to_zero(value)", "def _chk_asarray(a, axis):\r\n if axis is None:\r\n a = ravel(a)\r\n outaxis = 0\r\n else:\r\n a = asarray(a)\r\n outaxis = axis\r\n return a, outaxis", "def data_missing_for_sorting(allow_in_pandas):\n return PandasArray(\n np.array([(1,), np.nan, (0,)])\n )", "def is_normalized(self) -> bool:\n return is_date_array_normalized(self.asi8, self.tz, reso=self._creso)", "def _infer_fill_value(val):\n\n if not is_list_like(val):\n val = [val]\n val = np.array(val, copy=False)\n if is_datetimelike(val):\n return np.array('NaT', dtype=val.dtype)\n elif is_object_dtype(val.dtype):\n dtype = lib.infer_dtype(_ensure_object(val))\n if dtype in ['datetime', 'datetime64']:\n return np.array('NaT', dtype=_NS_DTYPE)\n elif dtype in ['timedelta', 'timedelta64']:\n return np.array('NaT', dtype=_TD_DTYPE)\n return np.nan", "def is_array(type):\n nake_type = remove_alias(type)\n nake_type = remove_reference(nake_type)\n nake_type = remove_cv(nake_type)\n return isinstance(nake_type, cpptypes.array_t)", "def is_not_constant(series: np.ndarray) -> bool:\n #print(\"enter bartpy/bartpy/data.py is_not_constant\")\n \n if len(series) <= 1:\n #print(\"-exit bartpy/bartpy/data.py is_not_constant\")\n return False\n first_value = None\n for i in range(1, len(series)):\n # if not 
series.mask[i] and series.data[i] != first_value:\n if series[i] != first_value:\n if first_value is None:\n first_value = series.data[i]\n else:\n #print(\"-exit bartpy/bartpy/data.py is_not_constant\")\n return True\n #print(\"-exit bartpy/bartpy/data.py is_not_constant\")\n return False", "def ReplaceInvalid(arr, max_value=None):\n with np.warnings.catch_warnings():\n np.warnings.filterwarnings('ignore')\n arr[arr < 0.0] = np.nan\n if max_value:\n arr[arr > max_value] = np.nan", "def pandas_series(arr, nan_to_null=False):\n import pandas as pd\n return pd.Series(arr, copy=False)", "def is_real(self) -> np.ndarray:\n return np.all(np.isclose(self.v, np.zeros_like(self.v)), axis=1)", "def test_asarraylike_array():\n arr = np.array([1, 2, 3, 4])\n result = util.asarraylike(arr)\n\n assert result is arr", "def is_arraylike(obj):\n if isinstance(obj, list):\n return True\n elif isinstance(obj, np.ndarray):\n return True\n elif isinstance(obj, pd.Series):\n return True\n elif isinstance(obj, pd.DataFrame):\n return True\n return False", "def test_nan_keyword(self):\n # If array has any nan's then the output will return all zeros\n array = self.array1.copy()\n array[0,0] = numpy.nan\n byt = bytscl(array, nan=True)\n total = numpy.sum(byt)\n self.assertTrue(total != 0)", "def _maybe_fill(arr, fill_value=np.nan):\n if _is_na_compat(arr, fill_value):\n arr.fill(fill_value)\n return arr", "def isnan(self):\n return self.isAny( (lambda x: np.isnan(x)) )", "def isfinite(data):\n return _make.isfinite(data)", "def _check_all_finite(X):\n # First try an O(n) time, O(1) space solution for the common case that\n # everything is finite; fall back to O(n) space np.isfinite to prevent\n # false positives from overflow in sum method.\n try:\n if (X.dtype.char in np.typecodes['AllFloat'] and not\n np.isfinite(X.sum()) and not np.isfinite(X).all()):\n return False\n else:\n return True\n\n except Exception as e:\n warnings.warn('Could not check array for all finite. Ensure X is an'\n 'array type, and consider converting to an ndarray or'\n 'scipy sparse array. 
Details:\\n%r' % e, InputDataWarning)", "def test_differencer_remove_missing_false(y, lags, na_handling, index_type):\n if index_type == \"int\":\n y = y.reset_index(drop=True)\n\n transformer = Differencer(lags=lags, na_handling=na_handling)\n y_transform = transformer.fit_transform(y)\n\n # if na_handling is fill_zero, get rid of the zeros for reconstruction\n if na_handling == \"fill_zero\":\n y_transform = y_transform[24:]\n y = y[24:]\n\n y_reconstructed = transformer.inverse_transform(y_transform)\n\n _assert_array_almost_equal(y, y_reconstructed)", "def _round_if_needed(arr, dtype):\n if np.issubdtype(dtype, np.integer):\n arr.round(out=arr)", "def filter_lorentz(lorentz_array):\n if lorentz_array is None:\n return False\n else:\n return lorentz_array.shape[1] == 4", "def isFloat(data):\n\tif type(data) == list or type(data) == np.ndarray:\n\t\tcol = pd.Series(data)\n\telse:\n\t\tcol = data\n\treturn col.dtype == np.float32 or col.dtype == np.float64", "def isna(self):\n size = len(self.data)\n isnull_byte_map = np.zeros(size, dtype=bool)\n for i in range(size):\n if self.data[i] is None:\n isnull_byte_map[i] = 1\n\n return isnull_byte_map", "def isscalar(self):\n return not self.axes", "def nozero(arr):\n vals=sorted(list(set(np.array(arr).flatten())))\n if vals[0]<0:\n print(\"correcting for div/zero by replacing 0 with\",vals[1])\n arr[arr==0]=vals[1]\n return arr", "def NeedsArray(self, type_):\n return self._NameComponents(type_) in self._array_types", "def pd_isnan(val):\n return val is None or val != val", "def can_insert(data):\n types = (float, complex, int, np.long)\n if isinstance(data, types) and not isinstance(data, bool):\n return True\n elif isinstance(data, np.number):\n return data.dtype.char not in UNSUPPORTED_NUMERIC_TYPE_CODES", "def in_memory(self):\n return hasattr(self._subarray, \"__array_interface__\")", "def _check_array(self, X):\n x = np.copy(X)\n if np.isfortran(x) is False:\n # print (\"Array must be in Fortran-order. 
Converting now.\")\n x = np.asfortranarray(x)\n if self.sampling > x.shape:\n raise ValueError(\"'sampling' is greater than the dimensions of X\")\n return x", "def _datacopied(arr, original):\n if arr is original:\n return False\n if not isinstance(original, np.ndarray) and hasattr(original, '__array__'):\n return False\n return arr.base is None", "def _datacopied(arr, original):\n if arr is original:\n return False\n if not isinstance(original, numpy.ndarray) and hasattr(original, '__array__'):\n return False\n return arr.base is None", "def thin_array(arr):\n # Algorithm: throw away any 1 which is at the corner of a 2x2 square of 1s.\n changed = True\n while changed:\n changed = False\n for r, row in enumerate(arr):\n if r == 0 or r == len(arr) - 1:\n continue\n for c, cell in enumerate(row):\n # If at the top-left corner of a 2x2 square of 1s:\n if c == 0 or c == len(row) - 1:\n continue\n if cell == 0:\n continue\n if row[c-1] == 0 and row[c+1] == 1 and cell == 1:\n if arr[r-1][c] == 0 and arr[r+1][c] == 1:\n if arr[r+1][c+1] == 1:\n # bonanza!\n arr[r][c] = 0\n changed = True\n print('\\n'.join(str_row(row) for row in arr))\n\n # Algorithm: if a 1 is surrounded by four 0's, keep it.\n # If a 1 is surrounded by three 0's, keep it.\n # If a 1 is surrounded by two 0's, at opposite edges, keep it.\n # If a 1 is next to only one 0,\n # If a 1 is entirely surrounded by 1's, keep it.", "def is_null(self):\n return self.begin >= self.end", "def is_null(self):\n return self.begin >= self.end", "def fixval(arr, repval, retarr=False):\n # 2009-09-02 14:07 IJC: Created\n # 2012-12-23 11:49 IJMC: Halved run time.\n\n if retarr:\n arr2 = arr.ravel().copy()\n else:\n arr2 = arr.ravel()\n\n finiteIndex = np.isfinite(arr2)\n if not finiteIndex.any():\n badIndex = find((1-finiteIndex))\n arr2[badIndex] = repval\n\n if retarr:\n return arr2.reshape(arr.shape)\n else:\n return", "def is_continuous(series: List) -> bool:\n\n if series.dtype in [\n np.int16,\n np.int32,\n np.int64,\n np.float16,\n np.float32,\n np.float64,\n int,\n float,\n ]:\n if (\n len(series.astype(int).unique()) / len(series) == 1\n or \"id\" == series.name.lower()\n ):\n return False\n\n elif sorted(series.unique()) == [0, 1]:\n return False\n elif len(series.unique()) == 1:\n return False\n\n else:\n return True\n else:\n\n return False", "def trajectory_is_finite(trajectory):\n\tfor point in trajectory.points:\n\t\tfor position in point.positions:\n\t\t\tif math.isinf(position) or math.isnan(position):\n\t\t\t\treturn False\n\t\tfor velocity in point.velocities:\n\t\t\tif math.isinf(velocity) or math.isnan(velocity):\n\t\t\t\treturn False\n\treturn True", "def is_nan(self, row_data):\n return math.isnan(row_data)", "def arrNotInArrList(arr, arrList):\n a = np.array(arr)\n for item in arrList:\n item = np.array(item)\n if np.array_equiv(item, a):\n return False\n return True", "def _is_1d_varray(arr):\r\n return len(arr.shape) < 2 or arr.shape[1] == 1", "def check_np_array_nan(func):\r\n\r\n @functools.wraps(func)\r\n def wrapper(*args, **kwargs):\r\n result = func(*args, **kwargs)\r\n if type(result) in [tuple, list]:\r\n count = 0\r\n for an_array in result:\r\n if type(an_array) is dict:\r\n for key in an_array:\r\n if np.isnan(an_array[key]).any():\r\n hydro_logger.warning(\r\n \"Please check your input data: there are NaN data! 
It may affect following calculation!!\\n \"\r\n \"The location of NaN values in the \"\r\n + str(count)\r\n + \"-th dict are:\\n\"\r\n )\r\n hydro_logger.warning(\"value of \" + key + \":\\n\")\r\n hydro_logger.warning(np.argwhere(np.isnan(an_array[key])))\r\n else:\r\n if np.isnan(an_array).any():\r\n hydro_logger.warning(\r\n \"Please check your input data: there are NaN data! It may affect following calculation!!\\n \"\r\n \"The location of NaN values in the \"\r\n + str(count)\r\n + \"-th array are:\\n\"\r\n )\r\n hydro_logger.warning(np.argwhere(np.isnan(an_array)))\r\n count = count + 1\r\n elif type(result) is np.array:\r\n if np.isnan(result).any():\r\n hydro_logger.warning(\r\n \"Please check your input data: there are NaN data! It may affect following calculation!!\\n \"\r\n \"The location of NaN values are:\\n\"\r\n )\r\n hydro_logger.warning(np.argwhere(np.isnan(result)))\r\n return result\r\n\r\n return wrapper", "def isscalar(self):\n return not bool(self.shape)", "def locate_nan_rows(arr):\n # Count the number of NaNs in each row\n nan_counts = np.sum(~np.isfinite(arr), axis=1)\n # Trigger on a NaN appearing anywhere in a line/row\n nans, = np.where(nan_counts > 1)\n return frozenset(nans)", "def _use_arrow() -> bool:\n # Explicitly coerce to bool here because mypy is (incorrectly) complaining\n # that we're trying to return 'Any'.\n return bool(config.get_option(\"global.dataFrameSerialization\") == \"arrow\")", "def in_cached_file(self):\n return isinstance(self._subarray, CachedArray)", "def is_mountain_array(self, a):\r\n n = len(a)\r\n if n < 3:\r\n return False\r\n # Invalidate monotonic slopes\r\n elif (a[0] > a[1] or\r\n a[n - 2] < a[n - 1]):\r\n return False\r\n\r\n p = None\r\n for i in range(0, n - 1):\r\n\r\n # Search for local maxima\r\n if p is None:\r\n if a[i] > a[i + 1]:\r\n p = i\r\n if a[i] == a[i + 1]:\r\n return False\r\n\r\n # Confirm maxima as global maxima\r\n else:\r\n if a[i] <= a[i + 1]:\r\n return False\r\n\r\n return True", "def is_array(val):\n return (\n isinstance(val, tuple) or \\\n isinstance(val, dict) or \\\n isinstance(val, list)\n )" ]
[ "0.63142204", "0.59511065", "0.59251046", "0.5863669", "0.5700599", "0.5661153", "0.5581066", "0.54970616", "0.54685277", "0.54147017", "0.53897524", "0.5384138", "0.53668594", "0.5293467", "0.52856606", "0.527953", "0.5257239", "0.5248469", "0.5248469", "0.5215622", "0.5214552", "0.5208751", "0.5203614", "0.5202484", "0.5198838", "0.5193919", "0.5189366", "0.5188746", "0.5166378", "0.5151065", "0.5150715", "0.5113811", "0.51121247", "0.5092918", "0.50574017", "0.5031536", "0.5023751", "0.4974188", "0.49710858", "0.49501473", "0.4941934", "0.4941791", "0.4908937", "0.48817545", "0.4858084", "0.48401442", "0.4836846", "0.48362005", "0.48349297", "0.4833922", "0.4827712", "0.4827587", "0.481302", "0.4797446", "0.47908837", "0.4780888", "0.47779173", "0.4773159", "0.4771351", "0.47478172", "0.47465932", "0.47408104", "0.47364107", "0.47230977", "0.47213367", "0.4718314", "0.47170666", "0.471318", "0.46990225", "0.4698543", "0.46934554", "0.46915248", "0.46872962", "0.46798098", "0.46760356", "0.4674317", "0.46735588", "0.46707052", "0.46660113", "0.46627653", "0.4660738", "0.4656856", "0.46536893", "0.4647143", "0.4638803", "0.46259135", "0.46259135", "0.4623568", "0.4622985", "0.46203443", "0.46158746", "0.46080184", "0.45849746", "0.45835337", "0.45819953", "0.4581449", "0.4579174", "0.45780462", "0.45772702", "0.45746973" ]
0.60420185
1
Convert `data` to float64 or int(64|32|16|8); as fallback, return `data`. Assume `data` is of type `utf8` or a dictionary of utf8.
def _autocast_column(data: pyarrow.ChunkedArray) -> pyarrow.ChunkedArray: # All-empty (and all-null) columns stay text for chunk in data.iterchunks(): # https://arrow.apache.org/docs/format/Columnar.html#variable-size-binary-layout _, offsets_buf, _ = chunk.buffers() # If data has an offset, ignore what comes before # # We don't need to grab the _int_ offset: we can just look at the # byte-representation of it. offset_0_buf = offsets_buf[chunk.offset * 4 : (chunk.offset + 1) * 4] # last offset isn't always the last 4 bytes: there can be padding offset_n_buf = offsets_buf[ (chunk.offset + len(chunk)) * 4 : (chunk.offset + len(chunk) + 1) * 4 ] if offset_0_buf.to_pybytes() != offset_n_buf.to_pybytes(): # there's at least 1 byte of text. (Assumes the CSV reader doesn't # pad the buffer with gibberish.) break else: # there are 0 bytes of text return data # Convert "" => null, so pyarrow cast() won't balk at it. sane = pyarrow.chunked_array( [_nix_utf8_chunk_empty_strings(chunk) for chunk in data.iterchunks()] ) for chunk in sane.iterchunks(): # pyarrow cast() uses double-conversion, so it parses "NaN" and "Inf" # as doubles. Workbench doesn't support NaN or Inf, so don't convert to # them. if _utf8_chunk_may_contain_inf_or_nan(chunk): return data try: numbers = sane.cast(pyarrow.float64()) except pyarrow.ArrowInvalid: # Some string somewhere wasn't a number return data # Test that there's no infinity. We'll use numpy. .to_numpy() with # zero_copy_only=False will convert nulls to NaN. That's fine, since we # know `numbers` has no NaN values (because `cast()` would have raised # rather than return a NaN.) for chunk in numbers.iterchunks(): npchunk = chunk.to_numpy(zero_copy_only=False) if np.inf in npchunk or -np.inf in npchunk: # Numbers too large return data # Downcast integers, when possible. # # We even downcast float to int. Workbench semantics say a Number is a # Number; so we might as well store it efficiently. try: # Shrink as far as we can, until pyarrow complains. # # pyarrow will error "Floating point value truncated" if a conversion # from float to int would be lossy. # # We'll return the last _successful_ `numbers` result. numbers = numbers.cast(pyarrow.int32()) numbers = numbers.cast(pyarrow.int16()) numbers = numbers.cast(pyarrow.int8()) except pyarrow.ArrowInvalid: pass return numbers
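A minimal sketch of the cast-then-downcast chain used above, assuming a plain utf8 column with no empty strings and no "NaN"/"Inf" tokens (the preconditions the full function verifies before casting); variable names here are illustrative only.

import pyarrow

text = pyarrow.chunked_array([pyarrow.array(["1", "2", "300"])])

numbers = text.cast(pyarrow.float64())  # parse every string as a double
for int_type in (pyarrow.int32(), pyarrow.int16(), pyarrow.int8()):
    try:
        numbers = numbers.cast(int_type)  # keep shrinking while lossless
    except pyarrow.ArrowInvalid:
        break  # keep the last successful cast

print(numbers.type)  # int16 -- 300 does not fit in int8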
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def try_float(data):\n try:\n return float(data)\n except (ValueError, TypeError ):\n return data", "def float_from_string(data):\n return float(maybe_number(data))", "def _float(data):\n try:\n return float(data)\n except ValueError as err:\n if data in ('None', 'NA', 'nan'):\n return nan\n else:\n raise ValueError(err)", "def read_float(data):\n s_type = \"=%s\" % get_type(\"float\")\n return struct.unpack(s_type, data.read(4))[0]", "def try_int(data):\n try:\n return int(data)\n except (ValueError, TypeError ):\n return data", "def convert_data_type(value: str):\n try:\n f = float(value)\n if f.is_integer():\n return int(f)\n return f\n except ValueError:\n return value", "def data_convert2float32 (self, data):\r\n data = data.astype(np.float32)\r\n\r\n return data", "def castData(data, type='int64'):\n data = data.astype(type)\n return data", "def FixFloats(Data):\n\n if isinstance(Data, dict):\n return { k: FixFloats(v) for k, v in Data.items() }\n\n if isinstance(Data, list):\n return [ FixFloats(v) for v in Data ]\n\n if isinstance(Data, float): # Only floats being changed here\n return str(Data)\n\n return Data", "def _decode_value(data):\n\n if type(data) is tuple:\n data = data[0]\n\n # Key does not exist\n if data == '0' or data == \"\":\n return None\n \n elif data[0] == _PREFIX:\n\n encoding = data[:2]\n value = data[2:]\n\n if encoding == _TYPE_DOUBLE or encoding == _TYPE_DOUBLE_C:\n return float(value)\n elif encoding == _TYPE_STRING or encoding == _TYPE_STRING_C:\n return value\n elif encoding == _TYPE_INT or encoding == _TYPE_INT_C:\n return int(value)\n elif encoding == _TYPE_BOOL or encoding == _TYPE_BOOL_C:\n return value == \"true\"\n else:\n return data\n\n elif data.startswith(\"<elsystem.collections.vector>\"):\n return _decode_vector(data)\n elif data.startswith(\"<elsystem.collections.dictionary>\"):\n return _decode_dictionary(data)\n else:\n return data", "def to_float(data, **kwargs):\n return Component(\n \"ToFloat\",\n arguments={\n 'data': Component.of(data)\n },\n options={\n \n },\n constraints=kwargs)", "def replace_comma(data):\n try:\n aux = np.float32(data.replace(',', '.'))\n except AttributeError as err:\n print('No string. 
Convert to numeric')\n aux = np.float32(data)\n return aux", "def _maybe_convert_to_number(v: Any) -> Any:\n try:\n return int(v)\n except Exception:\n pass\n\n try:\n return float(v)\n except Exception:\n pass\n\n return v", "def _fixtypes(data):\n if 'probability' in data:\n data['probability'] = float(data['probability'])\n return data", "def convertData(data):\n\n return data", "def _convert_python_data(data):\n if isinstance(data, Tensor) and data.adapter_flag:\n return ms_adapter_registry.tensor(data)\n if isinstance(data, Tensor) and not isinstance(data, PythonTensor):\n return PythonTensor(data, internal=True)\n if isinstance(data, CSRTensor) and not isinstance(data, PythonCSRTensor):\n return PythonCSRTensor(csr_tensor=data)\n if isinstance(data, COOTensor) and not isinstance(data, PythonCOOTensor):\n return PythonCOOTensor(coo_tensor=data)\n if isinstance(data, RowTensor) and not isinstance(data, PythonRowTensor):\n return PythonRowTensor(row_tensor=data)\n if isinstance(data, tuple):\n return tuple(_convert_python_data(x) for x in data)\n if isinstance(data, list):\n # Keep list object not change for inplace operation.\n for i in range(len(data)):\n data[i] = _convert_python_data(data[i])\n return data\n if isinstance(data, dict):\n return dict((_convert_python_data(key), _convert_python_data(value)) for key, value in data.items())\n return data", "def canonicalize(data):\n\n if isinstance(data, np.ndarray):\n if data.dtype == np.uint8:\n data = data.astype(np.float32) / 255.0\n data = torch.from_numpy(np.ascontiguousarray(data))\n\n elif isinstance(data, torch.Tensor):\n if data.dtype == torch.uint8:\n data = data.float() / 255.0\n\n else:\n raise NotImplementedError()\n\n return data", "def float_to_int(data, digits=None, dtype=np.int32):\n # convert to any numpy array\n data = np.asanyarray(data)\n\n # if data is already an integer or boolean we're done\n # if the data is empty we are also done\n if data.dtype.kind in 'ib' or data.size == 0:\n return data.astype(dtype)\n\n # populate digits from kwargs\n if digits is None:\n digits = decimal_to_digits(1e-8)\n elif isinstance(digits, float) or isinstance(digits, np.float):\n digits = decimal_to_digits(digits)\n elif not (isinstance(digits, int) or isinstance(digits, np.integer)):\n log.warn('Digits were passed as %s!', digits.__class__.__name__)\n raise ValueError('Digits must be None, int, or float!')\n\n # data is float so convert to large integers\n data_max = np.abs(data).max() * 10**digits\n # ignore passed dtype if we have something large\n dtype = [np.int32, np.int64][int(data_max > 2**31)]\n # multiply by requested power of ten\n # then subtract small epsilon to avoid \"go either way\" rounding\n # then do the rounding and convert to integer\n as_int = np.round((data * 10 ** digits) - 1e-6).astype(dtype)\n\n return as_int", "def toInteger(data):\n\tif isInteger(data):\n\t\treturn data\n\telse:\n\t\treturn ord(data)", "def _decode_35708(data):\n start_byte = 0\n n_bytes = 2\n var_id = struct.unpack('<H', data[start_byte:start_byte + n_bytes])[0]\n if var_id == 29974:\n start_byte += n_bytes\n n_bytes = 4\n var_size = struct.unpack('<I', data[start_byte:\n start_byte + n_bytes])[0]\n start_byte += n_bytes\n n_bytes = var_size\n\n return np.frombuffer(data[start_byte:start_byte + n_bytes],\n dtype=np.float64)", "def _convert_data(self, data):\n if isinstance(data, Tensor):\n data = data.asnumpy()\n elif isinstance(data, list):\n data = np.array(data)\n elif isinstance(data, np.ndarray):\n pass\n else:\n raise 
TypeError('Input data type must be tensor, list or numpy.ndarray')\n return data", "def _convert_to_float(s):\n try:\n return float(s)\n except:\n return s", "def format_float(data):\n try:\n return format(float(data), '.1f')\n except:\n return None", "def convert(data):\n result = None\n if isinstance(data, bytes):\n try:\n result = json.loads(data.decode(CODING))\n except:\n print('wrong data format!')\n finally:\n return result\n elif isinstance(data, dict):\n try:\n result = json.dumps(data).encode(CODING)\n except:\n print('wrong data format!')\n finally:\n return result\n else:\n print('wrong data format!')\n return result", "def read_double(data):\n s_type = \"=%s\" % get_type(\"double\")\n return struct.unpack(s_type, data.read(8))[0]", "def optimal_data_mode(data: str) -> DATA_MODE:\n numeric_regex = r\"^\\d+$\"\n alphanum_regex = r\"^[A-Z0-9 $%*+\\-./:]+$\"\n\n if re.match(numeric_regex, data):\n return DATA_MODE.Numeric\n elif re.match(alphanum_regex, data):\n return DATA_MODE.Alphanumeric\n\n # check that we can encode in iso8859 (if not, exit program until ECI is implemented)\n try:\n data.encode(\"iso8859\")\n return DATA_MODE.Byte\n except UnicodeEncodeError:\n print(\"Error: Invalid character(s) in string\")\n exit(1)", "def _convert_data(data, inf_ok=False, min_len=1):\n # If it's scalar, convert to array\n if np.isscalar(data):\n data = np.array([data], dtype=float)\n\n # Convert data to NumPy array\n data = np.array(data, dtype=float)\n\n # Make sure it is 1D\n if len(data.shape) != 1:\n raise RuntimeError(\"Input must be a 1D array or Pandas series.\")\n\n # Remove NaNs\n data = data[~np.isnan(data)]\n\n # Check for infinite entries\n if not inf_ok and np.isinf(data).any():\n raise RuntimeError(\"All entries must be finite.\")\n\n # Check to minimal length\n if len(data) < min_len:\n raise RuntimeError(\n \"Array must have at least {0:d} non-NaN entries.\".format(min_len)\n )\n\n return data", "def convert_to_float(s):\n try:\n return float(s)\n except TypeError:\n return None", "def _ensure_number(value):\n assert isinstance(value, (bytes, float, int)), \"value has to be either bytes or float or int\"\n\n return int.from_bytes(value, byteorder=\"big\") if type(value) is bytes else value", "def convertFloat(s):\n try:\n float(s)\n return \"FLOAT\"\n except:\n return s", "def _convert_data(self, data):\n if isinstance(data, np.ndarray):\n data = data.astype(float_save(), copy=False)\n if self.compress:\n return KaldiCompressedMatrix.compress(data, self.compression_method)\n return KaldiMatrix(data)\n\n if isinstance(data, KaldiMatrix):\n if self.compress:\n return KaldiCompressedMatrix.compress(data, self.compression_method)\n return data\n\n if isinstance(data, KaldiCompressedMatrix):\n if not self.compress:\n return data.to_matrix()\n return data\n\n raise ValueError(\"Data is not ndarray or KaldiMatrix\")", "def data_to_num3072(data):\n bytes384 = chacha20_32_to_384(data)\n return int.from_bytes(bytes384, 'little')", "def _deserialize_primitive(data, klass):\n try:\n value = klass(data)\n except UnicodeEncodeError:\n value = unicode(data)\n except TypeError:\n value = data\n return value", "def _cast(d):\n if d[u\"type\"] in (u\"uri\", u\"bnode\", u\"literal\"):\n return d[u\"value\"]\n elif d[u\"type\"] == u\"typed-literal\":\n if d[u\"datatype\"] == u\"http://www.w3.org/2001/XMLSchema#integer\":\n return int(d[u\"value\"])\n elif d[u\"datatype\"] == u\"http://www.w3.org/2001/XMLSchema#float\":\n return float(d[u\"value\"])\n elif d[u\"datatype\"] == 
u\"http://www.w3.org/2001/XMLSchema#double\":\n return float(d[u\"value\"])\n elif d[u\"datatype\"] == u\"http://www.w3.org/2001/XMLSchema#integer\":\n return d[u\"value\"]\n raise NotImplementedError(\"can not cast '{}'\".format(d.items()))", "def convert_float(value):\n if isinstance(value, bytes):\n value = value.decode(\"utf-8\")\n return float(value)", "def tryConvertToFloat(obj: Any) -> float:\n\n try:\n return float(obj)\n except:\n return None", "def round_fp(data):\n dtype = type(data)\n if dtype == float:\n return round(data, 8)\n elif dtype == np.ndarray:\n return np.round(data, 8)\n elif dtype == dict:\n for key, value in data.items():\n data[key] = round_fp(value)\n elif dtype == list:\n return [round_fp(item) for item in data]\n return data", "def data_to_int(data): \r\n data = str(data).strip().upper()\r\n if data[0]== 'B':\r\n return bin_to_int(data[1:])\r\n elif data[0]== 'H':\r\n return hex_to_int(data[1:])\r\n else:\r\n return int(data, 10)", "def _to_cpu(data: Any) -> Any:\n if isinstance(data, (Tensor, BaseDataElement)):\n return data.to('cpu')\n elif isinstance(data, list):\n return [_to_cpu(d) for d in data]\n elif isinstance(data, tuple):\n return tuple(_to_cpu(d) for d in data)\n elif isinstance(data, dict):\n return {k: _to_cpu(v) for k, v in data.items()}\n else:\n return data", "def isFloat(data):\n\tif type(data) == list or type(data) == np.ndarray:\n\t\tcol = pd.Series(data)\n\telse:\n\t\tcol = data\n\treturn col.dtype == np.float32 or col.dtype == np.float64", "def parse_number_auto_dtype(x: str) -> Union[int, float]:\n\n value: Union[int, float] = float(x)\n\n if value.is_integer():\n value = int(value)\n\n return value", "def try_int_or_float(value: Any) -> Union[int, float, None]:\n return try_int(str(value)) or try_float(str(value))", "def safe_convert(value_to_convert: str, type_to_convert_to: type) -> Optional[Union[int, float]]:\n if value_to_convert is None:\n return None\n else:\n return type_to_convert_to(value_to_convert)", "def _force_unicode(data):\n try:\n data = unicode(data, \"utf-8\")\n except UnicodeDecodeError:\n data = unicode(data, \"latin1\")\n return data", "def maybe_float(v):\n try:\n return float(v)\n except ValueError:\n return v", "def decode(val):\n if isinstance(val, Decimal):\n return float(val)\n return val", "def _readFloat(self, rawData, offset=0):\n val, = unpack(\n self.floatFormat, rawData[\n offset:offset + self.floatFormatLen])\n \n return val", "def data_type():\n if FLAGS.use_fp16:\n return tf.float16\n else:\n return tf.float32", "def data_normalize (self, data):\r\n data = data + (2**15)\r\n data = data / ((2**16) - 1)\r\n data = 2 * data\r\n data = data - 1\r\n\r\n return data", "def data_type():\n if FLAGS.use_fp16:\n return tf.float16\n else:\n return tf.float32", "def get_data_float(self):\n if self.__data is None:\n return None\n return self.__data.copy()", "def as_int_or_float(val):\n if type(val) == str:\n ret_val = float(val) if '.' 
in val else int(val)\n return ret_val\n return val", "def cast_string_to_python_type(string):\n try:\n if string is None:\n return None\n return int(string)\n except:\n try:\n return clean_float(string)\n except ValueError:\n if string == '':\n return None\n else:\n return string", "def _process(proc_data):\n for key in proc_data:\n try:\n proc_data[key] = int(proc_data[key])\n except (ValueError):\n try:\n proc_data[key] = float(proc_data[key])\n except (ValueError):\n pass\n return proc_data", "def format_data(self)->float: \n try:\n formatted = chr(self.data[0])\n for i in range(1, len(self.data)): \n formatted = formatted + (chr(self.data[i])) \n return str(formatted)\n except ValueError as e:\n print(e)\n except TypeError as e:\n print(e)", "def _cast_unsupported_dtypes(tensor):\n\n if tensor.dtype.__eq__(dtypes.int64):\n # outside-compilation doesn't support int64 input yet.\n return math_ops.cast(tensor, dtypes.int32)\n if tensor.dtype.__eq__(dtypes.bfloat16) or tensor.dtype.__eq__(\n dtypes.float16):\n # Since host can't handle bf16, convert tensor to f32.\n return math_ops.cast(tensor, dtypes.float32)\n return tensor", "def decode_data(data):\n try:\n return json_decode(base64.b64decode(data))\n except:\n return None", "def _get_rawvalue_value(data, encoding=None):\n\n try:\n if encoding is None or encoding == \"\":\n return data\n elif encoding == 'base64':\n return base64.b64decode(data)\n return base64.b64decode(data)\n except:\n return data", "def guessDataType(value):\n try: # see if the element is a float()\n if \".\" in value: # if no decimal point, prefer to save as a int.\n return(float(value))\n else:\n raise ValueError\n except ValueError:\n try: # see if it's actually an int?\n return(int(value))\n except ValueError:\n try: # see if I can cooerce it into a location:\n return(location(loc=value))\n except (TypeError, IndexError, AttributeError, AssertionError, ValueError): # this is not working, just store it as a string\n return(str(value))", "def _deduceValueType(value):\n\n\tif value.lower() == 'null':\n\t\treturn None\n\n\tif value.startswith(\"0x\"):\n\t\treturn bytes.fromhex(value[2:])\n\n\t# If value can be an int, float() will not raise\n\t# exception too\n\tresult = value\n\ttry:\n\t\tresult = float(value)\n\t\tresult = int(value)\n\texcept:\n\t\tpass\n\n\treturn result", "def unpack_int64(data):\n value = unpack(DecodeUtils.INT64_BYTE_FORMAT, data[:8])[0]\n return value, 8", "def ensure_int(cls, data):\n if data is None: # Failed to fetch attribute. no need to convert.\n return None\n\n # For some reason bool is considered an integral type. We want to avoid True\n # to be converted to 1, and False to 0 on numeric matchers since it can be\n # misleading.\n if isinstance(data, numbers.Integral) and not isinstance(data, bool):\n return data\n\n if not isinstance(data, str):\n _LOGGER.error('Cannot convert %s to int. Failing.', type(data))\n return None\n\n _LOGGER.warning(\n 'Supplied attribute is of type %s and should have been an int. ',\n type(data)\n )\n\n try:\n return int(data)\n except ValueError:\n _LOGGER.error('Cannot convert %s to int. 
Failing.', type(data))\n return None", "def tryFloat(value):\n try:\n return float(value)\n except:\n return value", "def float_or_none(s):\n if s:\n return float(s)", "def check_type_force_float(x, name):\n if type(x) is int:\n return float(x)\n elif type(x) is not float and type(x) is not numpy.float64:\n raise TypeError(\"%r should be a float\" % (name,))\n else:\n return x", "def get_float_data(d, name):\n data = list(d[name])\n while '' in data:\n data.remove('')\n\n return list(map(float, data))", "def convert_str_float(x):\n\ttry:\n\t\treturn float(x)\n\texcept ValueError:\n\t\tprint(\"must be a number\")", "def _convert_dtype_value(val):\n\n convert_dtype_map = {\n 21: \"int8\",\n 20: \"uint8\",\n 6: \"float64\",\n 5: \"float32\",\n 4: \"float16\",\n 3: \"int64\",\n 2: \"int32\",\n 1: \"int16\",\n 0: \"bool\",\n }\n if val not in convert_dtype_map:\n msg = f\"Paddle data type value {val} is not handled yet.\"\n raise NotImplementedError(msg)\n return convert_dtype_map[val]", "def parse_number_auto_dtype(x):\n\n value = float(x)\n\n if value.is_integer():\n value = int(value)\n\n return value", "def _eight_byte_real_to_float(value):\n short1, short2, long3 = struct.unpack(\">HHL\", value)\n exponent = (short1 & 0x7F00) // 256 - 64\n mantissa = (\n ((short1 & 0x00FF) * 65536 + short2) * 4294967296 + long3\n ) / 72057594037927936.0\n if short1 & 0x8000:\n return -mantissa * 16.0 ** exponent\n return mantissa * 16.0 ** exponent", "def _decode_value(data):\n\n # since Value messages contain only one value, it didn't seem necessary to\n # represent it as a Python class.\n\n msg = Message(data)\n value = None\n count = 0\n\n for field in msg:\n count += 1\n if field.tag == ValueTags.STRING:\n value = field.as_string()\n\n elif field.tag == ValueTags.FLOAT:\n value = field.as_float()\n\n elif field.tag == ValueTags.DOUBLE:\n value = field.as_double()\n\n elif field.tag == ValueTags.INT64:\n value = field.as_int64()\n\n elif field.tag == ValueTags.UINT64:\n value = field.as_uint64()\n\n elif field.tag == ValueTags.SINT64:\n value = field.as_sint64()\n\n elif field.tag == ValueTags.BOOL:\n value = field.as_int32() != 0\n\n else:\n raise ValueError('Unexpected tag %d while decoding value'\n % (field.tag,))\n\n # the MVT spec says that there should be one and only one field in the\n # Value message, so check for that.\n if value is None:\n raise ValueError('Found no fields when decoding value')\n if count > 1:\n raise ValueError('Found multiple fields when decoding value')\n\n return value", "def percent_decode(data):\n if data is None:\n return None\n return unquote(xstr(data))", "def to_numeric(s):\n\n try:\n s = float(s)\n except Exception as e:\n log.debug('Caught `{e!s}` trying to cast {s!r} to numeric'.format(**locals()))\n pass\n return s", "def _decode_binary(data):\n try:\n data = data.decode('utf-8')\n except UnicodeDecodeError: # pragma: no cover\n # for data written an upstream java App\n data = data.decode('latin-1')\n return data", "def str2type(raw_val):\n try:\n return int(raw_val)\n except ValueError:\n pass\n\n try:\n return float(raw_val)\n except ValueError:\n return raw_val", "def _to_num(self, value):\n output = {}\n # actually integer values\n if isinstance(value, (int, long)):\n output[\"convert\"] = True\n output[\"value\"] = value\n output[\"type\"] = type(value)\n # some floats can be converted without loss\n elif isinstance(value, float):\n output[\"convert\"] = (int(value) == float(value))\n output[\"value\"] = value\n output[\"type\"] = type(value)\n # we can't 
convert nonetypes\n elif isinstance(value, types.NoneType):\n output[\"convert\"] = False\n output[\"value\"] = None\n output[\"type\"] = type(value)\n # we can't convert non-string\n elif not isinstance(value, basestring):\n output[\"convert\"] = False\n output[\"value\"] = \"Nonstring\"\n output[\"type\"] = type(value)\n else:\n value = value.strip()\n try:\n # try to convert value to float\n float_value = float(value)\n output[\"convert\"] = True\n output[\"value\"] = float_value\n output[\"type\"] = type(float_value)\n except ValueError:\n # if fails try to convert value to int\n try:\n int_value = int(value)\n output[\"convert\"] = True\n output[\"value\"] = int_value\n output[\"type\"] = type(int_value)\n # if fails it's a string\n except ValueError:\n output[\"convert\"] = False\n output[\"value\"] = None\n output[\"type\"] = type(value)\n return output", "def _eight_byte_real_to_float(value):\n short1, short2, long3 = struct.unpack('>HHL', value)\n exponent = (short1 & 0x7f00) // 256 - 64\n mantissa = (((short1 & 0x00ff) * 65536 + short2) * 4294967296 +\n long3) / 72057594037927936.0\n if short1 & 0x8000:\n return -mantissa * 16.**exponent\n return mantissa * 16.**exponent", "def XPLMGetDataf(inDataRef):\n return float", "def to_image_data(data):\n \n # removing image\n if not data:\n return u''\n\n # image path (not changed)\n if data[0:5] != u'data:':\n return None\n \n # TODO: better MIME handling\n mime = data[5:data.index(u';')].lower()\n img = data[data.index(u',') + 1:].decode('base64')\n \n return mime, img", "def get_data_type(params):\n return tf.float16 if params.use_fp16 else tf.float32", "def to_float(s):\n try:\n return float(s)\n except ValueError:\n return np.nan", "def to_float(x, key):\n x = x.strip()\n if not x or x in ('NA', 'n/a'):\n return None\n if '.' 
in x:\n # There are '.'s, so commas are placeholders\n x = x.replace(',', '') \n if x.endswith('ft'):\n scale = 0.3048\n x = x[:-2].strip()\n else:\n scale = 1 \n try:\n return scale * float(x)\n except:\n logging.warn('Could not convert %s value %s to float', key, x)\n return None", "def _as_float(self, name):\n org_type = self._get_type(name)\n if org_type == 'float': return None\n valid = ['single', 'int']\n is_num_str = self.is_like_numeric(name) if org_type == 'string' else False\n if not (org_type in valid or is_num_str):\n msg = 'Cannot convert variable {} of type {} to float!'\n raise TypeError(msg.format(name, org_type))\n if org_type == 'single':\n self._as_int(name)\n if org_type in ['int', 'string']:\n self._meta['columns'][name]['type'] = 'float'\n if org_type == 'int':\n self._data[name] = self._data[name].apply(\n lambda x: float(x) if not np.isnan(x) else np.NaN)\n elif org_type == 'string':\n self._data[name] = self._data[name].apply(lambda x: float(x))\n return None", "def read_int(data):\n s_type = \"=%s\" % get_type(\"int\")\n return struct.unpack(s_type, data.read(4))[0]", "def to_data_type(data_type, value):\n if data_type in ['Decimal256(0)']:\n return f'toDecimal256(\\'{value}\\',0)'\n\n else:\n return f'to{data_type}(\\'{value}\\')'", "def interpret_packet_value_pair(data):\n if data is None:\n return None, None\n packet_type = int.from_bytes(data[3:4], 'little')\n name = value = None\n if packet_type == 1:\n name = str(data[17:], 'utf8') \n value = float(ustruct.unpack('<i', data[12:16])[0])\n elif packet_type == 5:\n name = str(data[21:29], 'ascii').strip()\n value = ustruct.unpack('<d', data[12:20])[0]\n else:\n display.scroll('Packet type {} not recognised'.format(packet_type))\n return name, value", "def format_number(data):\n # strip commas and leading/trailing spaces\n data = data.strip().replace(',', '').replace('$', '')\n try:\n return int(data)\n except:\n return None", "def pack_data(self, data):\n if type(data) is bool or str(data).isnumeric():\n return data\n return str(data)", "def _check_data(data, dset_attrs):\n dtype = dset_attrs['dtype']\n float_to_int = (np.issubdtype(dtype, np.integer)\n and np.issubdtype(data.dtype, np.floating))\n if float_to_int:\n attrs = dset_attrs['attrs']\n scale_factor = [c for c in attrs if 'scale_factor' in c][0]\n scale_factor = attrs[scale_factor]\n\n # apply scale factor and dtype\n data = np.multiply(data, scale_factor)\n if np.issubdtype(dtype, np.integer):\n data = np.round(data)\n\n data = data.astype(dtype)\n\n return data", "def _infer_variable_types_from_data(raw_data):\n raise NotImplementedError()", "def read_floats(filepointer):\n\tdata = read_strings(filepointer)\n\tif not data:\n\t\treturn None\n\ttry:\n\t\tdata = [float(x) for x in data]\n\t\treturn data\n\texcept:\n\t\t# try the next line\n\t\treturn read_floats(filepointer)", "def preprocess_data(self, data, scale_data=True):\n print(\"preprocess_data not implemented\")\n return data", "def parse_fan_kv(sensor_data):\n if sensor_data and sensor_data != \"NA\":\n return int(float(sensor_data))\n else:\n return -1", "def _decode_float(fp):\n return struct.unpack('>f', fp.read(4))[0]", "def try_float(value: Any) -> Optional[float]:\n try:\n return float(value)\n except (TypeError, ValueError):\n return None", "def unpack64(data):\n\treturn struct.unpack('<Q', data)[0]", "def convertFloat(num):\n try:\n num = float(num)\n return num\n except (TypeError, ValueError, ArithmeticError):\n print(\"num cannot convert float\")", "def ensure_unicode(data, 
encoding=\"utf8\"):\n if isinstance(data, bytes):\n return data.decode(encoding)\n else:\n return unicode_type(data)", "def genesis_to_nullable_float(genesis_str):\n\n if genesis_str == '':\n return None\n return float(genesis_str)", "def safe_float(str):\n if not str:\n return None\n try:\n return float(str)\n except ValueError:\n return 0", "def obscure(data: bytes) -> bytes:\n return b64e(compress(data, 9))" ]
[ "0.748317", "0.69878703", "0.6950177", "0.6709056", "0.6397729", "0.6396958", "0.63965064", "0.6293659", "0.6279694", "0.6161165", "0.61318475", "0.61182475", "0.5992608", "0.596135", "0.5870688", "0.5860507", "0.58470494", "0.58467394", "0.5794232", "0.5766028", "0.5722432", "0.57021475", "0.57009083", "0.5693537", "0.5623742", "0.56217533", "0.56190467", "0.56024563", "0.5591062", "0.5557807", "0.55545074", "0.5547972", "0.55467117", "0.55372334", "0.55362505", "0.5528639", "0.5518509", "0.54979485", "0.54864734", "0.5482022", "0.5479633", "0.5469251", "0.546361", "0.54569215", "0.5452578", "0.5445365", "0.54437643", "0.54346734", "0.5431632", "0.5430551", "0.5422943", "0.5422077", "0.54060686", "0.54014117", "0.5389296", "0.53855205", "0.5374171", "0.5359809", "0.53589404", "0.53501236", "0.53437275", "0.5334046", "0.5324168", "0.5314614", "0.52948827", "0.52778", "0.52760243", "0.5268432", "0.52670604", "0.5262354", "0.52616423", "0.5261639", "0.5260554", "0.5254393", "0.52379245", "0.52371395", "0.52351314", "0.52321273", "0.5230381", "0.5229942", "0.5223477", "0.52230805", "0.5208119", "0.51889324", "0.5188179", "0.5176476", "0.517616", "0.5166385", "0.5165358", "0.51627463", "0.5162206", "0.5158508", "0.5157113", "0.5153471", "0.51518106", "0.5149578", "0.5148945", "0.5145264", "0.51420164", "0.5138902", "0.5135104" ]
0.0
-1
Parse CSV, TSV or other delimiter-separated text file. Raise LookupError for an `encoding` Python cannot handle. Raise UnicodeError when the file simply cannot be read as text. (e.g., a UTF-16 file that does not start with a byte-order marker.)
def _parse_csv(
    path: Path,
    *,
    settings: Settings = DEFAULT_SETTINGS,
    encoding: Optional[str],
    delimiter: Optional[str],
    has_header: bool,
    autoconvert_text_to_numbers: bool,
) -> ParseCsvResult:
    warnings = []

    with contextlib.ExitStack() as ctx:
        n_bytes = path.stat().st_size
        if n_bytes > settings.MAX_CSV_BYTES:
            # We can't simply os.truncate() the input file, because sandboxed code
            # can't modify input files.
            truncated_path = ctx.enter_context(tempfile_context(prefix="truncated-"))
            with path.open("rb") as src, truncated_path.open("wb") as dest:
                os.sendfile(dest.fileno(), src.fileno(), 0, settings.MAX_CSV_BYTES)
            path = truncated_path

            warnings.append(
                _trans_cjwparse(
                    "csv.truncated_file",
                    "{n_bytes_truncated, one{Truncated # byte} other{Truncated # bytes}} from file (maximum is {max_n_bytes} bytes)",
                    dict(
                        n_bytes_truncated=(n_bytes - settings.MAX_CSV_BYTES),
                        max_n_bytes=settings.MAX_CSV_BYTES,
                    ),
                )
            )

        utf8_path = ctx.enter_context(tempfile_context(prefix="utf8-", suffix=".txt"))
        # raises LookupError, UnicodeError
        warnings.extend(
            transcode_to_utf8_and_warn(path, utf8_path, encoding, settings=settings)
        )

        # Sniff delimiter
        if not delimiter:
            delimiter = detect_delimiter(utf8_path, settings)

        with tempfile_context(suffix=".arrow") as arrow_path:
            # raise subprocess.CalledProcessError on error ... but there is no
            # error csv-to-arrow will throw that we can recover from.
            child = subprocess.run(
                [
                    "/usr/bin/csv-to-arrow",
                    "--delimiter",
                    delimiter,
                    "--max-rows",
                    str(settings.MAX_ROWS_PER_TABLE),
                    "--max-columns",
                    str(settings.MAX_COLUMNS_PER_TABLE),
                    "--max-bytes-per-value",
                    str(settings.MAX_BYTES_PER_VALUE),
                    utf8_path.as_posix(),
                    arrow_path.as_posix(),
                ],
                capture_output=True,
                check=True,
            )
            warnings.extend(_parse_csv_to_arrow_warnings(child.stdout.decode("utf-8")))

            reader = pyarrow.ipc.open_file(arrow_path.as_posix())
            raw_table = reader.read_all()  # efficient -- RAM is mmapped

    table, more_warnings = _postprocess_table(
        raw_table, has_header, autoconvert_text_to_numbers, settings
    )
    return ParseCsvResult(table, warnings + more_warnings)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _read(filename, encodings=['ascii', 'utf-8', 'utf-16', 'latin-1']):\n text = None\n\n for encoding in encodings:\n try:\n f = open(filename, encoding=encoding)\n text = f.read()\n f.close()\n except UnicodeDecodeError:\n f.close()\n except UnicodeError:\n f.close()\n except FileNotFoundError:\n raise FileNotFoundError(\"Could not open file.\")\n\n if not text:\n raise UnicodeError(filename)\n\n return text", "def parse(self, filename):\n try:\n if 't' in self.FILE_OPEN_MODE:\n kw = {'encoding': self.FILE_ENCODING, 'errors': 'ignore'}\n else:\n kw = {}\n with open(filename, self.FILE_OPEN_MODE, **kw) as infile:\n self._parse(infile)\n except IOError:\n raise FileFormatError()", "def load_content_file_with_encoding (filename) :\n error = None\n for enc in [ \"utf8\", \"latin1\", None ] :\n try :\n with open(filename, \"r\", encoding = enc) as f : content = f.read()\n return content, enc\n except Exception as e :\n error = e\n raise error", "def read(filename, encoding='utf-8'):\r\n text, encoding = decode( file(filename, 'rb').read() )\r\n return text, encoding", "def parse_file(file_path, encoding='utf-8', print_errors=False):\n with open(file_path, 'r', encoding=encoding) as f:\n return parse(\n f.read(),\n file_name=os.path.basename(file_path),\n print_errors=print_errors\n )", "def parse_separated(filename, delimiter):\n with csvhelper.UnicodeReader(filename,\n delimiter=delimiter) as report_reader:\n return parse_generic(report_reader)", "def get_file_encoding(file_path):\n with open(file_path, 'r', encoding='utf-8') as f:\n try:\n for line in f:\n pass\n except UnicodeDecodeError:\n return 'ISO-8859-1'\n else:\n return 'utf-8'", "def load_text(self, encoding='utf8', encoding_errors='ignore'):\n log.error('Cannot load: %s', self.file_name)", "def parse_file(\n self, filename: Path, encoding: Optional[str] = None, debug: bool = False\n ) -> NL:\n with open(filename, encoding=encoding) as stream:\n return self.parse_stream(stream, debug)", "def _read_file(self, file_object):\n try:\n if file_object.closed:\n codecs.open(file_object.name, 'r+b', encoding=self.input_encoding)\n\n reader = UnicodeReader(file_object, output_encoding=self.output_encoding, dialect=self.input_dialect)\n rows = []\n for row in reader:\n rows.append(row)\n except:\n raise CsvReadException(\"Unable to read file with {x} - likely an encoding problem (non fatal)\".format(x=self.input_encoding))\n\n try:\n self._populate_data(rows)\n return rows\n except:\n raise CsvStructureException(\"Unable to read file into meaningful datastructure using encoding {x} (non fatal)\".format(x=self.input_encoding))", "def txt_file_reader(path):\n return open(path, encoding=cfg.ENCODING)", "def _read_tsv(cls, input_file, quotechar=None):\r\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\r\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\r\n lines = []\r\n for line in reader:\r\n if sys.version_info[0] == 2:\r\n line = list(unicode(cell, 'utf-8') for cell in line)\r\n lines.append(line)\r\n return lines", "def _read_tsv(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding=\"utf-8\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n if sys.version_info[0] == 2:\n line = list(unicode(cell, 'utf-8') for cell in line)\n lines.append(line)\n return lines", "def test_utf8_cp1252_char_file(self):\n\t\tmain.Main(['input/utf8.txt']).run()\n\t\tself.assertTrue(filecmp.cmp('output/output.csv', 'output/utf8.csv'))", "def _read_tsv( 
input_file, quotechar=None):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n return list(csv.reader(f, delimiter=\"\\t\", quotechar=quotechar))", "def test_parse_invalid_file(self):\n with pytest.raises(ParserError):\n self.parser.parse(\"invalid csv\")", "def _read_tsv(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n # if sys.version_info[0] == 2:\n # line = list(unicode(cell, 'utf-8') for cell in line)\n lines.append(line)\n return lines", "def __init__(self, file_path=None, writer=None,\n output_encoding=\"utf-8\", input_encoding=\"utf-8\",\n try_encodings_hard=True, fallback_input_encodings=None,\n from_row=0, from_col=0, ignore_blank_rows=False,\n input_dialect=csv.excel):\n self.file_path = None\n self.output_encoding = output_encoding\n self.input_encoding = input_encoding\n\n # useful to know about this for any future work on encodings: https://docs.python.org/2.4/lib/standard-encodings.html\n if fallback_input_encodings is None and try_encodings_hard:\n fallback_input_encodings = [\"cp1252\", \"cp1251\", \"iso-8859-1\", \"iso-8859-2\", \"windows-1252\", \"windows-1251\", \"mac_roman\"]\n else:\n fallback_input_encodings = []\n self.fallback_input_encodings = fallback_input_encodings\n\n self.from_row = from_row\n self.from_col = from_col\n self.ignore_blank_rows = ignore_blank_rows\n self.input_dialect = input_dialect\n\n # Store the csv contents in a list of tuples, [ (column_header, [contents]) ]\n self.data = []\n\n # Get an open file object from the given file_path or file object\n if file_path is not None:\n if type(file_path) == file:\n self.file_path = file_path.name\n # NOTE: if you have passed in a file object, it MUST work - as in, it must be set to\n # read the right encoding, and everything. We will not try to parse it again if it\n # fails the first time. 
If it is closed, you will also need to be sure to set the input_encoding.\n # All round - better if you just give us the file path\n self.file_object = file_path\n if self.file_object.closed:\n self.file_object = codecs.open(self.file_object.name, 'r+b', encoding=self.input_encoding)\n\n # explicitly read this file in\n self._read_file(self.file_object)\n else:\n self.file_path = file_path\n if os.path.exists(file_path) and os.path.isfile(file_path):\n self._read_from_path(file_path)\n else:\n # If the file doesn't exist, create it.\n self.file_object = codecs.open(file_path, 'w+b', encoding=self.output_encoding)\n\n elif writer is not None:\n self.file_object = writer", "def _read_tsv(cls, input_file, quotechar=None):\r\n with open(input_file, \"r\") as f:\r\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\r\n lines = []\r\n for line in reader:\r\n if sys.version_info[0] == 2:\r\n line = list(unicode(cell, 'utf-8') for cell in line)\r\n lines.append(line)\r\n return lines", "def _read_tsv(cls, input_file, quotechar=None):\r\n with open(input_file, \"r\") as f:\r\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\r\n lines = []\r\n for line in reader:\r\n if sys.version_info[0] == 2:\r\n line = list(unicode(cell, 'utf-8') for cell in line)\r\n lines.append(line)\r\n return lines", "def _read_tsv(cls, input_file, quotechar=None):\r\n with open(input_file, \"r\") as f:\r\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\r\n lines = []\r\n for line in reader:\r\n if sys.version_info[0] == 2:\r\n line = list(unicode(cell, 'utf-8') for cell in line)\r\n lines.append(line)\r\n return lines", "def parse(filename):\n if filename.endswith('.tsv'):\n # Horrible filename-based hack; in future examine contents of file here\n return parse_separated(filename, '\\t')\n if filename.endswith('.xlsx'):\n return parse_xlsx(filename)\n # fallback to old assume-csv behavior\n return parse_separated(filename, ',')", "def read_text_file(fpath, encoding, read_size=-1, force_unix_linebreaks=True):\n with codecs.open(fpath, encoding=encoding) as f:\n contents = f.read(read_size)\n\n if read_size > 0:\n contents = contents[:read_size]\n\n if force_unix_linebreaks:\n contents = linebreaks_win2unix(contents)\n\n return contents", "def test_file_readlines_utf8_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).readlines_utf8()", "def _read_tsv(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding='utf-8') as f:\n # reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in f.readlines():\n # if sys.version_info[0] == 2:\n # line = list(cell.decode('utf-8') for cell in line)\n lines.append(line.strip().split('\\t'))\n return lines", "def load_text_file(file_name: str) -> str:\r\n try:\r\n with open(file_name, encoding='windows-1251') as file_object:\r\n return file_object.read()\r\n except FileNotFoundError as err:\r\n print(f\"{err}\\n\"\r\n f\"Please make sure the file you are trying to open exists!\")\r\n quit()", "def loadTextFromFile(fullFilename, fileEncoding=\"utf-8\"):\n with codecs.open(fullFilename, 'r', encoding=fileEncoding) as fp:\n allText = fp.read()\n # logging.debug(\"Complete load text from %s\", fullFilename)\n return allText", "def read_tsv(input_file, quotechar=None):\n with open(input_file, \"r\", encoding=\"utf-8\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n if sys.version_info[0] == 2:\n line = 
list(np.unicode(cell, 'utf-8') for cell in line)\n lines.append(line)\n return lines", "def process_file(file_path):\n\n enc = detect_bom_encoding(file_path)\n if enc is None:\n with open(file_path, 'r') as f:\n result = run_checks(file_path, f)\n else:\n with open(file_path, 'r', encoding=enc) as f:\n result = run_checks(file_path, f)\n print('Finished processing %s\\n' % file_path)\n return result", "def fs_read(file_path):\n try:\n with open(str(file_path), 'r') as f:\n return f.read()\n except UnicodeDecodeError:\n with open(str(file_path), 'r', encoding='latin-1') as f:\n return f.read()\n except IOError as e:\n raise e", "def test_unicode_open(self):\n\n unicode_text = u\"\"\"Polish: Ą Ł Ż\nChinese: 倀 倁 倂 倃 倄 倅 倆 倇 倈\nMusical Notes: ♬ ♫ ♯\"\"\"\n\n with utils.unicode_open('tests/files/unicode.txt') as f:\n opened_text = f.read()\n self.assertEqual(unicode_text, opened_text)", "def _read_tsv(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n return list(csv.reader(f, delimiter=\"\\t\", quotechar=quotechar))", "def _parse_textfile(self):\n\n field_names = list(self.FIELD_NAME_TO_INDEX.keys())\n field_indices = list(self.FIELD_NAME_TO_INDEX.values())\n frame = pd.read_csv(\n self.filepath,\n header=None, # MAGIC file has no header line\n delimiter=self.DELIMITER,\n usecols=field_indices,\n names=field_names,\n converters=self.FIELD_CONVERTERS,\n )\n return frame", "def test_file_read_utf8_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read_utf8()", "def detect_encoding(filename, default_to_utf8 = True, **kwargs):\n # Read some of the file\n import os.path\n filename = from_posix(filename)\n file_len = os.path.getsize(filename)\n read_len = min(_READ_CHUNK_SIZE, file_len)\n\n # ... unless we're supposed to!\n if kwargs.get('read_all', False):\n read_len = file_len\n\n # Read the first read_len bytes raw, so we can detect the encoding\n with open(filename, 'rb') as raw_handle:\n raw = raw_handle.read(read_len)\n\n # Detect the encoding the file specfies, if any.\n import codecs\n if raw.startswith(codecs.BOM_UTF8):\n encoding = 'utf-8-sig'\n else:\n # Detect encoding using the best detector available\n try:\n # First try ICU. ICU will report ASCII in the first 32 Bytes as\n # ISO-8859-1, which isn't exactly wrong, but maybe optimistic.\n import icu\n encoding = icu.CharsetDetector(raw).detect().getName().lower()\n except ImportError: # pragma: nocover\n # If that doesn't work, try chardet - it's not got native components,\n # which is a bonus in some environments, but it's not as precise.\n import chardet\n encoding = chardet.detect(raw)['encoding'].lower()\n\n # Chardet is more brutal in that it reports ASCII if none of the first\n # Bytes contain high bits. To emulate ICU, we just bump up the detected\n # encoding.\n if encoding == 'ascii':\n encoding = 'iso-8859-1'\n\n # Both chardet and ICU may detect ISO-8859-x, which may not be possible\n # to decode as UTF-8. So whatever they report, we'll try decoding as\n # UTF-8 before reporting it.\n if default_to_utf8 and encoding in ('ascii', 'iso-8859-1', 'windows-1252'):\n # Try decoding as utf-8\n try:\n raw.decode('utf-8')\n # If this worked... 
well there's no guarantee it's utf-8, to be\n # honest.\n encoding = 'utf-8'\n except UnicodeDecodeError:\n # Decoding as utf-8 failed, so we can't default to it.\n pass\n\n return encoding", "def load(fp, encoding=None, lexer=None, tokenizer=None, detokenize=True):\n lexer = lexer() if lexer else Lexer()\n stream = TokenStream(fp, tokenizer=tokenizer)\n parse = lexer.parse(stream)\n\n if detokenize:\n return list(lexer.detokenize(parse))\n return parse", "def read_csv_file(path):\n\tfrom csv import reader\n\n\tif PY2:\n\t\twith codecs.open(path, 'r', 'utf-8') as msgfile:\n\t\t\tdata = msgfile.read()\n\n\t\t\t# for japanese! #wtf\n\t\t\tdata = data.replace(chr(28), \"\").replace(chr(29), \"\")\n\t\t\tdata = reader([r.encode('utf-8') for r in data.splitlines()])\n\t\t\tnewdata = [[text_type(val, 'utf-8') for val in row] for row in data]\n\telse:\n\t\twith io.open(path, mode='r', encoding='utf-8', newline='') as msgfile:\n\t\t\tdata = reader(msgfile)\n\t\t\tnewdata = [[ val for val in row ] for row in data]\n\treturn newdata", "def _read_tsv(cls, input_file, quotechar=None):\n return readfile(input_file)", "def ImportTXT(FilePath):\n try:\n with open(FilePath, 'r') as file:\n return(file.read())\n except Exception as e:\n raise e", "def parse(fcontents, utf16=False): # TODO where does this conversion take place??\n if utf16:\n # Handle a bunch of Unicode nonsense; files appear to be in UTF-16LE\n quant_results = fcontents.split(BLOCK_DIVIDER_UTF16)[3]\\\n .decode(AWKWARD_ENCODING).encode('ascii', 'ignore').split(\"\\r\\n\")\n else:\n quant_results = fcontents.split(BLOCK_DIVIDER)[3].split(\"\\r\\n\")\n\n for res in quant_results:\n items = res.split(\"\\t\")\n if re.search(\"\\d+\", items[0]): # ignore non-digit rows\n amt = items[9]\n if amt == OUTSIDE_LADDER:\n amt = 100\n yield (int(items[0]), items[1], float(amt))", "def unicode_open(filename, *args, **kwargs):\n kwargs['encoding'] = \"utf-8\"\n if PY3:\n return open(filename, *args, **kwargs)\n return codecs.open(filename, *args, **kwargs)", "def test_the_main_with_non_utf_file(self):\r\n with self.assertRaises(SystemExit):\r\n the_main_function('test_non_utf_8.csv.csv')", "def parse_file(file_name, out):\n try:\n with open(file_name) as f:\n parse_string(f.read(), out)\n except Exception as e:\n logging.error(\"Error when opening and parsing file %s: %s\" % (file_name, e))\n print(\"Error occurred when parsing file. 
See logs for more details.\",file=sys.stderr)", "def load_csvFile(file_location, file_name,sep,encoding):\n try:\n fullpath=file_location+file_name\n df = pd.read_csv(fullpath, encoding=encoding,sep=sep)\n return df\n except IOError:\n print('Error loading the file: ' , file_name)\n sys.exit(1)", "def tokenize(fp):\n\n tokenizer = Tokenizer()\n\n for (lineno, line) in enumerate(fp):\n try:\n line = line.decode('utf8')\n except UnicodeDecodeError as detail:\n print >>sys.stderr, \"failed to decode line %i: %s\" % (lineno+1,\n detail)\n line = line.decode('utf8', 'replace')\n\n # This should use \"yield from ...\" (new in Python 3.3)\n for t in tokenizer.feed_data(line):\n yield t\n last = tokenizer.finalize()\n if last:\n yield last", "def _read_tsv(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n if sys.version_info[0] == 2:\n line = list(cell for cell in line)\n lines.append(line)\n return lines", "def _read_tsv(cls, input_file, quotechar=None):\r\n with open(input_file, \"r\", encoding=\"utf-8\") as f:\r\n return list(csv.reader(f, delimiter=\"\\t\", quotechar=quotechar))", "def parse_data(fp):\n pass", "def _read_tsv(cls, input_file, quotechar=None):\r\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\r\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\r\n lines = []\r\n for line in reader:\r\n lines.append(line)\r\n return lines", "def test_file_readlines_as_utf8(self):\n FileWriter(self.unicode_path).write_utf8(self.multiline_unicode_string)\n line_list = FileReader(self.unicode_path).readlines_as(\"utf-8\")\n self.assertEqual(line_list, self.uni_multi_list)", "def convertFile(file_name):\n for format in source_formats:\n try:\n with codecs.open(file_name, 'rU', format) as source_file:\n write_conversion(source_file)\n return\n except UnicodeDecodeError:\n pass\n\n print(\"Error: failed to convert '\" + file_name + \"'.\")", "def read_file(path: Union[str, Path], encoding: str = 'utf-8') -> Optional[List[str]]:\n if isinstance(path, str):\n path = Path(path).resolve()\n values: List[str] = []\n if path.suffix == '.csv':\n with path.open(encoding=encoding, newline='') as csvfile:\n values = [row for row in csv.reader(csvfile)]\n elif path.suffix == '.json':\n with path.open(encoding=encoding) as jsonfile:\n values = json.load(jsonfile)\n if not isinstance(values, list):\n _log.error(f\"Expected a list in JSON file {path.name},\"\n f\"got {type(values).__name__}\")\n return None\n else: # Parse as text, one value per line\n data = path.read_text(encoding=encoding)\n values = [line for line in data.split('\\n') if line != '']\n return values", "def test_file_utf8_readwrite_noraise_unicodeerror(self):\n FileWriter(self.unicode_path).write(self.unicode_string)\n unicode_text = FileReader(self.unicode_path).read()\n self.assertEqual(self.unicode_string, unicode_text)", "def parse(self, infile):\r\n raise NotImplementedError()", "def validate_csv(filename: str) -> bool:\n # From: https://stackoverflow.com/questions/2984888/check-if-file-has-a-csv-format-with-python\n try:\n with open(filename, newline='') as csvfile:\n start = csvfile.read(4096)\n\n # isprintable does not allow newlines, printable does not allow umlauts...\n if not all([c in string.printable or c.isprintable() for c in start]):\n return False\n dialect = csv.Sniffer().sniff(start)\n return True\n except csv.Error:\n # Could not get a csv dialect -> 
probably not a csv.\n return False\n except UnicodeError:\n return False", "def read_file(file: str, encoding='windows-1251', sep=';'):\n return pd.read_csv(file, encoding=encoding, sep=sep)", "def unicode_csv_reader(utf8_data, dialect=csv.excel, **kwargs):\n csv_reader = csv.reader(utf8_data, dialect=dialect, **kwargs)\n for row in csv_reader:\n yield [unicode(cell, 'utf-8') for cell in row]", "def testParseUnicode(self):\n test_file = self._GetTestFilePath([u'skydriveerr-unicode.log'])\n event_queue_consumer = self._ParseFile(self._parser, test_file)\n event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)\n\n self.assertEqual(len(event_objects), 19)\n\n # TODO: check if this test passes because the encoding on my system\n # is UTF-8.\n expected_text = (\n u'No node found named Passport-Jméno-člena')\n self.assertEqual(event_objects[3].text, expected_text)", "def determineEncoding(self, filepath):\n with open(self.filepath,\"r\",encoding='utf-16') as reader: \n try:\n line = reader.readline()\n return \"utf-16\"\n except:\n return \"utf-8\"", "def test_windows_1252_file(self):\n\t\tmain.Main(['input/windows_1252.txt']).run()\n\t\tself.assertTrue(filecmp.cmp('output/output.csv', 'output/windows_1252.csv'))", "def test_file_utf8_readas_writeas(self):\n FileWriter(self.unicode2_path).write_as(self.unicode_string, \"utf-8\")\n unicode_text = FileReader(self.unicode2_path).read_as(\"utf-8\")\n self.assertEqual(unicode_text, self.unicode_string)", "def txt_loader(fileobj):\n if isinstance(fileobj, bytes):\n data = fileobj.decode('utf-8')\n elif isinstance(fileobj, six.string_types):\n with open(fileobj, 'rb') as f:\n data = f.read().decode('utf-8')\n elif hasattr(fileobj, 'read'):\n data = fileobj.read().decode('utf-8')\n else:\n raise ValueError('fileobj is not a filename or a file object')\n return data", "def _read_tsv(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding='utf-8') as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines", "def test_file_utf8_readwrite(self):\n FileWriter(self.unicode_path).write_utf8(self.unicode_string)\n unicode_text = FileReader(self.unicode_path).read_utf8()\n self.assertEqual(unicode_text, self.unicode_string)", "def test_unicode_decode_error(parser):\n with pytest.raises(UnicodeDecodeError):\n parser.load('jsonexamples/test_parsing/n_array_invalid_utf8.json')", "def _read_tsv(cls, input_file, quotechar=None):\r\n with open(input_file, \"r\", encoding='utf-8') as f:\r\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\r\n lines = []\r\n for line in reader:\r\n lines.append(line)\r\n return lines", "def _process(self, file: bytes) -> List[Tuple[str]]:\n decoded_text = file.decode('utf-8')\n # Replace end of line tokens\n if self.eol is not None and not self.split_by_sentence:\n decoded_text = decoded_text.replace('\\n', self.eol)\n\n # Split by sentence or unroll\n if self.split_by_sentence:\n nltk.download('punkt', quiet=True)\n text = [(sent.strip(),) for sent in nltk.tokenize.sent_tokenize(decoded_text)]\n else:\n text = [(decoded_text,)]\n\n return text", "def __init__(self, csvfile, *args, **kwargs):\n self.encoding = kwargs.pop('encoding', 'utf-8')\n csv.DictReader.__init__(self, csvfile, *args, **kwargs)", "def _read_tsv(cls, input_file, quotechar=None):\r\n with open(input_file, \"r\", encoding=\"utf-8\") as f:\r\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\r\n lines = []\r\n for line in 
reader:\r\n lines.append(line)\r\n return lines", "def load_text_file(i):\n\n fn = i['text_file']\n\n en = i.get('encoding', '')\n if en == '' or en == None:\n en = 'utf8'\n\n try:\n f = open(fn, 'rb')\n except Exception as e:\n return {'return': 16, 'error': 'problem opening text file='+fn+' ('+format(e)+')'}\n\n try:\n b = f.read()\n except Exception as e:\n f.close()\n return {'return': 1, 'error': 'problem reading text file='+fn+' ('+format(e)+')'}\n\n f.close()\n\n r = {'return': 0, 'bin': b}\n\n if i.get('delete_after_read', '') == 'yes':\n import os\n os.remove(fn)\n\n if i.get('keep_as_bin', '') != 'yes':\n try:\n # decode into Python string (unicode in Python3)\n s = b.decode(en).replace('\\r', '')\n except Exception as e:\n return {'return': 1, 'error': 'problem decoding content from file \"'+fn+'\" ('+format(e)+')'}\n\n r['string'] = s\n\n cl = i.get('split_to_list', '')\n cd = i.get('convert_to_dict', '')\n\n if cl == 'yes' or cd == 'yes':\n lst = s.split('\\n')\n r['lst'] = lst\n\n if cd == 'yes':\n dd = {}\n\n ss = i.get('str_split', '')\n rq = i.get('remove_quotes', '')\n if ss == '':\n ss = ':'\n\n for q in lst:\n qq = q.strip()\n ix = qq.find(ss)\n if ix > 0:\n k = qq[0:ix].strip()\n v = ''\n if ix+1 < len(qq):\n v = qq[ix+1:].strip()\n if v != '' and rq == 'yes':\n if v.startswith('\"'):\n v = v[1:]\n if v.endswith('\"'):\n v = v[:-1]\n dd[k] = v\n\n r['dict'] = dd\n\n return r", "def validate_file(\n file: str,\n format: FormatType = FormatType.csv,\n delimiter: str = ',',\n header_delimiter: str = None,\n skip_blank_lines: bool = True,\n):\n\n with open_resource(file) as resource_io:\n\n if format == FormatType.csv:\n reader = CSVReader(\n resource_io,\n delimiter=delimiter,\n header_delimiter=header_delimiter,\n skip_blank_lines=skip_blank_lines,\n )\n elif format == FormatType.jsonl:\n reader = JSONLReader(resource_io)\n elif format == FormatType.json:\n reader = JSONReader(resource_io)\n else:\n raise ValueError\n\n for _ in reader:\n pass", "def _parse(self, infile):\n raise NotImplementedError()", "def from_text_file(cls, filename):\n raise NotImplementedError()", "def check_file_encoding(self, input_file_path):\n self.log([u\"Checking encoding of file '%s'\", input_file_path])\n self.result = ValidatorResult()\n if self._are_safety_checks_disabled(u\"check_file_encoding\"):\n return self.result\n if not gf.file_can_be_read(input_file_path):\n self._failed(u\"File '%s' cannot be read.\" % (input_file_path))\n return self.result\n with io.open(input_file_path, \"rb\") as file_object:\n bstring = file_object.read()\n self._check_utf8_encoding(bstring)\n return self.result", "def try_decode(text, encoding=\"utf-8\"):\n try:\n return text.decode(encoding, \"ignore\")\n except Exception:\n return text", "def read_lines(files):\n for file in files:\n for line in file.readlines():\n try:\n line = line.decode('utf-8')\n except UnicodeDecodeError:\n line = line.decode('latin-1')\n yield line.strip()", "def textread(filepath):\n return np.array(pd.read_csv(filepath, \n sep = \"\\s+|\\t+|\\s+\\t+|\\t+\\s+\",\n header=None,\n comment='#',\n engine='python'))", "def detect_encoding(data):\n enc_list = ['UTF-8', 'LATIN-1', 'iso8859-1', 'iso8859-2',\n 'UTF-16', 'CP720']\n code = locale.getpreferredencoding(False)\n if code not in enc_list:\n enc_list.insert(0, code)\n for c in enc_list:\n try:\n for line in data:\n line.decode(c)\n except (UnicodeDecodeError, UnicodeError):\n continue\n return c\n print(\"Encoding not detected. 
Please pass encoding value manually\")", "def parse_file(self, source):\n # If this is a file-like object, we should be able to read it.\n try:\n raw_data = source.read()\n except AttributeError:\n # This raises FileNotFoundError if the file doesn't exist.\n with open(source) as source_obj:\n raw_data = source_obj.read()\n\n # Parse the data in string format.\n return self.parse_string(raw_data)", "def read_file(filename, encoding = None):\n filename = from_posix(filename)\n if not encoding:\n # Detect encoding\n encoding = detect_encoding(filename)\n\n # Finally, read the file in the detected encoding\n import io\n with io.open(filename, mode = 'r', encoding = encoding) as handle:\n return handle.read()", "def simple_text_reader(text_file):\n with open(text_file, 'rt') as file:\n data = file.read()\n return data", "def read_transcription_units(filename, sep=\"\\t\", comment=\"#\", encoding=None,\n mode=\"rb\", **kw_args):\n\n def parse_flat_file(file_handle):\n raise NotImplementedError\n\n def parse_xhtml(file_handle):\n soup = BeautifulSoup(file_handle, \"lxml\")\n for row in soup.rowset.find_all(name=\"row\", recursive=False):\n prom = misc.convert(row.promoter_id.string, unicode)\n tu = misc.convert(row.transcription_unit_id.string, unicode)\n units[prom].append(tu)\n\n # read information from the file\n kw_args[\"mode\"] = mode\n kw_args[\"encoding\"] = encoding\n units = defaultdict(list)\n with open_file(filename, **kw_args) as (file_h, ext):\n if ext == \".xml\":\n parse_xhtml(file_h)\n else:\n parse_flat_file(file_h)\n return dict(units)", "def read_file(filename):\n with codecs.open(filename, 'r', 'utf8') as f:\n return f.read()", "def read_file(filename):\n with codecs.open(filename, 'r', 'utf8') as f:\n return f.read()", "def _is_utf8(filename: str) -> bool:\n import codecs\n\n try:\n f = codecs.open(filename, encoding=\"utf-8\", errors=\"strict\")\n for _ in f:\n pass\n return True\n except UnicodeDecodeError:\n return False", "def readNormalizer(language):\n\n encoding = None\n\n fname = os.path.join(nm_dir, '%s.txt' % language) \n if not os.path.exists(fname):\n return []\n\n lst = []\n for l in open(fname): \n if not l.strip(): continue\n\n mo = enc_reg.match(l)\n if mo:\n encoding= mo.group(1)\n continue\n\n if l.startswith('#'): continue\n\n fields = l.split()\n if len(fields) == 1:\n fields = (fields[0], '') # replace XX with ''\n\n k = unicode(fields[0], encoding) \n v = unicode(fields[1], encoding) \n\n lst.append((k, v))\n\n return lst", "def test_file_readlines_unicode(self):\n FileWriter(self.unicode_path).write_utf8(self.multiline_unicode_string)\n line_list = FileReader(self.unicode_path).readlines_utf8()\n self.assertEqual(line_list, self.uni_multi_list)", "def test_file_bin_read_unicode_as_bin(self):\n FileWriter(self.unicode_path).write_utf8(self.unicode_string)\n bin_data = FileReader(self.unicode_path).read_bin() #read unicode file as binary\n uni_text = bin_data.decode(\"utf-8\") #decode to utf-8\n self.assertEqual(uni_text, self.unicode_string)", "def unicode_csv_reader(data, **kwargs):\r\n\tdata_file = csv.reader(data, **kwargs)\r\n\tfor row in data_file:\r\n\t\tyield [str(cell) for cell in row]", "def _read_data_file(userfile, separator, header, types=None):\n if not file_exists:\n return None\n from pandas import read_csv\n\n #create dataframe for read in file\n df = None\n\n #handle different parameters for read\n head = _handle_header(header)\n separ = _handle_separator(separator)\n dtypes = _handle_dtypes(types)\n\n #attempt to read file with given 
parameters\n try:\n if head is None:\n df = read_csv(userfile, sep=separ, header=None)\n else:\n df = read_csv(userfile, sep = separ, header = head)\n\n except OSError as e:\n print(\"Error importing file: '\" + userfile + \"'\")\n print(e)\n return None\n if dtypes is not None:\n df = _change_types(df, dtypes)\n return (df)", "def read(text):\n text = str(text)\n if not helpers.contains_only_phonetic_chars(text):\n raise NonSupportedTextException()\n return _process_replacements(text)", "def _read_csv(cls, input_file, quotechar=None):\r\n with open(input_file, \"r\", encoding=\"utf-8\") as f:\r\n reader = csv.reader(f, delimiter=\",\", quotechar=quotechar)\r\n lines = []\r\n for line in reader:\r\n if sys.version_info[0] == 2:\r\n line = list(unicode(cell, 'utf-8') for cell in line)\r\n lines.append(line)\r\n return lines", "def _read_tsv(cls, input_file):\n with open(input_file, \"r\", encoding=\"cp1252\") as f:\n pre_lines = f.readlines()\n post_lines = []\n for line in pre_lines:\n post_lines.append(line.strip().split(\"\\t\"))\n return post_lines", "def parse(self, fp, headersonly=False):\n fp = TextIOWrapper(fp, encoding='ascii', errors='surrogateescape')\n try:\n return self.parser.parse(fp, headersonly)\n finally:\n fp.detach()", "def read_text(self, encoding):\n with self.open(\"r\", encoding=encoding) as f:\n return f.read()", "def _read_tsv(cls, input_file, quotechar='\"'):\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines", "def test_parse_mapping_file_handles_errors(self):\r\n # Empty file\r\n self.assertRaises(QiimeParseError,\r\n parse_mapping_file,\r\n [])\r\n # string\r\n self.assertRaises(QiimeParseError,\r\n parse_mapping_file,\r\n 'my_mapping_file.txt')\r\n # invalid format (no header line with leading # sign)\r\n self.assertRaises(QiimeParseError,\r\n parse_mapping_file,\r\n ['sampleID\\ta\\tb',\r\n '1\\tf\\t43',\r\n '2\\tt\\t44'])\r\n # invalid format (no non-header lines)\r\n self.assertRaises(QiimeParseError,\r\n parse_mapping_file,\r\n ['#sampleID\\ta\\tb'])\r\n # invalid format (no header line)\r\n self.assertRaises(QiimeParseError,\r\n parse_mapping_file,\r\n ['1\\tf\\t43',\r\n '2\\tt\\t44'])", "def parse_file(self, file_or_filename, parse_all=False):\n try:\n file_contents = file_or_filename.read()\n except AttributeError:\n with open(file_or_filename, \"r\") as f:\n file_contents = f.read()\n return self.parse_string(file_contents, parse_all)", "def _read_tsv(file_path):\n translation_pairs = []\n with file_path.open() as f:\n # Note: the correct way to do this is with csv.DictReader, but some examples\n # have quote characters that confuse the csv parser. Since we know the\n # source never has its own tab or newline characters, basic Python string\n # manipulation is fine here, as long as the model doesn't predict tabs or\n # newlines.\n for line in f:\n line = line.strip()\n line = line.split('\\t')\n if len(line) != 2:\n raise ValueError(\n f'Line {line} could not be parsed. You may need to manually '\n 'replace tab or newline characters in the model output with '\n 'spaces.'\n )\n source, translation = line\n translation_pairs.append(\n evaluation.TranslationPair(source=source, translation=translation)\n )\n return translation_pairs", "def test_windows_1252_1_file(self):\n\t\tmain.Main(['input/windows_1252_1.txt']).run()\n\t\tself.assertTrue(filecmp.cmp('output/output.csv', 'output/windows_1252_1.csv'))" ]
[ "0.6560378", "0.62868285", "0.61147213", "0.6035779", "0.6029811", "0.5926391", "0.5827321", "0.5804859", "0.58034223", "0.5774665", "0.5699063", "0.56861764", "0.56857336", "0.56818", "0.56673634", "0.5613292", "0.56082886", "0.55889183", "0.5518909", "0.5518909", "0.5518909", "0.55159867", "0.55073386", "0.5488", "0.5467707", "0.5462672", "0.5461123", "0.54584926", "0.54452777", "0.54385155", "0.5428859", "0.5408091", "0.54025745", "0.53977805", "0.53501135", "0.53345555", "0.53270483", "0.5326952", "0.5326054", "0.5316049", "0.53136176", "0.5301101", "0.5296773", "0.52926505", "0.52903634", "0.5279742", "0.52745557", "0.52690434", "0.52516073", "0.5250611", "0.5235892", "0.52281916", "0.5225652", "0.5220494", "0.52132344", "0.5211097", "0.520259", "0.52015793", "0.5191538", "0.51897633", "0.51787394", "0.5176887", "0.51690793", "0.5160847", "0.5152664", "0.5130986", "0.5127576", "0.51214015", "0.5120436", "0.5115808", "0.5111099", "0.51107186", "0.5106272", "0.50902736", "0.50875413", "0.50857496", "0.50824136", "0.5079789", "0.50756955", "0.5073268", "0.5068103", "0.50515836", "0.5051333", "0.5051333", "0.50507766", "0.50418323", "0.50376564", "0.5026355", "0.5024894", "0.50218046", "0.5015802", "0.5014692", "0.5014271", "0.50103515", "0.5005578", "0.49995333", "0.49962002", "0.49961233", "0.4993816", "0.49914452" ]
0.60616267
3
Initialize your data structure here.
def __init__(self):
    self.intervals = []

### O(len(intervals))
def addNum(self, val: int) -> None:
    if(len(self.intervals) == 0):
        self.intervals.append([val, val])
        return

    flag, left = 1, -math.inf
    for i, interval in enumerate(self.intervals):
        for point in interval:
            right = point
            if(left == val or right == val):
                return
            elif(left < val and right > val):
                if(flag):
                    ### merge case
                    if(val == left+1 and val == right -1):
                        self.intervals[i-1][1] = self.intervals[i][1]
                        self.intervals.pop(i)
                    elif(val == left+1):
                        self.intervals[i-1][1] = val
                    elif(val == right-1):
                        self.intervals[i][0] = val
                    else:
                        self.intervals.insert(i, [val, val])
                ### val in one of the existing intervals
                return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _init_empty(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self.data = []\n self.record = {}", "def initialize(self):\n self.data = None\n self.errors = []", "def initialize(self):\n\t\tpass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\r\n pass", "def initialize(self):\r\n pass", "def __init__(self):\n self.structure = {}", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def __init__(self):\n\n # initialise the empty mappings dictionary\n self.data = {\n 'loan_id': None,\n 'product': None,\n 'origination_date': None,\n 'reversion_date': None,\n 'rate_term': None,\n 'loan_amount': None,\n 'initial_rate': None,\n 'reversion_rate': None,\n 'term': None,\n 'interest_only_amount': None,\n 'upfront_fees': None,\n 'upfront_costs': None,\n 'entity_eir': None\n }", "def __init__(self):\n self._data = [] # non-public underlying Python list as storage", "def __init__(self):\n self._distance_data = []\n self._location_data = []\n self._package_data = []", "def __init__(self):\n\t\tsuper().__init__()\n\t\t\n\t\t# Typically a list of data here\n\t\t# Typically a dict of header keys and values here", "def __init__(self):\n self.data = []", "def __init__(self):\n self.data = []", "def __init__(self):\n self.data = []\n self.idx = {}", "def __init__(self):\n self._dict = {}\n self._array = []", "def init(self) -> None:", "def __init__(self):\n self.relation = ''\n self.attributes = []\n self.attribute_types = dict()\n self.attribute_data = dict()\n self.comment = []\n self.data = []\n pass", "def _init(self):\n pass", "def initialize(self):\n return", "def initialize(self):\n self.muondEdx = []\n self.muondNdx = []\n self.muonmomentum = []\n self.piondEdx = []\n self.piondNdx = []\n self.pionmomentum = []\n self.kaondEdx = []\n self.kaondNdx = []\n self.kaonmomentum = []\n self.protdEdx = []\n self.protdNdx = []\n self.protmomentum = []\n self.elecdEdx = []\n self.elecdNdx = []\n self.elecmomentum = []", "def initialize(self) -> None:\n pass", "def initialize(self):\n pass # pragma: no cover", "def __init__(self):\n self.d = {}\n self.l = []", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def initialize(self):\n raise NotImplementedError", "def initialize(self):\n raise NotImplementedError", "def initialize(self):\n raise NotImplementedError", "def initialise(self):", "def __init__(self):\n self.l = {}\n self.s = {}", "def __init__(self):\n self._data=[]", "def __init__(self, initial_data=[]):\n hdict.__init__(self)\n\n for elt in initial_data:\n self.add(elt)", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def init(self):\n raise NotImplementedError", "def init(self):\n raise NotImplementedError", "def init(self) -> None:\n ...", "def initialize(self):\n\n db = dict()\n\n db['meta'] = Meta(None)\n db['race'] = Race(None, None, None, None, None)\n db['track'] = Track(None, None)\n db['classes'] = set([])\n db['teams'] = set([])\n db['drivers'] = 
set([])\n\n self.db = db", "def __init__(self):\n self.keys = []\n self.values = []", "def __init__(self):\n self.d = {}\n self.h = []", "def memb_init(self):\n self.initialize()", "def __init__(self):\n self.dic={}\n self.data=[]", "def _init_data(self) -> None:\n self.dtype = dict()\n self.shape = dict()\n self.size = dict()\n self.attrs = dict()\n self.data_ptr = dict()\n\n if self.mode == 'r':\n for k in self.fp.keys():\n self.dtype[k] = self.fp[k].dtype\n self.shape[k] = self.fp[k].shape\n self.size[k] = self.fp[k].shape[0]\n self.data_ptr[k] = 0", "def __init__(self):\n dict.__init__(self)\n self.datatype = None", "def __init__(self, data={}):\n self._update_(data)", "def _init_data(self, data):\n assert type(data) is dict, \"dict expected: %r\" % type(data)\n assert len(data) is 1, \"size of dict should be 1: %r\" % len(data)\n self._name = data.keys()[0]\n self._data = np.asarray(data[self._name])\n self._set = True", "def __init__(self):\n self._data = PositionalList() # list of _Item instances", "def __init__(self):\n self.d = {}", "def __init__(self):\n self.d = {}", "def __init__(self):\n self.d = {}", "def __init__(self):\n self.d = {}", "def _init(self):\n raise NotImplementedError", "def __init__(self):\r\n self.indices = {}\r\n self.data = []\r\n self.len = 0", "def __init__(self):\n self.metadata = {}\n self.geometry = {'array': None, \n 'geom': None, \n 'wkt': None}", "def __init__(self, data):\n self.data = data\n return", "def __init__(self):\n self.root = [None, dict(), False] # val, sons, end-able", "def _initialize_data(self):\n self.unique_id = 123\n\n self.gas_valve_open = False\n self.buffer_valve_open = False\n self.pump_valve_open = False\n\n self.operatingmode = 0\n\n self.sample_pressure_high_limit = 100\n self.sample_pressure_low_limit = 10\n self.sample_pressure = 0\n\n self.error = 0\n\n self.buffer_pressure_high = True", "def __init__(self):\n self.data = {}\n self.refresh()", "def initialize(self):\n self.voteskips = []\n self.response = {}\n self.route = {}\n self.userlist = []\n self.poll = []\n self.media = []\n self.init = False\n self.question = None\n self.jumble = None\n self.imgur = None", "def _initialize_data(self):\n self.reset_count = 0\n self._idn_no_firmware = \"KEPCO,BOP 50-20,E1234,\"\n self._firmware = 2.6\n self._init_data()", "def init(self):", "def init(self):", "def __init__(self):\n self.x = {}\n self.len = 0\n self.annotations = {}", "def __init__(self):\n # Dict of minecraft object in form of \"dict[id] = name\"\n self.data_values = dict()\n self.parser = self.setup_parser()", "def initialize(self):\n self.keys = [None] * BUCKET_SIZE\n self.values = [None] * BUCKET_SIZE", "def __init__(self, data: dict = {}):\n pass", "def initialize(self): \r\n pass", "def __init__(self):\n self._data = PositionalList() # list of Item instances", "def __init__(self):\n self.table = {}\n self.ls = []", "def initialize(self):\r\n self.bucket_array.initialize()", "def initialise(self):\r\n return", "def initialise(self):\r\n return", "def __init__(self, data):\n self.data = data", "def __init__(self, data):\n self.data = data", "def __init__(self, data):\n self.data = data", "def __init__(self, data):\n self.data = data", "def __init__(self, data=None):\n self.data = data", "def __init__(self):\n self._data = set()", "def __init__(self):\n self.key_dict = {}\n self.value_dict = {}\n self.head, self.last = None, None" ]
[ "0.77675813", "0.7644977", "0.7644977", "0.7644977", "0.7644977", "0.7644977", "0.7644977", "0.75942147", "0.7585732", "0.75570554", "0.75295925", "0.75295925", "0.75295925", "0.75295925", "0.75295925", "0.7495997", "0.7495997", "0.7476943", "0.7476324", "0.7476324", "0.7476324", "0.7476324", "0.7476324", "0.7476324", "0.7476324", "0.7476324", "0.7444046", "0.7426174", "0.7414901", "0.7412705", "0.7389825", "0.7389825", "0.73886", "0.7387709", "0.7382186", "0.7322768", "0.7314994", "0.7305519", "0.7298582", "0.7286579", "0.7270692", "0.7258291", "0.7251561", "0.7251561", "0.7251561", "0.724886", "0.724886", "0.724886", "0.7242286", "0.7238919", "0.72366285", "0.72099364", "0.7199996", "0.7199996", "0.7199996", "0.7199996", "0.71975994", "0.71975994", "0.7194569", "0.7188321", "0.718493", "0.71754736", "0.7161506", "0.71592903", "0.7150457", "0.71463454", "0.7134975", "0.7130184", "0.71225584", "0.71120083", "0.71120083", "0.71120083", "0.71120083", "0.71096426", "0.710111", "0.7098334", "0.7084819", "0.7066696", "0.7064153", "0.70615715", "0.70605284", "0.70595896", "0.70373815", "0.70373815", "0.70342225", "0.7021891", "0.7014882", "0.6998706", "0.6993985", "0.69936633", "0.6987309", "0.6983285", "0.69756395", "0.69756395", "0.6975564", "0.6975564", "0.6975564", "0.6975564", "0.69677687", "0.6954737", "0.693993" ]
0.0
-1
Clean up any files leftover from past runs with different hyperparameters.
def clean_up(model_path):
    cmds = ["rm */grad*.pickle",
            "rm -r checkpoints",
            "rm */train_len",
            "rm log_human_read.csv",
            "rm */log_human_read.csv",
            "rm -r best_model",
            "rm */*epoch*"]
    for cmd in cmds:
        os.system("cd {} && {}".format(model_path, cmd))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _clean_files(self):\n if self.delfiles & 1:\n ProcUtils.remove(self.okm)\n if self.delfiles & 2:\n ProcUtils.remove(self.hkm)\n if self.delfiles & 4:\n ProcUtils.remove(self.qkm)\n if self.delfiles & 8:\n ProcUtils.remove(self.obc)\n\n if self.log is False:\n ProcUtils.remove(self.pcf_file)\n base = os.path.basename(self.okm)\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogReport', base])))\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogStatus', base])))\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogUser', base])))", "def cleanUpTemporaryFiles(options):\n os.system(\"rm \"+options.output_directory_per_run+\"/*.abundance\")\n os.system(\"rm \"+options.output_directory_per_run+\"/*.phasing_score\")\n os.system(\"rm \"+options.output_directory_per_run+\"/*regionsOfInterest*\")\n os.system(\"mv \"+options.output_directory_per_run+\"/* \"+options.output_directory_per_run+\"/../\")\n os.system(\"rm -rf \"+options.output_directory_per_run)", "def cleanup_intermediate_files(self):\n self.cmd(\"rm -f {local_temp_dir}/*rg_dict* \\\n {local_temp_dir}/*aln* \\\n {local_temp_dir}/snappy*\".\n format(\n local_temp_dir=self.local_temp_dir\n ),\n shell=True)", "def clean(self):\n print(\"Cleaning outputs in %s\" % self.args.output)\n files = glob.glob(self.args.output + \"*.pkl\")\n for f in files:\n if os.path.exists(f):\n os.remove(f)", "def _clean_workdir(self):\n\t\ttoremove = [self._get_config_filepath(), self._get_params_filepath(), self._get_conv_filepath(), self._get_psf_filepath()]\n\t\tfor filepath in toremove:\n\t\t\tif os.path.exists(filepath):\t\n\t\t\t\tlogger.debug(\"Removing existing file %s...\" % (filepath))\n\t\t\t\tos.remove(filepath)", "def _clean_up_temporary_files(dataset_dir):\n return", "def clean(self):\n actual_output_file = path.splitext(self.source_name)[0] + \".actual\"\n if path.exists(self.binary_name):\n os.unlink(self.binary_name)\n if path.exists(actual_output_file):\n os.unlink(actual_output_file)", "def clean_up(self):\n self.fname = None\n self.failed_files = []\n self.custom_failed = []\n self.results = None", "def cleanup(self):\n self.qemu.clean_run_files()\n for tmp in glob.glob(self.configfile + \"?*\"):\n os.unlink(tmp)", "def clean_files(self):\n self.filenames.clear()", "def clean_outputs(self) -> None:\n\n def _delete_if_not_none(fn: Optional[str]) -> None:\n if fn is not None:\n Path(fn).unlink()\n\n _delete_if_not_none(self.config[\"LOG_FILE\"])\n\n for file_ in self.exporter.get_all_files():\n file_.unlink()", "def clean_folder(self):\n # Remove the 1st output\n # Remove the 2nd output\n # Remove the calibrated output\n try:\n os.remove(\"output1.csv\")\n except:\n pass\n try: \n os.remove(\"output2.csv\")\n except:\n pass\n try:\n os.remove(self.__add_output_file_location(self._output_filename))\n except:\n pass\n \n list = os.listdir(\"edited\")\n for file in list:\n file = os.path.join(\"edited\", file)\n try:\n os.remove(file)\n except:\n pass\n \n list = os.listdir(\"extracted\")\n for file in list:\n file = os.path.join(\"extracted\", file)\n try:\n os.remove(file)\n except:\n pass", "def cleanup_intermediate_files():\n\n dirs = (DIR_PAGE, DIR_SRGB, DIR_VTI, DIR_TIFF, DIR_BACK, DIR_TEXT)\n map(lambda dir: shutil.rmtree(os.path.join(cwd, dir)) , dirs)", "def clean():\n possible_outputs = (\n '{}.html'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.epub'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.pdf'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.docx'.format(CONFIG['FULL_PROJECT_NAME']),\n 
'{}.odt'.format(CONFIG['FULL_PROJECT_NAME']),\n )\n\n for filename in possible_outputs:\n if os.path.exists(filename):\n os.remove(filename)\n print(\"Removed {}\".format(filename))", "def _clean_input_dir():\n for existing_file in os.listdir(join(input_dir, 'fitting')):\n if existing_file != '.hold':\n os.remove(join(input_dir, 'fitting', existing_file))", "def _clean_input_dir():\n for existing_file in os.listdir(join(input_dir, 'analysis')):\n if existing_file != '.hold':\n os.remove(join(input_dir, 'analysis', existing_file))", "def cleanup(self):\n files = self.nlst()\n latest = self.latest_filename\n for filename in files:\n if filename != latest:\n result = self.delete(filename)\n logger.info(f\"Deleted old export from FTP: {result}\")", "def _clean_up_optimization():\n for (root, dirs, files) in walk(TEMP_MODULES_DIR_PATH, topdown=False):\n for file in files:\n if file.startswith(\"__temp_\"):\n remove(f\"{root}/{file}\")\n try:\n rmdir(root)\n except OSError:\n G.warn_(f\"Unidentified file found in temporary directory: {root}\")", "def _cleanup(self):\n os.system(\"rm -r %s/*\" %(self._snippet_index_dir))\n os.system(\"rm %s/*\" %(self._para_dir))\n os.system(\"rm %s/*\" %(self._temp_dir))\n os.system(\"rm %s/*\" %(self._snippet_result_dir))", "def clean(self):\n files = ['CHG', 'CHGCAR', 'POSCAR', 'INCAR', 'CONTCAR',\n 'DOSCAR', 'EIGENVAL', 'IBZKPT', 'KPOINTS', 'OSZICAR',\n 'OUTCAR', 'PCDAT', 'POTCAR', 'vasprun.xml',\n 'WAVECAR', 'XDATCAR', 'PROCAR', 'ase-sort.dat',\n 'LOCPOT', 'AECCAR0', 'AECCAR1', 'AECCAR2',\n 'WAVECAR.GTO', 'vasp.out', 'vasp.err']\n for f in files:\n try:\n os.remove(f)\n except OSError:\n pass", "def clean():\n clean_files()", "def _cleanup_files(self):\n\n for root, dirs, files in os.walk(self.build_directory):\n dirs_to_delete = [\n Path(root).joinpath(x) for x in dirs if x == '__pycache__'\n ]\n files_to_delete = [\n Path(root).joinpath(x) for x in files if Path(x).suffix == '.pyc'\n ]\n for d in dirs_to_delete:\n logger.info('Deleting: %s', d)\n shutil.rmtree(d)\n for f in files_to_delete:\n logger.info('Deleting: %s', f)\n f.unlink()", "def remove_intermediate_files(self):\r\n\r\n # tmp files are written in the current dir,\r\n # app controller always jumps into dir specified via exec_dir\r\n # Note: blast intermediates are not removed\r\n exec_dir = str(self.Parameters['--exec_dir'].Value)\r\n inp_file_name = str(self.Parameters['--query_NAST'].Value)\r\n\r\n exec_dir = exec_dir.rstrip('\"')\r\n exec_dir = exec_dir.lstrip('\"')\r\n\r\n inp_file_name = inp_file_name.rstrip('\"')\r\n inp_file_name = inp_file_name.lstrip('\"')\r\n\r\n tmp_suffixes = [\".CPS\", \".CPS.CPC\", \".CPS_RENAST\", \".CPS_RENAST.cidx\",\r\n \".CPS.CPC.wTaxons\", \".cidx\"]\r\n cs_tmp_files = [\r\n exec_dir +\r\n '/' +\r\n inp_file_name +\r\n x for x in tmp_suffixes]\r\n remove_files(cs_tmp_files, error_on_missing=False)\r\n\r\n db_param = self.Parameters['--db_NAST']\r\n if db_param.isOn():\r\n nast_db_name = str(db_param.Value)\r\n nast_db_name = nast_db_name.rstrip('\"')\r\n nast_db_name = nast_db_name.lstrip('\"')\r\n\r\n # Better do not remove this file since other ChimeraSlayer\r\n # instances running on the same ref set might use this file\r\n # Should be rather deleted in the calling function\r\n# remove_files([nast_db_name + \".cidx\"],\r\n# error_on_missing=False)\r\n\r\n fasta_param = self.Parameters['--db_FASTA']\r\n if fasta_param.isOn():\r\n fasta_name = str(fasta_param.Value)\r\n fasta_name = fasta_name.rstrip('\"')\r\n fasta_name = fasta_name.lstrip('\"')\r\n\r\n 
blast_db_files = [\r\n fasta_name +\r\n x for x in [\r\n \".nsq\",\r\n \".nin\",\r\n \".nhr\",\r\n \".cidx\"]]\r\n remove_files(blast_db_files, error_on_missing=False)", "def cleanup_files(self):\n\n self.backup_files()\n self.delete_files()", "def cleanup(self):\n self.__log('Resetting value for output_filename, making way for another go.')\n self.output_filename = None", "def clear_data():\n for i in range(_MAX_NUM_TESTS):\n rand, ref = filename(i)\n if os.path.exists(rand):\n os.remove(rand)\n if os.path.exists(ref):\n os.remove(ref)", "def clean_cwd():\n\n # Generator of the files generated for each runs\n del_files = (file for file in os.listdir() if file.endswith('.vtk')\n or file.endswith('.dat')\n or file.startswith('eeldata')\n or file.endswith('.log'))\n\n for file in del_files:\n try:\n os.remove(file)\n print(\"\\rRemoved {:s} succesfully!\".format(file), end=' '*15)\n except:\n print(\"\\rFailed to remove {:s}\".format(file))\n raise\n\n print('')", "def clean_up(self):\n try:\n data_dir = os.environ[\"DATA\"]\n plots_dir = os.environ[\"PLOTS\"]\n logs_dir = os.environ[\"LOGS\"]\n except KeyError as detail:\n print \"GenerateSpectrum.clean_up: error\", detail, \"not set\"\n print \" --> source analysis environment scripts before running!\"\n sys.exit(1)\n for root, dirs, files in os.walk(os.getcwd()):\n for file in files:\n is_data = re.search(r\".*\\.root$\", file)\n is_plot = re.search(r\".*\\.png$\", file)\n hostname = socket.gethostname()\n is_log = re.search(r\"^rat\\.\"+hostname+r\"\\.[0-9]+\\.log$\", file)\n if is_data:\n try:\n root_file = TFile(file)\n tree = root_file.Get(\"T\")\n tree.ls()\n except ReferenceError as detail:\n \"generate_spectrum.clean_up: error in TFile,\", detail\n sys.exit(1)\n file_manips.copy_file(os.path.join(root, file), data_dir)\n elif is_plot:\n file_manips.copy_file(os.path.join(root, file), plots_dir)\n elif is_log:\n file_manips.copy_file(os.path.join(root, file), logs_dir)", "def clean_up(self, early_stopping, current_epoch):\n\n early_stopping: EarlyStopping = early_stopping\n\n if early_stopping.enable_stopping:\n lower_limit = early_stopping.best_loss_index - 1\n else:\n lower_limit = current_epoch - self.config.model_files_stored - 1\n\n for file in listdir(self.training_model_path):\n\n try:\n epoch_of_file = int(file.split('.')[0].split('-')[-1])\n if epoch_of_file <= lower_limit:\n os.remove(self.training_model_path + file)\n except ValueError:\n pass\n except Exception as e:\n print(e)", "def clean_data():\n for clean_file in clean_files:\n file_list = [f for f in os.listdir(\".\") if f.endswith(clean_file)]\n for f in file_list:\n os.remove(f)", "def tearDown(self) -> None:\n filtered = [f for f in glob.glob('steps/tests/test_output/*') if not re.match(r'\\.keep', f)]\n for file in filtered:\n try:\n if Path(file).is_dir():\n shutil.rmtree(file)\n else:\n os.remove(file)\n except PermissionError as pe:\n # We don't necessarily care that much\n continue", "def _clean_up_temporary_files(dataset_dir):\n filename = _DATA_URL.split('/')[-1]\n filepath = os.path.join(dataset_dir, filename)\n tf.gfile.Remove(filepath)\n\n tmp_dir = os.path.join(dataset_dir, 'cifar-100-python')\n tf.gfile.DeleteRecursively(tmp_dir)", "def cleanup(self):\n\t\tfor filename in self.cfg_files:\n\t\t\tif os.path.isfile(filename):\n\t\t\t\tsize = os.stat(filename)[6]\n\t\t\t\tif size == 0:\n\t\t\t\t\tos.remove(filename)\n\n\t\treturn True", "def cleanup(self):\n if self.cleanup_allowed:\n shutil.rmtree(self.out_dir)\n self.train_df, self.valid_df, 
self.test_df = None, None, None", "def deleteIntermediateFiles(self):\n uniq_files = set(self.files_to_delete)\n print (\"Deleting %d intermediate files\" % len(uniq_files))\n for fn in uniq_files:\n # don't delete log files\n if not fn.endswith(\".log\"):\n os.remove(fn)", "def cleanUp(self):\n print(\" cleaning up\",self.folderSave)\n for fname in glob.glob(self.folderSave+\"/*.*\"):\n if not fname.endswith(\".npy\") and not fname.endswith(\".csv\"):\n print(\" deleting\",os.path.basename(fname))\n os.remove(fname)", "def tearDown():\n for output_file_path in Path(output_dir).glob(\"test_voting_learner_cross_validate*\"):\n output_file_path.unlink()\n\n for output_file_path in Path(\".\").glob(\"test_voting_learner_cross_validate*\"):\n output_file_path.unlink()\n\n config_file_path = Path(config_dir) / \"test_voting_learner_cross_validate.cfg\"\n config_file_path.unlink()\n\n remove_jsonlines_feature_files(train_dir)", "def cleanUp(self):\n import evoware.fileutil as F\n F.tryRemove(self.f_project, verbose=(self.VERBOSITY>1), tree=1)", "def clean_condor(c):\n for fname in glob.glob(os.path.join(ROOT_DIR, '*.dag')):\n _delete_pattern(fname + '.*')\n for fname in glob.glob(os.path.join(ROOT_DIR, '*.sub')):\n temps = []\n with open(fname, 'r') as f:\n for line in f.readlines():\n for w in ('log', 'error', 'output'):\n if line.startswith(w):\n temp = line.split('=')[-1].strip()\n if os.path.isfile(temp):\n temps.append(temp)\n if temps:\n print(f'Found submit script {fname}:')\n for temp in temps:\n _delete_file(temp, indent=' ')\n _delete_file(f'{ROOT_DIR}/docker_stderror')\n _delete_file(f'{ROOT_DIR}/parsetab.py')", "def cleanUp(self):\n\n tapeList = sorted(glob.glob('TAPE?'))\n tapeList = ['TAPE%d' % num for num in [1, 2, 5, 6, 7, 10]]\n for tape in tapeList:\n if os.path.isfile(tape): os.remove(tape)\n # end TAPE loop", "def cleanup_precluster_intermediate_files(batch_index):\n files = [\"seed{0}.S.fasta\".format(batch_index),\n \"seed{0}.orphans.fasta\".format(batch_index),\n \"batch{0}.fasta\".format(batch_index),\n \"batch{0}.remains.fasta\".format(batch_index),\n \"batch{0}.remains2.fasta\".format(batch_index)]\n\n files += glob.glob(\"batch{0}*.minimap\".format(batch_index))\n for file in files:\n try:\n os.remove(file)\n except:\n print >> sys.stderr, \"Failure to remove {0}. Ignore.\".format(file)", "def cleanup() -> None:\n\n for fname in glob(os.path.join(tdir, 'alexandria.*')):\n if os.path.splitext(fname)[1] not in {'.c', '.h'}:\n os.unlink(fname)", "def dev_clean():\n clean_files(\"csv\", True)\n clean_files(\"jsontxt\", True)", "def cleanUp(self):\r\n remove_files(self._db_files_to_remove, error_on_missing=False)", "def cleanup(options=None):\n if options is None:\n for f in glob.glob(\"*.grmpy.*\"):\n os.remove(f)\n elif options == 'regression':\n for f in glob.glob(\"*.grmpy.*\"):\n if f.startswith('regression'):\n pass\n else:\n os.remove(f)", "def clean_filesystem(files=[]):\n remove_files(files + find_cache_files())", "def scrub():\n\n\tlocal(\"rm -fr dist build\")\n\tlocal(\"find . 
-name \\\"*.pyc\\\" -exec rm '{}' ';'\")", "def perform_cleanup(scratch_image_name, scratch_text_name_root):\n\tfor name in (scratch_image_name, scratch_text_name_root + '.txt', \"tesseract.log\"):\n\t\ttry:\n\t\t\tos.remove(name)\n\t\texcept OSError:\n\t\t\tpass", "def tearDown(self):\n testing_dir = os.path.split(os.path.realpath(__file__))[0]\n for f in glob.glob(os.path.join(testing_dir, \"*\")):\n if f.split(\".\")[-1] in [\"o\", \"out\", \"pyc\", \"log\"]:\n subprocess.call(['rm', f])", "def clean():\n for f in [f for f in os.listdir() if f.endswith(\".part\")]:\n os.remove(f)", "def cleanup(self):\r\n for f in [i for d in self.data.values() for i in d[\"filenames\"]]:\r\n try:\r\n os.unlink(f)\r\n except Exception: pass\r\n self.Destroy()", "def clean_file_before_test():\n\n if os.path.exists(LOG_FOLDER):\n for file in os.listdir(LOG_FOLDER):\n os.remove(LOG_FOLDER + \"/\" + file)", "def rmGt(self):\n gtfiles = [self.outselect, self.outmktime, self.outltcube,\n self.outbincub, self.outbinmap, self.outbinexp, \n self.outexpmap, self.outsrcmap, \n os.path.join(self.workpath, 'SrcList_cntspec'+self.suffix+'.fits'),\n os.path.join(self.workpath, 'SrcList_cntspec'+self.suffix+'.log')]\n for f in gtfiles:\n if os.path.isfile(f):\n os.remove(f)\n return", "def delete_previous_files():\n def delete(root: Path):\n shutil.rmtree(root / 'output', ignore_errors=True)\n for p in root.iterdir():\n if str(p).endswith(('.log', 'jobs.csv', 'csv.lock', '.yaml')):\n p.unlink()\n\n delete(wt_registration_dir)\n delete(mut_registration_dir)", "def remove_stale_outputs(self) -> None:\n for path in sorted(self.initial_outputs):\n if self.should_remove_stale_outputs and not is_precious(path):\n Logger.file(f\"Remove the stale output: {path}\")\n Invocation.remove_output(path)\n else:\n Stat.forget(path)\n\n self.should_remove_stale_outputs = False", "def test_final_cleanup():\n cleanup_file(\"tfsaves\")", "def cleanup():\r\n compiledir = theano.config.compiledir\r\n for directory in os.listdir(compiledir):\r\n file = None\r\n try:\r\n try:\r\n filename = os.path.join(compiledir, directory, \"key.pkl\")\r\n file = open(filename, 'rb')\r\n #print file\r\n try:\r\n keydata = cPickle.load(file)\r\n for key in list(keydata.keys):\r\n have_npy_abi_version = False\r\n have_c_compiler = False\r\n for obj in flatten(key):\r\n if isinstance(obj, numpy.ndarray):\r\n #Reuse have_npy_abi_version to\r\n #force the removing of key\r\n have_npy_abi_version = False\r\n break\r\n elif isinstance(obj, basestring):\r\n if obj.startswith('NPY_ABI_VERSION=0x'):\r\n have_npy_abi_version = True\r\n elif obj.startswith('c_compiler_str='):\r\n have_c_compiler = True\r\n elif (isinstance(obj, (theano.gof.Op, theano.gof.Type)) and\r\n hasattr(obj, 'c_code_cache_version')):\r\n v = obj.c_code_cache_version()\r\n if v not in [(), None] and v not in key[0]:\r\n #Reuse have_npy_abi_version to\r\n #force the removing of key\r\n have_npy_abi_version = False\r\n break\r\n\r\n if not have_npy_abi_version or not have_c_compiler:\r\n try:\r\n #This can happen when we move the compiledir.\r\n if keydata.key_pkl != filename:\r\n keydata.key_pkl = filename\r\n keydata.remove_key(key)\r\n except IOError, e:\r\n _logger.error(\r\n \"Could not remove file '%s'. To complete \"\r\n \"the clean-up, please remove manually \"\r\n \"the directory containing it.\",\r\n filename)\r\n if len(keydata.keys) == 0:\r\n shutil.rmtree(os.path.join(compiledir, directory))\r\n\r\n except EOFError:\r\n _logger.error(\r\n \"Could not read key file '%s'. 
To complete \"\r\n \"the clean-up, please remove manually \"\r\n \"the directory containing it.\",\r\n filename)\r\n except IOError:\r\n _logger.error(\r\n \"Could not clean up this directory: '%s'. To complete \"\r\n \"the clean-up, please remove it manually.\",\r\n directory)\r\n finally:\r\n if file is not None:\r\n file.close()", "def clean_up_temp_files():\n global __tmp_model_dir\n\n if __tmp_model_dir is not None:\n FileUtils.deleteDirectory(__tmp_model_dir)\n __tmp_model_dir = None", "def reset_old_files():\n commands = [\n 'rm -f {0}/tools/perf/page_sets/url*'.format(CHROMIUM_SRC),\n 'rm -f {0}/tools/perf/page_sets/data/url*'.format(CHROMIUM_SRC),\n 'rm -f ' \\\n '{0}/tools/perf/benchmarks/telemetryBenchmarks.py'.format(CHROMIUM_SRC),\n 'rm -f data/wpr_source/*',\n 'rm -f temp/*',\n 'rm -f data/results.db',\n 'rm -f {0}/data/har/*'.format(PLT_SRC),\n 'rm -f {0}/data/replay/*'.format(PLT_SRC),\n 'rm -f {0}/webpagereplay_logs/*'.format(CHROMIUM_SRC),\n 'rm -f {0}/telemetry/count.db'.format(PLT_SRC),\n ]\n\n for cmd in commands:\n p = Popen(cmd, shell=True)\n p.wait()", "def clean(self):\n if self.verbosity:\n self.header(\"Cleaning data files\")\n\n tsv_list = os.listdir(self.tsv_dir)\n\n if self.resume_mode:\n # get finished clean command logs of last update\n prev_cleaned = [\n x.file_name + '.TSV'\n for x in self.log_record.called.filter(\n command='cleancalaccessrawfile',\n finish_datetime__isnull=False\n )\n ]\n self.log(\"{} files already cleaned.\".format(len(prev_cleaned)))\n # remove these from tsv_list\n tsv_list = [x for x in tsv_list if x not in prev_cleaned]\n\n # Loop through all the files in the source directory\n if self.verbosity:\n tsv_list = progress.bar(tsv_list)\n for name in tsv_list:\n call_command(\n \"cleancalaccessrawfile\",\n name,\n verbosity=self.verbosity,\n keep_files=self.keep_files,\n )", "def teardown():\n for filename in files_to_delete:\n delete_file(filename)", "def cleanup(filePath):\n restoreRenderSettings()\n cleanupCamera()\n cleanupWorld()\n deleteOutputPath(filePath)", "def cleanup():", "def _clean_up_project_file(self):\n\n print \"Reading in project file...\"\n with open(self.project_file,'r') as f_in:\n project_json = json.load(f_in)\n\n # Go through design_files references\n configurations = project_json['Project']['Configurations']\n n = len(configurations)\n indices_to_delete = []\n for i in range(n):\n if not os.path.basename(configurations[i]) in self.design_files:\n indices_to_delete.append(i)\n\n indices_to_delete.reverse()\n for i in indices_to_delete:\n del configurations[i]\n\n # Go through design_space_files references\n design_space_models = project_json['Project']['DesignSpaceModels']\n n = len(design_space_models)\n indices_to_delete = []\n for i in range(n):\n if not os.path.basename(design_space_models[i]) in self.design_space_files:\n indices_to_delete.append(i)\n\n indices_to_delete.reverse()\n for i in indices_to_delete:\n del design_space_models[i]\n\n # Go through test_bench_files references\n #test_benches = project_json['Project']['TestBenches']\n #n = len(test_benches)\n #indices_to_delete = []\n #for i in range(n):\n # if not os.path.basename(test_benches[i]) in self.test_bench_files:\n # indices_to_delete.append(i)\n #\n #for i in indices_to_delete.reverse():\n # del test_benches[i]\n\n # Write out the new, reduced in size, project dictionary\n with open(self.project_file,'wb') as f_out:\n json.dump(project_json, f_out, indent=4)\n\n print \"Written out cleaned up project dictionary.\"", "def clean():\n try:\n 
os.unlink(options.coords + 'mirza_mrna_input' + '.fa')\n os.unlink(options.coords + 'mirza_mirna_input' + '.fa')\n os.unlink(options.coords + 'mirza_mirna_expressions' + '.fa')\n except:\n pass", "def tearDown(self):\n\n for fname in self.fnames:\n os.remove(fname)", "def file_removal(dataset, optim):\n\n try:\n os.remove('manual_logs/' + optim.lower() + '_train_loss_' + dataset.lower() + '.txt')\n except FileNotFoundError:\n print(\"No previous training loss logs for this optimizer detected.\")\n\n try:\n os.remove('manual_logs/' + optim.lower() + '_val_loss_' + dataset.lower() + '.txt')\n except FileNotFoundError:\n print(\"No previous validation loss logs for this optimizer detected.\")\n\n try:\n os.remove('manual_logs/' + optim.lower() + '_train_acc_' + dataset.lower() + '.txt')\n except FileNotFoundError:\n print(\"No previous training accuracy logs for this optimizer detected.\")\n\n try:\n os.remove('manual_logs/' + optim.lower() + '_val_acc_' + dataset.lower() + '.txt')\n except FileNotFoundError:\n print(\"No previous validation accuracy logs for this optimizer detected.\")", "def clean_up_for_next_cycle(self):\n shutil.copy(self.pr.config.releaseItemsFilePath, self.pr.config.backupFilesPath + '_'\n + str(datetime.now().strftime('%m-%d-%Y:%I.%M%p')) + '.txt') # take backup before clearing\n clear_file(self.pr.config.releaseItemsFileMergedBy)\n clear_file(self.pr.config.releaseItemsFilePath) # clear file for next release content\n # NOTE: user has to manually delete data added when in debug mode", "def clean_old_data():\n logger.info('Cleaning standalone files on disk...')\n for absolute_path in glob.glob(MEDIA_URL + '*'):\n file_name = os.path.basename(absolute_path)\n try:\n relative_path = os.path.join(AUDIOS_URL, file_name)\n audio = Audio.objects.get(filename=relative_path)\n if audio.get_type() == 'episode':\n try:\n # If there are inactive audios on its being\n for e in audio.podcast.episode_set.exclude(pk=audio.podcast.active_episode.pk):\n if not e.is_active():\n logger.info('Inactive audio found in podcast set. Erasing files.')\n e.delete_files()\n except Exception, e:\n logger.exception(e.message)\n except ObjectDoesNotExist, e:\n logger.info('A file with no audio registered in database')\n if os.path.isfile(relative_path):\n logger.info('Erasing: %s' % relative_path)\n os.remove(relative_path)\n logger.info('... 
Done.')", "def clean_chunk_files(dirpath):\n workdir = os.getcwd()\n os.chdir(dirpath)\n for filename in glob.glob(\"[0-9]*_[0-9]*_[0-9]*.hdf5\"):\n os.remove(filename)\n os.chdir(workdir)", "def removeRedundantFiles(workdir, outputfiles=[]):\n\n logger.info(\"Removing redundant files prior to log creation\")\n\n workdir = os.path.abspath(workdir)\n\n dir_list = [\"AtlasProduction*\",\n \"AtlasPoint1\",\n \"AtlasTier0\",\n \"buildJob*\",\n \"CDRelease*\",\n \"csc*.log\",\n \"DBRelease*\",\n \"EvgenJobOptions\",\n \"external\",\n \"fort.*\",\n \"geant4\",\n \"geomDB\",\n \"geomDB_sqlite\",\n \"home\",\n \"o..pacman..o\",\n \"pacman-*\",\n \"python\",\n \"runAthena*\",\n \"share\",\n \"sources.*\",\n \"sqlite*\",\n \"sw\",\n \"tcf_*\",\n \"triggerDB\",\n \"trusted.caches\",\n \"workdir\",\n \"*.data*\",\n \"*.events\",\n \"*.py\",\n \"*.pyc\",\n \"*.root*\",\n \"JEM\",\n \"tmp*\",\n \"*.tmp\",\n \"*.TMP\",\n \"MC11JobOptions\",\n \"scratch\",\n \"jobState-*-test.pickle\",\n \"*.writing\",\n \"pwg*\",\n \"pwhg*\",\n \"*PROC*\",\n \"madevent\",\n \"HPC\",\n \"objectstore*.json\",\n \"saga\",\n \"radical\",\n \"ckpt*\"]\n\n # remove core and pool.root files from AthenaMP sub directories\n try:\n cleanupAthenaMP(workdir, outputfiles)\n except Exception, e:\n print(\"Failed to execute cleanupAthenaMP(): %s\" % (e))\n\n # explicitly remove any soft linked archives (.a files) since they will be dereferenced by the tar command (--dereference option)\n matches = []\n import fnmatch\n for root, dirnames, filenames in os.walk(workdir):\n for filename in fnmatch.filter(filenames, '*.a'):\n matches.append(os.path.join(root, filename))\n for root, dirnames, filenames in os.walk(os.path.dirname(workdir)):\n for filename in fnmatch.filter(filenames, 'EventService_premerge_*.tar'):\n matches.append(os.path.join(root, filename))\n if matches != []:\n for f in matches:\n remove(f)\n # else:\n # print(\"Found no archive files\")\n\n # note: these should be partitial file/dir names, not containing any wildcards\n exceptions_list = [\"runargs\", \"runwrapper\", \"jobReport\", \"log.\"]\n\n to_delete = []\n for _dir in dir_list:\n files = glob(os.path.join(workdir, _dir))\n exclude = []\n\n if files:\n for exc in exceptions_list:\n for f in files:\n if exc in f:\n exclude.append(os.path.abspath(f))\n\n _files = []\n for f in files:\n if not f in exclude:\n _files.append(os.path.abspath(f))\n to_delete += _files\n\n exclude_files = []\n for of in outputfiles:\n exclude_files.append(os.path.join(workdir, of))\n for f in to_delete:\n if not f in exclude_files:\n remove(f)\n\n # run a second pass to clean up any broken links\n broken = []\n for root, dirs, files in os.walk(workdir):\n for filename in files:\n path = os.path.join(root, filename)\n if os.path.islink(path):\n target_path = os.readlink(path)\n # Resolve relative symlinks\n if not os.path.isabs(target_path):\n target_path = os.path.join(os.path.dirname(path), target_path)\n if not os.path.exists(target_path):\n broken.append(path)\n else:\n # If it's not a symlink we're not interested.\n continue\n\n if broken:\n for p in broken:\n remove(p)\n\n return 0", "def clean():\n clean_flatbuffer_binaries()\n clean_webp_textures()", "def _cleanup(self, fnum):\n while os.path.exists('%s.%s' % (self.name, fnum)):\n try:\n fname = '%s.%s' % (self.name, fnum)\n os.unlink(fname)\n # self.log.debug(\"Cleaned up file: %s\", fname)\n except:\n pass\n fnum -= 1", "def tearDown(self):\r\n remove_files(self.files_to_remove, False)\r\n if self.tmpdir:\r\n 
rmtree(self.tmpdir)\r\n\r\n # clean up the file from init_flowgram_file\r\n if (hasattr(self, \"tmp_filename\") and exists(self.tmp_filename)):\r\n remove(self.tmp_filename)", "def clear_model_checkpoints(self):\n if self.file_prefix is None:\n return\n\n with os.scandir() as path_list:\n for entry in path_list:\n if entry.is_file() and entry.name.startswith(self.file_prefix) and entry.name.endswith(\".h5\"):\n print(\"{}: Removing {}\".format(self.MODEL_NAME, entry.path))\n os.remove(entry.path)", "def clear_old_files(self):\n self.logger.logMsg(\"Clearing Old Files.....\")\n try:\n for files in os.listdir(self.download_path):\n path = os.path.join(self.download_path, files)\n os.remove(path)\n for files in os.listdir(self.outpath):\n path = os.path.join(self.outpath, files)\n os.remove(path)\n except Exception as e:\n self.logger.logError(\"Error Creating Old Files {}.....\".format(str(e)))\n raise Exception('Error in Clearing Old Files')\n\n self.logger.logMsg(\"Done Clearing Old Files.....\")", "def clean(self):\n if os.path.exists(self.initial):\n if os.path.exists(self.path) and os.stat(self.path).st_size == os.stat(\n self.initial).st_size:\n os.remove(self.initial)\n else:\n # if it doesn't match, something probably crashed; rename the temporary file and\n # it'll get uploaded at some point\n self.auto_filename()\n self.rename()\n self.connect()\n os.remove(self.initial)\n if os.path.exists(self.path):\n os.remove(self.path)\n self.filename_set = False", "def clearRunDirectory(self):\n for root, dirs, files in os.walk(self.run_dir, topdown=False):\n for name in files:\n if name.lower().endswith(('.cps', '.txt', '.sbml', '.csv')):\n os.remove(os.path.join(root, name))\n for name in dirs:\n if len(os.listdir(os.path.join(root, name)))==0:\n os.rmdir(os.path.join(root, name))", "def clean(self):\r\n\r\n for _, data in self.composition.items():\r\n index_file = Path(data['file'] + '.fxi')\r\n if index_file.exists():\r\n index_file.unlink()", "def clear_experiment(experiment_name, architecture):\n\n for filename in params_to_filename(experiment_name, architecture):\n full_path = os.path.join(*[CHECKPOINT_DIR, filename])\n os.remove(full_path) if os.path.exists(full_path) else None", "def clean(args):\n log = 'removing tmp dir %s ' % (args.tmpdir)\n if args.tmpdir.endswith('STAR'):\n cmd = ['rm -rf %s' % (args.tmpdir)]\n run_subprocess(cmd,args,log)\n log = \"remove tmp files from output dir\"\n cmd = ['mv %s/crick_joinedLog.final.out %s/Crick_joinedLog.final.out' % (args.output_dir, args.output_dir)]\n run_subprocess(cmd, args, log)\n cmd = ['mv %s/watson_joinedLog.final.out %s/Watson_joinedLog.final.out' % (args.output_dir, args.output_dir)]\n run_subprocess(cmd, args, log)\n cmd = ['mv %s/crick_mergedLog.final.out %s/Crick_mergedLog.final.out' % (args.output_dir, args.output_dir)]\n run_subprocess(cmd, args, log)\n cmd = ['mv %s/watson_mergedLog.final.out %s/Watson_mergedLog.final.out' % (args.output_dir, args.output_dir)]\n run_subprocess(cmd, args, log)\n cmd = ['rm -rf %s/crick_*' % args.output_dir]\n run_subprocess(cmd, args, log)\n cmd = ['rm -rf %s/watson_*' % args.output_dir]\n run_subprocess(cmd, args, log)\n cmd = ['rm -rf %s/joined* header.sam' % args.output_dir]\n run_subprocess(cmd, args, log)", "def teardown_method(self,method):\n filenames = ['poisson_bdm1_test.h5', 'poisson_bdm1_test.xmf','reference_triangle.ele',\n 'reference_triangle.node', 'reference_triangle.poly','proteus.log']\n for file in filenames:\n if os.path.exists(file):\n try:\n os.remove(file)\n except 
OSError as e:\n print (\"Error: %s - %s.\" %(e.filename, e.strerror ))\n else:\n pass", "def test_cleanup():\n os.remove(test_file[:-4] + \"_no_grave.h5m\")", "def clean(self):\n os.remove(\"temp.py\") # Delete the file \"temp.py\", to free up disk space", "def cleanup(self, job_dirs=None):\n if not self.args.cleanup:\n return\n self.args.logger.info(\"Begin intermediate file cleanup\")\n # Remove intermediate working directory files\n filetypes = [constants.INPUT_FEATURES_FILENAME.format(self.args.output_dir, \"*\"),\n constants.OUTPUT_FEATURES_FILENAME.format(self.args.output_dir, \"*\"),\n constants.RESULTS_FILENAME.format(self.args.output_dir, \"*\")]\n for filetype in filetypes:\n for filename in glob.glob(filetype):\n try:\n os.remove(filename)\n except OSError as error:\n self.args.logger.warning(f\"Cleanup: unable to remove {filename}: {error}\")\n if job_dirs: # Remove condor job directories\n for job_dir in job_dirs:\n try:\n shutil.rmtree(job_dir)\n except OSError as error:\n self.args.logger.warning(f\"Cleanup: unable to remove {job_dir}: {error}\")\n self.args.logger.info(\"End intermediate file cleanup\")", "def delete_leftovers(self):\n for each_file, artist in self.past_songs_db_data:\n if os.path.isfile(each_file): \n os.remove(each_file)\n print \"Deleted \" + each_file\n\n for each_file in os.listdir(\".\"):\n if each_file.endswith(\".jpg\"):\n os.remove(each_file)", "def clean_documents():\n start = datetime.now()\n for i, raw_filename in enumerate(os.listdir(RAW_DIR)):\n fullpath = os.path.join(RAW_DIR, raw_filename)\n if os.path.isfile(fullpath):\n print(\"Cleaning {0} {1}\".format(i, fullpath), file=stderr)\n try:\n with open(fullpath, \"r\") as f:\n text = f.read()\n text = clean(text)\n soup = BeautifulSoup(text, \"html.parser\")\n cleaned = visible_text(soup)\n score = germanwings_score(cleaned)\n if not score:\n print(\"not germanwings: {0}\".format(raw_filename))\n else:\n clean_filename = os.path.join(CLEAN_DIR, raw_filename)\n with open(clean_filename, \"w\") as f:\n f.write(cleaned.encode(\"ascii\", \"ignore\"))\n except Exception as exc:\n print(\"{0}: {1}\".format(fullpath, exc), file=stderr)\n end = datetime.now()\n print(\"Elapsed time to clean: {0}\".format(end - start), file=stderr)", "def cleanup(self):\n\n print \"Cleaning up...\",\n sys.stdout.flush()\n\n builddir = os.path.join(self.build)\n\n comm = 'rm -rf '+builddir\n #+' '+libdir+' '+logdir\n (output, error, retz) = runShellCommand(comm)\n\n print \"done.\"", "def cleanup(self):\n results = run_command(\"gppkg -q --all\")\n gppkgs = results.split('\\n')[self.start_output:self.end_output] #The first line is 'Starting gppkg with args', which we want to ignore.\n\n for gppkg in gppkgs:\n run_command(\"gppkg --remove \" + gppkg)", "def tearDown(self):\n for f in os.listdir('/tmp'):\n if not f.startswith(self.FILE_PREFIX):\n continue\n\n os.remove(os.path.join('/tmp', f))", "def _remove_files(self):\n if hasattr(self, 'files'):\n for file in self.files:\n if os.path.exists(file):\n os.remove(file)\n\n self._remove_changes()\n self._remove_temporary_files()", "def clean(self):\n\n for metric in self.metricList:\n listf = glob.glob(\n '{}/*_{}_{}*'.format(self.outDir, metric.name, self.num))\n if len(listf) > 0:\n for val in listf:\n os.system('rm {}'.format(val))", "def remove_persisted_files():\r\n persistIncarnations = get_persist_incarnation_dirs()\r\n for p in persistIncarnations:\r\n clear_dir(p)\r\n os.remove(p)\r\n clear_dir(get_persist_src_backup_dir())\r\n clear_dir(get_persist_src_dir())\r\n 
clear_dir(get_persist_root_dir()) \r\n\r\n #make sure the persist kb data structures aren't keeping any info \r\n global PERSISTED_LOAD_IDS\r\n AGENT_KB_MAP.clear()\r\n KB_WORKING_SET.clear()\r\n copy = PERSISTED_LOAD_IDS[:]\r\n for x in copy:\r\n PERSISTED_LOAD_IDS.remove(x)", "def reset(self):\n def remove_auxiliary_dir():\n egg_info_dir = self.project_name_sc + \".egg-info\"\n remove_directories([\n egg_info_dir,\n \".env\",\n \".eggs\",\n \".pytest_cache\",\n \"build\",\n \"dist\",\n \".cache\",\n \".benchmark\",\n \".tox\",\n \".vagrant\",\n \".tox\"])\n remove_files([\n \".coverage\",\n \".doit.db\",\n \".doit.bak\",\n \".doit.dat\",\n \".doit.dir\",\n ])\n\n # TODO(lschneider): Remove unnecessary files without command lines.\n # This code could be run directly from this function. However\n # the pathlib library is not part of the standard python 2.\n prefix = \"python -c \\\"import pathlib; \"\n delete_pyfiles = prefix + \"import pathlib; [p.unlink() for p in pathlib.Path('.').rglob('*.py[co]')]\\\"\"\n delete_dirs = prefix + \"import pathlib; [p.rmdir() for p in pathlib.Path('.').rglob('__pycache__')]\\\"\"\n\n return {\n \"actions\": [\n delete_pyfiles,\n delete_dirs,\n remove_auxiliary_dir,\n ],\n \"verbosity\": 2\n }", "def delete_intermediate_csvs(wk_dir):\n # Remove intermediate csv tables\n out_files = os.listdir(wk_dir)\n delete_keys = [\"int_metrics\",\"region_dims\"]\n delete_list = [f for f in out_files if any(x in f for x in delete_keys)]\n for f in delete_list:\n os.remove(f)", "def _clean(base_dir):\n # remove the snakemake cache\n shutil.rmtree(os.path.join(base_dir, \".snakemake\"), ignore_errors=True)\n\n # remove seq2science caches\n shutil.rmtree(os.path.expanduser(os.path.join(xdg.XDG_CACHE_HOME, \"seq2science\")), ignore_errors=True)\n\n # remove historic seq2science cache location\n shutil.rmtree(os.path.expanduser(f\"~/.config/seq2science/\"), ignore_errors=True)\n\n print(\"All cleaned up!\")", "def cleanup(keep_num=5):\n\n keep_num = int(keep_num)\n assert keep_num > 0, \"[ERROR] keep_num must be > 0; refusing to proceed.\"\n\n with cd(\"%(path)s/packages\" % env):\n package_files = sorted(run(\"ls -1\").split())\n package_files = [_.replace(\".tar.gz\", \"\") for _ in package_files]\n\n with cd(\"%(path)s/releases\" % env):\n release_files = sorted(run(\"ls -1\").split())\n release_files.remove('current')\n\n diff = set(package_files).symmetric_difference(set(release_files))\n\n if diff:\n raise Exception(\"[ERROR]: Package and release directories are out of sync;\"\n \" refusing to proceed. 
Please fix this difference manually: %s\" % diff)\n\n package_files = package_files[:-keep_num]\n release_files = release_files[:-keep_num]\n\n with cd(\"%(path)s/packages\" % env):\n [sudo(\"rm %s.tar.gz\" % _) for _ in package_files]\n\n with cd(\"%(path)s/releases\" % env):\n [sudo(\"rm -r %s\" % _) for _ in release_files]", "def _identify_files_to_remove(self, job_result_filepaths, params):\r\n return []", "def _identify_files_to_remove(self, job_result_filepaths, params):\r\n return []", "def _identify_files_to_remove(self, job_result_filepaths, params):\r\n return []", "def clear_base_files(self):\r\n compilelock.get_lock()\r\n try:\r\n for base_dir in ('cuda_ndarray', 'cutils_ext', 'lazylinker_ext',\r\n 'scan_perform'):\r\n to_delete = os.path.join(self.dirname, base_dir + '.delete.me')\r\n if os.path.isdir(to_delete):\r\n try:\r\n shutil.rmtree(to_delete)\r\n _logger.debug('Deleted: %s', to_delete)\r\n except Exception:\r\n _logger.warning('Could not delete %s', to_delete)\r\n continue\r\n to_rename = os.path.join(self.dirname, base_dir)\r\n if os.path.isdir(to_rename):\r\n try:\r\n shutil.move(to_rename, to_delete)\r\n except Exception:\r\n _logger.warning('Could not move %s to %s',\r\n to_rename, to_delete)\r\n finally:\r\n compilelock.release_lock()" ]
[ "0.74296117", "0.7379356", "0.721091", "0.7174838", "0.7121328", "0.7096112", "0.7081824", "0.7073559", "0.7040567", "0.69962186", "0.69456387", "0.69263583", "0.689189", "0.68870115", "0.6879709", "0.68567586", "0.6842316", "0.6828324", "0.6814952", "0.6776448", "0.6773137", "0.6771143", "0.6756325", "0.673247", "0.6728041", "0.66938204", "0.66653943", "0.6661679", "0.66466695", "0.6637356", "0.6632531", "0.6629455", "0.66209126", "0.6572151", "0.657193", "0.6538119", "0.65340817", "0.6522573", "0.65219694", "0.6519101", "0.65106887", "0.6502323", "0.6499334", "0.64908683", "0.6484328", "0.6483646", "0.64808303", "0.6458406", "0.64535034", "0.64124733", "0.64022887", "0.63953507", "0.63822263", "0.6379299", "0.6360355", "0.63582367", "0.63528425", "0.6347931", "0.63438886", "0.6343861", "0.633112", "0.63273734", "0.63196003", "0.6298686", "0.62971467", "0.6292295", "0.6281242", "0.62788934", "0.62695396", "0.6260184", "0.6255184", "0.6248246", "0.6239175", "0.62384075", "0.6236825", "0.62300813", "0.62180275", "0.6207877", "0.6207649", "0.6185511", "0.61802447", "0.61786145", "0.61751676", "0.61656684", "0.6156938", "0.61542374", "0.6153858", "0.6145563", "0.6145278", "0.6127375", "0.6126558", "0.6124595", "0.61190796", "0.6117561", "0.61072826", "0.6106725", "0.61025697", "0.60856444", "0.60856444", "0.60856444", "0.60848886" ]
0.0
-1
Train a model with the current hyperparameters.
def run(job_path, model_path, metric):\n cmd = (f\"cd $NFFDIR/scripts/cp3d/train \"\n f\"&& python train_parallel.py {job_path}\")\n os.system(cmd)\n best_score, best_epoch = parse_score(model_path, metric)\n return best_score
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_model(self, *args, **kwargs):\n self.model.train(self.training, *args, **kwargs)", "def train(self, current_hyper_params):\n train_loss = 0\n train_n_iter = 0\n # Set model to train mode\n self.model.train()\n # Iterate over train data\n print(\"Iterating over training data...\")\n for i, batch in enumerate(tqdm(self.train_loader)):\n loss = self._train_batch(batch)\n # Statistics\n train_loss += loss.item()\n train_n_iter += 1\n self.stats.train_loss_history.append(train_loss / train_n_iter)", "def train_model(self, *args, **kwargs):\n raise NotImplementedError", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def training(self):\n self.model.fit(self.train_x, self.train_y)", "def train(self):\n self.log(f\"{self.cur_file_path}\\t\\tInfo: train method invoked!\")\n self.log(f\"{self.cur_file_path}\\t\\tInfo: training {self.model.__class__.__name__} model!\")\n\n self.model.fit(self.trainX, self.trainY)", "def train(self, x, t):\n for i in range(self.number_model):\n curr_model = self.all_model[i]\n curr_model.fit(x, t)", "def train(self, X_train, y_train):\n self.model.fit(X_train, y_train)", "def train_model(self):\r\n alpha, accuracy_rate = self.select_model()\r\n # Initialize logistic regression with alpha(learning rate)\r\n lg = logisticregression(C=alpha)\r\n # Train the model.\r\n lg.fit(self.training_data, self.target_data)\r\n # Save the trained model as .pkl file.\r\n joblib.dump(value=lg, filename=self.intention_id+'.pkl', compress=1)\r\n print \"Estimated Parameters of Logistic Regression\"\r\n # Estimated parameters of logistic regression.\r\n print lg.get_params()", "def trainModel( self, featureTrain, classTrain):", "def train(self, X_train, y_train):\n\n self.model_pipeline.fit(X_train, y_train)", "def set_train(self):\n self.model.train()", "def train(self, x_data, y_data):\n for model in self.list_of_models:\n model.fit(x_data, y_data)\n self.trained_models.append(model)", "def train(self, x, t):\n self.model.fit(x, t)", "def train(self, x, t):\n self.model.fit(x, t)", "def train(self, x, t):\n self.model.fit(x, t)", "def train(self, x, t):\n self.model.fit(x, t)", "def train(self, x, t):\n self.model.fit(x, t)", "def train(self, X, y):\n self.model.fit(X, y)", "def train(self, **kwargs):\n self.solver.train(**kwargs)", "def train_model(dataset):\n\n # clear the session so that we can train more than one model\n K.clear_session()\n\n # initialize the model\n model = initalizer.init_nn()\n\n # fit the model\n model.fit(dataset, epochs=40)\n\n return model", "def train(self, model, args):\n if model == self.WORD_DET_RFC:\n return self.train_rfc(args)\n elif model == self.REGRESSION_PARAMS:\n return self.train_bb_reg(args)\n else:\n raise Exception('No model %s exists to train' % model)", "def train(self):\n # setup model\n self.createModel()\n self.setGenerators()\n self.buildCallbacks()\n self.printParameters()\n \n # train model\n _ = self.model.fit_generator(\n generator = self.trainGen,\n validation_data = self.validateGen,\n steps_per_epoch = self.steps_per_epoch,\n validation_steps = self.validation_steps,\n epochs = self.epochs,\n use_multiprocessing = True,\n callbacks = self.callbacks)\n # clear save paths to avoid overwriting accidentaly\n self.saveName = None", "def train():\n import training\n\n # Ensure output directories exist\n os.makedirs(os.path.dirname(cfg.scaler_path), exist_ok=True)\n os.makedirs(cfg.model_path, exist_ok=True)\n os.makedirs(cfg.log_path, exist_ok=True)\n\n # Load (standardized) input data 
and target values\n tr_x, tr_y, _ = _load_data(cfg.training_set, is_training=True)\n val_x, val_y, _ = _load_data(cfg.validation_set)\n\n # Try to create reproducible results\n np.random.seed(cfg.initial_seed)\n\n # Save free parameters to disk\n utils.log_parameters(cfg.training, os.path.join(cfg.model_path,\n 'parameters.json'))\n\n training.train(tr_x, tr_y, val_x, val_y)", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)", "def train(self, trnM, trnL):\n print 'Training ...'\n self.clf.fit(trnM, trnL)", "def set_train(self):\n for m in self.models.values():\n m.train()", "def _train_model(self):\n self.experiment = EpisodicExperiment(self.task, self.agent)\n n_epochs = int(self.rl_params.n_training_episodes / self.rl_params.n_episodes_per_epoch)\n logger.debug(\"Fitting user model over {} epochs, each {} episodes, total {} episodes.\"\n .format(n_epochs, self.rl_params.n_episodes_per_epoch, n_epochs*self.rl_params.n_episodes_per_epoch))\n for i in range(n_epochs):\n logger.debug(\"RL epoch {}\".format(i))\n self.experiment.doEpisodes(self.rl_params.n_episodes_per_epoch)\n self.agent.learn()\n self.agent.reset() # reset buffers", "def train(self):\n self.emission_model(self.train_data)\n self.transition_model(self.train_data)", "def train(self, model_type, params=None):\n Model = load_model_class(model_type)\n self.model_type = model_type\n X, y = self.task.make_dataset()\n self.final_data = X.copy()\n # Save preds\n preds = np.zeros_like(y.values).astype(np.float)\n with TMPFolder():\n N = len(X)\n n = N // self.cv\n # Assign a fold to each sample\n folds = np.random.permutation(np.repeat(np.arange(self.cv), n+1)[:N])\n if self.cv == 1:\n folds[:] = 1\n folds[np.random.permutation(np.arange(N))[:int(round(0.25 * N))]] = 0\n # Iterate over folds\n for k in range(self.cv):\n print(\"Fold\", k)\n # Create model\n model = Model()\n if params is not None:\n model.set_hp(params)\n # Create sub-dataset\n X_train = X[folds != k]\n y_train = y[folds != k]\n X_test = X[folds == k]\n y_test = y[folds == k]\n # Train the model\n model.train(X_train, y_train)\n # Make predictions on test samples\n y_pred = model.predict(X_test)\n # Save the predictions\n preds[folds == k] = y_pred\n self.model_save.append(model)\n # Save folds\n self.folds = folds\n self.is_trained = True\n self.preds = preds\n self.true_labels = y", "def _set_train(self):\n\n if not self.model.__dict__['training']:\n self.model.train()", "def train(self, dataset, model_dir):\n raise NotImplementedError", "def train(self):\n p = self._params\n if self.train_data != None:\n tens_to_log = self.params.tens_to_log\n logging_hook = tf.train.LoggingTensorHook(tensors = tens_to_log,\n every_n_iter = p.logging_step,\n )\n t_fn = tf.estimator.inputs.numpy_input_fn(x = {\"x\": self.train_data[\"x\"]},\n y = self.train_data[\"y\"],\n batch_size = p.batch_size,\n num_epochs = None,\n shuffle = True,\n )\n self._model.train(input_fn = t_fn,\n steps = self.params.training_steps,\n hooks = [logging_hook],\n )\n \n if self.eval_data != None:\n e_fn = tf.estimator.inputs.numpy_input_fn(x = {\"x\": self.eval_data[\"x\"]},\n y = self.eval_data[\"y\"],\n num_epochs = 1,\n shuffle = False,\n )\n eval_results = self.model.evaluate(input_fn = e_fn,\n checkpoint_path = self.model_dir,\n )\n print(eval_results)", "def train_model(model, x_train, y_train, x_test, y_test,\n epochs=None, batch_size=None):\n\n # Training\n if batch_size is None:\n batch_size = 128\n if epochs is None:\n epochs = 20\n\n print('x_train 
shape:', x_train.shape)\n print('x_test shape:', x_test.shape)\n\n print('Train...')\n model.fit(x_train,\n y_train,\n batch_size=batch_size,\n epochs=epochs,\n validation_data=(x_test, y_test),\n use_multiprocessing=True)", "def _train_model(self):\n raise NotImplementedError()", "def fit_model(self):\n logger.info('Fitting model')\n if self.traj_dict is None:\n self.traj_dict = self.get_traj_dict()\n self.model.fit(self.traj_dict.values())", "def train(self, request):\n model = request.get(\"model\")\n if not model:\n raise MONAILabelException(\n MONAILabelError.INVALID_INPUT,\n \"Model is not provided for Training Task\",\n )\n\n task = self._trainers.get(model)\n if not task:\n raise MONAILabelException(\n MONAILabelError.INVALID_INPUT,\n f\"Train Task is not Initialized. There is no model '{model}' available\",\n )\n\n request = copy.deepcopy(request)\n result = task(request, self.datastore())\n\n # Run all scoring methods\n if self._auto_update_scoring:\n self.async_scoring(None)\n return result", "def train_small_test_version(self, hyperparams_dict):\n trainer = ModelTrainer(self.dataloaders, hyperparams_dict,\n self.wv_wrapper, self.path)\n model, losses, accuracies = trainer.train(epochs=3)\n return model, losses, accuracies", "def train():\n model.train()\n for batch_index, (xb, yb) in enumerate(train_dl):\n loss = loss_func(model(xb), yb)\n\n loss.backward()\n opt.step()\n opt.zero_grad()", "def trainModel(data, labels, vData, vLabels, logger, params):\n\t#save the random seed\n\tlogger.logSeed()\n\n\t#create the model\n\tif params.emb:\n\t\t\n\t\tif params.useBothHalves:\n\t\t\tmodel = buildMultiEmbModel(params.numClasses(), params.windowSize, params.contextSize, params.wordWeights, params.eventMap)\n\t\telse:\n\t\t\tmodel = buildCNNEmbModel(params.numClasses(), params.windowSize, params.contextSize, params.wordWeights, params.eventMap)\n\n\telif params.split:\n\t\tmodel = buildSplitModel(params.numClasses(), params.windowSize, params.wordSize, params.contextSize, params.eventMap)\n\t\t#model = buildAttentionModel(params.numClasses(), params.windowSize, params.wordSize, params.contextSize, params.eventMap)\n\telse:\n\t\tmodel = buildModel(params.numClasses(), params.windowSize, params.wordSize, params.contextSize, params.eventMap)\n\n\t#fit the model\n\tmodel.fit(data, labels, nb_epoch=params.epochs, batch_size=params.batchSize, \n\t\tvalidation_data=(vData, vLabels), \n\t\tcallbacks=[logger], class_weight=params.weights)\n\n\tbest = logger.best()\n\n\tprint(\"Best Model round: {} val: {}\".format(logger.bestModel, logger.bestScore))\n\n\t#return the best\n\treturn best, logger.bestModel", "def train_and_eval(self):\n self.__create_indexes()\n model = None\n model = None\n if self.model == 'OMult':\n model = OMult(self.kwargs)\n elif self.model == 'ConvO':\n model = ConvO(self.kwargs)\n elif self.model == 'QMult':\n model = QMult(self.kwargs)\n elif self.model == 'ConvQ':\n model = ConvQ(self.kwargs)\n elif self.model == 'OMultBatch':\n model = OMultBatch(self.kwargs)\n elif self.model == 'ConvOBatch':\n model = ConvOBatch(self.kwargs)\n elif self.model == 'QMultBatch':\n model = QMultBatch(self.kwargs)\n elif self.model == 'ConvQBatch':\n model = ConvQBatch(self.kwargs)\n else:\n print(self.model, ' is not valid name')\n raise ValueError\n\n self.train(model)\n self.eval(model)", "def train_models(self):\n scores = {}\n for name, clf in self.__clfs.items():\n print('=' * 80)\n print(name)\n scores[name] = self.__benchmark(clf)\n best = self.__get_best_score(scores)\n 
best_params = self.optimize(self.__clfs[best])\n simple_score, simple_model = self.__run_best_model(best_params, best)\n keras_model, keras_score = self.train_sequential()\n if keras_score >= simple_score: # check if the best simple model or the CNN model\n return keras_model\n else:\n return simple_model", "def train(self):\n model = TreeTrainer.train(self)\n model['latent'] = self._latent\n model['suffstats'] = {\n 'vert_ss': self._vert_ss,\n 'edge_ss': self._edge_ss,\n 'feat_ss': self._feat_ss,\n }\n return model", "def train_model(self):\n if not self.is_exist(self.path_model_directory):\n # Then create the parent folder\n os.makedirs(self.path_model_directory)\n\n # Create a meta-data pickle for the model\n self.create_meta_data_pickle()\n\n # Necessary meta-data file must be created before starting the training. Check if the file exists\n if self.is_exist(self.path_model_metadata):\n\n # We do not need to train a model if there is already a best model for the same training exist\n try:\n self.model = load_model(self.path_best_model)\n return\n except:\n self.log_event('There is no best trained model found in the parent folder. Going with the training...')\n\n # Load the model meta-data\n self.load_model_metadata()\n self.encoding_vector_size = self.number_of_distinct_items\n\n # Iterate trough the split data for the training\n for split_number in range(self.k_split):\n split_path = f'split_{str(split_number)}/'\n split_directory = self.path_model_directory + split_path\n\n # Check the split directory is already created. If it is, then we can directly start the training by using the existing data\n if self.is_exist(split_directory):\n try:\n self.load_best_tuned_model(split_number)\n except (IndexError, FileNotFoundError):\n self.load_fold_k_data_and_fit(split_number=int(split_number))\n\n else:\n # Create a folder for the split data and prepare the data for the training\n os.makedirs(split_directory)\n\n # Create an array which will contain train features-labels and test features-labels\n train_array = np.full(4, fill_value=self.mask_value, dtype=object)\n train_index = 0\n for position, split_name in enumerate(['train_split_', 'test_split_']):\n training_features_directory = split_directory + f'{split_name}{str(split_number)}_all_training_features.data'\n training_targets_directory = split_directory + f'{split_name}{str(split_number)}_all_training_targets.data'\n fold_directory = self.path_shared_folds + f'{split_name}{str(split_number)}.fold'\n\n self.process_training_data(fold_directory=fold_directory)\n\n self.save_data_to_disk(data_to_save=self.all_features, path_to_save=training_features_directory)\n train_array[train_index] = self.all_features\n train_index += 1\n self.all_features = None # Memory Management\n\n self.save_data_to_disk(data_to_save=self.all_targets, path_to_save=training_targets_directory)\n train_array[train_index] = self.all_targets\n train_index += 1\n self.all_targets = None # Memory Management\n\n # Assign the input data to respective variables for the training\n self.train_features = train_array[0]\n self.train_targets = train_array[1]\n self.test_features = train_array[2]\n self.test_targets = train_array[3]\n del train_array\n\n self.start_hyper_parameter_tuning(split_number)\n\n self.retrieve_best_model(metric=self.hyper_parameters['metric'])", "def train(self):\n print('Model training...')\n self.model.train()\n self.likelihood.train()\n optimizer = torch.optim.Adam(\n [{'params': self.model.parameters()}], lr=self.lr)\n mll = 
gpytorch.mlls.ExactMarginalLogLikelihood(\n self.likelihood, self.model)\n for i in range(self.steps):\n optimizer.zero_grad()\n output = self.model(self.X)\n loss = -mll(output, self.y)\n loss.backward()\n self.lscales.append(\n self.model.covar_module.base_kernel.base_kernel.lengthscale.tolist()[0]\n )\n self.noise_all.append(\n self.model.likelihood.noise_covar.noise.item())\n if self.verbose:\n print('iter: {} ...'.format(i),\n 'loss: {} ...'.format(np.around(loss.item(), 4)),\n 'length: {} ...'.format(np.around(self.lscales[-1], 4)),\n 'noise: {} ...'.format(np.around(self.noise_all[-1], 7)))\n optimizer.step()\n return", "def train(self, data):\n \n logger('[.] Training with whole dataset ...')\n \n datalist = self.unpack_data(data)\n self.knn_model.fit(datatuple['features'], datatuple['labels'])", "def train_model(self, model, data) -> keras.Model:\n self.history = model.fit(\n self.generator.flow(data.x.train, data.y.train),\n epochs=self.N_epochs,\n validation_data=(data.x.valid, data.y.valid),\n verbose=1,\n steps_per_epoch=int(np.floor(data.x.train.shape[0] / self.batch_size)),\n callbacks=self.callbacks,\n shuffle=True,\n )\n\n return model", "def train(fitted_model_filename):\n click.echo(\"Mode: training.\\n\")\n defaults = get_defaults()\n\n fitted_model_filename = add_extension(fitted_model_filename)\n\n # derive final path for fitted model as base output path for fitted models + model filename\n fitted_model_path = os.path.join(defaults.OUTPUT.FITTED_MODELS_PATH, fitted_model_filename)\n\n new_options = [\"OUTPUT.FITTED_MODEL_PATH\", fitted_model_path]\n\n boot_data = bootstrap(new_options, mode=\"train\")\n defaults = boot_data['defaults']\n\n X_train, y_train = boot_data['data']\n fitted_model = train_model(X_train, y_train, defaults)\n\n # dump fitted model\n os.makedirs(defaults.OUTPUT.FITTED_MODELS_PATH, exist_ok=True)\n save_obj(fitted_model, defaults.OUTPUT.FITTED_MODEL_PATH)", "def training(self) -> None:\n self.compile_model()\n self.train_epoch()\n self.agent.save()", "def train(self, training_data):\n pass", "def train(self, params):\n for param in params:\n if param not in self.__info__['space']:\n print(\"Error: not supported parameters {}\".format(param))\n\n if self.dataset_type == PROBLEM.CLASSIFICATION:\n model = SVC(C=float(params[\"C\"]),\n kernel=params[\"Kernel\"],\n gamma=params[\"Gamma\"],\n coef0=float(params[\"Coef0\"]))\n else:\n model = SVR(C=float(params[\"C\"]),\n kernel=params['Kernel'],\n gamma=params[\"Gamma\"],\n coef0=float(params[\"Coef0\"]))\n\n # train\n model.fit(self.X_train, self.Y_train)\n return model", "def train_model(self):\n self.logger.info('Loading the data...')\n train_data = self.load_data(split=\"train\")\n dev_data = self.load_data(split=\"dev\")\n self.config.best_model = os.path.join(self.config.output_dir,\"best_model\")\n self.logger.info('Training the model, outputdir=%s...,best_model=%s' % (self.config.output_dir,self.config.best_model))\n\n train_params = {\n \"overwrite_output_dir\" : True,\n \"reprocess_input_data\": True,\n \"learning_rate\" : self.config.learning_rate,\n \"num_train_epochs\" : self.config.num_train_epochs,\n \"train_batch_size\" : self.config.train_batch_size,\n \"eval_batch_size\" : self.config.eval_batch_size,\n \"gradient_accumulation_steps\": self.config.gradient_accumulation_steps,\n \"use_early_stopping\" : self.config.early_stopping,\n \"fp16\" : False,\n \"classification_report\" : True,\n \"evaluate_during_training\" : True,\n \"evaluate_during_training_verbose\" : True,\n 
\"best_model_dir\": self.config.best_model,\n \"save_model_every_epoch\" : self.config.save_model_every_epoch,\n \"save_steps\" : self.config.save_steps,\n \"save_optimizer_and_scheduler\" : self.config.save_optimizer_and_scheduler,\n \"save_best_model\": True,\n }\n\n ## train the model \n self.model.train_model(\n train_data,\n eval_data=dev_data,\n output_dir=self.config.output_dir,\n show_running_loss=False,\n args=train_params,\n )\n\n ## backing up the config and create pointer to best model \n with open(os.path.join(self.config.best_model,\"trainer_config.json\"),'w') as mconfig:\n mconfig.write(json.dumps(self.config.__dict__))\n self.config.existing_model = self.config.best_model", "def _train(self, dataset):\n net = buildNetwork(\n dataset.params_length,\n self._default_hidden_layers,\n 1 # a binary classifier only requires one output layer\n )\n ds = SupervisedDataSet(dataset)\n trainer = BackpropTrainer(net, ds)\n trainer.trainUntilConvergence()\n net.activate(params.as_serialized)", "def __train_model(self):\n logger.info(\"Training the ALS model...\")\n self.model = ALS.train(self.ratings_RDD, self.rank, seed=self.seed,\n iterations=self.iterations, lambda_=self.regularization_parameter)\n logger.info(\"ALS model built!\")", "def __train_model(self):\n logger.info(\"Training the ALS model...\")\n self.model = ALS.train(self.ratings_RDD, self.rank, seed=self.seed,\n iterations=self.iterations, lambda_=self.regularization_parameter)\n logger.info(\"ALS model built!\")", "def train_model(self) -> Model:\n run = self.submit_experiment_run(wait_for_completion=self.wait_for_completion)\n model = run.register_model(\n model_name=self.model_name, model_path=self.model_path\n )\n return model", "def train(self, x_data, y_data):\n self.model.fit(np.array(x_data), np.array(y_data),\n batch_size=2,\n epochs=3,\n verbose=1)\n self.model.save_weights(self.model_filename)", "def train_model(model, X_train, y_train, X_val, y_val, image_name):\n if MODEL == 1:\n return train_model_1(model, X_train, y_train, X_val, y_val, image_name)\n elif MODEL == 3:\n if CROSS_VALIDATION:\n return train_cv_model_3(model, X_train, y_train,\n X_val, y_val, image_name)\n else:\n return train_model_3(model, X_train, y_train,\n X_val, y_val, image_name)\n elif MODEL == 2:\n return train_model_2(model, X_train, y_train, X_val, y_val, image_name)\n else:\n # For models 4, 5 and 6\n return train_model_4(model, X_train, y_train, image_name)", "def train(self, train_dataset):\n\n # check fine_tuning option\n model_path = os.path.join(self.check_point, 'model.pt')\n if self.fine_tune and not os.path.exists(model_path):\n raise Exception('Cannot find %s.' 
%model_path)\n elif self.fine_tune and os.path.exists(model_path):\n if self.verbose:\n pass\n self.model = torch.load(model_path)\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)\n \n # capture best model\n best_val_psnr = -1\n best_model_state = self.model.state_dict()\n\n # Train the model\n for epoch in range(self.num_epochs):\n self._epoch_step(train_dataset, epoch)\n self.scheduler.step()\n\n\n\n # capture running PSNR on train and val dataset\n train_psnr, train_ssim, _, _ = self._check_PSNR(train_dataset)\n self.hist_train_psnr.append(train_psnr)\n \n\n \n # write the model to hard-disk for testing\n if not os.path.exists(self.check_point):\n os.makedirs(self.check_point)\n model_path = os.path.join(self.check_point, 'model.pt')\n torch.save(self.model, model_path)", "def train(self, training_steps=10):", "def train(self, data, option, param_map):\n if option == \"lr\":\n md = self.logistic_regression(elastic_param=param_map[\"elastic_param\"],\n reg_param=param_map[\"reg_param\"],\n family=param_map[\"family\"])\n elif option == \"rf\":\n md = self.random_forest(max_depth=param_map[\"max_depth\"],\n max_num_tree=param_map[\"max_num_tree\"])\n elif option == \"gbdt\":\n md = self.gbdt(max_depth=param_map[\"max_depth\"],\n max_bins=param_map[\"max_bins\"])\n else:\n raise ValueError(\"ERROR | model %s does not support yet\" % option)\n\n self.model = md.fit(data)\n return self.model", "def train(self, train_input_data, train_labels, _epochs, _batch_size):\n\n # compile the model\n (self.merged_model).compile(\n optimizer=\"adam\", loss=\"binary_crossentropy\", metrics=[\"acc\"]\n )\n\n # fit the model\n history = (self.merged_model).fit(\n train_input_data,\n train_labels,\n batch_size=_batch_size,\n epochs=_epochs,\n verbose=1,\n )\n\n return history", "def train_model(x_data, y_data, model_type):\n # def lr model object\n clr = None\n try:\n clr = model_list[model_type]()\n except Exception as e:\n print(e)\n # fit model\n clr.fit(x_data, y_data)\n # save model in pkl file\n try:\n joblib.dump(clr, \"model/\" + model_type + \".pkl\")\n except Exception as e:\n print(e)\n return clr", "def train(self, train_x, train_y, optimzer='adam'):\n self.history = self.model.fit(train_x, train_y, epochs=self.epochs, batch_size=self.batch_size,\n verbose=self.verbose, shuffle=False)", "def train_model(X_train, y_train, X_test, y_test, classifier, **kwargs):\r\n \r\n # instantiate model\r\n model = classifier(**kwargs)\r\n \r\n # train model\r\n model.fit(X_train,y_train)\r\n \r\n # check accuracy and print out the results\r\n fit_accuracy = model.score(X_train, y_train)\r\n test_accuracy = model.score(X_test, y_test)\r\n \r\n print(f\"Train accuracy: {fit_accuracy:0.2%}\")\r\n print(f\"Test accuracy: {test_accuracy:0.2%}\")\r\n \r\n return model", "def train_model(model, train_data, train_targets, epochs):\n history = model.fit(train_data, train_targets, epochs=epochs, \n batch_size=40, validation_split=0.15,verbose=False)\n \n return history", "def train(self, training_set):\n self.originModel.train(training_set)\n return self", "def train(self, trainX, trainY):\n self.model = KNeighborsRegressor(n_neighbors=5)\n self.model.fit(trainX, trainY)", "def train_model():\n return model.fit(train_images, train_labels, epochs=10, validation_data=(test_images, test_labels), shuffle='True')", "def train(self, X, Y):\n if self.train_step:\n Model.__X__ = X\n Model.__Y__ = Y\n\n self.train_step.run(session=Model.session, feed_dict={Model.x: X, Model.y: Y})", "def 
train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train(parameters):\n X_train, X_val, X_test, y_train, y_val, y_test = get_data(parameters.feature)\n accuracy = -1\n if parameters.model == 'naive_bayes':\n accuracy, confusion_matrix = train_model(naive_bayes.MultinomialNB(), X_train, y_train, X_test, y_test)\n elif parameters.model == 'random_forest':\n accuracy = 0\n confusion_matrix = None\n for _ in range(10):\n accu, matrix = train_model(ensemble.RandomForestClassifier(n_estimators=40), X_train, y_train, X_test, y_test)\n if accu > accuracy:\n accuracy = accu\n confusion_matrix = matrix\n elif parameters.model == 'SVM':\n accuracy = train_model(svm.SVC(gamma='auto'), X_train, y_train, X_test, y_test)\n\n if accuracy > 0:\n print(\"%s, %s: %f\" % (parameters.model, parameters.feature, accuracy))\n print(confusion_matrix)", "def train():\n \n ## check for request data\n if not request.json:\n print(\"ERROR: API (train): did not receive request data\")\n return jsonify(False)\n\n ## set the test flag\n test = False\n if 'mode' in request.json and request.json['mode'] == 'test':\n test = True\n\n print(\"... training model\")\n model = model_train(test=test)\n print(\"... training complete\")\n\n return(jsonify(True))", "def train_model(args: argparse.Namespace, hp: HParams, extension_architecture: str, timestamp: str,\n logger: logging.Logger) -> None:\n # 1. Check if we have to train a single tier or a complete model (with several tiers)\n if args.tier is not None:\n # 1.1 Argument tier was defined. Only that tier will be trained.\n logging.info(f\"Training single tier of the model: Tier {args.tier}\")\n\n # 2. Setup tensorboard logging\n # 2.1 Create tensorboard logs directory (tensorboard requires a different folder for each\n # run of the model, in this case every run to train a tier) so we add the extension of the\n # network's architecture of this run and the timestamp to identify it completely\n tensorboard_dir = f\"{hp.logging.dir_log_tensorboard}{extension_architecture}_\" \\\n f\"{timestamp}_tier{args.tier}\"\n Path(tensorboard_dir).mkdir(parents=True, exist_ok=True)\n # 2.2 Create tensorboard writer\n tensorboardwriter = TensorboardWriter(hp, tensorboard_dir)\n\n # 3. Start training of the tier\n train_tier(args, hp, args.tier, extension_architecture, timestamp, tensorboardwriter,\n logger)\n\n tensorboardwriter.close()\n\n else:\n # 1.2 Argument tier was not defined. Train all tiers of the model.\n logging.info(\"Training all tiers of the model\")\n\n for tier in range(1, hp.network.n_tiers + 1):\n # 2. Setup tensorboard logging (one for every tier)\n # 2.1 Create tensorboard logs directory (tensorboard requires a different folder for\n # each run of the model, in this case every run to train a tier) so we add the extension\n # of the network's architecture of this run and the timestamp to identify it completely\n tensorboard_dir = hp.logging.dir_log_tensorboard + extension_architecture \\\n + f\"_{timestamp}_tier{tier}\"\n Path(tensorboard_dir).mkdir(parents=True, exist_ok=True)\n # 2.2 Create tensorboard writer\n tensorboardwriter = TensorboardWriter(hp, tensorboard_dir)\n\n # 3. 
Start training of the tier\n train_tier(args, hp, tier, extension_architecture, timestamp, tensorboardwriter, logger)\n\n tensorboardwriter.close()\n del tensorboardwriter", "def train_model(self):\n early_stopping = EarlyStopping(self, self.hyper.early_stopping_enabled, self.hyper.early_stopping_limit)\n loss_history_train = []\n loss_metric_train = tf.keras.metrics.Mean()\n\n x_train, next_values_train = self.dataset.create_batches(self.hyper.batch_size,\n [self.dataset.x_train,\n self.dataset.next_values_train])\n\n x_train_val, next_values_train_val = self.dataset.create_batches(self.hyper.batch_size,\n [self.dataset.x_train_val,\n self.dataset.next_values_train_val])\n\n for epoch in range(self.hyper.epochs):\n print(\"Epoch %d\" % (epoch,))\n\n for step, (x_batch_train, next_values_batch_train) in enumerate(zip(x_train, next_values_train)):\n self.train_step(x_batch_train, next_values_batch_train, loss_metric_train)\n\n if step % 50 == 0:\n print(\"\\tStep %d: mean loss = %.4f\" % (step, loss_metric_train.result()))\n\n loss_train_batch = loss_metric_train.result()\n loss_history_train.append(loss_train_batch)\n loss_metric_train.reset_states()\n\n self.model.save_weights(self.checkpoint_path.format(epoch=epoch))\n\n # Check early stopping criterion --> Has the loss on the validation set not decreased?\n best_epoch = early_stopping.execute(epoch, x_train_val, next_values_train_val)\n self.clean_up(early_stopping, epoch)\n\n if best_epoch > 0:\n print('Model from epoch %d was selected by early stopping.' % best_epoch)\n print('Training process will be stopped now.')\n\n self.save_model(best_epoch)\n\n return\n\n self.save_model(epoch=self.hyper.epochs - 1)", "def train(self) -> None:\n\n # Check if in the saved model path there is already a trained model\n if self.config.TRN_HYPERP[\"save_path\"]:\n if tf.saved_model.contains_saved_model(self.config.TRN_HYPERP[\"save_path\"]):\n print(\"INFO: An existing saved model will be used for inference\\n\")\n else:\n params = {**self.config.TRN_HYPERP, **self.config.DATASET_HYPERP}\n trainer = Trainer(**params)\n\n print(f\"INFO: Starting training ... \\n\")\n start_time = time.time()\n trainer.train()\n print(f\"\\nINFO: Training completed in {round((time.time() - start_time)/60, 2)} minutes.\\n\")\n\n # Instantiate the saved translator for inference\n saved_path = self.config.TRN_HYPERP[\"save_path\"]\n self.saved_translator = tf.saved_model.load(saved_path)\n else:\n print(\"INFO: Path to save model wasn't provided in config file. 
Can't train the model\\n\")", "def train(model_name, batch_size, steps_per_epoch, epochs, validation_steps, \n model_file=None, save_path=None):\n \n print(\"- Loading configuration...\")\n if model_name in models_default_params:\n default_params = models_default_params[model_name]\n else:\n print(\"Error: the model '{}' has not been implemented\".format(model_name))\n return\n custom_objects = default_params['custom_objects']\n patch_size = default_params['patch_size']\n if save_path is None:\n save_path = default_params['default_path']\n if os.path.isfile(save_path):\n print(\"Warning: {} is an existing file and will be overwritten.\".format(save_path))\n print(\"- Configuration loaded.\")\n \n print(\"- Loading datasets...\")\n train_gen = preprocess.load_dataset(batch_size, x_directory = \"datasets/Potsdam/Training/RGB/\",\n y_directory = \"datasets/Potsdam/Training/Labels/\",\n patch_size = patch_size)\n val_gen = preprocess.load_dataset(batch_size, x_directory = \"datasets/Potsdam/Validation/RGB/\",\n y_directory = \"datasets/Potsdam/Validation/Labels/\",\n patch_size = patch_size)\n print(\"- Data loaded.\")\n \n print(\"- Initialising model...\")\n if(model_file is not None): # Further train existing model\n model = keras.models.load_model(model_file, custom_objects=custom_objects)\n else: # Create new model\n if model_name == 'fcn':\n model = fcn.make_fcn_resnet((patch_size, patch_size, channels), nb_labels, \n use_pretraining=False, freeze_base=False)\n elif model_name == 'pspnet':\n model = pspnet.build_pspnet(nb_classes=nb_labels, resnet_layers=50,\n input_shape=patch_size)\n elif model_name == 'mobilenetv2':\n model = mobilenetv2.MobileNetv2((patch_size, patch_size, channels), nb_labels) \n\n model.compile(\n optimizer = optimizers.Adam(lr = 0.00001),\n loss = losses.categorical_crossentropy,\n metrics = [metrics.categorical_accuracy]) \n model.summary() \n print(\"- Model initialised.\")\n \n tensorboard = callbacks.TensorBoard(log_dir='./logs')\n csv_logger = callbacks.CSVLogger('logs/training.csv')\n checkpoint = callbacks.ModelCheckpoint(filepath=checkpoint_path,\n save_weights_only=True,\n save_best_only=True)\n \n print(\"- Starting training.\")\n model.fit_generator(\n generator=train_gen,\n steps_per_epoch=steps_per_epoch,\n epochs=epochs,\n validation_data=val_gen,\n validation_steps=validation_steps,\n # callbacks=[checkpoint, csv_logger]\n )\n print(\"- Training complete.\")\n \n model.save(save_path)\n print(\"- Model saved to {}\".format(save_path))", "def TrainStudent(self, model_name, teacher_model_name, **kwargs):\n batch_size = kwargs.pop(\"batch_size\", 64)\n model_save_path = kwargs.pop('model_save_path', \"./checkpoints/student/\")\n teacher_model_path = kwargs.pop(\"teacher_model_path\", \"./checkpoints/teacher/\")\n temp = kwargs.pop(\"temp\", 10)\n num_epoch = kwargs.pop(\"num_epoch\", 20)\n basic_learning_rate = kwargs.pop(\"basic_learning_rate\", 5e-4)\n record_save_path = kwargs.pop(\"record_save_path\", \"./records/student\")\n is_dev = kwargs.pop(\"dev_mode\", False)\n learning_rate_decay = kwargs.pop(\"learning_rate_decay\", 0.01)\n reg_scale = kwargs.pop(\"reg_scale\", 1e-1)\n soft_target_scale = kwargs.pop(\"soft_target_scale\", 1)\n verbose = kwargs.pop(\"verbose\", False)\n\n # Do some check\n if not os.path.exists(teacher_model_path):\n raise RuntimeError(\"Cannot find pretrained teacher model in '{}'\".format(teacher_model_path))\n if not os.path.exists(model_save_path):\n os.makedirs(model_save_path)\n if not 
os.path.exists(record_save_path):\n os.makedirs(record_save_path)\n\n model_save_path = os.path.join(model_save_path, \"{}.ckpt\".format(model_name))\n teacher_model_path = os.path.join(teacher_model_path, \"{}.ckpt\".format(teacher_model_name))\n\n tf.reset_default_graph()\n \n # Get training dataset\n if is_dev:\n train_data, train_label = self.data_manager.dev_data, self.data_manager.dev_label\n else:\n train_data, train_label = self.data_manager.train_data, self.data_manager.train_label\n \n num_train_data = train_data.shape[0]\n\n # The input of model\n X = tf.placeholder(train_data.dtype, [None]+list(train_data.shape[1:]), name=\"input_data\")\n y = tf.placeholder(train_label.dtype, [None]+list(train_label.shape[1:]), name=\"input_label\")\n is_train = tf.placeholder(tf.bool, name=\"is_train\")\n \n dataset = tf.data.Dataset.from_tensor_slices((X, y))\n dataset = dataset.shuffle(buffer_size=8000)\n batched_dataset = dataset.batch(batch_size)\n\n iterator = batched_dataset.make_initializable_iterator()\n batch_data, batch_label = iterator.get_next()\n\n # Get the teacher and student model\n regularizer = tf.contrib.layers.l2_regularizer(scale=reg_scale)\n with tf.variable_scope('student_model', regularizer=regularizer):\n logits, probs = self.student_model(batch_data, is_train=is_train)\n\n with tf.variable_scope('teacher_model'):\n teacher_logits, teacher_probs = self.teacher_model(batch_data, is_train=False, trainable=False, temp=temp)\n\n result = tf.argmax(logits, axis=1)\n correct_num = tf.reduce_sum(tf.cast(tf.equal(result, tf.argmax(batch_label, axis=1)), tf.float32))\n\n teacher_variabels = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"teacher_model\")\n student_variabels = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"student_model\")\n teacher_loader = tf.train.Saver(teacher_variabels)\n student_saver = tf.train.Saver(student_variabels)\n \n # Training part\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=batch_label, name=\"hard_loss\"))\n reg_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, 'teacher_model'))\n loss += reg_loss\n soft_target_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=teacher_probs, name=\"soft_loss\"))\n loss += soft_target_scale * soft_target_loss\n \n global_step = tf.get_variable('global_step', initializer=0.0, trainable=False)\n learning_rate = tf.train.natural_exp_decay(\n basic_learning_rate, global_step,\n decay_rate=learning_rate_decay,\n name='learning_rate', decay_steps=1\n )\n \n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n train_op = optimizer.minimize(loss)\n global_step_add = tf.assign_add(global_step, 1)\n\n train_acc_hist = []\n val_acc_hist = []\n train_loss_hist = []\n best_acc = 0.0\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n teacher_loader.restore(sess, teacher_model_path)\n for i in range(num_epoch):\n sess.run(iterator.initializer, feed_dict={X:train_data, y:train_label})\n cnt = 0\n total_correct_cnt = 0\n total_loss, acc = 0.0, 0.0\n while True:\n try:\n curr_loss, train, right_num, curr_result = sess.run(\n [loss, train_op, correct_num, result],\n feed_dict={is_train: True}\n )\n total_correct_cnt += right_num\n total_loss += curr_loss\n cnt += 1\n except tf.errors.OutOfRangeError:\n acc = total_correct_cnt * 1.0 / num_train_data\n last_loss = total_loss / cnt \n if verbose:\n div = \"===========================\"\n print(\"{}\\nEpoch {}/{}\\t\\tloss: 
{}\\t\\tacc: {}\".format(div, i+1, num_epoch, last_loss, acc))\n train_acc_hist.append(acc)\n train_loss_hist.append(last_loss)\n sess.run([global_step_add])\n if verbose:\n last_global_step, last_learning_rate = sess.run([global_step, learning_rate])\n print(\"learning_rate: {}\".format(last_learning_rate))\n break\n \n # Validation\n sess.run(iterator.initializer, feed_dict={X:self.data_manager.val_data, y:self.data_manager.val_label})\n acc = 0.0\n total_correct_cnt = 0\n while True:\n try:\n right_num = sess.run([correct_num], feed_dict={is_train:False})\n total_correct_cnt += right_num[0]\n except tf.errors.OutOfRangeError:\n acc = total_correct_cnt * 1.0 / self.data_manager.val_data.shape[0]\n if verbose:\n print(\"Validation acc: {}\".format(acc))\n val_acc_hist.append(acc)\n if acc > best_acc:\n best_acc = acc\n student_saver.save(sess, model_save_path)\n break\n # Write train process record\n self._writeRecord(record_save_path, \"{}_train_accuracy\".format(model_name), train_acc_hist)\n self._writeRecord(record_save_path, \"{}_validation_accuracy\".format(model_name), val_acc_hist)\n self._writeRecord(record_save_path, \"{}_train_loss\".format(model_name), train_loss_hist)\n if verbose:\n print(\"Finish Training Student Model! The Best Validation Accuracy is: {}\".format(best_acc))", "def train_model(n_estimators, max_depth, learning_rate, training_data, training_labels, test_data, test_labels, full_filename):\n return sklearn_train_model(\n _create_model(n_estimators, max_depth, learning_rate),\n training_data, training_labels,\n test_data, test_labels,\n full_filename\n )", "def train(self, corpus, linearModels=None, linearNormalizers=None):\n labels, data = self.getLearningData(corpus, linearModels=linearModels,\n linearNormalizers=linearNormalizers)\n if configuration['others']['verbose']:\n sys.stdout.write(reports.seperator + reports.tabs + 'Sampling' + reports.doubleSep)\n if configuration['sampling']['focused']:\n data, labels = sampling.focusedSampling(data, labels, corpus, self.vocabulary)\n labels, data = sampling.overSample(labels, data, linearInMlp=True)\n if configuration['nn']['earlyStop']:\n # To make sure that we will get a random validation dataset\n labelsAndData = sampling.shuffleArrayInParallel(\n [labels, data[0], data[1], data[2]] if linearModels else [labels, data[0], data[1]])\n labels = labelsAndData[0]\n data = labelsAndData[1:]\n if configuration['others']['verbose']:\n lblDistribution = Counter(labels)\n sys.stdout.write(tabs + '{0} Labels in train : {1}\\n'.format(len(lblDistribution), lblDistribution))\n if configuration['others']['verbose']:\n valDistribution = Counter(labels[int(len(labels) * (1 - configuration['nn']['validationSplit'])):])\n sys.stdout.write(tabs + '{0} Labels in valid : {1}\\n'.format(len(valDistribution), valDistribution))\n self.classWeightDic = sampling.getClassWeights(labels)\n sampleWeights = sampling.getSampleWeightArray(labels, self.classWeightDic)\n labels = to_categorical(labels, num_classes=8 if enableCategorization else 4)\n self.model.compile(loss=configuration['nn']['loss'], optimizer=getOptimizer(), metrics=['accuracy'])\n history = self.model.fit(data, labels,\n validation_split=configuration['nn']['validationSplit'],\n epochs=configuration['nn']['epochs'],\n batch_size=configuration['mlp']['batchSize'],\n verbose=2 if configuration['others']['verbose'] else 0,\n callbacks=getCallBacks(),\n sample_weight=sampleWeights)\n if configuration['nn']['checkPoint']:\n self.model = load_model(\n 
os.path.join(configuration['path']['projectPath'], 'Reports-old',\n configuration['path']['checkPointPath']))\n # if configuration['others']['verbose']:\n # sys.stdout.write('Epoch Losses = ' + str(history.history['loss']))\n self.trainValidationData(data, labels, history)", "def train(self, training_data: TrainingData, config: RasaNLUModelConfig, **kwargs: Any) -> None:\n pass", "def fit(self):\n \n # Open an existing model and get the training & test dataset and targets\n train_test_df, target_df = self._get_model_and_data(target=True, set_feature_def=True)\n \n # Check that the estimator is an supervised ML algorithm\n if self.model.estimator_type not in [\"classifier\", \"regressor\"]:\n err = \"Incorrect usage. The estimator specified is not a known classifier or regressor: {0}\".format(self.model.estimator)\n raise Exception(err)\n \n # Check which validation strategy is to be used, if any\n # For an explanation of cross validation in scikit-learn see: http://scikit-learn.org/stable/modules/cross_validation.html#multimetric-cross-validation\n if self.model.time_series_split > 0:\n self.model.validation = \"timeseries\"\n # Set up cross validation to be performed using TimeSeriesSplit\n self.model.cv = TimeSeriesSplit(n_splits=self.model.time_series_split, max_train_size=self.model.max_train_size)\n elif self.model.cv > 0:\n self.model.validation = \"k-fold\"\n elif self.model.test_size > 0:\n self.model.validation = \"hold-out\"\n else:\n self.model.validation = \"external\"\n\n if self.model.validation == \"hold-out\": \n # Split the data into training and testing subsets\n self.X_train, self.X_test, self.y_train, self.y_test = \\\n train_test_split(train_test_df, target_df, test_size=self.model.test_size, random_state=self.model.random_state)\n else:\n self.X_train = train_test_df\n self.y_train = target_df\n \n # Add the training and test data to the model if required\n if self.model.retain_data:\n self.model.X_train = self.X_train\n self.model.y_train = self.y_train\n \n try:\n self.model.X_test = self.X_test\n self.model.y_test = self.y_test\n except AttributeError:\n pass\n \n # Scale the targets and increase stationarity if required\n if self.model.scale_target or self.model.make_stationary:\n # Set up the target transformer\n self.model.target_transformer = TargetTransformer(scale=self.model.scale_target, make_stationary=self.model.make_stationary, stationarity_lags=self.model.stationarity_lags,\\\n missing=self.model.missing, scaler=self.model.scaler, logfile=self.logfile, **self.model.scaler_kwargs)\n\n # Fit the transformer to the training targets\n self.model.target_transformer = self.model.target_transformer.fit(self.y_train)\n\n # Apply the transformer to the training targets\n self.y_train = self.model.target_transformer.transform(self.y_train)\n # Drop samples where the target cannot be transformed due to insufficient lags\n self.X_train = self.X_train.iloc[len(self.X_train)-len(self.y_train):] \n \n # Add lag observations to the samples if required\n if self.model.lags or self.model.lag_target:\n # Check if the current sample will be included as an input, or whether we only use lag observations for predictions\n extrapolate = 1 if self.model.current_sample_as_input else 0\n # Add the lag observations\n self.X_train = self._add_lags(self.X_train, self.y_train, extrapolate=extrapolate, update_features_df=True)\n # Drop targets for samples which were dropped due to null values after adding lags.\n if len(self.y_train) > len(self.X_train):\n self.y_train = 
self.y_train.iloc[len(self.y_train)-len(self.X_train):]\n\n # If this is a Keras estimator, we require the preprocessing to return a data frame instead of a numpy array\n prep_return = 'df' if self.model.using_keras else 'np'\n\n # Construct the preprocessor\n prep = Preprocessor(self.model.features_df, return_type=prep_return, scale_hashed=self.model.scale_hashed, scale_vectors=self.model.scale_vectors,\\\n missing=self.model.missing, scaler=self.model.scaler, logfile=self.logfile, **self.model.scaler_kwargs)\n\n # Setup a list to store steps for the sklearn pipeline\n pipe_steps = [('preprocessor', prep)]\n\n if self.model.dim_reduction:\n # Construct the dimensionality reduction object\n reduction = self.decomposers[self.model.reduction](**self.model.dim_reduction_args)\n \n # Include dimensionality reduction in the pipeline steps\n pipe_steps.append(('reduction', reduction))\n self.model.estimation_step = 2\n else:\n self.model.estimation_step = 1 \n\n # If this is a Keras estimator, update the input shape and reshape the data if required\n if self.model.using_keras:\n # Update the input shape based on the final number of features after preprocessing\n self._keras_update_shape(prep)\n\n # Add the Keras build function, architecture and prediction_periods to the estimator keyword arguments\n self.model.estimator_kwargs['build_fn'] = self._keras_build_fn\n self.model.estimator_kwargs['architecture'] = self.model.architecture\n self.model.estimator_kwargs['prediction_periods'] = self.model.prediction_periods\n\n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(10)\n \n # Check than an identifier has been provided for sorting data if this is a sequence prediction problem\n if self.model.lags or len(self.model.first_layer_kwargs[\"input_shape\"]) > 1:\n assert len(self.model.original_features_df[self.model.original_features_df['variable_type'].isin([\"identifier\"])]) == 1, \\\n \"An identifier is mandatory when using lags or with sequence prediction problems. 
Define this field in your feature definitions.\"\n\n # Cater for multi-step predictions\n if self.model.prediction_periods > 1:\n # Transform y to a vector of values equal to prediction_periods\n self.y_train = utils.vectorize_array(self.y_train, steps=self.model.prediction_periods)\n # Drop values from x for which we don't have sufficient y values\n self.X_train = self.X_train.iloc[:-len(self.X_train)+len(self.y_train)]\n\n # Add a pipeline step to update the input shape and reshape the data if required\n # This transform will also add lag observations if specified through the lags parameter\n # If lag_target is True, an additional feature will be created for each sample using the previous value of y \n reshape = Reshaper(first_layer_kwargs=self.model.first_layer_kwargs, logfile=self.logfile)\n pipe_steps.append(('reshape', reshape))\n self.model.estimation_step += self.model.estimation_step\n\n # Avoid tensorflow error for keras models\n # https://github.com/tensorflow/tensorflow/issues/14356\n # https://stackoverflow.com/questions/40785224/tensorflow-cannot-interpret-feed-dict-key-as-tensor\n kerasbackend.clear_session()\n \n # Try assuming the pipeline involves a grid search\n try:\n # Construct an estimator\n estimator = self.algorithms[self.model.estimator](**self.model.estimator_kwargs)\n\n # Prepare the grid search using the previously set parameter grid\n grid_search = GridSearchCV(estimator=estimator, param_grid=self.model.param_grid, **self.model.grid_search_args)\n \n # Add grid search to the pipeline steps\n pipe_steps.append(('grid_search', grid_search))\n\n # Construct the sklearn pipeline using the list of steps\n self.model.pipe = Pipeline(pipe_steps)\n\n if self.model.validation in [\"k-fold\", \"timeseries\"]:\n # Perform K-fold cross validation\n self._cross_validate()\n\n # Fit the training data to the pipeline\n if self.model.using_keras:\n # https://stackoverflow.com/questions/54652536/keras-tensorflow-backend-error-tensor-input-10-specified-in-either-feed-de\n session = tf.Session()\n kerasbackend.set_session(session)\n with session.as_default():\n with session.graph.as_default():\n sys.stdout.write(\"\\nMODEL: {}, INPUT SHAPE: {}\\n\\n\".format(self.model.name, self.model.first_layer_kwargs['input_shape']))\n y = self.y_train.values if self.y_train.shape[1] > 1 else self.y_train.values.ravel()\n self.model.pipe.fit(self.X_train, y)\n else:\n self.model.pipe.fit(self.X_train, self.y_train.values.ravel())\n\n # Get the best parameters and the cross validation results\n grid_search = self.model.pipe.named_steps['grid_search']\n self.model.best_params = grid_search.best_params_\n self.model.cv_results = grid_search.cv_results_\n\n # Get the best estimator to add to the final pipeline\n estimator = grid_search.best_estimator_\n\n # Update the pipeline with the best estimator\n self.model.pipe.steps[self.model.estimation_step] = ('estimator', estimator)\n\n except AttributeError:\n # Construct an estimator\n estimator = self.algorithms[self.model.estimator](**self.model.estimator_kwargs)\n\n # Add the estimator to the pipeline steps\n pipe_steps.append(('estimator', estimator))\n\n # Construct the sklearn pipeline using the list of steps\n self.model.pipe = Pipeline(pipe_steps)\n\n if self.model.validation in [\"k-fold\", \"timeseries\"]:\n # Perform K-fold cross validation\n self._cross_validate()\n\n # Fit the training data to the pipeline\n if self.model.using_keras:\n # 
https://stackoverflow.com/questions/54652536/keras-tensorflow-backend-error-tensor-input-10-specified-in-either-feed-de\n session = tf.Session()\n kerasbackend.set_session(session)\n with session.as_default():\n with session.graph.as_default():\n sys.stdout.write(\"\\nMODEL: {}, INPUT SHAPE: {}\\n\\n\".format(self.model.name, self.model.first_layer_kwargs['input_shape']))\n y = self.y_train.values if self.y_train.shape[1] > 1 else self.y_train.values.ravel()\n self.model.pipe.fit(self.X_train, y)\n else:\n self.model.pipe.fit(self.X_train, self.y_train.values.ravel())\n \n if self.model.validation == \"hold-out\": \n # Evaluate the model using the test data \n self.calculate_metrics(caller=\"internal\")\n \n if self.model.calc_feature_importances:\n # Select the dataset for calculating importances\n if self.model.validation == \"hold-out\":\n X = self.X_test\n y = self.y_test # Already a numpy array after calculate_metrics\n else:\n X = self.X_train\n y = self.y_train.values.ravel()\n \n # Calculate model agnostic feature importances\n self._calc_importances(X = X, y = y)\n\n # Persist the model to disk\n self.model = self.model.save(self.model.name, self.path, overwrite=self.model.overwrite, compress=self.model.compress)\n \n # Update the cache to keep this model in memory\n self._update_cache()\n \n # Prepare the output\n if self.model.validation != \"external\": \n message = [[self.model.name, 'Model successfully trained, tested and saved to disk.',\\\n time.strftime('%X %x %Z', time.localtime(self.model.state_timestamp)),\\\n \"{0} model has a score of {1:.3f} against the test data.\"\\\n .format(self.model.estimator, self.model.score), self.model.score]]\n else:\n message = [[self.model.name, 'Model successfully trained and saved to disk.',\\\n time.strftime('%X %x %Z', time.localtime(self.model.state_timestamp)),\\\n \"{0} model score unknown as test_size was <= 0.\"\\\n .format(self.model.estimator), np.NaN]]\n \n self.response = pd.DataFrame(message, columns=['model_name', 'result', 'time_stamp', 'score_result', 'score'])\n \n # Send the reponse table description to Qlik\n self._send_table_description(\"fit\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n # Finally send the response\n return self.response", "def train(X_train, y_train, save_model='model.h5'):\n \n # Hyperparameters\n batch_size = 32\n epochs = 30\n learning_rate = 0.001\n \n # Loading model from model.py\n model = m(input_height=IMAGE_HEIGHT, input_width=IMAGE_WIDTH)\n \n # Plot model as image\n plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)\n \n # If trained model exist already then load first for further training\n if tf.gfile.Exists(save_model):\n model.load_weights(save_model)\n model.compile(loss='mse', optimizer=Adam(learning_rate))\n \n # Only save model which has best performed on validation set.\n # These are callbacks which are being used in \"model.fit\" call\n earlyStopping = EarlyStopping(monitor='val_loss', patience=5, verbose=1, mode='min')\n mcp_save = ModelCheckpoint('model.h5', save_best_only=True, monitor='val_loss', mode='min')\n reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=7, verbose=1, epsilon=1e-4, mode='min')\n\n # Train the model\n model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, callbacks=[earlyStopping, mcp_save, reduce_lr_loss], validation_split=0.2, shuffle=True)\n \n return", "def train(self, 
*args, **kwargs):\n return self._train(*args, **kwargs)", "def train(self, *args, **kwargs):\n return self._train(*args, **kwargs)", "def train(self, x_train, y_train, x_val, y_val):\n\n if self.model is not None:\n self.model.fit(x_train, y_train, validation_data=(x_val, y_val), nb_epoch=2, batch_size=128)\n\n else:\n print(\"You need to instantiate a model or load one from a file before training!\")", "def fit(self, model_name, **model_params):\n model = self.model_dict[model_name]\n model.set_params(**model_params)\n self.model = model.fit(\n self.data.loc[:, self.selected_features_], self.data.loc[:, self.target_name])", "def train(self):\n self.ae_train(self.net0, self.ae0_optimizer, self.train0_loader, self.val_loader, name='Net0')\n self.ae_train(self.net1, self.ae1_optimizer, self.train1_loader, self.val_loader, name='Net1')\n self.ae_train(self.net2, self.ae2_optimizer, self.train2_loader, self.val_loader, name='Net2')\n\n self.classifier_train(self.net0, self.optimizer0, self.train0_loader, self.val_loader, name='Net0')\n self.classifier_train(self.net1, self.optimizer1, self.train1_loader, self.val_loader, name='Net1')\n self.classifier_train(self.net2, self.optimizer2, self.train2_loader, self.val_loader, name='Net2')", "def train(self, iteration, train_examples, model_path=None):\n return self.process_trained_model(\n self.train_with_examples(iteration, train_examples, model_path),\n iteration,\n train_examples,\n model_path,\n )", "def train_model(self,model):\r\n \r\n train_state = {'stop_early': False,\r\n 'early_stopping_step': 0,\r\n 'early_stopping_best_val': 1e8,\r\n 'learning_rate': self.lr,\r\n 'epoch_index': 0,\r\n 'train_loss': [],\r\n 'val_loss': [],\r\n 'best_model':model}\r\n \r\n dataset = self.dataset\r\n loss_fn = self.loss_fn\r\n \r\n dataset.set_split('train')\r\n print(\"Training module with \"+str(len(dataset))+\" examples\")\r\n \r\n data_loader = DataLoader(dataset,batch_size=self.batch_size,shuffle=True,\r\n drop_last=True)\r\n \r\n optimizer = optim.Adam(model.parameters(), lr=self.lr)\r\n \r\n for epoch in range(self.epochs):\r\n train_state['epoch_index'] = epoch\r\n #First step in each epoch is to train over all batches\r\n model.train()\r\n dataset.set_split('train')\r\n train_loss = 0\r\n for b_i,batch_data in enumerate(data_loader):\r\n #Step 1: zero gradients\r\n optimizer.zero_grad()\r\n #Step 2: run forward\r\n X = batch_data['x']\r\n output = model(X)\r\n #Step 3: compute loss\r\n target = batch_data['y']\r\n loss = loss_fn(output,target)\r\n #Step 4: run backward\r\n loss.backward()\r\n #Step 5: update\r\n optimizer.step()\r\n \r\n #Record accumulated loss\r\n new_loss = loss.item()\r\n train_loss += new_loss\r\n \r\n train_loss /= b_i\r\n train_state['train_loss'].append(train_loss)\r\n \r\n #After training, compute loss on validation set and check for early stop\r\n model.eval()\r\n dataset.set_split('val')\r\n val_loss = 0\r\n for b_i,batch_data in enumerate(data_loader):\r\n #Step 1: run forward\r\n X = batch_data['x']\r\n output = model(X)\r\n #Step 2: compute loss\r\n target = batch_data['y']\r\n loss = loss_fn(output,target)\r\n \r\n #Record accumulated loss\r\n new_loss = loss.item()\r\n val_loss += new_loss\r\n \r\n val_loss /= b_i\r\n train_state['val_loss'].append(val_loss)\r\n \r\n print(\"Finished epoch \"+str(epoch+1)+\". 
Train loss=\"+\\\r\n str(train_loss)+\", Val loss=\"+str(val_loss))\r\n \r\n if val_loss < train_state['early_stopping_best_val']:\r\n #new best model, reset stopping counter, store model\r\n train_state['early_stopping_step'] = 0\r\n train_state['early_stopping_best_val'] = val_loss\r\n best_model = copy.deepcopy(model)\r\n best_model.load_state_dict(model.state_dict())\r\n train_state['best_model'] = best_model\r\n else:\r\n #val loss not improved; increase early stopping counter\r\n train_state['early_stopping_step'] += 1\r\n if train_state['early_stopping_step'] >= self.early_stopping_criteria:\r\n train_state['stop_early'] = True\r\n print(\"Val loss failed to improve. Stopping early.\")\r\n break\r\n \r\n return train_state['best_model'],train_state", "def train(self, dataset=None, epochs=2, verbose=1, workers=1):\n dataset = utils.prepare_dataset(dataset,\n self.config.batch_size,\n self.config.inputs,\n self.dtype,\n self.config.batch_decay)\n callbacks = [ModelCheckpoint(os.path.join(self.config.model_folder,\n '{epoch:03d}.hdf5'),\n monitor='val_loss',\n verbose=1,\n save_best_only=False,\n save_weights_only=False,\n mode='auto'),\n GeneratorCallback(self.config.test_string,\n self.config.inputs,\n self.config.generated_characters,\n self.dtype)\n ]\n for i in range(epochs):\n self.model.fit(dataset,\n initial_epoch=i,\n epochs=i + 1,\n verbose=verbose,\n use_multiprocessing=True,\n workers=workers,\n callbacks=callbacks)", "def train(self):\n if self.input_col is None:\n raise Exception(\"Preprocessing not specified\")\n self.classifier_model.train(self.input_col, self.output_col)", "def train_network(self, batch_size, epochs):\n\n if self.eq_train: self.model.fit([self.X_train_high_level, self.X_train_low_level], self.y_train, epochs=epochs, batch_size=batch_size, sample_weight=self.train_weights_eq) \n else: self.model.fit([self.X_train_high_level, self.X_train_low_level], self.y_train, epochs=epochs, batch_size=batch_size, sample_weight=self.train_weights)", "def train(self):\r\n \r\n args = self.args\r\n model = self.model\r\n dataset = self.dataset\r\n train_state = self.train_state\r\n optimizer = self.optimizer\r\n scheduler = self.scheduler\r\n train_bar = self.train_bar\r\n val_bar = self.val_bar\r\n epoch_bar = self.epoch_bar\r\n \r\n for epoch_index in range(args.num_epochs):\r\n train_state['epoch_index'] = epoch_index\r\n \r\n # Iterate over training dataset\r\n \r\n running_loss,running_acc = self.train_loop(epoch_index, args, \r\n model, dataset, \r\n optimizer, train_bar)\r\n \r\n train_state['train_loss'].append(running_loss)\r\n train_state['train_acc'].append(running_acc)\r\n \r\n running_loss,running_acc = self.val_loop(epoch_index, args, model, \r\n dataset, optimizer, val_bar)\r\n \r\n \r\n train_state['val_loss'].append(running_loss)\r\n train_state['val_acc'].append(running_acc)\r\n \r\n print(\"Epoch \"+str(epoch_index+1)+\": Running loss=\"+ \\\r\n str(running_loss)+\", Running Acc=\"+str(running_acc))\r\n \r\n train_state = update_train_state(args=args, model=model, \r\n train_state=train_state)\r\n \r\n scheduler.step(train_state['val_loss'][-1])\r\n \r\n if train_state['stop_early']:\r\n break\r\n \r\n train_bar.n = 0\r\n val_bar.n = 0\r\n epoch_bar.set_postfix(best_val=train_state['early_stopping_best_val'] )\r\n epoch_bar.update()\r\n \r\n state_dict = torch.load(train_state['model_filename'])\r\n model.load_state_dict(state_dict)\r\n return model", "def train(\n self,\n training_data: TrainingData,\n config: Optional[RasaNLUModelConfig] = None,\n 
**kwargs: Any,\n ) -> None:\n pass", "def train(\n self,\n training_data: TrainingData,\n config: Optional[RasaNLUModelConfig] = None,\n **kwargs: Any,\n ) -> None:\n pass", "def train_model(model_dir, model_type, train_steps, train_file_name):\n model_dir = tempfile.mkdtemp() if not model_dir else model_dir\n\n m = build_estimator(model_dir, model_type)\n\n # set num_epochs to None to get infinite stream of data.\n m.train(input_fn=input_fn_train(train_file_name, num_epochs=None, shuffle=True),\n steps=train_steps)\n\n return m", "def train(ds, **kwargs):\n# {{{\n\n t_REPS = [ds.grand['representations'][tr] for tr in ds.data['trainers']]\n t_VALS = [ds.grand['values'][tr] for tr in ds.data['trainers']]\n\n # For convenience, set the mean of the training values to 0\n t_AVG = np.mean(t_VALS)\n t_VALS = np.subtract(t_VALS,t_AVG)\n\n # model determination (`s` and `l` hypers, then `a` coefficients)\n # {{{\n # train the hypers\n if ds.data['hypers']:\n print(\"Loading hyperparameters from Dataset.\")\n s = ds.data['s']\n l = ds.data['l']\n else:\n if 'k' in kwargs:\n k = kwargs['k']\n else:\n k = ds.setup['M']\n s, l = find_hypers(t_VALS,t_REPS,k)\n ds.data['hypers'] = True\n ds.data['s'] = s\n ds.data['l'] = l\n\n # train for alpha\n if ds.data['a']:\n print(\"Loading coefficients from Dataset.\") \n alpha = np.asarray(ds.data['a'])\n else:\n print(\"Model training using s = {} and l = {} . . .\".format(s,l))\n alpha = train_a(t_REPS,t_VALS,s,l)\n ds.data['a'] = alpha.tolist()\n # }}}\n\n return ds, t_AVG", "def train_model(X_train, y_train):\n rgs = linear_model.Lasso(alpha=0.1)\n rgs.fit(X_train, y_train)\n return rgs", "def fit_training_data(self):\n self.model.fit(self.X_train)" ]
[ "0.7977213", "0.7823189", "0.77489245", "0.77486706", "0.744381", "0.74099916", "0.74027795", "0.73351485", "0.7293548", "0.72867054", "0.7273446", "0.72411215", "0.7087587", "0.708289", "0.708289", "0.708289", "0.708289", "0.708289", "0.70720506", "0.7064753", "0.70588857", "0.7053754", "0.70412785", "0.7040929", "0.69661665", "0.6928948", "0.6909242", "0.68938595", "0.6881733", "0.68404883", "0.68369144", "0.6804179", "0.6793077", "0.6776669", "0.67417413", "0.6733174", "0.6726741", "0.6719101", "0.67138046", "0.67065465", "0.6700611", "0.6682345", "0.6652164", "0.6650539", "0.66500205", "0.6649993", "0.66417605", "0.6641194", "0.6638873", "0.6633406", "0.6624543", "0.6621249", "0.6612739", "0.66122866", "0.66122866", "0.6606269", "0.6602269", "0.65862954", "0.65793437", "0.65756804", "0.6573188", "0.65729195", "0.6569737", "0.65685695", "0.65653676", "0.65638435", "0.65616405", "0.6561236", "0.6556857", "0.65340203", "0.65337884", "0.65337884", "0.6526173", "0.65247726", "0.6522723", "0.65134764", "0.65129477", "0.65083396", "0.6508054", "0.6508025", "0.65062505", "0.6501652", "0.6493458", "0.64850676", "0.64799935", "0.64799935", "0.64792085", "0.64704025", "0.6469765", "0.6466652", "0.64462614", "0.64373654", "0.64370364", "0.6424216", "0.64188606", "0.64181477", "0.64181477", "0.6415636", "0.63976985", "0.6397179", "0.6396276" ]
0.0
-1
Update the config information with new dropout values.
def update_dropout(info, dropout, dropout_type, prop_name): if dropout_type == "schnet_dropout": info["model_params"]["schnet_dropout"] = dropout elif dropout_type == "chemprop_dropout": info["model_params"]["cp_dropout"] = dropout elif dropout_type == "readout_dropout": # if it's in the readout layers, find the dropout # layers in the readout dictionary and update them readout = info["model_params"]["readoutdict"] layer_dics = readout[prop_name] for layer_dic in layer_dics: if layer_dic["name"] == "Dropout": layer_dic["param"]["p"] = dropout info["model_params"]["readoutdict"] = {prop_name: layer_dics} elif dropout_type == "attention_dropout": info["model_params"]["boltzmann_dict"]["dropout_rate"] = dropout else: info["model_params"][dropout_type] = dropout
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def conf_update(self):\n pass", "def update(self):\n self.save_config_file()", "def updateConfig(self):\n # Make sure to keep the default values in place.\n if self.newConfig['sensor'] == 0:\n self.newConfig['sensor'] = self.config['sensor']\n if self.newConfig['camera'] == 0:\n self.newConfig['camera'] = self.config['camera']\n if not self.newConfig['auto']['times']:\n self.newConfig['auto']['times'] = self.config['auto']['times']\n if not self.newConfig['auto']['days']:\n self.newConfig['auto']['days'] = self.config['auto']['days']\n\n # Show the changes.\n if self.verbosity >= 1:\n print('%s: Updating configuration file...' % self.feederName)\n try:\n for key in self.config.keys():\n if type(self.config[key]) is dict:\n for subkey in self.config[key].keys():\n if self.config[key][subkey] != self.newConfig[key][subkey]:\n print('%s: Updating %s from %s to %s.' % (self.feederName, subkey, self.config[key][subkey], self.newConfig[key][subkey]))\n elif self.config[key] != self.newConfig[key]:\n print('%s: Updating %s from %s to %s.' % (self.feederName, key, self.config[key], self.newConfig[key]))\n except ValueError:\n if self.verbosity >= 1:\n print('%s: Configuration file does not contain a valid JSON object.' % self.feederName)\n if self.verbosity == 2:\n print('%s: Overwriting configuration file to: %s.' % (self.feederName, self.config))\n\n # Change the configuration file.\n self.config = self.newConfig\n self.writeConfig()", "def changeDropout(self,dropout):\n self.dropout = dropout", "def with_config_update(self):\n original_config = self.load_config()\n\n config_data = original_config.json\n if str(self.ITEM_PUBLIC_ID) in config_data[f\"{self.ITEM_TYPE}s\"]:\n config_data[f\"{self.ITEM_TYPE}s\"].remove(str(self.ITEM_PUBLIC_ID))\n config_data[f\"{self.ITEM_TYPE}s\"].append(\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:0.0.1\"\n )\n self.dump_config(AgentConfig.from_json(config_data))\n try:\n yield\n finally:\n self.dump_config(original_config)", "def update_global_config(self, config, **kwargs):\n pass", "def update_config(self, data):\n self.config.data = dict_merge(self.config.data, data)\n self.config.save()", "def refresh_configuration(self):\n pass", "def updateconfig(self):\n\n # Initialize the yaml data\n ydata = {\"metadata\": self._metadata, \"nodes\": self._nodes}\n\n # Write the system config file\n filename = self._rootdir + self._metadata[\"system_config_file\"]\n with open(filename, \"w\") as yamlfile:\n yaml.dump(ydata, yamlfile)", "def _overwrite_with_config(self, new_cfg):\n for section in new_cfg.sections():\n for key, val in new_cfg.items(section):\n self.config.set(section, key, val)", "def update_settings(self):\n\n self.sim.account.set_balance(int(self.balance_str.get()))\n\n self.sim.config.set_base_bet(int(self.base_bet_str.get()))\n self.sim.config.set_payout(float(self.payout_str.get()))\n self.sim.config.set_iterations(int(self.iterations_str.get()))\n self.sim.config.set_loss_adder(int(self.loss_adder_str.get()))", "def update_ranges(self):\n new_ranges = self.get_z_ranges()\n self.config.update_ranges(new_ranges)", "def update_config(self):\n self.channel_count = self.config_global['channel_count']\n self.pixel_count = self.config_global['pixel_count']\n self.pixel_index_max = self.pixel_count - 1\n self.repeat_count = self.config_global['repeat_count']\n self.repeat_snake = self.config_global['repeat_snake']\n\n self.update_interval = self.config_global['update_interval']\n self.mode_16bit = self.config_global['mode_16bit']\n\n 
self.color_channels = self.config_global['color_channels']\n # self.color_channels = collections.namedtuple(\n # 'color_channels',\n # **self.color_channels_dict\n # )\n self.color_channels_count = len(self.color_channels)\n if self.mode_16bit:\n self.color_channels_count = self.color_channels_count * 2\n\n self.total_channel_count = (\n self.pixel_count *\n self.color_channels_count\n )\n if self.repeat_count > 0:\n self.total_channel_count *= self.repeat_count", "def _on_config_changed(self, _):\n self._configure_pod()", "def config_update(cls, **options) -> None:\n cls._logger.debug(\"[%s]: Update config from kwargs.\", cls.__name__)\n\n config_update: Dict = {k: options[k] for k in options.keys() if \"graph_\" in k}\n\n cls._config.update(config_update)\n\n cls._logger.debug(\"[%s]: Final config: %s\", cls.__name__, cls._config)", "def config_updated(self):\n if callable(self.on_config_updated):\n self.on_config_updated(self.config())", "def _update_params(self):\n log.debug(\"Updating parameter dict\")\n old_config = self._param_dict.get_config()\n self._get_config()\n new_config = self._param_dict.get_config() \n if (new_config != old_config):\n self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)", "def apply_user_configuration(self, config):\n self.logDisplay.set_logging_level(config['log'].get('logging_level', fallback='Verbose'))\n\n # MIDI\n self.winchMidiInputCombo.select_item(config['midi'].get('winch_midi_input', fallback='<no selection>'))\n self.midiOutputCombo.select_item(config['midi'].get('midi_output', fallback='<no selection>'))\n\n # OSC\n oscdef = config['osc']\n self.oscListenerConfig.set_OSC_port(oscdef.get('listener_addr', fallback='localhost'),\n oscdef.getint('listener_port', fallback=3751))\n\n self.oscSenderConfig.set_OSC_port(oscdef.get('sender_addr', fallback='localhost'),\n oscdef.getint('sender_port', fallback=3752))\n\n # DMX\n self.dmxSelect.select_item(config['dmx'].get('dmx_output_serial_port', fallback='<no selection>'))\n\n # winches\n for i, winchSelect in enumerate(self.winchSelects):\n key = \"winch_%d_output_serial_port\" % (i+1)\n winchSelect.select_item(config['winches'].get(key, fallback = '<no selection>'))\n return", "def _config_options(self):\n self._config_sortable(self._sortable)\n self._config_drag_cols(self._drag_cols)", "def _update(self):\n # clear group before rebuild\n self.clear()\n\n # build configuration groups\n self._config_names = []\n for i in range(self._n_configs):\n config_name = f\"config{i+1:02}\"\n self._config_names.append(config_name)\n self._build_config_group(config_name)\n\n # reset active configuration if necessary\n if not all(cname in self._config_names for cname in self._active_config):\n self._active_config = (self._config_names[0],)\n\n # build datasets\n self._build_datasets()", "def _auto_update_configuration(self) -> None:\n self.config = rasa.utils.train_utils.update_confidence_type(self.config)\n rasa.utils.train_utils.validate_configuration_settings(self.config)\n self.config = rasa.utils.train_utils.update_similarity_type(self.config)\n self.config = rasa.utils.train_utils.update_evaluation_parameters(self.config)", "def set_config(self, config):\n if 'symbols' in config:\n self.symbols = self.config['symbols'] = config['symbols']\n if 'update_frequency_milliseconds' in config:\n self.update_frequency_milliseconds = self.config['update_frequency_milliseconds'] = int(\n config['update_frequency_milliseconds']\n )\n if 'elements_per_update' in config:\n self.elements_per_update = 
self.config['elements_per_update'] = int(config['elements_per_update'])", "async def async_update_config(self, config: ConfigType) -> None:\n self._config = config\n # just in case min/max values changed\n if self._current_value is None:\n return\n self._current_value = min(self._current_value, self._maximum)\n self._current_value = max(self._current_value, self._minimum)\n self.async_write_ha_state()", "def _save_config(self, data):\n curr_conf = self.config_entry.options.copy()\n curr_conf.update(data)\n curr_conf.update(self._conf_devs_option)\n\n return self.async_create_entry(title=\"\", data=curr_conf)", "def update_configuration(self, config):\n\n config[\"data_transformation\"][\"n_classification_bins\"] = config[\"n_classification_bins\"]\n config[\"data_transformation\"][\"nassets\"] = config[\"nassets\"]\n config[\"data_transformation\"][\"classify_per_series\"] = config[\"classify_per_series\"]\n config[\"data_transformation\"][\"normalise_per_series\"] = config[\"normalise_per_series\"]\n\n return config", "def update_config(self, config):\n return self._update_config(\"config\", config)", "def config(self, config_dict):\r\n self._cfg.config = config_dict", "def configure(self, config: dict):\n self.config.update(config)", "def update(self, obj):\n\n self.cfg.update(obj)", "def _update_params(self, *args, **kwargs):\n\n \n # Get old param dict config.\n old_config = self._param_dict.get_config()\n \n # Issue display commands and parse results.\n timeout = kwargs.get('timeout', SBE37_TIMEOUT)\n self._do_cmd_resp('ds',timeout=timeout)\n self._do_cmd_resp('dc',timeout=timeout)\n \n # Get new param dict config. If it differs from the old config,\n # tell driver superclass to publish a config change event.\n new_config = self._param_dict.get_config()\n if new_config != old_config:\n self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)", "def on_update(self, evt):\n print(evt)\n for name in self.widgetNames:\n try:\n widget = wx.FindWindowByName(name)\n if isinstance(widget, wx.ComboBox):\n selection = widget.GetValue()\n choices = widget.GetItems()\n choices.insert(0, selection)\n value = choices\n else:\n value = widget.GetValue()\n\n data = self.tree.GetPyData(self.current_selection)\n data['Config'][name] = value\n self.tree.SetPyData(self.current_selection, data)\n except Exception as E:\n logging.error(\"{0!s}: {1!s}\".format(E, name))\n raise E", "def _do_update(self, meta, k, v):\n self.runtime.logger.info('{}: [{}] -> {}'.format(meta.in_group_config_path, k, v))\n meta.config[k] = v\n meta.save()", "def on_config_changed(self, event):\n unit = self.model.unit", "def update_config(self, update_dict):\n self.config = recursive_merge_dicts(self.config, update_dict)", "def _postprocess_config(self, config: Dict[str, Any]) -> Dict[str, Any]:\n new_config = self.config_space.copy()\n new_config.update(cast_config_values(config, config_space=self.config_space))\n return new_config", "def reload_config(self):\n pass", "def apply(self) -> None:\n _ba.apply_config()", "def update_configs(self, config):\n for what in self.plugins: # backend, repo etc.\n for key in self.plugins[what]: # s3, filesystem etc.\n # print(\"Updating configuration of\", what, key)\n self.plugins[what][key].config(what='set', params=config)\n return", "def updated(self, newConfiguration):\n log.debug('ConfigListener: configuration %s updated' % newConfiguration)", "def write_config(self):\n cfg = {\n 'channel':self.channel,\n 'seuil_min':self.seuil_min,\n 'last_level':self.last_level,\n 
'last_level_date':self.last_level_date\n }\n write_conf(self.CONF_FILE,cfg)", "def update_config(self, kv: dict):\n self._configs.update(kv)\n self._save()", "def update_zoo_config(self, site_name, virt_path, new_config):\n root_path = self.map_path(site_name, virt_path)\n zoo_config_path = os.path.join(root_path, \".zoo\")\n config = get_zoo_config(zoo_config_path) or {}\n\n app = config.get('application')\n # disabled ability\n if 'selected-engine' in new_config :\n new_engine = new_config.get('selected-engine')\n if 'parameters' in app:\n app['parameters']['selected-engine'] = new_engine\n else:\n app['parameters'] = OrderedDict()\n app['parameters']['selected-engine'] = new_engine\n\n\n if 'engines' in new_config:\n engines = new_config.get('engines')\n app['engines'] = engines\n\n if 'locations' in new_config:\n app['locations'] = new_config['locations']\n\n if \"description\" in app:\n app[\"description\"] = Literal(app[\"description\"])\n\n if \"find_installed_command\" in app:\n app[\"find_installed_command\"] = Literal(app[\"find_installed_command\"])\n\n if \"install_command\" in app:\n app[\"install_command\"] = Literal(app[\"install_command\"])\n\n if \"uninstall_command\" in app:\n app[\"uninstall_command\"] = Literal(app[\"uninstall_command\"])\n\n if \"upgrade_command\" in app:\n app[\"upgrade_command\"] = Literal(app[\"upgrade_command\"])\n\n # save .zoo\n YamlHelper.save(config, zoo_config_path)", "def update(self, other: Mapping[str, Any]) -> None:\n self._config.update(self._flatten_dict(other))", "def destination_config(self, destination_config):\n self._destination_config = destination_config", "def configure(new_config: Mapping):\n config.update(new_config)", "def __build_empty_config(self):\n\n self.__config.add_section('IN_OUT')\n self.__config['IN_OUT']['source'] = 'Set Source Directory'\n self.__config['IN_OUT']['destination'] = 'Set Destination Directory'\n self.__save_config()\n\n self.__is_dirty = False\n self.__default = True", "def reset_config() -> None:\r\n self.ui.labelCloneListsLocation.setText(str(pathlib.Path(config.path_clone_list).resolve()))\r\n self.ui.labelMetadataLocation.setText(str(pathlib.Path(config.path_metadata).resolve()))\r\n self.ui.lineEditCloneListDownloadLocation.setText(config.clone_list_metadata_download_location)\r\n parent.clone_lists_folder = config.path_clone_list\r\n parent.metadata_folder = config.path_metadata\r\n parent.clone_list_metadata_url = config.clone_list_metadata_download_location\r\n write_config(parent, dat_details, config, self)", "def test_config_update(get_config):\n cfg = get_config(Config, {'test': 'main'})\n update_from = {\"name\": \"new_name\"}\n cfg.update(update_from)\n\n assert cfg.data.get('name') == \"new_name\", \"config was not updated\"", "def update_config(self):\n if self.integration is None:\n return\n self.enabled = self.integration.has_option(self.get_config_name())\n self.pedantic = self.integration.configuration.get_bool(\n 'filter.mrproper')", "def on_save(self):\r\n #new_config = ConfigParser.RawConfigParser()\r\n cur_config = self.config.dict_config\r\n #\r\n # update the dict_config\r\n cur_config[\"access_restriction\"][\"ips\"] = self.text_ips.get(1.0, tk.END).strip()\r\n cur_config[\"access_restriction\"][\"ar_url\"] = self.entry_url.get().strip()\r\n #\r\n cur_config[\"email\"][\"relay_server_host\"] = self.entry_server_host.get().strip()\r\n cur_config[\"email\"][\"relay_server_port\"] = self.entry_server_port.get().strip()\r\n cur_config[\"email\"][\"email_from\"] = 
self.entry_from.get().strip()\r\n cur_config[\"email\"][\"recipients\"] = self.text_recipients.get(1.0, tk.END).strip()\r\n cur_config[\"email\"][\"ar_enabled_subject\"] = self.entry_enabled_subject.get().strip()\r\n cur_config[\"email\"][\"ar_enabled_body\"] = self.text_enabled_body.get(1.0, tk.END).strip()\r\n cur_config[\"email\"][\"ar_disabled_subject\"] = self.entry_disabled_subject.get()\r\n cur_config[\"email\"][\"ar_disabled_body\"] = self.text_disabled_body.get(1.0, tk.END).strip()\r\n\r\n #self.action.save_config()\r\n # # sync dict_config to the gui\r\n # for section in self.config.dict_config:\r\n # new_config.add_section(section)\r\n # for item in self.config.dict_config[section]:\r\n # new_config.set(section, item, self.config.dict_config[section][item])\r\n # #\r\n # # saving to a file\r\n # with open(self.config.file_path, 'w') as newconfigfile:\r\n # new_config.write(newconfigfile)\r\n #\r\n # # mbox.showinfo(\"Information\",\r\n # # \"Current configuration has been successfully saved to '%s'\" % os.path.basename(self.configfile))\r\n # self.console.debug(\"Configuration has been saved to '%s'\" % self.config.file_path)\r", "def config(self, config):\n self._config = config", "def WriteConfig(self):\n config = wx.Config.Get()\n config.DeleteGroup(DEPS_CONFIG)\n config.SetPath(DEPS_CONFIG)\n pos = 0\n for entry in self.array:\n config.Write(\"Dependency%s\" % pos, entry)\n pos += 1\n config.SetPath('..')", "def _configure(self):\n OutputSoln._configure(self)", "def write_config(self, cfg, slot):\n pass", "def _add_db_in_config(self):\n logger.info(f\"Updating configuration file in {self.configfile}\")\n with open(self.configfile, \"a\") as fp:\n print(self.ref_name + \".genome : \" + self.ref_name, file=fp)", "def update(self, config_dict):\r\n self._update(config_dict, allow_new_keys=True)", "def _refreshconfig(self):\n self.config = ConfigGenerator(os.path.join(self.rundir, const.CONFIG_FILE))", "def config(self):\n state_file_id = \"{env}-{component}\".format(env=self.environment, component=self.component)\n\n grunt_config_template = \"\"\"lock = {{\nbackend = \"dynamodb\"\nconfig {{\nstate_file_id = \"{state_file_id}\"\naws_region = \"{region}\"\ntable_name = \"terragrunt_locks\"\nmax_lock_retries = 360\n}}\n}}\nremote_state = {{\nbackend = \"s3\"\nconfig {{\nencrypt = \"true\"\nbucket = \"{s3_bucket}\"\nkey = \"{env}/{component}/terraform.tfstate\"\nregion = \"{region}\"\n}}\n}}\"\"\"\n\n with open('.terragrunt', 'w') as f:\n f.write(grunt_config_template.format(\n state_file_id=state_file_id,\n region=self.metadata['REGION'],\n s3_bucket=self.s3_bucket,\n env=self.environment,\n component=self.component\n ))", "def updateConfig(self, conf=None):\r\n if conf is not None:\r\n self.config.update(conf)\r\n if self.visprotocol is not None:\r\n self.visprotocol.updateSettings(self.getConfigData())\r\n # else:\r\n # _LOGGER.warning(\"Visonic link is not set\")\r\n # make the changes to the platform parameters (used in alarm_control_panel)\r\n # the original idea was to keep these separate for multiple partitions but now i'm not so sure its necessary\r\n\r\n self.hass.data[DOMAIN][\"arm_without_code\"] = self.toBool(self.config.get(CONF_ARM_CODE_AUTO, False))\r\n self.hass.data[DOMAIN][\"force_keypad\"] = self.toBool(self.config.get(CONF_FORCE_KEYPAD, False))\r\n self.hass.data[DOMAIN][\"arm_away_instant\"] = self.toBool(self.config.get(CONF_INSTANT_ARM_AWAY, False))\r\n self.hass.data[DOMAIN][\"arm_home_instant\"] = self.toBool(self.config.get(CONF_INSTANT_ARM_HOME, 
False))\r\n\r\n _LOGGER.debug(\"[Settings] Log Max Entries %s\", self.config.get(CONF_LOG_MAX_ENTRIES))\r\n _LOGGER.debug(\"[Settings] Log Reverse %s\", self.config.get(CONF_LOG_REVERSE))\r\n _LOGGER.debug(\"[Settings] Log Create Event %s\", self.config.get(CONF_LOG_EVENT))\r\n _LOGGER.debug(\"[Settings] Log Final Event %s\", self.config.get(CONF_LOG_DONE))\r\n _LOGGER.debug(\"[Settings] Log XML Filename %s\", self.config.get(CONF_LOG_XML_FN))\r\n _LOGGER.debug(\"[Settings] Log CSV Filename %s\", self.config.get(CONF_LOG_CSV_FN))\r\n _LOGGER.debug(\"[Settings] Log CSV title Row %s\", self.config.get(CONF_LOG_CSV_TITLE))", "def update_config(self, config):\n self.config = {\n \"display_name\": \"\",\n \"description\": \"\",\n \"required\": 1,\n \"type\": \"string\"\n }\n self.config.update(config)\n self.API_KEY = self.config['key']", "def update_config(self, config):\n self.config = {\n \"display_name\": \"\",\n \"description\": \"\",\n \"required\": 1,\n \"type\": \"string\"\n }\n self.config.update(config)\n self.API_KEY = self.config['key']", "def refresh_config(self):\n self._user_config = UserConfig(None)", "def copy_config_to_properties(self, config):\n ## EPICS\n self.epics_root = config.get('epics_root')\n\n ## Directories\n self.smurf_cmd_dir = config.get('smurf_cmd_dir')\n self.tune_dir = config.get('tune_dir')\n self.status_dir = config.get('status_dir')\n self.default_data_dir = config.get('default_data_dir')\n\n ## Useful constants\n constant_cfg = config.get('constant')\n self.pA_per_phi0 = constant_cfg.get('pA_per_phi0')\n\n ## Timing\n timing_cfg = config.get('timing')\n self.timing_reference = timing_cfg['timing_reference']\n\n ## Cold amplifier biases\n amp_cfg = config.get('amplifier')\n\n # 4K HEMT\n self.hemt_Vg = amp_cfg['hemt_Vg']\n self.hemt_bit_to_V = amp_cfg['bit_to_V_hemt']\n self.hemt_Vd_series_resistor = amp_cfg['hemt_Vd_series_resistor']\n self.hemt_Id_offset = amp_cfg['hemt_Id_offset']\n self.hemt_gate_min_voltage = amp_cfg['hemt_gate_min_voltage']\n self.hemt_gate_max_voltage = amp_cfg['hemt_gate_max_voltage']\n\n # 50K HEMT\n self.fiftyk_Vg = amp_cfg['LNA_Vg']\n self.fiftyk_dac_num = amp_cfg['dac_num_50k']\n self.fiftyk_bit_to_V = amp_cfg['bit_to_V_50k']\n self.fiftyk_amp_Vd_series_resistor = amp_cfg['50K_amp_Vd_series_resistor']\n self.fiftyk_Id_offset = amp_cfg['50k_Id_offset']\n ## Tune parameters\n tune_band_cfg = config.get('tune_band')\n self.default_tune = tune_band_cfg['default_tune']\n self.gradient_descent_gain = {\n int(band):v for (band,v) in\n tune_band_cfg['gradient_descent_gain'].items()}\n self.gradient_descent_averages = {\n int(band):v for (band,v) in\n tune_band_cfg['gradient_descent_averages'].items()}\n self.gradient_descent_converge_hz = {\n int(band):v for (band,v) in\n tune_band_cfg['gradient_descent_converge_hz'].items()}\n self.gradient_descent_step_hz = {\n int(band):v for (band,v) in\n tune_band_cfg['gradient_descent_step_hz'].items()}\n self.gradient_descent_momentum = {\n int(band):v for (band,v) in\n tune_band_cfg['gradient_descent_momentum'].items()}\n self.gradient_descent_beta = {\n int(band):v for (band,v) in\n tune_band_cfg['gradient_descent_beta'].items()}\n self.feedback_start_frac = {\n int(band):v for (band,v) in\n tune_band_cfg['feedback_start_frac'].items()}\n self.feedback_end_frac = {\n int(band):v for (band,v) in\n tune_band_cfg['feedback_end_frac'].items()}\n self.eta_scan_del_f = {\n int(band):v for (band,v) in\n tune_band_cfg['eta_scan_del_f'].items()}\n self.eta_scan_averages = {\n int(band):v for (band,v) 
in\n tune_band_cfg['eta_scan_averages'].items()}\n self.delta_freq = {\n int(band):v for (band,v) in\n tune_band_cfg['delta_freq'].items()}\n # Tracking algo\n self.lms_freq_hz = {\n int(band):v for (band,v) in\n tune_band_cfg['lms_freq'].items()}\n\n ## Reading/writing data\n self.fs = config.get('fs')\n\n ## In fridge\n self.R_sh = config.get('R_sh')\n\n ## Which bands are have their configurations specified in the\n ## pysmurf configuration file?\n smurf_init_config = config.get('init')\n bands = smurf_init_config['bands']\n\n ## Carrier\n self.dsp_enable = smurf_init_config['dspEnable']\n self.ultrascale_temperature_limit_degC = config.get('ultrascale_temperature_limit_degC')\n self.data_out_mux = {\n band:smurf_init_config[f'band_{band}']['data_out_mux']\n for band in bands}\n\n ## AMC\n # Which bands are present in the pysmurf configuration file?\n self.bands = bands\n self.amplitude_scale = {\n band:smurf_init_config[f'band_{band}']['amplitude_scale']\n for band in bands}\n self.iq_swap_in = {\n band:smurf_init_config[f'band_{band}']['iq_swap_in']\n for band in bands}\n self.iq_swap_out = {\n band:smurf_init_config[f'band_{band}']['iq_swap_out']\n for band in bands}\n self.ref_phase_delay = {\n band:smurf_init_config[f'band_{band}']['refPhaseDelay']\n for band in bands}\n self.ref_phase_delay_fine = {\n band:smurf_init_config[f'band_{band}']['refPhaseDelayFine']\n for band in bands}\n self.band_delay_us = {\n band:smurf_init_config[f'band_{band}']['bandDelayUs']\n for band in bands}\n self.att_uc = {\n band:smurf_init_config[f'band_{band}']['att_uc']\n for band in bands}\n self.att_dc = {\n band:smurf_init_config[f'band_{band}']['att_dc']\n for band in bands}\n self.trigger_reset_delay= {\n band:smurf_init_config[f'band_{band}']['trigRstDly']\n for band in bands}\n\n # Mapping from attenuator numbers to bands\n att_cfg = config.get('attenuator')\n att_cfg_keys = att_cfg.keys()\n attenuator = {}\n attenuator['band'] = np.zeros(len(att_cfg_keys),dtype=int)\n attenuator['att'] = np.zeros(len(att_cfg_keys),dtype=int)\n for i, k in enumerate(att_cfg_keys):\n attenuator['band'][i] = att_cfg[k]\n attenuator['att'][i] = k[-1]\n self.attenuator = attenuator\n\n ## RTM\n flux_ramp_cfg = config.get('flux_ramp')\n self.num_flux_ramp_counter_bits = flux_ramp_cfg['num_flux_ramp_counter_bits']\n self.reset_rate_khz = tune_band_cfg.get('reset_rate_khz')\n self.fraction_full_scale = tune_band_cfg.get('fraction_full_scale')\n\n ## Cryocard\n self.bias_line_resistance = config.get('bias_line_resistance')\n self.high_low_current_ratio = config.get('high_low_current_ratio')\n self.high_current_mode_bool = config.get('high_current_mode_bool')\n # Mapping from peripheral interface controller (PIC) to bias group\n pic_cfg = config.get('pic_to_bias_group')\n pic_cfg_keys = pic_cfg.keys()\n pic_to_bias_group = np.zeros((len(pic_cfg_keys), 2), dtype=int)\n for i, k in enumerate(pic_cfg_keys):\n val = pic_cfg[k]\n pic_to_bias_group[i] = [k, val]\n self.pic_to_bias_group = pic_to_bias_group\n\n ## Tracking algo\n # lmsGain ; this one's a little odd ; it's defined in each of\n # the band_# configuration file blocks, while the other main\n # tracking algorithm parameter, lms_freq_hz, is defined in the\n # tune_band configuration file block...\n self.lms_gain = {\n band:smurf_init_config[f'band_{band}']['lmsGain']\n for band in bands}\n self.lms_delay = {\n band:smurf_init_config[f'band_{band}']['lmsDelay']\n for band in bands}\n self.feedback_enable = {\n band:smurf_init_config[f'band_{band}']['feedbackEnable']\n 
for band in bands}\n self.feedback_gain = {\n band:smurf_init_config[f'band_{band}']['feedbackGain']\n for band in bands}\n self.feedback_limit_khz = {\n band:smurf_init_config[f'band_{band}']['feedbackLimitkHz']\n for band in bands}\n self.feedback_polarity = {\n band:smurf_init_config[f'band_{band}']['feedbackPolarity']\n for band in bands}\n\n ## Mappings\n # Bias groups available\n self.all_groups = config.get('all_bias_groups')\n\n # Number of bias groups and bias group to RTM DAC pair\n # mapping\n bias_group_cfg = config.get('bias_group_to_pair')\n bias_group_keys = bias_group_cfg.keys()\n\n # Number of bias groups\n self.n_bias_groups = len(bias_group_cfg)\n\n # Bias group to RTM DAC pair mapping\n bias_group_to_pair = np.zeros((len(bias_group_keys), 3), dtype=int)\n for i, k in enumerate(bias_group_keys):\n val = bias_group_cfg[k]\n bias_group_to_pair[i] = np.append([k], val)\n self.bias_group_to_pair = bias_group_to_pair\n\n # Bad resonator mask\n bad_mask_config = config.get('bad_mask')\n bad_mask_keys = bad_mask_config.keys()\n bad_mask = np.zeros((len(bad_mask_keys), 2))\n for i, k in enumerate(bad_mask_keys):\n bad_mask[i] = bad_mask_config[k]\n self.bad_mask = bad_mask", "def update_config(config: OmegaConf):\n # expected config_global format\n schema = OmegaConf.structured(config._metadata.object_type)\n\n # serialize config\n # For config logging we use yaml format (Trains: Artifacts -> Model configuration)\n # save config in a temp yaml file\n config_global_file = tempfile.NamedTemporaryFile(\"w+t\")\n config_global_file.write(OmegaConf.to_yaml(config))\n config_global_file.flush()\n config_global_file_name = config_global_file.name\n\n # sync with server if a task has been created\n current_task = Task.current_task()\n if current_task:\n # send yaml to trains server\n config_global_file_name = Task.current_task().connect_configuration(config_global_file_name)\n\n # for visualization (Trains: Hyperparameters)\n Task.current_task().connect(generate_trains_hyperparameter_dict(config))\n\n config_back_ = OmegaConf.load(config_global_file_name)\n config_back = OmegaConf.merge(schema, config_back_)\n\n return config_back", "def update_config(update):\n global _config\n new_config = copy.deepcopy(_config)\n _update_dict_recursive(new_config, update)\n logging.config.dictConfig(new_config)\n _configure_ulog_bridge()\n _config = new_config", "def config_changed(self, update_parent=True):\n super(Assembly, self).config_changed(update_parent)\n # driver must tell workflow that config has changed because\n # dependencies may have changed\n if self.driver is not None:\n self.driver.config_changed(update_parent=False)\n \n # Detect and save any loops in the graph.\n if hasattr(self, '_depgraph'):\n graph = self._depgraph._graph\n self._graph_loops = nx.strongly_connected_components(graph)", "def platform_config_update(config):\n global remote_port_map\n config[\"port_map\"] = remote_port_map.copy()\n config[\"caps_table_idx\"] = 0", "def relay_buzzer_config(self):\r\n\t\tbus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_CONFIG, PCA9536_WDBZ_CONFIG_PINX)\r\n\t\t\r\n\t\t\"\"\"Select the Output Port Register Configuration data from the given provided value\"\"\"\r\n\t\tif self.pin == 0 :\r\n\t\t\tbus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_OUTPUT, PCA9536_WDBZ_OUTPUT_PIN0)\r\n\t\telif self.pin == 1 :\r\n\t\t\tbus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_OUTPUT, PCA9536_WDBZ_OUTPUT_PIN1)\r\n\t\telif self.pin == 2 
:\r\n\t\t\tbus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_OUTPUT, PCA9536_WDBZ_OUTPUT_PIN2)", "def populate_config(self, config):\n self.use_wine_mappings.set_active(config['use_wine_mappings'])\n self.force_recheck.set_active(config['force_recheck'])\n self._previous_force_recheck = config['force_recheck']\n self.resume.set_active(config['resume'])\n try:\n self.glade.get_widget('time_added_checkbox').set_active(\n 'time_added' in config['transfer_meta'])\n except KeyError:\n pass\n self.resume_dat_entry.set_text(config['previous_resume_dat_path'])", "def getUserConfig(self):\n\n # Load Autobidder stats\n userconfig_json = open('./data/config.json')\n json1_str = userconfig_json.read()\n configops = json.loads(json1_str)[0]\n\n config_choices = []\n for key, value in configops.items():\n config_choices.append(value)\n\n conserve_bids = config_choices[0]\n sleep_time = config_choices[1]\n botspeed = config_choices[2]\n bidexpiration_ceiling = config_choices[3]\n buyceiling = config_choices[4]\n sellceiling = config_choices[5]\n\n sleep_time = int(sleep_time)\n botspeed = float(botspeed)\n conserve_bids = int(conserve_bids)\n bidexpiration_ceiling = int(bidexpiration_ceiling)\n buyceiling = float(buyceiling/100)\n sellceiling = float(sellceiling/100)\n\n if (buyceiling > 1):\n log_event(self.queue, \"buy ceiling greater than 1: \" +\n str(buyceiling))\n log_event(self.queue, \"setting it to .85: \")\n buyceiling = 0.85\n\n if (sellceiling > 1):\n log_event(self.queue, \"sell ceiling greater than 1: \" +\n str(sellceiling))\n log_event(self.queue, \"setting it to .95 \")\n sellceiling = 0.95\n\n self.conserve_bids = conserve_bids\n self.sleep_time = sleep_time\n self.botspeed = botspeed\n self.bidexpiration_ceiling = bidexpiration_ceiling\n self.buyceiling = buyceiling\n self.sellceiling = sellceiling\n\n # Return values but this really shouldn't be used - only used on initialization\n return conserve_bids, sleep_time, botspeed, bidexpiration_ceiling, buyceiling, sellceiling", "def update_host_config(self, hostid, config, **kwargs):\n pass", "def write_config(self):\r\n obj = [\r\n [self.ip,\r\n self.gate,\r\n self.mask,\r\n self.name,\r\n self.time]\r\n ]\r\n with open('config.json', 'wt') as jsonfile:\r\n json.dump(obj, jsonfile)", "def update(self):\n\n # check if gain information is available, if not, update config\n if \"d2d\" not in self.config:\n self.setup_d2d()\n\n for channel in self.light_channels:\n # turn on the light\n self.light_control(channel, 1)\n\n d_print(\"Letting gains settle for the {} channel...\".format(channel), 1)\n\n with picamera.PiCamera() as sensor:\n # set up the sensor with all its settings\n sensor.resolution = self.settings.resolution\n sensor.framerate = self.settings.framerate[channel]\n sensor.shutter_speed = self.settings.shutter_speed[channel]\n\n sensor.awb_mode = \"off\"\n sensor.awb_gains = (self.config[\"wb\"][channel][\"r\"], self.config[\"wb\"][channel][\"b\"])\n\n time.sleep(30)\n\n sensor.exposure_mode = self.settings.exposure_mode\n\n # set the analog and digital gain\n ag = float(sensor.analog_gain)\n dg = float(sensor.digital_gain)\n\n self.config[\"d2d\"][channel][\"digital-gain\"] = dg\n self.config[\"d2d\"][channel][\"analog-gain\"] = ag\n\n d_print(\"Measured ag: {} and dg: {} for channel {}\".format(ag, dg, channel), 1)\n d_print(\"Saved ag: {} and dg: {} for channel {}\".format(self.config[\"d2d\"][channel][\"analog-gain\"], self.config[\"d2d\"][channel][\"digital-gain\"], channel), 1)\n\n # turn the light off\n 
self.light_control(channel, 0)\n\n # update timestamp\n self.config[\"d2d\"][\"timestamp\"] = time.time()\n\n # save the new configuration to file\n self.save_config_to_file()", "def save_cfg(self, output_dir):\n output_path = os.path.join(output_dir, 'level_config.cfg')\n shutil.copy(self.cfg_path, output_path)", "def commit_config(self):\n raise NotImplementedError", "def config(self, cfg):\n self._config.update(cfg)\n return self", "def save_config(self):\n self.config.app_w = self.width()\n self.config.app_h = self.height()\n self.config.splitter = self.splitter.saveState()\n self.config.save()", "def set_config(self): # called from button_set object \n self.settings['lights_on'] = self.lights_on.get()\n self.settings['lights_off'] = self.lights_off.get()\n self.settings['ambient_min'] = self.ambient_min.get()\n self.settings['soil_1'] = self.smc1.get()\n self.settings['soil_2'] = self.smc2.get()\n self.settings['soil_3'] = self.smc3.get()\n self.settings['soil_4'] = self.smc4.get()\n self.settings['overhead_level'] = self.overhead_level.get()\n\n # Save settings to config file in case of reboot / power-loss\n print \"UPDATING SETTINGS FILE\"\n with open(self.settings_path, 'w') as jsonfile:\n jsonfile.write(json.dumps(self.settings, indent=4))\n self.active_changes = True # (flag) changes are active!", "def _update_auto_config(self):\n\n # Initialize the yaml data\n nodes = {}\n with open(self._autoconfig_filename, \"r\") as stream:\n try:\n ydata = yaml.load(stream)\n if \"nodes\" in ydata:\n nodes = ydata[\"nodes\"]\n except yaml.YAMLError as exc:\n print(exc)\n return\n\n for i in nodes.items():\n key = i[0]\n node = i[1]\n\n # Interfaces\n node[\"interfaces\"] = {}\n for item in self._nodes[key][\"interfaces\"].items():\n port = item[0]\n interface = item[1]\n\n node[\"interfaces\"][port] = {}\n addr = \"{}\".format(interface[\"pci_address\"])\n node[\"interfaces\"][port][\"pci_address\"] = addr\n if \"mac_address\" in interface:\n node[\"interfaces\"][port][\"mac_address\"] = interface[\"mac_address\"]\n\n if \"total_other_cpus\" in self._nodes[key][\"cpu\"]:\n node[\"cpu\"][\"total_other_cpus\"] = self._nodes[key][\"cpu\"][\n \"total_other_cpus\"\n ]\n if \"total_vpp_cpus\" in self._nodes[key][\"cpu\"]:\n node[\"cpu\"][\"total_vpp_cpus\"] = self._nodes[key][\"cpu\"][\n \"total_vpp_cpus\"\n ]\n if \"reserve_vpp_main_core\" in self._nodes[key][\"cpu\"]:\n node[\"cpu\"][\"reserve_vpp_main_core\"] = self._nodes[key][\"cpu\"][\n \"reserve_vpp_main_core\"\n ]\n\n # TCP\n if \"active_open_sessions\" in self._nodes[key][\"tcp\"]:\n node[\"tcp\"][\"active_open_sessions\"] = self._nodes[key][\"tcp\"][\n \"active_open_sessions\"\n ]\n if \"passive_open_sessions\" in self._nodes[key][\"tcp\"]:\n node[\"tcp\"][\"passive_open_sessions\"] = self._nodes[key][\"tcp\"][\n \"passive_open_sessions\"\n ]\n\n # Huge pages\n node[\"hugepages\"][\"total\"] = self._nodes[key][\"hugepages\"][\"total\"]\n\n # Write the auto config config file\n with open(self._autoconfig_filename, \"w\") as yamlfile:\n yaml.dump(ydata, yamlfile)", "def set_new_configuration(self):\r\n with open('new_config.json', 'rt') as jsonfile:\r\n configuration = jsonfile.read()\r\n configuration_data = json.loads(configuration)\r\n ip = IPRoute()\r\n index = ip.link_lookup(ifname='eth0')[0]\r\n ip.link('set', index=index, state='up')\r\n ip.addr('add', index, address=configuration_data[0][0], mask=24)\r\n ip.close()", "def update_config(self, config, priority, source):\n for key, value in config.items():\n 
self._config[key].add(value, priority, source)", "def update(self, config_dict):\n self._update(config_dict, allow_new_keys=True)", "def update(self, config):\n # find keys are in config but not in self.config\n extra_keys = set(config.keys()) - set(self.config.keys())\n if len(extra_keys) > 0:\n raise ValueError(\"keys {} in config are not in Config.config\".format(extra_keys))\n # update self.config by config\n else:\n self.config.update(config)", "async def push_config(_):\n nonlocal last_timezone\n\n new_timezone = str(hass.config.time_zone)\n\n if new_timezone == last_timezone:\n return\n\n last_timezone = new_timezone\n await hassio.update_hass_timezone(new_timezone)", "def update_drop(drops, ai_settings):\n drops.update(ai_settings)\n \n for drop in drops.copy():\n if drop.rect.bottom >= ai_settings.screen_height:\n drop.rect.bottom = 0", "def test_update_configuration(self):\n\n ts_name = 'test-update-1'\n configuration = timeserie_configuration.get_timeserie_configure(\n self.get_local_dynamo_cli(), ts_name)\n self.assertTrue(configuration.default)\n self.assertEquals(configuration.retentions, granularities.RETENTIONS_GRANULARITY)\n self.assertEquals(configuration.timezone, granularities.DEFAULT_TIMEZONE)\n self.assertEquals(configuration.aggregation_method,\n aggregations.DEFAULT_AGGREGATION)\n\n custom_tz = 'America/New_York'\n custom_agg = aggregations.AGGREGATION_LAST\n custom_ret = granularities.RETENTIONS_GRANULARITY\n custom_ret[granularities.SECOND] = 3 * 365 * 12 * 30 * 24 * 60 * 60\n timeserie_configuration.update_timeserie_configuration(\n self.get_local_dynamo_cli(), ts_name, custom_tz, custom_agg, custom_ret)\n\n configuration = timeserie_configuration.get_timeserie_configure(\n self.get_local_dynamo_cli(), ts_name)\n self.assertFalse(configuration.default)\n self.assertEquals(configuration.retentions, custom_ret)\n self.assertEquals(configuration.timezone, custom_tz)\n self.assertEquals(configuration.aggregation_method, custom_agg)", "def update_config_item(self, elements: Dict[str, Any]) -> None:\n ...", "def _update_droppings(self):\n\t\t# Update dropping positions.\n\t\tself.droppings.update()\n\n\t\t# Get rid of the droppings that have disappeared.\n\t\tfor dropping in self.droppings.copy():\n\t\t\tif dropping.rect.top >= 1050:\n\t\t\t\tself.droppings.remove(dropping)\n\n\t\tself._check_dropping_auto_collisions()", "def apply_config(self, responsible, paths, arg=None):\n self.warning(\"Reconfiguring NTP server (called with paths %s)\" % paths)\n return self.updateRunningConf(responsible)", "def change_pwds(config):\n if not config:\n click.echo(help_msg)\n return\n try:\n user_config = imp.load_source('config', config)\n except IOError as e:\n click.echo(\"File %s not found.\" % config)\n logger.error(\"Invalid path to config file: %s\" % e)\n except Exception as e:\n click.echo(\"Ooups. 
Something went wrong.\")\n click.echo(e)\n logger.critical(\"%s\" % e)\n else:\n for i in dir(config_f):\n if not i.startswith(\"__\"):\n try:\n user_config.__dict__[i]\n except KeyError:\n user_config.__dict__[i] = config_f.__dict__[i]\n handle_exceptions(main, user_config)", "def update_config(self, config):\n # add follower public folder to the CKAN's list of public folders\n here = os.path.dirname(__file__)\n public_dir = os.path.join(here, 'public')\n if config.get('extra_public_paths'):\n config['extra_public_paths'] += ',' + public_dir\n else:\n config['extra_public_paths'] = public_dir\n # add follower template folder to the CKAN's list of template folders\n template_dir = os.path.join(here, 'templates')\n if config.get('extra_template_paths'):\n config['extra_template_paths'] += ',' + template_dir\n else:\n config['extra_template_paths'] = template_dir", "def _configure(self):\n Values._configure(self)\n self.values = [self.inventory.one, self.inventory.two]\n return", "def override_config_field(self, update_conf: dict):\n if not update_conf:\n return\n\n forbidden_modify_fields = self.contains_forbidden_modify_field(update_conf)\n if forbidden_modify_fields:\n raise ConfigException(f'Config field cannot be modified: {forbidden_modify_fields}')\n\n self.add_config(update_conf, type='api_patch', apply_now=True)\n\n logger.debug(f'Need update config fields: {update_conf}')\n self.config.update(update_conf)\n logger.debug(f'Update done. config: {self.config}')\n\n application.server['event'].publish('config_update', {'config_update' : {'data': update_conf}})", "async def push_config(_):\n await oppio.update_opp_timezone(str(opp.config.time_zone))", "def saveConfig(self):\r\n self.config[\"Settings\"] = {}\r\n settings = self.config[\"Settings\"]\r\n settings[\"datapath\"] = self.dataPath\r\n settings[\"videopath\"] = self.videoPath\r\n settings[\"dataoffset\"] = str(self.dataOffset)\r\n settings[\"colblindmode\"] = str(self.colBlindMode)\r\n with open(self.CONFIG_FILE,\"w\") as file:\r\n self.config.write(file)", "def update_region_config(cls, body: CloudAccountRegionConfigurationViewModel) -> Dict:\n\t\tpass", "def _update_config(self):\n \n # app size\n if self.IsMaximized():\n config.SETTINGS['app_maximized'] = True\n else:\n size = self.GetSize()\n config.SETTINGS['app_width'] = size[0]\n config.SETTINGS['app_height'] = size[1]\n config.SETTINGS['app_maximized'] = False\n \n # panes size\n pane = self.AUIManager.GetPane(self._collections_view)\n config.SETTINGS['collections_view_enabled'] = pane.IsShown()\n if pane.IsShown():\n config.SETTINGS['collections_view_width'] = pane.window.GetClientSize()[0]\n \n pane = self.AUIManager.GetPane(self._pdf_view)\n config.SETTINGS['pdf_view_enabled'] = pane.IsShown()\n if pane.IsShown():\n config.SETTINGS['pdf_view_height'] = pane.window.GetClientSize()[1]\n \n pane = self.AUIManager.GetPane(self._details_view)\n config.SETTINGS['details_view_enabled'] = pane.IsShown()\n if pane.IsShown():\n config.SETTINGS['details_view_width'] = pane.window.GetClientSize()[0]\n \n # article view\n config.SETTINGS['articles_view_columns'] = self._articles_view.GetColumnsSettings()", "def config(self, **kwargs):\n\n # our options that we deal with\n combobox = options[\"combobox\"]\n\n # cannot modify kwargs while iterating over it...\n keys = [*kwargs.keys()]\n for k in keys:\n if k in combobox:\n v = kwargs.pop(k)\n self.combobox.config(**{combobox[k]: v})\n\n # having removed our options, pass rest to parent\n super().config(**kwargs)", "def 
update_config(self, config):\n self.config = {\n \"key\": \"\",\n \"display_name\": \"\",\n \"description\": \"\",\n \"required\": 1,\n \"type\": \"string\"\n }\n self.config.update(config)\n self.API_KEY = self.config['github_api_key']", "def UpdateConfig(self, instalog_config, update_info, env):\n if update_info.get('data_truncate', {}).get('enable', False):\n # If enable data_truncate, Instalog truncate once a day.\n instalog_config['buffer']['args']['truncate_interval'] = 86400\n\n threshold = update_info.get('input_http', {}).get(\n 'log_level_threshold', logging.NOTSET)\n instalog_config['input']['http_in']['args']['log_level_threshold'] = (\n threshold)\n\n if update_info.get('forward', {}).get('enable', False):\n args = update_info.get('forward', {}).get('args', {}).copy()\n # Umpire is running in docker, and we always use IP of umpire and port\n # published by docker.\n args['hostname'] = socket.gethostbyname(socket.gethostname())\n args['port'] = env.umpire_instalog_pull_socket_port\n instalog_config['output']['forward'] = {\n 'plugin': 'output_pull_socket',\n 'args': args\n }\n for input_name in instalog_config['input']:\n instalog_config['input'][input_name]['targets'].append('forward')\n\n if update_info.get('customized_output', {}).get('enable', False):\n args = update_info.get('customized_output', {}).get('args', {}).copy()\n # Umpire is running in docker, and we always use IP of umpire and port\n # published by docker.\n args['hostname'] = socket.gethostbyname(socket.gethostname())\n args['port'] = env.umpire_instalog_customized_output_port\n instalog_config['output']['customized_output'] = {\n 'plugin': 'output_pull_socket',\n 'args': args\n }\n for input_name in instalog_config['input']:\n instalog_config['input'][input_name]['targets'].append(\n 'customized_output')\n\n if update_info.get('archive', {}).get('enable', False):\n instalog_config['output']['archive'] = {\n 'plugin': 'output_archive',\n 'args': update_info.get('archive', {}).get('args', {}).copy()\n }\n # Set the target_dir.\n target_dir = os.path.join(env.umpire_data_dir, 'instalog_archives')\n instalog_config['output']['archive']['args']['target_dir'] = target_dir\n for input_name in instalog_config['input']:\n instalog_config['input'][input_name]['targets'].append('archive')" ]
[ "0.6544299", "0.63342535", "0.60116196", "0.59151256", "0.5909534", "0.57759255", "0.57704425", "0.5765275", "0.5730661", "0.56408286", "0.5635697", "0.558882", "0.55770063", "0.5571904", "0.5553866", "0.5534613", "0.5478377", "0.546527", "0.5463798", "0.5436312", "0.5427711", "0.53996444", "0.5395192", "0.538703", "0.5386605", "0.53815395", "0.5366553", "0.5358142", "0.5354595", "0.5339216", "0.5329347", "0.5319775", "0.5313125", "0.53109384", "0.5303292", "0.5286871", "0.5284191", "0.5280756", "0.5276582", "0.5267846", "0.5240791", "0.5238083", "0.5231604", "0.5212591", "0.52108055", "0.52104163", "0.52080566", "0.52075183", "0.51953", "0.5194241", "0.51914704", "0.5189642", "0.51887417", "0.5181786", "0.5181326", "0.5181192", "0.5177958", "0.51670486", "0.516153", "0.51575154", "0.51575154", "0.51509017", "0.5141164", "0.51252645", "0.5118631", "0.5112413", "0.51049066", "0.510411", "0.51019764", "0.51018685", "0.5098213", "0.5095931", "0.50926745", "0.5085567", "0.5085032", "0.50816584", "0.5078761", "0.50740325", "0.5069506", "0.50594205", "0.50555575", "0.50547343", "0.50502515", "0.5047437", "0.5040857", "0.503545", "0.5031607", "0.50249803", "0.5015965", "0.5014158", "0.5013289", "0.50014305", "0.4997329", "0.4996845", "0.49882296", "0.49794328", "0.49707705", "0.49589464", "0.49485233", "0.49447882" ]
0.63966775
1
Update the config information with the number of attention heads.
def update_heads(info, heads): info["model_params"]["boltzmann_dict"]["num_heads"] = heads # Concatenate the fingerprints produced by the different heads info["model_params"]["boltzmann_dict"]["head_pool"] = "concatenate" readoutdict = info["model_params"]["readoutdict"] feat_dim = info["model_params"]["mol_basis"] for key, lst in readoutdict.items(): for i, dic in enumerate(lst): if "param" in dic and "in_features" in dic.get("param", {}): # make sure that the input dimension to the readout is equal to # `heads * feat_dim`, where `feat_dim` is the feature dimension # produced by each head readoutdict[key][i]["param"]["in_features"] = feat_dim * heads break info["model_params"]["readoutdict"] = readoutdict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_config(self):\n self.channel_count = self.config_global['channel_count']\n self.pixel_count = self.config_global['pixel_count']\n self.pixel_index_max = self.pixel_count - 1\n self.repeat_count = self.config_global['repeat_count']\n self.repeat_snake = self.config_global['repeat_snake']\n\n self.update_interval = self.config_global['update_interval']\n self.mode_16bit = self.config_global['mode_16bit']\n\n self.color_channels = self.config_global['color_channels']\n # self.color_channels = collections.namedtuple(\n # 'color_channels',\n # **self.color_channels_dict\n # )\n self.color_channels_count = len(self.color_channels)\n if self.mode_16bit:\n self.color_channels_count = self.color_channels_count * 2\n\n self.total_channel_count = (\n self.pixel_count *\n self.color_channels_count\n )\n if self.repeat_count > 0:\n self.total_channel_count *= self.repeat_count", "def update_configuration(self, config):\n\n config[\"data_transformation\"][\"n_classification_bins\"] = config[\"n_classification_bins\"]\n config[\"data_transformation\"][\"nassets\"] = config[\"nassets\"]\n config[\"data_transformation\"][\"classify_per_series\"] = config[\"classify_per_series\"]\n config[\"data_transformation\"][\"normalise_per_series\"] = config[\"normalise_per_series\"]\n\n return config", "def update(self, num_of_updates=25) -> None:\n\t\tfor _ in range(num_of_updates):\n\t\t\tself.__find_joint_configurations()", "def conf_update(self):\n pass", "def get_config(self):\n config = {\n 'F_': self.F_,\n 'attn_heads': self.attn_heads,\n 'attn_heads_reduction': self.attn_heads_reduction,\n 'edge_type_reduction': self.edge_type_reduction,\n 'attention_type': self.attention_type,\n 'attn_dropout': self.attn_dropout,\n 'feature_dropout': self.feature_dropout,\n 'activation': self.activation,\n 'use_value_bias': self.use_value_bias,\n 'use_key_bias': self.use_key_bias,\n 'kernel_initializer': self.kernel_initializer,\n 'bias_initializer': self.bias_initializer,\n 'attn_kernel_initializer': self.attn_kernel_initializer,\n 'attn_bias_initalizer': self.attn_bias_initializer,\n 'kernel_regularizer': self.kernel_regularizer,\n 'bias_regularizer': self.bias_regularizer,\n 'attn_kernel_regularizer': self.attn_kernel_regularizer,\n 'attn_bias_regularizer': self.attn_bias_regularizer,\n 'activity_regularizer': self.activity_regularizer,\n 'kernel_constraint': self.kernel_constraint,\n 'bias_constraint': self.bias_constraint,\n 'attn_kernel_constraint': self.attn_kernel_constraint,\n 'attn_bias_constraint': self.attn_bias_constraint\n }\n base_config = super(BatchShawMultigraphAttention, self).get_config()\n return dict(list(base_config.items())) + list(config.items())", "def update(self, config):\n self.n_topics = config['n_topics'] \n self.n_passes = config['n_passes'] \n self.min_docfreq = config['min_docfreq'] \n self.max_docfreq = config['max_docfreq']\n self.ngrams = config['ngrams'] \n self.n_words = config['n_words'] \n self.topic_range = config['topic_range'] \n self.ext_stop_words = config['ext_stop_words']", "def __init__(self, **config):\n super(CNN, self).__init__()\n in_channel = [26] + config['cnn_target_filters']\n kernels = config['cnn_target_kernels']\n self.layer_size = len(config['cnn_target_filters'])\n self.visual_attention=config['visual_attention']\n self.concatenation=config['concatenation']\n self.convs = nn.ModuleList([nn.Conv1d(in_channels=in_channel[i],\n out_channels=in_channel[i + 1],\n kernel_size=kernels[i]) for i in range(self.layer_size)])\n self.convs = self.convs.float()\n 
self.attention = config['attention']\n protein_size = self.simulate_output((26, 1000))\n self.fc = nn.Linear(protein_size, config['hidden_dim_protein'])\n self.Attention=Attention(**config)", "def n_configs(self, val):\n if val >= 1 and isinstance(val, int):\n if val != self._faux._n_configs:\n self._faux._n_configs = val\n self._faux._update()\n else:\n warn(\"`val` not valid, no update performed\")", "def update(self, rxn_probs):\n pass", "def _InitAttentionParams(self, atten_tpl):\n p = self.params\n\n if isinstance(p.num_heads, list) != isinstance(atten_tpl, list):\n raise ValueError('p.num_heads and p.atten_tpl should both be lists '\n f'or both scalars for {p.name} num_heads={p.num_heads}.')\n if isinstance(p.num_heads, list) and (len(p.num_heads) != len(atten_tpl)):\n raise ValueError('num_heads and atten_tpl should both be lists '\n 'of the equal sizes: '\n f'{len(p.num_heads)} vs {len(atten_tpl)}')\n\n def _SetCommonParams(params, name, num_heads):\n # Raise warning if self.params override params from atten_tpl\n for key in ['input_dim', 'hidden_dim', 'num_heads', 'atten_dropout_prob']:\n if params.Get(key) is not p.Get(key):\n tf.logging.warning('attention param {} overriding: {} -> {}'.format(\n key, params.Get(key), p.Get(key)))\n if params.name is not name:\n tf.logging.warning('attention param name overriding: {} -> {}'.format(\n params.name, name))\n params.name = name\n params.input_dim = p.input_dim\n params.hidden_dim = p.hidden_dim\n params.num_heads = num_heads\n params.atten_dropout_prob = p.atten_dropout_prob\n if isinstance(p.num_heads, list):\n params.proj_tpl.make_output_proj_no_op = True\n # Each dim per head is now divided among all heads\n dim_per_head = p.hidden_dim // sum(p.num_heads)\n params.proj_tpl.dim_per_head = dim_per_head\n params.dim_per_head = dim_per_head\n params.hidden_dim = p.hidden_dim // len(p.num_heads)\n return params\n\n if isinstance(p.num_heads, list):\n params_list = []\n for i in range(len(atten_tpl)):\n params = atten_tpl[i].Copy()\n params = _SetCommonParams(params, 'mixed_atten_{}'.format(i),\n p.num_heads[i])\n params_list.append(params)\n params = params_list\n else:\n params = atten_tpl.Copy()\n params = _SetCommonParams(params, 'multihead_atten', p.num_heads)\n return params", "def update_count(self):\n pass", "def n_configs(self):\n return self._faux._n_configs", "def set_config(self, config):\n if 'symbols' in config:\n self.symbols = self.config['symbols'] = config['symbols']\n if 'update_frequency_milliseconds' in config:\n self.update_frequency_milliseconds = self.config['update_frequency_milliseconds'] = int(\n config['update_frequency_milliseconds']\n )\n if 'elements_per_update' in config:\n self.elements_per_update = self.config['elements_per_update'] = int(config['elements_per_update'])", "def __init__(self, nheads, d_model):\n super(MultiheadAttention, self).__init__()\n assert d_model % nheads == 0\n self.d_head = d_model // nheads\n self.nheads = nheads\n self.Q_fc = nn.Linear(d_model, d_model, bias=False)\n self.K_fc = nn.Linear(d_model, d_model, bias=False)\n self.V_fc = nn.Linear(d_model, d_model, bias=False)\n self.output_fc = nn.Linear(d_model, d_model, bias=False)\n self.attn = None", "def update_config(self, config):\n # add follower public folder to the CKAN's list of public folders\n here = os.path.dirname(__file__)\n public_dir = os.path.join(here, 'public')\n if config.get('extra_public_paths'):\n config['extra_public_paths'] += ',' + public_dir\n else:\n config['extra_public_paths'] = public_dir\n # add 
follower template folder to the CKAN's list of template folders\n template_dir = os.path.join(here, 'templates')\n if config.get('extra_template_paths'):\n config['extra_template_paths'] += ',' + template_dir\n else:\n config['extra_template_paths'] = template_dir", "def updateSizeHead(self, size): \n self.avatarConfiguration[\"headSize\"] = size\n self.paintHead()\n self.paintHair()\n if (self.avatarConfiguration[\"mask\"]):\n self.generateMask(\"imgUpload.png\")\n self.paintMask()", "def onConfigureMessage(self, config):\n for adaptor in config[\"adaptors\"]:\n adtID = adaptor[\"id\"]\n if adtID not in self.devices:\n # Because configure may be re-called if devices are added\n name = adaptor[\"name\"]\n friendly_name = adaptor[\"friendly_name\"]\n logging.debug(\"%s Configure app. Adaptor name: %s\", ModuleName, name)\n self.idToName[adtID] = friendly_name.replace(\" \", \"_\")\n self.devices.append(adtID)\n self.dm = DataManager(self.bridge_id)\n self.setState(\"starting\")", "def _update_count(self):\n self._count = len(self._items)", "def updateBotCounts(self, nextCard):\n nextVal = dnUtil.getValue(nextCard)\n state = self.getState()\n counts = self.getCounts(state)\n newCount = counts.copy()\n for value in dnUtil.valuesList:\n if counts[value][2] == 0:\n continue\n update = self.updateCount(value, nextVal, counts[value])\n newCount[value] = update\n self.setCounts(newCount)", "def set_number_of_sentences(self):\n self.number_of_sentences = int(self.num_sentences.get())", "def update_count(self):\n pass # Do nothing", "def _InitAttentionParams(self, atten_tpl):\n p = self.params\n source_atten_tpls = []\n # Set up each source attention.\n for i in range(p.num_source):\n src_key = 'source_%d' % i\n src_atten = atten_tpl.Copy()\n src_atten = super()._InitAttentionParams(src_atten)\n if isinstance(src_atten, list):\n raise ValueError(\n 'TransformerMultiSourceAttentionLayer does not support '\n 'num_heads > 1.')\n src_atten.name = 'multihead_atten_%s' % src_key\n source_atten_tpls.append((src_key, src_atten))\n\n # Initialize multi-source attention.\n msa = p.multi_source_atten.Copy()\n msa.name = 'multi_source_atten'\n msa.input_dim = p.input_dim\n msa.hidden_dim = p.hidden_dim\n msa.source_atten_tpls = source_atten_tpls\n msa.primary_source_key = 'source_%d' % p.primary_source_index\n return msa", "def update_config(self, config) -> InferredConfig:\n categorical_dim = len(config.categorical_cols)\n continuous_dim = len(config.continuous_cols)\n if config.task == \"regression\":\n output_dim = len(config.target)\n elif config.task == \"classification\":\n output_dim = len(self.train[config.target[0]].unique())\n else:\n output_dim = None\n categorical_cardinality = None\n embedding_dims = None\n if not self.do_leave_one_out_encoder():\n categorical_cardinality = [\n int(self.train[col].fillna(\"NA\").nunique()) + 1 for col in config.categorical_cols\n ]\n embedding_dims = [(x, min(50, (x + 1) // 2)) for x in categorical_cardinality]\n if hasattr(config, \"embedding_dims\"):\n if config.embedding_dims is not None:\n embedding_dims = config.embedding_dims\n return InferredConfig(\n categorical_dim=categorical_dim,\n continuous_dim=continuous_dim,\n output_dim=output_dim,\n categorical_cardinality=categorical_cardinality,\n embedding_dims=embedding_dims,\n )", "def config_count(self) -> int:\n return pulumi.get(self, \"config_count\")", "def find_n(self):\n metadata_files = [\n file for file in self.cfg[\"input_files\"]\n if \"tas/metadata.yml\" in file\n ]\n self.cfg[\"N\"] = {}\n for 
meta_file in metadata_files:\n n_identifyer = meta_file.split(\"/tas/\")[0].split(\"/tas_\")[-1]\n metadata = group_metadata(get_cfg(meta_file).values(), \"dataset\")\n self.cfg[\"N\"][n_identifyer] = len(metadata.keys()) - 1", "def setMancount(self, cnt):\n self.__mancount=cnt", "def num_of_adaptors(self, num_of_adaptors):\n\n self._num_of_adaptors = num_of_adaptors", "def config_connection_matrix(self):\n for leg in self.legs.values():\n for m in leg[\"muscles\"]:\n if \"brain_sig\" and \"name\" in m:\n self.connection_matrix[m[\"name\"]] = [0] * self.brain[\"n_osc\"]\n self.connection_matrix[m[\"name\"]][m[\"brain_sig\"] - 1] = 1.", "def get_base_config():\n return dict(\n dim=768,\n ff_dim=3072,\n num_heads=12,\n num_layers=12,\n attention_dropout_rate=0.0,\n dropout_rate=0.1,\n representation_size=768,\n classifier='token'\n )", "def updateInfo(self):\n\t\tif ( self.errorCount == 2 ):\n\t\t\tself.pitchText.text = \"Unclear microphone input...\"\n\n\t\tcurNote = self.listener.pitch.note\n\t\tcurFreq = self.listener.pitch.freq\n\t\tself.tuneDelta, self.tuneNeighbor = self.listener.pitch.inTune()\n\t\ttuneText = \"%0.2f Hz off from %s (%0.1f Hz)\" % (abs(self.tuneDelta), \n\t\t\t\t\t\t\t\t\t\t\t\tself.tuneNeighbor.note, \n\t\t\t\t\t\t\t\t\t\t\t\tcurFreq)\n\t\tself.pitchText.text = tuneText", "def _configure_frequencies(self) -> None:\n i = 3\n while i < len(self._lora_frequencies):\n self.set_ch_parameters(i, self._lora_frequencies[i], 0, 5, True)\n i += 1\n self.set_ch_parameters(i, 868800000, 7, 7, True)", "def make_mdn_heads(self, config):\n raise NotImplementedError", "def __init__(self, input_size, hidden_size, output_size, config):\r\n super(AttentionDecoder, self).__init__()\r\n\r\n self.config = config\r\n self.input_size = input_size\r\n self.hidden_size = hidden_size\r\n\r\n self.attn = Attn(self.config.attn_type, self.hidden_size)\r\n self.out = nn.Linear(self.hidden_size * 2, output_size)\r\n\r\n if self.config.model in ['LSTM', 'GRU']:\r\n self.rnn = getattr(nn, self.config.model)(self.input_size + self.hidden_size, self.hidden_size,\r\n self.config.nlayer_dec, batch_first=True,\r\n dropout=self.config.dropout)\r\n else:\r\n try:\r\n nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[self.config.model]\r\n except KeyError:\r\n raise ValueError(\"\"\"An invalid option for `--model` was supplied,\r\n options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']\"\"\")\r\n self.rnn = nn.RNN(self.input_size + self.hidden_size, self.hidden_size, self.config.nlayers,\r\n nonlinearity=nonlinearity, batch_first=True, dropout=self.config.dropout)", "def update_and_reset_config(self, config, feat, gold_move):\n config['next_move'] = gold_move\n self.classifier.update(feat,gold_move)\n return [config]", "def set_params(self, config):\n params = {'n_bins', 'edges', 'classes', 'chi', 'n_params'}\n self.__dict__.update((param, np.array(value)) for param, value in config.items() if param in params)", "def update_config(config, args):\n if args.n_train is not None:\n config['data']['n_train'] = args.n_train\n if args.n_valid is not None:\n config['data']['n_valid'] = args.n_valid\n if args.real_weight is not None:\n config['data']['real_weight'] = args.real_weight\n if args.lr is not None:\n config['optimizer']['learning_rate'] = args.lr\n if args.hidden_dim is not None:\n config['model']['hidden_dim'] = args.hidden_dim\n if args.n_graph_iters is not None:\n config['model']['n_graph_iters'] = args.n_graph_iters\n if args.batch_size is not None:\n config['data']['batch_size'] = 
args.batch_size\n if args.n_epochs is not None:\n config['training']['n_epochs'] = args.n_epochs\n if args.weight_decay is not None:\n config['optimizer']['weight_decay'] = args.weight_decay\n\n return config", "def _attention(self, inputs):\n attn_weights = K.batch_dot(x=inputs,\n y=K.permute_dimensions(inputs,\n pattern=(0, 2, 1)))\n return K.permute_dimensions(attn_weights, (0, 2, 1))", "def record_config(setup_state):\n bp.config = {k: v for k, v in setup_state.app.config.get_namespace('POKER_').items()}", "def updateBoneCount(self):\n\n if cmds.window(\"ART_BoneCounterWin\", exists=True):\n if self.rigUiInst.boneCounterInst is not None:\n self.rigUiInst.boneCounterInst.updateBoneCount()", "def _update_train_steps(configs, train_steps):\n configs[\"train_config\"].num_steps = int(train_steps)", "def _update_batch_size(configs, batch_size):\n configs[\"train_config\"].batch_size = max(1, int(round(batch_size)))", "def test_attention_sizes():\n encoder_out = Variable(torch.randn(152, 2, 256)) # seq, batch, dim\n query_vector = Variable(torch.randn(1, 2, 1024)) # seq, batch, dim\n\n attention = LocationAttention(encoded_dim=256, query_dim=1024, attention_dim=128)\n context, mask = attention(query_vector, encoder_out)\n assert context.size() == (1, 2, 256) # seq, batch, dim\n assert mask.size() == (1, 2, 152) # seq2, batch, seq1", "def __init__(self, config: BertConfig):\r\n super().__init__(config)\r\n ### YOUR CODE HERE\r\n self.num_labels = config.num_labels # [0, 1] (start or end)\r\n self.bert = BertModel(config)\r\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # TODO: Not a separate FFN ? (For Start_FFN and End_FFN)\r\n\r\n ### END YOUR CODE\r\n\r\n # Don't forget initializing the weights\r\n self.init_weights()", "def set_num_updates(self, num_updates):\n self.num_updates = num_updates", "def set_num_updates(self, num_updates):\n self.num_updates = num_updates", "def n_conf(self):\n return self._configuration_sets[0].n_conf", "async def update_device_data(self, send_config):\n if not self.config_sent:\n await send_config()\n if self.client: # in passive mode, client is None\n props = self.client._properties\n self.rssi = props.get('RSSI')", "def update_knowledge(self):\n pass", "def _auto_update_configuration(self) -> None:\n self.config = rasa.utils.train_utils.update_confidence_type(self.config)\n rasa.utils.train_utils.validate_configuration_settings(self.config)\n self.config = rasa.utils.train_utils.update_similarity_type(self.config)\n self.config = rasa.utils.train_utils.update_evaluation_parameters(self.config)", "def _MultiHeadedAtten(\n self, name, num_heads=None, enable_qkv_proj_in_onestep=False\n ):\n p = self.params\n if num_heads is None:\n num_heads = p.num_heads\n attn_memory_tpl = layers.LSHTaskWithMultiplierLayer.Params().Set(\n log_num_buckets=p.attn_log_num_buckets,\n num_hash_fn=p.attn_num_hash_fn,\n grid_size=p.attn_grid_size,\n rank=p.attn_rank,\n memory_act=p.attn_memory_act,\n add_bias=p.attn_add_bias,\n seed=p.attn_seed)\n atten_p = MultiHeadedAttention.Params().Set(\n name=name,\n input_dim=p.model_dim,\n hidden_dim=p.attention_hidden_dim or p.model_dim,\n num_heads=num_heads,\n atten_dropout_prob=p.atten_dropout_prob,\n enable_value_proj=p.selfatten_enable_value_proj,\n enable_query_scale=p.enable_query_scale,\n enable_per_dim_scale=p.enable_per_dim_scale,\n packed_input=p.packed_input,\n fprop_dtype=p.fprop_dtype,\n use_bias=p.use_bias,\n enable_qkv_proj_in_onestep=enable_qkv_proj_in_onestep,\n 
enable_scaling_code_motion=p.enable_scaling_code_motion,\n device_mesh=p.device_mesh,\n weight_split_dims_mapping=p.weight_split_dims_mapping.dnh,\n attn_add_memory=p.attn_add_memory,\n memory_tpl=attn_memory_tpl,\n )\n atten_ap = atten_p.activation_split_dims_mapping\n atten_ap.blnh = p.activation_split_dims_mapping.blnh\n atten_ap.bld = p.activation_split_dims_mapping.bld\n if p.deterministic_dropout:\n atten_p.dropout_tpl = layers.DeterministicDropoutLayer.Params()\n return atten_p", "def add_feat_conf(self, conf_map):\n conf_map['twin_trigger'] = str(self.twin_triggers.text()).replace('\\n', '')\n conf_map['twin_halves'] = str(self.twin_halves.text()).replace('\\n', '')", "def setUp(self):\n self._default_call_inputs = (\n np.array([[\"one\", \"two\", \"three\"],\n [\"four\", \"five\", \"six\"]]),\n None\n )\n\n self._hash_bins = 10\n self._hash_embedding_dim = 4\n self._embedding_dim = 2\n self._attention_heads = 4\n self._attention_key_dim = 128\n self._attention_concat = False\n self._attention_mask = False\n self._masking = False\n\n self._default_config = {\n \"hash_bins\": self._hash_bins,\n \"hash_embedding_dim\": self._hash_embedding_dim,\n \"embedding_dim\": self._embedding_dim,\n \"attention_heads\": self._attention_heads,\n \"attention_key_dim\": self._attention_key_dim,\n \"attention_concat\": self._attention_concat,\n \"attention_causal_mask\": self._attention_mask,\n \"masking\": self._masking\n }", "def get_config_count():\n return jsonify(config_count=config_count(), config_limit=config_limit)", "def __init__(self, num_heads: int, size: int, dropout: float = 0.1):\n super(MultiHeadedAttention, self).__init__()\n\n assert size % num_heads == 0\n\n self.head_size = head_size = size // num_heads\n self.model_size = size\n self.num_heads = num_heads\n\n self.k_layer = nn.Linear(size, num_heads * head_size)\n self.v_layer = nn.Linear(size, num_heads * head_size)\n self.q_layer = nn.Linear(size, num_heads * head_size)\n\n self.output_layer = nn.Linear(size, size)\n self.softmax = nn.Softmax(dim=-1)\n self.dropout = nn.Dropout(dropout)", "def __init__(self, kernel_size=11, log_t=False):\n super(Attention, self).__init__()\n assert kernel_size % 2 == 1, \"Kernel size should be odd for 'same' conv.\"\n padding = (kernel_size - 1) // 2\n self.conv = nn.Conv1d(1, 1, kernel_size, padding=padding)\n self.log_t = log_t", "def updateConfBlendWeights(percent):\n global confWeight\n confWeight = float(percent)/100.0", "def set_numcells(self, N = []):\n self.set_gids(N)\n self.create_cells()\n\n #self.syn_output() # generate synaptic \"output\" in neuron\n #self.connect_cells()", "def update_journal(self):\n self.kittens_rescued += 1", "def n_conf(self):\n return max(len(self._inputs), len(self._configurations))", "def update_frequencies():\n pass", "def update(self, config):\n # find keys are in config but not in self.config\n extra_keys = set(config.keys()) - set(self.config.keys())\n if len(extra_keys) > 0:\n raise ValueError(\"keys {} in config are not in Config.config\".format(extra_keys))\n # update self.config by config\n else:\n self.config.update(config)", "def refresh(self):\n node, ans = self.list_head.next.next, 0\n # first update key_nodes in even positions\n while node:\n ans += 1\n node = node.next.next\n # then update tree_nodes's current_btree_node in odd positions\n node = self.list_head.next\n while node:\n node.current_btree_node = self\n if node.next:\n node = node.next.next\n else:\n break\n self.size = ans", "def _merge_heads(self, tensor, num_attention_heads, 
attn_head_size):\n if len(tensor.shape) == 5:\n tensor = tensor.permute(0, 1, 3, 2, 4).contiguous()\n elif len(tensor.shape) == 4:\n tensor = tensor.permute(0, 2, 1, 3).contiguous()\n else:\n raise ValueError(f\"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}\")\n new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,)\n return tensor.view(new_shape)", "def get_l16_config():\n config = get_base_config()\n config.update(dict(\n patches=(16, 16),\n dim=1024,\n ff_dim=4096,\n num_heads=16,\n num_layers=24,\n attention_dropout_rate=0.0,\n dropout_rate=0.1,\n representation_size=1024\n ))\n return config", "def __init__(self, emb_size: int = 768, num_heads: int = 12, dropout: float = 0.1):\n super().__init__()\n self.emb_size = emb_size\n self.num_heads = num_heads\n\n # Check \"head dim\" = number of features per head (d_k).\n self.head_dim = emb_size // num_heads\n assert self.head_dim * num_heads == self.emb_size, \"emb_size must be divisible by num_heads\"\n # Calculate scaling factor.\n self.scale = 1 / (self.head_dim ** 0.5)\n\n # V1: in vanilla self-attention Q,K,V are square matrices.\n # self.keys = nn.Linear(emb_size, emb_size)\n # self.queries = nn.Linear(emb_size, emb_size)\n # self.values = nn.Linear(emb_size, emb_size)\n \n # V2: single layer with emb_size, split into num (heads * head_dim) * 3 (Q,K,V).\n self.qkv = nn.Linear(emb_size, emb_size * 3)\n\n # Attention dropout.\n self.att_drop = nn.Dropout(dropout)\n self.projection = nn.Linear(emb_size, emb_size)", "def incr_no_of_attacks(self):\n\t\tself.__anom += 1\n\t\tself.__anom_lbl.setText(str(self.__anom))", "def attention(decoder_state, coverage=None, num_words_section=None, step=None):\n with variable_scope.variable_scope(\"Attention\"):\n # Pass the decoder state through a linear layer (this is W_s s_t + b_attn in the paper)\n # (W_s s_t) + b_att is decoder_features; s_t = decoder_state\n decoder_features = linear(decoder_state, attention_vec_size, True) # shape (batch_size, attention_vec_size)\n decoder_features = tf.expand_dims(tf.expand_dims(decoder_features, 1), 1) # reshape to (batch_size, 1, 1, attention_vec_size)\n\n def masked_attention(e, enc_padding_mask):\n if enc_section_padding_mask is not None:\n enc_padding_mask = tf.reshape(enc_section_padding_mask, [batch_size, -1])\n enc_padding_mask = tf.cast(enc_padding_mask, tf.float32)\n \"\"\"Take softmax of e then apply enc_padding_mask and re-normalize\"\"\"\n attn_dist = nn_ops.softmax(e) # take softmax. 
shape (batch_size, attn_length)\n attn_dist *= enc_padding_mask # apply mask\n masked_sums = tf.reduce_sum(attn_dist, axis=1) # shape (batch_size)\n return attn_dist / tf.reshape(masked_sums, [-1, 1]) # re-normalize\n\n if use_coverage and coverage is not None: # non-first step of coverage\n if not hier:\n # Multiply coverage vector by w_c to get coverage_features.\n coverage_features = nn_ops.conv2d(coverage, w_c, [1, 1, 1, 1], \"SAME\") # c has shape (batch_size, seq_len, 1, attention_vec_size)\n \n # Calculate v^T tanh(W_h h_i + W_s s_t + w_c c_i^t + b_attn)\n e = math_ops.reduce_sum(v * math_ops.tanh(encoder_features + decoder_features + coverage_features), [2, 3]) # shape (batch_size,seq_len)\n \n # Take softmax of e to get the attention distribution\n # attn_dist = nn_ops.softmax(e) # shape (batch_size, seq_len)\n attn_dist = masked_attention(e, enc_padding_mask)\n \n # Update coverage vector\n coverage += array_ops.reshape(attn_dist, [batch_size, -1, 1, 1]) # shape=(batch_size, seq_len,1,1)\n else:\n with tf.variable_scope(\"attention_sections\"):\n if FLAGS.fixed_attn:\n tf.logging.debug('running with fixed attn', '\\r')\n decoder_features_sec = linear(decoder_state, attention_vec_size, True, scope='Linear--Section-Features') # shape (batch_size, attention_vec_size)\n decoder_features_sec = tf.expand_dims(tf.expand_dims(decoder_features_sec, 1), 1) # reshape to (batch_size, 1, 1, attention_vec_size)\n e_sec = math_ops.reduce_sum(v_sec * math_ops.tanh(encoder_section_features + decoder_features_sec), [2, 3]) # [batch_size x seq_len_sections]\n attn_dist_sec = nn_ops.softmax(e_sec)\n else:\n e_sec = math_ops.reduce_sum(v_sec * math_ops.tanh(encoder_section_features + decoder_features), [2, 3]) # [batch_size x seq_len_sections]\n attn_dist_sec = nn_ops.softmax(e_sec)\n with tf.variable_scope(\"attention_words\"):\n coverage_features = nn_ops.conv2d(coverage, w_c, [1, 1, 1, 1], \"SAME\") # c has shape (batch_size, seq_len, 1, attention_vec_size)\n \n # Calculate v^T tanh(W_h h_i + W_s s_t + w_c c_i^t + b_attn)\n e = math_ops.reduce_sum(v * math_ops.tanh(encoder_features + decoder_features + coverage_features), [2, 3]) # shape (batch_size,seq_len)\n\n # Multiply by section weights\n \n e = tf.reshape(e, [batch_size, -1, num_words_section[0][0]])\n e = tf.multiply(e, attn_dist_sec[:,:,tf.newaxis])\n e = tf.reshape(e, [batch_size,-1])\n\n\n# --- Some hack for reweighting attention (similar to temp for softmax)\n if temperature > 0.0:\n e = e * temperature\n \n attn_dist = masked_attention(e, enc_padding_mask)\n coverage += array_ops.reshape(attn_dist, [batch_size, -1, 1, 1]) # shape=(batch_size, seq_len,1,1)\n \n else:\n # Calculate v^T tanh(W_h h_i + W_s s_t + b_attn)\n if hier:\n with tf.variable_scope(\"attention_sections\"):\n if FLAGS.fixed_attn:\n decoder_features_sec = linear(decoder_state, attention_vec_size, True, scope='Linear--Section-Features') # shape (batch_size, attention_vec_size)\n decoder_features_sec = tf.expand_dims(tf.expand_dims(decoder_features_sec, 1), 1) # reshape to (batch_size, 1, 1, attention_vec_size)\n e_sec = math_ops.reduce_sum(\n v_sec * math_ops.tanh(encoder_section_features + decoder_features_sec), [2, 3]) # [batch_size x seq_len_sections]\n attn_dist_sec = nn_ops.softmax(e_sec)\n else:\n e_sec = math_ops.reduce_sum(\n v_sec * math_ops.tanh(encoder_section_features + decoder_features), [2, 3]) # [batch_size x seq_len_sections]\n attn_dist_sec = nn_ops.softmax(e_sec)\n\n with tf.variable_scope(\"attention_words\"):\n\n e = math_ops.reduce_sum(v * 
math_ops.tanh(encoder_features + decoder_features), [2, 3]) #[batch_size x seq_len]\n\n e = tf.reshape(e, [batch_size, -1, num_words_section[0][0]])\n e = tf.multiply(e, attn_dist_sec[:,:,tf.newaxis])\n e = tf.reshape(e, [batch_size,-1])\n\n if temperature > 0.0:\n e = e * temperature\n \n attn_dist = masked_attention(e, enc_padding_mask)\n \n else:\n e = math_ops.reduce_sum(v * math_ops.tanh(encoder_features + decoder_features), [2, 3]) # calculate e\n # Take softmax of e to get the attention distribution\n if enc_padding_mask is not None:\n attn_dist = masked_attention(e, enc_padding_mask)\n else:\n attn_dist = nn_ops.softmax(e) # shape (batch_size, seq_len)\n\n if use_coverage: # first step of training\n coverage = tf.expand_dims(tf.expand_dims(attn_dist,2),2) # initialize coverage\n\n # TODO: coverage for hier\n\n # Calculate the context vector from attn_dist and encoder_states\n # ecnoder_sates = [batch , seq_len , 1 , encoder_output_size], attn_dist = [batch, seq_len, 1, 1]\n context_vector = math_ops.reduce_sum(array_ops.reshape(attn_dist, [batch_size, -1, 1, 1]) * encoder_states, [1, 2]) # shape (batch_size, enc_output_size).\n context_vector = array_ops.reshape(context_vector, [-1, enc_output_size])\n\n if hier:\n return context_vector, attn_dist, coverage, attn_dist_sec\n else:\n return context_vector, attn_dist, coverage", "def set_samples_info():\n white_list_formats = {'png', 'jpg', 'jpeg', 'bmp'}\n dirs_info = {config.train_dir: 0, config.validation_dir: 0}\n for d in dirs_info:\n iglob_iter = glob.iglob(d + '**/*.*')\n for i in iglob_iter:\n filename, file_extension = os.path.splitext(i)\n if file_extension[1:] in white_list_formats:\n dirs_info[d] += 1\n\n config.nb_train_samples = dirs_info[config.train_dir]\n config.nb_validation_samples = dirs_info[config.validation_dir]", "def adapt_to_config(self, neb_config: config.NEBConfig):\n if neb_config.optim_config.eval_config is not None:\n self.model.adapt_to_config(neb_config.optim_config.eval_config)\n self.spring_constant = neb_config.spring_constant\n self.weight_decay = neb_config.weight_decay", "def add_config(self):\n\n config = {\n 'count_up': CountUp,\n 'count_down': CountDown,\n 'count_up_or_down': CountUpOrDown,\n 'high_speed_counter_definition': HighSpeedCounterDefinition,\n 'high_speed_counter': HighSpeedCounter,\n 'pulse_output': PulseOutput\n }\n\n return config", "def __init__(self, classCount):\n self.NUM_CLASSES = 1+classCount\n self.STEPS_PER_EPOCH = self.STEPS_PER_EPOCH / self.IMAGES_PER_GPU\n self.VALIDATION_STEPS = self.VALIDATION_STEPS / self.IMAGES_PER_GPU\n super(ModelConfig, self).__init__()", "def update(self, config_dict):\r\n self._update(config_dict, allow_new_keys=True)", "def update(self):\n self.save_config_file()", "def __set_joints(self, joint_configs):\n\n self.joint_count = len(joint_configs)\n\n for joint_conf in joint_configs:\n\n joint = Joint(joint_conf, self.use_ext_driver)\n self.joints.append(joint)\n\n if joint.structure != \"constant\":\n self.current_pos_as_theta.append(joint.current_angle)\n\n self.__prepare_dh_params()", "async def async_setup_entry(\n hass: HomeAssistant,\n config_entry: ConfigEntry,\n async_add_entities: AddEntitiesCallback,\n) -> None:\n entities: list[NumberEntity] = []\n session: SHCSession = hass.data[DOMAIN][config_entry.entry_id][DATA_SESSION]\n\n for number in (\n session.device_helper.thermostats + session.device_helper.roomthermostats\n ):\n entities.append(\n SHCNumber(\n device=number,\n parent_id=session.information.unique_id,\n 
entry_id=config_entry.entry_id,\n attr_name=\"Offset\",\n )\n )\n\n if entities:\n async_add_entities(entities)", "def test_config(self):\n\n p = SyncProto(packet_port, None)\n\n d = make_axes(500, .1, usteps=16, steps_per_rotation=200)\n p.config(4, 18, 32, False, False, axes=d['axes1']);\n p.info()\n\n d = make_axes(1000, .2, usteps=16, steps_per_rotation=200,\n output_mode=OutMode.OUTPUT_OPENDRAIN, highval=OutVal.LOW)\n p.config(4, 7, 9, False, False, axes=d['axes1']);\n p.info()", "def plot_attention(self, n_cols=4):\n from matplotlib import pyplot as plt\n from matplotlib.ticker import MaxNLocator\n save_path = mkdir_join(self.save_path, 'att_weights')\n if save_path is not None and os.path.isdir(save_path):\n shutil.rmtree(save_path)\n os.mkdir(save_path)\n for lth in range(self.n_layers):\n if not hasattr(self, 'yy_aws_layer%d' % lth):\n continue\n yy_aws = getattr(self, 'yy_aws_layer%d' % lth)\n plt.clf()\n fig, axes = plt.subplots(self.n_heads // n_cols, n_cols, figsize=(20, 8))\n for h in range(self.n_heads):\n if self.n_heads > n_cols:\n ax = axes[h // n_cols, h % n_cols]\n else:\n ax = axes[h]\n ax.imshow(yy_aws[-1, h, :, :], aspect='auto')\n ax.grid(False)\n ax.set_xlabel('Input (head%d)' % h)\n ax.set_ylabel('Output (head%d)' % h)\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n ax.yaxis.set_major_locator(MaxNLocator(integer=True))\n fig.tight_layout()\n fig.savefig(os.path.join(save_path, 'layer%d.png' % lth))\n plt.close()", "def plot_attention(self, n_cols=4):\n from matplotlib import pyplot as plt\n from matplotlib.ticker import MaxNLocator\n save_path = mkdir_join(self.save_path, 'att_weights')\n if save_path is not None and os.path.isdir(save_path):\n shutil.rmtree(save_path)\n os.mkdir(save_path)\n for lth in range(self.n_layers):\n if not hasattr(self, 'yy_aws_layer%d' % lth):\n continue\n yy_aws = getattr(self, 'yy_aws_layer%d' % lth)\n plt.clf()\n fig, axes = plt.subplots(self.n_heads // n_cols, n_cols, figsize=(20, 8))\n for h in range(self.n_heads):\n if self.n_heads > n_cols:\n ax = axes[h // n_cols, h % n_cols]\n else:\n ax = axes[h]\n ax.imshow(yy_aws[-1, h, :, :], aspect='auto')\n ax.grid(False)\n ax.set_xlabel('Input (head%d)' % h)\n ax.set_ylabel('Output (head%d)' % h)\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n ax.yaxis.set_major_locator(MaxNLocator(integer=True))\n fig.tight_layout()\n fig.savefig(os.path.join(save_path, 'layer%d.png' % lth))\n plt.close()", "def update_insta_follower_count(self):\n\n df = self.Instagram.get_followers_df()\n n_followers = df.shape[0]\n self.GSpread.write_raw_log('INSTAGRAM',\n '',\n 'FOLLOWER_COUNT',\n n_followers)", "def configure(self, config: dict):\n self.config.update(config)", "def participate(self):\n if self.allow_reco():\n self.config[self.id] = self.chs_config()", "def __update_labels(self):\n\n self.__active_buses_stringvar.set(str(self.__bus_controller.buses_count))\n self.__active_lines_stringvar.set(str(len(self.__bus_controller.bus_dict)))\n self.__number_of_people_stringvar.set(str(self.__telegram_controller.people_count))\n self.__session_time_stringvar.set(self.session_time)\n\n messages =self.__bus_controller.bus_messages\n for n in range(0, BusController.MAX_MESSAGES_TO_DISPLAY):\n self.__free_text_stringvars_dict[n].set(messages[n])", "def attention(query, use_attention=False):\n attn_weights = []\n ds = [] # Results of attention reads will be stored here.\n for i in xrange(num_heads):\n with variable_scope.variable_scope(\"Attention_%d\" % i):\n y = rnn_cell._linear(query, 
attention_vec_size, True)\n y = array_ops.reshape(y, [-1, 1, 1, attention_vec_size])\n # Attention mask is a softmax of v^T * tanh(...).\n s = math_ops.reduce_sum(\n v[i] * math_ops.tanh(hidden_features[i] + y), [2, 3])\n if use_attention is False: # apply mean pooling\n weights = tf.tile(sequence_length, tf.pack([attn_length]))\n weights = array_ops.reshape(weights, tf.shape(s))\n a = array_ops.ones(tf.shape(s), dtype=dtype) / math_ops.to_float(weights)\n # a = array_ops.ones(tf.shape(s), dtype=dtype) / math_ops.to_float(tf.shape(s)[1])\n else:\n a = nn_ops.softmax(s)\n attn_weights.append(a)\n # Now calculate the attention-weighted vector d.\n d = math_ops.reduce_sum(\n array_ops.reshape(a, [-1, attn_length, 1, 1]) * hidden,\n [1, 2])\n ds.append(array_ops.reshape(d, [-1, attn_size]))\n return attn_weights, ds", "def update(self):\n self.brain.update()", "def update_inhibition(self) -> None:\n if self.spec.inhibition_type == \"fffb\":\n self.calc_fffb_inhibition()\n else:\n self.calc_kwta_inhibition()\n\n self.units.update_inhibition(torch.Tensor(self.size).fill_(self.gc_i))", "def set_config(self, config):\n self.adversarial = config.adversarial\n self.eps = config.eps\n self.probability = config.probability\n self.use_dynamics = config.use_dynamics\n self.random = config.random\n self.observable_noise = config.observable_noise\n self.use_max_norm = config.use_max_norm", "def learn(self,n):\n for i in range(n):\n self.class_counts,self.feature_counts = self.em_step(self.class_counts,\n self.feature_counts)", "def _setup_n_ints_in_file(self):\n self.n_ints_in_file = sigproc.calc_n_ints_in_file(self.filename)", "def inform(self, information):\n self.know = merge(self.know, information)", "def attention(inp, scope, e_dim, past, config):\n assert inp.shape.ndims == 3 # input should be of shape [batch, seqlen, embeddings] # [batch, sequence, features]\n assert e_dim % config.num_heads == 0 # embedding can be split in heads\n\n if past is not None:\n assert past.shape.ndims == 5 # [batch, 2, heads, seqlen, emebeddings]\n\n def split_heads(x):\n out = split_into_n_states(x, config.num_heads)\n out = tf.transpose(out, [0, 2, 1, 3])\n return out\n\n def merge_heads(x):\n out = merge_n_states(tf.transpose(x, [0, 2, 1, 3]))\n return out\n\n def mask_attention_weights(w):\n # w should have shape [batches, heads, dst_seq, src_seq], where information flows from scr to dst\n _, _, nd, ns = shapes_list(w)\n b = attention_mask(nd, ns, w.dtype)\n b = tf.reshape(b, [1, 1, nd, ns])\n w = w * b - tf.cast(1e10, w.dtype) * (1 - b)\n return w\n\n def multihead_attention(q, k, v):\n w = tf.matmul(q, k, transpose_b=True)\n w *= tf.rsqrt(tf.cast(v.shape[-1].value, w.dtype))\n\n # mask attention weights\n w = mask_attention_weights(w)\n w = softmax_with_reduce_max(w)\n out = tf.matmul(w, v)\n return out\n\n with tf.variable_scope(scope):\n c = conv1d(inp, 'convolutional_attention', e_dim * 3)\n q, k, v = map(split_heads, tf.split(c, 3, axis=2))\n present = tf.stack([k, v], axis=1)\n if past is not None:\n # there is a stack below it\n pk, pv = tf.unstack(past, axis=1)\n k = tf.concat([pk, k], axis=2)\n v = tf.concat([pv, v], axis=2)\n\n attn = multihead_attention(q, k, v)\n attn = merge_heads(attn)\n\n out = conv1d(attn, 'convolutional_projection', e_dim)\n return out, present", "def add_config(self, config):\n clean=lambda n: n.strip().strip('\"').lower()\n for line in config.split('\\n'):\n items=line.strip().split()\n if items and len(items) >= 3:\n cmd, evt, hnd=items[:3]\n \"\"\" NOTE\n - just 'bind' command 
expected right now\n - '+' prepended ti the handler means REPEAT (make sense just for keyboard keys actually)\n \"\"\"\n cmd=clean(cmd)\n if cmd in ['bind']:\n evt,hnd=(clean(evt), clean(hnd))\n if not cmd in self.config: self.config[cmd]={}\n repeat=hnd.startswith('+')\n if repeat: hnd=hnd[1:]\n self.config[cmd].update([[evt, [hnd, repeat]]])", "def _config_md(self):\n self.cntrl[\"imin\"] = 0\n self.cntrl[\"ntx\"] = 1\n self.cntrl[\"irest\"] = 0\n self.cntrl[\"maxcyc\"] = 0\n self.cntrl[\"ncyc\"] = 0\n self.cntrl[\"dt\"] = 0.002\n self.cntrl[\"nstlim\"] = 5000\n self.cntrl[\"ntpr\"] = 500\n self.cntrl[\"ntwe\"] = 500\n self.cntrl[\"ntwr\"] = 5000\n self.cntrl[\"ntwx\"] = 500\n self.cntrl[\"ntxo\"] = 1\n self.cntrl[\"ioutfm\"] = 1\n self.cntrl[\"ntf\"] = 2\n self.cntrl[\"ntc\"] = 2\n self.cntrl[\"ntt\"] = 3\n self.cntrl[\"gamma_ln\"] = 1.0\n self.cntrl[\"ig\"] = -1", "def appendsize(self, numents):\n self._numents += numents", "def update_config(self, config, priority, source):\n for key, value in config.items():\n self._config[key].add(value, priority, source)", "def set_numcells(self, N):\n\t\tself.create_cells(N)\n\t\tself.connect_cells()\n\t\tself.connect_stim()", "def update_counts(self, new_alpha, new_beta, decay):\n\n self._alpha = self._alpha / decay + new_alpha\n self._beta = self._beta / decay + new_beta\n self._n_updates += 1", "def rel_attn_core(\n self,\n q_head,\n k_head_h,\n v_head_h,\n k_head_r,\n seg_mat=None,\n attn_mask=None,\n head_mask=None,\n output_attentions=False,\n ):\n\n # content based attention score\n ac = torch.einsum(\"ibnd,jbnd->bnij\", q_head + self.r_w_bias , k_head_h)#mixout(self.r_w_bias, self.r_w_bias_target, self.mixout_p, self.training)\n\n # position based attention score\n bd = torch.einsum(\"ibnd,jbnd->bnij\", q_head + self.r_r_bias, k_head_r)#mixout(self.r_r_bias, self.r_r_bias_target, self.mixout_p, self.training)\n bd = self.rel_shift_bnij(bd, klen=ac.shape[3])\n\n # segment based attention score\n if seg_mat is None:\n ef = 0\n else:\n #ef = torch.einsum(\"ibnd,snd->ibns\", q_head + mixout(self.r_s_bias, self.r_s_bias_target, self.mixout_p, self.training), mixout(self.seg_embed, self.seg_embed_target, self.mixout_p, self.training))\n ef = torch.einsum(\"ibnd,snd->ibns\", q_head + self.r_s_bias, seg_embed)\n\n ef = torch.einsum(\"ijbs,ibns->bnij\", seg_mat, ef)\n\n # merge attention scores and perform masking\n attn_score = (ac + bd + ef) * self.scale\n if attn_mask is not None:\n # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask\n if attn_mask.dtype == torch.float16:\n attn_score = attn_score - 65500 * torch.einsum(\"ijbn->bnij\", attn_mask)\n else:\n attn_score = attn_score - 1e30 * torch.einsum(\"ijbn->bnij\", attn_mask)\n\n # attention probability\n attn_prob = F.softmax(attn_score, dim=3)\n attn_prob = self.dropout(attn_prob)\n\n # Mask heads if we want to\n if head_mask is not None:\n attn_prob = attn_prob * torch.einsum(\"ijbn->bnij\", head_mask)\n\n # attention output\n attn_vec = torch.einsum(\"bnij,jbnd->ibnd\", attn_prob, v_head_h)\n\n if output_attentions:\n return attn_vec, torch.einsum(\"bnij->ijbn\", attn_prob)\n\n return attn_vec", "def calc_attention(self, encoder_hidden_states):\n\n params = self.dec_params\n if len(encoder_hidden_states.shape) == 3:\n # Squeeze the first dimension\n encoder_hidden_states = np.squeeze(encoder_hidden_states, axis=0)\n\n # T x Attn_vec_size\n attn_enc_term = np.matmul(encoder_hidden_states, params.attn_enc_w)\n\n def attention(dec_state):\n attn_dec_term = (np.matmul(dec_state, 
params.attn_dec_w) +\n params.attn_dec_b) # T x A\n attn_sum = np.tanh(attn_enc_term + attn_dec_term) # T x A\n attn_logits = np.squeeze(np.matmul(attn_sum, params.attn_v)) # T\n attn_probs = softmax(attn_logits)\n\n context_vec = np.matmul(attn_probs, encoder_hidden_states)\n # The attention probabilities are necessary for coverage penalty calculation\n return (context_vec, attn_probs)\n\n return attention", "def augment(self):\n n1 = { 'edges': [ self.next_insert['pred'], self.next_insert ], 'pred': self.next_insert['pred'] }\n n2 = { 'edges': [ n1, self.next_insert ], 'pred': n1 }\n self.next_insert['pred'] = n2\n self.next_insert = n2\n self.nodect += 2", "def __init__(self, num_heads: int, size: int, size_v: int, dropout: float = 0.1):\n super(ContMultiHeadedAttention, self).__init__()\n\n assert size % num_heads == 0\n\n self.head_size = head_size = size // num_heads\n self.model_size = size\n self.num_heads = num_heads\n\n self.k_layer = nn.Linear(size, num_heads * head_size)\n self.v_layer = nn.Linear(size_v, num_heads * head_size)\n self.q_layer = nn.Linear(size, num_heads * head_size)\n\n self.output_layer = nn.Linear(size, size_v)\n self.softmax = nn.Softmax(dim=-1)\n self.dropout = nn.Dropout(dropout)" ]
[ "0.5661511", "0.5599164", "0.54210174", "0.53882116", "0.5338775", "0.5247799", "0.5247248", "0.5225227", "0.51431704", "0.5058479", "0.49841285", "0.49445143", "0.49379683", "0.48532596", "0.4848556", "0.48481622", "0.4835506", "0.48258802", "0.48030823", "0.48024145", "0.47915727", "0.47881028", "0.4777855", "0.4774145", "0.47700423", "0.47676536", "0.4764091", "0.47598007", "0.47409284", "0.4735868", "0.47338778", "0.4726272", "0.4715355", "0.47108114", "0.47072908", "0.47068086", "0.47034568", "0.47007343", "0.4697328", "0.46916658", "0.46899405", "0.46878868", "0.46863377", "0.46862078", "0.46862078", "0.4678841", "0.4672696", "0.46645114", "0.4659769", "0.46561727", "0.4652312", "0.46459848", "0.4642793", "0.46386945", "0.4635084", "0.46227825", "0.46220273", "0.4607446", "0.45976704", "0.45794654", "0.45657238", "0.4548884", "0.45428395", "0.45406255", "0.45397905", "0.453845", "0.45328075", "0.45319912", "0.453196", "0.45316243", "0.45218778", "0.45202962", "0.45168367", "0.45153952", "0.45114404", "0.45112333", "0.45085615", "0.45085615", "0.45056832", "0.45035204", "0.4492113", "0.44916177", "0.4490304", "0.44888437", "0.44871995", "0.44831347", "0.44770584", "0.44757912", "0.44709072", "0.4469524", "0.4469373", "0.44690403", "0.44653308", "0.4464684", "0.44569665", "0.4455461", "0.4453155", "0.44522443", "0.44486168", "0.44458678" ]
0.5935313
0
Update a general parameter that's in the main info dictionary.
def update_general(info, key, val):
    info["model_params"][key] = val
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_general_param(self, param, val):\n assert param in self.params, '%s is not recognized as a valid parameter' % param\n self.params[param].change_value(val)", "def _paramUpdate(self):\n\n # Update the database attributes accordingly.\n dt.utilities.DB_attrs_save(self.Database, self.newParam)", "def update_parameter(self, param, val, force=False):\n self._update_dict[param] = val\n if force:\n self._cur_val[param] = None", "def updateParameters(self, parameters):", "def update_parameter(self, name, freq, value):\n if name not in self._parameters.keys():\n self.add_parameter(name, [freq], [value])\n else:\n param = self.get_parameter(name)\n param.update_value(freq, value)", "def update_params(self):\n pass", "def update_param(self, update_param):\n\n self._update_param = update_param", "def updateParameters(self):\n\n return", "def update_param_info(param_info, config, is_user_config=False):\n if 'parameters' not in config:\n return\n params = config['parameters']\n for name in params:\n val = params[name]\n if not is_user_config:\n # If this is not a user-provided configuration, we disallow parameter redefinition.\n if name in param_info:\n raise ConfigurationError(\n \"Parameter info update error.\"\n \" Parameter redefinition is not allowed for non-user configuration.\"\n \" This is a system configuration error that must not happen.\"\n \" Parameter %s=%s, new parameter definition (value) is %s\" % (name, str(param_info[name]), val)\n )\n if isinstance(val, dict):\n # This is a complete parameter definition with name, value and description.\n if 'val' not in val:\n raise ConfigurationError(\n \"Parameter info update error.\"\n \" Parameter that is defined by a dictionary must contain 'val' field that\"\n \" defines its default value. Found this definition: %s=%s\" % (name, val)\n )\n if name not in param_info:\n param_info[name] = copy.deepcopy(val) # New parameter, set it info object.\n # TODO what about parameter type and description?\n else:\n logging.warn(\n \" Parameter (%s) entirely redefines existing parameter (%s).\"\n \" Normally, only value needs to be provided.\"\n \" We will proceed but you may want to fix this.\",\n json.dumps(val),\n json.dumps(param_info[name])\n )\n param_info[name]['val'] = val['val'] # Existing parameter from user configuration, update its value\n else:\n # Just parameter value\n val_type = 'str' if isinstance(val, basestring) or isinstance(val, list) else type(val).__name__\n if name not in param_info:\n param_info[name] = {\n 'val': val,\n 'type': val_type,\n 'desc': \"No description for this parameter provided (it was automatically converted from its value).\"\n }\n else:\n param_info[name]['val'] = val\n # Do final validations\n if 'type' in param_info[name] and param_info[name]['type'] not in ('int', 'str', 'float', 'bool'):\n raise ConfigurationError(\n \"Parameter info update error.\"\n \" Parameter has invalid type = '%s'.\"\n \" Parameter definition is %s = %s\" % (param_info[name]['type'], name, param_info[name])\n )\n if 'type' not in param_info[name] or 'desc' not in param_info[name]:\n logging.warn(\n \"Parameter definition does not contain type ('type') and/or description ('desc').\"\n \" You should fix this. 
Parameter definition is\"\n \" %s = %s\", name, param_info[name]\n )", "def update_param(param, param_dict, alg=\"IID_LINEAR\", prefix=\"\"):\n default_len = len(param.defaults)\n if param.defaults:\n for index, value in enumerate(reversed(param.args)):\n if value not in [\"self\", \"W\", \"method\", \"causal_matrix\", \"topology_matrix\"]:\n if index < default_len:\n p_value = list(reversed(param.defaults))[index]\n else:\n p_value = None\n if value is \"sem_type\":\n p_value = sem_type_set(\"sem_type\", alg)[0]\n param_dict.update({prefix + value: p_value})", "def update_settings(self, param):\n if param.name() == '':\n pass", "def update(self, **params):\n self.parameters.update(params)", "def _update_params(self):\n pass", "def update(self, params):", "def __adjust_param(self, option):\n # Get the name of the parameter.\n name = self.__option_params[option]\n\n # Ask the user for a new value.\n value = float(input(\"Enter value for {}: \".format(name)))\n self._params.update(name, value)\n\n # Update the description with the new value.\n desc = self.__make_description(name)\n self.update_description(option, desc)\n\n # Stay on the same menu.\n return self.get_name()", "def _update_params(self):\n raise NotImplementedException()", "def _update_parameter(self, dWxh, dbh, dWhy, dby):\n # Add code to update all the weights and biases here", "def __updateParameter(self, currentParam, newParam):\n for i in xrange(len(currentParam)):\n for np in newParam:\n if np['name'] == currentParam[i]['name']:\n currentParam[i] = np", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def update_parameters(self):\n # We update gamma, gamma0, lambda and nu in turn (Bottolo et al, 2011)\n self.update_gamma()\n self.update_gamma0()\n self.update_lambda()\n self.update_nu()\n if self.sample_xi:\n self.update_xi()", "def update_values(self, to_update):\n for key, value in kwargs.iteritems():\n self.params[key] = value\n # update the possibly dependent parameters\n self.set_filenames()", "def edit_parameter(request, parameter, **_kwargs):\n pass", "def setParam(self,param,value):\n if param in self.params.keys():\n self.params[param] = value", "def update_parameters(self,like_params):\n\n # get current dictionary with parameters, update and setup again\n params=self.get_params()\n\n for par in like_params:\n if par.name in params:\n params[par.name]=par.value\n\n self._setup_from_parameters(params)\n return", "def setParameter(self, name, value):", "def updateParameters(self,*args,**kwargs):\n for key in kwargs.keys():\n self._params[key] = kwargs[key]", "def update_parameter(cur, par, new_value):\n cur.execute(\"UPDATE parameters SET value=%f WHERE par='%s';\" % \n (new_value, par))", "def modifyParam(self, currentAttribute, updatedValue):\n instance_params = self.state.selectedSymObjects[0].instanceParams\n # if the value is name or connected objects, set the param instead of\n # the dict\n if currentAttribute not in instance_params:\n instance_params[currentAttribute] = {}\n #TODO look into this check, it seems like we do not need it\n if \"Value\" not in instance_params[currentAttribute]:\n catalog = self.state.catalog\n name = 
self.state.selectedSymObjects[0].componentName\n instance_params[currentAttribute][\"Value\"] = updatedValue\n instance_params[currentAttribute][\"Type\"] = \\\n catalog[\"SimObject\"][name]['ports'][currentAttribute]['Type']\n else:\n instance_params[currentAttribute][\"Value\"] = updatedValue\n\n self.state.addToHistory()\n self.state.highlightIncomplete()\n\n # remove highlight for object if it is complete now\n object = self.state.selectedSymObjects[0]\n if not object.incomplete:\n object.rect.setBrush(QColor(\"Green\"))", "def update_parameters(updates):\r\n for (key, val) in updates.items():\r\n par[key] = val\r\n print('Updating:', key, '-->', val)\r\n update_dependencies()", "def _update_varinfo(varinfo, data):\n varinfo_data = _get_varinfo(data)\n if \"vartype\" not in varinfo:\n varinfo.update(vartype=varinfo_data['vartype'])\n if \"ndim\" not in varinfo:\n varinfo.update(ndim=varinfo_data['ndim'])\n if \"size\" not in varinfo:\n varinfo.update(size=varinfo_data['size'])\n return varinfo", "def update_parameters(self):\n self.alignment_factor = rospy.get_param('/dyn_reconf/alignment_factor')\n self.cohesion_factor = rospy.get_param('/dyn_reconf/cohesion_factor')\n self.separation_factor = rospy.get_param('/dyn_reconf/separation_factor')\n self.avoid_factor = rospy.get_param('/dyn_reconf/avoid_factor')\n self.max_speed = rospy.get_param('/dyn_reconf/max_speed')\n self.max_force = rospy.get_param('/dyn_reconf/max_force')\n self.friction = rospy.get_param('/dyn_reconf/friction')\n self.crowd_radius = rospy.get_param('/dyn_reconf/crowd_radius')\n self.search_radius = rospy.get_param('/dyn_reconf/search_radius')\n\n rospy.loginfo(rospy.get_caller_id() + \" -> Parameters updated\")\n if DEBUG:\n print('alignment_factor: ', self.alignment_factor)\n print('cohesion_factor: ', self.cohesion_factor)\n print('separation_factor: ', self.separation_factor)\n print('avoid_factor: ', self.avoid_factor)\n print('max_speed: ', self.max_speed)\n print('max_force: ', self.max_force)\n print('friction: ', self.friction)\n print('crowd_radius: ', self.crowd_radius)\n print('search_radius: ', self.search_radius)", "def update_parameters(parameters, grads, learning_rate):\n pass", "def updateParameters(self, parameters):\n\t\treturn", "def _update_params(self):\n _load = not self.san_interface.runmode\n params={}\n if ('iosched' in self._updatedattr or _load) and self.iosched<>IoSchedType.default:\n params['iosched']=str(self.iosched)\n if ('readahead' in self._updatedattr or _load) and self.readahead :\n params['readahead']=self.readahead\n if params:\n for pt in self.paths():\n pt.provider.set_dev_params(pt,params)", "def editInfoVariable(self, product, info_variable):\r\n\r\n return info_variable", "def _update_param_from_tkvar(self,param_name,force=False):\n self.debug(\"TkPO._update_param_from_tkvar(%s)\"%param_name)\n\n param_obj = self.get_parameter_object(param_name)\n\n if not lookup_by_class(self.param_immediately_apply_change,\n type(param_obj)) and not force:\n return\n else:\n super(TkParameterized,self)._update_param_from_tkvar(param_name)", "def add_param(self, paraminfo):\n self.params[paraminfo.name] = paraminfo", "def update_params(self, t, x, **kwargs):\n raise NotImplementedError", "def __setitem__(self, name: str, value):\n super(Parameter, self).__setitem__(name, value)", "def params(self,new):\n self._params = new\n self._config_set()\n self._make_model()", "def _update_param_from_tkvar(self,param_name):\n self.debug(\"TkPOb._update_param_from_tkvar(%s)\"%param_name)\n\n 
parameter,sourcePO=self.get_parameter_object(param_name,with_source=True)\n\n ### can only edit constant parameters for class objects\n if parameter.constant is True and not isinstance(sourcePO,type):\n return ### HIDDEN\n\n tkvar = self._tkvars[param_name]\n\n if self._tkvar_changed(param_name):\n # don't attempt to set if there was a string-to-object translation error\n if self.translators[param_name].last_string2object_failed:\n return ### HIDDEN\n\n # (use _original_get() because we don't want the tkvar to be reset to\n # the parameter's current value!)\n val = self._string2object(param_name,tkvar._original_get())\n\n try:\n self._set_parameter(param_name,val)\n except: # everything\n tkvar.set(tkvar._last_good_val)\n raise # whatever the parameter-setting error was\n\n self.debug(\"set %s to %s\"%(param_name,val))\n\n if hasattr(tkvar,'_on_modify'):\n tkvar._on_modify()\n\n ### call any function associated with GUI set()\n if hasattr(tkvar,'_on_set'):\n\n # CEBALERT: provide a way of allowing other gui components\n # to figure out where a callback error might have come\n # from. Callback instances (the Callback class is defined\n # in Tkinter.py) store a widget, but often it appears to\n # be the Tk instance - which is of no use in later\n # determining where an error might have originated.\n global _last_one_set\n if hasattr(self,'master'):\n _last_one_set = self.master\n\n tkvar._on_set()", "def _update_params(self):\n log.debug(\"Updating parameter dict\")\n old_config = self._param_dict.get_config()\n self._get_config()\n new_config = self._param_dict.get_config() \n if (new_config != old_config):\n self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def updateParameters(self, parameters):\n return", "def _set_param(self, name, value):\n self._frozenjson._data[name] = value", "def 
_update_params(self, perf_params, loop_info):\n for vartype in list(perf_params.keys()):\n for var in perf_params[vartype]:\n self.tspec_params['performance_params'][var] = \\\n self.indent + 'param %s[] = %s;\\t#%s\\n' % (var, repr(default_perf_params[vartype]), vartype)\n\n #loop_info.vars: set of input vars", "def updateData(self, *args):\n # if self.move_next_option == \"R\":\n # self.restSampling()\n # elif self.move_next_option == \"A\":\n # self.addExtra()\n # else:\n # self.continueReview()\n for name, value in self.parameter_inputs.items():\n self.parameters[name] = value.value\n # directly change the value of class variables\n logMsg((\"update settings: \", self.ml_classifier_cls, name, value.value))\n setattr(self.ml_classifier_cls, name, value.value)\n\n pass", "def updateParams(self, v,n,m, ii):\n \n inst = self.instruments[self.stringInsts.index(self.selInstsVar[ii].get())]\n params = inst.getQParams()\n self.paramBoxes[ii]['values'] = params[:]\n self.paramBoxes[ii].current(0)", "def _update_config_parameter_value(self):\n field = self.env.ref(\"base.field_ir_config_parameter_value\")\n for r in self:\n if r.fields_id != field:\n # It's not for ir.config_parameter\n continue\n if r.company_id:\n # it's not default value\n continue\n if not r.res_id:\n # Paramater is not specified\n continue\n # Default value is updated. Set new value in column \"value\"\n model, res_id = r.res_id.split(\",\")\n value = r.get_by_record()\n param = self.env[\"ir.config_parameter\"].browse(int(res_id))\n param._update_db_value(value)", "def updateParameterNodeFromGUI(self, caller=None, event=None):\n\n\t\tif self._parameterNode is None or self._updatingGUIFromParameterNode and not self.active:\n\t\t\treturn\n\n\t\t#wasModified = self._parameterNode.StartModify() # Modify all properties in a single batch\n\t\t#self._parameterNode.EndModify(wasModified)", "def _build_update_params(self, params):", "def _update_params(self):\n with self.sphere.sphere_lock:\n self._args_to_params(self.sphere.bai_1d_args, self.bai_1d_pars)\n self._args_to_params(self.sphere.bai_2d_args, self.bai_2d_pars)\n #self._args_to_params(self.sphere.mg_args, self.mg_pars)", "def set_parameter_value(self, parameter_name, new_value):\n self.description[\"config\"][\"values\"][parameter_name][\"value\"] = new_value\n ## Update MongoDB\n #self.mongo_client.cps2_project.objects.update_one(\n #{\"_id\": self.mongo_id},\n #{\"$set\": {\"config.values.\" + parameter_name + \".value\": new_value,\n #\"last_modified.value\": str(datetime.utcnow())}\n #}\n #)\n print(\"Switched the parameter \" + parameter_name + \" to \" + new_value + \" and updated MongoDB.\")", "def __update_params(self,**kwargs):\n updatedArgSet = set(self._updateParamsArgs) & kwargs.viewkeys()\n if len(updatedArgSet) > 0:\n args = self._subDictionary(self._updateParamsArgs)\n newArgs = self._onParamsUpdate(**args)\n updatedArgs =dict()\n for k in updatedArgSet:\n try:\n updatedArgs[k] = newArgs[k]\n except:\n pass\n\n self.__dictionary.update(newArgs)\n else:\n pass", "def updateParms(self):\n self.p1text.setText(\"{0:g}\".format(self.parmVal[0]))\n if len(self.parmVal) > 1:\n self.p2text.setText(\"{0:g}\".format(self.parmVal[1]))\n if len(self.parmVal) > 2:\n self.p3text.setText(\"{0:g}\".format(self.parmVal[2]))\n if len(self.parmVal) > 3:\n self.p4text.setText(\"{0:g}\".format(self.parmVal[3]))", "def update_parameters(self, timestamp, inputs):\n pass", "def _register_global_params(self, params):\n\n for name,obj in self.params().items():\n 
global_params.add(**{name:obj})\n\n for name,val in params.items():\n global_params.params(name).default=val\n\n params.update(global_params.get_param_values())\n params[\"name\"]=self.name", "def update_parameters(self, ob_no, hidden, ac_na, fixed_log_probs, q_n, adv_n):\n self.update_critic(ob_no, hidden, q_n)\n self.update_policy(ob_no, hidden, ac_na, fixed_log_probs, adv_n)", "def update_parameters(\n model_param: Dict[str, Union[float, List[float]]]\n ) -> Dict[str, float]:\n\n updated_param = {}\n\n for i, _ in enumerate(model_param[\"teff\"]):\n updated_param[f\"teff_{i}\"] = model_param[\"teff\"][i]\n updated_param[f\"radius_{i}\"] = model_param[\"radius\"][i]\n\n if \"parallax\" in model_param:\n updated_param[\"parallax\"] = model_param[\"parallax\"]\n elif \"distance\" in model_param:\n updated_param[\"distance\"] = model_param[\"distance\"]\n\n return updated_param", "def update_params(self, extra_params):\n self._params.update(extra_params)\n return self", "def update(self, info):\n self.is_active = info.p.active\n self.rev_info = info.rev_info" ]
[ "0.7279819", "0.71316004", "0.70896465", "0.68731415", "0.6845889", "0.68180555", "0.6810109", "0.67108864", "0.6680052", "0.6631445", "0.6597182", "0.6568276", "0.65336627", "0.65146816", "0.64628476", "0.64187586", "0.64153326", "0.63640064", "0.63570213", "0.63570213", "0.63570213", "0.63570213", "0.63570213", "0.63570213", "0.63570213", "0.63570213", "0.6326515", "0.632127", "0.631701", "0.62342215", "0.62329715", "0.6224575", "0.62074846", "0.619209", "0.6145222", "0.6143303", "0.61419415", "0.61059785", "0.61011493", "0.60709673", "0.60689473", "0.6068537", "0.6057156", "0.6037527", "0.6006764", "0.6002779", "0.59891164", "0.59871614", "0.5984246", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.5960317", "0.59562373", "0.5952855", "0.59472305", "0.5933312", "0.5876481", "0.58735794", "0.5862359", "0.5860543", "0.58521354", "0.58462685", "0.58355737", "0.58166665", "0.58158666", "0.58142614", "0.5805332", "0.5796692", "0.5790774" ]
0.7829526
0
Update the config information and save it.
def update_info(job_path, vals, param_names, prop_name):
    with open(job_path, "r") as f:
        info = json.load(f)

    real_names = []
    real_vals = []

    for param_name, val in zip(param_names, vals):
        if param_name.startswith("log_"):
            # if anything starts with "log_" (e.g. "log_schnet_dropout"),
            # exponentiate its value to get the actual number
            real_names.append(param_name.replace("log_", ""))
            real_vals.append(np.exp(val))
        else:
            real_names.append(param_name)
            real_vals.append(val)

    # update values
    for param_type, val in zip(real_names, real_vals):
        if 'dropout' in param_type:
            update_dropout(info=info,
                           dropout=val,
                           dropout_type=param_type,
                           prop_name=prop_name)
        elif param_type == "num_heads":
            update_heads(info=info, heads=val)
        elif param_type == "attention_type":
            info["model_params"]["boltzmann_dict"]["type"] = val
        else:
            if param_type not in info["model_params"]:
                msg = (f"Warning: assuming that {param_type} "
                       "is just a key in `model_params`, but "
                       "it is not currently in `model_params` in "
                       "the config file. If it should be in a "
                       "different location then you will need "
                       "to write a custom function for updating "
                       "it.")
                fprint(msg)
            update_general(info, key=param_type, val=val)

    # save
    with open(job_path, "w") as f:
        json.dump(info, f, indent=4, sort_keys=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self):\n self.save_config_file()", "def save(self):\n self.__config.sync()\n self.__saved = True\n Logger().debug(\"Configuration saved\")", "def update_config(self, data):\n self.config.data = dict_merge(self.config.data, data)\n self.config.save()", "def conf_update(self):\n pass", "def save_config(self):\n config.save_config(self.config, self.config_file)", "def save(self):\n file = open(self.path, 'w')\n self.config.write(file)\n file.close()", "def save(self) -> None:\n self._client.save_config()", "def save(self):\n Registry.SetKey(self.CONFIG_NAME, self.config, True)\n self.load() # for validation", "def saveConfig(self):\n newPath = self.newFolderPath.text()\n config.set(\"saveLocation\", str(newPath))\n config.save()\n self.reloadSettings()", "def updateconfig(self):\n\n # Initialize the yaml data\n ydata = {\"metadata\": self._metadata, \"nodes\": self._nodes}\n\n # Write the system config file\n filename = self._rootdir + self._metadata[\"system_config_file\"]\n with open(filename, \"w\") as yamlfile:\n yaml.dump(ydata, yamlfile)", "def save_config(self):\n\n if not self.__conf.has_section(self.section):\n self.__conf.add_section(self.section)\n\n for key in self._params:\n val = self._params[key]\n self.__conf.set(self.section, key, val)\n\n with open(self.conf_path, 'w') as f:\n self.__conf.write(f)", "def updateConfig(self):\n # Make sure to keep the default values in place.\n if self.newConfig['sensor'] == 0:\n self.newConfig['sensor'] = self.config['sensor']\n if self.newConfig['camera'] == 0:\n self.newConfig['camera'] = self.config['camera']\n if not self.newConfig['auto']['times']:\n self.newConfig['auto']['times'] = self.config['auto']['times']\n if not self.newConfig['auto']['days']:\n self.newConfig['auto']['days'] = self.config['auto']['days']\n\n # Show the changes.\n if self.verbosity >= 1:\n print('%s: Updating configuration file...' % self.feederName)\n try:\n for key in self.config.keys():\n if type(self.config[key]) is dict:\n for subkey in self.config[key].keys():\n if self.config[key][subkey] != self.newConfig[key][subkey]:\n print('%s: Updating %s from %s to %s.' % (self.feederName, subkey, self.config[key][subkey], self.newConfig[key][subkey]))\n elif self.config[key] != self.newConfig[key]:\n print('%s: Updating %s from %s to %s.' % (self.feederName, key, self.config[key], self.newConfig[key]))\n except ValueError:\n if self.verbosity >= 1:\n print('%s: Configuration file does not contain a valid JSON object.' % self.feederName)\n if self.verbosity == 2:\n print('%s: Overwriting configuration file to: %s.' 
% (self.feederName, self.config))\n\n # Change the configuration file.\n self.config = self.newConfig\n self.writeConfig()", "def saveConfig(self):\r\n self.config[\"Settings\"] = {}\r\n settings = self.config[\"Settings\"]\r\n settings[\"datapath\"] = self.dataPath\r\n settings[\"videopath\"] = self.videoPath\r\n settings[\"dataoffset\"] = str(self.dataOffset)\r\n settings[\"colblindmode\"] = str(self.colBlindMode)\r\n with open(self.CONFIG_FILE,\"w\") as file:\r\n self.config.write(file)", "def _save_changes(self):\n copy2(self._cfg_filename, self._cfg_filename + \".bak\")\n with open(self._cfg_filename, \"w\", encoding=\"utf-8\") as self._cfg_file:\n self.write(self._cfg_file)", "def save_config(self, new_config, filename=None):\n self.cfg.update(new_config)\n if filename is None:\n self.cfg.filename = self.cfg_filename\n else:\n self.cfg.filename = filename\n self.cfg.write()\n logger.info(\"Config file %s written out\" % self.cfg.filename)", "def save(self):\n for p, c in self.configs_:\n c.write(p)", "def update_config(self, kv: dict):\n self._configs.update(kv)\n self._save()", "def save(self):\n if self.changed:\n logger.info(\"Overwriting Redis config\")\n self.client.config_rewrite()\n self.changed = False", "def save(self):\r\n with open(self.filename, 'wb') as configfile:\r\n self.write(configfile)", "def save(self) -> None:\n logger.info(\"Saving to config...\")\n yml.save(self._config, self.configpath)", "def save_to_conf(self):\n raise NotImplementedError", "def on_save(self):\r\n #new_config = ConfigParser.RawConfigParser()\r\n cur_config = self.config.dict_config\r\n #\r\n # update the dict_config\r\n cur_config[\"access_restriction\"][\"ips\"] = self.text_ips.get(1.0, tk.END).strip()\r\n cur_config[\"access_restriction\"][\"ar_url\"] = self.entry_url.get().strip()\r\n #\r\n cur_config[\"email\"][\"relay_server_host\"] = self.entry_server_host.get().strip()\r\n cur_config[\"email\"][\"relay_server_port\"] = self.entry_server_port.get().strip()\r\n cur_config[\"email\"][\"email_from\"] = self.entry_from.get().strip()\r\n cur_config[\"email\"][\"recipients\"] = self.text_recipients.get(1.0, tk.END).strip()\r\n cur_config[\"email\"][\"ar_enabled_subject\"] = self.entry_enabled_subject.get().strip()\r\n cur_config[\"email\"][\"ar_enabled_body\"] = self.text_enabled_body.get(1.0, tk.END).strip()\r\n cur_config[\"email\"][\"ar_disabled_subject\"] = self.entry_disabled_subject.get()\r\n cur_config[\"email\"][\"ar_disabled_body\"] = self.text_disabled_body.get(1.0, tk.END).strip()\r\n\r\n #self.action.save_config()\r\n # # sync dict_config to the gui\r\n # for section in self.config.dict_config:\r\n # new_config.add_section(section)\r\n # for item in self.config.dict_config[section]:\r\n # new_config.set(section, item, self.config.dict_config[section][item])\r\n # #\r\n # # saving to a file\r\n # with open(self.config.file_path, 'w') as newconfigfile:\r\n # new_config.write(newconfigfile)\r\n #\r\n # # mbox.showinfo(\"Information\",\r\n # # \"Current configuration has been successfully saved to '%s'\" % os.path.basename(self.configfile))\r\n # self.console.debug(\"Configuration has been saved to '%s'\" % self.config.file_path)\r", "def save_to_conf(self):\r\n raise NotImplementedError", "def save():\n\n env.config.save(env.config_file)", "def save(self):\n with open(self._config, 'w') as f:\n json.dump(self.data, f, indent=2, sort_keys=True)", "def save(self, config_path):\n raise NotImplementedError()", "def save(self):\n\t\tself.CONFIG.save()\n\t\tself.temp_files.save()", "def 
save_configurations(self):\n # Get the file path\n self.data_path = self.data_path_entry.get()\n # Open the file\n with open(self.data_path, 'rb') as file:\n self.log('Opened ' + str(self.data_path))\n # Un-serialize\n info = pickle.load(file)\n # Write the new properties\n self.main_window.overwrite_properties(info)\n\n self.exit()", "def save(self):\r\n with open(self.filename, 'w') as f:\r\n if self.pretty:\r\n json.dump(self.__config, f, sort_keys=False,\r\n indent=4, separators=(',', ': '))\r\n else:\r\n json.dump(self.__config, f)", "def save(self):\n try:\n with open(self._filename, 'w') as conf_file:\n conf_file.write(json.dumps(self._data))\n except OSError:\n _LOGGER.exception(\"Can't store config in %s\", self._filename)", "def _auto_update_configuration(self) -> None:\n self.config = rasa.utils.train_utils.update_confidence_type(self.config)\n rasa.utils.train_utils.validate_configuration_settings(self.config)\n self.config = rasa.utils.train_utils.update_similarity_type(self.config)\n self.config = rasa.utils.train_utils.update_evaluation_parameters(self.config)", "def update(self, obj):\n\n self.cfg.update(obj)", "def save_config(self):\n\n return self.perform_action('/mgmtd/db/save')", "def save_config(self):\n data = json.dumps(self.cfg)\n\n try:\n file = open(self.cfg_file_name, 'w')\n file.write(data)\n except OSError as err:\n print(\"can't save property: {0}\".format(err))\n else:\n file.close()", "def save():\n\t\ttry:\n\t\t\t#paths\n\t\t\tConfig.conf.set('paths', 'source_dir', Config.source_dir)\n\t\t\tConfig.conf.set('paths', 'lyrics_dir', Config.lyrics_dir)\n\n\t\t\t#actions\n\t\t\tConfig.setBool('actions', 'save_to_file', Config.save_to_file)\n\t\t\tConfig.setBool('actions', 'save_to_tag', Config.save_to_tag)\n\n\t\t\t#sources\n\t\t\tConfig.setBool('sources', 'lyric_wikia', Config.lyric_wikia)\n\t\t\tConfig.setBool('sources', 'musix_match', Config.musix_match)\n\t\t\tConfig.setBool('sources', 'lyricsmode', Config.lyricsmode)\n\t\t\tConfig.setBool('sources', 'az_lyrics', Config.az_lyrics)\n\n\t\t\twith open(Config.config_path, 'w') as configfile:\n\t\t\t\tConfig.conf.write(configfile)\n\t\t\treturn True\n\n\t\t# Catch all config parser errors\n\t\texcept BaseConfigParserError as e:\n\t\t\tprint('Unable to save settings to config.')\n\t\t\tprint(e)\n\t\t\treturn False\n\n\t\t# Catch file handling errors\n\t\texcept IOError as e:\n\t\t\tprint('Unable to save settings to config.')\n\t\t\tprint(e)\n\t\t\treturn False", "def save_config(self):\n with open(self.config_file, 'w') as fout:\n json.dump({'name_dict': self._name_dict, 'metric_dict': self._metric_dict, 'credential_path': self.credential_path, 'path_for_worksheet_name': self.path_for_worksheet_name}, fout)", "def saveExitConfig(self):\n newPath = self.newFolderPath.text()\n config.set(\"saveLocation\", str(newPath))\n config.save()\n self.reloadSettings()\n self.close()", "def save(self):\n # Always write out components in alphabetical order for determinism,\n # especially in tests.\n for function_name in sorted(self._components.keys()):\n self._config_parser[_COMPONENTS_SECTION][\n function_name] = self._components[function_name]\n\n with open(str(self._config_filepath), 'w') as f:\n self._config_parser.write(f)", "def save_config(self, *args, **kwargs):\n raise NotImplementedError", "def save_config(self):\n self.config.app_w = self.width()\n self.config.app_h = self.height()\n self.config.splitter = self.splitter.saveState()\n self.config.save()", "def _save_config(self, data):\n curr_conf = 
self.config_entry.options.copy()\n curr_conf.update(data)\n curr_conf.update(self._conf_devs_option)\n\n return self.async_create_entry(title=\"\", data=curr_conf)", "def save():\n with open(CONFIG_FILE, 'w') as f:\n json.dump(config, f, indent=4, sort_keys=True)", "def _do_update(self, meta, k, v):\n self.runtime.logger.info('{}: [{}] -> {}'.format(meta.in_group_config_path, k, v))\n meta.config[k] = v\n meta.save()", "def save_to_config(self) -> None:\n config_path = os.path.join(self.base_path, \"config.json\")\n\n with open(config_path, \"r\") as _json:\n c_dict = json.load(_json)\n\n c_dict[\"mean_similarity_error\"] = self.ME\n c_dict[\"similarity_correlation\"] = self.pearson_corr\n c_dict[\"similarity_spearman_correlation\"] = self.spearman_corr\n\n with open(config_path, \"w\") as _json:\n json.dump(c_dict, _json)", "def save_config_file(self):\n with open(self.config_file_name, 'w',encoding='utf-8') as outfile:\n json.dump(self._config, outfile,indent=2)", "def commit_config(self):\n raise NotImplementedError", "def update_config(self, config):\n return self._update_config(\"config\", config)", "def update(self, config_dict):\r\n self._update(config_dict, allow_new_keys=True)", "def save_configuration(self):\n dom = self.vistrailsStartup.startup_dom()\n doc = dom.documentElement\n configuration_element = enter_named_element(doc, 'configuration')\n doc.removeChild(configuration_element)\n self.configuration.write_to_dom(dom, doc)\n self.vistrailsStartup.write_startup_dom(dom)\n dom.unlink()", "def save_conf(self):\r\n self.sendAndRecv(\"SAVECONF\\r\\n\")", "def save_config(self):\n if not os.path.exists(self._conf_dir):\n os.makedirs(self._conf_dir)\n conf_file = os.path.join(self._conf_dir, \"dql.json\")\n with open(conf_file, \"w\") as ofile:\n json.dump(self.conf, ofile, indent=2)", "def save_config(self):\n try:\n print(\"Clearing active users\")\n for room in self.rooms:\n room.room_attrbts['active'].clear()\n print('Saving config...')\n print(\"Known clients:\")\n self.pp.pprint(self.clients)\n print(\"Known rooms:\")\n for room in self.rooms: \n self.pp.pprint(room.name)\n self.pp.pprint(room.room_attrbts)\n path = os.environ.get('HOME') + '/.tinyserver'\n roomJSON = jsonpickle.encode(self.rooms)\n with open(path, 'w') as f:\n json.dump(roomJSON, f)\n except Exception as e:\n print(\"Error saving config!! {0}\".format(e))", "def SaveConfig(self):\n config_value = getattr(self, APPDATA)\n path_value = config_value.AbsolutePaths[0]\n default_cfg_file = os.path.join(path_value, CONFIG_FILE_NAME)\n temp_file = default_cfg_file + '.TEMP'\n if os.path.exists(default_cfg_file):\n json.dump(type(self)._CURRENT_CONFIG,\n open(temp_file.lower(),\n mode='w'),\n cls=ConfigEncoder,\n sort_keys=False,\n indent=4)\n EnsureBackup(temp_file, default_cfg_file)\n else:\n if not os.path.isdir(path_value):\n os.mkdir(path_value)\n json.dump(type(self)._CURRENT_CONFIG,\n open(default_cfg_file.lower(),\n mode='w'),\n cls=ConfigEncoder,\n sort_keys=False,\n indent=4)", "async def save_config(self):\n\n # Display info message\n log.info(\"save_config\")\n\n # Send command\n output = await self.send_command(self.cmd_save_config)\n\n # Return the commands of the configuration saving process\n return output", "async def save_config(self):\n\n # Display info message\n log.info(\"save_config\")\n\n # Send command to ask for saving config. Wait till the question to overwrite\n # the startup file (\"Overwrite file [startup-config].... 
(Y/N)[N] ?\")\n output = await self.send_command(self.cmd_save_config, pattern=\"?\")\n\n # Confirm to save the config\n output += await self.send_command(\"Y\")\n\n # Return the commands of the configuration saving process\n return output", "def store_config(self):\n stored_config_filename = self.stored_config_filename\n if (not stored_config_filename.exists() or\n self.config != self._stored_config):\n # -- STORE CONFIG-DATA (persistently):\n self.config.save(self.stored_config_filename)\n self._stored_config = self.config.copy()\n self.dirty = False", "def __write_config(self):\n with open(self.config_file, 'w') as data_file:\n config = {\"ibooks_doc_root\":self.ibooks_doc_root,\n \"library_folder\":self.library_folder,\n \"annotation_folder\":self.annotation_folder,\n \"tmp_dir\":self.tmp_dir\n } \n data = json.dumps(config, ensure_ascii=False)\n data_file.write(data)", "def save_config_file(self):\n wkdir = Path(self.config_dict[\"outputdir\"])\n config_filename = str(wkdir / f\"{self.config_dict['name']}.json\")\n save_config(self.config_dict, config_filename)", "def save_config(self):\n if self.check_auth():\n config = self.get_github_config_path(self.CONFIG)\n parser = configparser.RawConfigParser()\n parser.add_section(self.CONFIG_SECTION)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_USER_LOGIN,\n self.user_login)\n if self.user_token is not None:\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_USER_TOKEN,\n self.user_token)\n if self.user_feed is not None:\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_USER_FEED,\n self.user_feed)\n if self.enterprise_url is not None:\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_ENTERPRISE_URL,\n self.enterprise_url)\n if self.user_pass is not None:\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_USER_PASS,\n self.user_pass)\n else:\n parser.remove_option(self.CONFIG_SECTION,\n self.CONFIG_USER_PASS)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_VERIFY_SSL,\n self.verify_ssl)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_PRIMARY,\n self.clr_primary)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_SECONDARY,\n self.clr_secondary)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_TERTIARY,\n self.clr_tertiary)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_QUATERNARY,\n self.clr_quaternary)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_BOLD,\n self.clr_bold)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_CODE,\n self.clr_code)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_ERROR,\n self.clr_error)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_HEADER,\n self.clr_header)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_LINK,\n self.clr_link)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_LIST,\n self.clr_list)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_MESSAGE,\n self.clr_message)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_NUM_COMMENTS,\n self.clr_num_comments)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_NUM_POINTS,\n self.clr_num_points)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_TAG,\n self.clr_tag)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_TIME,\n self.clr_time)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_TITLE,\n self.clr_title)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_TOOLTIP,\n self.clr_tooltip)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_USER,\n self.clr_user)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_VIEW_LINK,\n self.clr_view_link)\n parser.set(self.CONFIG_SECTION,\n self.CONFIG_CLR_VIEW_INDEX,\n 
self.clr_view_index)\n with open(config, 'w+') as config_file:\n parser.write(config_file)", "def save_data(self):\n # Validate\n try:\n self._data = self._schema(self._data)\n except vol.Invalid as ex:\n _LOGGER.error(\"Can't parse data: %s\",\n humanize_error(self._data, ex))\n\n # Load last valid data\n _LOGGER.warning(\"Reset %s to last version\", self._file)\n self.read_data()\n return\n\n # write\n try:\n write_json_file(self._file, self._data)\n except (OSError, json.JSONDecodeError) as err:\n _LOGGER.error(\"Can't store config in %s: %s\", self._file, err)", "def saveconfig(self, newconf, apply=True):\n with open(self.outputConfig, 'w') as f:\n f.write(newconf)\n if apply:\n self.ApplyNewSettings()", "def setup_configuration_file(self):\n\n with open(self.config_path, \"w+\") as f_config:\n\n f_config.write(get_configuration_file_form())", "def save_config() -> None:\n with open(_config_file, \"w\", newline=\"\") as config_file:\n json.dump(_config, config_file, indent=4)\n config_file.truncate()", "def write_config(self):\n logging.debug(\"Writing configuration file: %s\" % self.config_file)\n f = open(self.config_file, \"w\")\n self.config.write(f)\n f.close()", "def update(self, config_dict):\n self._update(config_dict, allow_new_keys=True)", "def __update(self):\n if self.__file:\n target_file = open(self.__file)\n for attr in dir(self):\n if not attr.startswith(\"_\") and \\\n (self.__overwrite or (attr not in self.__exclude)) \\\n and not self.__is_attr_callable(attr):\n try:\n delattr(self, attr)\n except AttributeError:\n pass\n pool = yaml.load(target_file)\n target_file.close()\n if pool: # could be None\n for key, val in pool.iteritems():\n if not key.startswith(\"_\") and \\\n (self.__overwrite or (key not in self.__exclude)) \\\n and not self.__is_attr_callable(key):\n setattr(self, key, val)\n if hasattr(self, 'log_config_file_changes')\\\n and self.log_config_file_changes:\n logging.getLogger(__name__).info(\"Config file has updated.\")", "def saveCurrentConfig():\n cf = ConfigParser.ConfigParser()\n cf.add_section(\"dir_config\")\n cf.set(\"dir_config\", \"7zpath\", SW_CONFIG['7zpath'])\n cf.set(\"dir_config\", \"sharefolder\", SW_CONFIG['sharefolder'])\n cf.set(\"dir_config\", \"distpath\", SW_CONFIG['distpath'])\n cf.add_section(\"sw_config\")\n cf.set(\"sw_config\", \"version\", SW_CONFIG['sw_version'])\n cf.set(\"sw_config\", \"startup\", SW_CONFIG['startup'])\n cf.add_section(\"run_config\")\n cf.set(\"run_config\", \"pop\", RUN_CONFIG['pop'])\n cf.set(\"run_config\", \"backup\", RUN_CONFIG['backup'])\n cf.add_section(\"hook_config'\")\n for k, v in HOOK_CONFIG:\n cf.set(\"hook_config\", k, v)\n fp = open(CONFIG_FILE, \"w\")\n cf.write(fp)\n fp.close()", "def save(self):\n with open(self._CONFIG_FILE_PATH, 'w') as config_file:\n json.dump(vars(self), config_file)\n return self._CONFIG_FILE_PATH", "def save_config(**kwargs):\n if kwargs == {}:\n kwargs = config._config\n current_config = _load_config()\n current_config.update(**kwargs)\n # write to disk\n fname = _get_config_fname()\n if fname is None:\n raise RuntimeError('config filename could not be determined')\n if not op.isdir(op.dirname(fname)):\n os.mkdir(op.dirname(fname))\n with open(fname, 'w') as fid:\n json.dump(current_config, fid, sort_keys=True, indent=0)", "def saveConfig(config):\n global SW_CONFIG\n cf = ConfigParser.ConfigParser()\n cf.add_section(\"dir_config\")\n cf.set(\"dir_config\", \"7zpath\", config['7zpath'])\n cf.set(\"dir_config\", \"sharefolder\", config['sharefolder'])\n 
cf.set(\"dir_config\", \"distpath\", config['distpath'])\n cf.add_section(\"sw_config\")\n cf.set(\"sw_config\", \"version\", config['sw_version'])\n cf.set(\"sw_config\", \"startup\", config['startup'])\n cf.add_section(\"run_config\")\n cf.set(\"run_config\", \"pop\", False)\n cf.set(\"run_config\", \"backup\", False)\n fp = open(CONFIG_FILE, \"w\")\n cf.write(fp)\n fp.close()\n SW_CONFIG = config", "def refresh_configuration(self):\n pass", "def save_setting(self):\n if self.is_checked.get():\n if \"Email\" not in s.alert:\n s.updateAlert(\"Email\")\n s.updateEmail(self.email_addr_entry.get())\n if not self.is_checked.get():\n if \"Email\" in s.alert:\n s.deleteAlert(\"Email\")\n s.deleteEmail()\n # Check the refresh interval\n if self.is_minimize_to_system_tray.get():\n s.updateMinimize(\"True\")\n else:\n s.updateMinimize(\"False\")\n\n if self.is_launch_at_start_up.get():\n s.updateLaunchAtStartup(\"True\")\n become_persistent(__file__)\n else:\n s.updateLaunchAtStartup(\"False\")\n remove_startup()\n\n s.updateSetting(self.interval_entry.get())\n Tracker.save_state(Tracker.FILENAME, s)", "def save(self):\n SignalPlug.save(self)\n self.config.set(\"port\", self._port)\n self.config.set(\"maxrand\", self._maxrand)\n self.config.set(\"minrand\", self._minrand)", "def update_config(self, update_dict):\n self.config = recursive_merge_dicts(self.config, update_dict)", "def _save_to_database(self, data):\n self._logger.info(\"Saving new config to database\")\n\n query1 = \"DELETE FROM project_config WHERE config_site = ?\"\n query2 = \"\"\"INSERT INTO project_config (config_site, config_json)\n VALUES (?, ?)\"\"\"\n\n dump = json.dumps(data)\n with self._bot.localdb as cursor:\n cursor.execute(\"BEGIN\")\n cursor.execute(query1, (self._bot.wikiid,))\n cursor.execute(query2, (self._bot.wikiid, dump))", "def saveNewConfiguration(self):\n selection = tk.filedialog. 
\\\n asksaveasfilename(title=\"Save CHUM configuration\")\n if selection:\n self._currentConfiguration = selection\n self._saveToFilePath(selection)", "def saveConfig(self, name=None):\n\n configDir = self.mwGlob['configDir']\n\n if self.config.get('profileName', '') == 'config':\n if 'reference' in self.config:\n del self.config['reference']\n\n # default saving for reference\n if name is None:\n name = self.config.get('reference', 'config')\n\n fileName = configDir + '/' + name + '.cfg'\n with open(fileName, 'w') as outfile:\n json.dump(self.config,\n outfile,\n sort_keys=True,\n indent=4)\n # if we save a reference first, we have to save the config as well\n if name != 'config':\n fileName = configDir + '/config.cfg'\n with open(fileName, 'w') as outfile:\n json.dump(self.config,\n outfile,\n sort_keys=True,\n indent=4)\n return True", "def save_config(self):\n if not os.path.exists(USER_CONFIG_PATH):\n os.makedirs(USER_CONFIG_PATH)\n\n # obtener el config actual\n config = self.get_config()\n\n # obtener el cliente\n client = self._args.get('client')\n\n # ciertos parametros no se tienen que salvar\n args = self._args.copy()\n for item in ['doc', 'command', 'client']:\n if item in args:\n args.pop(item)\n\n # actualizar el cliente default\n config['client'] = client\n\n # actualizar el resto de los parametros para ese cliente\n for item in args:\n if client in config:\n config[client][item] = args.get(item)\n else:\n config[client] = {item: args.get(item)}\n\n with open(USER_CONFIG_FILE, 'w') as config_file:\n yaml.dump(config, config_file, default_flow_style=False,\n allow_unicode=True)", "def save_config(self):\n\n h_config = configparser.ConfigParser()\n\n h_config[\"general\"] = {}\n if not self.configuration.interval:\n self.configuration.interval = __interval__\n h_config[\"general\"][\"interval\"] = str(self.configuration.interval)\n if not self.configuration.wifi_clients:\n self.configuration.wifi_clients = __wifi_clients_example__\n h_config[\"general\"][\"wifi_clients\"] = \",\".join(self.configuration.wifi_clients)\n if not self.configuration.schedules_names:\n self.configuration.schedules_names = __schedules_names_example__\n h_config[\"general\"][\"schedules_name\"] = \",\".join(self.configuration.schedules_names)\n\n h_config[\"unifi\"] = {}\n if not self.configuration.unifi_host:\n self.configuration.unifi_host = __unifi_controller_host__\n h_config[\"unifi\"][\"host\"] = self.configuration.unifi_host\n if not self.configuration.unifi_port:\n self.configuration.unifi_port = __unifi_controller_port__\n h_config[\"unifi\"][\"port\"] = str(self.configuration.unifi_port)\n if not self.configuration.unifi_username:\n self.configuration.unifi_username = __unifi_controller_user__\n h_config[\"unifi\"][\"username\"] = self.configuration.unifi_username\n if not self.configuration.unifi_password:\n self.configuration.unifi_password = __unifi_controller_pwd__\n h_config[\"unifi\"][\"password\"] = self.configuration.unifi_password\n\n h_config[\"hue\"] = {}\n if not self.configuration.hue_host:\n self.configuration.hue_host = __hue_hub_host__\n h_config[\"hue\"][\"host\"] = self.configuration.hue_host\n if not self.configuration.hue_port:\n self.configuration.hue_port = __hue_hub_port__\n h_config[\"hue\"][\"port\"] = str(self.configuration.hue_port)\n if not self.configuration.hue_key:\n self.configuration.hue_key = __hue_key__\n h_config[\"hue\"][\"key\"] = self.configuration.hue_key\n\n h_config[\"zmq\"] = {}\n if not self.configuration.pub_host:\n self.configuration.pub_host = 
__zmq_default_publishing_host__\n h_config[\"zmq\"][\"host\"] = self.configuration.pub_host\n if not self.configuration.pub_port:\n self.configuration.pub_port = __zmq_default_publishing_port__\n h_config[\"zmq\"][\"port\"] = str(self.configuration.pub_port)\n if \"no_pub\" in self.configuration:\n h_config[\"zmq\"][\"disabled\"] = str(int(self.configuration.no_pub))\n\n h_config[\"logging\"] = {}\n if self.configuration.syslog_host:\n h_config[\"logging\"][\"syslog_host\"] = self.configuration.syslog_host\n if self.configuration.syslog_port:\n h_config[\"logging\"][\"syslog_port\"] = str(self.configuration.syslog_port)\n if self.configuration.log_file:\n h_config[\"logging\"][\"log_file\"] = str(self.configuration.log_file)\n\n with self.config_file.open(mode='w') as configfile:\n h_config.write(configfile)\n logging.info(\"Configuration saved to {}\".format(str(self.config_file)))", "def update_config(self, config):\n self.config = {\n \"display_name\": \"\",\n \"description\": \"\",\n \"required\": 1,\n \"type\": \"string\"\n }\n self.config.update(config)\n self.API_KEY = self.config['key']", "def update_config(self, config):\n self.config = {\n \"display_name\": \"\",\n \"description\": \"\",\n \"required\": 1,\n \"type\": \"string\"\n }\n self.config.update(config)\n self.API_KEY = self.config['key']", "def save_config():\n # Order the load flags using load_keys...\n od_load_flags = OrderedDict()\n for k in load_keys:\n od_load_flags[k] = load_flags[k]\n pawstools.save_cfg(od_load_flags,cfg_file)", "def save_config(self, path):\n if os.path.isdir(path):\n path = os.path.join(path, 'config.json')\n print('Save config to {}'.format(path))\n with open(path, 'w', encoding='utf-8') as w:\n w.write(json.dumps(self.to_dict(), indent=2,\n sort_keys=True))", "def reload_config(self):\n pass", "def with_config_update(self):\n original_config = self.load_config()\n\n config_data = original_config.json\n if str(self.ITEM_PUBLIC_ID) in config_data[f\"{self.ITEM_TYPE}s\"]:\n config_data[f\"{self.ITEM_TYPE}s\"].remove(str(self.ITEM_PUBLIC_ID))\n config_data[f\"{self.ITEM_TYPE}s\"].append(\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:0.0.1\"\n )\n self.dump_config(AgentConfig.from_json(config_data))\n try:\n yield\n finally:\n self.dump_config(original_config)", "def save(data, section): # save a config\n\tglobal _timesSaved\n\tif dynConfig['logConfigActions']:\n\t\tlogger.info( f'saving {section}: {data}' )\n\t# save\n\tif section != 'placeholderForSaving':\n\t\tcurrentConfigData[section] = data\n\t\tlogger.debug( f'saved {section}' )\n\telse:\n\t\t_timesSaved = 2\n\t# save to disk if this is the third save\n\tif _timesSaved == 0 or _timesSaved == 1:\n\t\t_timesSaved += 1\n\telse:\n\t\t_timesSaved = 0\n\t\ttry:\n\t\t\t# save to disk\n\t\t\twith open( configPath, 'w', encoding='utf-8' ) as file:\n\t\t\t\tjson.dump( currentConfigData, file, indent=4 )\n\t\texcept:\n\t\t\tlogger.error( f'failed to save config to disk!' 
)\n\t\t\traise ConfigError( 'error while saving the config file' )", "def save():\n print(\"Saving config file..\")\n\n res = yaml.round_trip_dump(_conf, indent=2, block_seq_indent=1)\n\n with open(__config_file, 'w', encoding='utf-8') as stream:\n stream.write(res)", "def write(self, config_path=CONFIG_PATH):\n\n with open(self.full_path(config_path), 'w') as conf_fh:\n conf_fh.write(self.local_config)", "def save_cfg(self, output_dir):\n output_path = os.path.join(output_dir, 'level_config.cfg')\n shutil.copy(self.cfg_path, output_path)", "def write_config(self):\n\n #Before writing to file we must convert underscores to dashes, moreover _id must be written as id, and _type as type\n\n if not os.path.exists(NetworkManager_conf_dir):\n os.makedirs(NetworkManager_conf_dir, mode=0755)\n\n profile_path = os.path.join(\"%s\" % NetworkManager_conf_dir, self.connection._id)\n with open(profile_path, \"wb\") as configfile:\n self.cfg.write(configfile)", "def write_config(self):\n cfg = {\n 'channel':self.channel,\n 'seuil_min':self.seuil_min,\n 'last_level':self.last_level,\n 'last_level_date':self.last_level_date\n }\n write_conf(self.CONF_FILE,cfg)", "def write_config(self, filename):\n self.config.filename = filename\n self.config.write()", "def update_shed_config(self, shed_conf):\n for index, my_shed_tool_conf in enumerate(self._dynamic_tool_confs):\n if shed_conf['config_filename'] == my_shed_tool_conf['config_filename']:\n self._dynamic_tool_confs[index] = shed_conf\n self._save_integrated_tool_panel()", "def update_configs(self, config):\n for what in self.plugins: # backend, repo etc.\n for key in self.plugins[what]: # s3, filesystem etc.\n # print(\"Updating configuration of\", what, key)\n self.plugins[what][key].config(what='set', params=config)\n return", "def write_config(self):\n xshear_conf=XShearConfig(self['run'])\n xshear_conf.write()", "def commit(self) -> None:\n commit_app_config()", "def config_updated(self):\n if callable(self.on_config_updated):\n self.on_config_updated(self.config())", "def updated(self, newConfiguration):\n log.debug('ConfigListener: configuration %s updated' % newConfiguration)", "def refresh_config(self):\n with open(config_name, 'rb') as f:\n self.CONFIG = simplejson.load(f)\n\n return self", "def _overwrite_with_config(self, new_cfg):\n for section in new_cfg.sections():\n for key, val in new_cfg.items(section):\n self.config.set(section, key, val)", "def commit(self):\r\n with open(self._cfgfile, 'w') as cfgfile:\r\n self._cfgparser.write(cfgfile)" ]
[ "0.9107356", "0.79640085", "0.77216876", "0.7674935", "0.767021", "0.7651261", "0.7618307", "0.7609429", "0.7594311", "0.75444627", "0.74497473", "0.73920816", "0.7372493", "0.73333156", "0.73213214", "0.72997504", "0.7294209", "0.7274685", "0.72723716", "0.7229026", "0.722107", "0.71936655", "0.71646893", "0.7132433", "0.71136826", "0.70968014", "0.7088311", "0.70671356", "0.7049176", "0.7033914", "0.70230186", "0.70092934", "0.69836456", "0.69756657", "0.6960538", "0.6952331", "0.6936691", "0.6931863", "0.6915682", "0.68988794", "0.6890581", "0.6873425", "0.6841778", "0.68193406", "0.67349386", "0.6717688", "0.6717385", "0.6707082", "0.6695653", "0.66952014", "0.66930854", "0.66827756", "0.66563004", "0.6646592", "0.6640549", "0.6633868", "0.66139066", "0.6609525", "0.6596515", "0.6595272", "0.6585329", "0.6579575", "0.6576964", "0.6568321", "0.6567315", "0.6540389", "0.65342194", "0.651559", "0.65119636", "0.65109044", "0.65082645", "0.65057385", "0.6503268", "0.6494474", "0.6481774", "0.6467048", "0.6457414", "0.6451155", "0.6450412", "0.64295244", "0.64295244", "0.6426003", "0.64186543", "0.6415508", "0.64135814", "0.64103174", "0.640571", "0.63834774", "0.6381478", "0.6377594", "0.63734746", "0.6370084", "0.6369924", "0.6369", "0.6367641", "0.63614476", "0.63532835", "0.63509804", "0.6348136", "0.6346922", "0.63462716" ]
0.0
-1
Create a space for `hyperopt`.
def get_space(options, param_types, names):\n    space = {}\n    for i, lst in enumerate(options):\n        param_type = param_types[i]\n        name = names[i]\n        # if categorical, sample one of the options randomly\n        if param_type == "categorical":\n            sample = hp.choice(name, lst)\n        # otherwise sample between the minimum and maximum values\n        elif param_type in ["int", "float"]:\n            min_val = lst[0]\n            max_val = lst[1]\n            if "dropout" in name:\n                if min_val == 0:\n                    min_val = 1e-4\n                low = np.log(min_val)\n                high = np.log(max_val)\n                sample = hp.loguniform(name, low=low, high=high)\n            elif param_type == "float":\n                sample = hp.uniform(name, low=min_val, high=max_val)\n            elif param_type == "int":\n                sample = hp.quniform(name, low=min_val, high=max_val, q=1)\n        space[name] = sample\n    return space
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_space(self, param_grid):\n if self.verbose>9:\n 'Building param space...'\n \n _warnings.filterwarnings('ignore')\n \n param_grid = param_grid.copy()\n space = {}\n for key in param_grid.keys():\n params = param_grid[key]\n \n if self.verbose>9:\n print('\\tinput:',key, params)\n \n type_str = str(type(params[0]))\n\n if 'float' in type_str or 'int' in type_str:\n \n min_ = min(params)\n max_ = max(params)\n log10_min_ = _np.log10(min_)\n log10_max_ = _np.log10(max_)\n\n if round(log10_max_)-round(log10_min_)>1 and round(log10_max_)-round(log10_min_)!=_np.inf: # use uniform distribution on log spacing \n \n space['log10.'+key] = _hyperopt.hp.uniform(key, log10_min_, log10_max_)\n \n if self.verbose>9:\n print('\\toutput:','log10.'+key, 'uniform', log10_min_, log10_max_)\n \n else:\n if 'int' in type_str:\n space[key] = _hyperopt.hp.quniform(key, min_, max_, 1)\n \n if self.verbose>9:\n print('\\toutput:',key, 'quniform', min_, max_)\n \n elif 'float' in type_str:\n space[key] = _hyperopt.hp.uniform(key, min_, max_)\n \n if self.verbose>9:\n print('\\toutput:',key, 'uniform', min_, max_)\n \n \n elif 'str' in type_str:\n space[key] = _hyperopt.hp.choice(key, [i for i in range(len(params))])\n \n if self.verbose>9:\n print('\\toutput:',key, 'choice', [i for i in range(len(params))])\n\n else:\n raise Exception('type(params[0]) is '+type_str+'. This type of hyperparameter is not yet supported.')\n\n assert(len(space.keys())==len(param_grid.keys())), 'len(space.keys())='+str(len(space.keys()))+', which is not equal to len(param_grid.keys())='+str(len(param_grid.keys()))\n \n if self.verbose>9:\n print('...finished building space')\n \n _warnings.filterwarnings('default')\n\n return space", "def get_space(): \n space = {\n 'timesteps_per_batch': hp.choice('timesteps_per_batch', [512, 1024, 2048, 4096, 8192]),\n 'vf_stepsize': hp.loguniform('vf_stepsize', -5, -2),\n 'max_kl' : hp.loguniform('max_kl', -2.5, -0.5),\n 'gamma': hp.uniform('gamma', (1-(1/((10**(-1))*4))), (1-(1/((10**(1.5))*4)))), #4:T. Remember to change this if code is altered. -1:T/tau. tau=0.04=dt\n 'lam': hp.uniform('lam', (1-(1/((10**(-1))*4))), (1-(1/((10**(1.5))*4)))) #4:T. Remember to change this if code is altered. -1:T/tau. 
tau=0.04=dt\n }\n return space", "def _create_spaces(self):\n raise NotImplementedError", "def create_space(self, pipeline):\n space = {}\n space_list = {}\n for block in list(pipeline.blocks.values()):\n space = {}\n\n tunable_hyperparameters = block.get_tunable_hyperparameters()\n primitive = str(block).split('MLBlock - ')[1]\n if(tunable_hyperparameters == {}):\n raise Exception(\n 'Can not create the domain Space.\\\n The value of tunnable hyperparameters is: {}')\n\n for hyperparameter in tunable_hyperparameters:\n hp_type = list(tunable_hyperparameters[hyperparameter].keys())\n if('values' in hp_type):\n value = tunable_hyperparameters[hyperparameter]['values']\n space[hyperparameter] = hp.choice(hyperparameter, value)\n elif('range' in hp_type):\n value = tunable_hyperparameters[hyperparameter]['range']\n if(tunable_hyperparameters[hyperparameter]['type'] == 'float'):\n values = np.linspace(value[0], value[1], 10)\n if(tunable_hyperparameters[hyperparameter]['default'] is None):\n np.append(values, None)\n space[hyperparameter] = hp.choice(\n hyperparameter, values)\n elif (tunable_hyperparameters[hyperparameter]['type'] == 'str'):\n space[hyperparameter] = hp.choice(hyperparameter, value)\n else:\n values = np.arange(value[0], value[1], 1)\n if(tunable_hyperparameters[hyperparameter]['default'] is None):\n np.append(values, None)\n space[hyperparameter] = hp.choice(\n hyperparameter, values)\n elif(tunable_hyperparameters[hyperparameter]['type'] == 'bool'):\n space[hyperparameter] = hp.choice(hyperparameter, [True, False])\n\n space_list[primitive] = space\n return space_list", "def make(self):\n return make_operation_space()", "def create_optimizer(self, context, optimizer, host):\n pass", "def create_space(num_rows, num_cols, goal=[], obstacles=[], *args):\n space = []\n for i in range (num_rows):\n space.append([])\n for i in range(num_rows):\n for j in range(num_cols):\n space[i].append([])\n \n for i in range(num_rows):\n for j in range(num_cols):\n space[i][j]=node()\n \n for i in range(num_rows):\n for j in range(num_cols):\n space[i][j].h = math.sqrt((goal[0]-i)**2 + (goal[1]-j)**2)\n space[i][j].f = 10000\n space[i][j].g = 10000\n \n for obs in obstacles:\n space[obs[0]][obs[1]].h = 1000\n \n heuristics = np.zeros((num_rows,num_cols))\n for i in range(num_rows):\n for j in range(num_cols):\n heuristics[i][j]=space[i][j].h\n \n for i in range(num_rows):\n for j in range(num_cols):\n space[i][j].cor = [i, j]\n \n return space, heuristics", "def setSpace(*args):", "def hyperparams():\n H = 6\n return Munch(N=500, H=H, D=(H // 2) ** 2, batch_size=10, precision=to.float32)", "def __init__(self, space):\n super().__init__(space=space, exponent=1)", "def space(dim, dim2, dim3):\n space = Space()\n space.register(dim)\n space.register(dim2)\n space.register(dim3)\n return space", "def test_create_hyperflex_cluster_network_policy(self):\n pass", "def makeenv(self):\n eps=np.ones((self.nx,self.ny))*const.epsilon_0\n mu=np.ones((self.nx,self.ny))*const.mu_0\n\n eps[:20,:] *= self.q #adself.ds a space of higher permittivity \n eps[-20:,:] *= self.q #adself.ds a space of higher permittivity \n eps[:,:20] *= self.q #adself.ds a space of higher permittivity \n eps[:,-20:] *= self.q #adself.ds a space of higher permittivity \n #mu[:20,:] /= self.q #adself.ds a space of higher permittivity \n #mu[-20:,:] /= self.q #adself.ds a space of higher permittivity \n #mu[:,:20] /= self.q #adself.ds a space of higher permittivity \n #mu[:,-20:] /= self.q #adself.ds a space of higher permittivity 
\n\n return eps, mu", "def get_configspace():\n cs = CS.ConfigurationSpace()\n\n \n\n # Learning rate hyperparameter\n lr = CSH.UniformFloatHyperparameter('lr', lower=1e-6, upper=1e-1, default_value='1e-2', log=True)\n\n \n\n # Stochastic gradient descent momentum as parameter.\n sgd_momentum = CSH.UniformFloatHyperparameter('sgd_momentum', lower=0.0, upper=0.99, default_value=0.9, log=False)\n\n cs.add_hyperparameters([lr, sgd_momentum])\n \n # Optimizer hyperparameters.\n #optimizer = CSH.CategoricalHyperparameter('optimizer', ['Adam', 'SGD'])\n #cs.add_hyperparameters([optimizer])\n \n # Only add the sgd_momentum hyperparameter if the optimizer is stochastic gradient descent. Otherwise, it doesn't make sense.\n #cond = CS.EqualsCondition(sgd_momentum, optimizer, 'SGD')\n #cs.add_condition(cond)\n\n ''' The below is commented out because we're not fiddling with architecture in this optimization.'''\n #num_new_fc_layers = CSH.UniformIntegerHyperparameter('num_new_fc_layers', lower=0, upper=3, default_value=0, log=False)\n #num_els_new_1 = CSH.UniformIntegerHyperparameter('num_els_new_1', lower=128, upper=4096, default_value = 1000, log=True)\n #num_els_new_2 = CSH.UniformIntegerHyperparameter('num_els_new_2', lower=128, upper=4096, default_value = 1000, log=True)\n #num_els_new_3 = CSH.UniformIntegerHyperparameter('num_els_new_3', lower=128, upper=4096, default_value = 1000, log=True)\n\n #freeze0_old = CSH.UniformIntegerHyperparameter('freeze0_cat', lower = 0, upper = 1, default_value = 1, log=False)\n #freeze1_old = CSH.UniformIntegerHyperparameter('freeze1_cat', lower=0, upper=1, default_value=1, log=False)\n\n #cs.add_hyperparameters([num_new_fc_layers, num_els_new_1, num_els_new_2, num_els_new_3, freeze0_old, freeze1_old, batchsize])\n\n dropout_rate = CSH.UniformFloatHyperparameter('dropout_rate', lower=0.0, upper=0.9, default_value=0.5, log=False)\n\n cs.add_hyperparameters([dropout_rate])\n\n return cs", "def __init__(self, space):\n super().__init__(space=space, exponent=2)", "def run(self):\n if self.pp['net']:\n space = {\n # Qlearnnet\n 'net_lr': hp.loguniform('net_lr', np.log(5e-7), np.log(1e-4)),\n 'net_lr_decay': hp.loguniform('net_lr_decay', np.log(0.90), np.log(0.99)),\n # Singh\n # 'net_lr': hp.loguniform('net_lr', np.log(1e-7), np.log(5e-4)),\n 'beta': hp.uniform('beta', 16, 30),\n # Double\n 'net_copy_iter': hp.loguniform('net_copy_iter', np.log(5), np.log(150)),\n 'net_creep_tau': hp.loguniform('net_creep_tau', np.log(0.01),\n np.log(0.7)),\n # Exp. 
replay\n 'batch_size': scope.int(hp.uniform('batch_size', 8, 16)),\n 'buffer_size': scope.int(hp.uniform('buffer_size', 2000, 10000)),\n # N-step\n 'n_step': scope.int(hp.uniform('n_step', 3, 40)),\n # Policy\n 'vf_coeff': hp.uniform('vf_coeff', 0.005, 0.5),\n 'entropy_coeff': hp.uniform('entropy_coeff', 1.0, 100.0)\n }\n else:\n space = {\n 'beta': hp.uniform('beta', 7, 23),\n 'alpha': hp.uniform('alpha', 0.0001, 0.4),\n 'alpha_decay': hp.uniform('alpha_decay', 0.9999, 0.9999999),\n 'epsilon': hp.loguniform('epsilon', np.log(0.2), np.log(0.8)),\n 'epsilon_decay': hp.uniform('epsilon_decay', 0.9995, 0.9999999),\n 'gamma': hp.uniform('gamma', 0.7, 0.90),\n 'lambda': hp.uniform('lambda', 0.0, 1.0)\n }\n # Only optimize parameters specified in args\n space = {param: space[param] for param in self.pp['hopt']}\n if self.pp['hopt_fname'].startswith('mongo:'):\n self._hopt_mongo(space)\n else:\n self._hopt_pickle(space)", "def _parse_space_create(self, *cmd):\n self.created = {'storageserver': ''}\n cmd = list(*cmd)\n while cmd:\n param = cmd.pop(0)\n if param == \"-n\":\n self.created['name'] = cmd.pop(0)\n elif param == \"-N\":\n self.created['net'] = cmd.pop(0)\n elif param == \"-s\":\n self.created['size'] = cmd.pop(0)\n elif param == \"--redundancy\":\n self.created['redundancy'] = cmd.pop(0)\n elif param == \"--user\":\n self.created['user'] = cmd.pop(0)\n elif param == \"--user\":\n self.created['user'] = cmd.pop(0)\n elif param == \"--group\":\n self.created['group'] = cmd.pop(0)\n elif param == \"--mode\":\n self.created['mode'] = cmd.pop(0)\n elif param == \"-S\":\n self.created['storageserver'] += cmd.pop(0) + \",\"\n else:\n pass", "def get_hyper_params():\n #################################\n ##### INSERT YOUR CODE HERE #####\n layers_size = [4096, 4096, 10]\n activation = 'relu'\n lr = 5e-4\n epochs = 30\n dropout_rate = 0.2\n init_kind = 'xavier'\n ##### END YOUR CODE HERE ########\n #################################\n hyper_params = {\n 'layers_size': layers_size,\n 'activation': activation,\n 'lr': lr,\n 'epochs': epochs,\n 'init_kind': init_kind,\n 'dropout_rate': dropout_rate,\n }\n return hyper_params", "def get_hyper_params():\n #################################\n ##### INSERT YOUR CODE HERE #####\n layers_size = [4096, 4096, 10]\n activation = 'relu'\n lr = 5e-4\n epochs = 30\n dropout_rate = 0.2\n init_kind = 'xavier'\n ##### END YOUR CODE HERE ########\n #################################\n hyper_params = {\n 'layers_size': layers_size,\n 'activation': activation,\n 'lr': lr,\n 'epochs': epochs,\n 'init_kind': init_kind,\n 'dropout_rate': dropout_rate,\n }\n return hyper_params", "def get_hyper_params():\n #################################\n ##### INSERT YOUR CODE HERE #####\n layers_size = [4096, 4096, 10]\n activation = 'relu'\n lr = 1e-7\n epochs = 30\n dropout_rate = 0.2\n init_kind = 'xavier'\n ##### END YOUR CODE HERE ########\n #################################\n hyper_params = {\n 'layers_size': layers_size,\n 'activation': activation,\n 'lr': lr,\n 'epochs': epochs,\n 'init_kind': init_kind,\n 'dropout_rate': dropout_rate,\n }\n return hyper_params", "def get_hyper_params():\n #################################\n ##### INSERT YOUR CODE HERE #####\n layers_size = [4096, 4096, 10]\n activation = 'relu'\n lr = 0.3\n epochs = 30\n dropout_rate = 0.2\n init_kind = 'xavier'\n ##### END YOUR CODE HERE ########\n #################################\n hyper_params = {\n 'layers_size': layers_size,\n 'activation': activation,\n 'lr': lr,\n 'epochs': epochs,\n 'init_kind': 
init_kind,\n 'dropout_rate': dropout_rate,\n }\n return hyper_params", "def __init__(self, space):\n super().__init__(space=space, constant=0)", "def create_spaces():\n\n data.load('spaces')\n\n mc.refresh()\n spaces.build_all()", "def make_default_hyperparameters(dim):\n return numpy.ones(dim + 1)", "def set_hyperparams(self, params):", "def test_create_hyperflex_cluster_storage_policy(self):\n pass", "def HyperbolicSpace(n):\n if n == 2:\n return HyperbolicPlane()\n raise NotImplementedError(\"currently only implemented in dimension 2\")", "def get_hyper_params():\n #################################\n ##### INSERT YOUR CODE HERE #####\n layers_size = [10]\n activation = 'relu'\n lr = 5e-4\n epochs = 30\n dropout_rate = 0.2\n init_kind = 'xavier'\n ##### END YOUR CODE HERE ########\n #################################\n hyper_params = {\n 'layers_size': layers_size,\n 'activation': activation,\n 'lr': lr,\n 'epochs': epochs,\n 'init_kind': init_kind,\n 'dropout_rate': dropout_rate,\n }\n return hyper_params", "def _create_algorithm(algo_name, algo_options, origin):\n if origin == \"nlopt\":\n algo = pg.algorithm(pg.nlopt(solver=algo_name))\n for option, val in algo_options.items():\n setattr(algo.extract(pg.nlopt), option, val)\n elif origin == \"pygmo\":\n pygmo_uda = getattr(pg, algo_name)\n algo_options = algo_options.copy()\n if \"popsize\" in algo_options:\n del algo_options[\"popsize\"]\n algo = pg.algorithm(pygmo_uda(**algo_options))\n\n return algo", "def make_operation_space():\n operation_space = {}\n\n # Set integInfo and integBranch\n operation_space['prepare_delenv'] = rmdmod.PrepareDelEnvOperation()\n\n # Call p4 integ for delete revisions\n operation_space['call_p4_integ'] = rmdmod.CallIntegOperation()\n\n # checkout README and place into a pending cln\n operation_space['create_changelist'] = rmdmod.CreateChangelistOperation()\n\n # open file for edit within changelist\n operation_space['reopen'] = rmdmod.ReopenOperation()\n\n # list history of deleted files\n operation_space['list_history'] = rmdmod.ListDelHistoryOperation()\n\n return operation_space", "def __init__(self, space):\n super().__init__(space=space, linear=False, grad_lipschitz=2)", "def test_create_hyperflex_auto_support_policy(self):\n pass", "def mod_space_opt(\n *,\n space,\n dryness_method,\n fuel_build_up_method,\n include_temperature,\n discrete_params,\n defaults=None,\n basinhopping_options=None,\n minimizer_options=None,\n mode=\"basinhopping\",\n x0=None,\n):\n to_optimise = gen_to_optimise(\n fail_func=fail_func,\n success_func=success_func,\n # Init (data) params.\n dryness_method=dryness_method,\n fuel_build_up_method=fuel_build_up_method,\n include_temperature=include_temperature,\n _uncached_data=False,\n **discrete_params,\n )\n\n defaults_dict = defaults if defaults is not None else {}\n\n def to_optimise_with_discrete(x):\n return to_optimise(\n **space.inv_map_float_to_0_1(dict(zip(space.continuous_param_names, x))),\n **defaults_dict,\n )\n\n def basinhopping_callback(x, f, accept):\n # NOTE: Parameters recorded here are authoritative, since hyperopt will not\n # properly report values modified as in e.g. 
`mod_quniform`.\n values = {\n **space.inv_map_float_to_0_1(dict(zip(space.continuous_param_names, x))),\n **discrete_params,\n **defaults_dict,\n }\n values[\"dryness_method\"] = dryness_method\n values[\"fuel_build_up_method\"] = fuel_build_up_method\n values[\"include_temperature\"] = include_temperature\n\n minimizer_options_dict = minimizer_options if minimizer_options is not None else {}\n basinhopping_options_dict = (\n basinhopping_options if basinhopping_options is not None else {}\n )\n\n if x0 is None:\n x0 = space.continuous_x0_mid\n\n if mode == \"basinhopping\":\n res = basinhopping(\n to_optimise_with_discrete,\n x0=x0,\n seed=0,\n callback=basinhopping_callback,\n take_step=BoundedSteps(\n stepsize=0.3, rng=np.random.default_rng(0), verbose=True\n ),\n **{\n \"disp\": True,\n \"minimizer_kwargs\": dict(\n method=\"L-BFGS-B\",\n jac=None,\n bounds=[(0, 1)] * len(space.continuous_param_names),\n options={\n \"maxiter\": 60,\n \"ftol\": 1e-5,\n \"eps\": 1e-3,\n **minimizer_options_dict,\n },\n ),\n \"T\": 0.05,\n \"niter\": 100,\n \"niter_success\": 15,\n **basinhopping_options_dict,\n },\n )\n elif mode == \"minimize\":\n res = minimize(\n to_optimise_with_discrete,\n x0=x0,\n method=\"L-BFGS-B\",\n jac=None,\n bounds=[(0, 1)] * len(space.continuous_param_names),\n options={\n \"maxiter\": 60,\n \"ftol\": 1e-5,\n \"eps\": 1e-3,\n **minimizer_options_dict,\n },\n )\n else:\n raise ValueError\n\n return res", "def create_host(self, conf, tenant_id, network_id, params):\n\t\tpass", "def test_create_hyperflex_node_config_policy(self):\n pass", "def hypernet_args(parser, dhyper_chunks=11010, dhnet_arch='50,50,50',\n dtemb_size=32, demb_size=32, dhnet_act='relu', prefix=None,\n pf_name=None):\n assert(prefix is None or pf_name is not None)\n\n heading = 'Hypernet options'\n\n if prefix is None:\n prefix = ''\n pf_name = ''\n else:\n heading = 'Hypernet options for %s network' % pf_name\n pf_name += ' '\n\n # Abbreviations.\n p = prefix\n n = pf_name\n\n ### CHypernet options\n agroup = parser.add_argument_group(heading)\n agroup.add_argument('--%shyper_chunks' % p, type=str, default=dhyper_chunks,\n help='The output size of the %shypernet. If -1, ' % n +\n 'then the hypernet output dimensionality is the ' +\n 'number of weights in the main network. ' +\n 'If it is a positive integer, the weights are ' +\n 'split into chunks and the hypernet gets as ' +\n 'additional input a trainable chunk-specific ' +\n 'embedding (see class ' +\n 'ChunkedHyperNetworkHandler for details). ' +\n 'If a string of two or three comma-separated ' +\n 'integers is provided, then a chunk of weights ' +\n 'is generated by a transpose convolutional ' +\n 'network with self-attention layers (see ' +\n 'class SAHyperNetwork for details). Default: ' +\n '%(default)s.')\n agroup.add_argument('--%shnet_arch' % p, type=str, default=dhnet_arch,\n help='A string of comma-separated integers, each ' +\n 'denoting the size of a hidden layer of the ' +\n '%shypernetwork. This option is discarded ' % n +\n 'when using SAHyperNetwork class. Default: ' +\n '%(default)s.')\n ### We decided to discard remaining weights rather\n ### than generating them by a seperate network.\n #'Note, this option ' +\n #'also determines the architecture of the ' +\n #'\"remaining weight generator\" (see constructor ' +\n #'argument \"rem_layers\" of class SAHyperNetwork ' +\n #'for details). 
The option does not apply for a ' +\n #'full hypernetwork!')\n agroup.add_argument('--%shnet_act' % p, type=str, default=dhnet_act,\n help='Activation function used in the hypernetwork. ' +\n 'If \"linear\", no activation function is used. ' +\n 'Default: %(default)s.',\n choices=['linear', 'sigmoid', 'relu', 'elu'])\n agroup.add_argument('--%stemb_size' % p, type=int, default=dtemb_size,\n help='Size of the task embedding space (input to ' +\n 'hypernet). Default: %(default)s.')\n agroup.add_argument('--%semb_size' % p, type=int, default=demb_size,\n help='If using a hypernetwork that utilizes chunking' +\n ', then this option defines the size of the ' +\n 'chunk embeddings. Default: %(default)s.')\n agroup.add_argument('--%shnet_noise_dim' % p, type=int, default=-1,\n help='During training, a zero-mean noise vector will ' +\n 'be concatenated to the task embeddings to help ' +\n 'regularize the task embedding space and the ' +\n 'hypernetwork itself. During testing, zeros ' +\n 'will be concatenated. Default: %(default)s.')\n agroup.add_argument('--%shnet_dropout_rate' % p, type=float, default=-1,\n help='Use dropout in the hypernet with the given ' +\n 'dropout probability (dropout is deactivated ' +\n 'for a rate of -1). Default: %(default)s.')\n agroup.add_argument('--%stemb_std' % p, type=float, default=-1,\n help='If not -1, then this number will be ' +\n 'interpreted as the std of zero-mean Gaussian ' +\n 'noise that is used to perturb task embeddings ' +\n 'during training (as a regularization ' +\n 'technique). Default: %(default)s.')\n # Specific to self-attention network!\n agroup.add_argument('--%ssa_hnet_num_layers' % p, type=int, default=5,\n help='Number of layers in the self-attention ' +\n 'hypernet. ' +\n 'See constructor argument \"num_layers\" of ' +\n 'class SAHyperNetwork for details. ' +\n 'Default: %(default)s.')\n agroup.add_argument('--%ssa_hnet_filters' % p, type=str,\n default='128,512,256,128',\n help='A string of comma-separated integers, each ' +\n 'indicating the number of output channels for a ' +\n 'layer in the self-attention hypernet. ' +\n 'See constructor argument \"num_filters\" of ' +\n 'class SAHyperNetwork for details. ' +\n 'Default: %(default)s.')\n agroup.add_argument('--%ssa_hnet_kernels' % p, type=str, default=5,\n help='A string of comma-separated integers, ' +\n 'indicating kernel sizes in the self-attention ' +\n 'hypernet. Note, to specify a distinct kernel ' +\n 'size per dimension of each layer, just enter a ' +\n 'list with twice the number of elements as ' +\n 'convolutional layers in the hypernet. ' +\n 'See constructor argument \"kernel_size\" of ' +\n 'class SAHyperNetwork for details. ' +\n 'Default: %(default)s.')\n agroup.add_argument('--%ssa_hnet_attention_layers' % p, type=str,\n default='1,3',\n help='A string of comma-separated integers, ' +\n 'indicating after which layers of the hypernet' +\n 'a self-attention unit should be added. ' +\n 'See constructor argument \"sa_units\" of ' +\n 'class SAHyperNetwork for details. 
' +\n 'Default: %(default)s.')\n return agroup", "def test_create_hyperflex_sys_config_policy(self):\n pass", "def create_model_optimizer(net,alpha):\n optimizer = chainer.optimizers.Adam(alpha=alpha)\n optimizer.setup(net)\n return optimizer", "def test_create_hyperflex_proxy_setting_policy(self):\n pass", "def create_hparams(experiment):\n hparams = {}\n\n # General parameters.\n hparams['batch_size'] = 64\n hparams['eval_batch_size'] = 64\n hparams['learning_rate_warmup_steps'] = 2000\n hparams['learning_rate_constant'] = 1\n hparams['learning_rate'] = 0.001\n hparams['train_epoches'] = 200\n hparams['steps_per_epoch'] = 30\n hparams['train_steps'] = 1000 * 1000\n hparams['eval_steps'] = 100\n hparams['caption_optimizer'] = 't2t'\n hparams['clip_norm'] = 5.0\n hparams['train_files'] = ''\n hparams['eval_files'] = ''\n hparams['train_buffer_size'] = 2000\n hparams['eval_buffer_size'] = 500\n hparams['train_pixel_encoder'] = True\n hparams['debug'] = False\n hparams['distribution_strategy'] = 'mirrored'\n\n # Embedding parameters.\n hparams['embedding_file'] = ''\n hparams['word_vocab_path'] = ''\n hparams['glove_trainable'] = True\n hparams['vocab_size'] = 10000\n\n # View hierarchy encoder parameters.\n hparams['max_pixel_pos'] = 100\n hparams['max_dom_pos'] = 500\n hparams['screen_encoder'] = 'pixel_transformer'\n hparams['screen_embedding_feature'] = ['text', 'type', 'pos', 'click', 'dom']\n hparams['obj_text_aggregation'] = 'max'\n hparams['synthetic_screen_noise'] = 0.\n\n # General parameters.\n hparams['num_hidden_layers'] = 2\n hparams['hidden_size'] = 2\n hparams['filter_size'] = 2\n hparams['num_heads'] = 2\n hparams['dropout'] = 0.2\n hparams['layer_prepostprocess_dropout'] = 0.2\n hparams['attention_dropout'] = 0.2\n hparams['relu_dropout'] = 0.2\n\n transformer_hparams = model_params.BASE_PARAMS\n\n # Add parameters from transformer model.\n hparams.update(transformer_hparams)\n\n # Rewrite all the parameters from command-line flags.\n config = screen2words_experiment_config.experiments[experiment]\n hparams.update(config)\n\n return hparams", "def test_create_hyperflex_feature_limit_internal(self):\n pass", "def createSplineWarpNodeMI():\n return gt()", "def get_hyperparams(self):", "def get_configspace():\r\n cs = CS.ConfigurationSpace()\r\n\r\n lr = CSH.UniformFloatHyperparameter('lr', lower=1e-6, upper=1e-1, default_value='1e-2', log=True)\r\n\r\n # For demonstration purposes, we add different optimizers as categorical hyperparameters.\r\n # To show how to use conditional hyperparameters with ConfigSpace, we'll add the optimizers 'Adam' and 'SGD'.\r\n # SGD has a different parameter 'momentum'.\r\n optimizer = CSH.CategoricalHyperparameter('optimizer', ['Adam', 'SGD'])\r\n\r\n sgd_momentum = CSH.UniformFloatHyperparameter('sgd_momentum', lower=0.0, upper=0.99, default_value=0.9, log=False)\r\n\r\n cs.add_hyperparameters([lr, optimizer, sgd_momentum])\r\n\r\n\r\n\r\n num_conv_layers = CSH.UniformIntegerHyperparameter('num_conv_layers', lower=1, upper=3, default_value=2)\r\n\r\n num_filters_1 = CSH.UniformIntegerHyperparameter('num_filters_1', lower=4, upper=64, default_value=16, log=True)\r\n num_filters_2 = CSH.UniformIntegerHyperparameter('num_filters_2', lower=4, upper=64, default_value=16, log=True)\r\n num_filters_3 = CSH.UniformIntegerHyperparameter('num_filters_3', lower=4, upper=64, default_value=16, log=True)\r\n\r\n cs.add_hyperparameters([num_conv_layers, num_filters_1, num_filters_2, num_filters_3])\r\n\r\n\r\n dropout_rate = 
CSH.UniformFloatHyperparameter('dropout_rate', lower=0.0, upper=0.9, default_value=0.5, log=False)\r\n num_fc_units = CSH.UniformIntegerHyperparameter('num_fc_units', lower=8, upper=256, default_value=32, log=True)\r\n\r\n cs.add_hyperparameters([dropout_rate, num_fc_units])\r\n\r\n\r\n # The hyperparameter sgd_momentum will be used,if the configuration\r\n # contains 'SGD' as optimizer.\r\n cond = CS.EqualsCondition(sgd_momentum, optimizer, 'SGD')\r\n cs.add_condition(cond)\r\n\r\n # You can also use inequality conditions:\r\n cond = CS.GreaterThanCondition(num_filters_2, num_conv_layers, 1)\r\n cs.add_condition(cond)\r\n\r\n cond = CS.GreaterThanCondition(num_filters_3, num_conv_layers, 2)\r\n cs.add_condition(cond)\r\n\r\n return cs", "def test_create_hyperflex_capability_info(self):\n pass", "def get_space():\n return {}", "def default_space(cls) -> SlimBenchmarkSpace:\n raise NotImplementedError()", "def create_hyper_parameter_tuning_job(HyperParameterTuningJobName=None, HyperParameterTuningJobConfig=None, TrainingJobDefinition=None, WarmStartConfig=None, Tags=None):\n pass", "def __init__(self, optimizer):\n super(ShardedOptimizer, self).__init__(optimizer, name=\"ShardedOptimizer\")", "def __init__(self, space, exponent):\n super().__init__(space=space, linear=False)\n self.__norm = LpNorm(space, exponent)\n self.__exponent = float(exponent)", "def test_create_hyperflex_cluster_profile(self):\n pass", "def _create_optimizer(self):\n\n with tf.name_scope(\"optimizer\"):\n self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)", "def test_create_hyperflex_node_profile(self):\n pass", "def __init__(self, space, exponent):\n self.exponent = float(exponent)\n super().__init__(space=space, linear=False, grad_lipschitz=np.nan)", "def addHyperparam(self, est, param, hyperParam):\n self.hyperparams[param] = (est, hyperParam)\n return self", "def prepare_disk_space_hyper_param_results(directory,\n **kwargs):\n \n # Generate string to identify\n params_str = iopes.generate_string_identifier(**kwargs)\n \n # Generate identification based on hyperparameters,\n # using table in .hdf5\n identifier = iopes.get_eda_params_path(directory=directory,\n params_str = params_str)\n \n # Create the path, if it doesnt exist\n path = directory + identifier + '/'\n import os\n if not os.path.exists(path):\n print 'The path doesnt exist. 
Creating...'\n os.mkdir(path) \n \n if os.path.exists(path): \n print 'Be careful the path already exists!'\n \n # Generate .txt file to backup the hyper_parameters\n iopes.generate_txt_file_params(path, params_str)\n \n return path", "def train_hyperopt(params):\n lasagne.random.set_rng(RandomState(9859295))\n\n template_name = params.pop('template_name') \n params = adjust_params_for_hyperopt(params)\n \n config_strings = create_config_strings(template_name)\n config_objects = create_config_objects(config_strings)\n templates, _ = create_templates_variants_from_config_objects(\n config_objects)\n \n \n processed_templates, params_without_template_params = process_templates(\n templates, params)\n final_params = process_parameters_by_templates(params_without_template_params, \n processed_templates)\n \n # go to directory above this source-file\n main_template_filename = os.path.dirname(os.path.abspath(os.path.dirname(\n __file__)))\n # then complete path to config\n main_template_filename = os.path.join(main_template_filename, \"configs\", \n \"eegnet_template.yaml\")\n \n with (open(main_template_filename, 'r')) as main_template_file:\n main_template_str = main_template_file.read()\n \n \n final_params['original_params'] = 'dummy'\n train_str = Template(main_template_str).substitute(final_params)\n \n def do_not_load_constructor(loader, node):\n return None\n yaml.add_constructor(u'!DoNotLoad', do_not_load_constructor)\n modified_train_str = train_str.replace('layers: ', 'layers: !DoNotLoad ')\n train_dict = yaml_parse.load(modified_train_str) \n dataset = train_dict['dataset'] \n dataset.load()\n dataset_provider = train_dict['dataset_provider']\n \n assert 'in_sensors' in train_str\n assert 'in_rows' in train_str\n assert 'in_cols' in train_str\n \n train_str = train_str.replace('in_sensors',\n str(dataset.get_topological_view().shape[1]))\n train_str = train_str.replace('in_rows',\n str(dataset.get_topological_view().shape[2]))\n train_str = train_str.replace('in_cols', \n str(dataset.get_topological_view().shape[3]))\n \n train_dict = yaml_parse.load(train_str)\n layers = train_dict['layers']\n final_layer = layers[-1]\n\n # turn off debug/info logging\n logging.getLogger(\"pylearn2\").setLevel(logging.WARN)\n logging.getLogger(\"braindecode\").setLevel(logging.WARN)\n exp = Experiment()\n exp.setup(final_layer, dataset_provider, **train_dict['exp_args'])\n exp.run()\n final_misclass = exp.monitor_chans['test_misclass'][-1]\n print(\"Result for\")\n pprint(params)\n print(\"Final Test misclass: {:5.4f}\".format(float(final_misclass)))\n return final_misclass", "def make_space_elements(space):\n return [\n html.Div(html.Hr()),\n html.Div(\"%s\" % space.title(), id=\"%s_title\" % space, style={\"font-size\": \"200%\"}),\n dcc.Graph(id=\"histogram_%s\" % space, figure=show_histogram(space, 0)),\n make_space_table(space),\n ]", "def get_configspace():\n configspace = cs.ConfigurationSpace()\n\n memory = cs.hyperparameters.UniformIntegerHyperparameter(name='memory', lower=2, upper=25)\n configspace.add_hyperparameter(hyperparameter=memory)\n\n batch_size = cs.hyperparameters.UniformIntegerHyperparameter(\n name='batch_size', lower=32, upper=8192, log=True\n )\n configspace.add_hyperparameter(hyperparameter=batch_size)\n\n frequency = cs.hyperparameters.UniformFloatHyperparameter(\n name='frequency', lower=3e-2, upper=1.0, log=True\n )\n configspace.add_hyperparameter(hyperparameter=frequency)\n\n learning_rate = cs.hyperparameters.UniformFloatHyperparameter(\n name='learning_rate', 
lower=1e-5, upper=3e-2, log=True\n )\n configspace.add_hyperparameter(hyperparameter=learning_rate)\n\n horizon = cs.hyperparameters.UniformIntegerHyperparameter(\n name='horizon', lower=1, upper=50\n )\n configspace.add_hyperparameter(hyperparameter=horizon)\n\n discount = cs.hyperparameters.UniformFloatHyperparameter(\n name='discount', lower=0.8, upper=1.0, log=True\n )\n configspace.add_hyperparameter(hyperparameter=discount)\n\n ratio_based = cs.hyperparameters.CategoricalHyperparameter(\n name='ratio_based', choices=('no', 'yes')\n )\n configspace.add_hyperparameter(hyperparameter=ratio_based)\n\n clipping_value = cs.hyperparameters.UniformFloatHyperparameter(\n name='clipping_value', lower=0.05, upper=0.5\n )\n configspace.add_hyperparameter(hyperparameter=clipping_value)\n\n baseline = cs.hyperparameters.CategoricalHyperparameter(\n name='baseline',\n choices=('no', 'auto', 'same-network', 'same-policy', 'same-policy-noopt')\n )\n configspace.add_hyperparameter(hyperparameter=baseline)\n\n baseline_learning_rate = cs.hyperparameters.UniformFloatHyperparameter(\n name='baseline_learning_rate', lower=1e-5, upper=3e-2, log=True\n )\n configspace.add_hyperparameter(hyperparameter=baseline_learning_rate)\n\n estimate_advantage = cs.hyperparameters.CategoricalHyperparameter(\n name='estimate_advantage', choices=('no', 'yes')\n )\n configspace.add_hyperparameter(hyperparameter=estimate_advantage)\n\n entropy_regularization = cs.hyperparameters.UniformFloatHyperparameter(\n name='entropy_regularization', lower=1e-5, upper=1.0, log=True\n )\n configspace.add_hyperparameter(hyperparameter=entropy_regularization)\n\n configspace.add_condition(\n condition=cs.EqualsCondition(child=clipping_value, parent=ratio_based, value='yes')\n )\n\n configspace.add_condition(\n condition=cs.NotEqualsCondition(\n child=baseline_learning_rate, parent=baseline, value='no'\n )\n )\n\n configspace.add_condition(\n condition=cs.NotEqualsCondition(\n child=estimate_advantage, parent=baseline, value='no'\n )\n )\n\n return configspace", "def __init__(self, input_size, hidden_size, output_size, std=1e-4):\n self.params = {}\n self.params['W1'] = std * np.random.randn(input_size, hidden_size)\n self.params['b1'] = np.zeros(hidden_size)\n self.params['W2'] = std * np.random.randn(hidden_size, output_size)\n self.params['b2'] = np.zeros(output_size)", "def __init__(self,func ,domain_space, max_evals = 10):\n self.func = func\n # optimizing for FLOAT values\n #self.space = hp.uniform('x', 36, 200)\n # optimizing for Integer values\n self.space = domain_space\n self.algorithm = tpe.suggest # creating algorithm\n self.trials = Trials() # to check records\n self.max_evals = max_evals", "def __init__(self):\n self.space = 1000\n self.hash_table = [Node(-1, -1)] * self.space", "def __init__(self,\n weight_decay,\n global_step,\n max_matrix_size=768,\n gbar_decay=0.0,\n gbar_weight=1.0,\n mat_gbar_decay=1.0,\n mat_gbar_weight=1.0,\n learning_rate=1.0,\n svd_interval=1,\n precond_update_interval=1,\n epsilon=1e-4,\n alpha=0.5,\n use_iterative_root=False,\n use_locking=False,\n name=\"ShampooW\"):\n super(ShampooWOptimizer, self).__init__(\n weight_decay,\n global_step=global_step,\n max_matrix_size=max_matrix_size,\n gbar_decay=gbar_decay,\n gbar_weight=gbar_weight,\n mat_gbar_decay=mat_gbar_weight,\n learning_rate=learning_rate,\n svd_interval=svd_interval,\n precond_update_interval=precond_update_interval,\n epsilon=epsilon,\n alpha=alpha,\n use_iterative_root=use_iterative_root,\n use_locking=use_locking,\n name=name)", 
"def create_optimizer(hparams):\n\n if hparams.optimizer == 'momentum':\n optimizer = tf.train.MomentumOptimizer(\n learning_rate=hparams.learning_rate, momentum=hparams.momentum)\n elif hparams.optimizer == 'adam':\n optimizer = tf.train.AdamOptimizer(\n learning_rate=hparams.learning_rate)\n elif hparams.optimizer == 'adadelta':\n optimizer = tf.train.AdadeltaOptimizer(\n hparams.learning_rate)\n elif hparams.optimizer == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(\n hparams.learning_rate)\n elif hparams.optimizer == 'rmsprop':\n optimizer = tf.train.RMSPropOptimizer(\n hparams.learning_rate, momentum=hparams.momentum)\n\n return optimizer", "def set_hyper_parameters(self, x):\n self.set_width(x[0])\n self.set_expansion_steps(x[1])", "def get_configspace() -> CS.Configuration:\n cs = CS.ConfigurationSpace(seed=0)\n # START TODO ################\n lr_hp = CS.UniformFloatHyperparameter('lr', lower=1e-6, upper=1e-1, default_value=1e-2, log=True)\n optimizer_hp = CSH.CategoricalHyperparameter(name='optimizer', choices=['Adam', 'SGD', 'RMSprop'])\n sgd_momentum_hp = CS.UniformFloatHyperparameter('sgd_momentum', lower=0.00, upper=0.99, default_value=0.9)\n\n rms_momentum_hp = CS.UniformFloatHyperparameter('rms_momentum', lower=0.00, upper=0.99, default_value=0.9)\n rms_alpha_hp = CS.UniformFloatHyperparameter('rms_alpha', lower=0.00, upper=0.99, default_value=0.99)\n\n scheduler_hp = CSH.CategoricalHyperparameter(name='scheduler',\n choices=['CosineAnnealingLR', 'CosineAnnealingWarmRestarts'])\n cosine_max_t_hp = CS.UniformIntegerHyperparameter(name='cosine_max_t', lower=50, upper=300, default_value=150)\n cosine_warm_hp = CS.UniformIntegerHyperparameter(name='warm_t_0', lower=50, upper=300, default_value=150)\n\n sgd_cond = CS.EqualsCondition(sgd_momentum_hp, optimizer_hp, 'SGD')\n rms_cond1 = CS.EqualsCondition(rms_momentum_hp, optimizer_hp, 'RMSprop')\n rms_cond2 = CS.EqualsCondition(rms_alpha_hp, optimizer_hp, 'RMSprop')\n cosine_warm_cond = CS.EqualsCondition(cosine_warm_hp, scheduler_hp, 'CosineAnnealingWarmRestarts')\n cosine_cond = CS.EqualsCondition(cosine_max_t_hp, scheduler_hp, 'CosineAnnealingLR')\n cs.add_hyperparameters([lr_hp, optimizer_hp, sgd_momentum_hp, rms_momentum_hp,\n rms_alpha_hp, scheduler_hp, cosine_max_t_hp, cosine_warm_hp])\n cs.add_conditions([sgd_cond, rms_cond1, rms_cond2, cosine_cond, cosine_warm_cond])\n # END TODO ################\n return cs", "def __init__(self, env):\n self.env = env\n # set up observation space\n high = np.inf\n low = -high\n\n obs_spec = env.observation_spec()\n\n space_spec = {}\n\n for k,v in obs_spec.items():\n space_spec[k]=spaces.Box(low=low,high=high, shape=v)\n\n\n self.observation_space = spaces.Dict(space_spec)\n\n # setup action space\n low, high = self.env.action_spec\n self.action_space = spaces.Box(low=low, high=high)\n\n self.reward_range = self.env.reward_range", "def create_space(self):\n schema = SpaceSchema()\n data = {'show': str(self.show_id), 'show_title': self.show_title, 'space': self.location_array}\n space_object = schema.load(data)\n return space_object", "def getSpace(*args):", "def test_create_hyperflex_hxdp_version(self):\n pass", "def __init__(self, opt):\n self.scat = Scattering(M=opt.N, N=opt.N, J=opt.scat, pre_pad=False).cuda() \n super(ScatModule, self).__init__()", "def __init__(self, insert_cost=1, deletion_cost=1, subst_cost=1):\r\n self._insert_cost = insert_cost\r\n self._deletion_cost = deletion_cost\r\n self._subst_cost = subst_cost", "def _create(self, name):\n command = [\n 'ipset create 
-exist ' + name + ' hash:net family inet maxelem 536870912',\n ]\n self.__run(command)", "def createSpSwConstraint(parents, target, enumNames, niceNames=['Space'],constrType='parent',constrTarget=''):\n if constrTarget == '':\n if target.endswith('_CTRL'):\n stripName=target.rpartition('_')\n constrTarget=stripName[0]+'Ctrl_ROOT'\n else:\n constrTarget=target\n\n if niceNames <= 1:\n niceName=niceNames\n else:\n niceName=''\n for i,x in enumerate(niceNames):\n if i < len(niceNames)-1:\n niceName=niceName+x+' / '\n else:\n niceName=niceName+x\n\n existingAttr=cmds.listAttr(target)\n constr=eval('cmds.'+constrType+'Constraint(parents,constrTarget,mo=True)')\n if 'spSwSep' not in existingAttr:\n cmds.addAttr(target, ln='spSwSep', nn='___ Space Switching', at='enum', en='___', k=True)\n cmds.addAttr(target, ln='spaceSwitch', nn=niceName+' Switch', at='enum', en=enumNames, k=True)\n for i,x in enumerate(parents):\n if not i == 1:\n rev=cmds.createNode('reverse', n=target+'spaceSwitch_REV')\n cmds.connectAttr(target+'.spaceSwitch',rev+'.inputX')\n cmds.connectAttr(rev+'.outputX', constr[0]+'.'+x+'W'+str(i))\n else:\n cmds.connectAttr(target+'.spaceSwitch', constr[0]+'.'+x+'W'+str(i))", "def optimize(opt, target, n_agents, n_variables, n_iterations, lb, ub, hyperparams):\n\n # Creating the SearchSpace\n space = SearchSpace(n_agents=n_agents, n_variables=n_variables,\n n_iterations=n_iterations, lower_bound=lb, upper_bound=ub)\n\n # Creating the Function\n function = Function(pointer=target)\n\n # Creating Optimizer\n if opt.__name__ is not 'BH':\n optimizer = opt(hyperparams=hyperparams)\n else:\n optimizer = opt()\n\n # Creating the optimization task\n task = Opytimizer(space=space, optimizer=optimizer, function=function)\n\n return task.start(store_best_only=True)", "def _setup_spaces(self):\n # Actions are the changes in weights of risky\n N = self.n_risky_assets\n self.action_space = gym.spaces.Box( low = -np.ones( (N,) ), \n high = +np.ones( (N,) ) )\n \n # Define the dimensions of the observation space, starting with the portfolio value & weights\n param_ranges = self.asset_process.get_parameter_ranges()\n min_asset_val, max_asset_val = -np.inf, np.inf\n low = min_asset_val * np.ones((N+1,))\n high = max_asset_val * np.ones((N+1,))\n \n if self.benchmark_weights is not None:\n # Repeat the low / high limits for the benchmark\n low = np.hstack( [ low, low ] )\n high = np.hstack( [ high, high ] )\n \n # Add the parameter ranges\n low = np.hstack( [ low, param_ranges.low ] )\n high = np.hstack( [ high, param_ranges.high ] )\n \n # Add the timestamp, for non-recurrent environments\n if not self.is_recurrent:\n low = np.hstack( [ 0, low ] )\n high = np.hstack( [ self.max_episode_steps, high ] )\n \n self.observation_space = gym.spaces.Box( low=low, high=high )", "def createGridWarpNodeMI():\n return gy()", "def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):\n\n self.params = {}\n self.params['W1'] = weight_init_std * \\\n np.random.randn(input_size, hidden_size)\n self.params['b1'] = np.zeros(hidden_size)\n self.params['W2'] = weight_init_std * \\\n np.random.randn(hidden_size, output_size)\n self.params['b2'] = np.zeros(output_size)", "def createConstraint(*argv):", "def test_create_hyperflex_ext_iscsi_storage_policy(self):\n pass", "def initOpt(self):\n\t\tself.optNodes=[]\n\t\tself.optNode=-1\n\t\tif self.m.headType=='Bracke':\n\t\t\tbracke=True #one head per decice\n\t\telse:\n\t\t\tbracke=False\n\t\tif '2a' in self.m.type:\n\t\t\t#this is strictly for 2000 
plants/ha, i.e 10 spots per half circle and [4,9]m crane dimensions\n\t\t\tw1 = 1.3\n\t\t\tw2 = 1.0\n\t\t\tif self.mountPoint is 'left':\n\t\t\t\tfor r in [self.m.craneMaxL-w2, self.m.craneMinL+w2]:\n\t\t\t\t\tth=pi-asin(w1/r)\n\t\t\t\t\tself.optNodes.append(self.m.getCartesian([r,th]))\n\t\t\t\t\tself.idealSpots.append(self.m.getCartesian([r,th]))\n\t\t\t\tdth=(pi-2*asin(w1/r))/3.\n\t\t\t\tth-=dth\n\t\t\t\tself.optNodes.append(self.m.getCartesian([r,th]))\n\t\t\t\tself.idealSpots.append(self.m.getCartesian([r,th]))\n\t\t\t\tr=self.m.craneMaxL-w2\n\t\t\t\tth=pi-asin(w1/r)\n\t\t\t\tdth=(pi-asin(w1/r))/5. #outer\t\t\t\n\t\t\t\tth-=3*dth\n\t\t\t\tfor th in [th, th-dth]:\n\t\t\t\t\tself.optNodes.append(self.m.getCartesian([r,th]))\n\t\t\t\t\tself.idealSpots.append(self.m.getCartesian([r,th]))\n\t\t\telse:\n\t\t\t\tr=self.m.craneMaxL-w2\n\t\t\t\tth=pi-asin(w1/r)\n\t\t\t\tdth=(pi-asin(w1/r))/5. #outer\n\t\t\t\tth-=dth\n\t\t\t\tfor th in [th, th-dth]:\n\t\t\t\t\tself.optNodes.append(self.m.getCartesian([r,th]))\n\t\t\t\t\tself.idealSpots.append(self.m.getCartesian([r,th]))\n\t\t\t\tr=self.m.craneMinL+w2\n\t\t\t\tdth=(pi-2*asin(w1/r))/3.\n\t\t\t\tth=pi-asin(w1/r)-2.*dth\n\t\t\t\tfor th in [th, th-dth]:\n\t\t\t\t\tself.optNodes.append(self.m.getCartesian([r,th]))\n\t\t\t\t\tself.idealSpots.append(self.m.getCartesian([r,th]))\n\t\t\t\tr=self.m.craneMaxL-w2\n\t\t\t\tth=asin(w1/r)\n\t\t\t\tself.optNodes.append(self.m.getCartesian([r,th]))\n\t\t\t\tself.idealSpots.append(self.m.getCartesian([r,th]))\n\t\telse:\n\t\t\tassert len(self.m.pDevs)==0 or len(self.m.pDevs)==1 and self.m.pDevs[0]==self\n\t\t\tw1 = self.plantAreaW/2.\n\t\t\tw2 = self.plantAreaL/2.\n\t\t\tif bracke:\n\t\t\t\tspaceMin=self.m.plantMinDist\n\t\t\telse:\n\t\t\t\tspaceMin=self.plantAreaW-self.plantHeads[0].width+self.m.plantMinDist #minimum spacing for angular movements.\n\t\t\tn=ceil(self.m.nSeedlingsPWArea/len(self.plantHeads)) #due to several plantHeads per device\n\t\t\tnLeft=n\n\t\t\tlInner = (self.m.craneMinL+w2)*(pi-2*asin(w1/(self.m.craneMinL+w2)))\n\t\t\tsLength = sqrt(pow(self.m.craneMaxL-w2,2)-pow(w1,2))-sqrt(pow(self.m.craneMinL+w2,2)-pow(w1,2))\n\t\t\tlOuter =(self.m.craneMaxL-w2)*(pi-2*asin(w1/(self.m.craneMaxL-w2)))\n\t\t\tlMiddle=0\n\t\t\trList=[self.m.craneMinL+w2, 'border', self.m.craneMaxL-w2]\n\t\t\tlTot=lInner+sLength+lOuter\n\t\t\trMiddle=-1\n\t\t\tdr=self.m.craneMaxL-w2-(self.m.craneMinL+w2)\n\t\t\tif dr>2*self.m.plantMinDist: #create another sweep\n\t\t\t\trMiddle=(self.m.craneMaxL-w2)-dr/2.\n\t\t\t\tlMiddle=rMiddle*(pi-2*asin(w1/rMiddle))\n\t\t\t\trList.append(rMiddle)\n\t\t\t\tlTot+=lMiddle\n\t\t\tlCurr=0\n\t\t\tfor r in rList:\n\t\t\t\tif r is 'border':\n\t\t\t\t\tr=self.m.craneMinL+w2\n\t\t\t\t\tL=sLength\n\t\t\t\t\tnSection=nLeft*(L/(lTot-lCurr))\n\t\t\t\t\t#dr=(L-2*dr)/nSection =>\n\t\t\t\t\tdr=L/(nSection+2.)\n\t\t\t\t\tif dr<self.m.plantMinDist: dr=self.m.plantMinDist\n\t\t\t\t\ta=0\n\t\t\t\t\twhile r<(self.m.craneMaxL-w2)-2*dr:\n\t\t\t\t\t\tr+=dr\n\t\t\t\t\t\tth=asin(w1/(r))\n\t\t\t\t\t\tself.optNodes.append(self.m.getCartesian([r,th]))\n\t\t\t\t\t\tself.idealSpots.append(self.m.getCartesian([r,th]))\n\t\t\t\t\t\ta+=1\n\t\t\t\telse:\n\t\t\t\t\tL=r*(pi-2*asin(w1/r))\n\t\t\t\t\tnSection=nLeft*(L/(lTot-lCurr)) #how much to plant on this section\n\t\t\t\t\tdth=(pi-2*asin(w1/r))/nSection\n\t\t\t\t\tif dth*r < spaceMin: dth=spaceMin/r\n\t\t\t\t\tif r == self.m.craneMinL+w2 or 
r==rMiddle:\n\t\t\t\t\t\tdth=-dth\n\t\t\t\t\t\tth=pi-asin(w1/(r))\n\t\t\t\t\telse:\n\t\t\t\t\t\tth=asin(w1/(r))\n\t\t\t\t\ta=0\n\t\t\t\t\twhile abs(th-pi/2.)-0.00001<=(pi-2*asin(w1/r))/2.:\n\t\t\t\t\t\tself.optNodes.append(self.m.getCartesian([r,th]))\n\t\t\t\t\t\tself.idealSpots.append(self.m.getCartesian([r,th]))\n\t\t\t\t\t\tth+=dth\n\t\t\t\t\t\ta+=1\n\t\t\t\tif a<nSection: #if spaceMin got into it and interfered.\n\t\t\t\t\tnSection=a\n\t\t\t\tnLeft-=nSection\n\t\t\t\tlCurr+=L", "def pgd_create_adv_graph(self, sess, X, y, eps, eta, scope):\n #with tf.variable_scope(scope, reuse = tf.AUTO_REUSE) as scope:\n #temp = set(tf.all_variables())\n init_delta = tf.random_uniform(shape = tf.shape(self.x), minval = -eps, maxval = eps)\n delta = tf.Variable(init_delta, name = \"delta\", validate_shape = False)\n x_tilde = self.x + delta\n\n #New predictions and loss - call to model will reuse learned weights\n activations, predictions = model(x_tilde, self.hidden_sizes, self.num_classes, self.sigma)\n loss_vector = tf.nn.softmax_cross_entropy_with_logits(logits=predictions, labels=self.y)\n loss_tilde = tf.reduce_mean(loss_vector)\n\n #Gradient step, project step and then assign\n optimization_step = tf.assign(delta, tf.squeeze(tf.clip_by_value(delta + eta * tf.math.sign(tf.gradients(loss_tilde, delta)), clip_value_min = -eps, clip_value_max = eps)))\n\n #sess.run(tf.initialize_variables(set(tf.all_variables()) - temp))\n return optimization_step, x_tilde, loss_tilde, delta", "def __init__(self, spacing, is_homogeneous):\n Layout.__init__(self)\n self.spacing = spacing\n self.is_homogeneous = is_homogeneous", "def __init__(self,\n learning_rate=0.001,\n beta1=0.9,\n use_locking=False,\n name=\"GGT\",\n window=10,\n eps=1e-4,\n svd_eps=1e-6,\n sigma_eps=1e-2):\n super(GGTOptimizer, self).__init__(use_locking, name)\n self._set_hyper(\"lr\", learning_rate)\n self._set_hyper(\"beta1\", beta1)\n self._set_hyper(\"window\", window)\n self._set_hyper(\"eps\", eps)\n self._set_hyper(\"svd_eps\", svd_eps)\n self._set_hyper(\"sigma_eps\", sigma_eps)\n\n self.index_dict = {}\n self.shape_dict = {}", "def generateTopology():\n switches = {}\n interfaces = {}\n links = {}\n return (switches,links)", "def _init_hyperparam(self, **p_par):\r\n \r\n try:\r\n p_input_size = self._input_space.get_num_dim()\r\n p_output_size = self._output_space.get_num_dim()\r\n except:\r\n raise ParamError('Input size and/or output size of the network are not defined.')\r\n \r\n if 'p_update_rate' not in p_par:\r\n p_par['p_update_rate'] = 1\r\n elif p_par.get('p_update_rate') < 1:\r\n raise ParamError(\"p_update_rate must be equal or higher than 1.\")\r\n \r\n if 'p_num_hidden_layers' not in p_par:\r\n raise ParamError(\"p_num_hidden_layers is not defined.\")\r\n \r\n if 'p_output_activation_fct' not in p_par:\r\n p_par['p_output_activation_fct'] = None\r\n \r\n if 'p_optimizer' not in p_par:\r\n raise ParamError(\"p_optimizer is not defined.\")\r\n \r\n if 'p_loss_fct' not in p_par:\r\n raise ParamError(\"p_loss_fct is not defined.\")\r\n\r\n if 'p_test_data' not in p_par:\r\n p_par['p_test_data'] = 0.3\r\n\r\n if 'p_batch_size' not in p_par:\r\n p_par['p_batch_size'] = 100\r\n\r\n if 'p_seed_buffer' not in p_par:\r\n p_par['p_seed_buffer'] = 1\r\n\r\n if 'p_learning_rate' not in p_par:\r\n p_par['p_learning_rate'] = 3e-4\r\n \r\n if 'p_hidden_size' not in p_par:\r\n raise ParamError(\"p_hidden_size is not defined.\")\r\n try:\r\n if len(p_par['p_hidden_size']) != p_par['p_num_hidden_layers']:\r\n raise ParamError(\"length of 
p_hidden_size list must be equal to p_num_hidden_layers or an integer.\")\r\n except:\r\n p_par['p_hidden_size'] = [int(p_par['p_hidden_size'])] * int(p_par['p_num_hidden_layers'])\r\n \r\n if 'p_activation_fct' not in p_par:\r\n raise ParamError(\"p_activation_fct is not defined.\")\r\n try:\r\n if len(p_par['p_activation_fct']) != p_par['p_num_hidden_layers']:\r\n raise ParamError(\"length of p_activation_fct list must be equal to p_num_hidden_layers or a single activation function.\")\r\n except:\r\n if isinstance(p_par['p_activation_fct'], list):\r\n raise ParamError(\"length of p_activation_fct list must be equal to p_num_hidden_layers or a single activation function.\")\r\n else:\r\n p_par['p_activation_fct'] = [p_par['p_activation_fct']] * int(p_par['p_num_hidden_layers'])\r\n \r\n if 'p_weight_bias_init' not in p_par:\r\n p_par['p_weight_bias_init'] = True\r\n \r\n if p_par['p_weight_bias_init']:\r\n if 'p_weight_init' not in p_par:\r\n p_par['p_weight_init'] = torch.nn.init.orthogonal_\r\n \r\n if 'p_bias_init' not in p_par:\r\n p_par['p_bias_init'] = lambda x: torch.nn.init.constant_(x, 0)\r\n \r\n if 'p_gain_init' not in p_par:\r\n p_par['p_gain_init'] = np.sqrt(2)\r\n \r\n self._hyperparam_space.add_dim(HyperParam('p_input_size','Z'))\r\n self._hyperparam_space.add_dim(HyperParam('p_output_size','Z'))\r\n self._hyperparam_space.add_dim(HyperParam('p_update_rate','Z'))\r\n self._hyperparam_space.add_dim(HyperParam('p_num_hidden_layers','Z'))\r\n self._hyperparam_space.add_dim(HyperParam('p_hidden_size','Z'))\r\n self._hyperparam_space.add_dim(HyperParam('p_activation_fct'))\r\n self._hyperparam_space.add_dim(HyperParam('p_output_activation_fct'))\r\n self._hyperparam_space.add_dim(HyperParam('p_optimizer'))\r\n self._hyperparam_space.add_dim(HyperParam('p_loss_fct'))\r\n self._hyperparam_space.add_dim(HyperParam('p_test_data'))\r\n self._hyperparam_space.add_dim(HyperParam('p_batch_size'))\r\n self._hyperparam_space.add_dim(HyperParam('p_seed_buffer'))\r\n self._hyperparam_space.add_dim(HyperParam('p_learning_rate'))\r\n self._hyperparam_space.add_dim(HyperParam('p_weight_bias_init'))\r\n self._hyperparam_space.add_dim(HyperParam('p_weight_init'))\r\n self._hyperparam_space.add_dim(HyperParam('p_bias_init'))\r\n self._hyperparam_space.add_dim(HyperParam('p_gain_init'))\r\n self._hyperparam_tuple = HyperParamTuple(self._hyperparam_space)\r\n \r\n ids_ = self.get_hyperparam().get_dim_ids()\r\n self.get_hyperparam().set_value(ids_[0], p_input_size)\r\n self.get_hyperparam().set_value(ids_[1], p_output_size)\r\n self.get_hyperparam().set_value(ids_[2], p_par['p_update_rate'])\r\n self.get_hyperparam().set_value(ids_[3], p_par['p_num_hidden_layers'])\r\n self.get_hyperparam().set_value(ids_[4], p_par['p_hidden_size'])\r\n self.get_hyperparam().set_value(ids_[5], p_par['p_activation_fct'])\r\n self.get_hyperparam().set_value(ids_[6], p_par['p_output_activation_fct'])\r\n self.get_hyperparam().set_value(ids_[7], p_par['p_optimizer'])\r\n self.get_hyperparam().set_value(ids_[8], p_par['p_loss_fct'])\r\n self.get_hyperparam().set_value(ids_[9], p_par['p_test_data'])\r\n self.get_hyperparam().set_value(ids_[10], p_par['p_batch_size'])\r\n self.get_hyperparam().set_value(ids_[11], p_par['p_seed_buffer'])\r\n self.get_hyperparam().set_value(ids_[12], p_par['p_learning_rate'])\r\n self.get_hyperparam().set_value(ids_[13], p_par['p_weight_bias_init'])\r\n self.get_hyperparam().set_value(ids_[14], p_par['p_weight_init'])\r\n self.get_hyperparam().set_value(ids_[15], p_par['p_bias_init'])\r\n 
self.get_hyperparam().set_value(ids_[16], p_par['p_gain_init'])", "def create_boundary_hyp_space(n_features):\n hyp_space = []\n for i in range(n_features + 1):\n hyp = [1 for _ in range(n_features)]\n hyp[n_features-i:n_features] = [0 for _ in range(i)]\n hyp_space.append(hyp)\n hyp_space = np.array(hyp_space)\n return hyp_space", "def _starting_hparams():\n hparams = contrib_training.HParams()\n hparams.add_hparam('batch_style', 'bucket')\n hparams.add_hparam('gradient_clipping_decay', 0.9999)\n hparams.add_hparam('learning_rate', 0.0005)\n hparams.add_hparam('lr_decay_rate', .997)\n hparams.add_hparam('lr_decay_steps', 1000)\n hparams.add_hparam('lr_warmup_steps', 3000)\n hparams.add_hparam('model_type', 'cnn')\n hparams.add_hparam('resnet_bottleneck_factor', 0.5)\n hparams.add_hparam('decision_threshold', 0.5)\n hparams.add_hparam('denominator_power', 1.0) # Standard mean-pooling.\n return hparams", "def create_dim(self, dimname, size=None):\n raise NotImplementedError", "def create(args):\n print('Creates an HPC fleet with given name \"{}\"'.format(args.fleet_name))", "def test_create_hyperflex_feature_limit_external(self):\n pass", "def state_space(self):\n sys = StateSpace(dqdt=nonlinear_model, signature='t,q,u,p', verbose=False)\n return sys", "def test_create(self):\n f = azplugins.restrain.plane(group=hoomd.group.all(), point=(0,0,0), normal=(1,0,0), k=2.0)\n\n f.set_params(k=5.0)\n f.set_params(k=8)\n\n f.set_params(point=(0,0,1))\n f.set_params(point=[0,0,1])\n f.set_params(point=np.array([0,0,1]))\n\n f.set_params(normal=(0,0,1))\n f.set_params(normal=[0,0,1])\n f.set_params(normal=np.array([0,0,1]))\n\n f.set_params(point=(0,0,0), normal=(1,0,0), k=10.0)", "def newPool(name: str, superPool, types: [], cls):\n try:\n if name == \"colorholder\":\n superPool = P0(len(types), cls)\n return superPool\n elif name == \"abstractnode\":\n superPool = P1(len(types), cls)\n return superPool\n elif name == \"node\":\n superPool = P2(len(types), superPool, cls)\n return superPool\n \n elif name == \"subnode\":\n superPool = P3(len(types), superPool, cls)\n return superPool\n \n else:\n if superPool is None:\n superPool = BasePool(len(types), name, StoragePool.noKnownFields, StoragePool.noAutoFields, cls)\n else:\n superPool = superPool.makeSubPool(len(types), name, cls)\n return superPool\n finally:\n types.append(superPool)", "def __init__(self, space, constant=0):\n self.__constant = constant\n super().__init__(space, linear=False)", "def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):\n super().__init__(env)\n self._width = width\n self._height = height\n self._grayscale = grayscale\n self._key = dict_space_key\n if self._grayscale:\n num_colors = 1\n else:\n num_colors = 3\n\n new_space = gym.spaces.Box(\n low=0,\n high=255,\n shape=(self._height, self._width, num_colors),\n dtype=np.uint8,\n )\n if self._key is None:\n original_space = self.observation_space\n self.observation_space = new_space\n else:\n original_space = self.observation_space.spaces[self._key]\n self.observation_space.spaces[self._key] = new_space\n assert original_space.dtype == np.uint8 and len(original_space.shape) == 3", "def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):\n super().__init__(env)\n self._width = width\n self._height = height\n self._grayscale = grayscale\n self._key = dict_space_key\n if self._grayscale:\n num_colors = 1\n else:\n num_colors = 3\n\n new_space = gym.spaces.Box(\n low=0,\n high=255,\n shape=(self._height, self._width, 
num_colors),\n dtype=np.uint8,\n )\n if self._key is None:\n original_space = self.observation_space\n self.observation_space = new_space\n else:\n original_space = self.observation_space.spaces[self._key]\n self.observation_space.spaces[self._key] = new_space\n assert original_space.dtype == np.uint8 and len(original_space.shape) == 3", "def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):\n super().__init__(env)\n self._width = width\n self._height = height\n self._grayscale = grayscale\n self._key = dict_space_key\n if self._grayscale:\n num_colors = 1\n else:\n num_colors = 3\n\n new_space = gym.spaces.Box(\n low=0,\n high=255,\n shape=(num_colors, self._height, self._width),\n dtype=np.uint8,\n )\n if self._key is None:\n original_space = self.observation_space\n self.observation_space = new_space\n else:\n original_space = self.observation_space.spaces[self._key]\n self.observation_space.spaces[self._key] = new_space\n assert original_space.dtype == np.uint8 and len(original_space.shape) == 3", "def create(self, spec, force_cache=False, image_dir=\"~/.hyperkit\"):", "def create_hparams(hparam_string=None):\n hparams = tf.contrib.training.HParams(\n # The name of the architecture to use.\n arch='resnet',\n lrelu_leakiness=0.2,\n batch_norm_decay=0.9,\n weight_decay=1e-5,\n normal_init_std=0.02,\n generator_kernel_size=3,\n discriminator_kernel_size=3,\n\n # Stop training after this many examples are processed\n # If none, train indefinitely\n num_training_examples=0,\n\n # Apply data augmentation to datasets\n # Applies only in training job\n augment_source_images=False,\n augment_target_images=False,\n\n # Discriminator\n # Number of filters in first layer of discriminator\n num_discriminator_filters=64,\n discriminator_conv_block_size=1, # How many convs to have at each size\n discriminator_filter_factor=2.0, # Multiply # filters by this each layer\n # Add gaussian noise with this stddev to every hidden layer of D\n discriminator_noise_stddev=0.2, # lmetz: Start seeing results at >= 0.1\n # If true, add this gaussian noise to input images to D as well\n discriminator_image_noise=False,\n discriminator_first_stride=1, # Stride in first conv of discriminator\n discriminator_do_pooling=False, # If true, replace stride 2 with avg pool\n discriminator_dropout_keep_prob=0.9, # keep probability for dropout\n\n # DCGAN Generator\n # Number of filters in generator decoder last layer (repeatedly halved\n # from 1st layer)\n num_decoder_filters=64,\n # Number of filters in generator encoder 1st layer (repeatedly doubled\n # after 1st layer)\n num_encoder_filters=64,\n\n # This is the shape to which the noise vector is projected (if we're\n # transferring from noise).\n # Write this way instead of [4, 4, 64] for hparam search flexibility\n projection_shape_size=4,\n projection_shape_channels=64,\n\n # Indicates the method by which we enlarge the spatial representation\n # of an image. Possible values include:\n # - resize_conv: Performs a nearest neighbor resize followed by a conv.\n # - conv2d_transpose: Performs a conv2d_transpose.\n upsample_method='resize_conv',\n\n # Visualization\n summary_steps=500, # Output image summary every N steps\n\n ###################################\n # Task Classifier Hyperparameters #\n ###################################\n\n # Which task-specific prediction tower to use. 
Possible choices are:\n # none: No task tower.\n # doubling_pose_estimator: classifier + quaternion regressor.\n # [conv + pool]* + FC\n # Classifiers used in DSN paper:\n # gtsrb: Classifier used for GTSRB\n # svhn: Classifier used for SVHN\n # mnist: Classifier used for MNIST\n # pose_mini: Classifier + regressor used for pose_mini\n task_tower='doubling_pose_estimator',\n weight_decay_task_classifier=1e-5,\n source_task_loss_weight=1.0,\n transferred_task_loss_weight=1.0,\n\n # Number of private layers in doubling_pose_estimator task tower\n num_private_layers=2,\n\n # The weight for the log quaternion loss we use for source and transferred\n # samples of the cropped_linemod dataset.\n # In the DSN work, 1/8 of the classifier weight worked well for our log\n # quaternion loss\n source_pose_weight=0.125 * 2.0,\n transferred_pose_weight=0.125 * 1.0,\n\n # If set to True, the style transfer network also attempts to change its\n # weights to maximize the performance of the task tower. If set to False,\n # then the style transfer network only attempts to change its weights to\n # make the transferred images more likely according to the domain\n # classifier.\n task_tower_in_g_step=True,\n task_loss_in_g_weight=1.0, # Weight of task loss in G\n\n #########################################\n # 'simple` generator arch model hparams #\n #########################################\n simple_num_conv_layers=1,\n simple_conv_filters=8,\n\n #########################\n # Resnet Hyperparameters#\n #########################\n resnet_blocks=6, # Number of resnet blocks\n resnet_filters=64, # Number of filters per conv in resnet blocks\n # If true, add original input back to result of convolutions inside the\n # resnet arch. If false, it turns into a simple stack of conv/relu/BN\n # layers.\n resnet_residuals=True,\n\n #######################################\n # The residual / interpretable model. #\n #######################################\n res_int_blocks=2, # The number of residual blocks.\n res_int_convs=2, # The number of conv calls inside each block.\n res_int_filters=64, # The number of filters used by each convolution.\n\n ####################\n # Latent variables #\n ####################\n # if true, then generate random noise and project to input for generator\n noise_channel=True,\n # The number of dimensions in the input noise vector.\n noise_dims=10,\n\n # If true, then one hot encode source image class and project as an\n # additional channel for the input to generator. This gives the generator\n # access to the class, which may help generation performance.\n condition_on_source_class=False,\n\n ########################\n # Loss Hyperparameters #\n ########################\n domain_loss_weight=1.0,\n style_transfer_loss_weight=1.0,\n\n ########################################################################\n # Encourages the transferred images to be similar to the source images #\n # using a configurable metric. #\n ########################################################################\n\n # The weight of the loss function encouraging the source and transferred\n # images to be similar. If set to 0, then the loss function is not used.\n transferred_similarity_loss_weight=0.0,\n\n # The type of loss used to encourage transferred and source image\n # similarity. 
Valid values include:\n # mpse: Mean Pairwise Squared Error\n # mse: Mean Squared Error\n # hinged_mse: Computes the mean squared error using squared differences\n # greater than hparams.transferred_similarity_max_diff\n # hinged_mae: Computes the mean absolute error using absolute\n # differences greater than hparams.transferred_similarity_max_diff.\n transferred_similarity_loss='mpse',\n\n # The maximum allowable difference between the source and target images.\n # This value is used, in effect, to produce a hinge loss. Note that the\n # range of values should be between 0 and 1.\n transferred_similarity_max_diff=0.4,\n\n ################################\n # Optimization Hyperparameters #\n ################################\n learning_rate=0.001,\n batch_size=32,\n lr_decay_steps=20000,\n lr_decay_rate=0.95,\n\n # Recomendation from the DCGAN paper:\n adam_beta1=0.5,\n clip_gradient_norm=5.0,\n\n # The number of times we run the discriminator train_op in a row.\n discriminator_steps=1,\n\n # The number of times we run the generator train_op in a row.\n generator_steps=1)\n\n if hparam_string:\n tf.logging.info('Parsing command line hparams: %s', hparam_string)\n hparams.parse(hparam_string)\n\n tf.logging.info('Final parsed hparams: %s', hparams.values())\n return hparams" ]
[ "0.62174714", "0.5995565", "0.59907234", "0.5958852", "0.58709556", "0.5789153", "0.5679872", "0.54301524", "0.5426742", "0.5412645", "0.5411937", "0.535265", "0.53177315", "0.5313377", "0.5309271", "0.52801293", "0.52665275", "0.524695", "0.524695", "0.52416915", "0.5237613", "0.52153486", "0.5191004", "0.51847523", "0.5163516", "0.5146146", "0.5138767", "0.5136337", "0.51354843", "0.5133475", "0.51218176", "0.510413", "0.50921196", "0.50891525", "0.5058459", "0.50247794", "0.50134903", "0.5002866", "0.498838", "0.49787772", "0.4970711", "0.49705747", "0.4965216", "0.4958488", "0.4955232", "0.49482107", "0.4931612", "0.4916958", "0.4907509", "0.49014747", "0.48920655", "0.48918298", "0.4891472", "0.48848343", "0.486712", "0.48627478", "0.4839411", "0.48292795", "0.4805559", "0.479762", "0.47943002", "0.47827777", "0.47785813", "0.47781596", "0.4774658", "0.47739673", "0.47723672", "0.47356418", "0.47348824", "0.4734753", "0.4734739", "0.47325626", "0.47314003", "0.4722704", "0.47181013", "0.47116554", "0.4711264", "0.4708128", "0.47074345", "0.4699823", "0.4691167", "0.46769413", "0.46763998", "0.46740133", "0.46731254", "0.46709704", "0.46662205", "0.46553376", "0.46543652", "0.46522138", "0.46467093", "0.4642212", "0.46400356", "0.46388555", "0.46349648", "0.46345282", "0.46345282", "0.463213", "0.4628324", "0.46273783" ]
0.53067887
15
Save score from a hyperparameter iteration.
def save_score(dic_path, hyperparams, metric, best_score):
    if os.path.isfile(dic_path):
        with open(dic_path, "r") as f:
            score_list = json.load(f)
    else:
        score_list = []

    score_list.append(hyperparams)
    score_list[-1].update({metric: best_score})

    with open(dic_path, "w") as f:
        json.dump(score_list, f, indent=4, sort_keys=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, score, model):\n if self.best_score is None:\n # assign the best score and save the model at the end of the first epoch\n self.best_score = score\n self.save_checkpoint(model)\n elif score < self.best_score + self.delta:\n # if the score not increase of at least delta, increment the counter and if it reach the patience early stops\n self.counter += 1\n if self.counter >= self.patience:\n self.early_stop = True\n else:\n # otherwise the score is better that the saved one, so replace the best score and save the model\n self.best_score = score\n self.save_checkpoint(model)\n self.counter = 0", "def update_score(self):\n self.score = TurboMQ.calculate_fitness(self.result, self.graph)", "def objective(hyperparams): \n global iteration #necessary with a global variable because of implementation from hyperopt. \n iteration += 1\n\n result = run_model(hyperparams, iteration)\n loss = -result #transform to loss in order to minimize\n\n return {'loss': loss, 'hyperparams': hyperparams, 'iteration': iteration, 'status': STATUS_OK}", "def saveBestState(self, value, epoch, it):\n \n self.monitor.saveModel(self.agent)\n self.bestState = deepcopy(self.agent.state_dict())\n self.best_value = value\n self.last_save = epoch\n self.last_iter = it\n logger.info(f\"Model saved at epoch {epoch}\")", "def score(self, params):\n \n if self.output_file != None:\n with open(self.output_file, \"a\") as myfile:\n try:\n myfile.write(str(self.trials.losses()[-2])+'\\n')\n except IndexError:\n print 'Index error'\n myfile.write(str(params)+', ')\n\n print \"Training with params : \"\n print params\n num_round = int(params['n_estimators'])\n del params['n_estimators']\n\n score = 0.\n for train_index, valid_index in self.kf:\n\n df_train = self.df_train.iloc[train_index]\n df_valid = self.df_train.iloc[valid_index]\n\n # fit the model\n self.fit(df_train, self.features, self.target, params, num_round)\n\n # results of the model on validation data\n predictions = self.predict(df_valid[self.features])\n\n # computing the accuracy of predictited similar pictures\n accuracy = np.mean(df_valid[self.target].values == np.round(predictions))\n print 'accuracy:', accuracy\n score -= accuracy/float(len(self.kf))\n \n #score -= roc_auc_score(df_valid[self.target].values, predictions)\n\n print \"\\tScore {0}\\n\\n\".format(score)\n return {'loss': score, 'status': STATUS_OK}", "def learn(self):\n\n for i in range(self.args.n_iters):\n diff = self.iteration()\n\n if diff < self.args.epsilon:\n self.save(self.save_path, i)\n break\n elif (i + 1) % self.args.save_frequency == 0:\n self.save(self.save_path, i)", "def run_and_store(self):\n # Initialization assumptions\n z = self.draw_normal_initial()\n gradient = self.cv_gradient_initial(z)\n gradient[np.isnan(gradient)] = 0\n variance = np.power(gradient,2) \n final_parameters = self.current_parameters()\n final_samples = 1\n\n # Create optimizer\n if self.optimizer == 'ADAM':\n self.optim = ADAM(final_parameters, variance, self.learning_rate, 0.9, 0.999)\n elif self.optimizer == 'RMSProp':\n self.optim = RMSProp(final_parameters, variance, self.learning_rate, 0.99)\n\n # Stored updates\n stored_means = np.zeros((self.iterations,len(final_parameters)/2))\n stored_predictive_likelihood = np.zeros(self.iterations)\n\n # Record elbo\n if self.record_elbo is True:\n elbo_records = np.zeros(self.iterations)\n else:\n elbo_records = None\n\n for i in range(self.iterations):\n gradient = self.cv_gradient(self.draw_normal())\n gradient[np.isnan(gradient)] = 0\n 
new_parameters = self.optim.update(gradient)\n self.change_parameters(new_parameters)\n\n stored_means[i] = self.optim.parameters[::2]\n stored_predictive_likelihood[i] = self.neg_posterior(stored_means[i])\n\n if self.printer is True:\n self.print_progress(i,self.optim.parameters[::2])\n\n # Construct final parameters using final 10% of samples\n if i > self.iterations-round(self.iterations/10):\n final_samples += 1\n final_parameters = final_parameters+self.optim.parameters\n\n if self.record_elbo is True:\n elbo_records[i] = self.get_elbo(self.optim.parameters[::2])\n\n final_parameters = final_parameters/float(final_samples)\n self.change_parameters(final_parameters)\n final_means = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2==0])\n final_ses = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2!=0])\n\n if not self.quiet_progress:\n print(\"\")\n print(\"Final model ELBO is \" + str(-self.full_neg_posterior(final_means)-self.create_normal_logq(final_means)))\n return self.q, final_means, final_ses, stored_means, stored_predictive_likelihood, elbo_records", "def run_and_store(self):\n # Initialization assumptions\n z = self.draw_normal_initial()\n gradient = self.cv_gradient_initial(z)\n gradient[np.isnan(gradient)] = 0\n variance = np.power(gradient,2) \n final_parameters = self.current_parameters()\n final_samples = 1\n\n # Create optimizer\n if self.optimizer == 'ADAM':\n self.optim = ADAM(final_parameters, variance, self.learning_rate, 0.9, 0.999)\n elif self.optimizer == 'RMSProp':\n self.optim = RMSProp(final_parameters, variance, self.learning_rate, 0.99)\n\n # Stored updates\n stored_means = np.zeros((self.iterations,len(final_parameters)/2))\n stored_predictive_likelihood = np.zeros(self.iterations)\n\n # Record elbo\n if self.record_elbo is True:\n elbo_records = np.zeros(self.iterations)\n else:\n elbo_records = None\n\n for i in range(self.iterations):\n gradient = self.cv_gradient(self.draw_normal())\n gradient[np.isnan(gradient)] = 0\n new_parameters = self.optim.update(gradient)\n self.change_parameters(new_parameters)\n\n stored_means[i] = self.optim.parameters[::2]\n stored_predictive_likelihood[i] = self.neg_posterior(stored_means[i])\n\n if self.printer is True:\n self.print_progress(i,self.optim.parameters[::2])\n\n # Construct final parameters using final 10% of samples\n if i > self.iterations-round(self.iterations/10):\n final_samples += 1\n final_parameters = final_parameters+self.optim.parameters\n\n if self.record_elbo is True:\n elbo_records[i] = self.get_elbo(self.optim.parameters[::2])\n\n final_parameters = final_parameters/float(final_samples)\n self.change_parameters(final_parameters)\n final_means = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2==0])\n final_ses = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2!=0])\n\n if not self.quiet_progress:\n print(\"\")\n print(\"Final model ELBO is \" + str(-self.neg_posterior(final_means)-self.create_normal_logq(final_means)))\n return self.q, final_means, final_ses, stored_means, stored_predictive_likelihood, elbo_records", "def compute_score(self, observation, prediction, verbose=False):\n #print(observation)\n score = TScore.compute( self.observation, prediction )\n print(\"compute_score\")", "def scoring(self):\n pass", "def prediction(self, score: float):\n data = self.process_data()\n paramters = data[\"param\"]\n return {\"pred\": func(score, paramters[0], paramters[1], 
paramters[2]),\n \"acc\": data[\"acc\"]}", "def score(self):", "def _update_optimizer(self, hyperparameters, score, fit=True):\n if self.do_maximize:\n score = -score\n self.optimizer_result = self.optimizer.tell(hyperparameters, score, fit=fit)", "def test_best_val(self, te_acc):\n self.test_val = te_acc", "def update_score():\n pass", "def score(self,*val):\n if len(val):\n self._score = val[0]\n self.evaluated = 1\n else: self.evaluate()\n return self._score", "def hyperparameter_cv(X_data, y_data, hyperparameters):\n\n # Create Grid of hyperparameters\n grid = cartesian_product(hyperparameters)\n\n # Loop through hyperparameters \n best_score = 0\n for hyperparameter in grid:\n # Initialize Modle\n model = svm.SVC(kernel='linear', **hyperparameter)\n\n # Train and Get Accuracy\n print(f\"Training using hyperparameters: {hyperparameter}\")\n score = cross_validation_score(X_data, y_data, model, folds=5)\n print(f\"Accuracy Score: {score}\")\n\n if score > best_score:\n best_score = score\n best_parameters = hyperparameter\n \n return best_score, best_parameters", "def store_iter_weights(self):\n self.w_after_iter.append(self.nn.get_param_values())", "def get_score(self):\n return self.score", "def scoreEvaluationFunction(gameState):\n return gameState.getScore()", "def update_scores(self, score):\n self.result_list.append(score)\n\n if self.best_score == 0 and self.worst_score == 0:\n self.best_score = score\n self.worst_score = score\n\n if score < self.best_score:\n self.best_score = score\n\n if score > self.worst_score:\n self.worst_score = score", "def analyze_hyperparameter(self, hyperparameter):\n\n\t\t# TODO: Clean up this logic. Sorry for the mess!\n\t\tsorted_configs = sorted(self._generated_configs, key=lambda x: x.sampled_config[hyperparameter])\n\n\t\tplt.figure()\n\t\tplt.xlabel('Epochs')\n\t\tplt.ylabel('Training Loss')\n\t\tplt.title('Average Training Loss Across Epochss')\n\t\tfor config in sorted_configs:\n\t\t\tplt.plot(*config.train_loss_saver.get_plot_points(),\n\t\t\t\tlabel='{0} = {1}'.format(hyperparameter, config.sampled_config[hyperparameter]))\n\t\ttrain_loss_output_path = os.path.join(self._plots_dir, 'train_loss.{0}.{1}.{2}.png'.format(\n\t\t\thyperparameter, self._model_config.experiment, time.time()))\n\t\tlogging.info('Saving {0} hyperparameter comparison plot to: {1}'.format(\n\t\t\thyperparameter, train_loss_output_path))\n\t\tplt.legend()\n\t\tplt.savefig(train_loss_output_path)\n\n\t\tplt.figure()\n\t\tplt.xlabel('Epoch')\n\t\tplt.ylabel('Validation Accuracy')\n\t\tplt.title('Validation Accuracy Across Epochs')\n\t\tfor config in sorted_configs:\n\t\t\tplt.plot(*config.valid_acc_saver.get_plot_points(),\n\t\t\t\tlabel='{0} = {1}'.format(hyperparameter, config.sampled_config[hyperparameter]))\n\t\tvalid_acc_output_path = os.path.join(self._plots_dir, 'valid_acc.{0}.{1}.{2}.png'.format(\n\t\t\thyperparameter, self._model_config.experiment, time.time()))\n\t\tlogging.info('Saving {0} hyperparameter comparison plot to: {1}'.format(\n\t\t\thyperparameter, valid_acc_output_path))\n\t\tplt.legend()\n\t\tplt.savefig(valid_acc_output_path)\n\n\t\tplt.figure()\n\t\tplt.xlabel('Epoch')\n\t\tplt.ylabel('Training Accuracy')\n\t\tplt.title('Training Accuracy Across Epochs')\n\t\tfor config in sorted_configs:\n\t\t\tplt.plot(*config.train_acc_saver.get_plot_points(),\n\t\t\t\tlabel='{0} = {1}'.format(hyperparameter, config.sampled_config[hyperparameter]))\n\t\ttrain_acc_output_path = os.path.join(self._plots_dir, 
'train_acc.{0}.{1}.{2}.png'.format(\n\t\t\thyperparameter, self._model_config.experiment, time.time()))\n\t\tlogging.info('Saving {0} hyperparameter comparison plot to: {1}'.format(\n\t\t\thyperparameter, train_acc_output_path))\n\t\tplt.legend()\n\t\tplt.savefig(train_acc_output_path)", "def _optimization_loop(self, iteration=0):\n self.logger.print_optimization_header()\n\n while iteration < self.iterations:\n try:\n self._execute_experiment()\n except RepeatedExperimentError:\n # G.debug_(F'Skipping repeated Experiment: {_ex!s}\\n')\n if len(self.similar_experiments) + len(self.tested_keys) >= self.search_space_size:\n G.log_(f\"Hyperparameter search space has been exhausted\")\n break\n self.skipped_iterations += 1\n continue\n except StopIteration:\n if len(self.similar_experiments) + len(self.tested_keys) >= self.search_space_size:\n G.log_(f\"Hyperparameter search space has been exhausted\")\n break\n # G.debug_(f'Re-initializing hyperparameter grid after testing {len(self.tested_keys)} keys')\n self._set_hyperparameter_space()\n continue\n\n self.logger.print_result(\n self.current_hyperparameters_list,\n self.current_score,\n experiment_id=self.current_experiment.experiment_id,\n )\n\n if (\n (self.best_experiment is None) # First evaluation\n or (self.do_maximize and (self.best_score < self.current_score)) # New best max\n or (not self.do_maximize and (self.best_score > self.current_score)) # New best min\n ):\n self.best_experiment = self.current_experiment.experiment_id\n self.best_score = self.current_score\n\n iteration += 1", "def getScore(self, i):\n return self.scores[i - 1]", "def scores(self, value):\n self._scores = value", "def step(self):\n fit_default_config = {\"verbose\": self.verbose}\n fit_default_config.update(self.config.get(\"fit_config\", {}))\n\n history = self.model.fit(self.train_dataset, **fit_default_config)\n if history is None:\n stats = {}\n else:\n stats = {\"train_\" + k: v[-1] for k, v in history.history.items()}\n\n self.epoch += 1\n return stats", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def _save(self, itr):\n # using keep_checkpoint_every_n_hours as proxy for iterations between saves\n if self.saver and (itr + 1) % self.saver._keep_checkpoint_every_n_hours == 0:\n\n # collect params (or stuff to keep in general)\n params = dict()\n params['critic'] = self.critic.network.get_param_values()\n\n # if the environment is wrapped in a normalizing env, save those stats\n normalized_env = hgail.misc.utils.extract_normalizing_env(self.env)\n if normalized_env is not None:\n params['normalzing'] = dict(\n obs_mean=normalized_env._obs_mean,\n obs_var=normalized_env._obs_var\n )\n\n # save hierarchy\n for i, level in enumerate(self.hierarchy):\n params[i] = dict()\n params[i]['policy'] = level.algo.policy.get_param_values()\n \n # save params \n save_dir = os.path.split(self.saver_filepath)[0]\n hgail.misc.utils.save_params(save_dir, params, itr+1, max_to_keep=50)", "def __call__(self, save_fct):\n eval_scores = [\"Not evaluated\"]\n if self.train:\n logger.info(\"> Training\")\n 
self.train.run_training(save_fct = save_fct)\n logger.info('reverting learned weights to best checkpoint..')\n try:\n ParamManager.param_col.revert_to_best_model()\n except RevertingUnsavedModelException:\n pass\n\n evaluate_args = self.evaluate\n if evaluate_args:\n logger.info(\"> Performing final evaluation\")\n eval_scores = []\n for evaluator in evaluate_args:\n eval_score = evaluator.eval()\n if type(eval_score) == list:\n eval_scores.extend(eval_score)\n else:\n eval_scores.append(eval_score)\n\n return eval_scores", "def updateScore(score):\n return score + 1", "def evaluate(results, name):\n \n new_results = results.copy()\n \n # String to dictionary\n new_results['hyperparameters'] = new_results['hyperparameters'].map(ast.literal_eval)\n \n # Sort with best values on top\n new_results = new_results.sort_values('score', ascending = False).reset_index(drop = True)\n \n # Print out cross validation high score\n print('The highest cross validation score from {} was {:.5f} found on iteration {}.'.format(name, \n new_results.loc[0, 'score'], new_results.loc[0, 'iteration']))\n \n # Create dataframe of hyperparameters\n hyp_df = pd.DataFrame(columns = list(new_results.loc[0, 'hyperparameters'].keys()))\n \n for i, row in enumerate(new_results['hyperparameters']):\n if 'hidden_layer_sizes' in row:\n new_results['hyperparameters'][i]['hidden_layer_sizes'] = str(new_results['hyperparameters'][i]['hidden_layer_sizes'])\n\n # Iterate through each set of hyperparameters that were evaluated\n for i, hyp in enumerate(new_results['hyperparameters']):\n hyp_df = hyp_df.append(pd.DataFrame(hyp, index = [0]), \n ignore_index = True)\n \n # Put the iteration and score in the hyperparameter dataframe\n hyp_df['iteration'] = new_results['iteration']\n hyp_df['score'] = new_results['score']\n \n return hyp_df", "def setScore(self, i, score):\n self.scores[i - 1] = score", "def __call__(self, trial):\n if _is_creator(self.model):\n model = self.model(trial)\n else:\n # copy model so that the original model is not changed\n # Need tests to check this path\n model = copy.deepcopy(self.model)\n\n self._pre_train(model, trial)\n self.searcher._run(model)\n if self.mo_hpo:\n scores = []\n for metric in self.target_metric:\n if metric == self.metric:\n if self.mode == \"last\":\n score = self.searcher.trainer.callback_metrics[self.target_metric].item()\n elif self.mode == \"best\":\n score = self.searcher.trainer.callback_metrics[\"_best_score\"].item()\n else:\n score = self.searcher.trainer.callback_metrics[metric].item()\n scores.append(score)\n else:\n if self.mode == \"last\":\n scores = self.searcher.trainer.callback_metrics[self.target_metric].item()\n elif self.mode == \"best\":\n scores = self.searcher.trainer.callback_metrics[\"_best_score\"].item()\n\n if self.acceleration:\n scores, optimization = self._auto_acceleration(model, scores)\n # via user_attr returns the choosed optimization corresponding\n # to the minimum latency\n trial.set_user_attr(\"optimization\", optimization)\n self._post_train(model)\n return scores", "def scoreEvaluationFunction(currentGameState):\r\n return currentGameState.getScore()", "def final(self, **kwargs):\n epoch = kwargs[\"epoch\"] + 1\n if epoch >= self.ignore_before:\n name = self.prepend + \"training_epoch_{}_FINAL.h5\".format(epoch)\n full_path = os.path.join(self.path, name)\n self.save_model(kwargs[\"trainer\"], full_path)\n else:\n print(\"Minimum iterations to store model not reached.\")\n\n if self.best_model is not None:\n best_model = 
deepcopy(self.best_model)\n best_res = self.best_res\n if self.window is not None:\n print(\"Best result during training: {:.2f}.\\n In a window of size {} \"\n \"starting in epoch {} with best mean value of {} \\n Saving model..\".format(best_res,\n self.window,\n self.best_window_start,\n self.best_mean_res))\n else:\n print(\n \"Best result during training: {:.2f}. Saving model..\".format(\n best_res\n )\n )\n name = self.prepend + \"BEST_ITERATION.h5\"\n torch.save(best_model, os.path.join(self.path, name))\n self.reset()", "def _store_results(self, accuracy):\n\n self.learning_curve_.append(accuracy)\n\n # update reward prior with the change in accuracy rate\n delta = self.learning_curve_[-1] - self.learning_curve_[-2]\n mu_0 = self.prior_mus[self.best_heuristic_idx]\n sigma_0 = self.prior_sigmas[self.best_heuristic_idx]\n sigma = self.likelihood_sigmas[self.best_heuristic_idx]\n\n self.prior_mus[self.best_heuristic_idx] = (mu_0 * sigma + delta * sigma_0) / (sigma + sigma_0)\n self.prior_sigmas[self.best_heuristic_idx] = (sigma_0 * sigma) / (sigma + sigma_0)\n\n self.all_prior_mus.append(self.prior_mus.copy())\n self.all_prior_sigmas.append(self.prior_sigmas.copy())", "def on_epoch_end(self):\n self.current_params = self.model.posterior_mean(self.params)\n self.current_epoch += 1\n self.parameter_values += [self.current_params]\n self.epochs += [self.current_epoch]", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return 
currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def __call__(self, trainer, epoch):\n # do not store intermediate iterations\n if epoch >= self.ignore_before and epoch != 0:\n if not self.num_iters == -1:\n\n # counting epochs starts from 1; i.e. +1\n epoch += 1\n # store model recurrently if set\n if epoch % self.num_iters == 0:\n name = self.prepend + \"training_epoch_{}.h5\".format(epoch)\n full_path = os.path.join(self.path, name)\n self.save_model(trainer, full_path)\n\n # store current model if improvement detected\n if self.store_best:\n current_res = 0\n try:\n # check if value can be used directly or not\n if isinstance(self.retain_metric, str):\n current_res = trainer.val_metrics[self.retain_metric][-1]\n else:\n current_res = trainer.val_metrics[self.retain_metric.__name__][-1]\n except KeyError:\n print(\"Couldn't find {} in validation metrics. Using \\\n loss instead.\".format(self.retain_metric))\n current_res = trainer.val_metrics[\"loss\"][-1]\n\n # update\n if self.window is None: # old update style\n if self._has_improved(current_res):\n self.best_res = current_res\n self.best_model = deepcopy(trainer.model.state_dict())\n else: # new update style\n # get validation metrics in certain window\n try:\n if isinstance(self.retain_metric, str):\n start = len(trainer.val_metrics[self.retain_metric]) - self.window\n start = 0 if start < 0 else start\n\n window_val_metrics = trainer.val_metrics[self.retain_metric][start:]\n else:\n start = len(trainer.val_metrics[self.retain_metric.__name__]) - self.window\n start = 0 if start < 0 else start\n window_val_metrics = trainer.val_metrics[self.retain_metric.__name__][start:]\n except KeyError:\n print(\n \"Couldn't find {} in validation metrics. 
Using \\\n loss instead.\".format(\n self.retain_metric\n )\n )\n start = len(trainer.val_metrics[self.retain_metric]) - self.window\n start = 0 if start < 0 else start\n window_val_metrics = trainer.val_metrics[\"loss\"][start:]\n\n # build mean\n mean_window_res = np.mean(window_val_metrics)\n\n # only safe when improvement to previous epoch detected\n # only a value BETTER than before can be the minimum/maximum of a\n # window with better mean than a previously detected window\n if len(window_val_metrics) == 1 \\\n or self._first_val_better(window_val_metrics[-1], window_val_metrics[-2]) \\\n or self._current_window_save_idx == -1:\n if self._current_window_save_idx == -1:\n self._current_window_save_idx = 0\n self._state_dict_storage[self._current_window_save_idx] = deepcopy(trainer.model.state_dict())\n # increase save idx and take modulo\n self._current_window_save_idx += 1\n self._current_window_save_idx = divmod(self._current_window_save_idx, self.window)[1]\n else: # only increase current_window_save_idx (for modulo index calculation to work)\n self._current_window_save_idx += 1\n self._current_window_save_idx = divmod(self._current_window_save_idx, self.window)[1]\n\n # always update current window best result - it might be at some point overall best result\n current_window_best_idx = self._get_cur_win_best_idx(window_val_metrics)\n if current_window_best_idx == len(window_val_metrics) - 1 \\\n or len(window_val_metrics) == 1: # case of improvement or initialisation\n # overwrite model_state saved so far\n self._current_window_best_model_save_idx = self._current_window_save_idx\n self._current_window_best_epoch = epoch\n self._current_window_best_res = window_val_metrics[-1]\n\n # check if mean has improved and copy values as best model result\n if self._has_window_mean_improved(mean_window_res):\n self.best_mean_res = mean_window_res\n self.best_window_start = 0 if epoch - self.window + 1 < 0 else epoch - self.window + 1\n # save current window best as overall best\n self.best_res = self._current_window_best_res\n self.best_model = copy.deepcopy(self._state_dict_storage[self._current_window_best_model_save_idx])\n if self.info:\n print(\"Found a window with better validation metric mean:\")\n print(\"\\t metric mean: {}\".format(mean_window_res))\n print(\"\\t epoch start: {}\".format(self.best_window_start))\n print(\"\\t best result: {}\".format(self.best_res))", "def _store_results(self, accuracy):\n\n self.learning_curve_.append(accuracy)", "def save_improvement(obj, status):\n if np.isnan(model.parameters[0].get_value().sum()):\n print(\"NaN detected! Not saving the model. 
Crashing now.\")\n sys.exit()\n\n print(\"*** Best epoch: {0} ***\\n\".format(obj.best_epoch))\n model.save(experiment_path)", "def get_scores(self):\n return self.score", "def save_score_dict(self) -> None:\n self.CACHE.LOAD()\n score_dict = self.CACHE.GET('score_dict')\n with open(os.path.join(self.hyper_experiment_path, 'score_dict.json'), 'w') as f:\n json.dump(score_dict, f)", "def update(self, steps, rewards, i_episode):\n self.total_steps += steps\n self.score = sum(rewards)\n self.scores_window.append(self.score)\n self.scores.append(self.score)\n self.avg_score = np.mean(self.scores_window)\n self.avg_scores.append(self.avg_score)\n self.std_dev = np.std(self.scores_window)\n # update best average score\n if self.avg_score > self.best_avg_score and i_episode > 100:\n self.best_avg_score = self.avg_score", "def save(self, epoch, iteration):\n\n # store the weights\n filename = join(self.savedir, \"epoch{:03d}_iter{:05d}_weights.h5\".format(epoch, iteration))\n self.model.save_weights(filename)", "def _train_and_evaluate(estimator, output_dir):\n \n \"\"\"X_train, y_train =utils._feature_label_split(df_train,\"is_churn\",\"msno\")\n df_val = utils.read_from_bigquery(\"amiable-octane-267022.kkbox.output_val_1\",\"amiable-octane-267022\")\n X_val, y_val =utils._feature_label_split(df_val,\"is_churn\",\"msno\")\"\"\"\n \n df_train=utils.over_sample(\"amiable-octane-267022.kkbox.output_train_1\",\"amiable-octane-267022\")\n X_train, y_train =utils._feature_label_split(df_train,\"is_churn\",\"msno\")\n df_val=utils.over_sample(\"amiable-octane-267022.kkbox.output_val_1\",\"amiable-octane-267022\")\n X_val, y_val =utils._feature_label_split(df_val,\"is_churn\",\"msno\")\n\n estimator.fit(X_train, y_train)\n f1_scorer = make_scorer(f1_score)\n accuracy_scorer =make_scorer(accuracy_score)\n\n if metadata.HYPERPARAMTER_TUNING:\n scores=model_selection.cross_val_score(estimator, X_val, y_val, cv=3,scoring=f1_scorer)\n #,scoring=f1_scorer\n\n logging.info('Score: %s', scores)\n\n #tune hyper\n hpt = hypertune.HyperTune()\n hpt.report_hyperparameter_tuning_metric(\n hyperparameter_metric_tag='F1_SCORE',\n metric_value=np.mean(scores),\n global_step=10000)\n \n#joblib.dump(estimator, 'model.joblib')\n\n # Write model and eval metrics to `output_dir`\n model_output_path = os.path.join(output_dir, 'model',metadata.MODEL_FILE_NAME)\n \n utils.dump_object(estimator, model_output_path)", "def score(self, score):\n\n self._score = score", "def score(self, score):\n\n self._score = score", "def score(self, score):\n\n self._score = score", "def evaluate_batch(self, batch, stage):\n if stage != sb.Stage.TEST:\n # Same as before\n out = self.compute_forward(batch, stage=stage)\n loss = self.compute_objectives(out, batch, stage=stage)\n out_prob = self.compute_forward(batch, stage=stage)\n out_prob = out_prob.squeeze(1)\n score, index = torch.max(out_prob, dim=-1)\n cm_scores = [out_prob[i].item() for i in range(out_prob.shape[0])]\n self.pd_out['files'] += batch.id\n self.pd_out['scores'] += cm_scores\n return loss.detach().cpu()\n else:\n out_prob = self.compute_forward(batch, stage=stage)\n out_prob = out_prob.squeeze(1)\n score, index = torch.max(out_prob, dim=-1)\n # text_lab = self.hparams.label_encoder.decode_torch(index)\n return out_prob, score, index\n # return out_prob, score, index, text_lab", "def val(self):\n self.set_eval()\n try:\n inputs = self.val_iter.next()\n except StopIteration:\n self.val_iter = iter(self.val_loader)\n inputs = self.val_iter.next()\n\n with torch.no_grad():\n 
outputs, losses = self.process_batch(inputs)\n\n if \"depth_gt\" in inputs:\n self.compute_depth_losses(inputs, outputs, losses)\n\n self.log(\"val\", inputs, outputs, losses)\n del inputs, outputs, losses\n\n self.set_train()", "def _evaluate_during_fit(self, test_loader, epoch):", "def evaluate(self):\n\n\t\tself.model_score = self.model.evaluate(self.x_test, self.y_test, batch_size=2048)\n\t\tprint(\"%s score = %f\\n\" %(self.modelName, self.model_score[1]))\n\t\treturn self.model_score", "def getScore(data):\n return score", "def val(self):\n self.set_eval()\n try:\n inputs = self.val_iter.next()\n except StopIteration:\n self.val_iter = iter(self.val_loader)\n inputs = self.val_iter.next()\n\n with torch.no_grad():\n outputs, losses = self.process_batch(inputs)\n\n if \"depth_gt_l\" in inputs:\n self.compute_depth_losses(inputs, outputs, losses)\n\n self.log(\"val\", inputs, outputs, losses)\n del inputs, outputs, losses\n\n self.set_train()", "def score(self, archi:ArchitectureNN):\n archi.fit_model(self.train_data, **self.train_params)\n \n return archi.compute_test_score(self.test_data)", "def scores_(self):\n return self.predictor.scores_", "def score(self, X, y=...):\n ...", "def parameter_optimization(self):\n out = open(self.csv_dir + self.strategy_id + '_gridsearch.csv', \"w\")\n spl = len(self.para_list)\n for i, sp in enumerate(self.para_list):\n print(\"Strategy %s out of %s...\" % (i + 1, spl))\n self._generate_trading_instances(sp)\n self._run_backtest()\n stats = self.portfolio.get_statistics()\n tot_profit = float(stats[0][1])\n sharpe = float(stats[1][1])\n max_dd = float(stats[2][1])\n win_rate = float(stats[7][1].replace(\"%\", \"\"))\n profit_factor = float(stats[8][1])\n\n out.write(\n \"%s,%s,%s,%s,%s,%s,%s\\n\" %\n (sp[\"takeprofit\"], sp[\"period\"], tot_profit, sharpe, max_dd, win_rate, profit_factor)\n )\n out.close()", "def update_score(self, concept: _Concept, result: _Result) -> None:\n\n score = self.make_score(concept, result)\n if score is None:\n pass\n else:\n self._vector.append(score)", "def update_learning_rate(self, it):\n self.scheduler.step()\n for param_group in self.optimizer.param_groups:\n v = param_group['lr']\n self.tb_logger.add_scalar('train/lr', v, it)", "def evaluation(self):\n rows_list = []\n for name in self.single_classifier_best.keys():\n row = {}\n row['algorithm'] = name \n row[self.scoring_metric] = self.single_classifier_best[name].best_score_\n rows_list.append(row)\n \n scoring_df = pd.DataFrame(rows_list)\n scoring_sorted = scoring_df.sort_values(self.scoring_metric, ascending=False)\n print()\n print('*'*shutil.get_terminal_size().columns)\n print(scoring_sorted)\n print('*'*shutil.get_terminal_size().columns)\n self.evaluation_scores = scoring_sorted", "def score(self):\n raise NotImplementedError()" ]
[ "0.62541574", "0.61163104", "0.6092135", "0.59902626", "0.5891033", "0.57781833", "0.5758972", "0.5758972", "0.5757687", "0.57543606", "0.5740902", "0.57180816", "0.5717469", "0.5684043", "0.5682911", "0.5615517", "0.5614505", "0.55993366", "0.5589203", "0.55578935", "0.55561125", "0.5555723", "0.55256414", "0.5511856", "0.5487138", "0.5481439", "0.54788876", "0.54788876", "0.54788876", "0.54788876", "0.54788876", "0.54788876", "0.54667395", "0.5452515", "0.54472643", "0.5445831", "0.5442537", "0.5442428", "0.54311764", "0.5429652", "0.5420918", "0.54191035", "0.5388357", "0.5388357", "0.5388357", "0.5388357", "0.5388357", "0.5388357", "0.5388357", "0.5388357", "0.5388357", "0.5388357", "0.5388357", "0.5388357", "0.5388357", "0.5388357", "0.5388357", "0.5388357", "0.5388357", "0.5388357", "0.5388357", "0.5388357", "0.5388357", "0.5388357", "0.5388357", "0.5388357", "0.5388357", "0.5388357", "0.5388357", "0.5388357", "0.5388357", "0.5388357", "0.5388357", "0.5388357", "0.5388357", "0.53871644", "0.53839445", "0.53765965", "0.5374853", "0.5349595", "0.53473586", "0.53465956", "0.53460675", "0.5340904", "0.5340904", "0.5340904", "0.5334141", "0.53316754", "0.53311026", "0.5326949", "0.5325806", "0.53235775", "0.5313162", "0.52976614", "0.52956927", "0.52934206", "0.52875537", "0.52834135", "0.52811664", "0.52809757" ]
0.5386014
76
Make objective function that gets called by `hyperopt`.
def make_objective(model_path, param_names, param_types, job_path,
                   prop_name, metric, dic_path):
    param_type_dic = {name: typ for name, typ in zip(param_names, param_types)}

    def objective(hyperparams):
        # clean up model folder from previous interation
        clean_up(model_path=model_path)

        # Convert hyperparams from float to int when necessary
        for key, typ in param_type_dic.items():
            if typ == "int":
                hyperparams[key] = int(hyperparams[key])

        # print hyperparameters being used
        val_str = " " + "\n ".join([f"{key}: {val}" for key, val in hyperparams.items()])
        fprint(f"Hyperpameters used this round:\n{val_str}")

        # update config file, run, get the score, and save
        vals = [hyperparams[key] for key in param_names]
        update_info(job_path=job_path, vals=vals,
                    param_names=param_names, prop_name=prop_name)

        # train the model and get the score
        best_score = run(job_path=job_path, model_path=model_path, metric=metric)

        # get the hyperparameter score, given that the aim is
        # to minimize whatever comes out
        metric_obj = METRIC_DIC[convert_metric(metric)]
        hyper_score = -best_score if (metric_obj == "maximize") else best_score

        # save the score
        save_score(dic_path=dic_path, hyperparams=hyperparams,
                   metric=metric, best_score=best_score)

        return hyper_score

    return objective
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def objective_function(x):\n return x * 1 # change this to our actual function", "def objective(hyperparams): \n global iteration #necessary with a global variable because of implementation from hyperopt. \n iteration += 1\n\n result = run_model(hyperparams, iteration)\n loss = -result #transform to loss in order to minimize\n\n return {'loss': loss, 'hyperparams': hyperparams, 'iteration': iteration, 'status': STATUS_OK}", "def objective(self, x):\n pass", "def objective(self, x):\n pass", "def objective(self):\n pass", "def _objective_decorator(func):\n def inner(preds, dmatrix):\n \"\"\"internal function\"\"\"\n labels = dmatrix.get_label()\n return func(labels, preds)\n return inner", "def add_objective(self): \n \n if \"CSS\" in self.algorithm:\n \n if self.num_hidden == 0:\n \n data_term = self.compute_energy(self.x, self.batch_size)\n \n else:\n \n data_term = self.compute_free_energy(self.x)\n \n normalizer_term = self.add_css_approximation(data_term)\n \n if \"CD\" in self.algorithm and self.num_hidden ==0:\n \n data_term = self.compute_energy(self.x, self.batch_size)\n \n normalizer_term = self.compute_energy(self.x_gibbs, \n self.batch_size)\n \n normalizer_term = -T.mean(normalizer_term)\n \n if \"CD\" in self.algorithm and self.num_hidden > 0:\n \n data_term = self.compute_free_energy(self.x)\n \n normalizer_term = self.compute_free_energy(self.rbm_cd_samples)\n \n normalizer_term = -T.mean(normalizer_term)\n \n # cost is negative log likelihood \n self.cost = T.mean(data_term) + normalizer_term", "def objective(self, args: Dict[str, Any]) -> float:\n pass", "def objective(self, param):\n self.__init__(param, self.data)\n # return self.rmse() + self.penalty()\n return self.rmse() + self.penalty()", "def objective_function(num, x, fe_count, best):\n if num == 1:\n return sphere(x, fe_count, best)\n elif num == 2:\n return rastrigin(x, fe_count, best)\n elif num == 3:\n return rosenbrock(x, fe_count, best)\n elif num == 4:\n return schwefel(x, fe_count, best)\n elif num == 5:\n return quartic(x, fe_count, best)\n elif num == 6:\n return ackley(x, fe_count, best)\n elif num == 7:\n return schaffer(x, fe_count, best)\n elif num == 8:\n return griewank(x, fe_count, best)\n elif num == 9:\n return matyas(x, fe_count, best)\n elif num == 10:\n return trid(x, fe_count, best)\n else:\n pass", "def objective_function(self, y_true, y_predicted, X=None):", "def objective(args: Namespace, trial: optuna.trial._trial.Trial) -> float:\n # Paramters (to tune)\n args.embedding_dim = trial.suggest_int(\"embedding_dim\", 128, 512)\n args.num_filters = trial.suggest_int(\"num_filters\", 128, 512)\n args.hidden_dim = trial.suggest_int(\"hidden_dim\", 128, 512)\n args.dropout_p = trial.suggest_uniform(\"dropout_p\", 0.3, 0.8)\n args.lr = trial.suggest_loguniform(\"lr\", 5e-5, 5e-4)\n\n # Train (can move some of these outside for efficiency)\n logger.info(f\"\\nTrial {trial.number}:\")\n logger.info(json.dumps(trial.params, indent=2))\n artifacts = run(args=args, trial=trial)\n\n # Set additional attributes\n args = artifacts[\"args\"]\n performance = artifacts[\"performance\"]\n logger.info(json.dumps(performance[\"overall\"], indent=2))\n trial.set_user_attr(\"threshold\", args.threshold)\n trial.set_user_attr(\"precision\", performance[\"overall\"][\"precision\"])\n trial.set_user_attr(\"recall\", performance[\"overall\"][\"recall\"])\n trial.set_user_attr(\"f1\", performance[\"overall\"][\"f1\"])\n\n return performance[\"overall\"][\"f1\"]", "def custom_method(*args, **kwargs):\n return 
objective_return", "def custom_method(*args, **kwargs):\n return objective_return", "def set_objective_fn(self, objective_fn):\n self.objective_fn = objective_fn", "def _create_objective(self, meta, m):\n ## cashflow eval\n rule = partial(self._cashflow_rule, meta)\n m.obj = pyo.Objective(rule=rule, sense=pyo.maximize)", "def objective(self, adjacency=None, R=None):\n raise NotImplementedError()", "def optimize(opt, target, n_agents, n_variables, n_iterations, lb, ub, hyperparams):\n\n # Creating the SearchSpace\n space = SearchSpace(n_agents=n_agents, n_variables=n_variables,\n n_iterations=n_iterations, lower_bound=lb, upper_bound=ub)\n\n # Creating the Function\n function = Function(pointer=target)\n\n # Creating Optimizer\n if opt.__name__ is not 'BH':\n optimizer = opt(hyperparams=hyperparams)\n else:\n optimizer = opt()\n\n # Creating the optimization task\n task = Opytimizer(space=space, optimizer=optimizer, function=function)\n\n return task.start(store_best_only=True)", "def objective_func(self, topology, grad_func, tmax, eta):\n f = objective_function_numpy\n x_func = partial(self._optimize_form, topology=topology, tmax=tmax, eta=eta)\n return partial(f, x_func=x_func, grad_func=grad_func)", "def objective(\n self,\n parameters: object\n ) -> float:\n pass", "def objective_function(self, variables, mask, annuity_scalar=1):\n ice_gen = variables['ice_gen']\n self.costs = {'ice_fuel': cvx.sum(cvx.multiply(self.efficiency * self.fuel_cost * self.dt * annuity_scalar, variables['ice_gen'])),\n 'ice_fixed': self.fixed_om * annuity_scalar,\n 'ice_variable': cvx.sum(cvx.multiply(self.vari_om * self.dt * annuity_scalar, ice_gen)),\n 'ice_ccost': self.capital_cost * self.n + self.ccost_kw * self.rated_power * self.n\n }\n\n return self.costs", "def hyperopt_func(model_dict, model_param_names, training_param_names, param_space, datasets, max_evals=30):\n tester = fitness(model_dict, model_param_names, training_param_names, datasets)\n trials = Trials()\n \n timer_start = timer()\n best = fmin(fn=tester.objective, \n space=param_space, \n algo=tpe.suggest, \n max_evals=max_evals, \n trials=trials, \n rstate=np.random.RandomState(50))\n timer_end = timer()\n print('Total training time (min):',(timer_end-timer_start)/60)\n results = sorted(trials.results, key = lambda x: x['loss'])\n return results", "def objective(trial, \n bounds: Optional[Iterable]=None, \n func: Optional[Callable]=None, \n param_names: Optional[List[str]]=None):\n if param_names is None:\n param_names = PARAM_NAMES\n if (bounds is None):\n bounds = ((-10, 10) for _ in param_names)\n if not isinstance(bounds, dict):\n bounds = dict((p, (min(b), max(b))) \n for p, b in zip(param_names, bounds))\n if func is None:\n func = DEFAULT_METRIC_FUNC\n\n params = dict(\n (p, trial.suggest_float(p, bounds.get(p)[0], bounds.get(p)[1])) \n for p in param_names \n )\n # x = trial.suggest_float('x', -10, 10)\n return func((params[p] for p in param_names))", "def objective(rp,n=5000,C=-2*10**11,a=300,b=1):\n l = log(rp)/n\n r = exp(l)\n rm1 = r-1\n return (rp-1)*((a-b*n)*rm1 + 1) - C*(rm1)*(rm1)\n #return rm1", "def define_objective(m):\r\n\r\n # Dual objective function\r\n m.OBJECTIVE = Objective(expr=m.DUAL_OBJECTIVE_EXPRESSION, sense=maximize)\r\n\r\n return m", "def _optimize(self, objective):\n # Initial value\n initial = self.get_initial()[0]\n\n if self.vector_to_matrix_transform is not None:\n initial = self.vector_to_matrix_transform(initial)\n\n if self.solver_type is 'NelderMead' or self.solver_type is 'ParticleSwarm':\n initial 
= None\n\n # Create tensorflow variable\n if self.matrix_manifold_dimension is None:\n x_tf = tf.Variable(tf.zeros(self.dimension, dtype=tf.float64))\n else:\n x_tf = tf.Variable(tf.zeros([self.matrix_manifold_dimension, self.matrix_manifold_dimension], dtype=tf.float64))\n\n # Cost function for pymanopt\n def objective_fct(x):\n if self.matrix_to_vector_transform_tf is not None:\n # Reshape x from matrix to vector form to compute the objective function (tensorflow format)\n x = self.matrix_to_vector_transform_tf(x, self.matrix_manifold_dimension)\n return objective(x)[0]\n\n # Transform the cost function to tensorflow function\n cost = tf.py_function(objective_fct, [x_tf], tf.float64)\n\n # Gradient function for pymanopt\n def objective_grad(x):\n if self.matrix_to_vector_transform is not None:\n # Reshape x from matrix to vector form to compute the gradient\n x = self.matrix_to_vector_transform(x)\n\n # Compute the gradient\n grad = np.array(objective(x)[1])[0]\n\n if self.vector_to_matrix_transform is not None:\n # Reshape the gradient in matrix form for the optimization on the manifold\n grad = self.vector_to_matrix_transform(grad)\n return grad\n\n # Define pymanopt problem\n problem = pyman.Problem(manifold=self.manifold, cost=cost, egrad=objective_grad, arg=x_tf, verbosity=2)\n\n # Optimize the parameters of the problem\n opt_x, opt_log = self.solver.solve(problem, x=initial)\n\n if self.matrix_to_vector_transform is not None:\n # Reshape the optimum from matrix to vector form\n opt_x = self.matrix_to_vector_transform(opt_x)\n\n # Format the result to fit with GPflowOpt\n result = sc_opt.OptimizeResult(x=opt_x, fun=opt_log['final_values']['f(x)'], nit=opt_log['final_values']['iterations'], message=opt_log['stoppingreason'], success=True)\n\n return result", "def _objective(self, params, model_ID, model_dict, X, y, **kwargs):\n model = model_dict['model']\n param_grid = model_dict['param_grid'].copy()\n params = params.copy()\n \n obj_verbose = max(0,self.verbose-2)\n \n type_X = str(type(X))\n \n if 'dask' in type_X:\n X = X.compute()\n y = y.compute()\n \n if obj_verbose>=2:\n print('params',params)\n \n params_transform, model = self._update_model_params(params, \n model_ID,\n model, \n param_grid)\n type_model = str(type(model))\n \n if obj_verbose>=2:\n print('params_transform',params_transform)\n if 'sklearn' in type_model or 'xgboost' in type_model:\n \n cv_scores = _sklearn_model_selection.cross_val_score(model, X, y,\n scoring= self.scoring['metric'],\n cv = self.cv,\n n_jobs= self.n_jobs,\n verbose = obj_verbose\n )\n\n else: #using neural net function\n import tensorflow as _tf\n #check for kwargs\n epochs = 100\n batch_size = 32\n callbacks = [_tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience =10)]\n for item in kwargs.items():\n if 'epochs' in item[0]: \n epochs = item[1]\n elif 'batch_size' in item[0]: \n batch_size = item[1]\n elif 'callbacks' in item[0]: \n callbacks = item[1] \n cv_scores = _NeuralNet.cross_val_score(model,\n batch_size,\n epochs,\n X, y,\n callbacks,\n scoring = self.scoring['metric'],\n cv = self.cv,\n verbose= obj_verbose)\n \n cv_score = _np.mean(cv_scores)\n \n if 'sklearn' in type_model or 'xgboost' in type_model:\n if self.scoring['maximize']==True or self.scoring['metric']==None:\n cv_score = 1/cv_score \n else:\n if self.scoring['maximize']==True and self.scoring['metric']!=None :\n cv_score = 1/cv_score \n \n objective = {'loss': cv_score,\n 'params': params,\n 'status': _hyperopt.STATUS_OK,\n 'eval_time': _time.time()}\n 
return objective", "def objective(params):\n param0, param1, param2 = params\n model = Model(param0, param1, param3)\n\n train_loss = train(model)\n val_loss = validate(model)\n\n return val_loss", "def _define_objective(self, inputs, labels, criterion, sources, target_classes, true_classes):\n def closure(model, optimizer, source_grad, source_clean_grad, source_gnorm):\n \"\"\"This function will be evaluated on all GPUs.\"\"\" # noqa: D401\n input_indcs, source_indcs = self._index_mapping(model, inputs, sources)\n\n feature_model, last_layer = bypass_last_layer(model)\n new_inputs = torch.zeros_like(inputs)\n new_sources = torch.zeros_like(sources)\n for i in range(len(input_indcs)):\n new_inputs[i] = inputs[input_indcs[i]]\n new_sources[i] = sources[source_indcs[i]]\n\n outputs = feature_model(new_inputs)\n prediction = (last_layer(outputs).data.argmax(dim=1) == labels).sum()\n outputs_sources = feature_model(new_sources)\n prediction = (last_layer(outputs).data.argmax(dim=1) == labels).sum()\n feature_loss = (outputs - outputs_sources).pow(2).mean(dim=1).sum()\n feature_loss.backward(retain_graph=self.retain)\n return feature_loss.detach().cpu(), prediction.detach().cpu()\n return closure", "def objective(params):\n\t# hyperopt casts as float\n\tparams['num_boost_round'] = int(params['num_boost_round'])\n\tparams['num_leaves'] = int(params['num_leaves'])\n\n\t# need to be passed as parameter\n\tparams['is_unbalance'] = True\n\tparams['verbose'] = -1\n\tparams['seed'] = 1\n\n\tcv_result = lgb.cv(\n\t\tparams,\n\t\tdtrain,\n\t\tnum_boost_round=params['num_boost_round'],\n\t\tmetrics='binary_logloss',\n\t\tnfold=3,\n\t\tearly_stopping_rounds=20,\n\t\tstratified=False)\n\tearly_stop_dict[objective.i] = len(cv_result['binary_logloss-mean'])\n\terror = round(cv_result['binary_logloss-mean'][-1], 4)\n\tobjective.i+=1\n\treturn error", "def objective(self, objective):\n\n self._objective = objective", "def objective(self, params):\n \n model_params = dict()\n training_params = dict()\n for param_name in self.model_param_names:\n model_params[param_name] = params[param_name]\n for param_name in self.training_param_names:\n training_params[param_name] = params[param_name]\n \n copy = self.m['model']\n self.m['model'] = self.m['model'](**model_params)\n self.m.update(training_params)\n \n model = Model(**self.m)\n data = self.datasets[model.data_type]\n\n start = timer()\n res, full_res = CV_fit(model, data, self.datasets)\n run_time = timer()-start\n\n loss = res[0]\n self.m['model'] = copy\n \n return {'loss': loss, 'params': params, 'run_time': run_time, 'status': STATUS_OK}", "def set_obj_fun(self):\n\n # disable button \"Edit Objective Function\"\n # self.ui.pb_edit_obj_func.setEnabled(False)\n a_str = str(self.le_a.text())\n state_a = self.is_le_addr_ok(self.le_a)\n b_str = str(self.le_b.text())\n state_b = self.is_le_addr_ok(self.le_b)\n c_str = str(self.le_c.text())\n state_c = self.is_le_addr_ok(self.le_c)\n func = str(self.le_of.text())\n def get_value_exp():\n A = 0.\n B = 0.\n C = 0.\n if state_a:\n A = self.mi.get_value(a_str)\n if state_b:\n B = self.mi.get_value(b_str)\n if state_c:\n C = self.mi.get_value(c_str)\n if func == \"\":\n return 0\n return eval(func)\n\n self.objective_func = get_value_exp\n\n return self.objective_func", "def objective(trial):\n # The parameters that we will calibrate the model for are shown here.\n # Optuna trial i\n BOD = trial.suggest_uniform(\"BOD\", 0, 1) #Review ranges here\n k_r = trial.suggest_uniform(\"k_r\", 0, 1) #Review Ranges here \n \n def 
ChLa(t):\n return 1 # Need to link to data\n\n def I(x):\n return 1 # Need to link to data\n\n K_z = 2 * 10**(-5) # p.51\n a = K_z\n k_b = 0.1 # Table 5\n th_b = 1.047 # Table 5\n k_r = 0.1 # Table 5\n YCHO2 = 0.0083 # Table 5\n th_p = 1.036 # Table 5\n th_s = 1.065 # Table 5\n th_r = 1.047 # Table 5\n\n def Temp(t):\n \"\"\"\n Function that maps time to temperature\n \"\"\"\n return 20 # Need to link to data\n\n def P_max(t):\n return 9.6 * 1.036 **(Temp(t) - 20) # Eq. 4\n\n def L_min(t):\n I = 1 # Need to link to PAR data\n K_1 = 0.687 * 1.086**(Temp(t) - 20)\n K_2 = 15\n return I * (1 + 2 * np.sqrt(K_1 / K_2)) / (I + K_1 + I**2 / K_2) # Eq. 5\n \n # f deals with sink and source terms \n def f(x, t):\n return -1 / YCHO2 * k_r * th_r**(Temp(t) - 20) * ChLa(t) + P_max(t) * L_min(t) * ChLa(t) - k_b * th_b**(Temp(t)-20) * BOD \n\n L = 200 # Length of domain\n dt = 1 / 48 # Mesh spacing in t\n F = a * dt # a * dt / dx**2\n T = 100 # Simulation time stop\n\n # Solving the PDE\n DO, x, t, _ = solver_FE_simple(I, a, f, L, dt, F, T)\n \n # Creating some bogus targets while database errors are happening\n DO_data = DO + np.random.random(len(DO))\n\n # Using mean squared error as the measure of fit, where we want\n # to minimize this number\n return ((DO - DO_data)**2).mean()", "def _objective_cost(self):\n\n def obj_expression_simple(model):\n total = model.A_total + model.A2_total + model.A3_total + \\\n model.A4_total\n return -total\n\n def obj_expression(model):\n total = model.A_total + model.A2_total + model.A3_total + \\\n model.A4_total\n total += model.Completion_total\n total += model.Affinity_cognitive_total\n total += model.CTu_total + model.CTl_total + model.S_total\n return -total\n\n # self.model.exp_cost = Expression(rule=obj_expression)\n # self.model.obj_cost = Objective(rule=self.model.exp_cost)\n # self.model.obj_cost = Objective(rule=obj_expression_simple)\n self.model.obj_cost = Objective(rule=obj_expression)", "def _create_objective(self, vf_coeff, entropy_reg):\n actor, critic, mask = self.model.batch_outputs()\n dist = self.model.action_dist\n log_probs = dist.log_prob(actor, self._actions)\n entropies = dist.entropy(actor)\n critic_error = self._target_vals - critic\n self.actor_loss = -util.masked_mean(mask, log_probs * self._advs)\n self.critic_loss = util.masked_mean(mask, tf.square(critic_error))\n self.entropy = util.masked_mean(mask, entropies)\n self.objective = (entropy_reg * self.entropy - self.actor_loss -\n vf_coeff * self.critic_loss)\n self.explained_var = self._compute_explained_var(mask)", "def optimize(\n # trials,\n random_state=SEED):\n\n space = {\n 'max_depth': scope.int(hp.uniform('max_depth', 5, 15)),\n 'subsample': hp.uniform('subsample', 0.03, 1),\n 'learning_rate' : hp.loguniform('learning_rate', np.log(0.005), np.log(0.5)) - 0.0001,\n 'colsample_bytree': hp.uniform('colsample_bytree', 0.3, 1),\n 'reg_alpha': hp.loguniform('reg_alpha', np.log(0.005), np.log(5)) - 0.0001,\n 'reg_lambda': hp.loguniform('reg_lambda', np.log(1), np.log(5)),\n 'bagging_freq': hp.choice('bagging_freq', [0, 1]),\n 'num_leaves': scope.int(hp.uniform('num_leaves', 10, 128)),\n 'n_estimators': 1000,\n 'boosting': 'gbdt',\n 'objective': 'multiclass',\n 'num_class': 12,\n 'metric': 'None',\n 'is_unbalance': 'true',\n # 'min_data_per_group': 1000,\n 'verbose': -1,\n 'random_seed': 42,\n \n }\n\n # Use the fmin function from Hyperopt to find the best hyperparameters\n best = fmin(score_model, space, algo=tpe.suggest,\n # trials=trials,\n max_evals=hyperopt_niters)\n return 
best", "def _define_objective(self, inputs, labels, targets, intended_classes, true_classes):\n def closure(model, criterion, optimizer, target_grad, target_gnorm):\n \"\"\"This function will be evaluated on all GPUs.\"\"\" # noqa: D401\n outputs = model(inputs)\n if self.args.target_criterion in ['cw', 'carlini-wagner']:\n criterion = cw_loss\n else:\n pass # use the default for untargeted or targeted cross entropy\n poison_loss = criterion(outputs, labels)\n prediction = (outputs.data.argmax(dim=1) == labels).sum()\n poison_grad = torch.autograd.grad(poison_loss, model.parameters(), retain_graph=True, create_graph=True, only_inputs=True)\n\n # add noise to samples\n self._hide_gradient(poison_grad)\n\n # Compute blind passenger loss\n passenger_loss = self._passenger_loss(poison_grad, target_grad, target_gnorm)\n if self.args.centreg != 0:\n passenger_loss = passenger_loss + self.args.centreg * poison_loss\n passenger_loss.backward(retain_graph=self.retain)\n return passenger_loss.detach().cpu(), prediction.detach().cpu()\n return closure", "def addObjective(self, *args):\n return _libsbml.FbcModelPlugin_addObjective(self, *args)", "def buildObjective(self):\r\n\r\n # self.z_prior might be the modified version\r\n self.L_elbo = T.mean(self.reconst + self.conditional_prior + self.w_prior + self.z_prior)\r\n\r\n self.L_elbo_modif = T.mean(self.reconst + self.conditional_prior + self.w_prior_modif + self.z_prior_modif)\r\n\r\n #---Getting model parameter---#\r\n cg = ComputationGraph(self.L_elbo)\r\n #self.phi_theta is the list of all the parameters in q and p.\r\n self.params = VariableFilter(roles=[PARAMETER])(cg.variables)", "def objective_function(params):\n\n\tenergy = 0 # Initialize the energy in 0\n\n\tqc = get_var_form(params) # Obtain a quantum circuit instance from the parameters\n\n\tfor key in pauli_weights.keys(): # Iterate over the pauli string in the Pauli weight\n\n\t\tmc, n_measures = measure_circuit_factory(key) # Obtain the measurement circuit from the Pauli string\n\t\tqc_final = qc.compose(mc) # Combine both circuits\n\n\t\t# Execute the quantum circuit to obtain the probability distribution associated with the current parameters\n\t\tt_qc = transpile(qc_final, backend)\n\t\tq_obj = assemble(t_qc, shots=NUM_SHOTS)\n\t\tcounts = backend.run(q_obj).result().get_counts(qc_final)\n\n\t\tdistribution = get_distribution(counts, n_measures) # Convert the measured counts into a probability vector\n\n\t\t# Weight each probability by the diagonal factor, them sum all of them, and later multiply by the Pauli Weight\n\t\tenergy += np.sum(distribution * generate_diagonal_factors(n_measures)) * pauli_weights[key]\n\n\tenergy_list.append(energy) # Append the new computed energy\n\n\t# Print the iteration of the VQE and the energy\n\tprint('Iteration {}, Energy: {:.4f}'.format(len(energy_list), energy))\n\n\treturn energy", "def _make_objective(ppci, net):\n\n ng = len(ppci[\"gen\"])\n\n # Determine length of gencost array\n if (net.piecewise_linear_cost.type == \"q\").any() or (net.polynomial_cost.type == \"q\").any():\n len_gencost = 2 * ng\n else:\n len_gencost = 1 * ng\n\n # get indices\n eg_idx = net._pd2ppc_lookups[\"ext_grid\"] if \"ext_grid\" in net._pd2ppc_lookups else None\n gen_idx = net._pd2ppc_lookups[\"gen\"] if \"gen\" in net._pd2ppc_lookups else None\n sgen_idx = net._pd2ppc_lookups[\"sgen_controllable\"] if \"sgen_controllable\" in \\\n net._pd2ppc_lookups else None\n load_idx = net._pd2ppc_lookups[\"load_controllable\"] if \"load_controllable\" in \\\n 
net._pd2ppc_lookups else None\n dc_gens = net.gen.index[(len(net.gen) - len(net.dcline) * 2):]\n from_gens = net.gen.loc[dc_gens[1::2]]\n if gen_idx is not None:\n dcline_idx = gen_idx[from_gens.index]\n\n # calculate size of gencost array\n if len(net.piecewise_linear_cost):\n n_coefficients = net.piecewise_linear_cost.p.values[0].shape[1] * 2\n else:\n n_coefficients = 0\n if len(net.polynomial_cost):\n n_coefficients = max(n_coefficients, net.polynomial_cost.c.values[0].shape[1], 4)\n\n if n_coefficients:\n # initialize array\n ppci[\"gencost\"] = zeros((len_gencost, 4 + n_coefficients), dtype=float)\n ppci[\"gencost\"][:, MODEL:COST + 4] = array([1, 0, 0, 2, 0, 0, 1, 0])\n\n if len(net.piecewise_linear_cost):\n\n for type in [\"p\", \"q\"]:\n if (net.piecewise_linear_cost.type == type).any():\n costs = net.piecewise_linear_cost[net.piecewise_linear_cost.type == type]\n p = concatenate(costs.p)\n f = concatenate(costs.f)\n\n if type == \"q\":\n shift_idx = ng\n else:\n shift_idx = 0\n\n for el in [\"gen\", \"sgen\", \"ext_grid\", \"load\", \"dcline\"]:\n\n if not costs.element[costs.element_type == el].empty:\n if el == \"gen\":\n idx = gen_idx\n if el == \"sgen\":\n idx = sgen_idx\n if el == \"ext_grid\":\n idx = eg_idx\n if el == \"load\":\n idx = load_idx\n if el == \"dcline\":\n idx = dcline_idx\n\n if not costs.element[costs.element_type == el].empty:\n elements = idx[costs.element[costs.element_type ==\n el].values.astype(int)] + shift_idx\n ppci[\"gencost\"][elements, COST::2] = p[\n costs.index[costs.element_type == el]]\n if el in [\"load\", \"dcline\"]:\n ppci[\"gencost\"][elements, COST + 1::2] = - \\\n f[costs.index[costs.element_type == el]] * 1e3\n else:\n ppci[\"gencost\"][elements, COST + 1::2] = f[\n costs.index[costs.element_type == el]] * 1e3\n\n ppci[\"gencost\"][elements, NCOST] = n_coefficients / 2\n ppci[\"gencost\"][elements, MODEL] = 1\n\n if len(net.polynomial_cost):\n\n for type in [\"p\", \"q\"]:\n if (net.polynomial_cost.type == type).any():\n costs = net.polynomial_cost[net.polynomial_cost.type == type]\n c = concatenate(costs.c)\n n_c = c.shape[1]\n c = c * power(1e3, array(range(n_c))[::-1])\n\n if type == \"q\":\n shift_idx = ng\n else:\n shift_idx = 0\n\n for el in [\"gen\", \"sgen\", \"ext_grid\", \"load\", \"dcline\"]:\n\n if not costs.element[costs.element_type == el].empty:\n if el == \"gen\":\n idx = gen_idx\n if el == \"sgen\":\n idx = sgen_idx\n if el == \"ext_grid\":\n idx = eg_idx\n if el == \"load\":\n idx = load_idx\n if el == \"dcline\":\n idx = dcline_idx\n\n elements = idx[costs.element[costs.element_type ==\n el].values.astype(int)] + shift_idx\n if el in [\"load\", \"dcline\"]:\n ppci[\"gencost\"][elements, COST:(COST + n_c):] = - \\\n c[costs.index[costs.element_type == el]]\n else:\n ppci[\"gencost\"][elements, COST:(\n COST + n_c):] = c[costs.index[costs.element_type == el]]\n\n ppci[\"gencost\"][elements, NCOST] = n_c\n ppci[\"gencost\"][elements, MODEL] = 2\n\n else:\n ppci[\"gencost\"] = zeros((len_gencost, 8), dtype=float)\n # initialize as pwl cost - otherwise we will get a user warning from\n # pypower for unspecified costs.\n ppci[\"gencost\"][:, :] = array([1, 0, 0, 2, 0, 0, 1, 1000])\n\n return ppci", "def get_objective(self, X_v, U_v, X_last_p, U_last_p):\n objective = None\n return objective", "def step_and_cost(self, objective_fn, *args, **kwargs):\n x_new = self.step(objective_fn, *args, **kwargs)\n\n return x_new, objective_fn(*args, **kwargs)", "def get_optimization_function(config):\n return 
get_pulp_optimization_function(build_problem, config)", "def createObjective(self):\n return _libsbml.FbcModelPlugin_createObjective(self)", "def _objective(self, trial, X, y, weights=None, split=None):\n\n # Generate even weights if none\n if weights is None:\n weights = pd.Series(np.ones(len(y)), index=y.index)\n else:\n weights = pd.Series(weights, index=y.index)\n\n # Execute trial function\n try:\n res = eval(self.function)\n except:\n raise RuntimeError(f\"Optuna execution error: {self.function}\")\n\n # If indicator result is tuple, select the one of interest\n if isinstance(res, tuple):\n res = res[self.idx]\n\n # Ensure result is a dataframe with same index as X\n res = pd.DataFrame(res, index=X.index)\n\n # If indicator result is dataframe, select the one of interest\n if len(res.columns) > 1:\n res = pd.DataFrame(res.iloc[:, self.idx])\n\n # y may be a subset of X, so reduce result to y and convert to series\n res_y = res.reindex(y.index).iloc[:, 0].replace([np.inf, -np.inf], np.nan)\n\n # Save all trial results for pruning and reporting\n # Only the best trial will eventually be saved to limit storage requirements\n self.res_y.append(res_y) # Save results\n\n # Indicator result may be all NANs based on parameter set\n # Return FALSE and alert\n if np.isnan(res_y).sum() / len(res_y) > .95: # Most or all NANs\n self.res_y_corr.append(np.zeros(len(y)))\n if split is not None:\n return tuple([False] * (len(split) - 1))\n else:\n return False\n\n # Obtain correlation for entire dataset\n if self.spearman:\n corr = _weighted_spearman(np.array(y), np.array(res_y), np.array(weights))\n else:\n corr = _weighted_pearson(np.array(y), np.array(res_y), np.array(weights))\n\n # Save correlation for res_y\n self.res_y_corr.append(corr)\n\n # Multi-objective optimization\n # Obtain correlation to target for each split for Optuna to maximize\n if split is not None:\n mo = []\n for i, e in enumerate(split):\n if i == 0:\n s = e\n continue\n\n # y could be a subset of X, use index of X to filter y\n idx = X[s:e].index\n\n # Filter y based on X split\n y_se = np.array(y[y.index.isin(idx)]).astype('float64')\n\n # Filter y predictions based on X split\n res_y_se = np.array(res_y[res_y.index.isin(idx)]).astype('float64')\n\n # Filter weights based on X split\n weights_se = np.array(weights[weights.index.isin(idx)]).astype('float64')\n\n if np.isnan(res_y_se).sum() / len(res_y_se) > .95:\n return tuple([False]*(len(split)-1))\n\n if self.spearman:\n mo.append(_weighted_spearman(y_se, res_y_se, weights_se))\n else:\n mo.append(_weighted_pearson(y_se, res_y_se, weights_se))\n s = e\n return tuple(mo)\n\n # Single objective optimization return corr for entire dataset\n else:\n return corr", "def min_scalar(objective, **kwargs):\n result = minimize_scalar(objective, **kwargs)\n return result.fun", "def _define_objective(self, inputs, labels, targets, intended_classes=None, true_classes=None):\n def closure(model, criterion, optimizer, target_grad, target_gnorm):\n \"\"\"This function will be evaluated on all GPUs.\"\"\" # noqa: D401\n outputs = model(inputs)\n if self.args.target_criterion in ['cw', 'carlini-wagner']:\n criterion = cw_loss\n else:\n pass # use the default for untargeted or targeted cross entropy\n poison_loss = criterion(outputs, labels)\n prediction = (outputs.data.argmax(dim=1) == labels).sum()\n poison_grad = torch.autograd.grad(poison_loss, model.parameters(), retain_graph=True, create_graph=True)\n\n passenger_loss = self._passenger_loss(poison_grad, target_grad, target_gnorm)\n if 
self.args.centreg != 0:\n passenger_loss = passenger_loss + self.args.centreg * poison_loss\n passenger_loss.backward(retain_graph=self.retain)\n return passenger_loss.detach().cpu(), prediction.detach().cpu()\n return closure", "def propose_optimize():\n pass", "def optimizer(grad, method, init_par, alpha, delta, plx_obs, mualpha_obs, mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N):\r\n\r\n\t\r\n\tif grad == 'NO':\r\n\t\tif method == 'Powell' :\r\n\t\t\tres = opt.minimize(Ulike,init_par, method = method,\r\n\t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N))\r\n\t\t\treturn res.x, res.nit\r\n\t\telif method == 'Nelder-Mead':\r\n\t\t\tres = opt.minimize(Ulike,init_par, method = method,\r\n\t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N),\r\n\t\t\t\t options = {'ftol': 0.0001})\r\n\t\t\treturn res.x, res.nit\r\n\t\telif method == 'default':\r\n\t\t\tres = opt.minimize(Ulike,init_par, \r\n\t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N))\r\n\t\t\treturn res.x, res.nit\r\n\r\n\telif grad == 'YES':\r\n\t\tres = opt.minimize(Ulike, init_par, method = method, jac = stella_grad_full, \r\n \t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N),\r\n\t\t\t options={'disp': True, 'maxiter': 4000, 'xtol': 1e-4})\r\n\t\treturn res.x, res.nit \r\n\t\t\t\r\n\t\t\r\n\telif grad == 'HESS':\r\n\t\tres = opt.minimize(Ulike, init_par, method = method, jac = stella_grad_full, hess = stella_hessian,\r\n\t\t\t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N),\r\n\t\t\t\t\t options = {'disp': True, 'maxiter': 4000, 'xtol': 1.e-06}) \r\n\t\treturn res.x, res.nit", "def _resolve_objective_function(self) -> Scorer:\n\n objective = self.cfg_.objective\n if objective == 'accuracy':\n return make_scorer(ex.accuracy_score_round_inputs)\n if objective.startswith('precision'):\n if objective.endswith('macro'):\n return make_scorer(ex.precision_score_round_inputs,\n average='macro')\n elif objective.endswith('weighted'):\n return make_scorer(ex.precision_score_round_inputs,\n average='weighted')\n if objective.startswith('f1'):\n if objective.endswith('macro'):\n return make_scorer(ex.f1_score_round_inputs,\n average='macro')\n elif objective.endswith('weighted'):\n return make_scorer(ex.f1_score_round_inputs,\n average='weighted')\n elif objective.endswith('least_frequent'):\n return make_scorer(ex.f1_score_least_frequent_round_inputs)\n if objective == 'pearson_r':\n return make_scorer(pearson)\n if objective == 'spearman':\n return make_scorer(spearman)\n if objective == 'kendall_tau':\n return make_scorer(kendall_tau)\n if objective.startswith('uwk'):\n if objective == 'uwk':\n return make_scorer(ex.kappa_round_inputs)\n return make_scorer(ex.kappa_round_inputs,\n allow_off_by_one=True)\n if objective.startswith('lwk'):\n if objective == 'lwk':\n return make_scorer(ex.kappa_round_inputs,\n weights='linear')\n return make_scorer(ex.kappa_round_inputs,\n weights='linear',\n allow_off_by_one=True)\n if objective.startswith('qwk'):\n if objective == 'qwk':\n return make_scorer(ex.kappa_round_inputs,\n weights='quadratic')\n return make_scorer(ex.kappa_round_inputs,\n weights='quadratic',\n allow_off_by_one=True)\n return objective", "def _update_objective(self):\n # rewrap the cost if the solver has been run\n self.Finalize()\n return", 
"def objective_function(self, variables, mask, load, generation, annuity_scalar=1):\n size = sum(mask)\n # pay for reg down energy, get paid for reg up energy\n # paid revenue for capacity to do both\n\n p_regu = cvx.Parameter(size, value=self.p_regu.loc[mask].values, name='p_regu')\n p_regd = cvx.Parameter(size, value=self.p_regd.loc[mask].values, name='p_regd')\n p_ene = cvx.Parameter(size, value=self.price.loc[mask].values, name='price')\n\n regup_charge_payment = cvx.sum(variables['regu_c'] * -p_regu) * annuity_scalar\n regup_charge_settlement = cvx.sum(variables['regu_c'] * -p_ene) * self.dt * self.kru_avg * annuity_scalar\n\n regup_disch_payment = cvx.sum(variables['regu_d'] * -p_regu) * annuity_scalar\n regup_disch_settlement = cvx.sum(variables['regu_d'] * -p_ene) * self.dt * self.kru_avg * annuity_scalar\n\n regdown_charge_payment = cvx.sum(variables['regd_c'] * -p_regd) * annuity_scalar\n regdown_charge_settlement = cvx.sum(variables['regd_c'] * p_ene) * self.dt * self.krd_avg * annuity_scalar\n\n regdown_disch_payment = cvx.sum(variables['regd_d'] * -p_regd) * annuity_scalar\n regdown_disch_settlement = cvx.sum(variables['regd_d'] * p_ene) * self.dt * self.krd_avg * annuity_scalar\n\n return {'regup_payment': regup_charge_payment + regup_disch_payment,\n 'regdown_payment': regdown_charge_payment + regdown_disch_payment,\n 'fr_energy_settlement': regup_disch_settlement + regdown_disch_settlement + regup_charge_settlement + regdown_charge_settlement}", "def get_objective(self, sampler=None):\n def objective(params):\n circuit = self.get_circuit(params)\n circuit.make_cache()\n return self.get_energy(circuit, sampler)\n\n def obj_expect(params):\n circuit = self.get_circuit(params)\n circuit.make_cache()\n return self.get_energy_sparse(circuit)\n\n if sampler is not None:\n return objective\n if self.sparse is None:\n self.make_sparse()\n return obj_expect", "def get_objective(\n self,\n fun: bool = True,\n res: bool = True,\n max_sensi_order: int = 2,\n fim_for_hess: bool = False,\n ):\n if fim_for_hess:\n fhess = self.get_ffim()\n else:\n fhess = self.get_fs2nllh()\n\n return pypesto.Objective(\n fun=self.get_fnllh() if fun else None,\n grad=self.get_fsnllh() if fun and max_sensi_order >= 1 else None,\n hess=fhess if fun and max_sensi_order >= 2 else None,\n res=self.get_fres() if res else None,\n sres=self.get_fsres() if res and max_sensi_order >= 1 else None,\n )", "def _make_train(self):\n with context.context(training=True):\n prediction = self(*self.inputs)\n thecost = self.cost(self.target, prediction)\n return theano.function(self.inputs + [self.target], \n thecost, \n updates=self.updater.get_updates(self.params(), thecost))", "def rpt_objective(opt_objective):\n if opt_objective == \"lgst\": return \"logl\"\n else: return opt_objective", "def add_objective(self, objective):\n self.objectives.append(objective)", "def _make_train_function(self):\n if self.train_function is None:\n print('compiling train function...')\n start = time.time()\n inputs = self._estimator.inputs + [self.T_Y]\n\n training_updates = self.optimizer.get_updates(\n self._estimator.trainable_weights,\n {}, self.fqi_loss)\n\n # returns loss and metrics. 
Updates weights at each call.\n self.train_function = theano.function(inputs, [self.fqi_loss],\n updates=training_updates,\n name=\"trainer\")\n print('compiled in {}s'.format(time.time() - start))", "def __repr__(self):\n return ('ObjectiveFunction({}, {})').format(self.func.__name__, self.objective)", "def optimizer_creator(model, config):\n return torch.optim.SGD(model.parameters(), lr=config.get(\"lr\", 1e-4))", "def __init__(self, objective_function, constraint, dumper=None):\n self.total_iterations = 0\n self.maximum_iterations = 3000\n self.precision = np.sqrt(np.finfo(float).eps)\n self.constraint = constraint\n self.objective_fun = objective_function\n if dumper is None:\n self.use_dumper = False\n else:\n self.use_dumper = True\n self.dumper = dumper\n\n # Used to let the all parts of the solver be aware of the active constraints\n self.active_constraints_index = 0\n self.active_constraints_set = False\n\n # Used for exit information\n self.convergence_reached_tag = 1\n self.maximum_iterations_reached_tag = 2\n self.unknown_exit = 99\n return", "def objective(self,w):\n diffs = self.get_y_times_diffs(self.get_split_weights(w))\n #print diffs, sigmoid(diffs)\n obj = -np.sum(np.log(sigmoid(diffs))) #negative, since minimising\n # regularisation\n obj += 0.5 * self.alpha * np.dot(w[:self.interp_index[0]], w[:self.interp_index[0]])\n return obj", "def _objective_function(self, thetas, X, Y):\n \n # Convert thetas vector to form total_cost can understand\n thetas = self.reshape_thetas(thetas, 'list')\n self.thetas = thetas\n \n # Get cost function value\n fval = self.total_cost(X, Y, thetas)\n \n # Get derivatives using back propagation\n Deltas = self.get_gradients(X, Y)\n dfval = self.reshape_thetas(Deltas, 'vector')\n \n return fval, dfval", "def get_Objective(self):\r\n \r\n ans = 0\r\n for i in range(len(self.X.shape)):\r\n ans += self.parameter['phi'][i] * self.parameter['lambda1'] *norm(tl.unfold(self.Wlist[i], i), 'nuc') + (1 / self.parameter['m']) * norm(tl.unfold(self.X - self.Wlist[i] - self.V, i))\r\n\r\n # Augmented part is calculated seperately. 
\r\n augment_part1 = 0.5 * self.parameter['rho1'] * norm(self.V - self.T + self.F1)\r\n augment_part2 = 0.5 * self.parameter['rho2'] * norm(tl.fold(np.dot(self.Dmatrix, tl.unfold(self.T, 0)), 0, self.T.shape) - self.S + self.F2)\r\n\r\n # Combine the result for final objective function\r\n ans += self.parameter['beta1'] * norm(self.V.reshape(self.totaldim), 1) + self.parameter['beta2'] * norm(self.S.reshape(self.totaldim), 1) + augment_part1 + augment_part2 \r\n return ans", "def __init__(self, objectiveFcn, gradientFcn, stepFcn):\n self.objective = objectiveFcn\n self.gradient = gradientFcn\n self.createStep = stepFcn", "def __init__(self, cost_func):\n super().__init__(cost_func)\n\n self.support_for_bounds = True\n self._popt = None\n self._status = None\n self._maxiter = None", "def fit(self):\n if self.minimizer == \"differential_evolution\":\n kwargs = {\"maxiter\": self._maxiter}\n elif self.minimizer == \"shgo\":\n kwargs = {\"options\": {\"maxiter\": self._maxiter,\n \"jac\": self.cost_func.jac_cost}}\n elif self.minimizer == \"dual_annealing\":\n kwargs = {\"maxiter\": self._maxiter, \"local_search_options\": {\n \"jac\": self.cost_func.jac_cost}}\n fun = self.cost_func.eval_cost\n bounds = self.value_ranges\n algorithm = getattr(optimize, self.minimizer)\n result = algorithm(fun, bounds, **kwargs)\n self._popt = result.x\n if result.success:\n self._status = 0\n elif \"Maximum number of iteration\" in result.message:\n self._status = 1\n else:\n self._status = 2", "def addObjective(self, *args):\n return _libsbml.ListOfObjectives_addObjective(self, *args)", "def createObjective(self):\n return _libsbml.ListOfObjectives_createObjective(self)", "def __init__(self,func ,domain_space, max_evals = 10):\n self.func = func\n # optimizing for FLOAT values\n #self.space = hp.uniform('x', 36, 200)\n # optimizing for Integer values\n self.space = domain_space\n self.algorithm = tpe.suggest # creating algorithm\n self.trials = Trials() # to check records\n self.max_evals = max_evals", "def optim_func(params, model):\n if model.model == 'ARD':\n model.alpha, model.beta = params\n lik = model.pruning_algorithm()\n\n else:\n model.alpha = params[0]\n lik = model.pruning_algorithm()\n \n return -lik", "def slo(self, objective=99.99):\n self.objective = objective\n return objective", "def make_objective(name, data, pset, unconfined_objective=None, bset=None,\n num_points=101, withtail=True):\n h2o = SLD(-0.54, 'h2o')\n melCM = SLD(2.60, 'Melinex CM soln')\n melinex = SLD(2.56, 'Melinex') \n sio2 = SLD(3.47, 'sio2')\n si = SLD(2.07, 'si')\n polymer = SLD(0.85, 'PNIPAM')\n\n # Make sure nothing in our (already fitted) unconfined objective varys.\n\n\n si_l = si(0, 0)\n\n # Silica\n sio2_l = sio2(14.6, 2)\n sio2_l.vfsolv.setp (value=0.003)\n\n polymer_l_mel = area_slabT(adsorbed_amount=200, dry_sld=polymer, rough=2,\n thick=1500, name='polymer')\n\n mellinex_l = melinex(0,14)\n mellinex_l.rough.setp (vary=False, bounds=(1, 20))\n\n h2o_l = h2o(0,10)\n\n structure_mel = si_l | sio2_l | polymer_l_mel | mellinex_l\n\n if unconfined_objective is not None:\n for vp in unconfined_objective.varying_parameters().flattened():\n vp.setp(vary=False)\n unconfined_objective.model.structure[3].adsorbed_amount = polymer_l_mel.adsorbed_amount\n structure_h2o = si_l | sio2_l | unconfined_objective.model.structure[2] | unconfined_objective.model.structure[3] | h2o_l\n else:\n structure_h2o = si_l | h2o_l\n\n\n structure_mel.solvent = h2o\n structure_h2o.solvent = h2o\n\n if unconfined_objective is not None:\n 
structure_h2o.contract = 1.5\n\n distmodel = DistributionModel(structure_mel, loc_in_struct=2,\n num_structs=num_points, pdf=dist_pdf,\n pdf_kwargs={'loc':1, 'scale':1, 'a':1, 'tail':0.00, 'tail_len':400})\n distmodel.pdf_params[0].setp(value=210)\n distmodel.pdf_params[1].setp(value=6)\n distmodel.pdf_params[2].setp(value=3)\n\n if withtail:\n distmodel.pdf_params[3].setp(value=0.0001)\n distmodel.pdf_params[4].setp(value=400)\n else:\n distmodel.pdf_params[3].setp(value=0, vary=False)\n distmodel.pdf_params[4].setp(value=0, vary=False) \n\n h2omodel = ReflectModel(structure_h2o, name='h2o')\n\n\n sratio = Parameter(value=0.1, name='scale ratio')\n\n model = MetaModel([distmodel, h2omodel], [0.5, 0.5], add_params=[sratio])\n model.scales[0].setp(value=0.97)\n model.scales[1] = model.scales[0]*sratio\n model.bkg.setp(value=0)\n\n obj = Objective(model, data, name=name)\n for key in pset:\n set_param(obj.parameters, key, value=pset[key])\n\n if bset is not None:\n for key in bset:\n set_param(obj.parameters, key, bounds=bset[key])\n\n return obj", "def __init__(self, method=None, objective=None):\n self._FUNC_DICT = {'spring': self.spring, 'mi_spring': self.mi_spring, \n 'welded_beam': self.welded_beam, \n 'pressure_vessel': self.pressure_vessel, \n 'mi_pressure_vessel': self.mi_pressure_vessel, \n 'speed_reducer': self.speed_reducer, \n 'mi_chemical_process': self.mi_chemical_process, \n 'ackley': self.ackley, \n 'shifted_ackley': self.shifted_ackley, \n 'dejong': self.dejong, \n 'shifted_dejong': self.shifted_dejong, \n 'easom': self.easom, \n 'shifted_easom': self.shifted_easom, \n 'griewank': self.griewank, \n 'shifted_griewank': self.shifted_griewank, \n 'rastrigin': self.rastrigin, \n 'shifted_rastrigin': self.shifted_rastrigin, \n 'rosenbrock': self.rosenbrock, \n 'shifted_rosenbrock': self.shifted_rosenbrock, \n 'tsp': self.tsp}\n if method != None and type(method) == str:\n self.set_obj_func(method)\n else:\n self.func = method\n self.objective = objective\n return", "def optimization_step(self):\n \n if \"CSS\" in self.algorithm:\n \n input_dict = {self.x: self.train_inputs[self.minibatch_set,:]}\n \n var_list = [self.x_tilda, self.minibatch_set]\n \n if (self.num_samples > 0) and (not self.mixture):\n \n if ((self.mf_steps > 0) and self.alpha >0) or\\\n self.gibbs_steps > 0: \n \n var_list.append(self.sampler_theta)\n \n elif \"CD\" in self.algorithm:\n \n input_dict = {self.x : self.train_inputs[self.minibatch_set,:]} \n \n var_list = [self.minibatch_set]\n \n var_list.append(self.learning_rate)\n \n if self.use_momentum:\n \n var_list.append(self.momentum)\n \n output_vars = [self.pseudo_cost]\n \n if self.report_p_tilda:\n \n output_vars.append(self.p_tilda)\n \n else:\n \n output_vars.append(theano.shared(0))\n \n opt_step = theano.function(inputs = var_list,\n outputs = output_vars,\n updates = self.updates,\n givens = input_dict,\n on_unused_input='warn')\n \n return opt_step", "def objective_function(self, parameters, train_y, validate_y, train_x=None, validate_x=None, random_seed=None):\n arguments = self.construct_arguments(self.range_bounds)\n\n # Build network \n esn = self.model(**arguments, activation_f = self.activation_function,\n plot = False, model_type = self.model_type,\n input_weight_type = self.input_weight_type, already_normalized = already_normalized)\n #random_seed = self.random_seed) Distance_matrix = self.Distance_matrix)\n #bs_idx = self.obs_index, resp_idx = self.target_index, \n\n # Train\n esn.fit(x=train_x, y=train_y, burn_in=self.esn_burn_in)\n\n # 
Validation score\n score = esn.test2(x=validate_x, y=validate_y, scoring_method=self.scoring_method, \n steps_ahead=self.steps_ahead, alpha=self.alpha)\n\n return score", "def costFun(self, S, x):", "def _construct_train_joint(self):\n # setup some symbolic variables for theano to deal with\n xi = T.matrix()\n xo = T.matrix()\n br = T.lscalar()\n # collect the outputs to return from this function\n outputs = [self.joint_cost, self.nll_cost, self.kld_cost, \\\n self.reg_cost, self.obs_costs]\n # compile the theano function\n _, hi_zmuv = self._construct_zmuv_samples(xi, br)\n func = theano.function(inputs=[ xi, xo, br ], \\\n outputs=outputs, \\\n givens={ self.x_in: xi.repeat(br, axis=0), \\\n self.x_out: xo.repeat(br, axis=0), \\\n self.hi_zmuv: hi_zmuv }, \\\n updates=self.joint_updates)\n return func", "def objective_function(self, x):\n self._set_params_transformed(x)\n return -self.log_likelihood() - self.log_prior()", "def objective(state, config, reporter):\n # Unpack config. Log parameters.\n lr = config[\"lr\"]\n decay = config[\"decay\"]\n print(\"LEARNING_RATE={}\\nDECAY={}\"\n \"\".format(lr, decay))\n\n # Build necessary grape arguments using parameters.\n U = state.unitary\n convergence = {'rate': lr,\n 'max_iterations': GRAPE_MAX_ITERATIONS,\n 'learning_rate_decay': decay}\n pulse_time = state.pulse_time\n steps = int(pulse_time * SPN)\n \n # Run grape.\n grape_start_time = time.time()\n print(\"GRAPE_START_TIME={}\".format(grape_start_time))\n grape_sess = Grape(U=U, total_time=pulse_time, steps=steps,\n convergence=convergence, file_name=state.file_name,\n data_path=state.data_path, **state.grape_config)\n grape_end_time = time.time()\n print(\"GRAPE_END_TIME={}\".format(grape_end_time))\n\n \n # Log results.\n loss = grape_sess.l\n print(\"LOSS={}\".format(loss))\n trial = {\n 'lr': lr,\n 'decay': decay,\n 'loss': loss,\n 'wall_run_time': grape_end_time - grape_start_time,\n }\n trial_file = state.file_name + \".json\"\n trial_file_path = os.path.join(state.data_path, trial_file)\n with open(trial_file_path, \"a+\") as trial_file:\n trial_file.write(json.dumps(trial, cls=CustomJSONEncoder)\n + \"\\n\")\n \n # Report results.\n reporter(neg_loss=-loss, done=True)", "def _objective(self,output, expected,device):\r\n #\r\n #output = output.to(device = expected.device) #\r\n \r\n output = torch.where(output == 0,self.saf,output)\r\n #output = torch.where(output == 1,torch.tensor([0.999999],device = device),output)\r\n out1 = torch.mul(-expected,torch.log(output))\r\n #out2 = torch.mul(expected-1,torch.log(1 - output))\r\n #out = torch.add(out1,out2)\r\n return torch.sum(out1, dim = 1)", "def objective(self):\n return self._objective", "def _optfun(theta):\n return optfun(theta)[:2]", "def _optfn(self, x):\n\n logger.debug(\" optfn(theta=%s)\", str(x))\n\n wmx = max(self.weights) * self.weighttrunc\n\n ip = []\n for i,w in enumerate(self.weights):\n if w < wmx:\n continue\n ip.append((i,w,x))\n\n if self.pool is None:\n itr = map(self.worker.loglik_grad, ip)\n else:\n itr = self.pool.imap_unordered(_pool_loglik_grad, ip, 10)\n\n if self._prior_shape is None:\n ll = 0.\n grad = np.zeros(len(x))\n else:\n ll = sum(sp.special.xlogy(self._prior_shape-1,x)-(x/self._prior_scale))\n grad = (self._prior_shape - 1)/x - 1/self._prior_scale\n\n for l,g in itr:\n ll += l\n grad += g\n\n logger.debug(\" optfn=%g\", ll)\n\n return -ll, -grad", "def create_cost_function(name, *args):\n module = import_module(\"torch.nn\")\n clazz = getattr(module, name)\n instance = clazz(*args)\n\n return 
instance", "def objective_function(theta, X, y):\n # m number of training instances\n m = X.shape[0]\n jtheta = sum((np.dot(X, theta) - y)**2) / (2.0*m)\n return jtheta", "def get_scipy_minimizer(**kwargs):\n def minimizer(objective, n_params):\n params = [random.random() for _ in range(n_params)]\n result = scipy_minimizer(objective, params, **kwargs)\n return result.x\n\n return minimizer", "def __init__(self, cost_func):\n super(MinuitController, self).__init__(cost_func)\n self._popt = None\n self._initial_step = None\n self._minuit_problem = None\n self.algorithm_check = {\n 'all': ['minuit'],\n 'ls': [None],\n 'deriv_free': [None],\n 'general': ['minuit']}", "def vjp(func, x, backend='autograd'):\n if backend == 'autograd':\n return ag.make_vjp(func, x)\n elif backend == 'pytorch':\n raise NotImplementedError('VJP for Pytorch backend is not implemented yet.')", "def __init__(self, optimizer='BFGS', optimizer_kwargs=None,\n lossprime=True, max_iterations = 1000000):\n\n user_kwargs = optimizer_kwargs\n optimizer_kwargs = {}\n print(f\"in {optimizer}: max_iterations = {max_iterations}\")\n if optimizer == 'BFGS':\n from scipy.optimize import minimize as optimizer\n optimizer_kwargs = {\n 'method' : 'BFGS',\n 'options': {'gtol': 1e-15,\n 'maxiter': max_iterations}\n }\n #optimizer_kwargs = {'method':'BFGS', 'gtol': 1e-15, }\n elif optimizer == 'L-BFGS-B':\n from scipy.optimize import minimize as optimizer\n optimizer_kwargs = {\n 'method': 'L-BFGS-B',\n 'options': {'ftol': 1e-05,\n 'gtol': 1e-08,\n 'maxfun': max_iterations,\n 'maxiter': max_iterations}\n }\n import scipy\n from distutils.version import StrictVersion\n if StrictVersion(scipy.__version__) >= StrictVersion('0.17.0'):\n optimizer_kwargs['options']['maxls'] = 2000\n elif optimizer == 'TNC':\n from scipy.optimize import minimize as optimizer\n optimizer_kwargs = {\n 'method': 'TNC',\n 'options': {'ftol': 0.,\n 'xtol': 0.,\n 'gtol': 1e-08,\n 'maxiter': max_iterations, }\n }\n elif optimizer == 'Newton-CG':\n from scipy.optimize import minimize as optimizer\n optimizer_kwargs = {\n 'method': 'Newton-CG',\n 'options': {'xtol': 1e-15,\n 'maxiter': max_iterations,}\n }\n\n elif optimizer == 'Nelder-Mead':\n from scipy.optimize import minimize as optimizer\n optimizer_kwargs = {\n 'method': 'Nelder-Mead',\n 'options': {'maxfun': max_iterations,\n 'maxiter': max_iterations, }\n }\n lossprime = False\n\n if user_kwargs:\n optimizer_kwargs.update(user_kwargs)\n self.optimizer = optimizer\n self.optimizer_kwargs = optimizer_kwargs\n self.lossprime = lossprime", "def proximal(self):\n functional = self\n\n class EntRegOptTransProximal(Operator):\n\n \"\"\"Proximal operator of entropy regularized optimal transport.\n\n The prox is given by::\n\n prox_[gamma*T_eps](mu1) = arg min_x (T_epsilon(mu0, x) +\n 1/(2*gamma) ||x - mu1||^2_2)\n \"\"\"\n\n def __init__(self, sigma):\n \"\"\"Initialize a new instance.\n\n Parameters\n ----------\n sigma : positive float\n \"\"\"\n self.sigma = float(sigma)\n super().__init__(domain=functional.domain,\n range=functional.domain, linear=False)\n\n # Setting up parameters\n self.const = 1 / (functional.epsilon * sigma)\n\n def _call(self, x):\n \"\"\"Apply the operator to ``x``.\"\"\"\n u = functional.tmp_u_prox\n v = functional.tmp_v_prox\n\n # Running generalized Sinkhorn iterations\n for j in range(functional.niter):\n # Safe-guarded u-update, to avoid divide-by-zero error.\n u_old = u.copy()\n tmp1 = functional.K_op(v)\n if np.min(tmp1) < 1e-30 or np.max(tmp1) > 1e+50:\n print('Numerical instability, 
truncation in Transport prox (Kv)',\n str(np.min(tmp1)), str(np.max(tmp1)))\n\n tmp = np.fmax(tmp1, 1e-30)\n\n\n u = functional.mu0 / tmp\n if np.min(u) < 1e-30 or np.max(u) > 1e+50:\n print('u (min/max)', str(np.min(u)), str(np.max(u)))\n\n # Safe-guarded v-update, to avoid divide-by-zero error.\n v_old = v.copy()\n\n tmp3 = functional.K_op_adjoint(u)\n if np.min(tmp3) < 1e-30 or np.max(tmp3) > 1e+50:\n print('Truncation in Transport prox (KTu)',\n str(np.min(tmp3)), str(np.max(tmp3)))\n print('u (min/max)', str(np.min(u)), str(np.max(u)))\n\n tmp4 = (self.const * tmp3 * np.exp(self.const * x))\n\n if np.min(tmp4) < 1e-30 or np.max(tmp4) > 1e+200:\n print('Argument in lambdert omega (min/max)',\n str(np.min(tmp4)), str(np.max(tmp4)))\n\n v = np.exp(self.const * x - lambertw_fulfix(tmp4))\n\n v1 = np.exp(self.const * x - scipy.special.lambertw(\n tmp4))\n if (v-v1).norm() > 1e-10:\n print('diff pga ny lambderw omega funciton',\n str((v-v1).norm()))\n print('v (min/max)', str(np.min(v)), str(np.max(v)))\n print('Argument in lambdert omega (min/max)',\n str(np.min(tmp4)), str(np.max(tmp4)))\n\n # If the updates in both u and v are small, break the loop\n if ((np.log(v)-np.log(v_old)).norm() < 1e-8 and\n (np.log(u)-np.log(u_old)).norm() < 1e-8):\n break\n\n # Store the u and v in the internal temporary variables of the\n # functional\n functional.tmp_u_prox = u\n functional.tmp_v_prox = v\n\n return x - self.sigma * functional.epsilon * np.log(v)\n\n return EntRegOptTransProximal", "def _optimize(self, objective):\n points = self._get_eval_points()\n\n if self.matrix_to_vector_transform is not None:\n # Transform the sampled matrix points in vectors\n points = np.array([self.matrix_to_vector_transform(points[i]) for i in range(self._nb_samples)])\n\n evaluations = objective(points)\n idx_best = np.argmin(evaluations, axis=0)\n\n return sc_opt.OptimizeResult(x=points[idx_best, :], success=True, fun=evaluations[idx_best, :],\n nfev=points.shape[0], message=\"OK\")", "def _fitness_model__(self, solution=None, minmax=0):\n return self.objective_func(solution) if minmax == 0 else 1.0 / (self.objective_func(solution) + self.EPSILON)", "def objective(trial, feature_matrix, x_train, y_train, x_val, y_val, config, data_path):\n\n # Init params for trial\n params = suggest_parameters(trial, config)\n # Calculate avg_val_recall\n clf, avg_val_recall, _ = train_on_one_set(feature_matrix, x_train, y_train, x_val, y_val, config, params)\n # Save a trained model to a file.\n with open(data_path + 'interim/trial_{}.pickle'.format(trial.number), 'wb') as f:\n pickle.dump(clf, f)\n return avg_val_recall", "def add_objective(self, objective_type, **kwargs):\n if objective_type != \"custom\":\n self.objective_args = tuple(kwargs.values())\n self.objective = self.obj_creator.create_objective(objective_type, **kwargs)\n else:\n self.objective_args = tuple(kwargs.values())[1:]\n self.objective = tuple(kwargs.values())[0]", "def setup(self):\n declared = []\n for obj in Rt.objective:\n var_list = split(\"[+*/-]\", obj)\n for v in var_list:\n if v not in declared:\n self.add_input(v)\n declared.append(v)\n self.add_output(\"Objective function \" + obj)", "def function(fnc, *args, **kwargs):\n return Function(fnc, args=args, kwargs=kwargs).tunable()", "def get_objective(objective_terms, recipe):\n\n # <objective_terms>, \"stages__*__objective\"\n if isinstance(recipe, dict):\n return WeightedObjective(recipe, **objective_terms)\n\n if isinstance(recipe, str):\n return ExpressionObjective(recipe, **objective_terms)" ]
[ "0.7482436", "0.73444074", "0.7161457", "0.7161457", "0.6737644", "0.67088556", "0.6661867", "0.66453636", "0.6605384", "0.6507975", "0.6357", "0.6254179", "0.62437993", "0.62437993", "0.62322885", "0.6193143", "0.61185783", "0.6108092", "0.6072014", "0.6065767", "0.60194516", "0.59768355", "0.5975299", "0.5965762", "0.5937847", "0.593706", "0.5919299", "0.5902472", "0.58990926", "0.5859941", "0.5856501", "0.5855134", "0.5839152", "0.58290786", "0.5822685", "0.5820092", "0.58162916", "0.580262", "0.58001924", "0.5798556", "0.5736414", "0.5731427", "0.5685257", "0.5682856", "0.56800765", "0.56567764", "0.5655665", "0.5649047", "0.56453633", "0.56377167", "0.56192285", "0.5594158", "0.5589585", "0.55876094", "0.55732244", "0.5571356", "0.55696994", "0.5557222", "0.5524564", "0.5514595", "0.55142754", "0.55087453", "0.5504739", "0.54871273", "0.54676574", "0.5462792", "0.54616266", "0.5449569", "0.544222", "0.5439865", "0.54247075", "0.5417645", "0.54174", "0.54169333", "0.54112685", "0.5407651", "0.53767854", "0.5367121", "0.5356195", "0.535602", "0.5349816", "0.5338374", "0.53315103", "0.5327551", "0.5320623", "0.53192264", "0.5313125", "0.5311828", "0.52981347", "0.5296235", "0.52876145", "0.52830696", "0.5273434", "0.5270668", "0.5267222", "0.52666575", "0.52642655", "0.52642137", "0.5263183", "0.5259224" ]
0.65957934
9
Save the best parameters from the optimization.
def save_best(dic_path, metric, model_path):

    # load the scores
    with open(dic_path, "r") as f:
        score_list = json.load(f)

    # get the best parameters
    objective = METRIC_DIC[convert_metric(metric)]
    pref = 1 if (objective == "minimize") else (-1)
    hyper_scores = [pref * score_dic[metric] for score_dic in score_list]
    best_params = score_list[np.argmin(hyper_scores)]

    # print the best parameters
    save_path = os.path.join(model_path, "best_params.json")
    best_str = "\n  ".join([f"{key}: {val}" for key, val in best_params.items()])
    fprint(f"Best parameters are {best_str}")
    fprint(f"Saving to {save_path}")

    # save them
    with open(save_path, "w") as f:
        json.dump(best_params, f, indent=4, sort_keys=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_optimal_parameters(self):\n # Getting the best trial based on the test errors\n idx = self.trial_losses.index(min(self.trial_losses))\n self.best_trial = self.trial_list[idx]\n self.objective.parse_trial(self.best_trial)", "def optimize_parameters(self):\n pass", "def optimize_parameters(self):\n pass", "def optimize_parameters(self):\n pass", "def save_improvement(obj, status):\n if np.isnan(model.parameters[0].get_value().sum()):\n print(\"NaN detected! Not saving the model. Crashing now.\")\n sys.exit()\n\n print(\"*** Best epoch: {0} ***\\n\".format(obj.best_epoch))\n model.save(experiment_path)", "def _save_trained_params(self):\n self.trained_model_params = self.sess_train.run([self.ent_emb, self.rel_emb])", "def save_best_params(output_dir, best_params, gene, model_options, predictor='classify'):\n\n if not isinstance(model_options.training_data, str):\n training_data = '.'.join(model_options.training_data)\n else:\n training_data = model_options.training_data\n\n output_file = construct_filename(output_dir,\n 'params',\n '.pkl',\n gene,\n training_data,\n model_options.model,\n predictor,\n s=model_options.seed)\n\n with open(output_file, 'wb') as f:\n pkl.dump(best_params, f)", "def remember_state(self):\n self._best_params = deepcopy(self._param_store.get_state())", "def saveBestState(self, value, epoch, it):\n \n self.monitor.saveModel(self.agent)\n self.bestState = deepcopy(self.agent.state_dict())\n self.best_value = value\n self.last_save = epoch\n self.last_iter = it\n logger.info(f\"Model saved at epoch {epoch}\")", "def save_pruning_parameters(self, model):\n if (len(self.prune_parameters) == 0):\n self.set_pruning_parameters(model)\n self.parameters_snapshot = []\n for name, m in self.parameters:\n copy_weights = m.weight.data.copy()\n self.parameters_snapshot.append((name, copy_weights))", "def find_best_params(self, n_trials=120):\r\n self.logger_object.log(\r\n self.file_object,\r\n \"Entered find_best_params method of HyperparametersTuner class.\",\r\n )\r\n try:\r\n optimization_function = partial(self.optimize)\r\n study = optuna.create_study(direction=\"maximize\")\r\n study.optimize(optimization_function, n_trials=n_trials)\r\n self.logger_object.log(\r\n self.file_object, f\"Successfully ran {n_trials} optuna study trials.\"\r\n )\r\n\r\n self.tuned_hyperparams[\"LGBM Regression\"].append(study.best_params)\r\n self.logger_object.log(\r\n self.file_object,\r\n \"Successfully appended best model parameters as a dictionary.\",\r\n )\r\n\r\n with (open(str(Config.TUNED_HYPERPARAMS_FILE_PATH), \"w\")) as outfile:\r\n json.dump(self.tuned_hyperparams[\"LGBM Regression\"], outfile, indent=1)\r\n self.logger_object.log(\r\n self.file_object,\r\n \"Successfully dumped the best parameters in best_params.json .\",\r\n )\r\n except Exception as e:\r\n self.logger_object.log(\r\n self.file_object,\r\n f\"Exception occured in find_best_params method of HyperparametersTuner class. Exception message: {e}\",\r\n )\r\n self.logger_object.log(\r\n self.file_object,\r\n \"Dumping best parameters unsuccessful. 
Exited find_best_params method of HyperparametersTuner class\",\r\n )\r\n raise Exception()", "def parameter_optimization(self):\n out = open(self.csv_dir + self.strategy_id + '_gridsearch.csv', \"w\")\n spl = len(self.para_list)\n for i, sp in enumerate(self.para_list):\n print(\"Strategy %s out of %s...\" % (i + 1, spl))\n self._generate_trading_instances(sp)\n self._run_backtest()\n stats = self.portfolio.get_statistics()\n tot_profit = float(stats[0][1])\n sharpe = float(stats[1][1])\n max_dd = float(stats[2][1])\n win_rate = float(stats[7][1].replace(\"%\", \"\"))\n profit_factor = float(stats[8][1])\n\n out.write(\n \"%s,%s,%s,%s,%s,%s,%s\\n\" %\n (sp[\"takeprofit\"], sp[\"period\"], tot_profit, sharpe, max_dd, win_rate, profit_factor)\n )\n out.close()", "def save_model_params(self):\n params_dict = self.get_model_params()\n if self.params_filepath is not None:\n file_params = data_functions.load_json(self.params_filepath)\n if file_params != params_dict: # cheking if the parametes for this\n # session are diffrent then those\n # in the source file\n self.session_number += 1\n\n curr_file_name = (\n self.params_file_name + PARAMS_UPDATE_FORMAT + 'json').format(\n sess=self.session_number,\n steps=self.samples_seen)\n\n data_functions.save_json(params_dict, curr_file_name, self.curr_folder)\n self.params_filepath = os.path.join(self.curr_folder, curr_file_name)", "def parameter_optimize(self, estimator, parameters, X_test, y_test):\n cv = cross_validation.ShuffleSplit(self.X.shape[0], n_iter=100,\n test_size=0.3, random_state=42)\n clf = grid_search.GridSearchCV(estimator, parameters[1], cv = cv, n_jobs =4)\n t1 = time.time()\n clf.fit(self.X, self.y)\n print \"The optimize parameters for %s is: %s\"%(parameters[0], clf.best_params_)\n y_pred = clf.predict(X_test)\n t2 = time.time()\n print \"The running time for %s is: %f sec\"%(parameters[0], t2 - t1)\n score = metrics.accuracy_score(y_test, y_pred)\n print \"The accuracy score for %s is: %f\"%(parameters[0], score), \"\\n\"\n return {\"%s\"%parameters[0]: {\"estimator_parameters\": clf.best_params_, \n \"running_time\": t2-t1, \"accuracy_score\": score}}", "def update(self, globalBest: list):\n try:\n vNext: list = []\n xNext: list = []\n\n for i in range(self.dimension):\n r1: float = random.uniform(0, 1)\n r2: float = random.uniform(0, 1)\n\n vNext.append(\n self.inertia * self.v[i]\n + self.aCognitive * (self.bestPosition[i] - self.x[i]) * r1\n + self.aSocial * (globalBest[i] - self.x[i]) * r2\n )\n xNext.append(self.x[i] + vNext[i])\n\n self.x: list = xNext\n self.v: list = vNext\n\n if self.dataset is not None:\n currentFitness: float = self.func(*self.x, self.dataset)\n else:\n currentFitness: float = self.func(*self.x)\n\n if currentFitness <= self.bestValue:\n self.bestValue: float = currentFitness\n self.bestPosition: list = self.x\n\n # DEBUG\n self.coordinatesX.append(self.bestPosition[0])\n self.coordinatesY.append(self.bestPosition[1])\n self.coordinatesZ.append(self.bestValue)\n\n except IndexError:\n print(\n \"WARN: Dimensions of global best must match amount of parameters to be optimized.\"\n )\n raise IndexError", "def optimize_parameters(self):\r\n # forward\r\n self.forward() # compute fake image/video and reconstruction image/video\r\n\r\n # D_A\r\n self.set_requires_grad([self.D_V], True)\r\n self.set_requires_grad([self.G_t, self.G_u, self.Att, self.classifier], False)\r\n self.optimizer_D.zero_grad() # set D_V's gradients to zero\r\n self.backward_D_V() # calculate graidents for D_V\r\n 
self.optimizer_D.step() # update D_A's weights\r\n\r\n # G_A and G_B\r\n self.set_requires_grad([self.D_V], False) # Ds require no gradients when optimizing Gs\r\n self.set_requires_grad([self.G_t, self.G_u, self.Att, self.classifier], True)\r\n self.optimizer_G.zero_grad() # set G_t,G_u,Att,classifier's gradients to zero\r\n self.backward_G() # calculate gradients for G_A and G_B\r\n self.optimizer_G.step() # update G_A and G_B's weights\r", "def getOptimalParams(self):\n\t\t# Load calibration chain and find optimal for like1\n\t\tcal_data = pd.read_csv(self.database_path, sep=',')\n\t\tparams = cal_data.ix[cal_data['like1'].idxmax()].to_dict()\n\t\tcost = params['like1']\n\t\t# reformat parameters to match original naming\n\t\tparams_reformatted = {}\n\t\tfor k, p in self.cal_params.items():\n\t\t\tparams_reformatted[k] = params['par'+k]\n\n\t\treturn params_reformatted, cost", "def save_model_params(self, full_path):\n \n file_to_save = file(full_path, 'wb')\n \n print(\"Saving model parameters to %s\"%full_path)\n \n cPickle.dump(self.theta, \n file_to_save, \n protocol=cPickle.HIGHEST_PROTOCOL)\n \n file_to_save.close()", "def best_bat(self):\n\n i = 0\n j = 0\n for i in range(self.NP):\n if self.Fitness[i] < self.Fitness[j]:\n j = i\n for i in range(self.D):\n self.best[i] = self.Sol[j][i]\n self.f_min = self.Fitness[j]", "def tune_parameters(self, model, param_set, train, predictor_var, target_var):\n \n grid_search = GridSearchCV(estimator = model, param_grid = param_set,n_jobs=-1, cv=5)\n grid_search.fit(train[predictor_var],train[target_var])\n \n print(grid_search.best_params_, grid_search.best_score_)\n \n return grid_search.best_params_", "def save_parameters(self):\n self.read_parameters()\n group = NXprocess()\n group['model'] = self.composite_model\n group['data'] = self.data\n for m in self.models:\n group[m['name']] = self.get_model(m['model'])\n parameters = NXparameters(attrs={'model': m['class']})\n for n, p in m['parameters'].items():\n n = n.replace(m['model'].prefix, '')\n parameters[n] = NXfield(p.value, error=p.stderr,\n initial_value=p.init_value,\n min=str(p.min), max=str(p.max),\n vary=p.vary, expr=p.expr)\n group[m['name']].insert(parameters)\n group['title'] = 'Fit Model'\n group['model'] = self.get_model()\n self.write_group(group)", "def best_params(self):\n return self.X[np.argmax(self.y.numpy())]", "def run_and_store(self):\n # Initialization assumptions\n z = self.draw_normal_initial()\n gradient = self.cv_gradient_initial(z)\n gradient[np.isnan(gradient)] = 0\n variance = np.power(gradient,2) \n final_parameters = self.current_parameters()\n final_samples = 1\n\n # Create optimizer\n if self.optimizer == 'ADAM':\n self.optim = ADAM(final_parameters, variance, self.learning_rate, 0.9, 0.999)\n elif self.optimizer == 'RMSProp':\n self.optim = RMSProp(final_parameters, variance, self.learning_rate, 0.99)\n\n # Stored updates\n stored_means = np.zeros((self.iterations,len(final_parameters)/2))\n stored_predictive_likelihood = np.zeros(self.iterations)\n\n # Record elbo\n if self.record_elbo is True:\n elbo_records = np.zeros(self.iterations)\n else:\n elbo_records = None\n\n for i in range(self.iterations):\n gradient = self.cv_gradient(self.draw_normal())\n gradient[np.isnan(gradient)] = 0\n new_parameters = self.optim.update(gradient)\n self.change_parameters(new_parameters)\n\n stored_means[i] = self.optim.parameters[::2]\n stored_predictive_likelihood[i] = self.neg_posterior(stored_means[i])\n\n if self.printer is True:\n 
self.print_progress(i,self.optim.parameters[::2])\n\n # Construct final parameters using final 10% of samples\n if i > self.iterations-round(self.iterations/10):\n final_samples += 1\n final_parameters = final_parameters+self.optim.parameters\n\n if self.record_elbo is True:\n elbo_records[i] = self.get_elbo(self.optim.parameters[::2])\n\n final_parameters = final_parameters/float(final_samples)\n self.change_parameters(final_parameters)\n final_means = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2==0])\n final_ses = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2!=0])\n\n if not self.quiet_progress:\n print(\"\")\n print(\"Final model ELBO is \" + str(-self.neg_posterior(final_means)-self.create_normal_logq(final_means)))\n return self.q, final_means, final_ses, stored_means, stored_predictive_likelihood, elbo_records", "def run_and_store(self):\n # Initialization assumptions\n z = self.draw_normal_initial()\n gradient = self.cv_gradient_initial(z)\n gradient[np.isnan(gradient)] = 0\n variance = np.power(gradient,2) \n final_parameters = self.current_parameters()\n final_samples = 1\n\n # Create optimizer\n if self.optimizer == 'ADAM':\n self.optim = ADAM(final_parameters, variance, self.learning_rate, 0.9, 0.999)\n elif self.optimizer == 'RMSProp':\n self.optim = RMSProp(final_parameters, variance, self.learning_rate, 0.99)\n\n # Stored updates\n stored_means = np.zeros((self.iterations,len(final_parameters)/2))\n stored_predictive_likelihood = np.zeros(self.iterations)\n\n # Record elbo\n if self.record_elbo is True:\n elbo_records = np.zeros(self.iterations)\n else:\n elbo_records = None\n\n for i in range(self.iterations):\n gradient = self.cv_gradient(self.draw_normal())\n gradient[np.isnan(gradient)] = 0\n new_parameters = self.optim.update(gradient)\n self.change_parameters(new_parameters)\n\n stored_means[i] = self.optim.parameters[::2]\n stored_predictive_likelihood[i] = self.neg_posterior(stored_means[i])\n\n if self.printer is True:\n self.print_progress(i,self.optim.parameters[::2])\n\n # Construct final parameters using final 10% of samples\n if i > self.iterations-round(self.iterations/10):\n final_samples += 1\n final_parameters = final_parameters+self.optim.parameters\n\n if self.record_elbo is True:\n elbo_records[i] = self.get_elbo(self.optim.parameters[::2])\n\n final_parameters = final_parameters/float(final_samples)\n self.change_parameters(final_parameters)\n final_means = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2==0])\n final_ses = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2!=0])\n\n if not self.quiet_progress:\n print(\"\")\n print(\"Final model ELBO is \" + str(-self.full_neg_posterior(final_means)-self.create_normal_logq(final_means)))\n return self.q, final_means, final_ses, stored_means, stored_predictive_likelihood, elbo_records", "def save_best(self, sess, score):\n if score > self.best_score:\n self.best_score = score\n path_prefix = self.saver.save(sess, self.config.save_path,\n self.global_step)\n self.best_model_path = path_prefix\n return path_prefix\n return \"Skip saving\"", "def save_h5(self, filename):\n try:\n shutil.copyfile(filename, '{0}_bak'.format(filename))\n except IOError:\n print 'could not make backup of trainer param file (which is \\\n normal if we haven\\'t saved one until now)'\n paramfile = tables.openFile(filename, 'w')\n paramfile.createArray(paramfile.root, 'learningrate',\n self.learningrate)\n 
paramfile.createArray(paramfile.root, 'verbose', self.verbose)\n paramfile.createArray(paramfile.root, 'loadsize', self.loadsize)\n paramfile.createArray(paramfile.root, 'batchsize', self.batchsize)\n paramfile.createArray(paramfile.root, 'momentum',\n self.momentum)\n paramfile.createArray(paramfile.root, 'epochcount',\n self.epochcount)\n paramfile.createArray(paramfile.root, 'momentum_batchcounter',\n self.momentum_batchcounter)\n incsgrp = paramfile.createGroup(paramfile.root, 'incs', 'increments')\n for p in self._params:\n paramfile.createArray(incsgrp, p.name, self._incs[p].get_value())\n if self.rmsprop is not None:\n avg_grad_sqrs_grp = paramfile.createGroup(paramfile.root, 'avg_grad_sqrs')\n for p in self._params:\n paramfile.createArray(avg_grad_sqrs_grp, p.name, self._avg_grad_sqrs[p].get_value())\n paramfile.close()", "def save_checkpoint(self):\n if not self.save_ckpt:\n return\n\n lookup = None\n is_best = False\n checkpoint = self.create_checkpoint()\n\n # save best only or not?\n if self.save_best_only:\n if self.valid_dataloader:\n for item in [self.valid_metric_meters, self.valid_loss_meters]:\n if self.primary_indicator in item:\n lookup = item\n else:\n for item in [self.train_metric_meters, self.train_loss_meters]:\n if self.primary_indicator in item:\n lookup = item\n if lookup:\n value = lookup[self.primary_indicator].avg\n if self.best_mode == 'min':\n if value < self.best_indicator:\n self.best_indicator = value\n is_best = True\n else:\n if value > self.best_indicator:\n self.best_indicator = value\n is_best = True\n\n # TODO: better naming convention\n if self.valid_dataloader:\n metric_string = '-'.join([\n f'{metric}-[{self.valid_metric_meters[metric].avg:.5f}]'\n for metric in self.valid_metric_meters\n ])\n loss_string = '-'.join([\n f'{loss}-[{self.valid_loss_meters[loss].avg:.5f}]'\n for loss in self.valid_loss_meters\n ])\n else:\n metric_string = '-'.join([\n f'{metric}-[{self.train_metric_meters[metric].avg:.5f}]'\n for metric in self.train_metric_meters\n ])\n loss_string = '-'.join([\n f'{loss}-[{self.train_loss_meters[loss].avg:.5f}]'\n for loss in self.train_loss_meters\n ])\n # TODO: use config for paths\n # make subdir\n folder = Path(self.save_path, str(self.fold_idx))\n folder.mkdir(parents=True, exist_ok=True)\n if not self.save_best_only or (self.save_best_only and is_best):\n torch.save(checkpoint,\n f'{folder}/ep-[{self.epoch}]-iter-[{self.iter}]-{loss_string}-{metric_string}.pth')", "def save_best_model(self, scores):\n if not self.params.is_master:\n return\n for metric, biggest in self.metrics:\n if metric not in scores:\n logger.warning(\"Metric \\\"%s\\\" not found in scores!\" % metric)\n continue\n factor = 1 if biggest else -1\n if factor * scores[metric] > factor * self.best_metrics[metric]:\n self.best_metrics[metric] = scores[metric]\n logger.info('New best score for %s: %.6f' % (metric, scores[metric]))\n self.save_model('best-%s' % metric)\n self.save_checkpoint('best-%s' % metric, include_optimizers=True)", "def save_parameters(self, session, out_dict=None):\n if out_dict is None:\n out_dict = {}\n for w in self.weights:\n out_dict[w.name.rsplit(':', 1)[0]] = session.run([w])[0]\n return out_dict", "def optimize(self):\n \n if self.verbose:\n print('Starting grid search with bounds: [' + \\\n ';'.join(['%5g to %5g']*len(self.steps))%tuple([(self.steps[i][0], self.steps[i][-1]) for i in range(len(self.steps))]) +']')\n\n for params in self._get_next_point():\n self.transform.set_params(params)\n\n v, _ = 
self.measure.value_and_derivatives(self.transform)\n\n if v < self.best_value:\n self.best_value = v\n self.best_params = params\n# print('New best value %2.4f at ('%v, ', '.join(['%8.3f']*len(params))%tuple(params), ')')\n\n self.value_history.append(v)\n self.last_value = v\n self.iteration += 1\n\n if self.report_freq > 0 and (self.iteration % self.report_freq == 0) and self.report_func is not None:\n self.report_func(self)\n\n # Set the best transform\n self.transform.set_params(self.best_params)\n self.last_value = self.best_value\n return self.best_value", "def save_cma_optimization_results(self, es):\n # code extra verbose to understand what is going on\n generation = es.result.iterations\n evals = es.result.evaluations # number of evals at start of each gen\n xfavorite = es.result.xfavorite # center of distribution, best est\n stds = es.result.stds # stds of distribution, stds of xfavorite\n fbest = es.result.fbest # best ever measured\n xbest = es.result.xbest # coordinates of best ever measured\n evals_best = es.result.evals_best # index of best measurement\n\n if not self.minimize_optimization:\n fbest = -fbest\n\n results_array = np.concatenate([[generation, evals],\n xfavorite, stds,\n [fbest], xbest, [evals_best]])\n if (not 'optimization_result'\n in self.data_object[EXPERIMENTAL_DATA_GROUP_NAME].keys()):\n opt_res_grp = self.data_object[EXPERIMENTAL_DATA_GROUP_NAME]\n self.opt_res_dset = opt_res_grp.create_dataset(\n 'optimization_result', (0, len(results_array)),\n maxshape=(None, len(results_array)),\n dtype='float64')\n\n # FIXME: Jan 2018, add the names of the parameters to column names\n self.opt_res_dset.attrs['column_names'] = h5d.encode_to_utf8(\n 'generation, ' + 'evaluations, ' +\n 'xfavorite, ' * len(xfavorite) +\n 'stds, '*len(stds) +\n 'fbest, ' + 'xbest, '*len(xbest) +\n 'best evaluation,')\n\n old_shape = self.opt_res_dset.shape\n new_shape = (old_shape[0]+1, old_shape[1])\n self.opt_res_dset.resize(new_shape)\n self.opt_res_dset[-1, :] = results_array", "def optimize_parameters(model, grid, X_train, y_train):\n \n tss = TimeSeriesSplit(n_splits=10)\n \n \n print(\"[INFO] performing random search...\")\n searcher = RandomizedSearchCV(estimator=model, n_jobs=-1, n_iter=10, cv=tss,\n param_distributions=grid, scoring=('neg_mean_squared_error','neg_mean_absolute_error'), refit='neg_mean_squared_error')\n search_results = searcher.fit(X_train, y_train)\n best_params = search_results.best_params_\n print(\"Best parameters are: {}\".format(best_params))\n \n return best_params", "def optimize(self, X, y):\n mod = GridSearchCV(estimator=self.model,\n param_grid=self.params,\n scoring='roc_auc',\n verbose=2,\n n_jobs=-1,\n cv=15)\n mod.fit(X, y)\n self.model = mod.best_estimator_\n self.optimized = True\n print(\"Stored best model.\")", "def optimize(self, model):\n model.optimize_params(\n max_iters=self.max_iters, max_beta_iters=self.max_beta_iters,\n max_U_iters=self.max_U_iters, rel_tol=self.rel_tol,\n optimize_beta=self.optimize_beta, optimize_U=self.optimize_U,\n compute_D=self.compute_D\n )\n return model", "def _save(self, itr):\n # using keep_checkpoint_every_n_hours as proxy for iterations between saves\n if self.saver and (itr + 1) % self.saver._keep_checkpoint_every_n_hours == 0:\n\n # collect params (or stuff to keep in general)\n params = dict()\n params['critic'] = self.critic.network.get_param_values()\n\n # if the environment is wrapped in a normalizing env, save those stats\n normalized_env = hgail.misc.utils.extract_normalizing_env(self.env)\n if 
normalized_env is not None:\n params['normalzing'] = dict(\n obs_mean=normalized_env._obs_mean,\n obs_var=normalized_env._obs_var\n )\n\n # save hierarchy\n for i, level in enumerate(self.hierarchy):\n params[i] = dict()\n params[i]['policy'] = level.algo.policy.get_param_values()\n \n # save params \n save_dir = os.path.split(self.saver_filepath)[0]\n hgail.misc.utils.save_params(save_dir, params, itr+1, max_to_keep=50)", "def save(self):\r\n # torch.save(self.model.state_dict, os.path.join(self.ckpt_dir, 'best_model_state_dict.pt'))\r\n torch.save(self.model, os.path.join(self.ckpt_dir, 'best_model_INN.pt'))", "def set_parameters(self, **kwargs):\n self.__select_k_best.set_params(**kwargs)", "def save_parameters(self):\n paramfile = os.path.join(self._datadir, self.id.lower() + '.cfg')\n \n params_var = {}\n params_var['eta'] = self.system_param['eta']\n params_var['cov'] = self.system_param['cov']\n \n with open(paramfile, 'w') as paramjson:\n json.dump(params_var, paramjson)", "def save_checkpoint(self, name, include_optimizers=True):\n if not self.params.is_master:\n return\n path = os.path.join(self.params.dump_path, '%s.pth' % name)\n logger.info(\"Saving %s to %s ...\" % (name, path))\n\n data = {\n 'epoch': self.epoch,\n 'n_total_iter': self.n_total_iter,\n 'best_metrics': self.best_metrics,\n 'best_stopping_criterion': self.best_stopping_criterion,\n }\n\n for name in self.MODEL_NAMES:\n logger.warning(\"Saving %s parameters ...\" % name)\n data[name] = getattr(self, name).state_dict()\n if include_optimizers:\n for name in self.optimizers.keys():\n logger.warning(\"Saving %s optimizer ...\" % name)\n data['%s_optimizer' % name] = self.optimizers[name].state_dict()\n\n # data['dico_id2word'] = self.data['dico'].id2word\n # data['dico_word2id'] = self.data['dico'].word2id\n # data['dico_counts'] = self.data['dico'].counts\n data['params'] = {k: v for k, v in self.params.__dict__.items()}\n\n torch.save(data, path)", "def _restore(self):\n self._logger = LOGGER\n self._param_store = pyro.get_param_store()\n self.set_state(self.best_params)\n self._alpha_guide_prior_params = dict(\n self._param_store.named_parameters()\n )", "def get_optimization_parameters(self):\n pass", "def save_checkpoint(self, name):\n path = os.path.join(self.params.dump_path, '%s.pth' % name)\n logger.info(\"Saving %s to %s ...\" % (name, path))\n\n data = {\n 'epoch': self.epoch,\n 'best_metrics': self.scores\n }\n\n logger.warning(\"Saving model parameters ...\")\n data['model'] = self.encoder.model.state_dict()\n data['classifier'] = self.proj\n data['dico_id2word'] = self.data['dico'].id2word\n data['dico_word2id'] = self.data['dico'].word2id\n data['dico_counts'] = self.data['dico'].counts\n # print(self.encoder.pretrain_params)\n data['params'] = self.encoder.pretrain_params.update({k: v for k, v in self.params.__dict__.items()})\n\n torch.save(data, path)", "def save(self):\n with open(os.path.join(self.save_path, \"experiment.delira.pkl\"),\n \"wb\") as f:\n pickle.dump(self, f)\n\n self.params.save(os.path.join(self.save_path, \"parameters\"))", "def get_best_params(self):\n \n # Get the model from cache or disk based on the model_name in request\n self._get_model_by_name()\n \n try:\n # Prepare the response\n self.response = pd.DataFrame([[self.model.name, utils.dict_to_sse_arg(self.model.best_params)]])\n except AttributeError:\n err = \"Best parameters are not available as a parameter grid was not provided for cross validation.\"\n raise Exception(err)\n \n # Send the reponse table description to 
Qlik\n self._send_table_description(\"best_params\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n # Finally send the response\n return self.response", "def optimization_parameters():\n param_distributions = {\n \"n_estimators\": list(range(50, 300, 50)),\n \"max_features\": [\"auto\", \"log2\"],\n \"max_depth\": list(range(1, 21, 2)),\n \"min_samples_leaf\": list(range(4, 22, 2)),\n \"min_samples_split\": list(range(5, 30, 5)),\n \"criterion\": [\"gini\", \"entropy\"],\n }\n param_grid = {\n \"n_estimators\": list(range(50, 300, 50)),\n \"max_depth\": list(range(1, 21, 2)),\n \"min_samples_leaf\": list(range(4, 22, 2)),\n \"min_samples_split\": list(range(5, 30, 5)),\n \"criterion\": [\"gini\", \"entropy\"],\n }\n\n rfc = RandomForestClassifier()\n\n # 5 * 10 * 9 * 5 * 2 = 4500 iterations\n # will take a lot of time\n model = GridSearchCV(\n estimator=rfc,\n param_grid=param_grid,\n scoring=\"accuracy\",\n verbose=10,\n n_jobs=1,\n cv=5,\n )\n # initiates Randomized Search \n model = RandomizedSearchCV(\n estimator=rfc,\n param_distributions=param_distributions,\n n_iter=20,\n scoring='accuracy',\n verbose=10,\n n_jobs=1,\n cv=5,\n )\n \n # fit and predict the model\n model.fit(x_train, y_train)\n pred = model.predict(x_test)\n \n # define evaluation metric as accuracy score\n acc = accuracy_score(y_test, pred) * 100\n print(f\"RandomForestClassifier with GridSearchCV: {acc:0.2f}%\")\n print(\"Best parameters set:\")\n\n # extract best parameters \n best_parameters = model.best_estimator_.get_params()\n for param_name in sorted(param_grid.keys()):\n print(f\"\\t{param_name}: {best_parameters[param_name]}\")", "def checkBest(data):\n global filename, hyp\n if data.newBest is True:\n bestReps = hyp['bestReps']\n rep = np.tile(data.best[-1], bestReps)\n fitVector = batchMpiEval(rep, gen=None, sp_count=None, sameSeedForEachIndividual=False)\n trueFit = np.mean(fitVector)\n if trueFit > data.best[-2].fitness: # Actually better!\n data.best[-1].fitness = trueFit\n data.fit_top[-1] = trueFit\n data.bestFitVec = fitVector\n else: # Just lucky!\n prev = hyp['save_mod']\n data.best[-prev:] = data.best[-prev]\n data.fit_top[-prev:] = data.fit_top[-prev]\n data.newBest = False\n return data", "def fit(self):\n if self.minimizer == \"differential_evolution\":\n kwargs = {\"maxiter\": self._maxiter}\n elif self.minimizer == \"shgo\":\n kwargs = {\"options\": {\"maxiter\": self._maxiter,\n \"jac\": self.cost_func.jac_cost}}\n elif self.minimizer == \"dual_annealing\":\n kwargs = {\"maxiter\": self._maxiter, \"local_search_options\": {\n \"jac\": self.cost_func.jac_cost}}\n fun = self.cost_func.eval_cost\n bounds = self.value_ranges\n algorithm = getattr(optimize, self.minimizer)\n result = algorithm(fun, bounds, **kwargs)\n self._popt = result.x\n if result.success:\n self._status = 0\n elif \"Maximum number of iteration\" in result.message:\n self._status = 1\n else:\n self._status = 2", "def save_checkpoint(self, state, is_best, filename='checkpoint.pth.tar'):\n filename = os.path.join(self.experiment_dir, filename)\n torch.save(state, filename)\n if is_best:\n filename_best = os.path.join(self.experiment_dir,'best.pth.tar')\n torch.save(state,filename_best)\n best_pred = state['best_pred']\n with open(os.path.join(self.experiment_dir, 'best_pred.txt'), 'w') as f:\n f.write(str(best_pred))\n if not os.path.exists(os.path.join(self.directory,'best_pred.txt')):\n with 
open(os.path.join(self.directory,'best_pred.txt'),'w') as f:\n f.write(str(best_pred))\n shutil.copyfile(filename, os.path.join(self.directory, 'model_best.pth.tar'))\n else:\n with open(os.path.join(self.directory,'best_pred.txt'),'r') as f:\n max_iou = float(f.readline())\n if best_pred > max_iou:\n with open(os.path.join(self.directory,'best_pred.txt'),'w') as f:\n f.write(str(best_pred))\n shutil.copyfile(filename, os.path.join(self.directory, 'model_best.pth.tar'))", "def save(self):\n filename = os.path.join(self.directory, 'experiment.json')\n with open(filename, 'w') as f:\n json.dump(self.report, f, indent=2, sort_keys=True)\n filename = os.path.join(self.directory, 'training_progress.csv')\n with open(filename, 'w') as csvfile:\n csv.writer(csvfile).writerows(self.history)\n filename = os.path.join(self.directory, 'learned_parameters.npy')\n parameters = lasagne.layers.get_all_param_values(self.__network)\n parameters = parameters\n numpy.save(filename, parameters)", "def optimize_parameters(self):\n self.loss_total.backward() # calculate gradients\n self.optimizer.step()\n self.optimizer.zero_grad()\n torch.cuda.empty_cache()", "def on_save_parameters(self):\n obj_points = self.get_object_points()\n cam_pos = self.get_camera_position()\n distortion = self.get_distortion_coeeficients()\n\n d = {\n 'object positions': obj_points,\n 'camera positions': cam_pos,\n 'distortion coefficients': distortion\n }\n\n jsn = json.dumps(d)\n h = hashlib.sha1(jsn.encode('utf-8')).hexdigest()\n fn = f'{h}.json'\n\n with open(fn, 'w') as f:\n f.write(jsn)\n\n self.statusBar().showMessage(f'Parameters have been save to {fn}.')\n self.param_file = fn", "def saving_parameters(num_features, best_params, auc_training, auc_validation, model_name,logs_file):\n name = pd.DataFrame({'model_name':model_name}, index=[0])\n num_features = pd.DataFrame({'num_features':num_features}, index=[0])\n auc_training = pd.DataFrame({'auc_training': auc_training}, index = [0])\n auc_validation = pd.DataFrame({'auc_validation': auc_validation}, index = [0])\n best_params = pd.DataFrame({'best_params': best_params})\n frames = [name, auc_training, auc_validation, best_params]\n resultado = pd.concat(frames, axis = 1)\n output_file = model_name +'_parameters.csv'\n output_file = os.path.join(logs_file,str(output_file))\n resultado.to_csv(output_file)", "def Save(self,val=0):\n u,p = self.problem.up_next.split(True,**self.extra_kwarg)\n if self.first_save:\n self.u_file = self.params.Save(u,\"velocity\",subfolder=\"solutions/\",val=val)\n self.p_file = self.params.Save(p,\"pressure\",subfolder=\"solutions/\",val=val)\n # self.nuT_file = self.params.Save(self.nu_T,\"eddy_viscosity\",subfolder=\"solutions/\",val=val)\n self.first_save = False\n else:\n self.params.Save(u,\"velocity\",subfolder=\"solutions/\",val=val,file=self.u_file)\n self.params.Save(p,\"pressure\",subfolder=\"solutions/\",val=val,file=self.p_file)\n # self.params.Save(self.nu_T,\"eddy_viscosity\",subfolder=\"solutions/\",val=val,file=self.nuT_file)", "def save_parameters(gp, target):\n pdict = {}\n pdict['likelihood'] = gp.likelihood.get_free_state()[0]\n pdict['kern_variance'] = gp.kern.variance.get_free_state()[0]\n pdict['kern_lengthscale'] = list(gp.kern.lengthscales.get_free_state())\n pdict['log_likelihood'] = gp._objective(gp.get_free_state())[0][0]\n #pdict = {n:list(gp[n].flatten()) for n in gp.parameter_names()}\n with open(target, 'w') as f:\n json.dump(pdict, f)", "def _update_optimizer(self, hyperparameters, score, fit=True):\n if 
self.do_maximize:\n score = -score\n self.optimizer_result = self.optimizer.tell(hyperparameters, score, fit=fit)", "def final(self, **kwargs):\n epoch = kwargs[\"epoch\"] + 1\n if epoch >= self.ignore_before:\n name = self.prepend + \"training_epoch_{}_FINAL.h5\".format(epoch)\n full_path = os.path.join(self.path, name)\n self.save_model(kwargs[\"trainer\"], full_path)\n else:\n print(\"Minimum iterations to store model not reached.\")\n\n if self.best_model is not None:\n best_model = deepcopy(self.best_model)\n best_res = self.best_res\n if self.window is not None:\n print(\"Best result during training: {:.2f}.\\n In a window of size {} \"\n \"starting in epoch {} with best mean value of {} \\n Saving model..\".format(best_res,\n self.window,\n self.best_window_start,\n self.best_mean_res))\n else:\n print(\n \"Best result during training: {:.2f}. Saving model..\".format(\n best_res\n )\n )\n name = self.prepend + \"BEST_ITERATION.h5\"\n torch.save(best_model, os.path.join(self.path, name))\n self.reset()", "def param_tune(self):\n grid = {'bootstrap': [True, False],\n 'max_depth': [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, None],\n 'max_features': ['auto', 'sqrt'],\n 'min_samples_leaf': [1, 2, 4],\n 'min_samples_split': [2, 5, 10],\n 'n_estimators': [200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000]}\n\n rf = RandomForestClassifier()\n rf_grid = GridSearchCV(estimator=rf, param_distributions=grid, verbose=2, n_jobs=-1)\n rf_grid.fit(self.X_train, self.y_train)\n self.results.write(str(rf_grid.best_params_) + \"\\n\")", "def _save_params(self, output_folder: str, checkpoint: int):\n arg_params, aux_params = self.module.get_params() # sync aux params across devices\n self.module.set_params(arg_params, aux_params)\n self.params = arg_params\n params_base_fname = C.PARAMS_NAME % checkpoint\n self.save_params_to_file(os.path.join(output_folder, params_base_fname))", "def tune_and_find_parameter(self,algo_name, algo, rating_data,param_grid):\n\n\n print(\"tuning for\", algo_name, \"hyperparameters\")\n\n # algo: algo class name\n grid_search = GridSearchCV(algo, param_grid, measures=['rmse', 'mae'])\n grid_search.fit(rating_data)\n\n print('best RMSE for ', algo_name, ' ', grid_search.best_score['rmse'])\n\n best_params = grid_search.best_params['rmse']\n # print the best set of parameters\n print(\"best params:\", best_params)\n return best_params", "def save(self, path):\n file = open(path, 'wb')\n pickle.dump(optimizers.unpack_optimizer_state(self.opt_state), file, -1)\n file.close()", "def save_pkl(self, filename):\n param_dict = {}\n param_dict['learningrate'] = self.learningrate\n param_dict['verbose'] = self.verbose\n param_dict['loadsize'] = self.loadsize\n param_dict['batchsize'] = self.batchsize\n param_dict['momentum'] = self.momentum\n param_dict['epochcount'] = self.epochcount\n param_dict['momentum_batchcounter'] = self.momentum_batchcounter\n param_dict['incs'] = dict(\n [(p.name, self._incs[p].get_value()) for p in self._params])\n if self.rmsprop is not None:\n param_dict['avg_grad_sqrs'] = dict(\n [(p.name, self._avg_grad_sqrs[p].get_value()) for p in self._params])\n pickle.dump(param_dict, open(filename, 'wb'))", "def _save_best(self, epoch, holdout_loss):\n updated = False\n\n current = holdout_loss\n _, best = self._snapshot\n improvement = (best - current) / best # Notice this is different with the one used in bnn._save_best\n print(\"improvement {} and updates steps {} and current holdout_loss {}, best loss {}\".format(improvement,\n self._epochs_since_update,\n 
current, best))\n if improvement > 0.01:\n self._snapshot = (epoch, current)\n # save current state\n # saver.save(self.sess_ssm, '')\n updated = True\n\n # early stopping\n if updated:\n self._epochs_since_update = 0\n else:\n self._epochs_since_update += 1\n\n if self._epochs_since_update > self._early_stop_patience:\n return True\n else:\n return False", "def optimize(self, best_func):\n nb_clf = Pipeline(steps=[('vect', TfidfVectorizer()), ('clf', best_func)])\n parameters = {\n 'vect__stop_words': [None, 'english'],\n }\n gs_clf = GridSearchCV(nb_clf, parameters, scoring='accuracy')\n gs_clf = gs_clf.fit(self.train.text, self.train.gender)\n print(\"Best parameters: \" + str(gs_clf.best_params_))\n print('Best score: ' + str(gs_clf.best_score_))\n print('=' * 80)\n return gs_clf.best_params_", "def save_checkpoint(state, is_best, filename='checkpoint/chpt.tar'):\n if is_best:\n print (\"=> Saving a new best\")\n torch.save(state, filename) # save checkpoint\n else:\n print (\"=> Validation Accuracy did not improve\")", "def save_weights(self, save_dir: Path, is_best: bool = False) -> None:\n if is_best:\n save_path = save_dir.expanduser() / self.BEST_SAVE_NAME\n else:\n save_path = save_dir.expanduser() / self.SAVE_NAME\n torch.save(self.model.state_dict(), save_path)", "def save(self, objectivefunctions, parameter, simulations):\r\n # Save the effiency and the parameters in outfile_param\r\n line=str(objectivefunctions[0])+ \",\" + str(objectivefunctions[1])+ \",\" +str(objectivefunctions[2])+ ','+str(list(parameter)).strip('[]')\r\n self.outfile_param.write(line+'\\n')\r\n self.outfile_param.flush()\r\n # If the model run is ok save the results in outfile_sim\r\n if objectivefunctions[0] > self.simthreshold_NS and abs(objectivefunctions[1]) <= self.simthreshold_pbias and objectivefunctions[2] <= self.simthreshold_rsr:\r\n # shift the whole timeseries by one day to hit peaks better\r\n if self.shift_one_day:\r\n self.outfile_sim.write(line + \",\" + ',' + str(list(simulations)).strip('[]')+'\\n')\r\n self.outfile_sim.flush() \r\n else:\r\n self.outfile_sim.write(line + ',' + str(list(simulations)).strip('[]')+'\\n')\r\n self.outfile_sim.flush()", "def optimize(self, return_teacher_params_bool = False):\n\n gen_batches = self.DATASET.data_stream(self.BATCH_SIZE)\n \n num_complete_batches, leftover = divmod(self.DATASET.num_example['train'], self.BATCH_SIZE)\n\n # number of minibatches per epoch\n num_mini_batches_per_epochs = num_complete_batches + bool(leftover)\n\n # number of total iterations\n num_total_iters = self.NUM_EPOCHS * num_mini_batches_per_epochs\n\n # number of time that the sparisty levels get updated\n num_sparsity_updates = num_total_iters // self.MASK_UPDATE_FREQ \n \n mask_update_limit = num_total_iters - self.MASK_UPDATE_FREQ\n \n if self.SAVE_BOOL == True:\n # save the transferred results in the desinated directory.\n\n trans_model_dir = self.unique_model_dir\n\n# while os.path.exists(trans_model_dir):\n# trans_model_dir = trans_model_dir + '_0'\n \n if not os.path.exists(trans_model_dir):\n os.makedirs(trans_model_dir)\n\n np.save(trans_model_dir + '/param_dict.npy', self.param_dict) \n \n \n\n nt_trans_params_all_sparsities_all_runs = []\n nt_trans_masks_all_sparsities_all_runs = []\n nt_trans_vali_all_sparsities_all_runs = []\n teacher_params_all_sparsities_all_runs = []\n \n \n num_sparisty_levels = len(self.NN_DENSITY_LEVEL_LIST) \n num_runs = len(range(self.INIT_RUN_INDEX, self.INIT_RUN_INDEX + self.NUM_RUNS ))\n all_density_all_run_num_total_iters = 
num_sparisty_levels * num_runs * num_total_iters\n \n \n for nn_density_level in self.NN_DENSITY_LEVEL_LIST: \n \n \n nt_trans_params_all_runs = []\n nt_trans_masks_all_runs = []\n nt_trans_vali_all_runs = []\n teacher_params_all_runs = []\n\n\n for run_index in range(self.INIT_RUN_INDEX, self.INIT_RUN_INDEX + self.NUM_RUNS ):\n\n # do logging\n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n\n # a string that summarizes the current ntt experiment\n model_summary_str = self.model_str + '_density_' + str(round(nn_density_level, 2) ) + '_run_' + str(run_index)\n\n if self.SAVE_BOOL == True:\n model_dir_density_run = trans_model_dir + '/' + 'density_' + str(round(nn_density_level, 2) ) + '/' + 'run_' + str(run_index) + '/'\n\n os.makedirs(model_dir_density_run)\n \n logging.basicConfig(filename = model_dir_density_run + \"/\" + model_summary_str + \"_log.log\", format='%(asctime)s %(message)s', filemode='w', level=logging.DEBUG)\n\n else: \n logging.basicConfig(filename = model_summary_str + \"_log.log\" , format='%(asctime)s %(message)s', filemode='w', level=logging.DEBUG)\n \n \n # for different run indices, randomly draw teacher net's parameters\n _, teacher_net_params = self.init_fun(random.PRNGKey(run_index), tuple(self.batch_input_shape))\n \n # the prediction of the teacher net evaluated on validation samples\n vali_teacher_prediction = self.apply_fn(teacher_net_params, self.vali_samples)\n\n vali_teacher_ntk_mat = self.emp_ntk_fn(self.vali_inputs_1, self.vali_inputs_2, teacher_net_params) \n\n # the initial binary mask\n \n if self.PRUNE_METHOD == 'magnitude': \n masks = get_masks_from_jax_params(teacher_net_params, nn_density_level, global_bool = self.GLOBAL_PRUNE_BOOL)\n elif self.PRUNE_METHOD == 'logit_snip':\n logger.info(\"Use logit snip method to get the initial mask\")\n num_examples_snip = 128\n\n# gen_batches_logit_snip = self.DATASET.data_stream(num_examples_snip)\n \n snip_input = self.DATASET.dataset['train']['input'][:num_examples_snip, :]\n \n if self.GLOBAL_PRUNE_BOOL == False:\n logger.warning(\"layerwise sparse net initialized with logit_snip\") \n masks = get_logit_snip_masks(teacher_net_params, nn_density_level, self.apply_fn, snip_input, self.batch_input_shape, GlOBAL_PRUNE_BOOL = self.GLOBAL_PRUNE_BOOL) \n else:\n raise NotImplementedError(\"not implemented\")\n \n\n # the initial student parameters\n masked_student_net_params = get_sparse_params_filtered_by_masks(teacher_net_params, masks)\n\n # instantiate the optimizer triple \n opt_init, opt_update, get_params = self.OPTIMIZER_WITH_PARAMS\n\n opt_state = opt_init(teacher_net_params) \n\n # one step of NTK transfer\n @jit\n def nt_transfer_step(i, opt_state, x, masks):\n\n # parameters in the current optimizer state\n student_net_params = get_params(opt_state)\n\n # gradients that flow through the binary masks\n masked_g = grad(self.nt_transfer_loss)(student_net_params, masks, teacher_net_params, x, nn_density_level)\n\n return opt_update(i, masked_g, opt_state)\n\n # a list of validation loss\n vali_loss_list = []\n\n # calculate the initial validation loss. 
\n vali_loss = self.eval_nt_transfer_loss_on_vali_data(masked_student_net_params, vali_teacher_prediction, vali_teacher_ntk_mat, nn_density_level)\n\n vali_loss_list.append(vali_loss)\n\n logger.info(\"Before transfer: trans dist %.3f | ntk dist %.3f | targ dist %.3f | l2 pentalty %.3f | nn density %.2f\", vali_loss[0], vali_loss[1], vali_loss[2], vali_loss[3], nn_density_level)\n itercount = itertools.count()\n\n t = time.time()\n\n # loop through iterations\n for num_iter in range(1, num_total_iters + 1): \n \n # a batch of input data\n batch_xs, _ = next(gen_batches) \n\n # reshape the input to a proper format (2d array for MLP and 3d for CNN)\n batch_xs = batch_xs.reshape(self.batch_input_shape) \n\n # update the optimizer state\n opt_state = nt_transfer_step(next(itercount), opt_state, batch_xs, masks )\n\n\n if num_iter % 100 == 0:\n elapsed_time = time.time() - t\n \n if (num_iter <= 500) and (run_index == self.INIT_RUN_INDEX) and (nn_density_level == self.NN_DENSITY_LEVEL_LIST[0]): \n # estimate the program end time.\n remaining_iter_num = all_density_all_run_num_total_iters - num_iter\n remaining_seconds = elapsed_time * ( remaining_iter_num / 100 )\n expected_end_time = str(datetime.now() + timedelta(seconds = remaining_seconds))\n\n # get parameters from the current optimizer state\n student_net_params = get_params(opt_state) \n\n # filter the paramters by masks\n masked_student_net_params = get_sparse_params_filtered_by_masks(student_net_params , masks)\n \n # validation loss\n vali_loss = self.eval_nt_transfer_loss_on_vali_data(masked_student_net_params, vali_teacher_prediction, vali_teacher_ntk_mat, nn_density_level) \n\n vali_loss_list.append(vali_loss)\n\n logger.info('run: %02d/%02d | iter %04d/%04d | trans. dist %.3f | ntk dist %.3f | targ. dist %.3f | l2 %.3f | nn density %.2f | time %.2f [s] | expected finish time %s', run_index, self.NUM_RUNS + self.INIT_RUN_INDEX - 1, num_iter, num_total_iters, vali_loss[0], vali_loss[1], vali_loss[2], vali_loss[3], nn_density_level, elapsed_time, expected_end_time)\n t = time.time()\n\n\n if (num_iter % self.MASK_UPDATE_FREQ == 0) and (num_iter < mask_update_limit):\n # get parameters from the current optimizer state\n student_net_params = get_params(opt_state) \n \n # update masks\n masks = get_masks_from_jax_params(student_net_params, nn_density_level, global_bool = self.GLOBAL_PRUNE_BOOL)\n \n# if self.PRUNE_METHOD == 'logit_snip':\n# logit_snip_batch_xs, _ = next(gen_batches_logit_snip)\n# masks = get_logit_snip_masks(student_net_params, nn_density_level, self.apply_fn, snip_input, self.batch_input_shape, GlOBAL_PRUNE_BOOL = self.GLOBAL_PRUNE_BOOL) \n# else:\n# masks = get_masks_from_jax_params(student_net_params, nn_density_level, global_bool = self.GLOBAL_PRUNE_BOOL)\n\n\n \n elapsed_time = time.time() - t\n \n student_net_params = get_params(opt_state) \n \n # filter the paramters by masks\n masked_student_net_params = get_sparse_params_filtered_by_masks(student_net_params , masks)\n \n vali_loss = self.eval_nt_transfer_loss_on_vali_data(masked_student_net_params, vali_teacher_prediction, vali_teacher_ntk_mat, nn_density_level) \n\n vali_loss_list.append(vali_loss)\n \n logger.info('run: %02d/%02d | iter %04d/%04d | trans. dist %.3f | ntk dist %.3f | targ. 
dist %.3f | l2 %.3f | nn density %.2f | time %.2f [s]', run_index, self.NUM_RUNS + self.INIT_RUN_INDEX - 1, num_iter, num_total_iters, vali_loss[0], vali_loss[1], vali_loss[2], vali_loss[3], nn_density_level, elapsed_time )\n \n vali_loss_array = np.array(vali_loss_list)\n\n nt_trans_params_all_runs.append(masked_student_net_params)\n nt_trans_masks_all_runs.append(masks)\n nt_trans_vali_all_runs.append(vali_loss_array)\n teacher_params_all_runs.append(teacher_net_params )\n\n if self.SAVE_BOOL == True:\n\n model_summary_str = self.model_str + '_density_' + str(round(nn_density_level, 2) ) + '_run_' + str(run_index)\n\n teacher_param_fileName = model_dir_density_run + 'teacher_params_' + model_summary_str\n np.save(teacher_param_fileName, teacher_net_params)\n\n student_param_fileName = model_dir_density_run + 'transferred_params_' + model_summary_str\n np.save(student_param_fileName, masked_student_net_params)\n\n mask_fileName = model_dir_density_run + 'transferred_masks_' + model_summary_str\n np.save(mask_fileName, masks)\n\n loss_array_fileName = model_dir_density_run + 'loss_array_' + model_summary_str\n np.save(loss_array_fileName, vali_loss_array)\n \n\n nt_trans_params_all_sparsities_all_runs.append( nt_trans_params_all_runs )\n nt_trans_masks_all_sparsities_all_runs.append( nt_trans_masks_all_runs )\n nt_trans_vali_all_sparsities_all_runs.append( nt_trans_vali_all_runs )\n teacher_params_all_sparsities_all_runs.append( teacher_params_all_runs )\n \n if return_teacher_params_bool:\n return nt_trans_params_all_sparsities_all_runs, nt_trans_masks_all_sparsities_all_runs, nt_trans_vali_all_sparsities_all_runs, teacher_params_all_sparsities_all_runs\n\n else:\n return nt_trans_params_all_sparsities_all_runs, nt_trans_masks_all_sparsities_all_runs, nt_trans_vali_all_sparsities_all_runs", "def save(self, model, ema_model, optimizer, epoch, step, best_wer,\n is_best=False):\n rank = 0\n if dist.is_initialized():\n dist.barrier()\n rank = dist.get_rank()\n\n if rank != 0:\n return\n\n # Checkpoint already saved\n if not is_best and epoch in self.tracked:\n return\n\n unwrap_ddp = lambda model: getattr(model, 'module', model)\n state = {\n 'epoch': epoch,\n 'step': step,\n 'best_wer': best_wer,\n 'state_dict': unwrap_ddp(model).state_dict(),\n 'ema_state_dict': unwrap_ddp(ema_model).state_dict() if ema_model is not None else None,\n 'optimizer': optimizer.state_dict(),\n 'amp': amp.state_dict() if self.use_amp else None,\n }\n\n if is_best:\n fpath = os.path.join(\n self.save_dir, f\"{self.model_name}_best_checkpoint.pt\")\n else:\n fpath = os.path.join(\n self.save_dir, f\"{self.model_name}_epoch{epoch}_checkpoint.pt\")\n\n print_once(f\"Saving {fpath}...\")\n torch.save(state, fpath)\n\n if not is_best:\n # Remove old checkpoints; keep milestones and the last two\n self.tracked[epoch] = fpath\n for epoch in set(list(self.tracked)[:-2]) - set(self.keep_milestones):\n try:\n os.remove(self.tracked[epoch])\n except:\n pass\n del self.tracked[epoch]", "def save_parameters():\n global node_list,neighbours\n for l in range(len(node_list)):\n print(l)\n name=\"node\"+str(l)\n print(name,node_list[l])\n pycom.nvs_set(name,node_list[l])\n leng=l\n pycom.nvs_set(\"len_node\",leng+1)\n for l in range(len(neighbours[1])):\n name=\"neighbour\"+str(l)\n name2=\"power\"+str(l)\n pycom.nvs_set(name,neighbours[0][l])\n pycom.nvs_set(name2,neighbours[1][l])\n leng=l\n pycom.nvs_set(\"len_neighbours\",leng+1)\n com.savestate()\n return", "def _optimize(self):\n # Retrieve all trainable variables\n 
train_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n\n # Compute the gradient (return a pair of variable and their respective gradient)\n grads = self.optimizer.compute_gradients(loss=self.loss, var_list=train_variables)\n self.train_dis = self.optimizer.apply_gradients(grads, global_step=self.global_step)", "def best(self, protocol_name: str, subset: str = \"development\"):\n\n train_dir = Path(\n self.TRAIN_DIR.format(\n experiment_dir=self.experiment_dir,\n protocol=protocol_name,\n subset=subset,\n )\n )\n\n study_name = \"default\"\n optimizer = Optimizer(\n self.pipeline_, db=train_dir / \"trials.journal\", study_name=study_name\n )\n\n try:\n best_loss = optimizer.best_loss\n except ValueError as e:\n print(\"Still waiting for at least one iteration to succeed.\")\n return\n\n best_params = optimizer.best_params\n\n print(f\"Loss = {100 * best_loss:g}% with the following hyper-parameters:\")\n\n content = yaml.dump(best_params, default_flow_style=False)\n print(content)", "def optimize_params(self, qnodes=None):\n #logger.debug(\"optimize_params of baseclass --> no optimization available!!!\")\n return {}", "def load_slow_weights(self):\n for group in self.optimizer.param_groups:\n for p in group[\"params\"]:\n param_state = self.state[p]\n param_state[\"backup_params\"] = torch.zeros_like(p.data)\n param_state[\"backup_params\"].copy_(p.data)\n p.data.copy_(param_state[\"slow_params\"])", "def save(self, path=None):\n path = self.opt.get('model_file', None) if path is None else path\n\n if path and hasattr(self, 'model'):\n model = {'model': self.model.state_dict(),\n 'longest_label': self.model.longest_label,\n 'optimizer': self.optimizer.state_dict(),\n 'optimizer_type': self.opt['optimizer']}\n\n with open(path, 'wb') as write:\n torch.save(model, write)\n\n # save opt file as json\n with open(path + \".opt\", 'wb') as handle:\n pickle.dump(self.opt, handle, protocol=pickle.HIGHEST_PROTOCOL)", "def move(self, global_best):\n m = self.manipulator\n #print \"cfg length check:\", len(self.velocity), len(self.position)\n for p in m.params:\n if p.is_permutation(): #TODO: ALL parameters that require probablistic intepretation\n if random.uniform(0,1)>self.omega:\n if random.uniform(0,1)<self.phi_l:\n # Select crossover operator\n getattr(p, self.crossover_choice)(self.position, self.position, global_best, d=p.size/3)\n else:\n getattr(p, self.crossover_choice)(self.position, self.position, self.best, d=p.size/3)\n else:\n # Continuous representation regardless of param type\n v = self.velocity+(-self.phi_l-self.phi_g)*self.position+ self.best*self.phi_l+ global_best*self.phi_g\n self.position = min(max([self.position+v, 0]),1)", "def obtain_best_model(optimal_weights):\n gnn = NeuralNetwork(optimal_weights)\n gnn.compile_train(5)\n\n gnn.save_accuracy_chart()\n\n gnn.model.save('spy_classifier')", "def update(self, global_best):\n self._update_velocity(global_best)\n\n next_position = self._get_new_position()\n\n loop_count = 0\n while not self._check_constraint(next_position):\n if loop_count < 5:\n self._resize_velocity(0.5)\n loop_count += 1\n else:\n self._spawn(position=False)\n next_position = self._get_new_position()\n\n self.position = next_position\n self.value = self.optimization_object.func(self.position)\n if is_better(self.best[0], self.value,\n self.optimization_object.find_max):\n self.best = (self.value, deepcopy(self.position))", "def restore_fast_weights(self):\n for group in self.optimizer.param_groups:\n for p in group[\"params\"]:\n param_state 
= self.state[p]\n p.data.copy_(param_state[\"backup_params\"])\n del param_state[\"backup_params\"]", "def save_params():\n file_name = filedialog.asksaveasfilename(\n filetypes=[\n (\"JSON\", \"*.json\")\n ],\n initialdir=os.getcwd())\n if file_name: # save option not cancelled by user\n self.parent_class.classes[\"fractal\"].curve.store_curve_tofile(\n file_name)", "def search(self, optimal):\n \n for idx, params in enumerate(self.params):\n bar_length = 20\n percent = float(idx) / self.size\n hashes = '#' * int(round(percent * bar_length))\n spaces = ' ' * (bar_length - len(hashes))\n sys.stdout.write(\"\\rPerforming grid search: [{0}] {1}%\".format(hashes + spaces, int(round(percent * 100))))\n sys.stdout.flush()\n\n best_fitnesses = []\n\n for x in xrange(self.repeat):\n self.genetic_algorithm.set_params(**params)\n self.genetic_algorithm.init_population()\n self.genetic_algorithm.evolve()\n\n individual = self.genetic_algorithm.result()\n genotype = individual.get_genotype()\n best_individual = self.genetic_algorithm.result()\n best_fitness = best_individual.get_fitness()\n best_fitnesses.append(best_fitness)\n\n info_mean = pandas.DataFrame([[params, numpy.mean(best_fitness)]], columns=[\"params\", \"best_fitness\"])\n self.grid_scores = self.grid_scores.append(info_mean, ignore_index=True)\n\n sys.stdout.write(\"\\rPerforming grid search: [{0}] {1}%\\n\\n\".format(hashes + spaces, int(round(100))))\n sys.stdout.flush()", "def GetPts(self):\n return self.best", "def optimize(self, X, y):\n print(\"Performing TPOT genetic optimization.\")\n self.model.fit(X, y)\n self.optimized = True", "def optimize(self): \n if self.model == 'ARD':\n estimate = minimize(\n fun=optim_func,\n x0=np.array([self.alpha, self.beta]),\n args=(self,),\n method='L-BFGS-B',\n bounds=((0, 50), (0, 50)),\n )\n # logger.info(estimate)\n\n # organize into a dict\n result = {\n \"alpha\": estimate.x[0],\n \"beta\": estimate.x[1],\n \"Lik\": estimate.fun,\n \"negLogLik\": -np.log(-estimate.fun),\n \"convergence\": estimate.success,\n }\n logger.debug(result)\n\n elif self.model == 'ER':\n estimate = minimize(\n fun=optim_func,\n x0=np.array([self.alpha]),\n args=(self,),\n method='L-BFGS-B',\n bounds=[(0, 50)],\n )\n\n result = {\n \"alpha\": estimate.x[0],\n \"Lik\": estimate.fun, \n \"negLogLik\": -np.log(-estimate.fun),\n \"convergence\": estimate.success,\n }\n logger.debug(result)\n\n else:\n raise Exception('model must be specified as either ARD or ER')\n\n # get scaled likelihood values\n self.log_lik = result[\"negLogLik\"]\n self.tree = self.tree.set_node_values(\n 'likelihood',\n values={\n node.idx: np.array(node.likelihood) / sum(node.likelihood)\n for node in self.tree.idx_dict.values()\n }\n )", "def save_params(self):\n try:\n with open(self.json_file, \"w\") as fl:\n json.dump(self.params, fl, indent=4)\n except KeyError as inst:\n print(inst)", "def save_params(self):\n sh = shelve.open(os.path.expanduser('~/.config/scheduler/params'))\n sh['params'] = self.params\n sh.close()", "def _backup_and_load_cache(self):\n for group in self.optimizer.param_groups:\n for p in group['params']:\n param_state = self.state[p]\n param_state['backup_params'] = torch.zeros_like(p.data)\n param_state['backup_params'].copy_(p.data)\n p.data.copy_(param_state['cached_params'])", "def _backup_and_load_cache(self):\n for group in self.optimizer.param_groups:\n for p in group['params']:\n param_state = self.state[p]\n param_state['backup_params'] = torch.zeros_like(p.data)\n param_state['backup_params'].copy_(p.data)\n 
p.data.copy_(param_state['cached_params'])", "def save(self):\n for name, param in self.components.items():\n param_path = os.path.join(self.model_path, \"%s.mat\" % name)\n if hasattr(param, 'params'):\n param_values = {p.name: p.get_value() for p in param.params}\n else:\n param_values = {name: param.get_value()}\n scipy.io.savemat(param_path, param_values)", "def _optimise(self):\n better = True\n self.solutions = set()\n\n # Rebuild the neighbours\n self.neighbours = {}\n\n for i in self.heuristic_path:\n self.neighbours[i] = []\n\n for j, dist in enumerate(TSP.edges[i]):\n if dist > 0 and j in self.heuristic_path:\n self.neighbours[i].append(j)\n\n # Restart the loop each time we find an improving candidate\n while better:\n better = self.improve()\n # Paths always begin at 0 so this should manage to find duplicate\n # solutions\n self.solutions.add(str(self.heuristic_path))\n\n self.save(self.heuristic_path, self.heuristic_cost)", "def random_param_tune(self):\n random_grid = {'bootstrap': [True, False],\n 'max_depth': [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, None],\n 'max_features': ['auto', 'sqrt'],\n 'min_samples_leaf': [1, 2, 4],\n 'min_samples_split': [2, 5, 10],\n 'n_estimators': [200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000]}\n\n rf = RandomForestClassifier()\n rf_random = RandomizedSearchCV(estimator=rf, param_distributions=random_grid, n_iter=250, cv=3, verbose=2, n_jobs=-1)\n rf_random.fit(self.X_train, self.y_train)\n self.results.write(str(rf_random.best_params_) + \"\\n\")", "def save_checkpoint(self) -> Dict[str, Union[Dict[str, torch.Tensor], dict]]:\n if isinstance(self.model, nn.DataParallel) or isinstance(self.model, nn.parallel.DistributedDataParallel):\n model = self.model.module.state_dict()\n else:\n model = self.model.state_dict()\n\n checkpoint = {\n \"model_state_dict\": deepcopy(model),\n \"optimizer_state_dict\": deepcopy(self.optimizer.state_dict()),\n }\n return checkpoint", "def save_parameters(self, session, out_dict=None):\n if out_dict is None:\n out_dict = {}\n for layer in self.layers:\n layer.save_parameters(session, out_dict)\n return out_dict", "def cache_parameters(self):\n sw_parameter_dir = self.args.parameter_dir + '/sw'\n waterer = Waterer(self.args, self.input_info, self.reco_info, self.germline_seqs, parameter_dir=sw_parameter_dir, write_parameters=True)\n waterer.run()\n self.sw_info = waterer.info\n self.write_hmms(sw_parameter_dir)\n parameter_out_dir = self.args.parameter_dir + '/hmm'\n self.run_hmm('viterbi', parameter_in_dir=sw_parameter_dir, parameter_out_dir=parameter_out_dir, count_parameters=True)\n self.write_hmms(parameter_out_dir)", "def save_params():\n out_json = os.path.join(OUTPUT_DIR, OUT_JSON)\n out_dict = {\n \"librosa_version\": librosa.__version__,\n \"numpy_version\": np.__version__,\n \"SR\": SR,\n \"N_MELS\": N_MELS,\n \"N_FFT\": N_FFT,\n \"HOP_LENGTH\": HOP_LENGTH,\n \"MEL_FMIN\": MEL_FMIN,\n \"MEL_FMAX\": MEL_FMAX\n }\n with open(out_json, 'w') as f:\n json.dump(out_dict, f, indent=4)", "def find_best_solution_and_score(self):\r\n best_score = MAXSIZE\r\n best_solution = self.simulation.solutions[0]\r\n for solution in self.simulation.solutions:\r\n score = self.simulation.fitting_function.fit_score(solution)\r\n if score < best_score:\r\n best_score = score\r\n best_solution = solution\r\n return best_solution, best_score", "def saveParameters(self):\n # save current scale values into the ball state for the current ball\n x = self.initialXScale.get()\n y = self.initialYScale.get()\n xVel = 
self.initialXVelScale.get()\n yVel = self.initialYVelScale.get()\n self.ballStates[self.currentBall] = [x, y, xVel, yVel]\n # set new currentBall if changed\n self.currentBall = int(self.ballSelector.get())\n # the states of all the balls to be simulated\n self.simArgs['balls']=self.ballStates\n self.simArgs['playbackSpeed'] = self.playbackSpeedScale.get()\n self.simArgs['trace'] = self.toTrace.get()\n self.simArgs['friction'] = self.friction.get()\n # get number of balls from formation string\n self.simArgs['nBalls'] = self.nBalls\n # for s in self.numberOfBallsSelector.get().split():\n # if s.isdigit():\n # self.simArgs['nBalls']=int(s)", "def _compute_best_value(self):\n reduced_cs = []\n concerned_vars = set()\n\n for c in self.utilities:\n asgt = filter_assignment_dict(self._neighbors_values, c.dimensions)\n reduced_cs.append(c.slice(asgt))\n concerned_vars.update(c.dimensions)\n var_val, rel_val = find_arg_optimal(\n self.variable,\n lambda x: functools.reduce(operator.add, [f(x) for f in reduced_cs]),\n self._mode,\n )\n # Add the cost for each variable value if any\n for var in concerned_vars:\n if var.name == self.name:\n rel_val += var.cost_for_val(self.current_value)\n else:\n rel_val += var.cost_for_val(self._neighbors_values[var.name])\n\n return var_val, rel_val", "def savings_algorithm(self):\n self.generate_trivial_tours() # generate trivial solution\n while True: # endless loop\n maxSavings = 0 # values for best savings decision\n bestr1 = None\n bestr2 = None\n for r1 in self.routes: # loop through all route combinations\n for r2 in self.routes:\n if r1 != r2:\n currentSavings = self.savings2routes(r1,r2)\n if currentSavings > maxSavings: # if the savings are greater than the so far best savings\n bestr1 = r1 # store the routes and the savings value\n bestr2 = r2\n maxSavings = currentSavings\n if (bestr1 == None): # if no savings or no feasible joins exist break out of the loop\n break\n newRoute = VRP_Route(bestr1.route+bestr2.route) # generate new route and delete old routes\n self.routes.remove(bestr1)\n self.routes.remove(bestr2)\n self.routes.append(newRoute)\n self.get_objective()\n return self.objective", "def save_fit(self):\n if self.fit is None:\n self.fit_status.setText('Fit not available for saving')\n return\n self.read_parameters()\n group = NXprocess()\n group['model'] = self.composite_model\n group['data'] = self.data\n for m in self.models:\n group[m['name']] = self.get_model(m['model'])\n parameters = NXparameters(attrs={'model': m['class']})\n for name in m['parameters']:\n p = self.fit.params[name]\n name = name.replace(m['model'].prefix, '')\n parameters[name] = NXfield(p.value, error=p.stderr,\n initial_value=p.init_value,\n min=str(p.min), max=str(p.max),\n vary=p.vary, expr=p.expr)\n group[m['name']].insert(parameters)\n group['program'] = 'lmfit'\n group['program'].attrs['version'] = lmfit_version\n group['title'] = 'Fit Results'\n group['fit'] = self.get_model(fit=True)\n fit = NXparameters()\n fit.nfev = self.fit.result.nfev\n fit.chisq = self.fit.result.chisqr\n fit.redchi = self.fit.result.redchi\n fit.message = self.fit.result.message\n group['statistics'] = fit\n group.note = NXnote(\n self.fit.result.message,\n f'Chi^2 = {self.fit.result.chisqr}\\n'\n f'Reduced Chi^2 = {self.fit.result.redchi}\\n'\n f'No. of Function Evaluations = {self.fit.result.nfev}\\n'\n f'No. of Variables = {self.fit.result.nvarys}\\n'\n f'No. of Data Points = {self.fit.result.ndata}\\n'\n f'No. 
of Degrees of Freedom = {self.fit.result.nfree}\\n'\n f'{self.fit.fit_report()}')\n self.write_group(group)", "def _hopt_pickle(self, space):\n if self.pp['net']:\n trials_step = 1 # Number of trials to run before saving\n else:\n trials_step = 4\n f_name = self.pp['hopt_fname'].replace('.pkl', '') + '.pkl'\n try:\n with open(f_name, \"rb\") as f:\n trials = pickle.load(f)\n prev_best = trials.argmin\n self.logger.error(f\"Found {len(trials.trials)} saved trials\")\n except FileNotFoundError:\n trials = Trials()\n prev_best = None\n\n add_pp_pickle(trials, self.pp)\n fn = partial(hopt_proc, self.stratclass, self.pp, mongo_uri=None)\n while True:\n n_trials = len(trials)\n self.logger.error(f\"Running trials {n_trials+1}-{n_trials+trials_step}\")\n best = fmin(\n fn=fn,\n space=space,\n algo=tpe.suggest,\n max_evals=n_trials + trials_step,\n trials=trials)\n if prev_best != best:\n bp = trials.best_trial['result']['loss']\n self.logger.error(f\"Found new best params: {best} with block prob: {bp}\")\n prev_best = best\n with open(f_name, \"wb\") as f:\n pickle.dump(trials, f)" ]
[ "0.6838676", "0.6757881", "0.6757881", "0.6757881", "0.6701749", "0.6567907", "0.6534331", "0.6523155", "0.6520775", "0.6517323", "0.65010434", "0.64957666", "0.6460189", "0.6451601", "0.639737", "0.6354237", "0.63056695", "0.6299728", "0.62912434", "0.62902164", "0.62139994", "0.62084854", "0.6188932", "0.6188932", "0.6177312", "0.61645275", "0.6157125", "0.6149795", "0.61072606", "0.61021274", "0.60887754", "0.6060137", "0.6049303", "0.60463405", "0.6035145", "0.6031718", "0.6028721", "0.6021942", "0.60185367", "0.6011407", "0.6010856", "0.60082036", "0.6006291", "0.59948236", "0.59910834", "0.5980241", "0.5970757", "0.59612304", "0.59556174", "0.5955113", "0.5954021", "0.5950602", "0.5946058", "0.594281", "0.59368294", "0.5925652", "0.5912916", "0.5891663", "0.5885712", "0.5884051", "0.58716327", "0.58507526", "0.5845581", "0.5838725", "0.5836935", "0.58355", "0.5828563", "0.5819404", "0.58085346", "0.5803372", "0.5791499", "0.5766923", "0.5762606", "0.57587177", "0.5756758", "0.5753166", "0.57498634", "0.5746238", "0.5742201", "0.5739064", "0.572821", "0.5725639", "0.572205", "0.5710147", "0.5705916", "0.5700503", "0.5700503", "0.5693377", "0.56898695", "0.568626", "0.56840533", "0.56787133", "0.5672438", "0.56643176", "0.56588066", "0.5656264", "0.5653663", "0.56499124", "0.5648744", "0.5642914" ]
0.6462199
12
Sample hyperparameters and save scores for each combination.
def main(job_path, model_path, options, num_samples, metric, score_file, param_names, prop_name, param_types, seed, **kwargs): dic_path = os.path.join(model_path, score_file) space = get_space(options=options, param_types=param_types, names=param_names) objective = make_objective(model_path=model_path, param_names=param_names, param_types=param_types, job_path=job_path, prop_name=prop_name, metric=metric, dic_path=dic_path) # sample hyperparameters with the aim to minimize the # result of `objective` fmin(objective, space, algo=tpe.suggest, max_evals=num_samples, rstate=np.random.RandomState(seed)) # save best results save_best(dic_path, metric, model_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _sample_hyperparameters(self):\n\t\tconfig = {}\n\t\tfor attr, option in self._config_options.items():\n\t\t\tprint('Sampling', attr)\n\t\t\tconfig[attr] = option.sample()\n\t\treturn config", "def __init__(self):\n self.param_names = []\n self.param_values = []\n self.param_settings = []\n self.result = []\n self.best_params = None\n self.best_score = None\n self.max_reps = 5\n self.num_values = False\n self.algorithm_done = False", "def explore(self):\n for k, v in self._hyperparameters.items():\n mutation = random.choice([0.8, 1.2])\n self._hyperparameters[k] = mutation * v", "def hyperparameter_optimization_random(X, y, *argv):\n\n clf_best_params = {}\n\n # Iterate over all (classifier, hyperparameters) pairs\n for clf, params in argv:\n\n # Run randomized search\n n_iter_search = 10\n random_search = RandomizedSearchCV(\n clf, param_distributions=params, n_iter=n_iter_search, cv=10, iid=False\n )\n random_search.fit(X, y)\n\n # Save results\n clf_best_params[clf] = random_search.best_params_\n\n return clf_best_params", "def score_samples(self, X):\n ...", "def _sample_likelihood_params(self):\r\n self._sample_omega()\r\n self._sample_beta()\r\n self._sample_r()", "def _generate(self, **kwargs):\n N = self.parameter_schema['N']\n parameter_count = len(self._parameter_names)\n common_override_kwargs = {}\n override_kwargs = self._sampler_overrides(common_override_kwargs)\n if kwargs:\n kwargs.update(override_kwargs)\n else:\n kwargs = override_kwargs\n __import__(\"SALib.sample\", fromlist=[self.sampler_class])\n sampler = getattr(SALib.sample, self.sampler_class)\n problem = self.parameter_schema[\"problem\"]\n self._samples = sampler.sample(problem, N, **kwargs)\n self._samples = numpy.unique(self._samples, axis=0)\n super()._generate()", "def scoring_sampling_half(INPUT_PATH=r'./result/data_split_half_reliability/split_data_features_and_trait_scores',\n method='predefined_parameters'):\n parameters = None\n if method == 'itself':\n calculate_scoring_parameters(INPUT_PATH)\n parameters = read(os.path.join(INPUT_PATH, 'scoring_parameters.csv'))\n\n if method == 'predefined_parameters':\n parameters = read(r'./model/trait/auxiliary_data/scoring_parameters.csv')\n\n for root, dirs, files in os.walk(INPUT_PATH):\n if files:\n features_df_train = read(os.path.join(root, root.split('\\\\')[-1] + '_train_features_group.csv'))\n score_train = logistic_trait_scores(features_df_train, parameters)\n write(os.path.join(root, root.split('\\\\')[-1] + '_train_item_and_trait_scores.csv'), score_train)\n\n features_df_test = read(os.path.join(root, root.split('\\\\')[-1] + '_test_features_group.csv'))\n score_test = logistic_trait_scores(features_df_test, parameters)\n write(os.path.join(root, root.split('\\\\')[-1] + '_test_item_and_trait_scores.csv'), score_test)", "def _train_and_evaluate(estimator, output_dir):\n \n \"\"\"X_train, y_train =utils._feature_label_split(df_train,\"is_churn\",\"msno\")\n df_val = utils.read_from_bigquery(\"amiable-octane-267022.kkbox.output_val_1\",\"amiable-octane-267022\")\n X_val, y_val =utils._feature_label_split(df_val,\"is_churn\",\"msno\")\"\"\"\n \n df_train=utils.over_sample(\"amiable-octane-267022.kkbox.output_train_1\",\"amiable-octane-267022\")\n X_train, y_train =utils._feature_label_split(df_train,\"is_churn\",\"msno\")\n df_val=utils.over_sample(\"amiable-octane-267022.kkbox.output_val_1\",\"amiable-octane-267022\")\n X_val, y_val =utils._feature_label_split(df_val,\"is_churn\",\"msno\")\n\n estimator.fit(X_train, y_train)\n f1_scorer = 
make_scorer(f1_score)\n accuracy_scorer =make_scorer(accuracy_score)\n\n if metadata.HYPERPARAMTER_TUNING:\n scores=model_selection.cross_val_score(estimator, X_val, y_val, cv=3,scoring=f1_scorer)\n #,scoring=f1_scorer\n\n logging.info('Score: %s', scores)\n\n #tune hyper\n hpt = hypertune.HyperTune()\n hpt.report_hyperparameter_tuning_metric(\n hyperparameter_metric_tag='F1_SCORE',\n metric_value=np.mean(scores),\n global_step=10000)\n \n#joblib.dump(estimator, 'model.joblib')\n\n # Write model and eval metrics to `output_dir`\n model_output_path = os.path.join(output_dir, 'model',metadata.MODEL_FILE_NAME)\n \n utils.dump_object(estimator, model_output_path)", "def hyperparameter_cv(X_data, y_data, hyperparameters):\n\n # Create Grid of hyperparameters\n grid = cartesian_product(hyperparameters)\n\n # Loop through hyperparameters \n best_score = 0\n for hyperparameter in grid:\n # Initialize Modle\n model = svm.SVC(kernel='linear', **hyperparameter)\n\n # Train and Get Accuracy\n print(f\"Training using hyperparameters: {hyperparameter}\")\n score = cross_validation_score(X_data, y_data, model, folds=5)\n print(f\"Accuracy Score: {score}\")\n\n if score > best_score:\n best_score = score\n best_parameters = hyperparameter\n \n return best_score, best_parameters", "def score(self, params):\n \n if self.output_file != None:\n with open(self.output_file, \"a\") as myfile:\n try:\n myfile.write(str(self.trials.losses()[-2])+'\\n')\n except IndexError:\n print 'Index error'\n myfile.write(str(params)+', ')\n\n print \"Training with params : \"\n print params\n num_round = int(params['n_estimators'])\n del params['n_estimators']\n\n score = 0.\n for train_index, valid_index in self.kf:\n\n df_train = self.df_train.iloc[train_index]\n df_valid = self.df_train.iloc[valid_index]\n\n # fit the model\n self.fit(df_train, self.features, self.target, params, num_round)\n\n # results of the model on validation data\n predictions = self.predict(df_valid[self.features])\n\n # computing the accuracy of predictited similar pictures\n accuracy = np.mean(df_valid[self.target].values == np.round(predictions))\n print 'accuracy:', accuracy\n score -= accuracy/float(len(self.kf))\n \n #score -= roc_auc_score(df_valid[self.target].values, predictions)\n\n print \"\\tScore {0}\\n\\n\".format(score)\n return {'loss': score, 'status': STATUS_OK}", "def get_params(self):\n return {'threshold': self.threshold,\n 'subsample': self.subsample,\n 'estimator': self.estimator,\n 'n_folds': self.n_folds,\n 'stratify': self.stratify,\n 'random_state': self.random_state,\n 'n_jobs': self.n_jobs}", "def __call__(self, params):\r\n return self.sample(params)", "def importance_sampling(self, params):\n i_part = params\n\n theta_star = weighted_sampling( self.theta_t_1, self.w_t_1 ) \n np.random.seed()\n\n theta_starstar = multivariate_normal( theta_star, self.sig_t_1 ).rvs(size=1)\n model_starstar = self.simz( theta_starstar )\n\n rho = test_dist(self.data, model_starstar) \n\n while rho > self.eps_t: \n theta_star = weighted_sampling( self.theta_t_1, self.w_t_1 )\n theta_starstar = multivariate_normal(theta_star, self.sig_t_1).rvs(size=1)\n\n model_starstar = self.simz( theta_starstar )\n\n rho = test_dist(self.data, model_starstar) \n\n p_theta = self.prior_of_priors(theta_starstar)\n\n pos_t = np.dstack(self.theta_t_1)\n\n tmp_w_t = p_theta / np.sum(self.w_t_1 * multivariate_normal(self.theta_t[:,i_part], self.sig_t_1).pdf(pos_t))\n\n data_list = [np.int(i_part)]\n\n for i_param in xrange(self.n_params): \n\n 
data_list.append(theta_starstar[i_param])\n\n data_list.append(tmp_w_t)\n data_list.append(rho)\n\n return np.array(data_list)", "def parameter_optimization(self):\n out = open(self.csv_dir + self.strategy_id + '_gridsearch.csv', \"w\")\n spl = len(self.para_list)\n for i, sp in enumerate(self.para_list):\n print(\"Strategy %s out of %s...\" % (i + 1, spl))\n self._generate_trading_instances(sp)\n self._run_backtest()\n stats = self.portfolio.get_statistics()\n tot_profit = float(stats[0][1])\n sharpe = float(stats[1][1])\n max_dd = float(stats[2][1])\n win_rate = float(stats[7][1].replace(\"%\", \"\"))\n profit_factor = float(stats[8][1])\n\n out.write(\n \"%s,%s,%s,%s,%s,%s,%s\\n\" %\n (sp[\"takeprofit\"], sp[\"period\"], tot_profit, sharpe, max_dd, win_rate, profit_factor)\n )\n out.close()", "def run_sampler(iterations, X, alpha, epsilon, lamb, p, max_newK):\n \n Z, Y = initialize_ZY(X, alpha, max_newK)\n\n for iter in xrange(1, iterations+1):\n \n processing_time = time.time()\n \n Z, Y = sample_Z(X, Z, Y, alpha, epsilon, lamb, p, max_newK)\n Y = sample_Y(X, Z, Y, alpha, epsilon, lamb, p)\n Z, Y = sort_ZY(Z, Y)\n Z, Y = remove_empties_ZY(Z, Y)\n\n lhood = log_lhood(X, Z, Y, alpha, epsilon, lamb) \n \n processing_time = time.time() - processing_time \n\n print(\"iteration %d finished in %d seconds with log-likelihood %g\"\n % (iter, processing_time, lhood))\n \n return Z, Y", "def initial_sampling(self, params):\n i = params\n theta_star = self.priors_sample()\n model = self.simz( theta_star )\n rho = test_dist(self.data, model)\n while rho > self.eps0: \n theta_star = self.priors_sample()\n model = self.simz( theta_star )\n rho = test_dist(self.data, model)\n data_list = [np.int(i)]\n\n for i_param in xrange(self.n_params): \n data_list.append(theta_star[i_param])\n data_list.append(1./np.float(self.N))\n data_list.append(rho)\n\treturn np.array(data_list)", "def tune_parameters(emails, ham, spam):\n emails = get_test(test)\n results = []\n hammy = training_set(ham, V)\n spammy = training_set(spam, V)\n print(\"Testing 9x9 combinations of d and class prior.\")\n print(\"May take around 30 seconds.\")\n for d in range(1,10):\n ham_model = hammy.train_model(d=d/10)\n spam_model = spammy.train_model(d=d/10)\n for class_prior in range(1,10):\n accuracy = test_accuracy(emails, ham_model, spam_model, class_prior=class_prior/10)\n results += [(accuracy,d/10,class_prior/10)]\n return results", "def hyperparameter_grid_search(classifier_type, features, labels, \n results_file_name, train_ratio=0.7,\n num_processes=4, **parameter_subsets): \n \n # Ensure the directory for the results file exists.\n create_dirs(results_file_name)\n \n # Retrieve a list of all of the parameter keys. 
This is necessary to \n # ensure that the keys are always in the same order.\n names = sorted(parameter_subsets.keys())\n \n # If the file already exists, determine the order of the hyperparameters\n # from the file and grab the sets that are already evaluated.\n if (os.path.isfile(results_file_name)):\n with open(results_file_name) as file:\n reader = csv.reader(file)\n \n # Read headers\n # Note: The last two columns are accuracy and logloss.\n headers = next(reader)[:-2]\n \n # If the labels do not match the names, trying to write the new\n # results to the file will only cause problems.\n if (sorted(headers) != names):\n raise ValueError('The specified results file already exists '\n 'and already contains entries that have '\n 'different set hyperparameters than the '\n 'ones specified.')\n \n names = headers\n sets_in_file = [tuple(float(x) for x in row[:-2]) \n for row in reader]\n \n # If the file does not already exist, we need to create it and write the\n # headers.\n else:\n with open(results_file_name, 'w') as file:\n headers = names + ['Accuracy', 'Logloss']\n csv.writer(file, lineterminator='\\n').writerow(headers) \n \n # Retrieve the cartesion product of all of the sets of values.\n parameter_sets = list(itertools.product(*[parameter_subsets[key] \n for key in names]))\n \n # If the file exists and parameter sets were found, remove any of the sets\n # already evaluated.\n if (sets_in_file):\n removed = [set for set in sets_in_file if set in parameter_sets]\n for set in removed:\n parameter_sets.remove(set)\n \n print('Removed %d sets that were already in the file.' % len(removed))\n \n # Each set is currently a list of all of the parameters. In order to \n # set the parameters, each list will need to be converted to a \n # dictionary to associate the parameter names with their values.\n parameter_sets = [dict(zip(names, set)) for set in parameter_sets]\n \n # Indicate how many classifiers need to be trained.\n print('Training %d classifiers...' % len(parameter_sets))\n \n # Create a pool of processes to train the classifiers.\n pool = multiprocessing.Pool(num_processes)\n args = [[classifier_type, features, labels, results_file_name, train_ratio,\n names, parameter_set] for parameter_set in parameter_sets]\n pool.map(__hgs, args)", "def sample(self, global_step, logging):\n if self.record_size < self.learn_start:\n sys.stderr.write('Record size less than learn start! 
Sample failed\\n')\n return False, False\n\n dist_index = int(math.floor(float(self.record_size) / float(self.size) * float(self.partition_num))) \n partition_size = int(math.floor(self.size / self.partition_num))\n partition_max = dist_index * partition_size\n distribution = self.distributions[dist_index]\n rank_list = []\n # sample from k segments\n for n in range(1, self.batch_size + 1):\n if(distribution['strata_ends'][n] + 1 < distribution['strata_ends'][n + 1]):\n index = np.random.randint(distribution['strata_ends'][n] + 1,\n distribution['strata_ends'][n + 1])\n else:\n index = distribution['strata_ends'][n + 1]\n \n rank_list.append(index)\n\n \n # beta, increase by global_step, max 1\n #beta = min(self.beta_zero + (global_step - self.learn_start - 1) * self.beta_grad, 1)\n beta = self.beta_zero + (1.0 - self.beta_zero) / 2 + (1.0 - self.beta_zero) / 2 * np.tanh((global_step - self.total_steps/2) / (self.total_steps/6.0))\n #beta = (1.0 - self.beta_zero) * np.exp(float(global_step) / float(self.total_steps)) / (np.exp(1) - 1) + (self.beta_zero * np.exp(1) - 1) / (np.exp(1) - 1)\n # find all alpha pow, notice that pdf is a list, start from 0\n alpha_pow = [distribution['pdf'][v - 1] for v in rank_list]\n # w = (N * P(i)) ^ (-beta) / max w\n w = np.power(np.array(alpha_pow) * partition_max, -beta)\n w_max = max(w)\n w = np.divide(w, w_max)\n \n logging.info(\"current beta is: {0}\".format(beta))\n\n # get experience id according rank_list\n experience, priority = self.retrieve(rank_list)\n return experience, rank_list, w, priority", "def hyperparams():\n H = 6\n return Munch(N=500, H=H, D=(H // 2) ** 2, batch_size=10, precision=to.float32)", "def hyperparameter_tune(X_train_all, y_train_all, X_val_all, y_val_all, hyperparameters, train_size=None):\n # Initialize train_size to be the full train data if no parameters passed\n if train_size is None:\n train_size = len(X_train_all)\n\n # Select Subset of Data\n X_train = X_train_all[:train_size]\n X_val = X_val_all[:train_size]\n y_train = y_train_all[:train_size]\n y_val = y_val_all[:train_size]\n\n # Create Grid of hyperparameters\n grid = cartesian_product(hyperparameters)\n\n # Loop through hyperparameters \n best_score = 0\n for hyperparameter in grid:\n # Initialize Model\n model = svm.SVC(kernel='linear', **hyperparameter)\n\n # Fit Model\n print(f\"Training using hyperparameters: {hyperparameter}\")\n model.fit(X_train, y_train)\n\n # Predict Values on Validation Set\n val_pred = model.predict(X_val)\n\n # Get Accuracy\n score = accuracy_score(y_val, val_pred)\n print(f\"Accuracy Score: {score}\")\n\n if score > best_score:\n best_score = score\n best_parameters = hyperparameter\n \n return best_score, best_parameters", "def train(self): \n start_time = time()\n\n # reset previous results\n self.best_result = pd.DataFrame()\n self.result = pd.DataFrame()\n\n # Generate dictionaries of all posible parameter permutations\n keys, values = zip(*self.params.items())\n self.permutations_dict = [dict(zip(keys, v)) for v in itertools.product(*values)] \n\n # Run through all models in parallel threads\n with Pool(self.thread_cnt) as p:\n result = p.map(self.analyze_model, self.permutations_dict)\n\n\n # wrap up results\n if self.classes_names: # acts as trigger for computation of cms\n for i, dic in enumerate(result):\n dic[\"id\"] = i\n self.cms = [(dic[\"id\"] ,dic.pop(\"cm\")) for dic in result]\n\n self.result = pd.DataFrame(result)\n self.best_result = self.result.iloc[self.result[\"score\"].argmax()] # store row with the best 
score\n self.best_result = self.result.iloc[self.result[\"f1_score\"].argmax()] # store row with the best score\n self.best_result = self.result.iloc[self.result[\"recall\"].argmax()] # store row with the best score\n self.best_result = self.result.iloc[self.result[\"precision\"].argmax()] # store row with the best score\n end_time = time()\n print(\"Finished evaluation\")\n print(\"Best parameteters found with:\", self.best_parameter_set())\n print(\"score=\", self.best_score())\n #print(\"f1_score=\", self.best_f1_score())\n #print(\"recall_score=\", self.best_recall_score())\n #print(\"precision_score=\", self.best_precision_score())\n print(\"Total evaluation time = {:.2f}s\".format(end_time-start_time))\n\n return self.best_parameter_set(), self.best_score()", "def priors_sample(self):\n \n theta_star = np.zeros(self.n_params)\n\n for i in xrange(self.n_params): \n np.random.seed() \n theta_star[i] = self.param_obj.prior()[i].rvs(size=1)[0]\n\n return theta_star", "def run(self,mc_sample=None):\n if mc_sample:\n self.mc_sample = mc_sample\n\n total_scores = 0.0\n total_scores_square = 0.0\n self.scores_list =[]\n \n for i in range(self.num_runs): #runs the specified number of Monte Carlo samples\n score = next(self.mc_sample) #next score\n self.scores_list.append(score) \n total_scores += score\n total_scores_square += score**2\n\n self.xhat = total_scores / self.num_runs #mean of score\n self.x2hat = total_scores_square / self.num_runs #mean of score^2\n\n self.sample_variance = (self.num_runs / (self.num_runs - 1.0)) * (self.x2hat - (self.xhat**2))\n self.sample_stddev = np.sqrt(self.sample_variance)\n self.mean_variance = self.sample_variance / (self.num_runs - 1.0)\n self.mean_stddev = np.sqrt(self.mean_variance)", "def get_prob_params():\n prob = Namespace()\n prob.study_name = STUDY_NAME\n if IS_DEBUG:\n prob.num_trials = 3\n prob.max_capital = 10\n else:\n prob.num_trials = NUM_TRIALS\n prob.max_capital = MAX_CAPITAL\n # Common\n prob.time_distro = TIME_DISTRO\n prob.num_workers = NUM_WORKERS\n _study_params = {\n 'branin': ('synthetic/branin/config_mf.json',\n branin_mf, cost_branin_mf, 0.1, 0, 1),\n 'hartmann3_2': ('synthetic/hartmann3_2/config_mf.json',\n hartmann3_2_mf, cost_hartmann3_2_mf, 0.1, 0, 1),\n 'hartmann6_4': ('synthetic/hartmann6_4/config_mf.json',\n hartmann6_4_mf, cost_hartmann6_4_mf, 0.1, 0, 1),\n 'borehole_6': ('synthetic/borehole_6/config_mf.json',\n borehole_6_mf, cost_borehole_6_mf, 1, 0, 1),\n 'park2_4': ('synthetic/park2_4/config_mf.json',\n park2_4_mf, cost_park2_4_mf, 0.3, 0, 1),\n 'park2_3': ('synthetic/park2_3/config_mf.json',\n park2_3_mf, cost_park2_3_mf, 0.1, 0, 1),\n 'park1_3': ('synthetic/park1_3/config_mf.json',\n park1_3_mf, cost_park1_3_mf, 0.5, 0, 1),\n }\n (domain_config_file_suffix, raw_func, raw_fidel_cost_func, _fc_noise_scale,\n _initial_pool_size, _) = _study_params[prob.study_name]\n domain_config_file = os.path.join(DRAGONFLY_EXPERIMENTS_DIR, domain_config_file_suffix)\n # noisy\n prob.noisy_evals = NOISY_EVALS\n if NOISY_EVALS:\n noise_type = 'gauss'\n noise_scale = _fc_noise_scale\n else:\n noise_type = 'no_noise'\n noise_scale = None\n # Create domain, function_caller and worker_manager\n config = load_config_file(domain_config_file)\n func_caller = get_multifunction_caller_from_config(raw_func, config,\n raw_fidel_cost_func=raw_fidel_cost_func, noise_type=noise_type,\n noise_scale=noise_scale)\n # Set max_capital\n if hasattr(func_caller, 'fidel_cost_func'):\n prob.max_capital = prob.max_capital * \\\n 
func_caller.fidel_cost_func(func_caller.fidel_to_opt)\n else:\n prob.max_capital = prob.max_capital\n # Store everything in prob\n prob.func_caller = func_caller\n prob.worker_manager = SyntheticWorkerManager(prob.num_workers,\n time_distro='caller_eval_cost')\n prob.save_file_prefix = prob.study_name + ('-debug' if IS_DEBUG else '')\n prob.methods = METHODS\n prob.save_results_dir = SAVE_RESULTS_DIR\n prob.reporter = get_reporter('default')\n # evaluation options\n prob.evaluation_options = Namespace(prev_eval_points='none',\n initial_pool_size=_initial_pool_size)\n return prob", "def optimize(self, return_teacher_params_bool = False):\n\n gen_batches = self.DATASET.data_stream(self.BATCH_SIZE)\n \n num_complete_batches, leftover = divmod(self.DATASET.num_example['train'], self.BATCH_SIZE)\n\n # number of minibatches per epoch\n num_mini_batches_per_epochs = num_complete_batches + bool(leftover)\n\n # number of total iterations\n num_total_iters = self.NUM_EPOCHS * num_mini_batches_per_epochs\n\n # number of time that the sparisty levels get updated\n num_sparsity_updates = num_total_iters // self.MASK_UPDATE_FREQ \n \n mask_update_limit = num_total_iters - self.MASK_UPDATE_FREQ\n \n if self.SAVE_BOOL == True:\n # save the transferred results in the desinated directory.\n\n trans_model_dir = self.unique_model_dir\n\n# while os.path.exists(trans_model_dir):\n# trans_model_dir = trans_model_dir + '_0'\n \n if not os.path.exists(trans_model_dir):\n os.makedirs(trans_model_dir)\n\n np.save(trans_model_dir + '/param_dict.npy', self.param_dict) \n \n \n\n nt_trans_params_all_sparsities_all_runs = []\n nt_trans_masks_all_sparsities_all_runs = []\n nt_trans_vali_all_sparsities_all_runs = []\n teacher_params_all_sparsities_all_runs = []\n \n \n num_sparisty_levels = len(self.NN_DENSITY_LEVEL_LIST) \n num_runs = len(range(self.INIT_RUN_INDEX, self.INIT_RUN_INDEX + self.NUM_RUNS ))\n all_density_all_run_num_total_iters = num_sparisty_levels * num_runs * num_total_iters\n \n \n for nn_density_level in self.NN_DENSITY_LEVEL_LIST: \n \n \n nt_trans_params_all_runs = []\n nt_trans_masks_all_runs = []\n nt_trans_vali_all_runs = []\n teacher_params_all_runs = []\n\n\n for run_index in range(self.INIT_RUN_INDEX, self.INIT_RUN_INDEX + self.NUM_RUNS ):\n\n # do logging\n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n\n # a string that summarizes the current ntt experiment\n model_summary_str = self.model_str + '_density_' + str(round(nn_density_level, 2) ) + '_run_' + str(run_index)\n\n if self.SAVE_BOOL == True:\n model_dir_density_run = trans_model_dir + '/' + 'density_' + str(round(nn_density_level, 2) ) + '/' + 'run_' + str(run_index) + '/'\n\n os.makedirs(model_dir_density_run)\n \n logging.basicConfig(filename = model_dir_density_run + \"/\" + model_summary_str + \"_log.log\", format='%(asctime)s %(message)s', filemode='w', level=logging.DEBUG)\n\n else: \n logging.basicConfig(filename = model_summary_str + \"_log.log\" , format='%(asctime)s %(message)s', filemode='w', level=logging.DEBUG)\n \n \n # for different run indices, randomly draw teacher net's parameters\n _, teacher_net_params = self.init_fun(random.PRNGKey(run_index), tuple(self.batch_input_shape))\n \n # the prediction of the teacher net evaluated on validation samples\n vali_teacher_prediction = self.apply_fn(teacher_net_params, self.vali_samples)\n\n vali_teacher_ntk_mat = self.emp_ntk_fn(self.vali_inputs_1, self.vali_inputs_2, teacher_net_params) \n\n # the initial binary mask\n \n if self.PRUNE_METHOD 
== 'magnitude': \n masks = get_masks_from_jax_params(teacher_net_params, nn_density_level, global_bool = self.GLOBAL_PRUNE_BOOL)\n elif self.PRUNE_METHOD == 'logit_snip':\n logger.info(\"Use logit snip method to get the initial mask\")\n num_examples_snip = 128\n\n# gen_batches_logit_snip = self.DATASET.data_stream(num_examples_snip)\n \n snip_input = self.DATASET.dataset['train']['input'][:num_examples_snip, :]\n \n if self.GLOBAL_PRUNE_BOOL == False:\n logger.warning(\"layerwise sparse net initialized with logit_snip\") \n masks = get_logit_snip_masks(teacher_net_params, nn_density_level, self.apply_fn, snip_input, self.batch_input_shape, GlOBAL_PRUNE_BOOL = self.GLOBAL_PRUNE_BOOL) \n else:\n raise NotImplementedError(\"not implemented\")\n \n\n # the initial student parameters\n masked_student_net_params = get_sparse_params_filtered_by_masks(teacher_net_params, masks)\n\n # instantiate the optimizer triple \n opt_init, opt_update, get_params = self.OPTIMIZER_WITH_PARAMS\n\n opt_state = opt_init(teacher_net_params) \n\n # one step of NTK transfer\n @jit\n def nt_transfer_step(i, opt_state, x, masks):\n\n # parameters in the current optimizer state\n student_net_params = get_params(opt_state)\n\n # gradients that flow through the binary masks\n masked_g = grad(self.nt_transfer_loss)(student_net_params, masks, teacher_net_params, x, nn_density_level)\n\n return opt_update(i, masked_g, opt_state)\n\n # a list of validation loss\n vali_loss_list = []\n\n # calculate the initial validation loss. \n vali_loss = self.eval_nt_transfer_loss_on_vali_data(masked_student_net_params, vali_teacher_prediction, vali_teacher_ntk_mat, nn_density_level)\n\n vali_loss_list.append(vali_loss)\n\n logger.info(\"Before transfer: trans dist %.3f | ntk dist %.3f | targ dist %.3f | l2 pentalty %.3f | nn density %.2f\", vali_loss[0], vali_loss[1], vali_loss[2], vali_loss[3], nn_density_level)\n itercount = itertools.count()\n\n t = time.time()\n\n # loop through iterations\n for num_iter in range(1, num_total_iters + 1): \n \n # a batch of input data\n batch_xs, _ = next(gen_batches) \n\n # reshape the input to a proper format (2d array for MLP and 3d for CNN)\n batch_xs = batch_xs.reshape(self.batch_input_shape) \n\n # update the optimizer state\n opt_state = nt_transfer_step(next(itercount), opt_state, batch_xs, masks )\n\n\n if num_iter % 100 == 0:\n elapsed_time = time.time() - t\n \n if (num_iter <= 500) and (run_index == self.INIT_RUN_INDEX) and (nn_density_level == self.NN_DENSITY_LEVEL_LIST[0]): \n # estimate the program end time.\n remaining_iter_num = all_density_all_run_num_total_iters - num_iter\n remaining_seconds = elapsed_time * ( remaining_iter_num / 100 )\n expected_end_time = str(datetime.now() + timedelta(seconds = remaining_seconds))\n\n # get parameters from the current optimizer state\n student_net_params = get_params(opt_state) \n\n # filter the paramters by masks\n masked_student_net_params = get_sparse_params_filtered_by_masks(student_net_params , masks)\n \n # validation loss\n vali_loss = self.eval_nt_transfer_loss_on_vali_data(masked_student_net_params, vali_teacher_prediction, vali_teacher_ntk_mat, nn_density_level) \n\n vali_loss_list.append(vali_loss)\n\n logger.info('run: %02d/%02d | iter %04d/%04d | trans. dist %.3f | ntk dist %.3f | targ. 
dist %.3f | l2 %.3f | nn density %.2f | time %.2f [s] | expected finish time %s', run_index, self.NUM_RUNS + self.INIT_RUN_INDEX - 1, num_iter, num_total_iters, vali_loss[0], vali_loss[1], vali_loss[2], vali_loss[3], nn_density_level, elapsed_time, expected_end_time)\n t = time.time()\n\n\n if (num_iter % self.MASK_UPDATE_FREQ == 0) and (num_iter < mask_update_limit):\n # get parameters from the current optimizer state\n student_net_params = get_params(opt_state) \n \n # update masks\n masks = get_masks_from_jax_params(student_net_params, nn_density_level, global_bool = self.GLOBAL_PRUNE_BOOL)\n \n# if self.PRUNE_METHOD == 'logit_snip':\n# logit_snip_batch_xs, _ = next(gen_batches_logit_snip)\n# masks = get_logit_snip_masks(student_net_params, nn_density_level, self.apply_fn, snip_input, self.batch_input_shape, GlOBAL_PRUNE_BOOL = self.GLOBAL_PRUNE_BOOL) \n# else:\n# masks = get_masks_from_jax_params(student_net_params, nn_density_level, global_bool = self.GLOBAL_PRUNE_BOOL)\n\n\n \n elapsed_time = time.time() - t\n \n student_net_params = get_params(opt_state) \n \n # filter the paramters by masks\n masked_student_net_params = get_sparse_params_filtered_by_masks(student_net_params , masks)\n \n vali_loss = self.eval_nt_transfer_loss_on_vali_data(masked_student_net_params, vali_teacher_prediction, vali_teacher_ntk_mat, nn_density_level) \n\n vali_loss_list.append(vali_loss)\n \n logger.info('run: %02d/%02d | iter %04d/%04d | trans. dist %.3f | ntk dist %.3f | targ. dist %.3f | l2 %.3f | nn density %.2f | time %.2f [s]', run_index, self.NUM_RUNS + self.INIT_RUN_INDEX - 1, num_iter, num_total_iters, vali_loss[0], vali_loss[1], vali_loss[2], vali_loss[3], nn_density_level, elapsed_time )\n \n vali_loss_array = np.array(vali_loss_list)\n\n nt_trans_params_all_runs.append(masked_student_net_params)\n nt_trans_masks_all_runs.append(masks)\n nt_trans_vali_all_runs.append(vali_loss_array)\n teacher_params_all_runs.append(teacher_net_params )\n\n if self.SAVE_BOOL == True:\n\n model_summary_str = self.model_str + '_density_' + str(round(nn_density_level, 2) ) + '_run_' + str(run_index)\n\n teacher_param_fileName = model_dir_density_run + 'teacher_params_' + model_summary_str\n np.save(teacher_param_fileName, teacher_net_params)\n\n student_param_fileName = model_dir_density_run + 'transferred_params_' + model_summary_str\n np.save(student_param_fileName, masked_student_net_params)\n\n mask_fileName = model_dir_density_run + 'transferred_masks_' + model_summary_str\n np.save(mask_fileName, masks)\n\n loss_array_fileName = model_dir_density_run + 'loss_array_' + model_summary_str\n np.save(loss_array_fileName, vali_loss_array)\n \n\n nt_trans_params_all_sparsities_all_runs.append( nt_trans_params_all_runs )\n nt_trans_masks_all_sparsities_all_runs.append( nt_trans_masks_all_runs )\n nt_trans_vali_all_sparsities_all_runs.append( nt_trans_vali_all_runs )\n teacher_params_all_sparsities_all_runs.append( teacher_params_all_runs )\n \n if return_teacher_params_bool:\n return nt_trans_params_all_sparsities_all_runs, nt_trans_masks_all_sparsities_all_runs, nt_trans_vali_all_sparsities_all_runs, teacher_params_all_sparsities_all_runs\n\n else:\n return nt_trans_params_all_sparsities_all_runs, nt_trans_masks_all_sparsities_all_runs, nt_trans_vali_all_sparsities_all_runs", "def main():\n\trelations = [json.loads(x) for x in open('tutorial/pdtb_trial_data.json')]\n\toutput_relations = [convert_to_output(x) for x in relations]\n\toutput_relations[1]['Connective']['TokenList'] = 
[0]\n\toutput_relations[3]['Arg1']['TokenList'].pop(4)\n\toutput_relations[4]['Arg2']['TokenList'].pop(4)\n\toutput_relations[5]['Arg2']['TokenList'].pop(4)\n\toutput_relations[6]['Sense'] = [u'Contingency.Condition'] # This will hurt sense recall\n\toutput_relations.pop(0) # This will hurt all precision\n\tscorer.evaluate(relations, output_relations)\n\treturn output_relations", "def batch_gs_cv(self, pt_rew=False):\n #get all possible HP sets from permutations of the above dict\n hp_perms = self.get_hp_perms()\n #submit job to the batch for the given HP range:\n for hp_string in hp_perms:\n Utils.sub_lstm_hp_script(self.eq_train, self.batch_boost, hp_string, pt_rew=pt_rew)", "def profile_example_cluster(self):\n self.profile_arnaud_bestfit() # Set default shape parameters\n self.M500 = 1e14\n self.r500 = 1.\n self.z = 0.5", "def train_and_report_metrics(xs, ys, num_repeat, extractor_class, useless_var_for_hparam_search=None):\n\n all_val_auc = []\n all_val_accuracy = []\n\n for i in range(num_repeat):\n single_train_metrics = extractor_class().train_single_run(xs, ys, i)\n\n all_val_auc.append(single_train_metrics['val_auc'])\n all_val_accuracy.append(single_train_metrics['val_accuracy'])\n\n metrics = {\n \"mean_val_auc\": np.mean(all_val_auc),\n \"mean_val_accuracy\": np.mean(all_val_accuracy),\n \"val_auc_std\": np.std(all_val_auc),\n \"val_accuracy_std\": np.std(all_val_accuracy)\n }\n\n print(metrics, flush=True)\n\n hpt = hypertune.HyperTune()\n hpt.report_hyperparameter_tuning_metric(\n hyperparameter_metric_tag='mean_val_auc',\n metric_value=metrics['mean_val_auc'])\n\n return metrics", "def scoring(INPUT_PATH=r'./result/trajectory_profiles', method='predefined_parameters'):\n features_df = read(os.path.join(INPUT_PATH, 'features_group.csv'))\n parameters = None\n if method == 'itself':\n if len(features_df) < 2:\n print(\n \"The subjects in this dataset is less than 2, so cannot calculate the scoring parameters \"\n \"by itself. 
Please try setting method='predefined_parameters' to use predefined parameters.\")\n return False\n calculate_scoring_parameters(INPUT_PATH)\n parameters = read(os.path.join(INPUT_PATH, 'scoring_parameters.csv'))\n\n if method == 'predefined_parameters':\n parameters = read(r'./model/trait/auxiliary_data/scoring_parameters.csv')\n\n score, label = logistic_trait_scores_and_label(features_df, parameters, method)\n write(os.path.join(INPUT_PATH, 'item_and_trait_scores.csv'), score)\n write(os.path.join(INPUT_PATH, 'item_and_trait_labels.csv'),\n label)", "def test_results_with_constant_sample_weights(strategy: str) -> None:\n n_samples = len(X_toy)\n mapie0 = MapieClassifier(**STRATEGIES[strategy])\n mapie1 = MapieClassifier(**STRATEGIES[strategy])\n mapie2 = MapieClassifier(**STRATEGIES[strategy])\n mapie0.fit(X_toy, y_toy, sample_weight=None)\n mapie1.fit(X_toy, y_toy, sample_weight=np.ones(shape=n_samples))\n mapie2.fit(X_toy, y_toy, sample_weight=np.ones(shape=n_samples)*5)\n y_pred0, y_ps0 = mapie0.predict(X_toy, alpha=0.2)\n y_pred1, y_ps1 = mapie1.predict(X_toy, alpha=0.2)\n y_pred2, y_ps2 = mapie2.predict(X_toy, alpha=0.2)\n np.testing.assert_allclose(y_pred0, y_pred1)\n np.testing.assert_allclose(y_pred0, y_pred2)\n np.testing.assert_allclose(y_ps0, y_ps1)\n np.testing.assert_allclose(y_ps0, y_ps2)", "def sample(params):\n\n config = {}\n\n for param, value in params.items():\n if hasattr(value, 'rvs'):\n # this is a scipy.stats distribution\n config[param] = value.rvs()\n else:\n # this is a tuple\n config[param] = random.choice(value)\n\n return config", "def score_grid():\r\n\t\r\n\tp = 'results\\\\mnist_filter'\r\n\t(tr_x, tr_y), (te_x, te_y) = load_mnist()\r\n\t\r\n\t# Get the SPs\r\n\tsps = [load(os.path.join(p, sp)) for sp in os.listdir(p) if sp[2] == '0']\r\n\tsp2 = load(os.path.join(p, 'sp1-0.pkl'))\r\n\t\r\n\tnwindows = 26 ** 2\r\n\tnfeat = 100 * nwindows\r\n\t\r\n\t# w = [sp2.p[sp2.syn_map == j] for j in xrange(nfeat)]\r\n\t# ms = max(wi.shape[0] for wi in w)\r\n\t# with open(os.path.join(p, 'data.pkl'), 'wb') as f:\r\n\t\t# cPickle.dump((w, ms), f, cPickle.HIGHEST_PROTOCOL)\r\n\twith open(os.path.join(p, 'data.pkl'), 'rb') as f:\r\n\t\tw, ms = cPickle.load(f)\r\n\t\r\n\t# Get training data\r\n\ttr_x2 = np.zeros((tr_x.shape[0], nfeat))\r\n\tfor i, x in enumerate(tr_x):\r\n\t\tnx = extract_patches_2d(x.reshape(28, 28), (3, 3)).reshape(\r\n\t\t\tnwindows, 9)\r\n\t\tx = np.array(np.zeros(nfeat), dtype='bool')\r\n\t\tfor j, (xi, sp) in enumerate(izip(nx, sps)):\r\n\t\t\tsp.step(xi)\r\n\t\t\tx[j*100:(j*100)+100] = sp.y[:, 0]\r\n\t\t\r\n\t\ty = sp2.p * x[sp2.syn_map]\r\n\t\tw = np.zeros((nfeat, ms))\r\n\t\tfor j in xrange(nfeat):\r\n\t\t\ta = y[sp2.syn_map == j]\r\n\t\t\tw[j][:a.shape[0]] = a\r\n\t\ttr_x2[i] = np.mean(w, 1)\r\n\t\r\n\t# Get testing data\r\n\tte_x2 = np.zeros((te_x.shape[0], nfeat))\r\n\tfor i, x in enumerate(te_x):\r\n\t\tnx = extract_patches_2d(x.reshape(28, 28), (3, 3)).reshape(\r\n\t\t\tnwindows, 9)\r\n\t\tx = np.array(np.zeros(nfeat), dtype='bool')\r\n\t\tfor j, (xi, sp) in enumerate(izip(nx, sps)):\r\n\t\t\tsp.step(xi)\r\n\t\t\tx[j*100:(j*100)+100] = sp.y[:, 0]\r\n\t\t\r\n\t\ty = sp2.p * x[sp2.syn_map]\r\n\t\tw = np.zeros((nfeat, ms))\r\n\t\tfor j in xrange(nfeat):\r\n\t\t\ta = y[sp2.syn_map == j]\r\n\t\t\tw[j][:a.shape[0]] = a\r\n\t\tte_x2[i] = np.mean(w, 1)\r\n\t\r\n\t# Classify\r\n\tclf = LinearSVC(random_state=123456789)\r\n\tclf.fit(tr_x2, tr_y)\r\n\tprint 'SVM Accuracy : {0:2.2f} %'.format(clf.score(te_x2, te_y) * 100)", "def 
_evaluate_performance__static_winners(self):\n # | - _evaluate_performance__\n\n # | - class attributes #################################################\n AL = self\n al_gen = self.al_gen\n verbose = self.verbose\n seed_ids = self.seed_ids\n acquisition_bin = self.acquisition_bin\n completed_ids = self.completed_ids\n CandidateSpace = self.CandidateSpace\n RegressionModel = self.RegressionModel\n DuplicateFinder = self.DuplicateFinder\n al_gen_dict = self.al_gen_dict\n\n stop_mode = self.stop_mode\n stop_num_generations = self.stop_num_generations\n\n index_acq_gen_dict = self.index_acq_gen_dict\n #__| #################################################################\n\n # #####################################################################\n mode = \"lowest_N\" # 'lowest_N' or 'lowest_perc'\n\n N_ids = 10\n lowest_perc = 5\n\n # Number of consecutive generations that the Nth best systems must\n # remain static\n M_gens = 3\n # #####################################################################\n\n if mode == \"lowest_perc\":\n num_candidates = CandidateSpace.FingerPrints.df_pre.shape[0]\n N_ids = int(num_candidates * (lowest_perc * 0.01))\n\n gen_keys = list(AL.al_gen_dict.keys())\n\n if len(gen_keys) > M_gens:\n latest_M_keys = gen_keys[-(M_gens + 1):]\n last_gen_key = gen_keys[-1]\n\n al_gen_dict_subset_i = dict(zip(\n latest_M_keys,\n [AL.al_gen_dict.get(i, None) for i in latest_M_keys]))\n\n indices_list = []\n iterator = enumerate(al_gen_dict_subset_i.items())\n for i_cnt, (gen_i, AL_i) in iterator:\n model_i = AL_i.model\n\n model_i = AL.add_main_Y_to_model(\n model_i, plot_dft_instead_of_pred=True)\n model_i = model_i[(model_i[\"duplicate\"] == False)]\n model_i = model_i.sort_values(\"Y_main\")\n\n indices_i = model_i.index.tolist()\n\n indices_list.append(indices_i)\n\n if i_cnt >= M_gens:\n indices_i = indices_list[i_cnt][0:N_ids]\n ids_static_list = []\n for j in range(M_gens):\n indices_j = indices_list[i_cnt - (j + 1)][0:N_ids]\n ids_static = indices_j == indices_i\n ids_static_list.append(ids_static)\n\n ids_are_static = all(ids_static_list)\n\n self.performance__static_winners[last_gen_key] = ids_are_static\n #__|", "def score_samples(self, x):\n raise NotImplementedError", "def tune_params(classifier, grid, prior, X_train, y_train, save_folder, n_iter=100):\n scoring = ['f1', 'precision', 'recall']\n # Stratified 5 fold cross validation\n res = RandomizedSearchCV(classifier, grid, refit=False, n_iter=n_iter,\n cv=UndersampleStratifiedKFold(n_splits=5, prior=prior),\n scoring=scoring, verbose=3, n_jobs=-1)\n res.fit(X_train, y_train)\n\n df = pd.concat([pd.DataFrame(res.cv_results_[\"params\"]),\n pd.DataFrame(res.cv_results_[\"mean_test_precision\"], columns=[\"prec\"]),\n pd.DataFrame(res.cv_results_[\"mean_test_f1\"], columns=[\"f1\"]),\n pd.DataFrame(res.cv_results_[\"mean_test_recall\"], columns=[\"recall\"])], axis=1)\n\n df.to_csv(save_folder)\n display(df)\n return df", "def __call__(self, hyperparameters: dict) -> dict:\n result = self.perturb(hyperparameters)\n\n if random.random() < self.resample_probability:\n result = self.resample(result)\n\n return result", "def tpe_sampler_search(feature_matrix, x_train, y_train, x_val, y_val, config, data_path):\n\n # Init sampler and n_trials\n sampler = optuna.samplers.TPESampler()\n n_trials = config['number_of_trials']\n # Create study\n study = optuna.create_study(sampler=sampler, direction='maximize')\n # Disable output\n optuna.logging.disable_default_handler()\n # Optimize\n study.optimize(lambda trial: 
objective(trial, feature_matrix, x_train, y_train,\n x_val, y_val, config, data_path),\n n_trials=n_trials)\n # Init model with best parameters\n print(\"Best trial: \", study.best_trial.number)\n print(\"Best parameters: \", study.best_params)\n # Load the best model.\n with open(data_path + 'interim/trial_{}.pickle'.format(study.best_trial.number), 'rb') as f:\n clf = pickle.load(f)\n # Delete all trials\n for trial_num in range(config['number_of_trials']):\n if os.path.exists(data_path + 'interim/trial_{}.pickle'.format(trial_num)):\n os.remove(data_path + 'interim/trial_{}.pickle'.format(trial_num))\n print(\"***Train***\")\n output_report(x_train, y_train, clf)\n print(\"***Validation***\")\n output_report(x_val, y_val, clf)\n # Remove keys from dict\n best_params_model = remove_keys_from_dict(study.best_params, keys=['ratio', 'sampling_strategy'])\n best_clf = model_init(feature_matrix, best_params_model, config)\n return best_clf, study.trials_dataframe()", "def derive_sample_params(self, global_state):\n return self._numerator.derive_sample_params(global_state.sum_state)", "def tgt_samples(self, params):\r\n def save_json(save_path, file_id, samples):\r\n init_logger()\r\n for i, sample in enumerate(samples):\r\n save_ = os.path.join(save_path, \"{:s}_{:d}.json\".format(file_id, i))\r\n with open(save_, 'w') as file:\r\n json.dump(sample, file)\r\n logger.info(\"{:s} saved at {:s}\".format(save_, save_path))\r\n\r\n\r\n json_file, save_path = params\r\n init_logger()\r\n _, tgt = self.load_json(json_file)\r\n\r\n file_id = json_file.split(\"/\")[-1].split(\".\")[0]\r\n if len(tgt) >= self.args.min_sents_num and len(tgt) <= self.args.max_sents_num:\r\n tgt_ = list(tgt)\r\n random.seed(66)\r\n random.shuffle(tgt_)\r\n\r\n # make sentence pair and write in a single file\r\n positive_sents = tgt\r\n positive_pairs = [(positive_sents[i], positive_sents[i+1]) for i in range(len(positive_sents)-1)]\r\n\r\n negative_sents = tgt_\r\n negative_pairs = [(negative_sents[i], negative_sents[i+1]) for i in range(len(negative_sents)-1)]\r\n\r\n positive_samples = [{\"tgt\": pair, \"coherence\": 0} for pair in positive_pairs] # 0 represents coherent\r\n negative_samples = [{\"tgt\": pair, \"coherence\": 1} for pair in negative_pairs] # 1 represents incoherent\r\n\r\n save_json(save_path, file_id, positive_samples)\r\n save_json(save_path, file_id+\"_r\", negative_samples)", "def evaluation(self):\n rows_list = []\n for name in self.single_classifier_best.keys():\n row = {}\n row['algorithm'] = name \n row[self.scoring_metric] = self.single_classifier_best[name].best_score_\n rows_list.append(row)\n \n scoring_df = pd.DataFrame(rows_list)\n scoring_sorted = scoring_df.sort_values(self.scoring_metric, ascending=False)\n print()\n print('*'*shutil.get_terminal_size().columns)\n print(scoring_sorted)\n print('*'*shutil.get_terminal_size().columns)\n self.evaluation_scores = scoring_sorted", "def gethist(self, *args, **kwargs):\n variables, selection, issingle = unwrap_gethist_args(*args)\n verbosity = LOG.getverbosity(kwargs)\n name = kwargs.get('name', self.name )\n name += kwargs.get('tag', \"\" )\n title = kwargs.get('title', self.title )\n parallel = kwargs.get('parallel', False )\n kwargs['cuts'] = joincuts(kwargs.get('cuts'), self.cuts )\n kwargs['weight'] = joinweights(kwargs.get('weight', \"\"), self.weight ) # pass weight down\n kwargs['scale'] = kwargs.get('scale', 1.0) * self.scale * self.norm # pass scale down\n \n # HISTOGRAMS\n allhists = [ ]\n garbage = [ ]\n hargs = (variables, 
selection)\n hkwargs = kwargs.copy()\n if parallel and len(self.samples)>1:\n hkwargs['parallel'] = False\n processor = MultiProcessor()\n for sample in self.samples:\n processor.start(sample.gethist,hargs,hkwargs,name=sample.title) \n for process in processor:\n allhists.append(process.join())\n else:\n for sample in self.samples:\n if 'name' in kwargs: # prevent memory leaks\n hkwargs['name'] = makehistname(kwargs.get('name',\"\"),sample.name)\n allhists.append(sample.gethist(*hargs,**hkwargs))\n \n # SUM\n sumhists = [ ]\n if any(len(subhists)<len(variables) for subhists in allhists):\n LOG.error(\"MergedSample.gethist: len(subhists) = %s < %s = len(variables)\"%(len(subhists),len(variables)))\n for ivar, variable in enumerate(variables):\n subhists = [subhists[ivar] for subhists in allhists]\n sumhist = None\n for subhist in subhists:\n if sumhist==None:\n sumhist = subhist.Clone(\"%s_%s\"%(variable.filename,name))\n sumhist.SetTitle(title)\n sumhist.SetDirectory(0)\n sumhist.SetLineColor(self.linecolor)\n sumhist.SetFillColor(self.fillcolor)\n sumhist.SetMarkerColor(self.fillcolor)\n sumhists.append(sumhist)\n else:\n sumhist.Add(subhist) \n if verbosity>=4:\n printhist(sumhist,pre=\">>> \")\n deletehist(subhists)\n \n # PRINT\n if verbosity>=2:\n nentries, integral = -1, -1\n for sumhist in sumhists:\n if sumhist.GetEntries()>nentries:\n nentries = sumhist.GetEntries()\n integral = sumhist.Integral()\n print \">>>\\n>>> MergedSample.gethist - %s\"%(color(name,color=\"grey\"))\n print \">>> entries: %d (%.2f integral)\"%(nentries,integral)\n \n if issingle:\n return sumhists[0]\n return sumhists", "def get_candidates(self, sess, avg1, avg2, batch_size=512, swap_score=False):\n all_scores = []\n all_targets = []\n for i in range(0, self.max_dict_size, batch_size):\n src_ids = [x for x in range(i, min(i + batch_size, self.max_dict_size))]\n dict_dict = {self.src_ph: src_ids, self.tgt_ph: self.tgt_ids}\n if swap_score:\n temp_score = sess.run(self.csls_subgraphs[\"ScoreG_T2S\"], feed_dict=dict_dict)\n else:\n temp_score = sess.run(self.csls_subgraphs[\"ScoreGraph\"], feed_dict=dict_dict)\n batch_score = 2 * temp_score - (avg1[src_ids][:, None] + avg2[None, :])\n top_matches = sess.run(\n self.csls_subgraphs[\"Top2\"], feed_dict={self.score_ph: batch_score}\n )\n all_scores.append(top_matches[0])\n all_targets.append(top_matches[1])\n all_scores = np.concatenate(all_scores)\n all_targets = np.concatenate(all_targets)\n all_pairs = np.concatenate(\n [np.arange(0, self.max_dict_size, dtype=np.int64)[:, None], all_targets[:, 0][:, None]],\n 1,\n )\n\n # Scores with high confidence will have large difference between first two guesses\n diff = all_scores[:, 0] - all_scores[:, 1]\n reordered = np.argsort(diff, axis=0)\n reordered = reordered[::-1]\n all_pairs = all_pairs[reordered]\n\n # Select words which are in top max_dict\n selected = np.max(all_pairs, axis=1) <= self.max_dict_size\n all_pairs = all_pairs[selected]\n\n # Make sure size is less than max_dict\n all_pairs = all_pairs[: self.max_dict_size]\n return all_pairs", "def organise_scans(self):\n self.wh_to_th = {}\n self.th_to_wh = {}\n\n wh_to_th_metrics = []\n th_to_wh_metrics = []\n wh_to_th_params = {}\n th_to_wh_params = {}\n wh_to_th_minim_info = {}\n th_to_wh_minim_info = {}\n wh_to_th_minim_info['time'] = []\n wh_to_th_minim_info['iterations'] = []\n wh_to_th_minim_info['funcevals'] = []\n wh_to_th_minim_info['status'] = []\n th_to_wh_minim_info['time'] = []\n th_to_wh_minim_info['iterations'] = []\n 
th_to_wh_minim_info['funcevals'] = []\n th_to_wh_minim_info['status'] = []\n\n for injparam in sorted(self.data_sets.keys()):\n injlabels = self.labels[injparam].dict\n for injkey in self.data_sets[injparam].keys():\n h0_metric_val = self.data_sets[injparam][injkey][\n 'h0_fit_to_toy_%s_asimov'\n %(injlabels['data_name'])]['metric_val']\n h1_metric_val = self.data_sets[injparam][injkey][\n 'h1_fit_to_toy_%s_asimov'\n %(injlabels['data_name'])]['metric_val']\n if h1_metric_val > h0_metric_val:\n bestfit = 'h0'\n altfit = 'h1'\n else:\n bestfit = 'h1'\n altfit = 'h0'\n\n wh_to_th_fit = self.data_sets[injparam][injkey][\n '%s_fit_to_%s_fid'%(altfit, bestfit)]['fid_asimov']\n th_to_wh_fit = self.data_sets[injparam][injkey][\n '%s_fit_to_%s_fid'%(bestfit, altfit)]['fid_asimov']\n\n wh_to_th_metrics.append(wh_to_th_fit['metric_val'])\n th_to_wh_metrics.append(th_to_wh_fit['metric_val'])\n\n for systkey in wh_to_th_fit['params'].keys():\n if systkey not in wh_to_th_params.keys():\n wh_to_th_params[systkey] = []\n wh_to_th_params[systkey].append(\n wh_to_th_fit['params'][systkey]\n )\n for systkey in th_to_wh_fit['params'].keys():\n if systkey not in th_to_wh_params.keys():\n th_to_wh_params[systkey] = []\n th_to_wh_params[systkey].append(\n th_to_wh_fit['params'][systkey]\n )\n\n wh_to_th_minim_info['time'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(altfit, bestfit)\n ]['fid_asimov']['minimizer_time'])\n wh_to_th_minim_info['iterations'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(altfit, bestfit)\n ]['fid_asimov']['minimizer_metadata']['nit'])\n wh_to_th_minim_info['funcevals'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(altfit, bestfit)\n ]['fid_asimov']['minimizer_metadata']['nfev'])\n wh_to_th_minim_info['status'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(altfit, bestfit)\n ]['fid_asimov']['minimizer_metadata']['status'])\n \n th_to_wh_minim_info['time'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(bestfit, altfit)\n ]['fid_asimov']['minimizer_time'])\n th_to_wh_minim_info['iterations'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(bestfit, altfit)\n ]['fid_asimov']['minimizer_metadata']['nit'])\n th_to_wh_minim_info['funcevals'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(bestfit, altfit)\n ]['fid_asimov']['minimizer_metadata']['nfev'])\n th_to_wh_minim_info['status'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(bestfit, altfit)\n ]['fid_asimov']['minimizer_metadata']['status'])\n\n wh_to_th_params['bestfit'] = bestfit\n wh_to_th_params['altfit'] = altfit\n th_to_wh_params['bestfit'] = bestfit\n th_to_wh_params['altfit'] = altfit\n\n self.wh_to_th['metrics'] = wh_to_th_metrics\n self.th_to_wh['metrics'] = th_to_wh_metrics\n self.wh_to_th['params'] = wh_to_th_params\n self.th_to_wh['params'] = th_to_wh_params\n self.wh_to_th['minim_info'] = wh_to_th_minim_info\n self.th_to_wh['minim_info'] = th_to_wh_minim_info", "def train_self(self):\n # for each numeric column, we need to record mean and std for both classes\n for col in self.num_cols:\n self.prob_hub[col] = {}\n for claz in self.class_list:\n mean, std = get_mean_std(self.data[self.data[self.class_column] == claz][col])\n self.prob_hub[col][claz] = (mean, std)\n\n # for each categorical columns, we need to record P(X=x|Y=y)\n for col in self.cat_cols:\n ulist = unique_list(self.data[col])\n self.prob_hub[col] = {}\n stat = 
self.data.groupby(self.class_column)[col].value_counts() / self.data.groupby(self.class_column)[col].count()\n # for each class\n for claz in self.class_list:\n self.prob_hub[col][claz] = {}\n for uni_element in ulist:\n self.prob_hub[col][claz][uni_element] = stat[claz][uni_element]\n\n self.predict(self.data, True)", "def api_output(self):\n\n params = ParamSet()\n\n if self.sampler in ['random', 'lowdiscrepancy']:\n params.add_integer('pixelsamples', self.pixelsamples)\n params.add_string('pixelsampler', self.pixelsampler)\n\n # if self.sampler == 'metropolis':\n # params.add_bool('adaptive_largemutationprob', self.adaptive_largemutationprob)\n # if not self.adaptive_largemutationprob:\n # params.add_float('largemutationprob', self.largemutationprob)\n # params.add_bool('usecooldown', self.usecooldown)\n\n if self.sampler == 'metropolis':\n params.add_float('largemutationprob', self.largemutationprob)\n\n params.add_bool('noiseaware', self.noiseaware)\n\n if self.advanced:\n if self.sampler == 'metropolis':\n params.add_integer('maxconsecrejects', self.maxconsecrejects)\n params.add_bool('usecooldown', self.usecooldown)\n\n if self.usersamplingmap_filename:\n params.add_string('usersamplingmap_filename', self.usersamplingmap_filename)\n\n return self.sampler, params", "def tune_parameters(self, parameters, search_alg, num_trials=5, metric=\"f1\", direction=\"maximize\", train_ratio=0.7, num_times=1, export_metrics=True):\n self._clear_cache()\n model_id = self.model_id\n if self.comet_key != None:\n exp = init_experiment(self.comet_key, \"model-performance\", \"covid-vaccine\")\n exp.log_parameters({\n \"model_id\":model_id,\n \"model_type\":self.embedding_type,\n \"multiclass\":self.class_label,\n \"train_ratio\":train_ratio,\n \"num_samples\":num_trials,\n \"metric\":metric,\n \"direction\":direction,\n \"search_alg\":search_alg\n })\n log_fixed_params(parameters, exp)\n exp.add_tag(\"multi\" if self.class_label == \"Multiclass\" else \"binary\")\n start = time.time()\n tr_text, tr_label, self.tr_meta, te_text, te_label, self.te_meta, _ = get_train_test_data(self.seed_fp, self.label_fp, train_ratio=train_ratio, meta_cols=self.meta_cols, drop_irrelevant=self.drop_irrelevant, visualize=False, verbose=self.verbose)\n self._transform_labels(tr_label, te_label)\n print(\"data loading:\", time.time() - start, \"seconds\\n\")\n start = time.time()\n self._prepare_feature_components(tr_text, te_text, parameters)\n print(\"total preprocessing:\", time.time() - start, \"seconds\\n\")\n metric_df_parts = []\n def objective(trial):\n config = suggest_config(parameters, trial)\n instances = []\n for _ in range(num_times):\n instance_df = self._run_single_config(train_ratio, config)\n instance_df = instance_df[instance_df[\"part\"] == \"test\"]\n instances.append(instance_df)\n tmp_df = pd.concat(instances, axis=0)\n print(\"metrics 1\", tmp_df.shape)\n group_cols = list(tmp_df.drop(\"score\", axis=1).columns)\n print(group_cols)\n tmp_df = tmp_df.groupby(group_cols)[\"score\"].agg([\"mean\",\"std\"]).reset_index()\n print(\"metrics 2\", tmp_df.shape)\n metric_df_parts.append(tmp_df)\n metrics = dict(zip(tmp_df[\"metric\"],tmp_df[\"mean\"]))\n return metrics[metric]\n if search_alg == \"GRID\":\n algo = GridSampler(extract_grid(parameters))\n elif search_alg == \"RND\":\n algo = RandomSampler()\n elif search_alg == \"TPE\":\n algo = TPESampler(n_startup_trials=int(num_trials*0.3))\n else:#default optuna setting\n algo = None\n study = optuna.create_study(direction=\"maximize\", sampler=algo)\n 
study.optimize(objective, n_trials=num_trials, n_jobs=1)\n metrics_df = pd.concat(metric_df_parts)\n best_config = study.best_params\n print(\"Best config: \", best_config)\n if export_metrics:\n result_dir = os.path.join(self.model_dir, \"results\")\n if not os.path.exists(result_dir):\n os.makedirs(result_dir)\n study_fp = os.path.join(result_dir, \"%s.pkl\" % model_id)\n print(\"Study file:\", study_fp)\n joblib.dump(study, study_fp)\n result_fp = os.path.join(result_dir, \"%s.csv\" % model_id)\n print(\"Output file:\", result_fp)\n metrics_df.to_csv(result_fp, index=False)\n if self.comet_key != None:\n exp.log_parameters(best_config)\n exp.log_metrics({\n \"train_size\":len(tr_text),\n \"test_size\":len(te_text)\n })\n best_results = dict(metrics_df.groupby(\"metric\")[\"mean\"].max()[[\"f1\",\"acc\",\"auc\"]])\n exp.log_metrics(best_results)\n exp.end()\n return best_config", "def posterior_sample(self):\n pass", "def __init__(self, model_list, sat_features=None):\n self.model_list = model_list\n self.scores = {'kNN': [], 'Kriging': [], 'RmSense': [], 'Ensamble': []}\n self.results = {'kNN': [], 'Kriging': [], 'RmSense': [], 'Ensamble': []}\n self.kNN = None\n self.Kriging = None\n self.RmSense = None\n self.sat_features = sat_features", "def learn(self):\n metrics_hist = dict()\n max_runs = 3\n for run in range(max_runs):\n all_indices, initial_indices = self._init_al_dataset()\n\n metrics_hist[str(run)] = dict()\n\n current_indices = list(initial_indices)\n \n for split in self.data_splits_frac:\n print(f'\\nRUN {run} - SPLIT - {split*100:0.0f}%')\n\n # Initialise models\n self._init_models(mode='svaal')\n\n # Do some label stuff\n unlabelled_indices = np.setdiff1d(list(all_indices), current_indices)\n unlabelled_sampler = data.sampler.SubsetRandomSampler(unlabelled_indices)\n unlabelled_dataloader = data.DataLoader(self.datasets['train'],\n sampler=unlabelled_sampler,\n batch_size=64,\n drop_last=False)\n\n print(f'Labelled: {len(current_indices)} Unlabelled: {len(unlabelled_indices)} Total: {len(all_indices)}')\n\n # TODO: Make the SVAAL allow 100% labelled and 0% unlabelled to pass through it. 
Breaking out of loop for now when data hits 100% labelled.\n if len(unlabelled_indices) == 0:\n break\n\n metrics, svae, discriminator = self.train(dataloader_l=self.labelled_dataloader,\n dataloader_u=unlabelled_dataloader,\n dataloader_v=self.val_dataloader,\n dataloader_t=self.test_dataloader,\n mode='svaal') \n print(f'Test Eval.: F1 Scores - Macro {metrics[0]*100:0.2f}% Micro {metrics[1]*100:0.2f}%') \n \n # Record performance at each split\n metrics_hist[str(run)][str(split)] = metrics\n\n \n sampled_indices = self.sample_adversarial(svae, discriminator, unlabelled_dataloader, indices=unlabelled_indices, cuda=True) # TODO: review usage of indices arg\n current_indices = list(current_indices) + list(sampled_indices)\n sampler = data.sampler.SubsetRandomSampler(current_indices)\n self.labelled_dataloader = data.DataLoader(self.datasets['train'], sampler=sampler, batch_size=self.batch_size, drop_last=True)\n \n # write results to disk\n with open('results.json', 'w') as fj:\n json.dump(metrics_hist, fj, indent=4)", "def sample(self, existing_results, num_samples):\n new_samples = set()\n existing_samples = list(existing_results.keys())\n existing_samples.sort()\n\n if self.max is not None and self.max not in existing_results and len(new_samples) < num_samples:\n new_samples.add(self.max)\n elif self.max is None and len(new_samples) < num_samples:\n new_samples.add(max(existing_samples) * 2)\n\n if self.min is not None and self.min not in existing_results and len(new_samples) < num_samples:\n new_samples.add(self.min)\n elif self.min is None and len(new_samples) < num_samples:\n new_samples.add(min(existing_samples) * 2)\n\n if (self.max is not None and self.min is not None and (self.max - self.min) / 2 not in existing_results and\n len(new_samples) < num_samples):\n new_samples.add(0.5 * (self.max - self.min))\n\n if len(existing_results) > 2 and len(new_samples) < num_samples:\n gradients = [(existing_results[existing_samples[i]] - existing_results[existing_samples[i-1]]) /\n (existing_samples[i] - existing_samples[i-1]) for i in range(1, len(existing_samples))]\n\n candidate_samples = []\n for i in range(1, len(existing_samples)):\n candidate_sample = 0.5 * (existing_samples[i] - existing_samples[i-1])\n gradient = gradients[i-1]\n if i > 2:\n score +=\n\n # Sort the candidate samples by score\n candidate_samples.sort(key=operator.itemgetter(1), reverse=True)\n for i in range(0, min(len(candidate_samples), ))\n\n return new_samples", "def load_or_init(self) -> None:\r\n if os.path.exists(os.path.join(self.settings.save_path, 'current.json')):\r\n self.load()\r\n return\r\n\r\n self.generation = 0\r\n _approach = self.problem.approaches.sample(None, 1)[0]\r\n self.approach = (self.problem.approaches.values.index(_approach), _approach)\r\n del _approach\r\n\r\n self.approach_params = dict()\r\n\r\n self.logger.debug('Initializing generation 0: Approach: %s', str(self.approach[1]))\r\n\r\n longest_name = max(len(i.name) for i in chain(self.approach[1], self.problem.sensitives))\r\n\r\n for cat in self.approach[1].categoricals:\r\n val = cat.sample(None, 1)[0]\r\n self.approach_params[cat.name] = val\r\n self.logger.debug(' %s: %s', cat.name.rjust(longest_name), str(val))\r\n for i in self.approach[1].integrals:\r\n val = np.random.randint(i.domain[0], i.domain[1] + 1)\r\n self.approach_params[i.name] = val\r\n self.logger.debug(' %s: %s', i.name.rjust(longest_name), str(val))\r\n for cont in self.approach[1].continuous:\r\n val = np.random.uniform(cont.domain[0], cont.domain[1])\r\n 
self.approach_params[cont.name] = val\r\n self.logger.debug(' %s: %s', cont.name.rjust(longest_name), str(val))\r\n\r\n self.logger.debug('')\r\n\r\n self.sensitive_params = dict()\r\n for cat in self.problem.sensitives.categoricals:\r\n val = cat.sample(None, 1)[0]\r\n self.sensitive_params[cat.name] = val\r\n self.logger.debug(' %s: %s', cat.name.rjust(longest_name), str(val))\r\n for i in self.problem.sensitives.integrals:\r\n val = np.random.randint(i.domain[0], i.domain[1] + 1)\r\n self.sensitive_params[i.name] = val\r\n self.logger.debug(' %s: %s', i.name.rjust(longest_name), str(val))\r\n for cont in self.problem.sensitives.continuous:\r\n val = np.random.uniform(cont.domain[0], cont.domain[1])\r\n self.sensitive_params[cont.name] = val\r\n self.logger.debug(' %s: %s', cont.name.rjust(longest_name), str(val))\r\n\r\n self.save()", "def pred_hyper(self, xprime=None):\n\t\tif xprime != None:\n\t\t\tself.x_test = xprime\n\t\t\n\t\tsamples_val = []\n\t\tC_valid = []\n\t\tmu_val = []\n\t\tmeans_val = []\n\t\tstd2s_val = []\n\t\t\n\t\tdivby = self.samples.shape[0]\n\t\t\n\t\t\n\t\tfor i in xrange(self.samples.shape[0]):\n\t\t\tself.ph.setGpHypers(self.samples[i])\n\t\t\tpre = self.ph.predict_asy(xprime)\n\t\t\t#print 'pre: ', pre\n\t\t\tif pre!=None:\n\t\t\t\tmean_one, std2_one, C, mu = pre\n\t\t\t\t#print 'mean_one: ', mean_one\n\t\t\t\tmeans_val.append(mean_one.flatten())\n\t\t\t\t#print 'means_val: ', means_val\n\t\t\t\tstd2s_val.append(std2_one.flatten())\n\t\t\t\tC_valid.append(C)\n\t\t\t\tmu_val.append(mu)\n\t\t\t\tsamples_val.append(self.samples[i])\n\t\t\telse:\n\t\t\t\tdivby -= 1 \n\t\t\t\t#print 'bad: ', divby\n\t\t\n\t\tmean_temp = np.zeros((divby, xprime.shape[0]))\n\t\tstd2_temp = np.zeros((divby, xprime.shape[0]))\n\t\t\n\t\tif(divby < self.samples.shape[0]):\n\t\t\tself.C_samples = np.zeros((divby, self.C_samples.shape[1], self.C_samples.shape[2]))\n\t\t\tself.mu_samples = np.zeros((divby, self.mu_samples.shape[1], self.mu_samples.shape[2]))\n\t\t\tself.samples = np.zeros((divby, self.samples.shape[1]))\n\t\t\n\t\t\n\t\tfor j in xrange(divby):\n\t\t\tmean_temp[j,:] = means_val[j]\n\t\t\tstd2_temp[j,:] = std2s_val[j]\n\t\t\tself.C_samples[j, ::] = C_valid[j]\n\t\t\tself.mu_samples[j, ::] = mu_val[j]\n\t\t\tself.samples[j, ::] = samples_val[j]\n\t\t\n\n\t\t\n\n\t\tmean = np.mean(mean_temp, axis=0)\n\t\tstd2 = np.mean(std2_temp, axis=0) + np.mean(mean_temp**2, axis=0)\n\t\tstd2 -= mean**2\n\t\t\n\t\tself.activated = True\n\t\tself.asy_mean = mean\n\t\t\n\t\treturn mean, std2, divby", "def _generate_results(self, clf, para_key, value):\n for att in dir(clf):\n if not att.startswith('_'):\n if att == 'X' or att == 'Y' or att == 'W':\n continue\n else:\n if 'param_' + att in self.results.keys():\n if att == para_key:\n self.results['param_' + att].append(value)\n else:\n self.results['param_' + att].append(getattr(clf, att))\n else:\n if att == para_key:\n self.results['param_' + att] = [value]\n else:\n self.results['param_' + att] = [getattr(clf, att)]", "def optimize(\n # trials,\n random_state=SEED):\n\n space = {\n 'max_depth': scope.int(hp.uniform('max_depth', 5, 15)),\n 'subsample': hp.uniform('subsample', 0.03, 1),\n 'learning_rate' : hp.loguniform('learning_rate', np.log(0.005), np.log(0.5)) - 0.0001,\n 'colsample_bytree': hp.uniform('colsample_bytree', 0.3, 1),\n 'reg_alpha': hp.loguniform('reg_alpha', np.log(0.005), np.log(5)) - 0.0001,\n 'reg_lambda': hp.loguniform('reg_lambda', np.log(1), np.log(5)),\n 'bagging_freq': hp.choice('bagging_freq', [0, 1]),\n 
'num_leaves': scope.int(hp.uniform('num_leaves', 10, 128)),\n 'n_estimators': 1000,\n 'boosting': 'gbdt',\n 'objective': 'multiclass',\n 'num_class': 12,\n 'metric': 'None',\n 'is_unbalance': 'true',\n # 'min_data_per_group': 1000,\n 'verbose': -1,\n 'random_seed': 42,\n \n }\n\n # Use the fmin function from Hyperopt to find the best hyperparameters\n best = fmin(score_model, space, algo=tpe.suggest,\n # trials=trials,\n max_evals=hyperopt_niters)\n return best", "def test_results_with_constant_sample_weights(strategy: str) -> None:\n n_samples = len(X)\n mapie0 = MapieRegressor(**STRATEGIES[strategy])\n mapie1 = MapieRegressor(**STRATEGIES[strategy])\n mapie2 = MapieRegressor(**STRATEGIES[strategy])\n mapie0.fit(X, y, sample_weight=None)\n mapie1.fit(X, y, sample_weight=np.ones(shape=n_samples))\n mapie2.fit(X, y, sample_weight=np.ones(shape=n_samples)*5)\n y_pred0, y_pis0 = mapie0.predict(X, alpha=0.05)\n y_pred1, y_pis1 = mapie1.predict(X, alpha=0.05)\n y_pred2, y_pis2 = mapie2.predict(X, alpha=0.05)\n np.testing.assert_allclose(y_pred0, y_pred1)\n np.testing.assert_allclose(y_pred1, y_pred2)\n np.testing.assert_allclose(y_pis0, y_pis1)\n np.testing.assert_allclose(y_pis1, y_pis2)", "def _fit_and_score_multimodal(estimator, X, modality, y, scorer, train, test, verbose,\n parameters, return_train_score=False,\n return_parameters=False, return_n_test_samples=False,\n return_times=False, default_parameters=None):\n if verbose > 1 and modality != 'all':\n if parameters is None:\n msg = ''\n else:\n msg = '%s' % (', '.join('%s=%s' % (k, v)\n for k, v in parameters.items()))\n print(\"[CV] %s %s\" % (msg, (64 - len(msg)) * '.'))\n\n y = np.array(y)\n\n # Adjust length of sample weights\n # fit_params = fit_params if fit_params is not None else {}\n # fit_params = dict([(k, _index_param_value(X, v, train))\n # for k, v in fit_params.items()])\n\n train_scores = {}\n\n clfind = [v[0] for v in estimator.steps].index('clf')\n if modality == 'all':\n for k in estimator.steps[clfind][1].base_estimators.keys():\n estimator.steps[clfind][1].base_estimators[k].set_params(**parameters[k])\n elif parameters is not None:\n for k in estimator.steps[clfind][1].base_estimators.keys():\n if k == modality:\n estimator.steps[clfind][1].base_estimators[k].set_params(**parameters)\n else:\n estimator.steps[clfind][1].base_estimators[k].set_params(**default_parameters)\n\n\n start_time = time.time()\n\n y_train = y[train]\n y_test = y[test]\n\n X_train = OrderedDict({k: np.array(x)[train] for k, x in X.items()})\n X_test = OrderedDict({k: np.array(x)[test] for k, x in X.items()})\n # X_train, X_test = dict(), dict()\n # for k, X_ in X.items():\n # x_train, x_test = X_[train], X_[test]\n # valid_train = [i for i, x in enumerate(x_train) if ~np.any(np.isnan(x))]\n # X_train[k] = [x for i, x in enumerate(x_train) if i in valid_train]\n # valid_test = [i for i, x in enumerate(x_test) if ~np.any(np.isnan(x))]\n # X_test[k] = [x for i, x in enumerate(x_test) if i in valid_test]\n\n is_multimetric = not callable(scorer)\n\n if y_train is None:\n # estimator.fit(X_train, **fit_params)\n estimator.fit(X_train)\n else:\n # estimator.fit(X_train, y_train, **fit_params)\n estimator.fit(X_train, y_train)\n\n fit_time = time.time() - start_time\n # _score will return dict if is_multimetric is True\n test_scores = _score(estimator, X_test, y_test, scorer, is_multimetric)\n score_time = time.time() - start_time - fit_time\n if return_train_score:\n train_scores = _score(estimator, X_train, y_train, scorer,\n is_multimetric)\n\n 
if verbose > 2:\n if is_multimetric:\n for scorer_name, score in test_scores.items():\n msg += \", %s=%s\" % (scorer_name, score)\n else:\n msg += \", score=%s\" % test_scores\n if verbose > 1:\n total_time = score_time + fit_time\n end_msg = \"%s, total=%s\" % (msg, logger.short_format_time(total_time))\n print(\"[CV] %s %s\" % ((64 - len(end_msg)) * '.', end_msg))\n\n ret = [train_scores, test_scores] if return_train_score else [test_scores]\n\n if return_n_test_samples:\n ret.append(_num_samples(X_test))\n if return_times:\n ret.extend([fit_time, score_time])\n if return_parameters:\n ret.append(parameters)\n return ret", "def init_params():\n p = {}\n \n # p['rootFolder'] = 'C:/Users/Umberto Gostoli/SPHSU/Social Care Model II'\n # p['rootFolder'] = 'N:/Social Care Model Paper III'\n \n p['noPolicySim'] = False\n p['multiprocessing'] = True\n p['numberProcessors'] = 9\n p['numRepeats'] = 3\n \n p['startYear'] = 1860\n p['endYear'] = 2040\n p['thePresent'] = 2012\n p['statsCollectFrom'] = 1990\n p['regressionCollectFrom'] = 1960 \n p['implementPoliciesFromYear'] = 2020\n p['yearOutcome'] = 2015\n \n p['favouriteSeed'] = 123\n p['loadFromFile'] = False\n p['verboseDebugging'] = False\n p['singleRunGraphs'] = False\n p['saveChecks'] = True\n p['getCheckVariablesAtYear'] = 2015\n # To change through command-line arguments\n\n p['numberPolicyParameters'] = 2\n p['valuesPerParam'] = 1\n p['numberScenarios'] = 3\n \n ############ Policy Parameters #######################\n p['incomeCareParam'] = 0.0005 #[0.00025 - 0.001]\n p['taxBreakRate'] = 0.0\n p['ageOfRetirement'] = 65\n p['socialSupportLevel'] = 5\n # p['educationCosts']\n #############################################################\n p['socialCareCreditShare'] = 0.0\n p['maxWtWChildAge'] = 5\n # The basics: starting population and year, etc.\n \n p['discountingFactor'] = 0.03\n \n \n p['initialPop'] = 600 \n \n p['minStartAge'] = 24\n p['maxStartAge'] = 45\n p['numberClasses'] = 5\n p['socialClasses'] = ['unskilled', 'skilled', 'lower', 'middle', 'upper']\n p['initialClassShares'] = [0.2, 0.25, 0.3, 0.2, 0.05]\n p['initialUnemployment'] = [0.25, 0.2, 0.15, 0.1, 0.1]\n p['unemploymentAgeBandParam'] = 0.3\n \n # doDeath function parameters\n p['mortalityBias'] = 0.85 # After 1950\n p['careNeedBias'] = 0.9\n p['unmetCareNeedBias'] = 0.5\n p['baseDieProb'] = 0.0001\n p['babyDieProb'] = 0.005\n p['maleAgeScaling'] = 14.0\n p['maleAgeDieProb'] = 0.00021\n p['femaleAgeScaling'] = 15.5\n p['femaleAgeDieProb'] = 0.00019\n \n p['orphansRelocationParam'] = 0.5\n \n # doBirths function parameters\n p['minPregnancyAge'] = 17\n p['maxPregnancyAge'] = 42\n p['growingPopBirthProb'] = 0.215\n p['fertilityCorrector'] = 1.0\n p['fertilityBias'] = 0.9\n \n # careTransitions function parameters\n p['zeroYearCare'] = 80.0\n p['childcareDecreaseRate'] = 0.25\n p['personCareProb'] = 0.0008\n p['maleAgeCareScaling'] = 18.0 # p['maleAgeCareProb'] = 0.0008\n p['femaleAgeCareScaling'] = 19.0 # p['femaleAgeCareProb'] = 0.0008\n p['baseCareProb'] = 0.0002\n p['careBias'] = 0.9\n p['careTransitionRate'] = 0.7\n\n p['unmetNeedExponent'] = 1.0 # 0.005 #[0.005 - 0.02]\n \n p['numCareLevels'] = 5\n p['careLevelNames'] = ['none','low','moderate','substantial','critical']\n p['careDemandInHours'] = [ 0.0, 8.0, 16.0, 32.0, 80.0 ]\n p['quantumCare'] = 4.0\n \n # careSupplies getCare and probSuppliers function parameters\n \n ######## Key parameter 1 ##############\n \n \n p['weeklyHours'] = 40.0\n \n \n p['priceChildCare'] = 0.76 # 6 \n p['schoolAge'] = 5\n 
p['maxFormalChildcareHours'] = 48\n p['schoolHours'] = 30\n p['freeChildcareHours'] = 15\n p['workingParentsFreeChildcareHours'] = 30\n p['minAgeStartChildCareSupport'] = 3\n p['minAgeStartChildCareSupportByIncome'] = 2\n p['maxHouseholdIncomeChildCareSupport'] = 40 # 320\n \n ######## Key parameter 2 ##############\n # 5: No public supply \n \n p['retiredHours'] = [48.0, 36.0, 20.0, 10.0] # 60.0\n p['studentHours'] = [24.0, 16.0, 8.0, 4.0]\n p['teenAgersHours'] = [16.0, 0.0, 0.0, 0.0]\n p['unemployedHours'] = [32.0, 24.0, 16.0, 8.0]\n p['employedHours'] = [28.0, 20.0, 12.0, 8.0]\n p['formalCareDiscountFactor'] = 0.5\n \n p['socialNetworkDistances'] = [0.0, 1.0, 2.0, 1.0, 2.0, 2.0, 3.0, 3.0]\n p['networkDistanceParam'] = 2.0\n p['socialCareWeightBias'] = 1.0\n p['unmetCareNeedDiscountParam'] = 0.5\n p['shareUnmetNeedDiscountParam'] = 0.5\n # p['pastShareUnmetNeedWeight'] = 0.5\n \n \n \n p['networkSizeParam'] = 10.0 # 1.0\n \n p['careSupplyBias'] = 0.5\n p['careIncomeParam'] = 0.001\n \n # Hospitalization Costs\n p['qalyBeta'] = 0.18\n p['qalyAlpha'] = 1.5\n p['qalyDiscountRate'] = 0.035\n p['qalyIndexes'] = [1.0, 0.8, 0.6, 0.4, 0.2]\n p['unmetCareHealthParam'] = 0.1\n p['hospitalizationParam'] = 0.5\n p['needLevelParam'] = 2.0\n p['unmetSocialCareParam'] = 2.0\n p['costHospitalizationPerDay'] = 400\n \n # ageTransitions, enterWorkForce and marketWage functions parameters\n p['ageTeenagers'] = 12\n p['minWorkingAge'] = 16\n \n ######## Key parameter 3 ##############\n \n p['careBankingSchemeOn'] = False\n p['socialCareBankingAge'] = 65\n \n p['absoluteCreditQuantity'] = False\n p['quantityYearlyIncrease'] = 0.0\n p['socialCareCreditQuantity'] = 0\n p['kinshipNetworkCarePropension'] = 0.5\n p['volunteersCarePropensionCoefficient'] = 0.01\n p['pensionContributionRate'] = 0.05\n \n p['hillHealthLevelThreshold'] = 3\n p['seriouslyHillSupportRate'] = 0.5\n \n ### Prices ####\n p['pricePublicSocialCare'] = 20.0 # [2.55] # 20\n p['priceSocialCare'] = 17.0 # [2.29] # 18\n p['taxBrackets'] = [663, 228, 0] # [28.16, 110.23] # [221, 865]\n p['taxBandsNumber'] = 3\n p['bandsTaxationRates'] = [0.4, 0.2, 0.0] # [0.0, 0.2, 0.4]\n # Tax Break Policy\n\n \n p['pensionWage'] = [5.0, 7.0, 10.0, 13.0, 18.0] # [0.64, 0.89, 1.27, 1.66, 2.29] # \n p['incomeInitialLevels'] = [5.0, 7.0, 9.0, 11.0, 14.0] #[0.64, 0.89, 1.15, 1.40, 1.78] # \n p['incomeFinalLevels'] = [10.0, 15.0, 22.0, 33.0, 50.0] #[1.27, 1.91, 2.80, 4.21, 6.37] # \n p['educationCosts'] = [0.0, 100.0, 150.0, 200.0] #[0.0, 12.74, 19.12, 25.49] # \n \n # Priced growth #####\n p['wageGrowthRate'] = 1.0 # 1.01338 # \n\n p['incomeGrowthRate'] = [0.4, 0.35, 0.35, 0.3, 0.25]\n \n # SES inter-generational mobility parameters\n p['leaveHomeStudentsProb'] = 0.5\n \n p['eduWageSensitivity'] = 0.2 # 0.5\n p['eduRankSensitivity'] = 3.0 # 5.0\n p['costantIncomeParam'] = 80.0 # 20.0\n p['costantEduParam'] = 10.0 # 10.0\n p['careEducationParam'] = 0.005 # 0.04\n \n \n \n # p['incEduExp'] = 0.25\n p['educationLevels'] = ['GCSE', 'A-Level', 'HND', 'Degree', 'Higher Degree']\n p['workingAge'] = [16, 18, 20, 22, 24]\n \n # doDivorce function parameters\n p['basicDivorceRate'] = 0.06\n p['variableDivorce'] = 0.06\n p['divorceModifierByDecade'] = [ 0.0, 1.0, 0.9, 0.5, 0.4, 0.2, 0.1, 0.03, 0.01, 0.001, 0.001, 0.001, 0.0, 0.0, 0.0, 0.0, 0.0 ]\n p['divorceBias'] = 1.0\n \n # doMarriages function parameters\n p['deltageProb'] = [0.0, 0.1, 0.25, 0.4, 0.2, 0.05]\n p['incomeMarriageParam'] = 0.025\n p['studentFactorParam'] = 0.5\n ######## Key parameter 4 ##############\n 
p['betaGeoExp'] = 2.0 #[1.0 - 4.0]\n \n p['betaSocExp'] = 2.0\n p['rankGenderBias'] = 0.5\n p['basicMaleMarriageProb'] = 0.9\n p['maleMarriageModifierByDecade'] = [ 0.0, 0.16, 0.5, 1.0, 0.8, 0.7, 0.66, 0.5, 0.4, 0.2, 0.1, 0.05, 0.01, 0.0, 0.0, 0.0, 0.0 ]\n \n # jobMarket, updateWork and unemploymentRate functions parameters\n p['unemploymentClassBias'] = 0.75\n p['unemploymentAgeBias'] = [1.0, 0.55, 0.35, 0.25, 0.2, 0.2]\n p['numberAgeBands'] = 6\n p['jobMobilitySlope'] = 0.004\n p['jobMobilityIntercept'] = 0.05\n p['ageBiasParam'] = [7.0, 3.0, 1.0, 0.5, 0.35, 0.15]\n p['deltaIncomeExp'] = 0.05\n p['unemployedCareBurdernParam'] = 0.025\n # Potential key parameter\n p['relocationCareLossExp'] = 1.0 # 40.0 # \n p['incomeSocialCostRelativeWeight'] = 0.5\n \n p['firingParam'] = 0.2\n p['wageVar'] = 0.06\n p['workDiscountingTime'] = 0.75 # 0.8\n p['sizeWeightParam'] = 0.7\n p['minClassWeightParam'] = 1.0\n p['incomeDiscountingExponent'] = 4.0\n p['discountingMultiplier'] = 2.0\n #p['incomeDiscountingParam'] = 2.0\n \n # relocationPensioners function parameters\n p['agingParentsMoveInWithKids'] = 0.1\n p['variableMoveBack'] = 0.1\n p['retiredRelocationParam'] = 0.001 # 0.005\n \n # houseMap function parameters\n p['geoDistanceSensitivityParam'] = 2.0\n p['socDistanceSensitivityParam'] = 2.0\n p['classAffinityWeight'] = 4.0\n p['distanceSensitivityParam'] = 0.5\n \n # relocationProb function parameters\n p['baseRelocatingProb'] = 0.05\n p['relocationParameter'] = 1.0 \n p['apprenticesRelocationProb'] = 0.5\n #p['expReloc'] = 1.0\n \n # computeRelocationCost and relocation Propensity functions parameters\n p['yearsInTownSensitivityParam'] = 0.5\n \n ######## Key parameter 5 ##############\n p['relocationCostParam'] = 0.5 # 1.0 \n \n ######## Key parameter 6 ##############\n p['propensityRelocationParam'] = 2.0 # 2.0 \n p['denRelocationWeight'] = 0.5\n \n \n ## Description of the map, towns, and houses\n p['mapGridXDimension'] = 8\n p['mapGridYDimension'] = 12 \n p['townGridDimension'] = 70\n p['cdfHouseClasses'] = [ 0.6, 0.9, 5.0 ]\n p['ukMap'] = [[ 0.0, 0.1, 0.2, 0.1, 0.0, 0.0, 0.0, 0.0 ],\n [ 0.1, 0.1, 0.2, 0.2, 0.3, 0.0, 0.0, 0.0 ],\n [ 0.0, 0.2, 0.2, 0.3, 0.0, 0.0, 0.0, 0.0 ],\n [ 0.0, 0.2, 1.0, 0.5, 0.0, 0.0, 0.0, 0.0 ],\n [ 0.4, 0.0, 0.2, 0.2, 0.4, 0.0, 0.0, 0.0 ],\n [ 0.6, 0.0, 0.0, 0.3, 0.8, 0.2, 0.0, 0.0 ],\n [ 0.0, 0.0, 0.0, 0.6, 0.8, 0.4, 0.0, 0.0 ],\n [ 0.0, 0.0, 0.2, 1.0, 0.8, 0.6, 0.1, 0.0 ],\n [ 0.0, 0.0, 0.1, 0.2, 1.0, 0.6, 0.3, 0.4 ],\n [ 0.0, 0.0, 0.5, 0.7, 0.5, 1.0, 1.0, 0.0 ],\n [ 0.0, 0.0, 0.2, 0.4, 0.6, 1.0, 1.0, 0.0 ],\n [ 0.0, 0.2, 0.3, 0.0, 0.0, 0.0, 0.0, 0.0 ]]\n p['ukClassBias'] = [[ 0.0, -0.05, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0 ],\n [ -0.05, -0.05, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ],\n [ 0.0, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0, 0.0 ],\n [ 0.0, -0.05, -0.05, 0.05, 0.0, 0.0, 0.0, 0.0 ],\n [ -0.05, 0.0, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0 ],\n [ -0.05, 0.0, 0.0, -0.05, -0.05, -0.05, 0.0, 0.0 ],\n [ 0.0, 0.0, 0.0, -0.05, -0.05, -0.05, 0.0, 0.0 ],\n [ 0.0, 0.0, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0 ],\n [ 0.0, 0.0, -0.05, 0.0, -0.05, 0.0, 0.0, 0.0 ],\n [ 0.0, 0.0, 0.0, -0.05, 0.0, 0.2, 0.15, 0.0 ],\n [ 0.0, 0.0, 0.0, 0.0, 0.1, 0.2, 0.15, 0.0 ],\n [ 0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0 ] ]\n p['mapDensityModifier'] = 0.6\n # p['numHouseClasses'] = 3\n # p['houseClasses'] = ['small','medium','large']\n \n ## Graphical interface details\n p['interactiveGraphics'] = False #True\n p['delayTime'] = 0.0\n p['screenWidth'] = 1300\n p['screenHeight'] = 700\n p['bgColour'] = 'black'\n 
p['mainFont'] = 'Helvetica 18'\n p['fontColour'] = 'white'\n p['dateX'] = 70\n p['dateY'] = 20\n p['popX'] = 70\n p['popY'] = 50\n p['pixelsInPopPyramid'] = 2000\n p['num5YearAgeClasses'] = 28\n p['careLevelColour'] = ['blue','green','yellow','orange','red']\n p['houseSizeColour'] = ['brown','purple','yellow']\n p['pixelsPerTown'] = 56\n p['maxTextUpdateList'] = 22\n \n # p['eduEduSensitivity'] = 0.5\n # p['mortalityBias'] = [1.0, 0.92, 0.84, 0.76, 0.68]\n # p['fertilityBias'] = [1.0, 0.92, 0.84, 0.76, 0.68]\n # p['divorceBias'] = [2.0, 1.5, 1.0, 0.75, 0.5]\n\n ## Transitions to care statistics\n \n ## Availability of care statistics\n \n #p['childHours'] = 5.0\n # p['employedHours'] = 12.0\n #p['homeAdultHours'] = 30.0\n #p['workingAdultHours'] = 25.0\n #p['maxEmployedHours'] = 60.0\n \n #p['lowCareHandicap'] = 0.5\n #p['hourlyCostOfCare'] = 20.0\n \n ## Fertility statistics\n \n # p['steadyPopBirthProb'] = 0.13\n # p['transitionYear'] = 1965\n \n ## Class and employment statistics\n # p['numClasses'] = 5\n # p['occupationClasses'] = ['lower','intermediate','higher']\n # p['cdfOccupationClasses'] = [ 0.6, 0.9, 1.0 ]\n\n ## Age transition statistics\n # p['ageOfAdulthood'] = 17\n \n ## Marriage function parameters\n \n # p['basicFemaleMarriageProb'] = 0.25\n # p['femaleMarriageModifierByDecade'] = [ 0.0, 0.5, 1.0, 1.0, 1.0, 0.6, 0.5, 0.4, 0.1, 0.01, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0 ]\n # p['femaleMarriageProb'] = [0.01, 0.15, 0.3, 0.2, 0.1, 0.1, 0.06, 0.05, 0.02, 0.01, 0.01, 0.005]\n # p['maleMarriageProb'] = [0.005, 0.08, 0.25, 0.25, 0.15, 0.1, 0.07, 0.05, 0.03, 0.02, 0.01, 0.005]\n \n ## Leaving home and moving around statistics\n # p['probApartWillMoveTogether'] = 0.3\n # p['coupleMovesToExistingHousehold'] = 0.3\n # p['basicProbAdultMoveOut'] = 0.22\n # p['probAdultMoveOutModifierByDecade'] = [ 0.0, 0.2, 1.0, 0.6, 0.3, 0.15, 0.03, 0.03, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]\n # p['basicProbSingleMove'] = 0.05\n # p['probSingleMoveModifierByDecade'] = [ 0.0, 1.0, 1.0, 0.8, 0.4, 0.06, 0.04, 0.02, 0.02, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]\n # p['basicProbFamilyMove'] = 0.03\n # p['probFamilyMoveModifierByDecade'] = [ 0.0, 0.5, 0.8, 0.5, 0.2, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1 ]\n\n \n return p", "def analyze_similarities():\r\n print('Total number of candidate pairs:', len(pairs))\r\n print(f'\\nNumber of actual item pairs in the train set: {pairs[\"true_match\"].sum()}\\n')\r\n\r\n for feature in ['text_score', 'image_score', 'txt_img_score', 'words_ratio', 'txt_img_words']:\r\n\r\n # Check distribution of True and False predictions for various similarity scores\r\n print('-' * 50)\r\n print(f'\\nDistribution of True/False predictions for {feature}')\r\n for thr in (0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95):\r\n print('-' * 50)\r\n print(f'Similarity score over {thr}')\r\n pairs_sample = pairs[pairs[feature] >= thr]\r\n print(f'Number of similar item pairs: {len(pairs_sample)}')\r\n print(pairs_sample['true_match'].value_counts(normalize=True))\r\n\r\n # Check if identical phash can be used to improve the accuracy\r\n same_phash = pairs[pairs['phash_match'] == 1]\r\n different_phash = pairs[pairs['phash_match'] == 0]\r\n\r\n print('\\nFor item pairs with the same phash:')\r\n print(same_phash['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(same_phash))\r\n\r\n print('\\nFor item pairs with different phash:')\r\n print(different_phash['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this 
subset:', len(different_phash))\r\n\r\n # Check if numbers in titles can be used to improve the accuracy\r\n same_numbers = pairs[pairs['nums_match'] == 1]\r\n different_numbers = pairs[pairs['nums_match'] == 0]\r\n\r\n print('\\nFor item pairs with the same numbers:')\r\n print(same_numbers['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(same_numbers))\r\n\r\n print('\\nFor item pairs with different numbers:')\r\n print(different_numbers['true_match'].value_counts(normalize=True))\r\n print('Number of item pairs in this subset:', len(different_numbers))", "def _compute_sensitivities(self, context):\n _logger.info(\"calling _compute_sensitivities.\")\n cached_id = np.random.randint(1000)\n if self.start_epoch == context.epoch_id:\n sensitivities_file = self.sensitivities_file\n else:\n sensitivities_file = self.sensitivities_file + \".epoch\" + str(\n context.epoch_id)\n sensitivities = self._load_sensitivities(sensitivities_file)\n\n for param in context.eval_graph.all_parameters():\n if not re.match(self.pruned_params, param.name()):\n continue\n if param.name() not in sensitivities:\n sensitivities[param.name()] = {\n 'pruned_percent': [],\n 'loss': [],\n 'size': param.shape()[0]\n }\n\n metric = None\n\n for param in sensitivities.keys():\n ratio = self.delta_rate\n while ratio < 1:\n ratio = round(ratio, 2)\n if ratio in sensitivities[param]['pruned_percent']:\n _logger.debug('{}, {} has computed.'.format(param, ratio))\n ratio += self.delta_rate\n continue\n if metric is None:\n metric = self._eval_graph(context, self.eval_rate,\n cached_id)\n\n param_backup = {}\n # prune parameter by ratio\n self._prune_parameters(\n context.eval_graph,\n context.scope, [param], [ratio],\n context.place,\n lazy=True,\n param_backup=param_backup)\n self.pruned_list[0]\n # get accuracy after pruning and update self.sensitivities\n pruned_metric = self._eval_graph(context, self.eval_rate,\n cached_id)\n loss = metric - pruned_metric\n _logger.info(\"pruned param: {}; {}; loss={}\".format(\n param, ratio, loss))\n for brother in self.pruned_list[0]:\n if re.match(self.pruned_params, brother):\n if brother not in sensitivities:\n sensitivities[brother] = {\n 'pruned_percent': [],\n 'loss': []\n }\n sensitivities[brother]['pruned_percent'].append(ratio)\n sensitivities[brother]['loss'].append(loss)\n\n self._save_sensitivities(sensitivities, sensitivities_file)\n\n # restore pruned parameters\n for param_name in param_backup.keys():\n param_t = context.scope.find_var(param_name).get_tensor()\n param_t.set(self.param_backup[param_name], context.place)\n\n# pruned_metric = self._eval_graph(context)\n\n ratio += self.delta_rate\n return sensitivities", "def monte_carlo_sample(self):\n\t\tresult = dict()\n\t\tfor n in self.topological_sort():\n\t\t\tpvals = tuple(result[p] for p in n.parents)\n\t\t\tresult[n.name] = n.cpt.rand_result(pvals)\n\t\treturn result", "def run(self):\n for i in range(self.generations):\n log.info(f'Training population in generation {i + 1}...')\n if i == 0:\n self.create_first_generation()\n else:\n self.create_next_generation()\n log.info(f'best individual: {self.best_individual()[1]}')\n log.info(f'best individual score: {self.best_individual()[0]}')", "def bulk_by_sample(self, request):\n if request.method == 'POST':\n hamming = True if 'hamming_distance' in request.GET else False\n validator = validate_list_of_ids(request.data, max_query=500)\n if validator['has_errors']:\n return Response({\n \"message\": 
validator['message'],\n \"data\": request.data\n })\n else:\n return self.formatted_response(get_sccmec_primers_by_sample(\n request.data['ids'],\n request.user.pk,\n exact_hits=True if 'exact_hits' in request.GET else False,\n predict=True if 'predict' in request.GET else False,\n hamming_distance=hamming\n ))", "def compute_probabilities():\n global total_spam_words, total_ham_words\n total_words = total_spam_words+total_ham_words\n unique_words = len(all_dict)\n print(\"Training Set Description: \")\n len_ham = len(ham_file_list)\n len_spam = len(spam_file_list)\n print(\"SPAM EMAILS: \",len_spam)\n print(\"HAM EMAILS: \",len_ham)\n print(\"Total words: \",total_words)\n print(\"Training...\")\n \n spam_probability = math.log((len_spam)/(len_spam+len_ham))\n ham_probability = math.log((len_ham)/(len_spam+len_ham))\n \n \n \n output_file = open(\"nbmodel.txt\", \"w+\", encoding=\"latin-1\")\n output_file.write(\"model_params \"+str(spam_probability)+\" \"+str(ham_probability)+\"\\n\")\n \n nbmodel = {}\n nbmodel[\"model_params\"] = (spam_probability,ham_probability)\n for word in all_dict.keys():\n spam_count = 1\n if word in spam_dict:\n spam_count+= spam_dict[word]\n \n word_spam_probability = math.log(spam_count / (total_spam_words+unique_words))\n \n ham_count = 1\n if word in ham_dict:\n ham_count+= ham_dict[word]\n \n word_ham_probability = math.log(ham_count / (total_ham_words+unique_words))\n \n output_file.write(word+\" \"+str(word_spam_probability)+\" \"+str(word_ham_probability)+\"\\n\")\n nbmodel[word] = (word_spam_probability, word_ham_probability) \n \n print(\"nbmodel.txt generated successfully...\")\n print(\"SPAM Probability: \",spam_probability)\n print(\"HAM Probability: \",ham_probability)\n output_file.close()", "def sampler(self, *args, **kwargs):\n\n return (samples_subgraphs ** 2).tolist()", "def run_experiment ( X, y, model_call, param_grid = None, scoring_func = accuracy,cv = KFoldStratifiedCV ( number_of_folds = 5 ),):\n\n scores = []\n iteration = 0\n # Iterate through the split\n for train, test in cv.split ( y ):\n # If first iteration and k values are passed, get the best one\n if iteration == 0 and param_grid:\n k = choose_k (\n X [ train ], y [ train ], model_call, param_grid, scoring_func, cv = cv )\n logger.info ( f\"Choosing k= { k } \" )\n else:\n # Defaults to 1 for condensed.\n k = 1\n\n iteration += 1\n\n # Instantiate the model with the value of k\n model = model_call ( k = k )\n\n # Standardize the data\n standardizer = Standardizer ( mean = True, std = True )\n\n # Fit the model\n model.fit ( X = standardizer.fit_transform ( X [ train ] ), y = y [ train ] )\n\n # make test set predictions\n y_pred = model.predict ( X = standardizer.transform ( X [ test ] ) )\n\n # Append the score\n scores.append ( scoring_func ( y [ test ], y_pred ) )\n \n logger.info ( f\"Avg Score: { np.mean ( scores ) } \" )\n \n return model\n # End run_experiment()", "def get_pub_scores(self, subset='auth_all'):\n from nltk.corpus import stopwords\n from nltk.tokenize import word_tokenize\n import csv\n from difflib import SequenceMatcher\n import jellyfish\n# self.sanity_check()\n\n if subset == 'auth_top':\n pubs = self.pub_auth_top['pub']\n elif subset == 'auth_all':\n pubs = self.pub_auth_all['pub']\n elif subset == 'inst_top':\n pubs = self.pub_inst_top['pub']\n elif subset == 'inst_all':\n pubs = self.pub_inst_all['pub']\n\n # load publication metrics\n\n # download stowords the first time\n def similar(a, b):\n return SequenceMatcher(None, a, b).ratio()\n\n def 
get_q(s):\n q = 0\n if \"Q4\" in s:\n q = 4\n if \"Q3\" in s:\n q = 3\n if \"Q2\" in s:\n q = 2\n if \"Q1\" in s:\n q = 1\n return q\n\n stop_words = set(stopwords.words('english'))\n\n journals = []\n with open('scimagojr.csv', newline='') as csvfile:\n s = csv.reader(csvfile, delimiter=';')\n for row in s:\n jname = row[2].lower()\n word_tokens = word_tokenize(jname)\n fname = [w for w in word_tokens if w not in stop_words]\n sent1 = ' '.join(fname)\n sent1 = sent1.replace('/', '')\n row[2] = sent1\n journals.append(row)\n\n Q = []\n for p in pubs:\n jname = p.lower()\n word_tokens = word_tokenize(jname)\n fname = [w for w in word_tokens if w not in stop_words]\n sent1 = ' '.join(fname)\n sent1 = sent1.replace('/', '')\n\n match = 0\n J = \"\"\n for Journal in journals:\n journal = Journal[2]\n s1 = similar(sent1, journal)\n s2 = jellyfish.jaro_winkler(sent1, journal)\n if s1 > 0.9 and s2 > 0.9:\n match += 1\n J = Journal[-1]\n Q.append(get_q(J))\n\n if subset == 'auth_top':\n self.pub_auth_top['Q'] = Q\n elif subset == 'auth_all':\n self.pub_auth_all['Q'] = Q\n elif subset == 'inst_top':\n self.pub_inst_top['Q'] = Q\n elif subset == 'inst_all':\n self.pub_inst_all['Q'] = Q", "def store_results(self, bp_nr, fold_nr, pair_nr,\n filters, patterns, variances,\n train_feature, test_feature,\n train_feature_full_fold, test_feature_full_fold, clf,\n train_accuracy, test_accuracy):\n local_vars = locals()\n del local_vars['self']\n del local_vars['bp_nr']\n del local_vars['fold_nr']\n del local_vars['pair_nr']\n for var in local_vars:\n self.__dict__[var][bp_nr, fold_nr, pair_nr] = local_vars[var]", "def sample(self):\n # Initialize nested dictionary of kwargs\n kwargs = Dict()\n\n # Realize samples\n for comp, param_name in self.params_to_realize:\n hyperparams = getattr(self, comp)[param_name].copy()\n kwargs[comp][param_name] = self.sample_param(hyperparams)\n\n # Convert any q, phi into e1, e2 as required by lenstronomy\n for comp in self.comps_qphi_to_e1e2: # e.g. 
'lens_mass'\n q = kwargs[comp].pop('q')\n phi = kwargs[comp].pop('phi')\n e1, e2 = param_util.phi_q2_ellipticity(phi, q)\n kwargs[comp]['e1'] = e1\n kwargs[comp]['e2'] = e2\n\n # Source pos is defined wrt the lens pos\n kwargs['src_light']['center_x'] += kwargs['lens_mass']['center_x']\n kwargs['src_light']['center_y'] += kwargs['lens_mass']['center_y']\n\n # Ext shear is defined wrt the lens center\n kwargs['external_shear']['ra_0'] = kwargs['lens_mass']['center_x']\n kwargs['external_shear']['dec_0'] = kwargs['lens_mass']['center_y']\n \n if 'lens_light' in self.components:\n # Lens light shares center with lens mass\n kwargs['lens_light']['center_x'] = kwargs['lens_mass']['center_x']\n kwargs['lens_light']['center_y'] = kwargs['lens_mass']['center_y']\n return kwargs", "def test_search_multiple_scoring(context):\n # When create a query block\n t = QuerySet(\"localhost\", index=\"foo\")\n\n # And there are records\n add_document(\"foo\", {\"bar\": 1, \"baz\": 4})\n add_document(\"foo\", {\"bar\": 1})\n\n # And I add scoring with params\n score = ScriptScore(\"s = custom_param + doc['bar'].value\", params={\"custom_param\": 1})\n t.score(score)\n\n boost = {\n \"boost_factor\": \"10\",\n \"filter\": Exists(\"baz\")\n }\n t.score(boost)\n results = t[0:10]\n\n # Then my results are scored correctly\n len(results).should.equal(2)\n results[0][\"_source\"][\"baz\"].should.equal(4)\n (\"baz\" in results[1][\"_source\"].keys()).should.be.false", "def process_what_to_run_randoms(pairs_to_test, random_counterpart):\n # prepare pairs for random vs random.\n pairs_for_sstesting_random = []\n targets = list(set([pair[0] for pair in pairs_to_test]))\n for target in targets:\n pairs_for_sstesting_random.append([target, [random_counterpart]])\n return pairs_for_sstesting_random", "def get_params_dist_single_subject(raw_path, results_path):\n if type(results_path) == str:\n res_df = pd.read_csv(results_path, index_col=0)\n else:\n res_df = results_path\n\n au_top, au_top_acc = [], []\n fe_top, fe_top_acc = [], []\n pca_dims, pca_dims_acc = [], []\n\n for test_type in range(1, 6):\n res_test = res_df[res_df.test_type == '[' + str(test_type) + ']']\n\n # AU top-n options and their mean accuracies\n au_top_opts = res_df.au_top.unique().tolist()\n au_mean_accs = []\n\n for au_top_opt in au_top_opts:\n au_mean_accs.append(res_test[res_test.au_top == au_top_opt].mean_test_score.mean())\n\n au_top.append(au_top_opts)\n au_top_acc.append(au_mean_accs)\n\n # Features top-n options and their mean accuracies\n fe_top_opts = res_df.fe_top.unique().tolist()\n fe_mean_accs = []\n\n for fe_top_opt in fe_top_opts:\n fe_mean_accs.append(res_test[res_test.fe_top == fe_top_opt].mean_test_score.mean())\n\n fe_top.append(fe_top_opts)\n fe_top_acc.append(fe_mean_accs)\n\n # PCA dimension options and their mean accuracies\n pca_dim_opts = res_df.pca_dim.unique().tolist()\n pca_dim_mean_accs = []\n\n for pca_dim_opt in pca_dim_opts:\n pca_dim_mean_accs.append(res_test[res_test.pca_dim == pca_dim_opt].mean_test_score.mean())\n\n pca_dims.append(pca_dim_opts)\n pca_dims_acc.append(pca_dim_mean_accs)\n\n return au_top, au_top_acc, fe_top, fe_top_acc, pca_dims, pca_dims_acc", "def learnHyperparams(self, report=None):\n \n self.theta = crossValidate(self.k, self.cvEpochs, self.hpRanges, self.trainSet, \\\n self.train, self.test, report);", "def _gen_pert(self, count, **kwargs):\n self._check_pert(**kwargs)\n pert = FairBetaPert(**kwargs)\n rvs = pert.random_variates(count)\n return rvs", "def get_bests(self):\n set_names = 
[\"training\", \"hp_selection\", \"validation\"]\n run_tec_conf_set = recursivedict()\n validation = self._campaign_configuration['General']['validation']\n hp_selection = self._campaign_configuration['General']['hp_selection']\n if (validation, hp_selection) in {(\"All\", \"All\"), (\"Extrapolation\", \"All\"), (\"All\", \"HoldOut\"), (\"HoldOut\", \"All\"), (\"HoldOut\", \"HoldOut\"), (\"Extrapolation\", \"HoldOut\")}:\n # For each run, for each technique the best configuration\n run_tec_best_conf = recursivedict()\n\n # Hyperparameter search\n for conf in self._exp_confs:\n run = int(conf.get_signature()[0].replace(\"run_\", \"\"))\n technique = conf.technique\n run_tec_conf_set[run][technique][str(conf.get_signature()[4:])] = conf.mapes\n # First experiment for this technique or better than the current best\n if technique not in run_tec_best_conf[run] or conf.mapes[\"hp_selection\"] < run_tec_best_conf[run][technique].mapes[\"hp_selection\"]:\n run_tec_best_conf[run][technique] = conf\n\n # Print results for each run\n for run in range(0, self._campaign_configuration['General']['run_num']):\n self._logger.info(\"-->Printing results for run %s\", str(run))\n overall_run_best = None\n # Print data of single techniques\n for technique in run_tec_best_conf[run]:\n temp = run_tec_best_conf[run][technique]\n self._logger.info(\"---Best result for %s - Configuration is %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", technique, temp.get_signature()[4:], temp.mapes[\"training\"], temp.mapes[\"hp_selection\"], temp.mapes[\"validation\"])\n\n # Compute which is the best technique\n if not overall_run_best or temp.mapes[\"hp_selection\"] < overall_run_best.mapes[\"hp_selection\"]:\n overall_run_best = temp\n best_model_description = overall_run_best.print_model()\n self._logger.info(\"<--Overall best result is %s %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", overall_run_best.get_signature()[3:], \"(\" + best_model_description + \")\" if best_model_description else \"\", overall_run_best.mapes[\"training\"], overall_run_best.mapes[\"hp_selection\"], overall_run_best.mapes[\"validation\"])\n\n elif (validation, hp_selection) in {(\"KFold\", \"All\"), (\"KFold\", \"HoldOut\")}:\n folds = float(self._campaign_configuration['General']['folds'])\n # For each run, for each fold, for each technique, the best configuration\n run_fold_tec_best_conf = recursivedict()\n\n # Hyperparameter search inside each fold\n for conf in self._exp_confs:\n run = int(conf.get_signature()[0].replace(\"run_\", \"\"))\n fold = int(conf.get_signature()[1].replace(\"f\", \"\"))\n technique = conf.technique\n if \"hp_selection\" not in run_tec_conf_set[run][technique][str(conf.get_signature_string()[4:])]:\n for set_name in set_names:\n run_tec_conf_set[run][technique][str(conf.get_signature_string()[4:])][set_name] = 0\n for set_name in set_names:\n run_tec_conf_set[run][technique][str(conf.get_signature_string()[4:])][set_name] = run_tec_conf_set[run][technique][str(conf.get_signature_string()[4:])][set_name] + conf.mapes[set_name] / folds\n # First experiment for this fold+technique or better than the current best\n if technique not in run_fold_tec_best_conf[run][fold] or conf.mapes[\"hp_selection\"] < run_fold_tec_best_conf[run][fold][technique].mapes[\"hp_selection\"]:\n run_fold_tec_best_conf[run][fold][technique] = conf\n\n # Aggregate different folds (only the value of the mapes)\n run_tec_set = recursivedict()\n for run in run_fold_tec_best_conf:\n for 
fold in run_fold_tec_best_conf[run]:\n for tec in run_fold_tec_best_conf[run][fold]:\n if \"hp_selection\" not in run_tec_set[run][technique]:\n for set_name in set_names:\n run_tec_set[run][tec][set_name] = 0\n for set_name in set_names:\n run_tec_set[run][tec][set_name] = run_fold_tec_best_conf[run][fold][tec].mapes[set_name]\n # Print results for each run\n for run in range(0, self._campaign_configuration['General']['run_num']):\n self._logger.info(\"Printing results for run %s\", str(run))\n overall_run_best = ()\n # Print data of single techniques\n for technique in run_tec_set[run]:\n self._logger.info(\"---Best result for %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", technique, run_tec_set[run][technique][\"training\"], run_tec_set[run][technique][\"hp_selection\"], run_tec_set[run][technique][\"validation\"])\n\n # Compute which is the best technique\n if not overall_run_best or run_tec_set[run][technique][\"hp_selection\"] < overall_run_best[1][\"hp_selection\"]:\n overall_run_best = (technique, run_tec_set[run][technique])\n\n self._logger.info(\"---Overall best result is %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", overall_run_best[0], overall_run_best[1][\"training\"], overall_run_best[1][\"hp_selection\"], overall_run_best[1][\"validation\"])\n\n # Overall best will contain as first argument the technique with the best (across runs) average (across folds) mape on validation; now we consider on all the runs and on all the folds the configuraiton of this technique with best validation mape\n\n elif (validation, hp_selection) in {(\"All\", \"KFold\"), (\"HoldOut\", \"KFold\"), (\"Extrapolation\", \"KFold\")}:\n folds = float(self._campaign_configuration['General']['folds'])\n # For each run, for each technique, for each configuration, the aggregated mape\n run_tec_conf_set = recursivedict()\n\n # Hyperparameter search aggregating over folders\n for conf in self._exp_confs:\n run = int(conf.get_signature()[0].replace(\"run_\", \"\"))\n fold = int(conf.get_signature()[2].replace(\"f\", \"\"))\n technique = conf.technique\n configuration = str(conf.get_signature()[4:])\n if \"hp_selection\" not in run_tec_conf_set[run][technique][configuration]:\n for set_name in set_names:\n run_tec_conf_set[run][technique][configuration][set_name] = 0\n for set_name in set_names:\n run_tec_conf_set[run][technique][configuration][set_name] = run_tec_conf_set[run][technique][configuration][set_name] + conf.mapes[set_name] / folds\n\n # Select the best configuration for each technique across different folders\n run_tec_best_conf = recursivedict()\n for run in run_tec_conf_set:\n for tec in run_tec_conf_set[run]:\n for conf in run_tec_conf_set[run][tec]:\n if tec not in run_tec_best_conf[run] or run_tec_conf_set[run][tec][conf][\"hp_selection\"] < run_tec_best_conf[run][tec][1][\"hp_selection\"]:\n run_tec_best_conf[run][tec] = (conf, run_tec_conf_set[run][tec][conf])\n\n # Print results for each run\n for run in range(0, self._campaign_configuration['General']['run_num']):\n self._logger.info(\"Printing results for run %s\", run)\n overall_run_best = () # (technique, configuration, mapes)\n # Print data of single techniques\n for technique in run_tec_best_conf[run]:\n temp = run_tec_best_conf[run][technique]\n self._logger.info(\"---Best result for %s - Configuration is %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", technique, temp[0], temp[1][\"training\"], temp[1][\"hp_selection\"], 
temp[1][\"validation\"])\n\n # Compute which is the best technique\n if not overall_run_best or temp[1][\"hp_selection\"] < overall_run_best[2][\"hp_selection\"]:\n overall_run_best = (technique, temp[0], temp[1])\n\n self._logger.info(\"---Overall best result is %s %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", overall_run_best[0], overall_run_best[1], overall_run_best[2][\"training\"], overall_run_best[2][\"hp_selection\"], overall_run_best[2][\"validation\"])\n\n elif (validation, hp_selection) in {(\"KFold\", \"KFold\")}:\n folds = float(self._campaign_configuration['General']['folds'])\n # For each run, for each external fold, for each technique, the aggregated mape\n run_efold_tec_conf_set = recursivedict()\n\n # Hyperparameter search aggregating over internal folders\n for conf in self._exp_confs:\n run = int(conf.get_signature()[0].replace(\"run_\", \"\"))\n ext_fold = int(conf.get_signature()[2].replace(\"f\", \"\"))\n technique = conf.technique\n configuration = str(conf.get_signature()[4:])\n if \"hp_selection\" not in run_tec_conf_set[run][technique][configuration]:\n for set_name in set_names:\n run_tec_conf_set[run][technique][configuration][set_name] = 0\n for set_name in set_names:\n run_tec_conf_set[run][technique][configuration][set_name] = run_tec_conf_set[run][technique][configuration][set_name] + (conf.mapes[set_name] / (folds * folds))\n if configuration not in run_efold_tec_conf_set[run][ext_fold][technique]:\n for set_name in set_names:\n run_efold_tec_conf_set[run][ext_fold][technique][configuration][set_name] = 0\n for set_name in set_names:\n run_efold_tec_conf_set[run][ext_fold][technique][configuration][set_name] = run_efold_tec_conf_set[run][ext_fold][technique][configuration][set_name] + (conf.mapes[set_name] / (folds * folds))\n\n # Select the best configuration for each technique in each external fold across different internal folders\n run_efold_tec_best_conf = recursivedict()\n for run in run_efold_tec_conf_set:\n for efold in run_efold_tec_conf_set[run]:\n for tec in run_efold_tec_conf_set[run][efold]:\n for conf in run_efold_tec_conf_set[run][efold][tec]:\n if conf not in run_efold_tec_best_conf[run][efold][tec] or run_efold_tec_conf_set[run][efold][tec][conf][\"hp_selection\"] < run_efold_tec_best_conf[run][efold][tec][1][\"hp_selection\"]:\n run_efold_tec_best_conf[run][efold][tec] = (conf, run_efold_tec_conf_set[run][efold][tec][conf], run_efold_tec_conf_set[run][efold][tec][conf])\n\n # Aggregate on external folds\n run_tec_set = recursivedict()\n for run in run_efold_tec_best_conf:\n for efold in run_efold_tec_best_conf[run]:\n for tec in run_efold_tec_best_conf[run][efold]:\n if \"hp_selection\" not in run_tec_set[run][tec]:\n for set_name in set_names:\n run_tec_set[run][tec][set_name] = 0\n for set_name in set_names:\n run_tec_set[run][tec][set_name] = run_tec_set[run][tec][set_name] + run_efold_tec_best_conf[run][efold][tec][1][set_name]\n\n # Print results for each run\n for run in range(0, self._campaign_configuration['General']['run_num']):\n self._logger.info(\"Printing results for run %s\", run)\n overall_run_best = ()\n # Print data of single techniques\n for technique in run_tec_set[run]:\n self._logger.info(\"---Best result for %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", technique, run_tec_set[run][technique][\"training\"], run_tec_set[run][technique][\"hp_selection\"], run_tec_set[run][technique][\"validation\"])\n\n # Compute which is the best technique\n if not 
overall_run_best or run_tec_set[run][technique][\"hp_selection\"] < overall_run_best[1][\"hp_selection\"]:\n overall_run_best = (technique, run_tec_set[run][technique])\n\n self._logger.info(\"---Overall best result is %s - (Training MAPE is %f - HP Selection MAPE is %f) - Validation MAPE is %f\", overall_run_best[0], overall_run_best[1][\"training\"], overall_run_best[1][\"hp_selection\"], overall_run_best[1][\"validation\"])\n\n else:\n self._logger.error(\"Unexpected combination: %s\", str((validation, hp_selection)))\n sys.exit(1)\n best_confs = {}\n best_technique = None\n for conf in self._exp_confs:\n technique = conf.technique\n if technique not in best_confs or conf.mapes[\"validation\"] < best_confs[technique].mapes[\"validation\"]:\n best_confs[technique] = conf\n for technique in best_confs:\n if not best_technique or best_confs[technique].mapes[\"validation\"] < best_confs[best_technique].mapes[\"validation\"]:\n best_technique = technique\n if bool(self._campaign_configuration['General']['details']):\n for run in run_tec_conf_set:\n for tec in run_tec_conf_set[run]:\n for conf in run_tec_conf_set[run][tec]:\n assert \"hp_selection\" in run_tec_conf_set[run][tec][conf]\n assert \"validation\" in run_tec_conf_set[run][tec][conf], \"training MAPE not found for \" + str(run) + str(tec) + str(conf)\n self._logger.info(\"Run %s - Technique %s - Conf %s - Training MAPE %f - Test MAPE %f\", str(run), ec.enum_to_configuration_label[tec], str(conf), run_tec_conf_set[run][tec][conf][\"hp_selection\"], run_tec_conf_set[run][tec][conf][\"validation\"])\n return best_confs, best_technique", "def test_split_data_success(self):\n for test in self.success_split_data_test_params:\n score_test = score.ScoresGenerator()\n score_test.split_data(test[KEY_INPUT])\n self.assertDictEqual(score_test.SCORES, test[KEY_EXPECTED])", "def bulk_by_sample(self, request):\n if request.method == 'POST':\n hamming = True if 'hamming_distance' in request.GET else False\n validator = validate_list_of_ids(request.data, max_query=500)\n if validator['has_errors']:\n return Response({\n \"message\": validator['message'],\n \"data\": request.data\n })\n else:\n return self.formatted_response(get_sccmec_primers_by_sample(\n request.data['ids'],\n request.user.pk,\n is_subtypes=True,\n exact_hits=True if 'exact_hits' in request.GET else False,\n predict=True if 'predict' in request.GET else False,\n hamming_distance=hamming\n ))", "def gw_heritability(\n input_snp_filename: \"Data Input, use the SNPs file from dataParse\",\n output_summary_filename: 'output file for the genomewide results summary, use .csv',\n logger_filename: 'file for the logger, use a txt',\n sweeps: \"number of samples for each chain\" = 1000,\n burnin: \"number of burnin samples\" = 1000,\n n_chains: \"number of chains of the sampler\" = 4,\n n_cores: \"number of parallel cores to use\" = 4,\n N_1kG: \"number of SNPs onwhich the LD-score is calculates\" = 1290028,\n chromosome: \"chromosome on which the analysis is run\" = \"all\",\n sep: \"separator for the input files, use t for tab separated (not \\t)\" = \",\",\n model: 'regression model'='normal',\n fix_intercept = False,\n ):\n\n # Initialisation of the logger\n output_logger = log.setup_logger(\"output_logger\", logger_filename)\n log.initialise_log(output_logger,\n 'genome-wide regression, model: %s' %model,\n [input_snp_filename],\n [output_summary_filename],\n sweeps,\n burnin,\n chromosome = str(chromosome),\n other_params_diz = {'chains': n_chains, 'cores': n_cores})\n\n # 
Initialisation function, it reads the summary stats file, filters the SNPs,\n # creates the output files\n\n logging.info(\"Start Analysis\")\n\n snps = s.Snps()\n # read table\n snps.read_table(input_snp_filename, separator=sep)\n # generate chi squared stats\n snps.generate_stats()\n # update the summary stats\n snps.update_summary()\n output_logger.info(\" Sample size \" + str(snps.n_patients) + \"\\n\")\n\n\n snps.apply_filter_table(s.baghera_filter)\n snps.update_summary()\n output_logger.info(\"After baghera init filter.\\nNumber of SNPs: %s\\nNumber of genes: %s\\n\" \\\n %(str(snps.n_snps), str(snps.n_genes)) )\n\n # Non coding SNPs are assigned to a dummy gene, such that the regression is done on the entire SNPs' set\n snps.rename_non_annotated(name='NonCoding')\n\n if chromosome != \"all\":\n snps.apply_filter_table(snps.cut_single_chrom, **{'chromosome': chromosome})\n output_logger.info(\n \"Analysis restricted to chr %s\" %str(chromosome) )\n\n snps.update_summary()\n output_logger.info(\"Analysis. Number of SNPs: %s\\n, Number of genes: %s\\n\" \\\n %(str(snps.n_snps), str(snps.n_genes)) )\n\n\n if model =='normal':\n [intercept, slope] = heritability.gw_normal(snps, output_summary_filename, output_logger,\n sweeps, burnin, n_chains, n_cores, N_1kG, fix_intercept)\n elif model=='gamma':\n [intercept, slope] = heritability.gw_normal(snps, output_summary_filename, output_logger,\n sweeps, burnin, n_chains, n_cores, N_1kG, fix_intercept)\n else:\n logging.info('Normal model by default')\n [intercept, slope] = heritability.gw_normal(snps, output_summary_filename, output_logger,\n sweeps, burnin, n_chains, n_cores, N_1kG, fix_intercept)\n logging.info(\"Analysis complete\")", "def split_data_metrics_learning(cfg):\n actual_pose = cfg['actual_pose']\n target = cfg['target']\n person_ids = cfg['person_ids']\n \n # Split train and val data based on the person ids.\n all_ids = np.arange(1, 21)\n val_ids = cfg['val_ids']\n train_ids = set(all_ids).symmetric_difference(val_ids)\n \n anchor_gallery_split_size = cfg['anchor_gallery_split_size']\n window_width = cfg['window_width']\n overlap = cfg['overlap']\n random_state = cfg['random_state']\n \n # Get only the training set data and the label.\n X_train, y_train = get_req_ids(actual_pose, target, train_ids, person_ids)\n \n # Select the evaluation data that measures the performance of the model on the training set.\n train_accuracy_ids = random.sample(train_ids, len(val_ids))\n X_train_acc, y_train_acc = get_req_ids(actual_pose, target, train_accuracy_ids, person_ids)\n \n # Anchor/Gallery set split for the training set.\n X_train_gal, X_train_anchor, y_train_gal, y_train_anchor = train_test(X_train = X_train_acc, y_train = y_train_acc, \n test_size=anchor_gallery_split_size, \n random_state=random_state, stratify=y_train_acc)\n \n # Subsample the gait sequences of the anchor/gallery set of the training set based on the window width and the overlap.\n X_train_gal, y_train_gal = subsample(cfg, X_train_gal, y_train_gal, window_width=window_width, overlap=overlap)\n X_train_anchor, y_train_anchor = subsample(cfg, X_train_anchor, y_train_anchor, window_width=window_width, overlap=overlap)\n \n # Get only the validation set data and the label.\n X_val, y_val = get_req_ids(actual_pose, target, val_ids, person_ids)\n \n # Anchor/Gallery set split for the validation set.\n X_val_gal, X_val_anchor, y_val_gal, y_val_anchor = train_test(X_train = X_val, \n y_train = y_val, \n test_size=anchor_gallery_split_size, \n random_state=random_state, \n 
stratify=y_val)\n \n \n # If data augmentation parameter is set to True in the configuration dictionary, data augmentation is done for the training set.\n if cfg['augment_data']:\n X_train, y_train = augment_data(X_train, y_train)\n \n # Subsample the gait sequences of the whole training set based on the window width and the overlap.\n X_train, y_train = subsample(cfg, X_train, y_train, window_width=window_width, overlap=overlap)\n \n # Subsample the gait sequences of the anchor/gallery set of the validation set based on the window width and the overlap.\n X_val_gal, y_val_gal = subsample(cfg, X_val_gal, y_val_gal, window_width=window_width, overlap=overlap)\n X_val_anchor, y_val_anchor = subsample(cfg, X_val_anchor, y_val_anchor, window_width=window_width, overlap=overlap)\n \n # Concatenate the gallery and anchor set of the validation data and label as a whole. This is just to maintain the train-val uniformity and \n # is not used anywhere in the project.\n X_val, y_val = np.concatenate((X_val_gal, X_val_anchor)), np.concatenate((y_val_gal, y_val_anchor))\n \n return X_train, X_val, X_train_gal, X_train_anchor, X_val_gal, X_val_anchor, y_train, y_val, y_train_gal, y_train_anchor, y_val_gal, y_val_anchor", "def samples(self):\n pass", "def get_hyperparams(self):", "def fit(train_data, train_target):\r\n for name in models.keys():\r\n est = models[name]\r\n est_params = params2[name]\r\n gscv = GridSearchCV(estimator=est, param_grid=est_params, cv=5)\r\n gscv.fit(train_data, train_target)\r\n print(\"best parameters are: {}\".format(gscv.best_estimator_))\r\n print(\"Where we selected the parameters: {}\" .format(gscv.cv_results_['params'][gscv.best_index_]))\r\n print(\"with mean cross-validated score: {}\" .format(gscv.best_score_))", "def _sample_likelihood_params(self):\n if self.marginalize:\n # We integrated out `beta` a la Bayesian linear regression.\n pass\n else:\n self._sample_beta_and_sigma_y()", "def optimize_hyper_parameters(data, predictor, cv_fold, verbose=0):\n # Hyper parameters to explore\n hyper = predictor.hyper_parameters_grid\n regs = list(ParameterGrid(hyper))\n if len(regs) == 0:\n return {}\n if len(regs) == 1:\n return regs[0]\n\n # Optimization\n if verbose:\n print(\"Optimizing...\")\n scores = []\n if cv_fold > 1:\n skf = StratifiedKFold(n_splits=cv_fold, shuffle=True, random_state=SEED)\n\n n_param = 0\n for reg in regs:\n n_param += 1\n if verbose > 1:\n print(\"Optimizing parameter {0} out of {1}...\".format(n_param, len(regs)))\n predictor.set_hyper_parameters(hyper_parameters=reg)\n scores_per_reg = []\n\n # splitting\n if cv_fold > 1:\n for train_idx, test_idx in skf.split(data['x_train'], data['y_train']):\n # Split training data in train and dev (called test as it's more convenient)\n new_data = {'x_train': data['x_train'][train_idx], 'x_test': data['x_train'][test_idx],\n 'y_train': data['y_train'][train_idx], 'y_test': data['y_train'][test_idx]}\n\n # Train classifier\n predictor.fit(new_data)\n score = predictor.score(new_data)\n scores_per_reg.append(score)\n\n else:\n predictor.fit(data) # No cv so we fit on the whole data and we keep the best hyper params\n score = predictor.score(data)\n scores_per_reg.append(score)\n\n # We only keep the mean:\n scores.append(np.mean(scores_per_reg))\n if verbose > 1:\n print(\"Parameters {0} yielded a score of {1}.\".format(reg, scores[-1]))\n\n best = np.argmax(scores) # To find the hyper parameter that yielded the best score on average.\n return regs[best]", "def hyper_parameter_tuning(X, y, classifier, 
models, sntypes_map, feature_names, fig_dir='.', remove_models=(), name=''):\n\n # Hyperparameter grid\n n_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)]\n max_features = ['auto', 'sqrt']\n max_depth = [int(x) for x in np.linspace(10, 110, num=11)]\n max_depth.append(None)\n min_samples_split = [2, 5, 10]\n min_samples_leaf = [1, 2, 4]\n bootstrap = [True, False]\n random_grid = {'n_estimators': n_estimators,\n 'max_features': max_features,\n 'max_depth': max_depth,\n 'min_samples_split': min_samples_split,\n 'min_samples_leaf': min_samples_leaf,\n 'bootstrap': bootstrap}\n\n # Get data\n num_features = X.shape[1]\n model_names = [sntypes_map[model] for model in models]\n X, y, models, remove_models = remove_redundant_classes(X, y, models, remove_models)\n\n # Get best features\n n = 50\n num_features, feature_names, X = get_n_best_features(n, X, y, classifier, feature_names, num_features, fig_dir, name, models, model_names)\n\n # Randomised Search\n clf_random = RandomizedSearchCV(estimator=classifier, param_distributions=random_grid, n_iter=7, cv=3, verbose=2,\n random_state=42, n_jobs=2)\n clf_random.fit(X, y)\n print(clf_random.best_params_)\n\n def evaluate(model, test_features, test_labels):\n predictions = model.predict(test_features)\n errors = abs(predictions - test_labels)\n mape = 100 * np.mean(errors / test_labels)\n accuracy = 100 - mape\n print('Model Performance')\n print('Average Error: {:0.4f} degrees.'.format(np.mean(errors)))\n print('Accuracy = {:0.2f}%.'.format(accuracy))\n\n return accuracy\n\n best_random = clf_random.best_estimator_\n # random_accuracy = evaluate(best_random, test_features, test_labels)", "def _get_model_priors(self):\n if self._alpha_model_priors:\n return self._alpha_model_priors\n # sample the variables from their corresponding distributions\n params = self._get_prior_params()\n self._alpha_model_priors = self._params2probs(params)\n return self._alpha_model_priors", "def online_dictionary_learning(self, dinit, tinit, winit, pars, trainingdata, HMat, QMat):\n num_images = trainingdata.shape[1]\n # n0 = num_images/10\n n0 = num_images/(pars['batchSize']*10)\n model = dict(\n D=dinit, # dictionary\n A=tinit, # transform matrix\n W=winit, # classifier\n )\n\n param = {\n 'lambda1': pars['lambda_'],\n 'lambda2': 0,\n 'mode': 2\n }\n\n # crf iterations\n fobj_avg = dict()\n\n if not os.path.isdir(settings.GENERATED_DATA_DIRECTORY):\n os.mkdir(settings.GENERATED_DATA_DIRECTORY)\n\n for iter_ in range(pars['maxIters']):\n tic = time.time()\n # Take a random permutation of the samples\n filename = 'permute_{}_{}_{}.npy'.format(iter_, pars['numBases'], pars['dataset'])\n full_path = os.path.join(settings.GENERATED_DATA_DIRECTORY, filename)\n\n if os.path.isfile(full_path):\n ind_rnd = np.load(full_path, allow_pickle=False, fix_imports=False)\n else:\n ind_rnd = np.random.permutation(num_images)\n np.save(full_path, ind_rnd, allow_pickle=False, fix_imports=False)\n\n for batch in range(num_images//pars['batchSize']):\n # load the dataset\n # we only loads one sample or a small batch at each iteration\n # batch_idx = ind_rnd((1:pars.batchSize)+pars.batchSize*(batch-1));\n lower_index = pars['batchSize'] * batch\n upper_index = lower_index + pars['batchSize']\n batch_idx = ind_rnd[lower_index:upper_index]\n yt = trainingdata[:, batch_idx]\n ht = HMat[:, batch_idx]\n qt = QMat[:, batch_idx]\n # TODO: Review if in these cases it's mandatory to assign copies\n D = model['D']\n W = model['W']\n A = model['A']\n # sparse coding\n # S = 
L1QP_FeatureSign_Set(yt, D, gamma, lambda);\n\n S = lasso(\n yt,\n D if np.isfortran(D) else np.asfortranarray(D),\n **param\n )\n\n # compute the gradient of crf parameters\n grad_W = (1-pars['mu'])*(W@S - ht)@S.T + pars['nu2']*W\n grad_A = pars['mu']*(A@S - qt)@S.T + pars['nu1']*A\n grad_S1 = W.T @ (W@S - ht) # gradient w.r.t S for 0.5*||H-WS||_2^2\n grad_S2 = A.T @ (A@S - qt) # gradient w.r.t S for 0.5*||Q-AS||_2^2\n\n # compute the gradient of dictionary\n # find the active set and compute beta\n B1 = np.zeros((pars['numBases'], pars['batchSize']), dtype=np.int)\n B2 = np.zeros((pars['numBases'], pars['batchSize']), dtype=np.int)\n DtD = D.T@D\n for j in range(pars['batchSize']):\n active_set = np.array(np.nonzero(S[:, j] != 0)).ravel()\n # DtD = D(:,active_set)'*D(:,active_set) + gamma*eye(length(active_set));\n DtD_hat = DtD[active_set, active_set] + pars['gamma']*np.eye(active_set.shape[0])\n\n # DtD_inv = DtD\\eye(length(active_set));\n DtD_inv = np.linalg.solve(DtD_hat, np.eye(active_set.shape[0]))\n\n B1[active_set, j] = (DtD_inv @ grad_S1[active_set, j]).T\n\n B2[active_set, j] = (DtD_inv @ grad_S2[active_set, j]).T\n\n grad_D = (1-pars['mu'])*(-D@B1@S.T + (yt - D@S)@B1.T) + pars['mu'] * \\\n (-D@B2@S.T + (yt - D@S)@B2.T) # dD = -D*B*S' + (X - D*S)*B';\n\n # use yang's method\n # gfullMat = zeros([size(D),size(D,2)]);\n # [gMat, IDX] = sparseDerivative(D, full(S), yt);\n # gfullMat(:,IDX,IDX) = gMat;\n # gradSmat = repmat(reshape(grad_S1,[1 1 length(grad_S1)]),size(D));\n # grad_D = sum(gfullMat.*gradSmat,3);\n\n # update the learning rate\n rho_i = min(pars['rho'], pars['rho']*n0/(batch+1))\n\n # update model parameters\n D = D - rho_i*grad_D\n D = D / np.tile(np.linalg.norm(D, axis=0), (D.shape[0], 1))\n model['D'] = D\n\n W = W - rho_i*grad_W\n model['W'] = W\n\n A = A - rho_i*grad_A\n model['A'] = A\n\n # get statistics\n S = lasso(\n trainingdata,\n D if np.isfortran(D) else np.asfortranarray(D),\n **param\n )\n fobj = self.get_objective_lc(D, S, trainingdata, W, HMat, A, QMat, pars['lambda_'], pars['mu'])[0]\n # *** numpy.linalg.LinAlgError: Last 2 dimensions of the array must be square\n # stat['fobj_avg'][iter_] = fobj + 0.5*nu1*np.sum(W**2) + 0.5*nu2*np.sum(A**2)\n fobj_avg[iter_] = fobj + 0.5*pars['nu1']*np.sum(np.multiply(W, W)) + 0.5*pars['nu2']*np.sum(A**2)\n # filename = 'model_{}_{}_{}.npy'.format(iter_, num_bases, pars['dataset'])\n # full_path = os.path.join(generated_data_directory, filename)\n # ValueError: Object arrays cannot be saved when allow_pickle=False\n # the model is being saved in three different files to avoid\n # setting allow_pickle=True when trying to save the whole model\n # np.save(full_path, model, allow_pickle=True, fix_imports=False)\n for key, value in model.items():\n filename = '{}_{}_{}_{}.npy'.format(key, iter_, pars['numBases'], pars['dataset'])\n full_path = os.path.join(settings.GENERATED_DATA_DIRECTORY, filename)\n np.save(full_path, value, allow_pickle=False, fix_imports=False)\n\n toc = time.time()\n print('Iter = {}, Elapsed Time = {}'.format(iter_, toc-tic))\n\n stat_filename = 'stat_{}_{}.json'.format(pars['numBases'], pars['dataset'])\n stat_full_path = os.path.join(settings.GENERATED_DATA_DIRECTORY, stat_filename)\n # saving as JSON to avoid using pickle\n with open(stat_full_path, 'w') as file_:\n json.dump(fobj_avg, file_)\n\n return model, fobj_avg", "def set_params(self, **params):\n if('threshold' in params.keys()):\n self.threshold = params['threshold']\n if('subsample' in params.keys()):\n self.subsample = 
params['subsample']\n if('estimator' in params.keys()):\n self.estimator = params['estimator']\n if('n_folds' in params.keys()):\n self.n_folds = params['n_folds']\n if('stratify' in params.keys()):\n self.stratify = params['stratify']\n if('random_state' in params.keys()):\n self.random_state = params['random_state']\n if('n_jobs' in params.keys()):\n self.n_jobs = params['n_jobs']", "def set_parameters(self, population_size=40, num_tests=5, num_searches=5, num_enabled=17, bonus1=10, bonus2=1,\n **kwargs):\n kwargs.pop('num_searches_best', None)\n super().set_parameters(num_searches_best=0, local_searches=(mts_ls1v1, mts_ls2), **kwargs)", "def run_randomisedLasso(X,Y):\n\n prob_all = list()\n alphas = list()\n for i_y, y in enumerate(Y):\n print(i_y)\n randLasso = ms.MyRandomizedLasso(alpha='cv',n_jobs=1)\n randLasso.fit(X,y)\n\n prob = randLasso.scores_\n prob_all.append(prob)\n \n return prob_all", "def __iter__(self):\n for p in self.param_grid:\n # Always sort the keys of a dictionary, for reproducibility\n modstr = '%s__' % self.modality\n items = sorted([(k.replace('clf__'+modstr, ''), v) for k, v in p.items() if modstr in k])\n if not items:\n yield {}\n else:\n keys, values = zip(*items)\n for v in product(*values):\n params = dict(zip(keys, v))\n yield params", "def hyperparameter_tunning(\n model,\n train_features: pd.DataFrame,\n train_target: pd.Series,\n validation_features: pd.DataFrame,\n validation_target: pd.Series,\n hyperparameter_grid: dict\n) -> dict:\n\n best_estimator = None\n best_hyperparams = {}\n best_metric = 0.0\n\n hp_grid = [this_hp for this_hp in hyperparameter_grid.values()]\n all_combinations_list = list(itertools.product(*hp_grid))\n\n all_combinations_dic = []\n\n for this_combination in all_combinations_list:\n\n this_hp_set = {}\n\n for i, key in enumerate(hyperparameter_grid.keys()):\n\n this_hp_set[key] = this_combination[i]\n\n all_combinations_dic.append(this_hp_set)\n\n for this_hp_set in all_combinations_dic:\n\n this_estimator = model(**this_hp_set)\n\n this_estimator.fit(train_features, train_target)\n\n predictions = this_estimator.predict(validation_features)\n\n evaluation_metric = f1_score(validation_target, predictions)\n\n if evaluation_metric > best_metric:\n\n best_metric = evaluation_metric\n\n best_estimator = this_estimator\n\n best_hyperparams = this_hp_set\n\n return {'best_hyperparameters': best_hyperparams, 'best_model': best_estimator, 'best_metric': best_metric}", "def get_so_results(self, save=False):\n #Read through output files\n parameters=self.get_optimization_parameters(friendly=True)\n parameterRange = range(len(parameters))\n\n results = []\n\n for i in parameterRange:\n result = {\n 'name': parameters[i][0],\n 'max_result': '?',\n 'max_evals' : '?',\n 'max_cpu' : '?',\n 'min_result' : '?',\n 'min_evals' : '?',\n 'min_cpu' : '?',\n }\n #Read min and max files\n for max in [0, 1]:\n iterator = 0\n \n try:\n file = open(os.path.join(self.path, 'output_1.%d.txt' % (2*i + max)),'r')\n output=[None for r in range(4)]\n for f in file.readlines():\n value = f.rstrip('\\n') #Read the file line by line.\n #Line 0: seperator. Line 1: Evals. Line 2: Time. 
Line 3: result\n index=parameterRange.index(i)\n output[iterator] = value\n iterator = (iterator + 1)%4\n file.close()\n evals = output[1].split(' ')[2]\n cpu_time = output[2].split(' ')[2]\n sens_result = output[3]\n \n if max == 0:\n max_str = 'max'\n else:\n max_str = 'min'\n result[max_str + '_result'] = sens_result\n result[max_str + '_cpu'] = cpu_time\n result[max_str + '_evals'] = evals\n \n except:\n raise\n \n results.append(result)\n \n #Finally, if save==True, write these results to file results.txt\n if save:\n if not os.path.isfile(os.path.join(self.path, 'results.txt')):\n results_file = open(os.path.join(self.path, 'results.txt'), 'w')\n header_line = 'Parameter name\\tMin result\\tMax result\\tMin CPU time\\tMin Evals\\tMax CPU time\\tMax Evals\\n'\n results_file.write(header_line)\n for result in results:\n result_line = result['name'] + '\\t' + result['min_result'] + '\\t' + result['max_result'] + '\\t' + result['min_cpu'] + '\\t' + result['min_evals'] + '\\t' + result['max_cpu'] + '\\t' + result['max_evals'] + '\\n'\n results_file.write(result_line)\n results_file.close()\n return results", "def test_get_all_scaled_scores_success(self):\n with mock.patch('score.ScoresGenerator.split_data') as mock_split_data:\n with mock.patch('score.ScoresGenerator.create_category_scaled_score') \\\n as mock_scaled_category:\n with mock.patch('score.ScoresGenerator.create_total_scaled_score') \\\n as mock_scaled_total:\n for test in self.success_get_all_scaled_score_test_params:\n score_test = score.ScoresGenerator()\n score_test.get_all_scaled_scores(test[KEY_INPUT])\n self.assertDictEqual(score_test.SCALED_SCORES, test[KEY_EXPECTED])", "def random_param_tune(self):\n random_grid = {'bootstrap': [True, False],\n 'max_depth': [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, None],\n 'max_features': ['auto', 'sqrt'],\n 'min_samples_leaf': [1, 2, 4],\n 'min_samples_split': [2, 5, 10],\n 'n_estimators': [200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000]}\n\n rf = RandomForestClassifier()\n rf_random = RandomizedSearchCV(estimator=rf, param_distributions=random_grid, n_iter=250, cv=3, verbose=2, n_jobs=-1)\n rf_random.fit(self.X_train, self.y_train)\n self.results.write(str(rf_random.best_params_) + \"\\n\")", "def __init__(self, in_features, out_features):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n\n self.params = {'weight': 0.0001 * np.random.randn(out_features, in_features), 'bias': np.zeros((out_features, 1))}\n self.grads = {'weight': np.zeros((out_features, in_features)), 'bias': np.zeros((out_features, 1))}\n\n\n\n ########################\n # END OF YOUR CODE #\n #######################", "def sample_parameters_given_hyper(self, gen_seed=0):\n if type(gen_seed) is not int:\n raise TypeError(\"gen_seed should be an int\")\n\n rng = random.Random(gen_seed)\n\n hypers = self.get_hypers()\n s = hypers[b's']\n r = hypers[b'r']\n nu = hypers[b'nu']\n m = hypers[b'mu']\n\n rho = rng.gammavariate(nu/2.0, s)\n mu = rng.normalvariate(m, (r/rho)**.5)\n\n assert(rho > 0)\n\n params = {'mu': mu, 'rho': rho}\n\n return params", "def resampleParams(self, caliStep, iterNO=-1):\n names = self.getNames()\n smcSamples = self.smcSamples[iterNO]\n numSamples = self.numSamples\n numThreads = self.threads if self.threads else cpu_count()\n # posterior probability at caliStep is used as the proposal distribution\n proposal = self.posterior[:, caliStep]\n newSmcSamples, newparamsFile, gmm, maxNumComponents = \\\n resampledParamsTable(keys=names, 
smcSamples=smcSamples, proposal=proposal, num=numSamples,\n threads=numThreads,\n maxNumComponents=self.__maxNumComponents, priorWeight=self.__priorWeight,\n covType=self.__covType,\n tableName='smcTable%i.txt' % (iterNO + 1))\n self.smcSamples.append(newSmcSamples)\n self.paramsFiles.append(newparamsFile)\n return gmm, maxNumComponents", "def localGenerateInput(self, model, myInput):\n # create values dictionary\n weight = 1.0\n for key in sorted(self.distDict):\n # check if the key is a comma separated list of strings\n # in this case, the user wants to sample the comma separated variables with the same sampled value => link the value to all comma separated variables\n totDim = self.variables2distributionsMapping[key]['totDim']\n dist = self.variables2distributionsMapping[key]['name']\n reducedDim = self.variables2distributionsMapping[key]['reducedDim']\n weight = 1.0\n if totDim == 1:\n if self.samplingType == 'uniform':\n distData = self.distDict[key].getCrowDistDict()\n if ('xMin' not in distData.keys()) or ('xMax' not in distData.keys()):\n self.raiseAnError(IOError,\"In the Monte-Carlo sampler a uniform sampling type has been chosen;\"\n + \" however, one or more distributions have not specified either the lowerBound or the upperBound\")\n lower = distData['xMin']\n upper = distData['xMax']\n rvsnum = lower + (upper - lower) * randomUtils.random()\n # TODO (wangc): I think the calculation for epsilon need to be updated as following\n # epsilon = (upper-lower)/(self.limit+1) * 0.5\n epsilon = (upper-lower)/self.limit\n midPlusCDF = self.distDict[key].cdf(rvsnum + epsilon)\n midMinusCDF = self.distDict[key].cdf(rvsnum - epsilon)\n weight *= midPlusCDF - midMinusCDF\n else:\n rvsnum = self.distDict[key].rvs()\n for kkey in key.split(','):\n self.values[kkey] = np.atleast_1d(rvsnum)[0]\n self.inputInfo['SampledVarsPb'][key] = self.distDict[key].pdf(rvsnum)\n self.inputInfo['ProbabilityWeight-' + key] = 1.\n elif totDim > 1:\n if reducedDim == 1:\n if self.samplingType is None:\n rvsnum = self.distDict[key].rvs()\n coordinate = np.atleast_1d(rvsnum).tolist()\n else:\n coordinate = np.zeros(totDim)\n for i in range(totDim):\n lower = self.distDict[key].returnLowerBound(i)\n upper = self.distDict[key].returnUpperBound(i)\n coordinate[i] = lower + (upper - lower) * randomUtils.random()\n if reducedDim > len(coordinate):\n self.raiseAnError(IOError, \"The dimension defined for variables drew from the multivariate normal distribution is exceeded by the dimension used in Distribution (MultivariateNormal) \")\n probabilityValue = self.distDict[key].pdf(coordinate)\n self.inputInfo['SampledVarsPb'][key] = probabilityValue\n for var in self.distributions2variablesMapping[dist]:\n varID = utils.first(var.keys())\n varDim = var[varID]\n for kkey in varID.strip().split(','):\n self.values[kkey] = np.atleast_1d(rvsnum)[varDim-1]\n self.inputInfo[f'ProbabilityWeight-{dist}'] = 1.\n else:\n self.raiseAnError(IOError, \"Total dimension for given distribution should be >= 1\")\n\n if len(self.inputInfo['SampledVarsPb'].keys()) > 0:\n self.inputInfo['PointProbability'] = reduce(mul, self.inputInfo['SampledVarsPb'].values())\n else:\n self.inputInfo['PointProbability'] = 1.0\n if self.samplingType == 'uniform':\n self.inputInfo['ProbabilityWeight' ] = weight\n else:\n self.inputInfo['ProbabilityWeight' ] = 1.0 # MC weight is 1/N => weight is one\n self.inputInfo['SamplerType'] = 'MonteCarlo'" ]
[ "0.62443453", "0.58079064", "0.57170784", "0.57169795", "0.5709108", "0.5601513", "0.55566067", "0.5552902", "0.5533448", "0.5526824", "0.54794496", "0.54682755", "0.544979", "0.5448814", "0.5426104", "0.5412882", "0.541072", "0.5403978", "0.53949046", "0.53916514", "0.5387663", "0.5372034", "0.53717315", "0.5364995", "0.5351399", "0.5347868", "0.534615", "0.5345665", "0.5329956", "0.53259486", "0.53240454", "0.5323983", "0.5314613", "0.53139627", "0.5309169", "0.52881014", "0.5285877", "0.52804035", "0.52799034", "0.5278302", "0.5266308", "0.5262987", "0.525092", "0.5245755", "0.52380383", "0.52363837", "0.52333206", "0.5224028", "0.5223622", "0.5222757", "0.5221867", "0.52210194", "0.52095574", "0.5202995", "0.5202813", "0.5197759", "0.51953536", "0.5195153", "0.51931655", "0.51879567", "0.51838243", "0.5176172", "0.51752025", "0.5174726", "0.5172534", "0.5166607", "0.51661754", "0.5159106", "0.5150838", "0.5150531", "0.5150411", "0.5148908", "0.513594", "0.5135714", "0.51343596", "0.5127147", "0.5124052", "0.5120417", "0.5119835", "0.5119542", "0.5117565", "0.5117058", "0.5117041", "0.51164466", "0.5113563", "0.5108728", "0.5107172", "0.5106641", "0.51060176", "0.5105107", "0.5099422", "0.5095983", "0.50932455", "0.5092306", "0.5084274", "0.50756264", "0.5073506", "0.5066847", "0.5061421", "0.5055715", "0.50543064" ]
0.0
-1
Construct generalized extreme value distribution. The parameters `loc`, `scale`, and `concentration` must be shaped in a way that supports broadcasting (e.g. `loc + scale + concentration` is valid).
def __init__(self, loc, scale, concentration, validate_args=False, allow_nan_stats=True, name='GeneralizedExtremeValue'): parameters = dict(locals()) with tf.name_scope(name) as name: dtype = dtype_util.common_dtype([loc, scale, concentration], dtype_hint=tf.float32) loc = tensor_util.convert_nonref_to_tensor( loc, name='loc', dtype=dtype) scale = tensor_util.convert_nonref_to_tensor( scale, name='scale', dtype=dtype) concentration = tensor_util.convert_nonref_to_tensor( concentration, name='concentration', dtype=dtype) dtype_util.assert_same_float_dtype([loc, scale, concentration]) # Positive scale is asserted by the incorporated GEV bijector. self._gev_bijector = gev_cdf_bijector.GeneralizedExtremeValueCDF( loc=loc, scale=scale, concentration=concentration, validate_args=validate_args) # Because the uniform sampler generates samples in `[0, 1)` this would # cause samples to lie in `(inf, -inf]` instead of `(inf, -inf)`. To fix # this, we use `np.finfo(dtype_util.as_numpy_dtype(self.dtype).tiny` # because it is the smallest, positive, 'normal' number. super(GeneralizedExtremeValue, self).__init__( distribution=uniform.Uniform( low=np.finfo(dtype_util.as_numpy_dtype(dtype)).tiny, high=tf.ones([], dtype=dtype), allow_nan_stats=allow_nan_stats), # The GEV bijector encodes the CDF function as the forward, # and hence needs to be inverted. bijector=invert_bijector.Invert( self._gev_bijector, validate_args=validate_args), parameters=parameters, name=name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, loc=0, scale=1, size=None, **kwargs):\n return super().__call__(loc, scale, size=size, **kwargs)", "def __call__(self, loc=0.0, scale=1.0, size=None, **kwargs):\n return super().__call__(loc, scale, size=size, **kwargs)", "def __call__(self, loc=0.0, scale=1.0, size=None, **kwargs):\n return super().__call__(loc, scale, size=size, **kwargs)", "def __call__(self, loc=0.0, scale=1.0, size=None, **kwargs):\n return super().__call__(loc, scale, size=size, **kwargs)", "def __call__(self, loc=0.0, scale=1.0, size=None, **kwargs):\n return super().__call__(loc, scale, size=size, **kwargs)", "def __call__(self, loc=0.0, scale=1.0, size=None, **kwargs):\n return super().__call__(loc, scale, size=size, **kwargs)", "def maxabs_scale(X, *, axis=..., copy=...):\n ...", "def default_loc_scale_fn(\n is_singular=False,\n loc_initializer=init_ops.random_normal_initializer(stddev=0.1),\n untransformed_scale_initializer=init_ops.random_normal_initializer(\n mean=-3., stddev=0.1),\n loc_regularizer=None,\n untransformed_scale_regularizer=None,\n loc_constraint=None,\n untransformed_scale_constraint=None):\n def _fn(dtype, shape, name, trainable, add_variable_fn):\n \"\"\"Creates `loc`, `scale` parameters.\"\"\"\n loc = add_variable_fn(\n name=name + \"_loc\",\n shape=shape,\n initializer=loc_initializer,\n regularizer=loc_regularizer,\n constraint=loc_constraint,\n dtype=dtype,\n trainable=trainable)\n if is_singular:\n return loc, None\n untransformed_scale = add_variable_fn(\n name=name + \"_untransformed_scale\",\n shape=shape,\n initializer=untransformed_scale_initializer,\n regularizer=untransformed_scale_regularizer,\n constraint=untransformed_scale_constraint,\n dtype=dtype,\n trainable=trainable)\n scale = (np.finfo(dtype.as_numpy_dtype).eps +\n nn_ops.softplus(untransformed_scale))\n return loc, scale\n return _fn", "def gaussian_many(\n x: float,\n values: np.array,\n uncertainties: np.array\n) -> np.array:\n center = np.array(values)\n width = np.maximum(np.array(uncertainties), 1e-6)\n coefficient = 1 / np.sqrt(2.0 * math.pi * width * width)\n exponent = -0.5 * ((float(x) - center) ** 2) / (width * width)\n return coefficient * np.exp(exponent)", "def _ScatterXUniformlyExtendedRange(self, num_points, lattice_sizes,\n input_dims):\n x = []\n for _ in range(num_points):\n point = [\n np.random.random() * (lattice_sizes + 1.0) - 1.0\n for _ in range(input_dims)\n ]\n x.append(np.asarray(point))\n if input_dims == 1:\n x.sort()\n return x", "def gaussian(x, amp, wid, cen):\n return amp*np.exp(-(x-cen)**2/(2*wid**2))", "def gaussian(x, amp, cen, wid):\n return amp * exp (-(x-cen)**2/(2*wid**2))", "def gaussian(mu, wid, x):\n return np.exp(-((x - mu) / (0.6005612 * wid))**2)", "def coordExtrema(a):\n # Extreme values of longitude and latitude in the survey.\n longiMin = sp.inf\n latMin = sp.inf\n longiMax = -sp.inf\n latMax = -sp.inf\n for t in range(len(a)):\n if a[t].pktCount > 0:\n arraMin = sp.amin(a[t].longi)\n if arraMin < longiMin:\n longiMin = sp.amin(a[t].longi)\n arraMin = sp.amin(a[t].lat)\n if arraMin < latMin:\n latMin = arraMin\n arraMax = sp.amax(a[t].longi)\n if arraMax > longiMax:\n longiMax = arraMax\n arraMax = sp.amax(a[t].lat)\n if arraMax > latMax:\n latMax = arraMax\n\n ext = cs.emptyClass()\n ext.longiMin = longiMin\n ext.longiMax = longiMax\n ext.latMin = latMin\n ext.latMax = latMax\n return ext", "def MinX(*args, **kwargs):\n return _gdi_.DC_MinX(*args, **kwargs)", "def minmax_scale(X, feature_range=..., *, axis=..., copy=...):\n ...", "def gauss_func(x, 
wid, cen, amp):\n\n return np.exp(-((x-cen)**2.)/(2.*wid**2)) * amp", "def _gaussian(self, c, sigma):\n d = 2*sigma*sigma\n ax = exp(-power(self._xx-self._xx.T[c], 2)/d)\n ay = exp(-power(self._yy-self._yy.T[c], 2)/d)\n return (ax * ay).T # the external product gives a matrix", "def gev_ll(loc,c,scale):\n \n def gev_logp(value):\n scaled = (value - loc) / scale\n logp = -(scale\n + ((c + 1) / c) * tt.log1p(c * scaled)\n + (1 + c * scaled) ** (-1/c))\n alpha = loc - scale / c\n \n # If the value is greater than zero, then check to see if \n # it is greater than alpha. Otherwise, check to see if it \n # is less than alpha.\n bounds = tt.switch(value > 0, value > alpha, value < alpha)\n \n # The returned array will have entries of -inf if the bound\n # is not satisfied. This condition says \"if c is zero or\n # value is less than alpha, return -inf and blow up \n # the log-likelihood.\n return bound(logp, bounds, c != 0)\n return gev_logp", "def xscale(value):\n impl.xscale(**locals())", "def robust_scale(X, *, axis=..., with_centering=..., with_scaling=..., quantile_range=..., copy=..., unit_variance=...):\n ...", "def gen_gaussian_low(img, c_res, c=0.5, vx_size=1):\n\n # Input parsing\n assert (c_res > 0) and (c > 0) and (vx_size > 0)\n assert isinstance(img, np.ndarray) and (len(img.shape) == 3)\n\n # Initialization\n f_vx = c_res / vx_size\n ff_vx = min(img.shape) / (2. * np.pi * f_vx)\n sf_vx = ff_vx / math.sqrt(2. * math.log(1. / c))\n\n # Meshgrid generation\n nx, ny, nz = (img.shape[0] - 1) * .5, (img.shape[1] - 1) * .5, (img.shape[2] - 1) * .5\n if (nx % 1) == 0:\n arr_x = np.concatenate((np.arange(-nx, 0, 1), np.arange(0, nx + 1, 1)))\n else:\n if nx < 1:\n arr_x = np.arange(0, 1)\n else:\n nx = math.ceil(nx)\n arr_x = np.concatenate((np.arange(-nx, 0, 1), np.arange(0, nx, 1)))\n if (ny % 1) == 0:\n arr_y = np.concatenate((np.arange(-ny, 0, 1), np.arange(0, ny + 1, 1)))\n else:\n if ny < 1:\n arr_y = np.arange(0, 1)\n else:\n ny = math.ceil(ny)\n arr_y = np.concatenate((np.arange(-ny, 0, 1), np.arange(0, ny, 1)))\n if (nz % 1) == 0:\n arr_z = np.concatenate((np.arange(-nz, 0, 1), np.arange(0, nz + 1, 1)))\n else:\n if nz < 1:\n arr_z = np.arange(0, 1)\n else:\n nz = math.ceil(nz)\n arr_z = np.concatenate((np.arange(-nz, 0, 1), np.arange(0, nz, 1)))\n [X, Y, Z] = np.meshgrid(arr_x, arr_y, arr_z, indexing='ij')\n X = X.astype(np.float32, copy=False)\n Y = Y.astype(np.float32, copy=False)\n Z = Z.astype(np.float32, copy=False)\n R = np.sqrt(X * X + Y * Y + Z * Z)\n\n # Building\n return np.exp(-R / (2.*sf_vx*sf_vx))", "def test_genextreme_fit(self):\n p = generic.fit(self.genextreme, \"genextreme\")\n np.testing.assert_allclose(p, (0.20949, 297.954091, 75.7911863), 1e-5)", "def _fspecial_gauss_1d(self, size, sigma):\n coords = torch.arange(size).to(dtype=torch.float)\n coords -= size // 2\n g = torch.exp(-(coords ** 2) / (2 * sigma ** 2))\n g /= g.sum()\n return g.reshape(-1)", "def _fspecial_gauss_1d(self, size, sigma):\n coords = torch.arange(size).to(dtype=torch.float)\n coords -= size // 2\n\n g = torch.exp(-(coords ** 2) / (2 * sigma ** 2))\n g /= g.sum()\n\n return g.unsqueeze(0).unsqueeze(0)", "def __new__(cls, minx, miny, minz, maxx, maxy, maxz):\n # Coerce bounds to floats, and nones to infs\n kwargs = locals()\n for b, inf in zip(('min', 'max'),\n (-np.inf, np.inf)):\n for axis in 'xyz':\n bound = b + axis\n value = kwargs[bound]\n kwargs[bound] = inf if value is None else float(value)\n \n kwargs.pop('cls') # must be passed positionally\n return super(cls, cls).__new__(cls, 
**kwargs)", "def __call__(\n self,\n loc: Union[np.ndarray, float],\n scale: Union[np.ndarray, float] = 1.0,\n size: Optional[Union[List[int], int]] = None,\n **kwargs,\n ) -> RandomVariable:\n return super().__call__(loc, scale, size=size, **kwargs)", "def extreme_jacobian_entries(\n m=None, scaled=True, large=1e4, small=1e-4, zero=1e-10, jac=None, nlp=None\n):\n if jac is None or nlp is None:\n jac, nlp = get_jacobian(m, scaled)\n el = []\n for i, c in enumerate(nlp.clist):\n for j in jac[i].indices:\n v = nlp.vlist[j]\n e = abs(jac[i, j])\n if (e <= small and e > zero) or e >= large:\n el.append((e, c, v))\n return el", "def apply_short1(y, A, c, scale=1):\n m = A.nrows()\n y = vector(ZZ, 1/ZZ(scale) * y[-m:])\n a = balanced_lift(y*A)\n e = balanced_lift(y*c)\n return a, e", "def pl_exp_cut(x, mask=None, **params):\n energy_scale = np.ones(x.shape)\n if mask is not None:\n # E -> E * (1 + s)\n energy_scale[mask] += params['Energy_Scale']\n else:\n # apply to all energies\n energy_scale += params['Energy_Scale']\n\n if isinstance(x, u.Quantity):\n energy_scale *= u.dimensionless_unscaled\n\n x_scaled = x * energy_scale\n result = params[\"Prefactor\"] * np.power(x_scaled / params[\"Scale\"], params[\"Index\"])\n result *= np.exp(-x_scaled / params[\"Cutoff\"])\n return result", "def bigaussian(mu, wid, x, m = 0.5):\n lx = x.shape[0]\n ix = np.where(x == mu)[0][0]\n \n y = np.ones(lx)\n y[0:ix] = gaussian(mu, wid * m, x[0:ix])\n y[ix+1:lx] = gaussian(mu, wid * (1 - m), x[ix+1:lx]) \n \n return y", "def gaussian_low(img, c_res, c=0.5, vx_size=1):\n\n # Input parsing\n assert (c_res > 0) and (c > 0) and (vx_size > 0)\n\n # Initialization\n f_c = vx_size / c_res\n s_f = f_c / math.sqrt(2. * math.log(1. / c))\n s = 1. / (2. * np.pi * s_f)\n\n # Filtering\n return sp.ndimage.gaussian_filter(img, s)", "def __call__(self, df, loc=0.0, scale=1.0, size=None, **kwargs):\n return super().__call__(df, loc, scale, size=size, **kwargs)", "def __call__(self, mean=1.0, scale=1.0, size=None, **kwargs):\n return super().__call__(mean, scale, size=size, **kwargs)", "def gaussian(x, peak_x=.0, sigma=1.0, name=''):\n x = x.astype(np.float)\n variables = {'function': gaussian, 'peak_x': peak_x, 'sigma': sigma}\n y = np.exp((-1 * (x - peak_x)**2) / (2 * sigma**2))\n return packer(x, y, variables, name=name)", "def fit(self, data, ndim=None, min_energy=0.9):\n self.mu = np.mean(data, 1)\n data_centered = data - np.mean(data, 1)[:, None]\n co_var = np.cov(data_centered)\n eig_vals, eig_vecs = np.linalg.eig(co_var)\n eig_order = eig_vals.argsort()[::-1] # just to be sure we have eigenvalues ordered\n eig_vals = eig_vals[eig_order] / np.sum(eig_vals)\n eig_vecs = eig_vecs[eig_order]\n if ndim is None:\n explains = np.cumsum(eig_vals)\n ndim = np.where(explains > min_energy)[0][0]\n logging.info('PcaTransformer selected %i dims (original: %i), which explains %i%% of the data' % (ndim, data_centered.shape[0], np.round(explains[ndim]*100)))\n\n self.w = eig_vecs[0:ndim, :]\n return self.w", "def _ei(x, gp, y_max, xi):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n mean, std = gp.predict(x, return_std=True)\n\n z = (mean - y_max - xi)/std\n return (mean - y_max - xi) * norm.cdf(z) + std * norm.pdf(z)", "def __init__(self,\n loc,\n concentration,\n validate_args=False,\n allow_nan_stats=True,\n name=\"InverseGaussian\"):\n parameters = dict(locals())\n with tf.name_scope(name, values=[loc, concentration]):\n self._loc = tf.convert_to_tensor(loc, name=\"loc\")\n self._concentration = 
tf.convert_to_tensor(concentration,\n name=\"concentration\")\n with tf.control_dependencies([\n tf.assert_positive(self._loc),\n tf.assert_positive(self._concentration)] if validate_args else []):\n self._loc = tf.identity(self._loc, name=\"loc\")\n self._concentration = tf.identity(self._concentration,\n name=\"concentration\")\n tf.assert_same_float_dtype([self._loc, self._concentration])\n super(InverseGaussian, self).__init__(\n dtype=self._loc.dtype,\n reparameterization_type=tf.distributions.NOT_REPARAMETERIZED,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n parameters=parameters,\n graph_parents=[self._loc, self._concentration],\n name=name)", "def gaussian(\n x: float,\n measurement: 'mstats.ValueUncertainty',\n max_sigma: float = 10.0\n) -> float:\n center = measurement.value\n width = measurement.uncertainty\n width = max(width, 1e-6)\n\n if x <= (center - max_sigma * width) or x >= (center + max_sigma * width):\n # Don't calculate values outside a \"reasonable\" 10 sigma range\n return 0.0\n\n coefficient = 1 / math.sqrt(2.0 * math.pi * width * width)\n exponent = -0.5 * ((float(x) - center) ** 2) / (width * width)\n\n return coefficient * math.exp(exponent)", "def make_gaussian(size, sigma=10, center=None):\n\n x = np.arange(0, size[1], 1, float)\n y = np.arange(0, size[0], 1, float)\n y = y[:, np.newaxis]\n\n if center is None:\n x0 = y0 = size[0] // 2\n else:\n x0 = center[0]\n y0 = center[1]\n\n return np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / sigma ** 2)", "def external2internal(xe,bounds):\n\n xi = np.empty_like(xe)\n\n for i,(v,bound) in enumerate(zip(xe,bounds)):\n \n a = bound[0] # minimum\n b = bound[1] # maximum\n\n if a == None and b == None: # No constraints\n xi[i] = v\n\n elif b == None: # only min\n xi[i] = np.sqrt( (v-a+1.)**2.-1 )\n\n elif a == None: # only max\n xi[i] = np.sqrt( (b-v+1.)**2.-1 )\n\n else: # both min and max\n xi[i] = np.arcsin( (2.*(v-a)/(b-a))-1.)\n\n return xi", "def pl_exp_cut_low_high(x, **params):\n result = (x / params['Scale_CR']) ** (-params['Index_CR'])\n result *= params['Prefactor_CR']\n result *= np.exp(-x / params['Emax_CR']) * np.exp(-params['Emin_CR'] / x)\n return result", "def gaussian(dims: Tuple[int, int], cutoff_freq: float) -> np.ndarray:\n # create grid\n m, n = [(dim - 1) / 2 for dim in dims]\n yy, xx = np.ogrid[-m : m + 1, -n : n + 1]\n\n # compute transfer function\n tf = np.exp(-(np.power(xx, 2) + np.power(yy, 2)) / (2 * np.power(cutoff_freq, 2)))\n\n # normalize and return transfer func\n return (tf - np.max(tf)) / (np.max(tf) - np.min(tf))", "def scale(X, *, axis=..., with_mean=..., with_std=..., copy=...):\n ...", "def define_cx_unscaled(self, _cx_scaled: list[MX | SX, ...], scaling: np.ndarray) -> list[MX | SX, ...]:\n _cx = [self.nlp.cx() for _ in range(len(_cx_scaled))]\n for node_index in range(len(_cx_scaled)):\n _cx[node_index] = [self.nlp.cx() for _ in range(len(_cx_scaled[0]))]\n\n for node_index in range(len(_cx_scaled)):\n for j in range(len(_cx_scaled[0])):\n _cx[node_index][j] = _cx_scaled[node_index][j] * scaling\n return _cx", "def make_uniform_x(self, x_resolution, min_x = None, max_x = None, bin_above = 2.0, **kwargs):\n \n if min_x is None or max_x is None:\n a, b = self.get_min_max_x(**kwargs)\n if min_x is None:\n min_x = a\n if max_x is None:\n max_x = b\n \n new_x = numpy.arange(min_x, max_x + x_resolution / 2, x_resolution)\n \n for m in range(len(self.mess)):\n if m not in exclude and self.mess[m][\"class\"] not in exclude:\n resolution = (numpy.amax(m.x) 
- numpy.amin(m.x)) / len(m.x)", "def makeGaussian(size, fwhm, sigma, center=None):\n\n x = np.arange(0, size, 1, float)\n y = x[:,np.newaxis]\n\n if center is None:\n x0 = y0 = size // 2\n else:\n x0 = center[0]\n y0 = center[1]\n \n #return (np.exp(-4*np.log(2) * ((x-x0)**2 + (y-y0)**2) / fwhm**2)) #approximation using exponenial functions\n return ((1/(2*np.pi*sigma**2))*np.exp(-((xx)**2 + (yy)**2)/(2*sigma**2))) # symmetric 2D Gaussian distribution", "def __call__(self, b, loc=0.0, scale=1.0, size=None, **kwargs):\n return super().__call__(b, loc, scale, size=size, **kwargs)", "def sigma_nfw_rmin_rmax(R, r_min, r_max, m_x, r_x, c_x, rho_scale, **kwargs):\n if r_min < R:\n return sigma_nfw_rmax(R=R, r_max=r_max, m_x=rho_scale*m_x, r_x=r_x, c_x=c_x)\n\n r_s = r_x / c_x\n rho_s = (\n rho_scale * m_x / (4 * np.pi * r_x**3) * c_x**3\n / (np.log(1 + c_x) - c_x / (1 + c_x))\n )\n\n prefactor = 2 * r_s * rho_s\n a = r_min / r_s\n b = r_max / r_s\n c = R / r_s\n\n sigma = prefactor * (\n - 1. / (2 * (1 + a) * (1 + b) * (1 - c**2)**1.5) * (\n + (2 + 2 * a) * ((c**2 - b**2) * (c**2 - 1))**0.5\n - (2 + 2 * b) * ((c**2 - a**2) * (c**2 - 1))**0.5\n + (1 + a) * (1 + b) * np.log(\n (((c**2 - a**2) * (c**2 - 1))**0.5 + (a + c**2))\n / (((c**2 - a**2) * (c**2 - 1))**0.5 - (a + c**2))\n * (((c**2 - b**2) * (c**2 - 1))**0.5 - (b + c**2))\n / (((c**2 - b**2) * (c**2 - 1))**0.5 + (b + c**2))\n )\n )\n )\n\n return sigma", "def get_geo_extents(nc, possible_units, std_name, axis_name, short_name):\n\n geo_extent_vars = {}\n geo_extent_units = []\n\n # variables must have units\n for var in nc.get_variables_by_attributes(units=lambda x: x is not None):\n \n geo_extent_vars[var.name] = 0\n # units in this set\n if var.units in possible_units:\n geo_extent_vars[var.name] += 1\n geo_extent_units.append(var.units)\n \n\n # standard name\n if hasattr(var, 'standard_name') and var.standard_name == std_name:\n geo_extent_vars[var.name] += 1\n geo_extent_units.append(var.units)\n \n # axis of \"X\"\n if hasattr(var, 'axis') and var.axis == axis_name:\n geo_extent_vars[var.name] += 1\n geo_extent_units.append(var.units)\n \n\n if var.name == std_name or var.name == short_name:\n geo_extent_vars[var.name] += 1\n geo_extent_units.append(var.units)\n\n if len(geo_extent_vars) == 0:\n return\n\n # filter out any zero scores\n geo_extent_vars = dict(filter(lambda x: x[1]>0, geo_extent_vars.items()))\n\n # sort by criteria passed\n final_geo_vars = sorted(geo_extent_vars, key=lambda x: geo_extent_vars[x], reverse=True)\n\n obs_mins = [np.nanmin(nc.variables[var]) for var in final_geo_vars if not np.isnan(nc.variables[var]).all()]\n obs_maxs = [np.nanmax(nc.variables[var]) for var in final_geo_vars if not np.isnan(nc.variables[var]).all()]\n\n # Let's just pick one\n geo_vals = nc.variables[final_geo_vars[0][:]]\n if geo_vals.size == 1:\n obs_res = [0.0]\n else:\n obs_res = [np.nanmean(np.diff(nc.variables[var])) for var in final_geo_vars if not np.isnan(nc.variables[var]).all()]\n\n geo_min = round(float(min(obs_mins)), 5)\n geo_max = round(float(max(obs_maxs)), 5)\n geo_extent_units = [nc.variables[k].units for k, v in geo_extent_vars.items()][0]\n geo_res = \"{} {}\".format(round(float(abs(np.mean(obs_res))), 5), geo_extent_units)\n\n print('<attribute name=\"geospatial_{}_min\" value=\"{}\" />'.format(short_name, geo_min))\n print('<attribute name=\"geospatial_{}_max\" value=\"{}\" />'.format(short_name, geo_max))\n print('<attribute name=\"geospatial_{}_resolution\" value=\"{}\" />'.format(short_name, geo_res))\n 
print('<attribute name=\"geospatial_{}_units\" value=\"{}\" />'.format(short_name, geo_extent_units))", "def map_scalar(self, source: Self, scalar: dict,\n cube_scale: float = 20., distances: bool = False,\n max_distance: float = None) -> np.ndarray:\n # create a cube for extrapolation\n smin, smax = source.extents\n tmin, tmax = self.extents # target\n min = np.minimum(smin, tmin).flatten()\n max = np.maximum(smax, tmax).flatten()\n\n avg, cube = self.__cuboid(min, max, scale=cube_scale)\n\n # pair original coordinates to scalar values and add the cuboid\n sdata = [(nid, source[nid].coors, scalar[nid]) for nid in source.keys()]\n spoints = np.array([x[1] for x in sdata], dtype=float)\n svalues = np.array([x[2] for x in sdata], dtype=float)\n mean = np.mean(svalues, axis=0)\n cube_values = np.array([mean] * cube.shape[0], dtype=float)\n spoints = np.concatenate((spoints, cube), axis=0)\n svalues = np.concatenate((svalues, cube_values), axis=0)\n\n # prepare new nodes and their coordinates\n tdata = [(nid, self[nid].coors) for nid in self.keys()]\n tids = [x[0] for x in tdata]\n tpoints = np.array([x[1] for x in tdata], dtype=float)\n\n # map values to new nodes\n grid = scipy.interpolate.griddata(spoints, svalues, tpoints, method=\"linear\")\n\n\n # reformat mapped values to a dict {nid, value}\n results = dict(list(zip(tids, grid)))\n\n # if closest distances are reuqested\n if distances:\n tree = scipy.spatial.cKDTree(spoints)\n xi = scipy.interpolate.interpnd._ndim_coords_from_arrays(tpoints,\n ndim=tpoints.shape[1])\n distances, indexes = tree.query(xi)\n\n # Copy original result but mask missing values with NaNs\n if max_distance:\n grid2 = grid[:]\n grid2[distances > max_distance] = np.nan\n grid = grid2\n distances = dict(list(zip(tids, distances)))\n\n else:\n distances = None\n\n if distances:\n return results, distances\n else:\n return results", "def initializeDistribution(self):\n self.minVal = min(math.exp(self.upperBound),math.exp(self.lowerBound))\n self.maxVal = max(math.exp(self.upperBound),math.exp(self.lowerBound))", "def _etaE_cool(self,x):\n return self._eta_sfr_scaling(x,'E_cool')", "def analytic(x, t, D, x0, xend, logx=False, c_s=1, use_log2=False):\n import scipy.special\n if t.ndim == 1:\n t = t.reshape((t.size, 1))\n expb = (lambda arg: 2**arg) if use_log2 else np.exp\n x = expb(x) if logx else x\n return c_s * scipy.special.erfc(x/(2*(D*t)**0.5))", "def _FSpecialGauss(size, sigma):\n radius = size // 2\n offset = 0.0\n start, stop = -radius, radius + 1\n if size % 2 == 0:\n offset = 0.5\n stop -= 1\n x, y = np.mgrid[offset + start:stop, offset + start:stop]\n assert len(x) == size\n g = np.exp(-((x**2 + y**2)/(2.0 * sigma**2)))\n return g / g.sum()", "def mix_2d_fit(self):\n xgg, ygg= np.meshgrid(self.xgrid, self.ygrid, indexing='ij')\n print 'Finding minimum N and sigma'\n # Coarsest grid\n N0Arr = np.arange(10)*5000. + 5000.\n sigmaArr= np.arange(5)*.1 + 3.8\n pArr = np.arange(10)*.1 + .1\n # CArr = \n pmin, Cmin, Nmin, sigma_min, rms = _mix_2d_fit(xgg, ygg, self.nArr, N0Arr, sigmaArr, pArr, CArr, normtype=normtype)\n # # Coarsest grid\n # N0Arr = np.arange(10)*1000. + Nmin - 2500.\n # sigmaArr= np.arange(50)*1. 
+ sigma_min - 1.\n # Nmin, sigma_min, rms= _gauss_2d_fit(xgg, ygg, self.nArr, N0Arr, sigmaArr, normtype=normtype)\n # # finest grid\n # N0Arr = np.arange(10)*dN + Nmin - 500.\n # sigmaArr= np.arange(50)*dsigma + sigma_min - 0.5\n # Nmin, sigma_min, rms= _gauss_2d_fit(xgg, ygg, self.nArr, N0Arr, sigmaArr, normtype=normtype)\n # self.Ngauss = Nmin\n # self.sigma = sigma_min\n # self.rms2d = rms\n print 'End finding minimum N and sigma'\n print 'N =', Nmin,' sigma =', sigma_min \n return", "def _make_gaussian_maps(x, gaussians, size=None, scaling=6.):\n if size is None:\n size = x.shape[-2:]\n bs = x.shape[0]\n else:\n size = [size] * 2\n bs = 1\n dtype = x.dtype\n device = x.device\n\n gaussian_maps = []\n map_template = torch.ones(*size, dtype=dtype, device=device)\n meshgrids = torch.meshgrid(\n [torch.linspace(0, 1, size[0], dtype=dtype, device=device),\n torch.linspace(0, 1, size[1], dtype=dtype, device=device),])\n\n for gaussian_idx, yx_mu_logstd in enumerate(torch.unbind(gaussians)):\n map = map_template.clone()\n for mu_logstd, mgrid in zip(yx_mu_logstd, meshgrids):\n mu = mu_logstd[0]\n std = torch.exp(mu_logstd[1])\n map *= torch.exp(-((mgrid - mu) / std) ** 2 / 2)\n\n map *= scaling\n gaussian_maps.append(map)\n\n gaussian_maps = torch.stack(gaussian_maps)\n gaussian_maps = gaussian_maps.unsqueeze(0).expand(bs, -1, -1, -1)\n return gaussian_maps", "def scatter_min(a, slices, value):\n a.scatter_min(slices, value)", "def transform(self, original_input):\n return super(ExponentialDimension, self).transform(np.exp(original_input))", "def extrapolate_nearest(self, known_coords, known_values, extrap_coords, groupname):\n\t\t#First need to reshape known_coords and known_values\n\t\tn_params = self.signal[groupname]['dimension']\n\t\tknown_coords = np.reshape( known_coords, (-1,n_params) )\n\t\tknown_values = np.reshape( known_values, (-1) )\n\t\t\t\t\n\t\t#Different methods if dimension is (!/=)= 1\n\t\tif self.signal[groupname]['dimension'] == 1:\n\t\t\t#Reshape extrap_coords\n\t\t\textrap_coords = np.reshape(extrap_coords, (-1))\n\t\t\t\n\t\t\t#Initialize array for extrapolation values\n\t\t\textrap_values = np.ones(len(extrap_coords))*np.nan\n\t\t\t\n\t\t\t#Find minimum and maximum coordinates and the respective values at those coordinates\n\t\t\tmin_coord_index = np.argmin(known_coords)\n\t\t\tmin_coord = known_coords[min_coord_index]\n\t\t\tmin_coord_value = known_values[min_coord_index]\n\t\t\t\n\t\t\tmax_coord_index = np.argmax(known_coords)\n\t\t\tmax_coord = known_coords[max_coord_index]\n\t\t\tmax_coord_value = known_values[max_coord_index]\n\t\t\t\n\t\t\t#Do the nearest-bound extrapolation\n\t\t\textrap_values[np.absolute(extrap_coords - max_coord) >= np.absolute(extrap_coords - min_coord)] = min_coord_value\n\t\t\textrap_values[np.absolute(extrap_coords - max_coord) < np.absolute(extrap_coords - min_coord)] = max_coord_value\n\t\t\t\n\t\telif self.signal[groupname]['dimension'] > 1:\n\t\t\textrap_values = griddata(known_coords, known_values, extrap_coords, method='nearest')\n\t\t\n\t\treturn extrap_values", "def __init__(self,\n loc,\n scale,\n num_probit_terms_approx=2,\n gauss_hermite_scale_limit=None,\n gauss_hermite_degree=20,\n validate_args=False,\n allow_nan_stats=True,\n name='LogitNormal'):\n parameters = dict(locals())\n num_probit_terms_approx = int(num_probit_terms_approx)\n if num_probit_terms_approx < 1 or num_probit_terms_approx > 8:\n raise ValueError(\n 'Argument `num_probit_terms_approx` must be an integer between '\n '`1` and `8` (inclusive).')\n 
self._num_probit_terms_approx = num_probit_terms_approx\n self._gauss_hermite_scale_limit = gauss_hermite_scale_limit\n self._gauss_hermite_degree = gauss_hermite_degree\n with tf.name_scope(name) as name:\n super(LogitNormal, self).__init__(\n distribution=normal_lib.Normal(loc=loc, scale=scale),\n bijector=sigmoid_bijector.Sigmoid(),\n validate_args=validate_args,\n parameters=parameters,\n name=name)", "def extreme_jacobian_rows(\n m=None, scaled=True, large=1e4, small=1e-4, jac=None, nlp=None\n):\n # Need both jac for the linear algebra and nlp for constraint names\n if jac is None or nlp is None:\n jac, nlp = get_jacobian(m, scaled)\n el = []\n for i, c in enumerate(nlp.clist):\n norm = 0\n # Calculate L2 norm\n for j in jac[i].indices:\n norm += jac[i, j] ** 2\n norm = norm**0.5\n if norm <= small or norm >= large:\n el.append((norm, c))\n return el", "def create_exgauss_lookup_table(self):\n return self.exgauss_cdf_nparray(range(self.xmin,self.xmax, self.dx)).tolist(), range(self.xmin,self.xmax, self.dx)", "def argmin(self, values):\n return self.aggregate(values, \"argmin\")", "def gauss_kernel(X, test_locs, X_org, test_locs_org, sigma, sigma0, epsilon):\r\n DXT = Pdist2(X, test_locs)\r\n DXT_org = Pdist2(X_org, test_locs_org)\r\n # Kx = torch.exp(-(DXT / sigma0))\r\n Kx = (1 - epsilon) * torch.exp(-(DXT / sigma0) - DXT_org / sigma) + epsilon * torch.exp(-DXT_org / sigma)\r\n return Kx", "def makeGaussian(size, fwhm, center=None):\n\n x = sp.arange(0, size, 1, float)\n y = x[:,sp.newaxis]\n\n if center is None:\n x0 = y0 = size // 2\n else:\n x0 = center[0]\n y0 = center[1]\n\n return sp.exp(-4*sp.log(2) * ((x-x0)**2 + (y-y0)**2) / fwhm**2)", "def extreme_values(self, extreme):\n\n\t\tif extreme.lower() == 'min':\n\t\t\treturn data.min()\n\t\telif extreme.lower() == 'max':\n\t\t\treturn data.max()\n\t\telse:\n\t\t\tassert 'Invalid Parameter !'", "def exponentiate_and_normalize(values, dim=0):\n\n return torch.exp(lognormexp(values, dim=dim))", "def exponentiate_and_normalize(values, dim=0):\n\n return torch.exp(lognormexp(values, dim=dim))", "def optimal_scale(n,pred,true):\n def ECE(n,pred,true):\n n_bins = n\n bins = [[] for i in range(n_bins)]\n\n # computing the bins\n for i in range(pred.shape[0]):\n for j in range(n_bins):\n if pred[i].max()>j*(1./n_bins) and pred[i].max()<=(j+1)*(1./n_bins):\n bins[j].append(i)\n # computing the average accuracy over the bins\n cum_sum = [0 for i in range(n_bins)]\n for j in range(n_bins):\n for i in range(len(bins[j])):\n if np.argmax(pred[bins[j][i]]) == np.argmax(true[bins[j][i]]):\n cum_sum[j]+= 1./len(bins[j])\n # computing the ECE metric as presented in the paper\n ECE = 0.\n for j in range(n_bins):\n ECE+= abs((j+1./2)*(1./n_bins)-cum_sum[j])*(float(len(bins[j]))/2000.)\n return(ECE)\n\n # the range of temperature for which we evaluate the ECE\n Ts = np.linspace(1,3,30)\n l = []\n for i in range(30):\n print(Ts[i])\n scaled = temperature_scaling(pred,Ts[i])\n l.append(ECE(n,scaled,true))\n l = np.array(l)\n print(l)\n res = temperature_scaling(pred,Ts[np.argmin(l)])\n return(res,Ts[np.argmin(l)])", "def argmin(self, values: pdarray) -> Tuple[groupable, pdarray]:\n k, v = self.aggregate(values, \"argmin\")\n return k, cast(pdarray, v)", "def loguniform_dist(low, high, base=10):\n return ExponentiateDistribution(sp_uniform(low, high - low), base=base)", "def get_x_hs_out_min_c(\n x_d_hs_in: np.ndarray, q_hs_max_cl: np.ndarray, v_d_supply: np.ndarray) -> np.ndarray:\n\n # air density, kg/m3\n rho = get_air_density()\n\n # latent heat of 
evaporation, kJ/kg\n l_wtr = get_evaporation_latent_heat()\n\n return x_d_hs_in - q_hs_max_cl / (rho * l_wtr * np.sum(v_d_supply, axis=0)) * 10 ** 3", "def _f_special_gauss(size, sigma):\n radius = size // 2\n offset = 0.0\n start, stop = -radius, radius + 1\n if size % 2 == 0:\n offset = 0.5\n stop -= 1\n x, y = np.mgrid[offset + start:stop, offset + start:stop]\n assert len(x) == size\n g = np.exp(-((x ** 2 + y ** 2) / (2.0 * sigma ** 2)))\n g /= g.sum()\n return tf.constant(g, dtype=tf.float32)", "def test_sc_agg_econ_scale():\n data = {'capital_cost': 53455000,\n 'fixed_operating_cost': 360000,\n 'fixed_charge_rate': 0.096,\n 'variable_operating_cost': 0}\n\n with tempfile.TemporaryDirectory() as td:\n gen_temp = os.path.join(td, 'ri_my_pv_gen.h5')\n shutil.copy(GEN, gen_temp)\n\n with h5py.File(gen_temp, 'a') as res:\n for k, v in data.items():\n arr = np.full(res['meta'].shape, v)\n res.create_dataset(k, res['meta'].shape, data=arr)\n res[k].attrs['scale_factor'] = 1.0\n\n eqn = '2 * capacity ** -0.3'\n s = SupplyCurveAggregation.summary(EXCL, gen_temp, TM_DSET,\n excl_dict=EXCL_DICT,\n res_class_dset=RES_CLASS_DSET,\n res_class_bins=RES_CLASS_BINS,\n data_layers=DATA_LAYERS,\n gids=list(np.arange(10)),\n max_workers=1, cap_cost_scale=eqn)\n\n aep = s['capacity'] * s['mean_cf'] * 8760 * 1000\n\n true_raw_lcoe = ((data['fixed_charge_rate'] * data['capital_cost']\n + data['fixed_operating_cost'])\n / aep + data['variable_operating_cost'])\n true_raw_lcoe *= 1000 # convert $/kwh -> $/MWh\n\n # Back out the fcr * capital_cost term ($)\n x = ((s['raw_lcoe'] / 1000 - data['variable_operating_cost'])\n * aep - data['fixed_operating_cost'])\n eval_inputs = {k: s[k].values.flatten() for k in s.columns}\n # pylint: disable=eval-used\n scalars = eval(str(eqn), globals(), eval_inputs)\n s['scalars'] = scalars\n x *= scalars\n true_scaled_lcoe = ((x + data['fixed_operating_cost'])\n / aep + data['variable_operating_cost'])\n true_scaled_lcoe *= 1000 # convert $/kwh -> $/MWh\n\n assert np.allclose(true_scaled_lcoe, s['mean_lcoe'])\n assert np.allclose(true_raw_lcoe, s['raw_lcoe'])\n s = s.sort_values('capacity')\n assert all(s['mean_lcoe'].diff()[1:] < 0)\n for i in s.index.values:\n if s.loc[i, 'scalars'] < 1:\n assert s.loc[i, 'mean_lcoe'] < s.loc[i, 'raw_lcoe']\n else:\n assert s.loc[i, 'mean_lcoe'] >= s.loc[i, 'raw_lcoe']", "def fit_gaussian_1d(image: Image, params_0=None, bounds=None, axis=0):\n arr, arr_e = image.array, image.array_e\n ordinate = arr.mean(axis=axis)\n\n # Now we can generate an array of errors.\n ordinate_e = np.sqrt(np.mean(arr_e**2, axis=axis))\n\n # Setting default values.\n if params_0 is None:\n # Now we generate the initial values for our Gaussian fit.\n # These values are crucial – as this is a high dimensional fitting\n # problem, it is likely that we'll get stuck in a local minimum if these\n # aren't good.\n # Guess that the Gaussian mean is at the most intense mean pixel value.\n mean0 = np.argmax(ordinate)\n # Guess that the standard deviation is a single pixel.\n sdev0 = 1\n # Guess that the background (offset) is the median pixel value.\n offset0 = np.median(ordinate)\n # Guess that the scale is equal to the largest recorded value.\n scale0 = arr.max()\n params_0 = [mean0, sdev0, offset0, scale0]\n if bounds is None:\n bounds = ([0, 0, 0, 0],\n [ordinate.shape[0], ordinate.shape[0], scale0, scale0 * 10])\n\n # Perform the fitting.\n fit_popt_pcov = curve_fit(\n univariate_normal,\n np.arange(0, ordinate.shape[0], 1), ordinate, bounds=bounds,\n sigma=ordinate_e, 
p0=params_0, maxfev=2000 * (len(params_0) + 1))\n\n fit_info = FitInfo(fit_popt_pcov[0], fit_popt_pcov[1], univariate_normal)\n\n # Determine uncertainty from covarience matrix.\n # Note: the stddev of the fit Gaussian can be accessed via popt[1].\n p_sigma = np.sqrt(np.diag(fit_info.pcov))\n\n return BkgSubInfo(fit_info.popt[2], p_sigma[2], fit_gaussian_1d, fit_info)", "def generalized_supp(self, X, v, log_hyperparam):\n n_samples = int(self.dual_var.shape[0] / 2)\n C = np.exp(log_hyperparam[0])\n alpha = self.dual_var[0:n_samples] - \\\n self.dual_var[n_samples:(2 * n_samples)]\n full_supp = np.logical_and(\n np.logical_not(\n np.isclose(np.abs(alpha), 0)),\n np.logical_not(\n np.isclose(np.abs(alpha), C)))\n return v[full_supp]", "def gaussian(pars, x):\n A, b, mu, sigma = pars\n # return b + A/(np.sqrt(2*np.pi)*sigma**2) \\\n return b + A \\\n * np.exp(-.5*(x - mu)**2/sigma**2)", "def spatial_expval(map_):\n map_ = map_ / np.sum(map_)\n x, y = np.meshgrid(np.arange(map_.shape[1]), np.arange(map_.shape[0]))\n\n return np.sum(map_ * x), np.sum(map_ * y)", "def rvs(self, *args, **kwds):\n rndm = kwds.pop('random_state', None)\n args, loc, scale, size = self._parse_args_rvs(*args, **kwds)\n cond = np.logical_and(self._argcheck(*args), (scale >= 0))\n if not np.all(cond):\n raise ValueError(\"Domain error in arguments.\")\n\n if np.all(scale == 0):\n return loc * np.ones(size, 'd')\n\n # extra gymnastics needed for a custom random_state\n if rndm is not None:\n random_state_saved = self._random_state\n self._random_state = check_random_state(rndm)\n\n if isinstance(size, tuple):\n if len(size) > 0:\n raise ValueError(size)\n else:\n pass\n elif not isinstance(size, int):\n raise ValueError(size)\n\n low = np.log(args[0] - 0.4999)\n high = np.log(args[1] + 0.4999)\n size = self._random_state.randint(args[2], args[3] + 1)\n self._size = size\n vals = np.rint(\n np.exp(self._random_state.uniform(low=low, high=high, size=size))\n ).astype(int)\n\n vals = vals * scale + loc\n\n # do not forget to restore the _random_state\n if rndm is not None:\n self._random_state = random_state_saved\n\n vals = tuple([int(val) for val in vals])\n\n return vals", "def Gaussiankernel(size, sigma=1): \n size = int(size) // 2\n # create x grid and y grid\n x, y = np.mgrid[-size:size+1, -size:size+1] \n # gaussian distribution formula\n normal = 1 / np.sqrt(2.0 * np.pi * sigma**2)\n g = np.exp(-((x**2 + y**2) / (2.0*sigma**2))) * normal\n \n return g/g.sum()", "def new_scaled_energy(run, smoother=\"pol2\"):\n get_from_ccdb(run)\n endpoint_calib = ROOT.pstags().endpoint_calib\n endpoint_energy = ROOT.pstags().endpoint_energy\n fout = open(f\"new_scaled_energy.{run}\", \"w\")\n Eps_tagm = ROOT.gROOT.FindObject(\"Epair_Etagm_fit\")\n if not Eps_tagm:\n Eps_tagm = ROOT.gROOT.FindObject(\"Epair_Etagm\")\n if not Eps_tagm:\n Eps_tagm = plot_Etagm_Epair(run)[0]\n Eps_tagm.Fit(smoother)\n for func in Eps_tagm.GetListOfFunctions():\n ntagm = Eps_tagm.GetNbinsX()\n for i in range(ntagm):\n Elow = Eps_tagm.GetXaxis().GetBinLowEdge(102-i)\n Ehigh = Eps_tagm.GetXaxis().GetBinUpEdge(102-i)\n f = [(endpoint_calib - endpoint_energy + func.Eval(E)) /\n endpoint_calib for E in (Elow, Ehigh)]\n fout.write(f\"{i+1} {f[0]} {f[1]}\\n\")\n break", "def initiategaussian(sd, x0):\n y = np.exp(-x**2/(2*sd**2))\n return y", "def mms_scale(values):\r\n mms = MinMaxScaler()\r\n return mms.fit_transform(values)", "def gaussian(x, amplitude=1.0, center=0.0, sigma=1.0):\n \n return (amplitude/(np.sqrt(2.*np.pi)*sigma)) * 
exp(-np.power((1.0*x-center)/(sigma), 2.)/2.)", "def expmap(self, v, x, c):\n c = self.truncate_c(c)\n v_norm = self.clip(tf.norm(v, ord=2, axis=-1, keepdims=True))\n second_term = TanC(self._lambda_x(x, c) * v_norm / 2.0, c) * v / v_norm\n gamma = self._mobius_add(x, second_term, c)\n return gamma", "def arg_min_scalar(objective, **kwargs):\n return minimize_scalar(objective, **kwargs).x", "def elastic_transform(X, min_alpha=36, max_alpha=38, min_sigma=5, max_sigma=6, random_state=None, n_jobs=1):\n if random_state is None:\n rng = np.random\n else:\n rng = np.random.RandomState(random_state)\n alphas = rng.uniform(min_alpha, max_alpha, size=X.shape[0])\n sigmas = rng.uniform(min_sigma, max_sigma, size=X.shape[0])\n X_elas = Parallel(n_jobs=n_jobs)(delayed(elastic_transform_one)(X[i], alphas[i], sigmas[i]) for i in range(X.shape[0]))\n return np.array(X_elas, dtype='float32')", "def logit_normal_mean_gh(loc, scale, deg):\n # We want to integrate\n # A = \\int_-inf^inf sigmoid(x) * Normal(loc, scale).pdf(x) dx\n # To bring it into the right form for Gauss-Hermite quadrature,\n # we make the substitution y = (x - loc) / scale, to get\n # A = (1/sqrt(2*pi)) * \\int_-inf^inf [\n # sigmoid(y * scale + loc) * exp(-1/2 y**2) dy]\n grid, weights = onp.polynomial.hermite_e.hermegauss(deg)\n grid = tf.cast(grid, dtype=loc.dtype)\n weights = tf.cast(weights, dtype=loc.dtype)\n normalizer = tf.constant(onp.sqrt(2 * onp.pi), dtype=loc.dtype)\n values = tf.sigmoid(grid * scale[..., tf.newaxis] + loc[..., tf.newaxis])\n return tf.reduce_sum(values * weights, axis=-1) / normalizer", "def spec_conv_gauss_custom(x, y, resolution=None, fwhm=None, dwindow=2, verb=True):\n # Select width of the Gaussian\n if (resolution is None) and (fwhm is None):\n sys.exit('Must specify either `resolution` or `fwhm`.')\n if resolution is None:\n # Check fwhm is a valid number\n if isinstance(fwhm, (int, float)): \n if verb: print('Use the same fwhm for each datapoint: {}'.format(fwhm))\n else: sys.exit('fwhm not valid {}'.format(fwhm))\n else:\n if verb: print('Use resolution: {} (different fwhm each datapoint)'.format(resolution))\n # Compute fwhm for each datapoint\n fwhm = x / resolution\n #sigma_new = fwhm / (2 * np.sqrt(2 * np.log(2)))\n #sigma = sigma_new\n return conv_gauss_custom(x, y, fwhm, dwindow=dwindow)", "def argmin2(self, cvars=None, ctuple=None):\n if (cvars is None):\n return self.v.ind2sub(self.t.argmin())\n ax = tuple(map(lambda x:ctuple[cvars.index(x)] if x in cvars else slice(None) ,self.v))\n return self.v.ind2sub(self.t[ax].argmin())", "def model_gauss(xsigma, nx, ny=1, nz=1, ysigma=None, zsigma=None, xcenter=None, ycenter=None, zcenter=None):\n\te = EMData()\n\te.set_size(nx, ny, nz)\n\tif( ysigma == None ) : ysigma = xsigma\n\tif( zsigma == None ) : zsigma = xsigma\n\tif( xcenter == None ) : xcenter = nx//2\n\tif( ycenter == None ) : ycenter = ny//2\n\tif( zcenter == None ) : zcenter = nz//2\n\te.process_inplace(\"testimage.puregaussian\", {\"x_sigma\":xsigma,\"y_sigma\":ysigma,\"z_sigma\":zsigma,\"x_center\":xcenter,\"y_center\":ycenter,\"z_center\":zcenter} )\n\treturn e", "def __scale_constraint(c, v):\n if c.equality:\n c.set_value((c.lower * v, c.body * v))\n else:\n c.set_value(\n (__none_left_mult(c.lower, v), c.body * v, __none_left_mult(c.upper, v))\n )", "def makeGaussian(size, fwhm = 3, center=None):\n\n x = np.arange(0, size, 1, float)\n y = x[:,np.newaxis]\n\n if center is None:\n x0 = y0 = size // 2\n else:\n x0 = center[0]\n y0 = center[1]\n\n return np.exp(-4*np.log(2) * ((x-x0)**2 
+ (y-y0)**2) / fwhm**2)", "def makeGaussian(size, fwhm = 3, center=None):\n\n x = np.arange(0, size, 1, float)\n y = x[:,np.newaxis]\n\n if center is None:\n x0 = y0 = size // 2\n else:\n x0 = center[0]\n y0 = center[1]\n\n return np.exp(-4*np.log(2) * ((x-x0)**2 + (y-y0)**2) / fwhm**2)", "def atmin(a,lowerlimit=None,dimension=None,inclusive=1):\r\n if inclusive: lowerfcn = N.greater\r\n else: lowerfcn = N.greater_equal\r\n if dimension == None:\r\n a = N.ravel(a)\r\n dimension = 0\r\n if lowerlimit == None:\r\n lowerlimit = N.minimum.reduce(N.ravel(a))-11\r\n biggest = N.maximum.reduce(N.ravel(a))\r\n ta = N.where(lowerfcn(a,lowerlimit),a,biggest)\r\n return N.minimum.reduce(ta,dimension)", "def calculate(cubes):\n try:\n sic = cubes.extract_cube(Constraint(name='sic'))\n except iris.exceptions.ConstraintMismatchError:\n try:\n sic = cubes.extract_cube(Constraint(name='siconca'))\n except iris.exceptions.ConstraintMismatchError as exc:\n raise RecipeError(\n 'Derivation of siextent failed due to missing variables '\n 'sic and siconca.') from exc\n\n ones = da.ones_like(sic)\n siextent_data = da.ma.masked_where(sic.lazy_data() < 15., ones)\n siextent = sic.copy(siextent_data)\n siextent.units = 'm2'\n\n return siextent", "def aicmle(timeSeries, distribution):\n mlevals = {} \n if distribution == 'pareto':\n mlevals['xmin'] = np.min(timeSeries)\n mlevals['mu'] = 1 - timeSeries.shape[0] / (timeSeries.shape[0] * np.log(mlevals['xmin']) - np.sum(np.log(timeSeries)))\n \n elif distribution == 'lognormal':\n mlevals['mu'] = np.sum(np.log(timeSeries)) / timeSeries.shape[0]\n mlevals['sigma'] = np.sqrt(np.sum( (np.log(timeSeries) - mlevals['mu'])**2) / timeSeries.shape[0])\n \n elif distribution == 'normal':\n mlevals['mu'] = np.mean(timeSeries)\n mlevals['sigma'] = np.sqrt(sum((timeSeries - np.mean(timeSeries))**2) / timeSeries.shape[0])\n \n elif distribution == 'exponential':\n mlevals['lambda'] = 1.0 / np.mean(timeSeries)\n \n elif distribution == 'boundedpl':\n mlevals['xmin'] = np.min(timeSeries)\n mlevals['xmax'] = np.max(timeSeries)\n minmuEstimate = 1.1\n mlevals['mu'] = fmin(lambda mu: -len(timeSeries) * np.log( (mu - 1) / (np.min(timeSeries)**(1 - mu) - np.max(timeSeries)**(1 - mu))) + mu * np.sum(np.log(timeSeries)), minmuEstimate, disp=0)[0]\n\n return mlevals", "def makeGaussian(size, fwhm=3, center=None):\n\n x = np.arange(0, size, 1, float)\n y = x[:, np.newaxis]\n\n if center is None:\n x0 = y0 = size // 2\n else:\n x0 = center[0]\n y0 = center[1]\n\n return np.exp(-4 * np.log(2) * ((x - x0)**2 + (y - y0)**2) / fwhm**2)", "def random_zoom(x, zoom_range, row_axis=0, col_axis=1, channel_axis=2,\n fill_mode='nearest', cval=0., interpolation_order=1):\n if len(zoom_range) != 2:\n raise ValueError('`zoom_range` should be a tuple or list of two'\n ' floats. Received: %s' % (zoom_range,))\n\n if zoom_range[0] == 1 and zoom_range[1] == 1:\n zx, zy = 1, 1\n else:\n zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)\n x = apply_affine_transform(x, zx=zx, zy=zy, channel_axis=channel_axis,\n fill_mode=fill_mode, cval=cval,\n order=interpolation_order)\n return x" ]
[ "0.5140067", "0.51391375", "0.51391375", "0.51391375", "0.51391375", "0.51391375", "0.51385653", "0.512212", "0.51002985", "0.5092538", "0.5085862", "0.5006036", "0.49219593", "0.49213806", "0.4891641", "0.48677793", "0.48438725", "0.4842382", "0.48329198", "0.48204932", "0.48114428", "0.4772086", "0.47648063", "0.47611213", "0.47515526", "0.47369727", "0.47274134", "0.4711493", "0.47055256", "0.47015047", "0.4688535", "0.4683905", "0.46751523", "0.4666467", "0.4636576", "0.46297947", "0.46255624", "0.4625146", "0.46242663", "0.46235207", "0.4619241", "0.4601439", "0.45902988", "0.45859763", "0.4585121", "0.458425", "0.45835245", "0.4577322", "0.45702964", "0.4567122", "0.4557991", "0.45569262", "0.4547964", "0.45431924", "0.45350203", "0.45256403", "0.45223317", "0.45201164", "0.45165613", "0.4506059", "0.450379", "0.4503266", "0.4495178", "0.44948307", "0.44906077", "0.44848117", "0.44832134", "0.44792062", "0.44792062", "0.44762096", "0.44760808", "0.44728407", "0.44620544", "0.4460263", "0.4454864", "0.44522974", "0.44505063", "0.4445215", "0.44426584", "0.44406804", "0.44396913", "0.44385925", "0.44301394", "0.44285345", "0.44257146", "0.44253206", "0.44251108", "0.44162372", "0.4412055", "0.4411221", "0.44110128", "0.44044945", "0.44008225", "0.44007564", "0.44007564", "0.44005466", "0.43923882", "0.43859825", "0.43843696", "0.43829158" ]
0.6448586
0
Distribution parameter for the location.
def loc(self): return self._gev_bijector.loc
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def distribution(self) -> str:\n return pulumi.get(self, \"distribution\")", "def loc(self):\n return self.distribution.loc", "def get_distribution_parameters(self):\r\n return \"UNDEFINED\"", "def distribution(self, env):\n pass", "def _handleInput(self, paramInput):\n super()._handleInput(paramInput)\n locationFind = paramInput.findFirst('location')\n if locationFind != None:\n self.location = locationFind.value\n else:\n self.raiseAnError(IOError,'location value needed for Logistic distribution')\n scaleFind = paramInput.findFirst('scale')\n if scaleFind != None:\n self.scale = scaleFind.value\n else:\n self.raiseAnError(IOError,'scale value needed for Logistic distribution')\n self.initializeDistribution()", "def _fit_distribution(sample, name):\n if name == WEIBULL_2P_KEYWORD:\n # Do not fit the location parameter because it is 0 for a 2-p. dist.\n params = sts.weibull_min.fit(sample, floc=0)\n elif name == WEIBULL_3P_KEYWORD or \\\n name == WEIBULL_3P_KEYWORD_ALTERNATIVE:\n params = sts.weibull_min.fit(sample)\n elif name == NORMAL_KEYWORD:\n params = list(sts.norm.fit(sample))\n # Shape doesn't exist for normal\n params.insert(0, 0)\n elif name == LOGNORMAL_EXPMU_PARAMETER_KEYWORD or \\\n name == LOGNORMAL_MU_PARAMETER_KEYWORD:\n # For lognormal loc is set to 0\n params = sts.lognorm.fit(sample, floc=0)\n elif name == 'KernelDensity':\n dens = sm.nonparametric.KDEUnivariate(sample)\n dens.fit(gridsize=2000)\n # Kernel density doesn't have shape, loc, scale\n return (dens.cdf, dens.icdf)\n else:\n err_msg = \"Distribution '{}' is unknown.\".format(name)\n raise ValueError(err_msg)\n\n return (ConstantParam(params[0]),\n ConstantParam(params[1]),\n ConstantParam(params[2]))", "def dist(self):\n pass", "def set_distribution(self,\n distn_arg,\n **kwargs):\n\n # Check if string or scipy distribution\n if isinstance(distn_arg, str):\n distn_arg = string_to_distribution(distn_name = distn_arg, **kwargs)\n elif not isinstance(distn_arg, ss._distn_infrastructure.rv_frozen):\n raise TypeError(f\"Type of {distn_arg} unknown.\")\n\n self.distribution = distn_arg", "def __init__(self, location, weight):\n self.location = location\n self.is_dc = type(location) == DistributionCenter\n self.weight = weight\n self.neighbors = []\n self.individual_score = 0\n self.relative_score = 0", "def random_location(self, normal=True):\n if normal:\n return distribute_normally(x1=self.x1, x2=self.x2, y1=self.y1, y2=self.y2)\n else:\n raise NotImplementedError", "def dist(self):\n return self._dist", "def SetDistribution(self, dist=None):\n from mystic.math import Distribution\n if dist and Distribution not in dist.__class__.mro():\n dist = Distribution(dist) #XXX: or throw error?\n self._dist = dist\n return", "def dist(self, name):\n raise NotImplementedError", "def _handleInput(self, paramInput):\n super()._handleInput(paramInput)\n locationFind = paramInput.findFirst('location')\n if locationFind != None:\n self.location = locationFind.value\n else:\n self.raiseAnError(IOError,'location value needed for Laplace distribution')\n scaleFind = paramInput.findFirst('scale')\n if scaleFind != None:\n self.scale = scaleFind.value\n else:\n self.raiseAnError(IOError,'scale value needed for Laplace distribution')\n self.initializeDistribution()", "def getCrowDistDict(self):\n retDict = Distribution.getCrowDistDict(self)\n retDict['scale'] = self.scale\n retDict['location'] = self.location\n return retDict", "def getCrowDistDict(self):\n retDict = Distribution.getCrowDistDict(self)\n retDict['scale'] = 
self.scale\n retDict['location'] = self.location\n return retDict", "def initializeDistribution(self):\n if self.lowerBoundUsed == False and self.upperBoundUsed == False:\n self._distribution = distribution1D.BasicLogisticDistribution(self.location,self.scale)\n else:\n if self.lowerBoundUsed == False:\n a = -sys.float_info.max\n else:\n a = self.lowerBound\n if self.upperBoundUsed == False:\n b = sys.float_info.max\n else:\n b = self.upperBound\n self._distribution = distribution1D.BasicLogisticDistribution(self.location,self.scale,a,b)", "def _handleInput(self, paramInput):\n super()._handleInput(paramInput)\n pFind = paramInput.findFirst('p')\n if pFind != None:\n self.p = pFind.value\n else: self.raiseAnError(IOError,'p value needed for Geometric distribution')\n self.initializeDistribution()", "def location(self):\n return self.properties.get(\"location\", Location())", "def initializeDistribution(self):\n if self.lowerBoundUsed == False:\n self.lowerBound = -sys.float_info.max\n if self.upperBoundUsed == False:\n self.upperBound = sys.float_info.max\n self._distribution = distribution1D.BasicLaplaceDistribution(self.location,self.scale,self.lowerBound,self.upperBound)", "def initializeDistribution(self):\n if self.lowerBoundUsed == False and self.upperBoundUsed == False:\n self._distribution = distribution1D.BasicGeometricDistribution(self.p)\n else: self.raiseAnError(IOError,'Truncated Geometric not yet implemented')", "def uniformLocation(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n return 0", "def getDistType(self):\n return self.distType", "def getCrowDistDict(self):\n retDict = Distribution.getCrowDistDict(self)\n retDict['p'] = self.p\n return retDict", "def getCrowDistDict(self):\n retDict = Distribution.getCrowDistDict(self)\n retDict['p'] = self.p\n return retDict", "def distributor(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"distributor\")", "def distributor(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"distributor\")", "def location(self):\r\n return self._get('location', {})", "def dist_by_rating(self):\n return ratings_distribution", "def get_location_distribution_one_brand(table, brand):\n # get the rows from the table with the specified brand\n cur, con = database.connect_to_database()\n\n # get all the different locations present in the specified table\n locations = database.get_locations(table)\n\n # calculate the distribution\n index_names = []\n distribution = []\n for index, location in locations.iterrows():\n # find all the rows with the specified brand for each location\n query = \"SELECT * FROM \" + table + \" WHERE Model LIKE '%%%\" \\\n + brand + \"%%%' AND Location LIKE '%%%\" \\\n + location.location + \"%%%';\"\n result = pandas.read_sql_query(query, con)\n index_names.append(smart_str(location.location))\n distribution.append(len(result))\n\n location_distribution = pandas.Series(distribution, index=index_names)\n return location_distribution", "def dist(self, init_repr, name):\n randomness = self.get_ground_vector('!Dist:{}-Dist'.format(name))\n return self.distmodel(torch.cat([init_repr, randomness])) # Consider reusing varmodel", "def distributor(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"distributor\")", "def returnInputParameter():\n return DistributionsCollection()", "def location(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"location\")", "def location(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"location\")", "def 
location(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"location\")", "def __init__(__self__, *,\n location: pulumi.Input[str]):\n pulumi.set(__self__, \"location\", location)", "def pdf(sample, location=0, scale=1):\n location = T.cast(location, theano.config.floatX)\n SQRT_2_PI = np.sqrt(2 * PI)\n SQRT_2_PI = T.cast(SQRT_2_PI, theano.config.floatX)\n\n divisor = 2 * scale ** 2 # + epsilon,\n divisor = T.cast(divisor, theano.config.floatX)\n if isinstance(location, T.TensorVariable) and location.ndim == 0:\n location = location.dimshuffle('x')\n\n exp_arg = -((sample - location) ** 2) / divisor\n z = 1. / (SQRT_2_PI * scale + epsilon)\n\n return T.exp(exp_arg) * z", "def set_dist(self, dist):\n dist = u.Quantity(dist, unit=u.AU)\n if dist.value < 0:\n warnings.warn(\"distance cannot be negative. Using absolute value.\")\n self.dist = np.absolute(dist.value)", "def location(self, location):\n self._location = location", "def generate_distribution(self, rng: RNG, filtration_range, scale=None):\n if scale is None:\n scale = self.scale\n if self.delayed_coordinates:\n points = self.generate_3D_points_delayed(rng, self.number_of_points, scale)\n else:\n points = self.generate_points(rng, self.number_of_points, self.dimension, scale)\n # distance_matrix = pairwise_distances(points)\n if self.gpu:\n # TODO add in GPU calculation.\n raise NotImplementedError(\"GPU calculation of distance matrix not implemented yet.\")\n else:\n sparse_distance_matrix = self.make_sparse_dm(points, filtration_range[-1])\n if self.f is not None:\n output_string = \"[Points]\\n\"\n output_string += str(points)\n self.f.write(output_string)\n # An attempt to reduce memory usage, might not work\n del points\n diagrams = self.generate_diagrams(sparse_distance_matrix, threshold=filtration_range[-1])\n homology = self.generate_homology(diagrams, filtration_range)\n distribution = homology[self.homology_dimension]\n if self.f is not None:\n self.f.write(\"\\n[Distribution]\\n\" + str(distribution) + \"\\n\")\n return distribution", "def set_location(self, location):\n if not location:\n location = getcwd() + '/stormpath-exports'\n\n return location", "def usage_location(self):\n if \"usageLocation\" in self._prop_dict:\n return self._prop_dict[\"usageLocation\"]\n else:\n return None", "def usage_location(self):\n if \"usageLocation\" in self._prop_dict:\n return self._prop_dict[\"usageLocation\"]\n else:\n return None", "def initializeDistribution(self):\n self.checkDistParams()\n\n self.lowerBound = min(self.mapping.keys())\n self.upperBound = max(self.mapping.keys())", "def initializeDistribution(self):\n if self.functionType == 'CDF':\n self._distribution = distribution1D.BasicMultiDimensionalInverseWeight(str(self.dataFilename), self.p,True)\n else:\n self._distribution = distribution1D.BasicMultiDimensionalInverseWeight(str(self.dataFilename), self.p,False)\n self.dimensionality = self._distribution.returnDimensionality()\n self.lowerBound = [self.returnLowerBound(dim) for dim in range(self.dimensionality)]\n self.upperBound = [self.returnUpperBound(dim) for dim in range(self.dimensionality)]", "def dist(self, name):\n return self.get_ground_vector('!Dist:{}'.format(name))", "def dist(self, name):\n return self.get_ground_vector('!Dist:{}'.format(name))", "def dist(self, name):\n return self.get_ground_vector('!Dist:{}'.format(name))", "def distribution(self):\n \n #external_distribution serves both the purpose of external setting of distribution and the caching of distribution()\n if 
self.external_distribution:\n return self.external_distribution_array;\n else:\n energy_vector = []\n superset = self.generate_superset(0) \n \n for i in superset:\n state = self.ket(i)\n \n norm_squared = np.dot(state.T, state)\n \n if norm_squared > 0: #zero is appended at the end\n energy = np.dot(state.T, np.dot( self.epsilon, state))\n interaction = np.dot(state.T, np.dot( self.u, state))/2.0 #divide by two. Otherwise, <l r| U |l r > = U_LR + U_RL = 2U\n #print state, np.dot(self.u, state) \n #print interaction\n energy_vector.append( energy + interaction )\n \n energy_vector.insert(0, 0.0) \n probability = np.exp( np.multiply(-self.beta, energy_vector)) \n probability /= probability.sum() \n return probability", "def location(self, location: str):\n self._location = location", "def dist_albedo(self, p):\r\n\r\n return self.uniform(p, self.prange)", "def initializeDistribution(self):\n if (self.lowerBoundUsed == False and self.upperBoundUsed == False):\n self._distribution = distribution1D.BasicExponentialDistribution(self.lambdaVar,self.low)\n self.lowerBound = self.low\n self.upperBound = sys.float_info.max\n else:\n if self.lowerBoundUsed == False:\n self.lowerBound = self.low\n if self.upperBoundUsed == False:\n self.upperBound = sys.float_info.max\n self._distribution = distribution1D.BasicExponentialDistribution(self.lambdaVar,self.lowerBound,self.upperBound,self.low)", "def getCrowDistDict(self):\n retDict = Distribution.getCrowDistDict(self)\n retDict['n'] = self.n\n retDict['p'] = self.p\n return retDict", "def set_param(self, name, value, *, distrib=None, ref=None):\n raise NotImplementedError", "def __init__(self, distribution_type: Type[Distribution]) -> None:\n super().__init__()\n self.distribution_type = distribution_type", "def adapt(self, domain_distr_param: str, domain_distr_param_value: Union[float, int]):\n if domain_distr_param not in self.get_field_names():\n raise KeyError(f'The domain parameter {self.name} does not have a domain distribution parameter '\n f'called {domain_distr_param}!')\n setattr(self, domain_distr_param, domain_distr_param_value)", "def __init__(\n self,\n source: str,\n param: Distribution[str] = StrConst(\"1group1gene\")\n ):\n\n self.source = source\n self.param = param\n\n self.check_valid_dist()\n return", "def getCrowDistDict(self):\n retDict = Distribution.getCrowDistDict(self)\n retDict['mu'] = self.mu\n return retDict", "def _course_location(self):\r\n return \"location:{org}+{number}+{run}+course+{run}\".format(**self._course_dict)", "def DistributionSize(self):\n return self.distribution_size", "def __init__(self, step_name, variable_name, distribution):\n self.step_name = step_name\n self.variable_name = variable_name\n self._distribution = distribution", "def LocationAttributeConfig():\n return concepts.ResourceParameterAttributeConfig(\n name='location',\n help_text='Google Cloud location for the {resource}.',\n fallthroughs=[\n deps.PropertyFallthrough(properties.VALUES.container_aws.location)\n ],\n )", "def __str__(self):\r\n return self.get_distribution_type()", "def location(self) -> str:\n return self.__location", "def location(self) -> str:\n return self.__location", "def location(self) -> str:\n return self.__location", "def initializeDistribution(self):\n if self.nPoints is None:\n self.xArray = np.arange(self.lowerBound,self.upperBound+1)\n else:\n self.xArray = np.linspace(self.lowerBound,self.upperBound,self.nPoints)\n\n # Here the actual calculation of discrete distribution parameters is performed\n self.pdfArray = 
1.0/self.xArray.size * np.ones(self.xArray.size)\n paramsDict={}\n paramsDict['outcome'] = self.xArray\n paramsDict['state'] = self.pdfArray\n\n self.categoricalDist = Categorical()\n self.categoricalDist.initializeFromDict(paramsDict)\n initialPerm = randomUtils.randomPermutation(self.xArray.tolist(),self)\n self.pot = np.asarray(initialPerm)", "def get_location(self):\n\t\treturn self.location", "def location(self) -> Optional[str]:\n raise NotImplementedError()", "def location(self, value):\n if self.scoping:\n self.scoping.location = value\n else:\n raise Exception(\"Property field location is based on scoping, and scoping is not defined\")", "def initializeDistribution(self):\n self.convertToDistrDict['Legendre'] = self.convertLegendreToUniform\n self.convertToQuadDict ['Legendre'] = self.convertUniformToLegendre\n self.measureNormDict ['Legendre'] = self.stdProbabilityNorm\n self.convertToDistrDict['ClenshawCurtis'] = self.convertLegendreToUniform\n self.convertToQuadDict ['ClenshawCurtis'] = self.convertUniformToLegendre\n self.measureNormDict ['ClenshawCurtis'] = self.stdProbabilityNorm\n self._distribution = distribution1D.BasicUniformDistribution(self.lowerBound,self.lowerBound+self.range)", "def build_distribution(self, dist, label, **kwargs):\n dist = get_distribution(dist)\n\n if \"dims\" in kwargs:\n group_dim = [dim for dim in kwargs[\"dims\"] if dim.endswith(\"_group_expr\")]\n kwargs = {\n k: self.expand_prior_args(k, v, label, dims=group_dim) for (k, v) in kwargs.items()\n }\n else:\n kwargs = {k: self.expand_prior_args(k, v, label) for (k, v) in kwargs.items()}\n\n if self.noncentered and has_hyperprior(kwargs):\n sigma = kwargs[\"sigma\"]\n offset = pm.Normal(label + \"_offset\", mu=0, sigma=1, dims=kwargs[\"dims\"])\n return pm.Deterministic(label, offset * sigma, dims=kwargs[\"dims\"])\n return dist(label, **kwargs)", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"mu[1,0,100]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"@0-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::ggH_b_func(\"1-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)\", mu)')", "def get_dist_probability(self):\n return self.dist_probability", "def location(self) -> str:\n return self.metadata.location", "def __init__(self, mean=0.0, sigma=1.0):\n super().__init__()\n self.mean = mean\n self.sigma = sigma\n self.hasInfiniteBound = True\n self.type = 'Normal'\n self.distType = 'Continuous'\n self.compatibleQuadrature.append('Hermite')\n self.compatibleQuadrature.append('CDF')\n #THESE get set in initializeDistribution, since it depends on truncation\n #self.preferredQuadrature = 'Hermite'\n #self.preferredPolynomials = 'Hermite'", "def getDistributionType(self):\n return self._distribution_type", "def initializeDistribution(self):\n if self.lowerBoundUsed == False and self.upperBoundUsed == False:\n self._distribution = distribution1D.BasicPoissonDistribution(self.mu)\n self.lowerBound = 0.0\n self.upperBound = sys.float_info.max\n else:\n self.raiseAnError(IOError,'Truncated poisson not yet implemented')", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return 
pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"location\")", "def location(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"location\")", "def location(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"location\")", "def location(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"location\")", "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")", "def location(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"location\")" ]
[ "0.65867454", "0.62829775", "0.62012964", "0.59412557", "0.57245076", "0.56689125", "0.5637412", "0.5567886", "0.5564207", "0.55510134", "0.5550806", "0.55264115", "0.5516382", "0.550923", "0.5505646", "0.5505646", "0.5499564", "0.5482442", "0.5477142", "0.54546994", "0.54109675", "0.5342536", "0.5317508", "0.53163344", "0.53163344", "0.52944404", "0.52944404", "0.52930206", "0.5292838", "0.5286097", "0.52855164", "0.52674556", "0.5260936", "0.5214182", "0.5214182", "0.5214182", "0.5211279", "0.5209758", "0.5187772", "0.5186403", "0.5152927", "0.51491684", "0.51477253", "0.51477253", "0.5144591", "0.51444143", "0.51416224", "0.51416224", "0.51416224", "0.51383734", "0.5133513", "0.5126406", "0.511809", "0.5114188", "0.5113965", "0.5113333", "0.5112023", "0.5088853", "0.5087458", "0.5081534", "0.5078859", "0.5070385", "0.5069447", "0.50670356", "0.50628716", "0.50628716", "0.50628716", "0.5059771", "0.5050924", "0.5050548", "0.504803", "0.5042765", "0.5042194", "0.5033466", "0.50164473", "0.5011254", "0.5008282", "0.5006128", "0.5005421", "0.50031483", "0.50031483", "0.50031483", "0.50031483", "0.50031483", "0.50031483", "0.50031483", "0.50031483", "0.50031483", "0.50031483", "0.50031483", "0.50031483", "0.50031483", "0.50028217", "0.50028217", "0.50028217", "0.50028217", "0.4994572", "0.4994572", "0.4994572", "0.4994572", "0.4994572" ]
0.0
-1
Distribution parameter for scale.
def scale(self): return self._gev_bijector.scale
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scale(self):\n return self.distribution.scale", "def scale_parameter(self):\n return self._scale_parameter", "def get_scale_parameter(self):\n\n shape_in_gamma_func = float(1 + (1 / self._shape_parameter))\n gamma_func = special.gamma(shape_in_gamma_func)\n self._scale_parameter = self._mean_fire_recurrence / gamma_func", "def get_scale_parameter(self):\n\n if self.scale_parameter == 0.0:\n shape_in_gamma_func = float(1+(1/self.shape_parameter))\n gamma_func = special.gamma(shape_in_gamma_func)\n self.scale_parameter = (self.mean_fire_recurrence/gamma_func)\n return self.scale_parameter\n else:\n return self.scale_parameter", "def get_scale_parameter(self):\r\n \r\n if self.scale_parameter == 0.0: \r\n shape_in_gamma_func = float(1+(1/self.shape_parameter))\r\n gamma_func = special.gamma(shape_in_gamma_func)\r\n self.scale_parameter = (self.mean_fire_recurrence/gamma_func)\r\n return self.scale_parameter\r\n else:\r\n return self.scale_parameter", "def GetScale(self):\n ...", "def get_scale():\r\n\r\n \r\n return 0.5", "def scale(self) -> Optional[pulumi.Input['ScaleArgs']]:\n return pulumi.get(self, \"scale\")", "def scale(self):\n return self._scale", "def scale(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"scale\")", "def _scale_param(self, resid_us):\n return((resid_us**2).sum().sum() / self.dof)", "def scale(self):", "def scaling(self):\n return self.__scaling", "def scaling(self):\n return self._scaling", "def scaling(self):\n return self._scaling", "def _hyperparam_to_scale(self, hyperparam):\n\n # If logscale is used, input hyperparam is log of the scale.\n if self.use_log_scale:\n scale = 10.0**hyperparam\n else:\n scale = numpy.abs(hyperparam)\n\n return scale", "def scale(self):\n return self.scale_factor / CONSTANTS.AU", "def scale(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"scale\")", "def scale(self):\n return self._a", "def getScale(self):\n return self.factor**self.turnOn", "def scale(self, scale):\n\n self._scale = scale", "def scale(self, scale):\n\n self._scale = scale", "def setScale(self, *args):\n return _libsbml.Unit_setScale(self, *args)", "def scale_it(val):\n return scale(val, 0, 1, bpm_range[0], bpm_range[1])", "def _scale_to_hyperparam(self, scale):\n\n # If logscale is used, output hyperparam is log of scale.\n if self.use_log_scale:\n hyperparam = numpy.log10(numpy.abs(scale))\n else:\n hyperparam = numpy.abs(scale)\n\n return hyperparam", "def scale(self) -> Tuple[float, float]:\n return self._scale", "def __init__(self,scale):\n self.scale = scale", "def scale(self, scale):\n\t\tself._current_score *= scale", "def scaleProcess(process,scale):\n #print '>>> scaleProcess(\"%s\",%.3f):'%(process.process(),scale)\n #print \">>> rate before = %s\"%(process.rate())\n process.set_rate(process.rate()*scale)\n #print \">>> rate after = %s\"%(process.rate())", "def _point_scale(self, width=None, height=None, scale=None):\n if numpy.count_nonzero([width is not None, height is not None, scale is not None]) > 1:\n raise ValueError(\"Specify only one of width, height, or scale.\")\n\n if width is not None:\n scale = toyplot.units.points(width) / self._width\n elif height is not None:\n scale = toyplot.units.points(height) / self._height\n elif scale is None:\n scale = 1.0\n return scale", "def with_scale_op(self, scale):\n\t\tself.variables['scale'] = scale\n\t\treturn self", "def distribution(self) -> str:\n return pulumi.get(self, \"distribution\")", "def initializeDistribution(self):\n self.minVal = 
min(math.exp(self.upperBound),math.exp(self.lowerBound))\n self.maxVal = max(math.exp(self.upperBound),math.exp(self.lowerBound))", "def getScale(self):\n return _libsbml.Unit_getScale(self)", "def __init__(self, initializer, scale=1):\n self.scale = normalize_tuple(scale, 2, \"scale\")\n self.initializer = initializer", "def xscale(value):\n impl.xscale(**locals())", "def scale(self, value):\r\n return (float(value)-float(self.minimum))/float(self.maximum-self.minimum)*2.0 - 1.0", "def prob_distr(self, x):\n return 1.0/x", "def getScale(self):\n \n dag_node = OpenMaya.MFnDagNode(self.thisObj)\n transform_node = OpenMaya.MFnTransform(dag_node.parent( 0 ))\n \n util = OpenMaya.MScriptUtil()\n util.createFromDouble(0.0, 0.0, 0.0)\n pointeur = util.asDoublePtr()\n transform_node.getScale(pointeur)\n \n sx = util.getDoubleArrayItem(pointeur, 0)\n sy = util.getDoubleArrayItem(pointeur, 1)\n sz = util.getDoubleArrayItem(pointeur, 2)\n\n return sx, sy, sz", "def scale_value(self):\n return self._scale_value[2]", "def get_scale_op(self):\n\t\treturn self.variables.get('scale')", "def scale(self, value):\n return (float(value) - float(self.minimum)) / \\\n float(self.maximum - self.minimum) * 2.0 - 1.0", "def get_scaled_value(self, value):\r\n raise NotImplementedError()", "def __init__(self, scale=False):\n self.scale = scale", "def set_scale(self, scale):\n scale = float(scale)\n if scale <= 1:\n raise ValueError('The scale parameter must exceed 1.')\n self._a = scale", "def scale(self, factor):\n return BSplineFunc(self.kvs, self.coeffs * factor)", "def __init__(self, scale=0.1, rng=None):\n if rng is None:\n self.rng = np.random.RandomState(np.random.randint(0, 10000))\n else:\n self.rng = rng\n self.scale = scale", "def default_scale(scale):\n return sequence_scale(scale, (1, 1.25, 1.5, 1.75, 2,\n 2.5, 3, 4, 5, 6, 7.5, 8, 9, 10))", "def yscale(value):\n impl.yscale(**locals())", "def gauss_hermite_scale_limit(self):\n return self._gauss_hermite_scale_limit", "def scale(self, factor):\n new = self.copy()\n new.d.clear()\n\n for val, prob in self.items():\n new.set(val * factor, prob)\n return new", "def scale(self, factor):\n self.b = factor * self.b", "def adapt(self, domain_distr_param: str, domain_distr_param_value: Union[float, int]):\n if domain_distr_param not in self.get_field_names():\n raise KeyError(f'The domain parameter {self.name} does not have a domain distribution parameter '\n f'called {domain_distr_param}!')\n setattr(self, domain_distr_param, domain_distr_param_value)", "def scale(self, value):\n\t\toldscale = self.oldmax - self.oldmin\n\t\tnewscale = self.newmax - self.newmin\n\t\treturn (newscale * (value - self.oldmin) / oldscale) + self.newmin", "def Scale(*args, **kwargs):\n return _gdi_.GraphicsContext_Scale(*args, **kwargs)", "def Scale(*args, **kwargs):\n return _gdi_.GraphicsMatrix_Scale(*args, **kwargs)", "def scale(self, scale):\n \n scale_matrix = wf.scaleMatrix(scale, self.width/2, self.height/2, 0)\n self.transform(scale_matrix)", "def DistributionSize(self):\n return self.distribution_size", "def doppler_scale(self):\n return self._dopplerscale", "def get_scale(self):\n\n if not hasattr(self, \"scale\"):\n raise NotImplementedError(\"\"\"All end-use subclasses of Color should define\n a get_scale method.\"\"\")\n\n return self.scale", "def getnscale(self):\n return self.nscale", "def set_hyper_parameters(self, x):\n self.set_scale(x[0])", "def scale_to_factor(scale):\n return (B.pi / 2) / (2 * scale**2)", "def scale(input):\n return (input - np.min(input)) / 
((np.max(input) - np.min(input)))", "def __init__(self, scale: Union[float, torch.Tensor]):\n\n super().__init__()\n self._scale = scale", "def _scale_setter(self, value: float) -> None:\n self.uaxis.scale = value\n self.vaxis.scale = value", "def adapt_length_scale(self):\n Ne = max(1,self.Ne)\n Nc = max(1,self.Nc)\n ratio = Ne/(Ne+Nc)\n self.mu *= 2*ratio", "def scale_(entropy):\n\n # Double scale with each successive 1.\n bit_depth = 0\n while entropy(1):\n bit_depth += 1\n\n # Return a random number within the scale.\n return 2 ** bit_depth + entropy(bit_depth) - 1", "def _normal_log_prob(self, r, scale_log):\n return -(r**2) / 2 - scale_log - self.const", "def __call__(self, q):\n # SASCalculator ignores the scale, so we add it in here\n yout = BasePDFGenerator.__call__(self, q)\n yout *= self.scale.value\n return yout", "def initializeDistribution(self):\n if (self.lowerBoundUsed == False and self.upperBoundUsed == False):\n self._distribution = distribution1D.BasicExponentialDistribution(self.lambdaVar,self.low)\n self.lowerBound = self.low\n self.upperBound = sys.float_info.max\n else:\n if self.lowerBoundUsed == False:\n self.lowerBound = self.low\n if self.upperBoundUsed == False:\n self.upperBound = sys.float_info.max\n self._distribution = distribution1D.BasicExponentialDistribution(self.lambdaVar,self.lowerBound,self.upperBound,self.low)", "def auto_scale_factor(self):\r\n return self.gref.auto_scale_factor", "def Scale(*args, **kwargs):\n return _gdi_.Font_Scale(*args, **kwargs)", "def scale(curve):\n return curve/rmsd(curve)", "def set_density(self, theta, scale):\n self.density = Normal(self.mean(theta), scale)", "def get_apply_scale(self, applyScaleFactor, scale_quality = 1.0):\n v = self.scale * self.scale_quality * scale_quality\n if applyScaleFactor:\n v *= self.scale_factor\n return v", "def scale(self, scale):\n self.coords = self.coords * scale\n return self", "def density(self, arg):\n mean = - self.sigma**2 * self.data['maturity']\n std = self.sigma * self.data['maturity']**.5\n return scs.norm(mean, std).pdf(arg)", "def _pixel_scale(self, width=None, height=None, scale=None):\n if numpy.count_nonzero([width is not None, height is not None, scale is not None]) > 1:\n raise ValueError(\"Specify only one of width, height, or scale.\")\n if width is not None:\n scale = width / self._width\n elif height is not None:\n scale = height / self._height\n elif scale is None:\n scale = 1.0\n return scale", "def scale(self,scale_by):\n x = self._x * scale_by\n y = self._y * scale_by\n return Point(x,y)", "def scale_column(self):\n return self._scale_column", "def loss_scale(self):\n return self._loss_scale", "def get_scale_freq():\n return sf / 2 / (num_freq-1)", "def freq_optimization(self):\n index = identify_scale(self.vz, True)\n # In case the patient is limping\n if index > 35:\n index = index / 2\n print(f\"Scale used is {index}\")", "def scale_model(model, scale):\n params = model.named_parameters()\n dict_params = dict(params)\n with torch.no_grad():\n for name, param in dict_params.items():\n dict_params[name].set_(dict_params[name].data * scale)", "def density(self, arg):\n out = 0\n for weight, mean, std in zip(self.weights, self.means, self.stds):\n scale = std * self.data['maturity']**.5\n loc = ((mean - self.data['riskfree']) *\n self.data['maturity'] - scale**2)\n out += weight * scs.norm(loc, scale).pdf(arg)\n return out", "def __init__(self, scale, **kwargs):\n super(NormalNoise, self).__init__(**kwargs)\n self._scale = scale", "def scale(X, *, axis=..., 
with_mean=..., with_std=..., copy=...):\n ...", "def GetUserScale(*args, **kwargs):\n return _gdi_.DC_GetUserScale(*args, **kwargs)", "def _scale(waveform):\n # Get random scale factor\n scale_factor = tf.random_uniform(shape=[], minval=0.5, maxval=2.5, dtype=tf.float32)\n\n return waveform * scale_factor", "def _eta_sfr_scaling(self,x,q):\n i = self.enum[q]\n A = self.scaling_params['A'][i]\n b = self.scaling_params['b'][i]\n return A*x**b", "def scaling_factor(self):\n bin_scale = self.spabins * self.spebins\n return bin_scale * self.int_time", "def parallel_scale(self, value):\n self.camera.parallel_scale = value\n self.Modified()", "def scaling_object(self):\n return self.__scaling_object", "def __call__(self, scale=1.0, size=None, **kwargs):\n return super().__call__(scale, size=size, **kwargs)", "def __call__(self, scale=1.0, size=None, **kwargs):\n return super().__call__(scale, size=size, **kwargs)", "def _scale(x):\n scaleFactor = 1\n _ret = int(x/scaleFactor)\n return _ret", "def _getScalesRand(self):\n if self.P > 1:\n scales = []\n for term_i in range(self.n_randEffs):\n _scales = sp.randn(self.diag[term_i].shape[0])\n if self.jitter[term_i] > 0:\n _scales = sp.concatenate(\n (_scales, sp.array([sp.sqrt(self.jitter[term_i])])))\n scales.append(_scales)\n scales = sp.concatenate(scales)\n else:\n scales = sp.randn(self.vd.getNumberScales())\n return scales", "def _setscales(self, ndata, largestscale, notes, scaling):\n if scaling==\"log\":\n if notes<=0: notes=1\n # adjust nscale so smallest scale is 1\n noctave=self._log2(2.*ndata/largestscale)\n self.nscale=notes*noctave\n self.scales=numpy.zeros(self.nscale, float)\n for j in range(self.nscale):\n self.scales[j]=2.0**(float(j)/notes)\n elif scaling==\"linear\":\n nmax=ndata/largestscale/2\n self.scales=numpy.arange(float(2), float(nmax))\n self.nscale=len(self.scales)\n else: raise ValueError, \"scaling must be linear or log\"\n return", "def scale(f, a, j=0):\n return f.per(dmp_scale_in(f.rep, f.dom.convert(a), j, f.lev, f.dom))" ]
[ "0.83170885", "0.75846076", "0.7424819", "0.7398855", "0.7397794", "0.704038", "0.7032206", "0.7026145", "0.7016287", "0.69452536", "0.6886023", "0.6884829", "0.68717456", "0.6735917", "0.6735917", "0.67252976", "0.6618296", "0.65950215", "0.65737295", "0.65534216", "0.6532687", "0.6532687", "0.6532327", "0.6486958", "0.64561504", "0.64555204", "0.6414102", "0.64031506", "0.640292", "0.63949376", "0.63787675", "0.63056487", "0.6256672", "0.6249519", "0.6238052", "0.6201751", "0.61886203", "0.6162297", "0.6137346", "0.61278015", "0.612412", "0.61173004", "0.609705", "0.6083165", "0.6072726", "0.6069705", "0.60583425", "0.6052273", "0.60444576", "0.6036416", "0.6033621", "0.5990389", "0.59894395", "0.59875876", "0.59730893", "0.59687287", "0.59335613", "0.59327704", "0.5927017", "0.59190077", "0.5917772", "0.59140486", "0.59011173", "0.5893287", "0.58889806", "0.58797324", "0.58767277", "0.58638996", "0.58623546", "0.58600825", "0.5855783", "0.58544743", "0.5853411", "0.5849725", "0.58485186", "0.58460134", "0.5838519", "0.5832605", "0.5821115", "0.57893395", "0.5786857", "0.578391", "0.5775132", "0.5757908", "0.57522744", "0.5750681", "0.57467455", "0.57444507", "0.57427675", "0.5739911", "0.57333463", "0.5729343", "0.5728728", "0.5727362", "0.5723514", "0.5723514", "0.57230043", "0.57159835", "0.5714162", "0.5705906" ]
0.7008766
9
Distribution parameter for shape.
def concentration(self): return self._gev_bijector.concentration
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, shape):\n return np.random.uniform(low=self.minval, high=self.maxval, size=shape)", "def weight(self, shape, name=\"\"):\n return tf.Variable(tf.truncated_normal(shape, stddev=0.1), name=name)", "def __call__(self, shape):\n return np.random.normal(loc=self.mean, scale=self.stddev, size=shape)", "def _sample_distribution(shape, var, distribution, seed, dtype):\n distribution = str(distribution).lower()\n if distribution == 'truncated_normal':\n # constant taken from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)\n stddev = prefer_static.sqrt(var) / 0.87962566103423978\n return tf.random.stateless_truncated_normal(\n shape, mean=0., stddev=stddev, dtype=dtype,\n seed=samplers.sanitize_seed(seed))\n elif distribution == 'uniform':\n limit = prefer_static.sqrt(3. * var)\n return samplers.uniform(shape, minval=-limit, maxval=limit,\n dtype=dtype, seed=seed)\n elif distribution == 'untruncated_normal':\n stddev = prefer_static.sqrt(var)\n return samplers.normal(shape, mean=0., stddev=stddev,\n dtype=dtype, seed=seed)\n raise ValueError('Unrecognized distribution: \"{}\".'.format(distribution))", "def _weight_variable(self, shape, name=\"\", dtype=tf.float32):\n # weights are drawn from a normal distribution with std 0.1 and mean 0.\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial, dtype=dtype, name=name)", "def random_init(self, shape):\n return np.random.randn(shape[0],shape[1])*0.01", "def new_weight_variable(self, shape):\n self.total_parameters += np.product(shape)\n # Scale down regular Xavier initialization because we're residual.\n stddev = 0.2 * (2.0 / np.product(shape[:-1]))**0.5\n var = tf.Variable(tf.truncated_normal(shape, stddev=stddev))\n self.parameters.append(var)\n return var", "def W_init(shape, name=None):\n values = rng.normal(loc=0, scale=1e-2, size=shape)\n return K.variable(values, name=name)", "def W_init(shape, name=None):\n values = np.random.normal(loc=0, scale=1e-2, size=shape)\n return K.variable(values, name=name)", "def normal_init(self, shape):\n return np.random.normal(size=(shape[0],shape[1]))*0.01", "def get_scale_parameter(self):\n\n shape_in_gamma_func = float(1 + (1 / self._shape_parameter))\n gamma_func = special.gamma(shape_in_gamma_func)\n self._scale_parameter = self._mean_fire_recurrence / gamma_func", "def weight_variable(self, shape, var_name):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.get_variable(name=var_name,initializer=initial)", "def weight_variable(self, shape, name=\"\", dtype=tf.float32):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial,dtype=dtype,name=name)", "def get_distribution_parameters(self):\r\n return \"UNDEFINED\"", "def kernel_shape_param(self, param):\n\t\tindex = self.variables['kernel_format'].index(param)\n\t\treturn self.variables['weights'].shape[index]", "def shape(self):", "def shape(self):", "def initializeDistribution(self):\n if self.nPoints is None:\n self.xArray = np.arange(self.lowerBound,self.upperBound+1)\n else:\n self.xArray = np.linspace(self.lowerBound,self.upperBound,self.nPoints)\n\n # Here the actual calculation of discrete distribution parameters is performed\n self.pdfArray = 1.0/self.xArray.size * np.ones(self.xArray.size)\n paramsDict={}\n paramsDict['outcome'] = self.xArray\n paramsDict['state'] = self.pdfArray\n\n self.categoricalDist = Categorical()\n self.categoricalDist.initializeFromDict(paramsDict)\n initialPerm = randomUtils.randomPermutation(self.xArray.tolist(),self)\n self.pot = 
np.asarray(initialPerm)", "def shape(tensor):\n raise NotImplementedError", "def parameter_proposal(w, k = 200, sigma = 3):\n noise = np.random.randn(k, *np.shape(w))\n return (np.expand_dims(w, 0) + sigma * noise, noise)", "def weight_variable(shape):\r\n initial = tf.truncated_normal(shape, stddev=0.1)\r\n return tf.Variable(initial)", "def weight_variable(shape):\r\n initial = tf.truncated_normal(shape, stddev=0.1)\r\n return tf.Variable(initial)", "def weight_variable(shape):\r\n initial = tf.truncated_normal(shape, stddev=0.1)\r\n return tf.Variable(initial)", "def input_shape(self) ->torch.Size:\n pass", "def distribution(self) -> str:\n return pulumi.get(self, \"distribution\")", "def weight_variable(shape):\n#{{{\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)", "def prop_dist_form_params(*arg):\n return np.random.multivariate_normal(*arg)", "def b_init(shape, name=None):\n values = np.random.normal(loc=0.5, scale=1e-2, size=shape)\n return K.variable(values, name=name)", "def b_init(shape, name=None):\n values = rng.normal(loc=0.5, scale=1e-2, size=shape)\n return K.variable(values, name=name)", "def weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)", "def weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)", "def _gen_behaviour(self, params: Tensor) -> Distribution:\n # TODO: check for parameter size mismatches\n # TODO: support params being for multiple different distributions\n\n if len(params.size()) == 1:\n params = params.unsqueeze(0)\n elif len(params.size()) > 2:\n # FIXME: better error message\n raise ValueError(\"unknown dimensionality\")\n\n if self.settings.dist is Categorical:\n return Categorical(logits=params)\n\n if self.settings.dist is Normal:\n return Normal(params[:, 0], params[:, 1])\n\n if self.settings.dist is MultivariateNormal:\n half = params.size()[1] // 2\n return MultivariateNormal(params[:, :half], diag_embed(softplus(params[:, half:])))\n\n raise NotImplementedError(\"actors do not support this action distribution yet\")", "def __init__(self,\n distribution,\n bijector=None,\n batch_shape=None,\n event_shape=None,\n validate_args=False,\n name=None):\n parameters = dict(locals())\n name = name or ((\"\" if bijector is None else bijector.name) +\n distribution.name)\n with ops.name_scope(name, values=[event_shape, batch_shape]) as name:\n # For convenience we define some handy constants.\n self._zero = constant_op.constant(0, dtype=dtypes.int32, name=\"zero\")\n self._empty = constant_op.constant([], dtype=dtypes.int32, name=\"empty\")\n\n if bijector is None:\n bijector = identity_bijector.Identity(validate_args=validate_args)\n\n # We will keep track of a static and dynamic version of\n # self._is_{batch,event}_override. 
This way we can do more prior to graph\n # execution, including possibly raising Python exceptions.\n\n self._override_batch_shape = self._maybe_validate_shape_override(\n batch_shape, distribution.is_scalar_batch(), validate_args,\n \"batch_shape\")\n self._is_batch_override = _logical_not(_logical_equal(\n _ndims_from_shape(self._override_batch_shape), self._zero))\n self._is_maybe_batch_override = bool(\n tensor_util.constant_value(self._override_batch_shape) is None or\n tensor_util.constant_value(self._override_batch_shape).size != 0)\n\n self._override_event_shape = self._maybe_validate_shape_override(\n event_shape, distribution.is_scalar_event(), validate_args,\n \"event_shape\")\n self._is_event_override = _logical_not(_logical_equal(\n _ndims_from_shape(self._override_event_shape), self._zero))\n self._is_maybe_event_override = bool(\n tensor_util.constant_value(self._override_event_shape) is None or\n tensor_util.constant_value(self._override_event_shape).size != 0)\n\n # To convert a scalar distribution into a multivariate distribution we\n # will draw dims from the sample dims, which are otherwise iid. This is\n # easy to do except in the case that the base distribution has batch dims\n # and we're overriding event shape. When that case happens the event dims\n # will incorrectly be to the left of the batch dims. In this case we'll\n # cyclically permute left the new dims.\n self._needs_rotation = _logical_and(\n self._is_event_override,\n _logical_not(self._is_batch_override),\n _logical_not(distribution.is_scalar_batch()))\n override_event_ndims = _ndims_from_shape(self._override_event_shape)\n self._rotate_ndims = _pick_scalar_condition(\n self._needs_rotation, override_event_ndims, 0)\n # We'll be reducing the head dims (if at all), i.e., this will be []\n # if we don't need to reduce.\n self._reduce_event_indices = math_ops.range(\n self._rotate_ndims - override_event_ndims, self._rotate_ndims)\n\n self._distribution = distribution\n self._bijector = bijector\n super(TransformedDistribution, self).__init__(\n dtype=self._distribution.dtype,\n reparameterization_type=self._distribution.reparameterization_type,\n validate_args=validate_args,\n allow_nan_stats=self._distribution.allow_nan_stats,\n parameters=parameters,\n # We let TransformedDistribution access _graph_parents since this class\n # is more like a baseclass than derived.\n graph_parents=(distribution._graph_parents + # pylint: disable=protected-access\n bijector.graph_parents),\n name=name)", "def shape(self):\n return self._shape", "def _initialize(shape, dtype, batch_ndims, scale, mode, distribution,\n seed=None):\n if not dtype_util.is_floating(dtype):\n raise TypeError('Argument `dtype` must be float type (saw: \"{}\").'.format(\n dtype))\n shape = prefer_static.reshape(shape, shape=[-1]) # Ensure shape is vector.\n fan_in, fan_out = _compute_fans_from_shape(shape, batch_ndims)\n fans = _summarize_fans(fan_in, fan_out, mode, dtype)\n scale = prefer_static.cast(scale, dtype)\n return _sample_distribution(shape, scale / fans, distribution, seed, dtype)", "def DistributionSize(self):\n return self.distribution_size", "def weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)", "def weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)", "def weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)", "def weight_variable(shape):\n initial = 
tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)", "def weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)", "def weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)", "def weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)", "def weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)", "def weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)", "def weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)", "def layer_weight_init(self, size):\n # TODO: make smarter init\n return np.random.uniform(size=size)", "def weight_variable(shape, name=None):\r\n initial = tf.truncated_normal(shape, stddev=0.2)\r\n return tf.Variable(initial, name=name)", "def init_w(self, size):\n return np.random.uniform(self.r_min, self.r_max, size=size)", "def _fit_distribution(sample, name):\n if name == WEIBULL_2P_KEYWORD:\n # Do not fit the location parameter because it is 0 for a 2-p. dist.\n params = sts.weibull_min.fit(sample, floc=0)\n elif name == WEIBULL_3P_KEYWORD or \\\n name == WEIBULL_3P_KEYWORD_ALTERNATIVE:\n params = sts.weibull_min.fit(sample)\n elif name == NORMAL_KEYWORD:\n params = list(sts.norm.fit(sample))\n # Shape doesn't exist for normal\n params.insert(0, 0)\n elif name == LOGNORMAL_EXPMU_PARAMETER_KEYWORD or \\\n name == LOGNORMAL_MU_PARAMETER_KEYWORD:\n # For lognormal loc is set to 0\n params = sts.lognorm.fit(sample, floc=0)\n elif name == 'KernelDensity':\n dens = sm.nonparametric.KDEUnivariate(sample)\n dens.fit(gridsize=2000)\n # Kernel density doesn't have shape, loc, scale\n return (dens.cdf, dens.icdf)\n else:\n err_msg = \"Distribution '{}' is unknown.\".format(name)\n raise ValueError(err_msg)\n\n return (ConstantParam(params[0]),\n ConstantParam(params[1]),\n ConstantParam(params[2]))", "def scale(self):\n return self.distribution.scale", "def initializeDistribution(self):\n self.raiseAMessage('initialize distribution')\n mu = distribution1D.vectord_cxx(len(self.mu))\n for i in range(len(self.mu)):\n mu[i] = self.mu[i]\n covariance = distribution1D.vectord_cxx(len(self.covariance))\n for i in range(len(self.covariance)):\n covariance[i] = self.covariance[i]\n if self.method == 'spline':\n if self.covarianceType != 'abs':\n self.raiseAnError(IOError,'covariance with type ' + self.covariance + ' is not implemented for ' + self.method + ' method')\n self._distribution = distribution1D.BasicMultivariateNormal(covariance, mu)\n elif self.method == 'pca':\n self._distribution = distribution1D.BasicMultivariateNormal(covariance, mu, str(self.covarianceType), self.rank)\n if self.transformation:\n self.lowerBound = [-sys.float_info.max]*self.rank\n self.upperBound = [sys.float_info.max]*self.rank\n else:\n self.lowerBound = [self.returnLowerBound(dim) for dim in range(self.dimension)]\n self.upperBound = [self.returnUpperBound(dim) for dim in range(self.dimension)]", "def make_param(self, shape, init_scheme):\n if isinstance(init_scheme, numbers.Number):\n init_value = np.full(shape, init_scheme, floatX)\n elif init_scheme == 'uniform':\n #init_value = self._np_rng.uniform(low=-self.init_scale, high=self.init_scale, size=shape).astype(floatX) # FIXME\n init_value = np.random.uniform(low=-self.init_scale, high=self.init_scale, size=shape).astype(floatX)\n\n 
else:\n raise AssertionError('unsupported init_scheme')\n p = theano.shared(init_value)\n return p", "def get_scale_parameter(self):\n\n if self.scale_parameter == 0.0:\n shape_in_gamma_func = float(1+(1/self.shape_parameter))\n gamma_func = special.gamma(shape_in_gamma_func)\n self.scale_parameter = (self.mean_fire_recurrence/gamma_func)\n return self.scale_parameter\n else:\n return self.scale_parameter", "def get_scale_parameter(self):\r\n \r\n if self.scale_parameter == 0.0: \r\n shape_in_gamma_func = float(1+(1/self.shape_parameter))\r\n gamma_func = special.gamma(shape_in_gamma_func)\r\n self.scale_parameter = (self.mean_fire_recurrence/gamma_func)\r\n return self.scale_parameter\r\n else:\r\n return self.scale_parameter", "def _get_distribution_variables(self, R):\n domain, Domain = self.domain_Domain\n phase_name = self.phase_name\n\n R_typ = self.phase_param.R_typ # [m]\n # Particle-size distribution (area-weighted)\n f_a_dist = self.phase_param.f_a_dist(R) # [m-1]\n\n # Ensure the distribution is normalised, irrespective of discretisation\n # or user input\n f_a_dist = f_a_dist / pybamm.Integral(f_a_dist, R) # [m-1]\n\n # Volume-weighted particle-size distribution\n f_v_dist = R * f_a_dist / pybamm.Integral(R * f_a_dist, R) # [m-1]\n\n # Number-based particle-size distribution\n f_num_dist = (f_a_dist / R**2) / pybamm.Integral(\n f_a_dist / R**2, R\n ) # [m-1]\n\n # True mean radii and standard deviations, calculated from the f_a_dist that\n # was given, all have units [m]\n R_num_mean = pybamm.Integral(R * f_num_dist, R)\n R_a_mean = pybamm.Integral(R * f_a_dist, R)\n R_v_mean = pybamm.Integral(R * f_v_dist, R)\n sd_num = pybamm.sqrt(pybamm.Integral((R - R_num_mean) ** 2 * f_num_dist, R))\n sd_a = pybamm.sqrt(pybamm.Integral((R - R_a_mean) ** 2 * f_a_dist, R))\n sd_v = pybamm.sqrt(pybamm.Integral((R - R_v_mean) ** 2 * f_v_dist, R))\n\n # X-average the means and standard deviations to give scalars\n # (to remove the \"electrode\" domain, if present)\n R_num_mean = pybamm.x_average(R_num_mean)\n R_a_mean = pybamm.x_average(R_a_mean)\n R_v_mean = pybamm.x_average(R_v_mean)\n sd_num = pybamm.x_average(sd_num)\n sd_a = pybamm.x_average(sd_a)\n sd_v = pybamm.x_average(sd_v)\n\n # X-averaged distributions, or broadcast\n if R.domains[\"secondary\"] == [f\"{domain} electrode\"]:\n f_a_dist_xav = pybamm.x_average(f_a_dist)\n f_v_dist_xav = pybamm.x_average(f_v_dist)\n f_num_dist_xav = pybamm.x_average(f_num_dist)\n else:\n f_a_dist_xav = f_a_dist\n f_v_dist_xav = f_v_dist\n f_num_dist_xav = f_num_dist\n\n # broadcast\n f_a_dist = pybamm.SecondaryBroadcast(f_a_dist_xav, [f\"{domain} electrode\"])\n f_v_dist = pybamm.SecondaryBroadcast(f_v_dist_xav, [f\"{domain} electrode\"])\n f_num_dist = pybamm.SecondaryBroadcast(\n f_num_dist_xav, [f\"{domain} electrode\"]\n )\n\n variables = {\n f\"{Domain} {phase_name}particle sizes\": R / R_typ,\n f\"{Domain} {phase_name}particle sizes [m]\": R,\n f\"{Domain} area-weighted {phase_name}particle-size\"\n \" distribution [m-1]\": f_a_dist,\n f\"{Domain} volume-weighted {phase_name}particle-size\"\n \" distribution [m-1]\": f_v_dist,\n f\"{Domain} number-based {phase_name}particle-size\"\n \" distribution [m-1]\": f_num_dist,\n f\"{Domain} area-weighted mean particle radius [m]\": R_a_mean,\n f\"{Domain} volume-weighted mean particle radius [m]\": R_v_mean,\n f\"{Domain} number-based mean particle radius [m]\": R_num_mean,\n f\"{Domain} area-weighted {phase_name}particle-size\"\n \" standard deviation [m]\": sd_a,\n f\"{Domain} volume-weighted 
{phase_name}particle-size\"\n \" standard deviation [m]\": sd_v,\n f\"{Domain} number-based {phase_name}particle-size\"\n \" standard deviation [m]\": sd_num,\n # X-averaged sizes and distributions\n f\"X-averaged {domain} {phase_name}particle sizes [m]\": pybamm.x_average(R),\n f\"X-averaged {domain} area-weighted {phase_name}particle-size \"\n \"distribution [m-1]\": f_a_dist_xav,\n f\"X-averaged {domain} volume-weighted {phase_name}particle-size \"\n \"distribution [m-1]\": f_v_dist_xav,\n f\"X-averaged {domain} number-based {phase_name}particle-size \"\n \"distribution [m-1]\": f_num_dist_xav,\n }\n\n return variables", "def draw(self, shape=(1,)):\n if not isinstance(shape, tuple):\n shape = (shape,)\n\n size = int(numpy.prod(shape))\n bins = self.lib.bincount(\n self.lib.random.choice(\n self.lib.arange(self._Nd),\n size,\n p=self.weights\n ),\n minlength=self._Nd\n )\n out = self.lib.empty((size, self._Nx), dtype=self.lib.float32)\n\n index = 0\n for n, mean, cov in zip(bins, self.means, self.covariances):\n out[index:index + n] = self.lib.random.multivariate_normal(mean, cov, int(n))\n index += n\n\n return out.reshape(shape + (self._Nx,))", "def sample_shape(shape,nsamp=None):\r\n if nsamp == None:\r\n sample_shape = shape\r\n else:\r\n sample_shape = shape + (nsamp,)\r\n return sample_shape", "def weight_variable(shape):\n initial = tf.truncated_normal(shape)\n return tf.Variable(initial)", "def weight_variable(shape, name=None):\n initial = tf.truncated_normal(shape, mean=0.5, stddev=0.4)\n return tf.Variable(initial, name=name)", "def _create_weight(self, shape, stddev=0.01, kname='stddev', name='weight'):\n kern = tf.truncated_normal(shape=shape, stddev=stddev)\n if kname == \"he\":\n n = shape[0] * shape[1] * shape[2]\n stddev = math.sqrt(2.0 / n)\n kern = tf.truncated_normal(shape=shape,mean=0.0, stddev=stddev)\n\n return tf.Variable(kern, name=name)", "def _create_weight(self, shape, stddev=0.01, kname='stddev', name='weight'):\n kern = tf.truncated_normal(shape=shape, stddev=stddev)\n if kname == \"he\":\n n = shape[0] * shape[1] * shape[2]\n stddev = math.sqrt(2.0 / n)\n kern = tf.truncated_normal(shape=shape,mean=0.0, stddev=stddev)\n\n return tf.Variable(kern, name=name)", "def shape(self):\n return np.array([self.w, self.h])", "def __init__(self, size, parameters):\n\n self.weights = self.init_weights(size)\n self.alpha = parameters['alpha']\n self.epsilon = parameters['epsilon']\n self.gamma = parameters['gamma']\n self.value = 0.0 #np.random.random()", "def shape(self):\r\n return self._shape", "def initializer(shape, dist=\"zero\"):\n if dist == \"zero\":\n M = torch.zeros(shape, dtype=torch.double)\n elif dist == \"one\":\n M = torch.ones(shape, dtype=torch.double)\n elif dist == \"id\":\n M = torch.eye(shape, dtype=torch.double)\n elif dist == \"uniform\":\n M = torch.rand(shape, dtype=torch.double)\n elif dist == \"log_normal\":\n m = torch.distributions.log_normal.LogNormal(\n 0, 1)\n M = m.sample(shape).double()\n elif dist == \"gamma\":\n m = torch.distributions.gamma.Gamma(1, 1)\n M = m.sample(shape).double()\n else:\n print(\"The distribution you gave is unknown... 
Matrix initialized with zeros\")\n return M", "def weight_variable(shape, name='weight', mean=0.0, stddev=None, initializer=None, constrain=None, dtype=tf.float32):\n if stddev is None:\n raise ValueError('stddev not specified!')\n if initializer is None:\n initializer = tf.random_normal_initializer(mean=mean, stddev=stddev)\n weights = tf.get_variable(name, shape=shape, initializer=initializer, dtype=dtype)\n if constrain is not None:\n constrain(weights)\n return weights", "def shape(self):\n return None", "def __init__(self, shape: Tuple[int, ...]) -> None:\n self._shape = shape\n self._n_sites = int(np.prod(self._shape))", "def __init__(self, shape, seed, mu=0., theta=0.15, sigma=0.2):\n self.mu = mu * np.ones(shape)\n self.theta = theta\n self.sigma = sigma\n self.seed = random.seed(seed)\n self.reset()", "def __init__(self,\n size,\n name=None,\n distribution=None) :\n self.size = size\n self.name = name\n self.distribution = distribution", "def shape(self):\n return self.__shape", "def shape(self):\n return self.__shape", "def __init__(self, mean=0.0, sigma=1.0):\n super().__init__()\n self.mean = mean\n self.sigma = sigma\n self.hasInfiniteBound = True\n self.type = 'Normal'\n self.distType = 'Continuous'\n self.compatibleQuadrature.append('Hermite')\n self.compatibleQuadrature.append('CDF')\n #THESE get set in initializeDistribution, since it depends on truncation\n #self.preferredQuadrature = 'Hermite'\n #self.preferredPolynomials = 'Hermite'", "def __call__(self, *args, **kwargs):\n mu, sigma = self.condition(args, **kwargs)\n return tf.contrib.distributions.Normal(loc=mu, scale=sigma)", "def build(self,input_shape):\n\n self.w = self.add_weight(shape=(input_shape[-1],self.units),\n initializer='random_normal',\n trainable=True)\n self.b = self.add_weight(shape=(self.units,),\n initializer='random_normal',\n trainable=True)", "def glorot_init(shape):\n\n return tf.random_normal(shape=shape, stddev=tf.sqrt(2. / (shape[0] + shape[1])))", "def __call__(self, shape, dtype=dtypes.float32):\n return random_ops._random_walk(shape, self._nonlinearity, dtype,\n seed=self._seed)", "def random(self, lower, upper, shape):\n return np.random.uniform(lower, upper, shape)", "def random(self, lower, upper, shape):\n return np.random.uniform(lower, upper, shape)", "def shape(self):\n return self._shape", "def shape(self):\n return self._shape", "def _gen_policy_params(self, state: State) -> Tensor:\n ...", "def __init__(self, x_axis, y_axis, input_dimension, sigma, learning_rate, random_seed=3):\n # Creating a random generator for random values; for initializing weights\n self.random_generator = np.random.RandomState(random_seed)\n \n self.x_axis = x_axis\n self.y_axis = y_axis\n self.input_dimension = input_dimension\n self.sigma = sigma\n self.learning_rate = learning_rate\n self.weights = np.array([[[0 for x in range(self.input_dimension)] for x in range(self.x_axis)] for x in range(y_axis)], dtype=float)", "def weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.get_variable(\n 'weight', initializer=initial, regularizer=tf.nn.l2_loss)", "def create_weight_variable(shape):\n\n\tinitial = tf.truncated_normal(shape, stddev=0.1)\n\treturn tf.Variable(initial)", "def xavier_weight_init(shape):\n lim = np.sqrt(6. 
/ sum(shape))\n out = np.random.uniform(-lim, lim, shape)\n\n return out", "def glorot(self, shape, name=None):\n init_range = np.sqrt(6.0 / (shape[0] + shape[1]))\n initial = tf.random_uniform(shape, minval=-init_range, maxval=init_range, dtype=tf.float32)\n return tf.Variable(initial, name=name)", "def distribution(self, env):\n pass", "def __init__(self, dimensions):\n self.w = nn.Parameter(1, dimensions)", "def __init__(self, dimensions):\n self.w = nn.Parameter(1, dimensions)", "def __init__(self, dimensions):\n self.w = nn.Parameter(1, dimensions)", "def dimension(self):", "def sample(self, shape=(1,)):\n pass", "def distribution(self):\n \n #external_distribution serves both the purpose of external setting of distribution and the caching of distribution()\n if self.external_distribution:\n return self.external_distribution_array;\n else:\n energy_vector = []\n superset = self.generate_superset(0) \n \n for i in superset:\n state = self.ket(i)\n \n norm_squared = np.dot(state.T, state)\n \n if norm_squared > 0: #zero is appended at the end\n energy = np.dot(state.T, np.dot( self.epsilon, state))\n interaction = np.dot(state.T, np.dot( self.u, state))/2.0 #divide by two. Otherwise, <l r| U |l r > = U_LR + U_RL = 2U\n #print state, np.dot(self.u, state) \n #print interaction\n energy_vector.append( energy + interaction )\n \n energy_vector.insert(0, 0.0) \n probability = np.exp( np.multiply(-self.beta, energy_vector)) \n probability /= probability.sum() \n return probability", "def returnInputParameter():\n return DistributionsCollection()", "def initializeDistribution(self):\n if self.functionType == 'CDF':\n self._distribution = distribution1D.BasicMultiDimensionalInverseWeight(str(self.dataFilename), self.p,True)\n else:\n self._distribution = distribution1D.BasicMultiDimensionalInverseWeight(str(self.dataFilename), self.p,False)\n self.dimensionality = self._distribution.returnDimensionality()\n self.lowerBound = [self.returnLowerBound(dim) for dim in range(self.dimensionality)]\n self.upperBound = [self.returnUpperBound(dim) for dim in range(self.dimensionality)]", "def __init__(self, shape):\n\n self.shape = shape", "def get_random_points(self, shape, device=\"gpu0\"):\n rand_grid = torch.cuda.FloatTensor(shape).to(device).float()\n rand_grid.data.uniform_(0, 1)\n return Variable(rand_grid)", "def glorot(shape, name=None, scale=1.):\n init_range = np.sqrt(6.0/(shape[-1]+shape[-2])) * scale\n initial = tf.random_uniform(shape, minval=-init_range, maxval=init_range, dtype=tf.float32)\n return tf.Variable(initial, name=name)", "def __call__(self, shape, rate, size=None, **kwargs):\n return super().__call__(shape, 1.0 / rate, size=size, **kwargs)" ]
[ "0.66089565", "0.61828715", "0.6138457", "0.61047345", "0.60195476", "0.6013357", "0.59687215", "0.5935368", "0.5908499", "0.589005", "0.5882569", "0.5881735", "0.58765745", "0.5872159", "0.5871932", "0.58504355", "0.58504355", "0.58361685", "0.58349097", "0.5757327", "0.5756181", "0.5756181", "0.5756181", "0.5753932", "0.572958", "0.5727115", "0.5721914", "0.57065356", "0.5696792", "0.56860715", "0.5680153", "0.56673115", "0.56646526", "0.5649722", "0.5647666", "0.5643837", "0.5638611", "0.5638611", "0.5638611", "0.5638611", "0.5638611", "0.5638611", "0.5638611", "0.5638611", "0.5638611", "0.5638611", "0.56176484", "0.561548", "0.5609259", "0.56080455", "0.56071377", "0.55940616", "0.5592156", "0.5591621", "0.55857474", "0.55688024", "0.55648625", "0.5561083", "0.55467415", "0.5543002", "0.55247384", "0.55247384", "0.55238515", "0.55212307", "0.55208015", "0.55206054", "0.551924", "0.5516699", "0.5504002", "0.5501944", "0.5500203", "0.54955775", "0.54955775", "0.5494216", "0.5490512", "0.54870445", "0.5485774", "0.5485153", "0.5481985", "0.5481985", "0.5478926", "0.5478926", "0.5477738", "0.5474592", "0.54741114", "0.5473859", "0.54735243", "0.54724723", "0.5468225", "0.546491", "0.546491", "0.546491", "0.5453906", "0.54525113", "0.5422874", "0.5422822", "0.5422345", "0.54221374", "0.54110056", "0.5410211", "0.54039913" ]
0.0
-1
Processor File Import statements
def get_imports(self) -> str: imports = [ "_ = require('lodash');", "stream = require('stream');", f"const hookRegister = require('./{settings.ARTILLERY_HOOK_FILE}').hookRegister;", f"hook = require('./{settings.ARTILLERY_LIB_FOLDER}/hooks').hook;", f"utils = require('./{settings.ARTILLERY_LIB_FOLDER}/providers');", f"settings = require('./{settings.ARTILLERY_LIB_FOLDER}/settings');", f"StatsCollector = require('./{settings.ARTILLERY_LIB_FOLDER}/statsCollector').StatsCollector;", f"profiles = require('./{settings.ARTILLERY_LIB_FOLDER}/profiles').profiles;", f"influx = require('./{settings.ARTILLERY_LIB_FOLDER}/influx').client;", ] return "\n".join(imports)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def importer():\n pass", "def file_import(self):\r\n\r\n try:\r\n self.process_file_import()\r\n except InputError as ex:\r\n print(ex)\r\n self.file_import()", "def test_import_process(self):\r\n good_file = self._get_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._chrome_data_test()", "def test_import_process(self):\r\n good_file = self._get_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._firefox_data_test()", "def executeImports():\n\tglobal cactusConfig\n\tinputPath = cactusConfig['inputPath']\n\toutputPath = cactusConfig['outputPath']\n\n\t#find all html files in input path\n\tfor filename in glob.glob(os.path.join(inputPath, '*.html')):\n\t\tif os.path.basename(filename) not in cactusConfig:\n\t\t\timportIntoFile(filename, outputPath + os.path.basename(filename))", "def test_import_process(self):\r\n good_file = self._get_google_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._google_data_test()", "def test_import_process(self):\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._delicious_data_test()", "def test_import_process(self):\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._delicious_xml_data_test()", "def import_file(self, filename, **kwargs):\n raise NotImplementedError", "def add_collector_imports(self):\n with open(self.filename, \"r+\") as code_file:\n content = code_file.read()\n if not content.startswith(self.IMPORT_COLLECTOR_LINE):\n logger.debug(\n \"Adding import lines, please do not remove while generating yml.\"\n )\n code_file.seek(0, 0)\n code_file.write(\n f\"{self.IMPORT_COLLECTOR_LINE}\\n{self.EXPLICIT_DECLARATION_IMPORTS_LINE}\\n\\n{content}\"\n )", "def doImport(self,textFile):\n self.loadText(textFile)\n self.getBooks()\n #self.copyBooks()\n self.genLibData()\n self.genLibCells()\n self.sortRecords()", "def import_db(import_file):\n import_data(import_file)", "def imports(self):\n line = self.line.strip()\n if line.startswith('im'):\n if line.startswith('import') is False:\n return True\n elif line == '':\n return True", "def on_import(self, function_graph, node, reason):", "def import_module(self, location, name):", "def process_file(file_name):\n pass # delete this line and replace with your code here", "def import_(self, filepath):\n self.validate(filepath)\n self._start(filepath)", "def handle(self, *args, **options):\n #print \"OPTIONS\", options\n #print \"ARGS:\", args\n\n if len(args) < 1:\n raise CommandError(\n \"You must specify the static data dump file to import. 
This is \"\n \"probably something in the pattern of *.db3\")\n\n try:\n sqlite_file = args[0]\n check_for_eve_db(sqlite_file)\n\n if len(args) is 1:\n print \"No table names specified, importing all.\"\n util.run_importers(util.IMPORT_LIST, sqlite_file)\n else:\n specified_importers = get_importer_classes_from_arg_list(args)\n start_at_import = options.get('start_at_import')\n print \"Importing: %s\" % args[1:]\n\n include_deps = options.get('include_deps')\n if include_deps and not start_at_import:\n print \"Including dependencies.\"\n\n if start_at_import:\n # User wishes to start the import process at a specific\n # table name. Import the specified importer, and\n # everything after it.\n specified_importers = get_importers_for_start_at_import(specified_importers)\n\n util.run_importers(specified_importers, sqlite_file,\n include_deps=include_deps)\n except KeyboardInterrupt:\n print \"Terminating early...\"\n exit_with_succ()", "def prepare_imports(self, extend):\n tmp = re.match(r'.+/(.+).js', extend)\n if tmp:\n return self.imports(what=tmp.group(1), wherefrom=extend)\n raise GenerateError('Can not extract imports from {}'.format(extend))", "def run_import(self, expanded, unexpanded) : \n\t\tif not unexpanded :\n\t\t\treturn self.errormessage(\"Needs some filenames to import\")\n\t\tif not self.HasPerms(self.__context, 'Import/Export objects') :\n\t\t\treturn -1\n\t\tfor filename in unexpanded :\n\t\t\tself.__context.manage_importObject(filename)\n\t\t\tself.htmlmessage('%s imported successfully' % filename)", "def _analyse_stmt_Import(self, statement: ast.Import, *, next: CFNode) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def testPreProcessedImport(self):\n a = 'a.mojom'\n self.WriteFile(a, \"\"\"\\\n module a;\n struct Bar {};\"\"\")\n self.ParseMojoms([a])\n\n b = 'b.mojom'\n self.WriteFile(\n b, \"\"\"\\\n module b;\n import \"a.mojom\";\n struct Foo { a.Bar bar; };\"\"\")\n self.ParseMojoms([b])", "def test_import_appending():\n c = CodeChunk('import moda\\nimport modb\\nimport modc', imports=['modc', 'modd'], programmingLanguage='python')\n\n dc = DocumentCompiler()\n dc.compile(c)\n\n assert len(c.imports) == 4\n assert 'moda' in c.imports\n assert 'modb' in c.imports\n assert 'modc' in c.imports\n assert 'modd' in c.imports", "def LoadBatch(filename):", "def handle(self, *args, **options):\n run_tximport()", "def loadInputFiles(self):\n\t\tfor filename in self.input_filename_list:\n\t\t\tfor module in self.modules:\n\t\t\t\tmodule.Add(filename)", "def main():\r\n\r\n #Create a list of all files that have the GPX file format\r\n fileList = glob.glob(os.path.join(inFolder,\"*.{0}\".format(inFormat)))\r\n\r\n #Create a connection to PostGIS database\r\n pgConn = createPostgisConnection(dbFormat, dbHost, dbName, dbSchema, dbUser, dbPWD)\r\n\r\n #Process each *listed* layer type from a GPS file\r\n for f in fileList:\r\n importGPX(f, gpxImportLayers, pgConn)", "def gen_import(self) -> str:\n as_name = self.exported_parts[-1]\n if as_name == self.imported_name:\n import_line = 'from {} import {}'.format(self.imported_module,\n self.imported_name)\n else:\n import_line = 'from {} import {} as {}'.format(self.imported_module,\n self.imported_name,\n as_name)\n return import_line", "def importOverride(name, glbls={}, lcls={}, fromlist=[], level=-1):\n module = None\n # First try the system __import__ first\n try:\n module = BUILTIN_IMPORT(name, glbls, lcls, fromlist, level)\n # You cannot log in this namespace, due to an infinite regression issue, 
so don't try\n # Although I am thinking that disabling the import override, logging, and re enabling it would work\n except ImportError as error:\n # Next we will try to import them as a *.cc\n # First we need to determine if it exists\n # Check the folders in CC_PATH\n for path in CC_PATH:\n # If the path exists\n if os.path.exists(path):\n # And the path/<module name>.cc exists\n if os.path.exists(os.path.join(path, name+'.cc')):\n # We will use the first one we find\n # No the magic happens, we will first create a temp file\n temp_file = tempfile.TemporaryFile()\n # Now we add the 'magic' to the top of the temp file\n temp_file.write(MAGIC)\n # Now open the file being imported\n module_file = open(os.path.join(path, name+'.cc'), 'r')\n # Read the module contents into the temp file\n temp_file.write(module_file.read())\n module_file.close()\n # Now rewind the temp file so it can be read from the beginning\n temp_file.seek(0)\n # Now import the module\n try:\n module = imp.load_module(name, temp_file, path, ('.cc', 'r', imp.PY_SOURCE))\n except Exception as exception:\n logError(sys.exc_info(), log.error, 'Error importing control code file %s.cc:' % name, MAGIC_LINENO)\n finally:\n temp_file.close()\n log.debug('Module %s loaded from %s using the special .cc import' % (name, path))\n # If module is still None, we didn't find it and we should raise the original error\n if not module:\n raise error\n return module", "def import_file(self) -> pulumi.Input['FileMetadataArgs']:\n return pulumi.get(self, \"import_file\")", "def _CMD_IMPORT(self, file_name):\n # reset inspector:\n # self.inspector = DataInspectorRecord()\n\n ext = file_name.split('.')[-1]\n if ext == 'mat':\n # self.model.from_json_dict(buff)\n self.model.from_mat_file(file_name)\n\n elif ext == 'json':\n buff = ''\n with open(file_name, 'rb') as f:\n buff = f.read()\n model = json.loads(buff)\n self.model.from_json_dict(model)\n\n else:\n raise DataExplorerError('Unsupported file format: {}'.format(ext))\n\n # update initial selection - first row:\n if len(self.model.data_list) > 0:\n self.handle_row_select([self.model.data_list[0]])", "def writeImports2File(self, file, indent = \" \"):\r\n # import each entity and its associated graphical file\r\n for obj in self.listNodes.keys():\r\n file.write(indent+\"from \"+obj+\" import \"+obj+\"\\n\")\r\n if not obj[0:4] == \"ASG_\":\r\n file.write(indent+\"from graph_\"+obj+\" import graph_\"+obj+\"\\n\")", "def apply(self, path: ImportPath) -> None:\n pass", "def import_from_file(jamsite, source='jammers.csv', fieldnames=None):\n\t# import jammers.csv\n\twith open(source) as csvfile:\n\t\tjamsite.mergeinsert( import_jammers(csvfile, fieldnames=fieldnames) )", "async def async_step_import(self, import_data: dict[str, str]) -> FlowResult:\n import_source = import_data.pop(\"import_source\")\n if import_source == \"geography_by_coords\":\n return await self.async_step_geography_by_coords(import_data)\n return await self.async_step_geography_by_name(import_data)", "def importfile(path):\n path = getpath(path, custom=True)\n assert _os.path.isfile(path) == True\n\n file_handler = _SourceFileLoader(*path.splitpath())\n return file_handler", "def import_and_index_resolutions():\n sys.path.append(os.path.abspath('import_scripts'))\n import import_resolutioninstance\n import_resolutioninstance.ResolutionImporter().load_items()", "def main(input_filepath, import_url):\n # Logging set up\n start = time()\n logger = logging.getLogger(__name__)\n log_import = logger.getChild('import_files')\n 
logger.info('Importing from raw data')\n \n # Dataset variables\n db_engine = create_engine(import_url, client_encoding='utf8')\n csvs = get_twitter_files(input_filepath)\n \n # Upload data\n log_import.info('Starting to upload {} csvs...'.format(len(csvs)))\n with click.progressbar(csvs, label='CSV Imports: ') as csv_progress:\n for csv in csv_progress:\n import_file(csv, db_engine)\n\n log_import.info('{} files done in {:.2f} secs.'.format(len(csvs), time() - start))", "def action_import(self):\n ctx = self._context\n \n data = base64.b64decode(self.data)\n file_input = cStringIO.StringIO(data)\n file_input.seek(0)\n reader_info = []\n if self.delimeter:\n delimeter = str(self.delimeter)\n else:\n delimeter = ','\n reader = csv.reader(file_input, delimiter=delimeter,\n lineterminator='\\r\\n')\n try:\n reader_info.extend(reader)\n except Exception:\n raise exceptions.Warning(_(\"Not a valid file!\"))\n keys = reader_info[0]", "def import_all():\n import theory", "def ImportParsers(cls, import_dir):\n sys.path.append(import_dir)\n cls.elf_parser = importlib.import_module(\n \"vts.utils.python.library.elf_parser\")\n cls.vtable_parser = importlib.import_module(\n \"vts.utils.python.library.vtable_parser\")", "def pre_process(in_path):\n in_string = open(in_path, 'r').read()\n multi_line = '/\\\\*[^*]*\\\\*+(?:[^/*][^*]*\\\\*+)*/'\n\n # header\n description = re.search(multi_line, in_string).group(0)\n unit = re.search('\\\\n\\\\s*// unit .*', in_string).group(0)\n imports = re.findall('\\\\n\\\\s*// import .*', in_string)\n import_string = ''\n for i in imports:\n import_string += resolve_import(i.strip()[10:], in_path.parent)\n\n use_string = ''\n uses = re.findall('\\\\n\\\\s*// uses .*', in_string)\n for u in uses:\n use_string += 'uses ' + u.strip()[8:] + ';\\n'\n if use_string != '':\n use_string = '\\n\\n' + use_string\n\n header = '{' + description[2:-2] + '}\\n\\nunit ' + unit.strip()[8:] + ';' + use_string + '\\n\\n'\n\n # main part\n in_string_list, delphi_string_list = split(import_string + '\\n\\n' + in_string)\n\n return header, in_string_list, delphi_string_list", "def loadData(self, file):\n self.data = batchImport(file, self.ps)", "def _imports(graph: mapry.Graph, py: mapry.Py) -> str:\n # pylint: disable=too-many-branches\n # pylint: disable=too-many-statements\n stdlib_block = {'import typing'}\n\n third_party_block = set() # type: Set[str]\n\n if mapry.needs_type(a_type=graph, query=mapry.Path):\n if py.path_as == 'str':\n pass\n elif py.path_as == \"pathlib.Path\":\n stdlib_block.add(\"import pathlib\")\n else:\n raise NotImplementedError(\n \"Unhandled path_as: {!r}\".format(py.path_as))\n\n if mapry.needs_type(a_type=graph, query=mapry.TimeZone):\n if py.timezone_as == 'str':\n pass\n\n elif py.timezone_as == 'pytz.timezone':\n third_party_block.update(\n ('import pytz', 'import pytz.exceptions # type: ignore'))\n\n else:\n raise NotImplementedError(\n 'Unhandled timezone_as: {}'.format(py.timezone_as))\n\n # yapf: disable\n if any(mapry.needs_type(a_type=graph, query=query)\n for query in\n (mapry.Date, mapry.Time, mapry.Datetime, mapry.Duration)):\n # yapf: enable\n stdlib_block.add('import datetime')\n\n if mapry.needs_type(a_type=graph, query=mapry.Map):\n stdlib_block.add(\"import collections\")\n\n if len(graph.classes) > 0:\n stdlib_block.add(\n 'import collections'\n ) # needed for the initialization of class registries\n\n ##\n # Needs regex?\n ##\n\n import_re = False\n for a_type, _ in mapry.iterate_over_types(graph=graph):\n if isinstance(a_type, 
(mapry.String, mapry.Path)) and a_type.pattern:\n import_re = True\n break\n\n if isinstance(a_type, mapry.Duration):\n import_re = True\n break\n\n for cls in graph.classes.values():\n if cls.id_pattern is not None:\n import_re = True\n break\n\n if import_re:\n stdlib_block.add(\"import re\")\n\n ##\n # First party\n ##\n\n first_party_block = {\n 'import {}'.format(py.module_name),\n 'import {}.parse'.format(py.module_name)\n }\n\n block_strs = [] # type: List[str]\n if len(stdlib_block) > 0:\n block_strs.append('\\n'.join(sorted(stdlib_block)))\n\n if len(third_party_block) > 0:\n block_strs.append('\\n'.join(sorted(third_party_block)))\n\n if len(first_party_block) > 0:\n block_strs.append('\\n'.join(sorted(first_party_block)))\n\n return '\\n\\n'.join(block_strs)", "def _get_import_step(self, cr, uid, external_session, context=None):\n return 100", "def run_import_tasks(import_tasks):\n # import the given import tasks\n asset_tools.import_asset_tasks(import_tasks)\n\n # return the imported assets as paths\n return import_task.get_editor_property(\"imported_object_paths\")", "def first_import(file, list):\n\n list.append(file)\n print(\"Path added to list\")", "def import_bin(self, bin_file):\n self.__run_import_script(file=bin_file, is_bin=True)", "def process_files(args):\n coll = build_collection(args.data_path, args.include_online_only)\n\n for import_file in args.imports:\n _, ext = os.path.splitext(import_file)\n import_serializer_class = ser_interface.MtgSsmSerializer \\\n .by_extension_and_format(ext, args.import_format)\n import_serializer = import_serializer_class(coll)\n print('Importing counts from import: %s' % import_file)\n import_serializer.read_from_file(import_file)\n\n _, ext = os.path.splitext(args.collection)\n serializer_class = ser_interface.MtgSsmSerializer.by_extension_and_format(\n ext, args.format)\n serializer = serializer_class(coll)\n\n if os.path.exists(args.collection):\n print('Reading counts from existing file.')\n serializer.read_from_file(args.collection)\n backup_name = args.collection + '.bak-{:%Y%m%d_%H%M%S}'.format(\n datetime.datetime.now())\n print('Moving existing collection to backup: %s' % backup_name)\n shutil.move(args.collection, backup_name)\n\n print('Writing collection to file.')\n serializer.write_to_file(args.collection)", "def _analyse_stmt_ImportFrom(\n self, statement: ast.ImportFrom, *, next: CFNode\n ) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def _import_custom(self, custom_modules):\n for filter_module in custom_modules:\n info('Loading {}'.format(filter_module))\n funs = module_utils.get_all_functions(filter_module)\n for fun_name, fun in funs.items():\n if fun_name.startswith('function'):\n import_name = '_'.join(fun_name.split('_')[1:])\n debug('Adding function {}'.format(import_name))\n self._functions[import_name] = fun\n elif fun_name.startswith('filter'):\n import_name = '_'.join(fun_name.split('_')[1:])\n debug('Adding filter {}'.format(import_name))\n self._filters[import_name] = fun", "def file_importer(relPath, methodS = \"r\", encodeS = None):\n import os\n scriptDir = os.path.dirname(os.path.realpath(__file__)) # absolute dir this script is in\n absFilePath = os.path.join(scriptDir, relPath)\n # print(encodingS)\n inpF = open(absFilePath, methodS, encoding = encodeS)\n return inpF", "def test_regressions_imports(self):\n issue = {\n \"number\": \"main/main\",\n \"contract\": \"C\",\n \"txlimit\": 1,\n \"in_directory\": \"imports_issue\",\n }\n self._simple_cli_run(\n 
f'{issue[\"number\"]}.sol',\n contract=issue[\"contract\"],\n tx_limit=issue[\"txlimit\"],\n in_directory=issue.get(\"in_directory\"),\n )", "def process_file_import(self):\r\n directory_csv = [file for file in os.listdir() if file.endswith(\".csv\")]\r\n self.print_options(directory_csv,2)\r\n\r\n \"\"\"\r\n Asks for user input. Then imports csv file based on user's input.\r\n \"\"\"\r\n n = (input(\"Which csv would you like to import? Please input the corresponding integer:\"))\r\n\r\n try:\r\n n = int(n)\r\n except:\r\n pass\r\n\r\n if isinstance(n, int) is True and n <= len(directory_csv):\r\n self.population.import_csv(directory_csv[int(n)-1])\r\n print(self.population)\r\n self.file_import()\r\n elif n == 'q':\r\n quit()\r\n elif n == 'b':\r\n self.menu_page()\r\n else:\r\n raise InputError(\"\\nPlease input a valid digit, 'q' or 'b'\")", "def _import_bh_(self):", "def __init__(__self__,\n resource_name: str,\n args: FileImportArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def load_assemble_file(task_file):\n return imp.load_source('assemblefile', task_file)", "def __init__(__self__, *,\n content_type: pulumi.Input[Union[str, 'FileImportContentType']],\n import_file: pulumi.Input['FileMetadataArgs'],\n ingestion_mode: pulumi.Input[Union[str, 'IngestionMode']],\n resource_group_name: pulumi.Input[str],\n source: pulumi.Input[str],\n workspace_name: pulumi.Input[str],\n file_import_id: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"content_type\", content_type)\n pulumi.set(__self__, \"import_file\", import_file)\n pulumi.set(__self__, \"ingestion_mode\", ingestion_mode)\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n pulumi.set(__self__, \"source\", source)\n pulumi.set(__self__, \"workspace_name\", workspace_name)\n if file_import_id is not None:\n pulumi.set(__self__, \"file_import_id\", file_import_id)", "def importIntoFile(filename, outputFile):\n\t#grab contents of current file\n\tcurrFile = open(filename).read().splitlines()\n\n\t#export file\n\twFile = open(outputFile, 'w+')\n\n\tprint \"\\tImporting into \" + outputFile + \":\\n\\t\\t\",\n\n\t#parse and write\n\tskipWrite = False\n\tspaceAppend = \"\"\n\tfor line in currFile:\n\t\tif line.find(importStrL) != -1:\n\t\t\tskipWrite = True\n\t\t\twFile.write(line)\n\t\t\t#handling indentation and space consistency\n\t\t\tif re.match(r\"\\s+\", line) == None:\n\t\t\t\tspaceAppend = \"\"\n\t\t\telse:\n\t\t\t\tspaceAppend = re.match(r\"\\s+\", line).group()\n\t\t\tline = line.replace(importStrL, \"\").replace(importStrR, \"\").strip()\n\t\t\twFile.write('\\n')\n\t\t\t#import lines, matching indentation\n\t\t\tfor importLine in cactusImports[line]:\n\t\t\t\twFile.write(spaceAppend + importLine + '\\n')\n\t\t\tprint line,\n\t\telse:\n\t\t\tif line.find(endStr) != -1:\n\t\t\t\tskipWrite = False\n\t\t\tif not skipWrite:\n\t\t\t\twFile.write(line+'\\n')\n\tprint '\\n'\n\twFile.close()", "def visit_Import(self, node: ast.Import) -> None:\n self.imports.append(node)\n\n # absolute imports - ignore indentation and just get all module names\n for name in node.names:\n self.modules.append(name.name)", "def test_get_imports(self):\n pass", "def _import(file_path):\n proxy_factory.import_proxies(open(file_path, 'r'))", "def import_task_files(self, session, task):\n self._fetch_info(task.imported_items(), False, True)", "def process_input_files(inputs):\n for ifile in inputs:\n with open(ifile) as fin:\n exec(compile(fin.read(), ifile, 'exec'))", "def run_import(input_path: str, output_path: 
str, typ: str) -> str:\n cmd = ''\n if typ.startswith(\"FeatureTable\"):\n if not input_path.endswith('biom'):\n cur_biom = '%s.biom' % splitext(input_path)[0]\n cmd += 'biom convert \\\\\\n'\n cmd += ' -i %s \\\\\\n' % input_path\n cmd += ' -o %s \\\\\\n' % cur_biom\n cmd += ' --table-type=\"OTU table\" \\\\\\n'\n cmd += ' --to-hdf5\\n\\n'\n cmd += 'qiime tools import \\\\\\n'\n cmd += ' --input-path %s \\\\\\n' % cur_biom\n cmd += ' --output-path %s \\\\\\n' % output_path\n cmd += ' --type \"FeatureTable[Frequency]\"\\n'\n else:\n cmd += 'qiime tools import \\\\\\n'\n cmd += ' --input-path %s \\\\\\n' % input_path\n cmd += ' --output-path %s \\\\\\n' % output_path\n cmd += ' --type \"FeatureTable[Frequency]\"\\n'\n else:\n cmd += 'qiime tools import \\\\\\n'\n cmd += ' --input-path %s \\\\\\n' % input_path\n cmd += ' --output-path %s \\\\\\n' % output_path\n cmd += ' --type \"%s\"\\n' % typ\n return cmd", "def detect_import(self):\n if self.contains_match(CONTAINS_IMPORT): self.es6import = True\n elif self.contains_match(CONTAINS_REQUIRE): self.es6import = False\n else: self.es6import = self.get_project_pref('detect_prefer_imports')", "def importShaders(self):\n\t\tif self.shaderPath.exists:\n\t\t\tself.shaderPath.imp()", "def _general_import(self):\n # Lookups for method names and expected import-failure errors\n importers = {\n SourceTypes.BytesPlaintext: self._import_plaintext_bytes,\n SourceTypes.BytesZlib: self._import_zlib_bytes,\n SourceTypes.FnamePlaintext: self._import_plaintext_fname,\n SourceTypes.FnameZlib: self._import_zlib_fname,\n SourceTypes.DictJSON: self._import_json_dict,\n }\n import_errors = {\n SourceTypes.BytesPlaintext: TypeError,\n SourceTypes.BytesZlib: (zlib_error, TypeError),\n SourceTypes.FnamePlaintext: (OSError, TypeError, UnicodeDecodeError),\n SourceTypes.FnameZlib: (OSError, TypeError, zlib_error),\n SourceTypes.DictJSON: (ValidationError),\n }\n\n # Attempt series of import approaches\n # Enum keys are ordered, so iteration is too.\n for st in SourceTypes:\n if st not in importers:\n # No action for source types w/o a handler function defined.\n continue\n\n if self._try_import(importers[st], self._source, import_errors[st]):\n self.source_type = st\n return\n\n # Nothing worked, complain.\n raise TypeError(\"Invalid Inventory source type\")", "def main():\n\targuments_sent = sys.argv\n\tif len(arguments_sent) > 1:\n\t\tfile_path = arguments_sent[1]\n\t\tprocess_based_on_type(file_path)", "def test_CL13_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import CL13 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.CL13\", test]", "def import_various(context):\n # Only run step if a flag file is present\n if context.readDataFile('collective.opensearch-default.txt') is None:\n return\n logger = context.getLogger('collective.opensearch')\n site = context.getSite()\n pass", "def test_CL04_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. 
import CL04 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.CL04\", test]", "def process_modules(self) -> typing.NoReturn:\n\t\tfor moduleName in self.moduleNameSet:\n\t\t\tdetected_encoding = detect_encoding(moduleName)\n\n\t\t\tprint(f\"Processing {moduleName} ({detected_encoding})\")\n\n\t\t\twith open(moduleName, 'r+', encoding=detected_encoding) as fileStream:\n\t\t\t\t# Store the content of the file\n\t\t\t\tfileContent: str = fileStream.read()\n\t\t\t\t# Sets the file's current position at the offset, the position of the read/write pointer within the file\n\t\t\t\tfileStream.seek(0, 0)\n\t\t\t\t# Truncates the file's size\n\t\t\t\tfileStream.truncate()\n\n\t\t\t\t# Process regex patterns\n\t\t\t\tfor regexDict in regexDictList:\n\t\t\t\t\tfileContent = self.process_function(regexDict, fileContent)\n\n\t\t\t\t# Rewrite the processed content of the file\n\t\t\t\tfileStream.write(fileContent)", "def import_control_section(self, filename_suffix='run'):\n pass", "async def async_step_import(self, import_info: dict[str, Any]) -> FlowResult:\n import_info.pop(CONF_MONITORED_CONDITIONS, None)\n import_info.pop(CONF_NICS, None)\n import_info.pop(CONF_DRIVES, None)\n import_info.pop(CONF_VOLUMES, None)\n return await self.async_step_user(import_info)", "def test_import_order():\n file_paths = glob.iglob('*/*.py')\n for file_path in file_paths:\n with open(file_path, 'r') as file_obj:\n file_contents = file_obj.read()\n new_file_contents = isort.code(file_contents)\n fail_msg = '{} imports are not compliant'.format(\n file_path)\n yield case.assertEqual, new_file_contents, file_contents, fail_msg", "def test_import_with_semaphore():\n c = CodeChunk('import moda\\nimport modb', imports=['modc', 'modd', ''], programmingLanguage='python')\n\n dc = DocumentCompiler()\n dc.compile(c)\n\n assert len(c.imports) == 3\n assert 'moda' not in c.imports\n assert 'modb' not in c.imports\n assert 'modc' in c.imports\n assert 'modd' in c.imports\n assert '' in c.imports", "def importer_process(import_id):\r\n trans = transaction.begin()\r\n imp = ImportQueueMgr.get(import_id)\r\n import_id = imp.id\r\n\r\n # Log that we've scheduled it\r\n logger.info(\"IMPORT: SCHEDULED for {0}.\".format(imp.username))\r\n # We need to mark that it's running to prevent it getting picked up\r\n # again.\r\n imp.mark_running()\r\n trans.commit()\r\n importer_process_worker.delay(import_id)", "def pre_processor(self):", "def xocImport(self, name, *args, **kwargs):\n trace(\"Import invoked:\", name, kwargs.keys())\n if name in sys.builtin_module_names:\n trace(\"Loading builtin module\", name)\n return self.load_module(name)\n else:\n return self.oldImport(name, *args, **kwargs)", "def test_taskfile_import(monkeypatch, modpath):\n monkeypatch.setattr(loadlimit.importhook, 'lstaskfiles', fake_lstaskfiles)\n monkeypatch.setattr(loadlimit.importhook, 'SourceFileLoader',\n FakeSourceFileLoader)\n\n taskfiles = ['a_{}.py'.format(i) for i in range(10)]\n names = [splitext(n)[0] for n in taskfiles]\n pypath = ['{}.{}'.format(modpath, n) for n in names]\n randpath = choice(pypath)\n\n assert modpath not in sys.modules\n assert all(not p.startswith(modpath) for p in sys.modules)\n\n sys.meta_path.append(TaskImporter(*taskfiles))\n taskfile = import_module(randpath)\n\n expected = set(pypath) | set([modpath])\n result = set(p for p in sys.modules if p.startswith(modpath))\n\n assert modpath in sys.modules\n assert result == expected\n assert taskfile.TEST == randpath", "def visit_ImportFrom(self, 
node: Any): # noqa: N802\n # print(\"import from:\", node, dir(node))\n for alias in node.names:\n self.nodes[\"imports_from\"][node.module].append(alias.name)\n self.generic_visit(node)", "def import_scene(file_path):\n\n pass", "def import_migration(import_type: migration.Migration, file_name: str, session: Session = Depends(generate_session)):\n file_path = app_dirs.MIGRATION_DIR.joinpath(import_type.value, file_name)\n return migration.migrate(import_type, file_path, session)", "def load_file(*args, **kwargs): # real signature unknown\n pass", "def _import(self, _import):\n\n self.__import = _import", "def preprocess_main():", "def cg_import(all_files, metadata, session):\n remote_directory = upload(all_files, session)\n buildinfo = session.CGImport(metadata, remote_directory)\n if not buildinfo:\n raise RuntimeError('CGImport failed')\n return buildinfo", "def add_python_files(self):", "def __init__(self,\n fileName,\n realFileName=None,\n prequelFileName=None,\n preErrorMessages=(), # Type to be checked\n doNotReadFiles=False,\n allowedFeatures=()):\n #type: (Text, Optional[Text], Optional[Text], List[Any], bool, List[Text]) -> None\n\n assert fileName is not None\n\n self.fileName=fileName #type: Text\n \"\"\" The filename as given when creating the source file\"\"\"\n\n self.prequelFileName=(\n fileName if prequelFileName is None\n else prequelFileName\n )\n \"\"\" \n The named of the unprocessed file or the filename.\n This is useful when a preprocessor is used. \n \"\"\"\n\n self.realFileName=(\n None if doNotReadFiles # filled later\n else (\n fileName if realFileName is None\n else realFileName))\n \"\"\" \n The name of the actual file name that is parsed.\n This is almost never used so don't use it unless\n you know what you are doing. \n \"\"\"\n\n # This should be after the definition of\n # filenames\n super(SourceFile, self).__init__(parents=[])\n\n\n if len(preErrorMessages) >= 1:\n for msg in preErrorMessages:\n Issue(\n origin=self,\n level=Levels.Error,\n message=msg\n )\n return\n\n self.sourceLines=[] #type: List[Text]\n \"\"\"\n The source lines of the 'logical' file.\n It will be the same as realSourceLines \n if not isBasedInHiddenFile. \n Filled by doReadFile but if doReadFile raise \n an exception, the sourceLines will still be of the\n appropriate type (no lines)\n The caller must call doReadFile explictely\n if doNotReadFiles.\n \"\"\"\n\n self.realSourceLines=[] #type: List[Text]\n \"\"\"\n The source lines of the 'real' file.\n It will be the same as sourceLines \n if not isBasedInHiddenFile. 
\n Filled by doReadFile but if doReadFile raise \n an exception, the sourceLines will still be of the\n appropriate type (no lines)\n The caller must call doReadFile explictely\n if doNotReadFiles.\n \"\"\"\n\n self.allowedFeatures=allowedFeatures #type: List[Text]\n \"\"\"\n A list of feature names that could be issued\n in the parser.\n \"\"\"\n\n\n\n\n if not doNotReadFiles:\n self.doReadFiles(\n logicalFileName=self.fileName,\n realFileName=self.realFileName)", "def start_import(data_import):\n\tdata_import = frappe.get_doc(\"Data Import Beta\", data_import)\n\ti = Importer(data_import.reference_doctype, data_import=data_import)\n\treturn i.import_data()", "def importData():\n #importChallengeDataToDB()\n importTrendingDataToDB()", "def parse_source_file(self, filepath):\n raise NotImplementedError('Not Implemented')", "def test_imports():\n from tg_utils import admin\n from tg_utils import checks\n from tg_utils import compressor_filters\n from tg_utils import email\n from tg_utils import files\n from tg_utils import hashmodels\n from tg_utils import lock\n from tg_utils import managers\n from tg_utils import mixins\n from tg_utils import models\n from tg_utils import profiling\n from tg_utils import signals\n from tg_utils import uuid\n from tg_utils import decorators", "def test_dupe_imports(self):\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._delicious_data_test()", "def test_dupe_imports(self):\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # Now let's do some db sanity checks.\r\n self._delicious_xml_data_test()", "def module_file(module):\n ...", "def import_xforms(abcfile, transform_names, parent_under, update):\n archive = cask.Archive(abcfile)\n update_data =[]\n\n for tr in transform_names:\n\n data = {}\n data['transform'] = tr\n data['dag_path']= get_future_dag_path(tr, parent_under, archive)\n data['exists'] = cmds.objExists(data['dag_path'])\n\n if not data['exists']:\n update_data.append(data)\n elif data['exists'] and update:\n update_data.append(data)\n\n if get_previously_imported_transforms(abcfile, parent_under) == []:\n\n # this doesnt use the -ct and -crt flags, which will cause the import to fail if root nodes are not present\n if update and update_data != []:\n cmd = 'AbcImport \"%s\" -d -rpr \"%s\" -ft \"%s\" -eft \"Shape\"' % (abcfile, parent_under, ' '.join( [ i['transform'] for i in update_data ] ))\n\n try:\n mel.eval(cmd)\n MGlobal.displayInfo(cmd)\n except Exception as e:\n message = \"Error running import transforms : %s\" % e\n MGlobal.displayError(message)\n return\n\n # conntect type AbcImport\n if update and update_data != []:\n cmd = 'AbcImport \"%s\" -d -rpr \"%s\" -ft \"%s\" -ct \"%s\" -crt -eft \"Shape\"' % (abcfile, parent_under, ' '.join( [ i['transform'] for i in update_data ] ), parent_under)\n\n try:\n mel.eval(cmd)\n MGlobal.displayInfo(cmd) \n except Exception as e:\n message = \"Error running import transforms : %s\" % e\n MGlobal.displayError(message)", "def test_process_file_import(self):\n\n new_file_proc = FileImportForm()\n res = new_file_proc.process_file('x')\n\n self.assertTrue(isinstance(res, AsyncResult))", "def local_register():\n 
for text in bpy.data.texts:\n if text.use_module:\n name = text.name\n if name.endswith(\".py\"):\n try:\n __import__(name[:-3])\n except:\n import traceback\n traceback.print_exc()", "def import_files(self, args, opts): # import <file> | <dir> ...\n oid_list = []\n total_new = 0\n for arg in args:\n if os.path.isfile(arg): # Import a file\n oid, new_file = self.import_file(arg) # Call local file import \n if not oid:\n print \" - Not able to import file %s\" % (arg) \n continue\n oid_list.append(oid)\n total_new += new_file\n elif os.path.isdir(arg): # Import a directory\n oids, new_files = self.import_directory(arg) # Call local dir import\n if not oids:\n print \" - Not able to import diretory %s\" % (arg) \n continue\n oid_list.extend(oids)\n total_new += new_files\n else:\n print \" - %s is not a file or directory, skipping\" % (arg)\n if not oid_list:\n print \" - No files were imported\"\n else:\n print \" - %s files imported, %s are new\" % (len(oid_list), total_new)\n \n return oid_list" ]
[ "0.72246414", "0.72070956", "0.6955379", "0.6755924", "0.6729011", "0.66933364", "0.6478454", "0.646348", "0.6388376", "0.6222997", "0.61884964", "0.6172685", "0.5980641", "0.59624255", "0.5938535", "0.59262383", "0.59066707", "0.5905204", "0.5884838", "0.58748347", "0.5849644", "0.5849363", "0.5753656", "0.57239795", "0.5707372", "0.5676577", "0.56496257", "0.5635729", "0.5619556", "0.56092507", "0.5607357", "0.56065977", "0.55961967", "0.5591993", "0.55845636", "0.558356", "0.5579917", "0.5574", "0.55721354", "0.55655015", "0.5564064", "0.5552112", "0.55441546", "0.5533227", "0.5509655", "0.5490614", "0.54889804", "0.5478307", "0.54770404", "0.54746544", "0.54740876", "0.54671806", "0.5462719", "0.54600674", "0.5457057", "0.5455616", "0.54471797", "0.54460704", "0.54428357", "0.543155", "0.54288334", "0.5426378", "0.54149795", "0.54067606", "0.5405583", "0.5405353", "0.5398478", "0.53947306", "0.5393658", "0.5392084", "0.53914875", "0.5386309", "0.53836036", "0.5380697", "0.53804666", "0.5379213", "0.5376996", "0.5369445", "0.5368754", "0.53673106", "0.53625554", "0.5359705", "0.53558", "0.53493786", "0.5348594", "0.5348458", "0.5338002", "0.5336649", "0.53344333", "0.5329494", "0.53144455", "0.53134173", "0.5311048", "0.5310725", "0.5306976", "0.5305049", "0.53045684", "0.52982605", "0.52968144", "0.5296796", "0.5296576" ]
0.0
-1
Processor File Global statements
def get_global_vars(self) -> str:
    return templates.GLOBAL_STATEMENTS
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def preprocess_main():", "def process_file(file_name):\n pass # delete this line and replace with your code here", "def pre_processor(self):", "def main():\n processor.custom_config = parse_arguments()\n processor.process()\n logger.info(processor.statistics)\n logger.info(processor.custom_config)", "def process():", "def main():\n processSetOfCerFiles(sys.argv[1:])", "def process():\n pass", "def analyze(file,process):\n readin(file)\n # inspecting(file, functions)\n process(file, functions)", "def processing(self):\n pass", "def run ( self ) :\n exec self._cmd in self._myglobals,self._mylocals", "def process_all():\n\tconfilepath = check_args()\n\tif confilepath != \"\": #check arguments and sets some global variables \n\t\tconfig = read_conf(confilepath) #read config-file\n\t\tinst = get_hgf_institute(config) #check which hgf-institute\n\t\tbuild_or_remove_fielddesc(config) #create/delete fielddescriptors (fields + marctags)\n\t\tinsert_repnr_fielddesc(inst) #report number as hidden input in submit \n\t\tbuild_or_remove_doctypes(config,inst) #create/delete doctypes\n\t\tbuild_or_remove_schema(config) #create/delete collections for submit form\n\t\tgenerate_css(fieldlabels,inst) #create css_file \n\telse: pass", "def preLoopFunctions(self):\n\t\treturn", "def main():\r\n\r\n #Create a list of all files that have the GPX file format\r\n fileList = glob.glob(os.path.join(inFolder,\"*.{0}\".format(inFormat)))\r\n\r\n #Create a connection to PostGIS database\r\n pgConn = createPostgisConnection(dbFormat, dbHost, dbName, dbSchema, dbUser, dbPWD)\r\n\r\n #Process each *listed* layer type from a GPS file\r\n for f in fileList:\r\n importGPX(f, gpxImportLayers, pgConn)", "def pre_process(self):\n pass", "def pre_process(self):\n pass", "def pre_process(self):\n pass", "def pre_process(self):\n pass", "def pre_process(self):\n pass", "def main():\n p = DataPreprocessor()\n p.preprocess_and_save_data(p.path_to_file)", "def main():\n\targuments_sent = sys.argv\n\tif len(arguments_sent) > 1:\n\t\tfile_path = arguments_sent[1]\n\t\tprocess_based_on_type(file_path)", "def main():\n conn = psycopg2.connect(\"host=127.0.0.1 dbname=sparkifydb user=student password=student\")\n cur = conn.cursor()\n\n process_data(cur, conn, filepath='data/song_data', func=process_song_file)\n print('song file processing is complete')\n process_data(cur, conn, filepath='data/log_data', func=process_log_file)\n print('log file processing is complete')\n conn.close()", "def main():\n\tpass", "def main():\n file_requested = obtain_filename()\n process_command(file_requested)", "def main():\n pass", "def process(self):\n pass", "def run(self):\r\n self.env.process(self.rw_pifo_sm())", "def make_processing_functions(self):\n return", "def main():\n insert_gateway_values(\"hermes/bin/gateways.txt\")\n return", "def main_code():\n pass", "def start_processing(self):", "def PrepareCompile(file):\n global oilcc_I,oilcc_o,oilcc_S,oilcc_target\n fp = open(file,'r')\n # some flags\n item = ''; #one item is minimum object such as TASK,ALARM ...\n barcenum = 0;\n flag = False; #has \" { \" encountered or not\n start = False #has match an obj start or not\n for line in fp.readlines():\n #firstly, filter out the comment on this line\n el = DropComment(line);\n if(start == False):\n #{\n item = ''; \n barcenum = 0;\n flag = False;\n if(IsIt('osekObj',el)):\n start = True;\n item += el;\n if(el.count('{') > 0): #so at comment should not include '{}'\n flag = True;\n barcenum += el.count('{');\n if(el.count('}') > 0):\n barcenum -= 
el.count('}');\n if((flag == True) and (barcenum == 0)): #in one line\n #filter out the multi-line comment\n item = DropComment(item)\n oilcc_texts.append(item);\n start = False\n else: # special process for include\n inc = GetIt('include',el)\n if(inc != None): #include file\n flag_inc = False\n for I in oilcc_I:\n finc = I + '/' + inc[0]\n if(os.path.exists(finc)):\n print 'INFO:parse include file <%s> in the path <%s>'%(inc[0],I)\n PrepareCompile(finc);\n flag_inc = True;\n if(flag_inc == False):\n print 'ERROR:cann\\'t find out the file %s!'%(inc[0])\n sys.exit(-1)\n #}\n else:\n #{\n if(el.count('{') > 0): #so at comment should not include '{}'\n flag = True;\n barcenum += el.count('{');\n if(el.count('}') > 0):\n barcenum -= el.count('}');\n item += el;\n if((flag == True) and (barcenum == 0)):\n #filter out the multi-line comment\n item = DropComment(item)\n oilcc_texts.append(item);\n start = False\n #}\n fp.close()", "def main():\n if os.path.isdir(path):\n for filename in os.listdir(path):\n if filename.endswith('.asm'):\n execute_asm_file(path + '/' + filename, filename)\n else:\n execute_asm_file(path, path[path.rfind(\"/\") + 1:])", "def script(self):", "def main():\n conn = psycopg2.connect(\"host=127.0.0.1 dbname=hallo user=hallo password=hallo\")\n cur = conn.cursor()\n\n process_data(cur, conn, filepath='data/song_data', func=process_song_file)\n process_data(cur, conn, filepath='data/log_data', func=process_log_file)\n\n conn.close()", "def parseProgram(inputFile):\n print(\"Program\")\n parseStatements(inputFile)", "def main():\n conn = psycopg2.connect('host=127.0.0.1 dbname=sparkifydb user=student password=student')\n cur = conn.cursor()\n\n process_data(cur, conn, filepath='data/song_data', function=process_song_file)\n process_data(cur, conn, filepath='data/log_data', function=process_log_file)\n\n conn.close()", "def main(self):\r\n pass", "def process(self):", "def process(self):", "def process(self):", "def main():\n\n conn = psycopg2.connect(\n \"host=127.0.0.1 dbname=sparkifydb user=student password=student\")\n cur = conn.cursor()\n\n process_data(cur, conn, filepath='data/song_data', func=process_song_file)\n process_data(cur, conn, filepath='data/log_data', func=process_log_file)\n\n conn.close()", "def main():\n global collection\n #args = argparse.ArgumentParser()\n #args.add_argument('directory', help='Directory in which the files'\n #'are stored.')\n #args.add_argument('collection', help='The collection to use.')\n #parser = args.parse_args()\n collection = get_collection()\n #documents = glob.glob('*.asm')\n documents = collection.find()\n num_cores = multiprocessing.cpu_count()\n print('Running code on %d processors' % num_cores)\n Parallel(n_jobs=num_cores)(\\\n delayed(save_comments)(doc) for doc in documents)", "def __init__(self):\n super(PreProcess, self).__init__()", "def RUN(self):", "def __scan_file(self, args, next_file):\n\n POGGER.info(\"Scanning file '$'.\", next_file)\n context = self.__plugins.starting_new_file(next_file)\n\n try:\n POGGER.info(\"Scanning file '$' token-by-token.\", next_file)\n source_provider = FileSourceProvider(next_file)\n if args.x_test_scan_fault:\n source_provider = None\n actual_tokens = self.__tokenizer.transform_from_provider(source_provider)\n\n if actual_tokens and actual_tokens[-1].is_pragma:\n self.__plugins.compile_pragmas(\n next_file, actual_tokens[-1].pragma_lines\n )\n actual_tokens = actual_tokens[:-1]\n\n POGGER.info(\"Scanning file '$' tokens.\", next_file)\n for next_token in actual_tokens:\n 
POGGER.info(\"Processing token: $\", next_token)\n self.__plugins.next_token(context, next_token)\n\n POGGER.info(\"Scanning file '$' line-by-line.\", next_file)\n source_provider = FileSourceProvider(next_file)\n line_number = 1\n next_line = source_provider.get_next_line()\n while next_line is not None:\n POGGER.info(\"Processing line $: $\", line_number, next_line)\n self.__plugins.next_line(context, line_number, next_line)\n line_number += 1\n next_line = source_provider.get_next_line()\n\n POGGER.info(\"Completed scanning file '$'.\", next_file)\n self.__plugins.completed_file(context, line_number)\n\n context.report_on_triggered_rules()\n except Exception:\n context.report_on_triggered_rules()\n raise", "def main():\n \n conn = psycopg2.connect(\"host=127.0.0.1 dbname=sparkifydb user=student password=student\")\n \n cur = conn.cursor()\n process_data(cur, conn, filepath='data/song_data',\n func=process_song_file) \n \n process_data(cur, conn, filepath='data/log_data',\n func=process_log_file)\n \n conn.close()", "def set_global_definitions(self):\n # TODO: Investigate how this could be combined with the creation of\n # self.configfiles in reffile_setup()\n\n self.global_subarray_definitions = {}\n self.global_readout_patterns = {}\n self.global_subarray_definition_files = {}\n self.global_readout_pattern_files = {}\n\n self.global_crosstalk_files = {}\n self.global_filtpupilcombo_files = {}\n self.global_filter_position_files = {}\n self.global_flux_cal_files = {}\n self.global_psf_wing_threshold_file = {}\n self.global_psfpath = {}\n # self.global_filter_throughput_files = {} ?\n\n for instrument in 'niriss fgs nircam miri nirspec'.split():\n if instrument.lower() == 'niriss':\n readout_pattern_file = 'niriss_readout_pattern.txt'\n subarray_def_file = 'niriss_subarrays.list'\n crosstalk_file = 'niriss_xtalk_zeros.txt'\n filtpupilcombo_file = 'niriss_dual_wheel_list.txt'\n filter_position_file = 'niriss_filter_and_pupil_wheel_positions.txt'\n flux_cal_file = 'niriss_zeropoints.list'\n psf_wing_threshold_file = 'niriss_psf_wing_rate_thresholds.txt'\n psfpath = os.path.join(self.datadir, 'niriss/gridded_psf_library')\n elif instrument.lower() == 'fgs':\n readout_pattern_file = 'guider_readout_pattern.txt'\n subarray_def_file = 'guider_subarrays.list'\n crosstalk_file = 'guider_xtalk_zeros.txt'\n filtpupilcombo_file = 'guider_filter_dummy.list'\n filter_position_file = 'dummy.txt'\n flux_cal_file = 'guider_zeropoints.list'\n psf_wing_threshold_file = 'fgs_psf_wing_rate_thresholds.txt'\n psfpath = os.path.join(self.datadir, 'fgs/gridded_psf_library')\n elif instrument.lower() == 'nircam':\n readout_pattern_file = 'nircam_read_pattern_definitions.list'\n subarray_def_file = 'NIRCam_subarray_definitions.list'\n crosstalk_file = 'xtalk20150303g0.errorcut.txt'\n filtpupilcombo_file = 'nircam_filter_pupil_pairings.list'\n filter_position_file = 'nircam_filter_and_pupil_wheel_positions.txt'\n flux_cal_file = 'NIRCam_zeropoints.list'\n psf_wing_threshold_file = 'nircam_psf_wing_rate_thresholds.txt'\n psfpath = os.path.join(self.datadir, 'nircam/gridded_psf_library')\n else:\n readout_pattern_file = 'N/A'\n subarray_def_file = 'N/A'\n crosstalk_file = 'N/A'\n filtpupilcombo_file = 'N/A'\n filter_position_file = 'N/A'\n flux_cal_file = 'N/A'\n psf_wing_threshold_file = 'N/A'\n psfpath = 'N/A'\n if instrument in 'niriss fgs nircam'.split():\n self.global_subarray_definitions[instrument] = self.get_subarray_defs(filename=os.path.join(self.modpath, 'config', subarray_def_file))\n 
self.global_readout_patterns[instrument] = self.get_readpattern_defs(filename=os.path.join(self.modpath, 'config', readout_pattern_file))\n self.global_subarray_definition_files[instrument] = os.path.join(self.modpath, 'config', subarray_def_file)\n self.global_readout_pattern_files[instrument] = os.path.join(self.modpath, 'config', readout_pattern_file)\n self.global_crosstalk_files[instrument] = os.path.join(self.modpath, 'config', crosstalk_file)\n self.global_filtpupilcombo_files[instrument] = os.path.join(self.modpath, 'config', filtpupilcombo_file)\n self.global_filter_position_files[instrument] = os.path.join(self.modpath, 'config', filter_position_file)\n self.global_flux_cal_files[instrument] = os.path.join(self.modpath, 'config', flux_cal_file)\n self.global_psf_wing_threshold_file[instrument] = os.path.join(self.modpath, 'config', psf_wing_threshold_file)\n self.global_psfpath[instrument] = psfpath", "def post_processor(self):", "def main():\n print(\"It works!!! ;-)\")\n ###TODO### do something with the various methods/functions of this file", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def process_modules(self) -> typing.NoReturn:\n\t\tfor moduleName in self.moduleNameSet:\n\t\t\tdetected_encoding = detect_encoding(moduleName)\n\n\t\t\tprint(f\"Processing {moduleName} ({detected_encoding})\")\n\n\t\t\twith open(moduleName, 'r+', encoding=detected_encoding) as fileStream:\n\t\t\t\t# Store the content of the file\n\t\t\t\tfileContent: str = fileStream.read()\n\t\t\t\t# Sets the file's current position at the offset, the position of the read/write pointer within the file\n\t\t\t\tfileStream.seek(0, 0)\n\t\t\t\t# Truncates the file's size\n\t\t\t\tfileStream.truncate()\n\n\t\t\t\t# Process regex patterns\n\t\t\t\tfor regexDict in regexDictList:\n\t\t\t\t\tfileContent = self.process_function(regexDict, fileContent)\n\n\t\t\t\t# Rewrite the processed content of the file\n\t\t\t\tfileStream.write(fileContent)", "def test_function_runs(self):\n\t\tanalyse_text(self.filename)", "def __init__(self):\r\n self.label = \"ProcessGeogridFile\"\r\n self.description = \"This tool takes an input WRF Geogrid file in NetCDF format\" + \\\r\n \" and uses the HGT_M grid and an input high-resolution elevation grid\" + \\\r\n \"to produce a high-resolution hydrologically processed output.\"\r\n #self.canRunInBackground = False\r\n self.canRunInBackground = True\r\n self.category = \"Processing\"", "def Run():\r\n pass", "def\t_preprocessor(self) :\n\t\tlogging.debug('Beginning preprocessor')\n\t\t\n\t\t# Parse entries from ss class\n\t\tself._parse_initsol()\n\t\tself._parse_modelspace()\n\t\tself._parse_initbound()\n\t\t\n\t\t# Set regressors according to exptype\n\t\tself._set_regressors()\n\n\t\t# Deal with equations\n\t\tself.equations = self.ss.equations\n\n\t\t# Deal with noisy data ??", "def executeAll(lines):", "def _postprocess(self):", "def main():\r\n conn = psycopg2.connect(\"host=127.0.0.1 dbname=sparkifydb2 user=postgres password=261998\")\r\n cur = conn.cursor()\r\n\r\n process_data(cur, conn, filepath='C:/Users/AG/Downloads/Data-engineering-nanodegree-master/Data-engineering-nanodegree-master/1_dend_data_modeling/P1_Postgres_Data_Modeling_and_ETL/data/song_data', func=process_song_file)\r\n 
process_data(cur, conn, filepath='C:/Users/AG/Downloads/Data-engineering-nanodegree-master/Data-engineering-nanodegree-master/1_dend_data_modeling/P1_Postgres_Data_Modeling_and_ETL/data/log_data', func=process_log_file)\r\n\r\n conn.close()", "def preprocess(self):\n pass", "def preprocess(self):\n pass", "def preprocess(self):\n pass", "def globalphase_compiler(self, gate, args):\n pass", "def globalphase_compiler(self, gate, args):\n pass", "def main():\n summary = dict()\n gba_file = open('gettysburg.txt', 'r')\n for line in gba_file:\n Process_line(line, summary)\n Pretty_print(summary)", "def main():\n\n pass", "def pre_execute(self):", "def main():\n\n\t# Parse the file\n\tmem_file = advanced_analysis('../data_1/mempages.dat.out')", "def postLoopFunctions(self):\n\t\treturn", "def parse_global_instructions(binary, module):\n while True:\n op_name, _ = binary.get_next_opcode(peek=True, accept_eol=True)\n if op_name is None:\n return\n if op_name == 'OpFunction':\n return\n\n inst = parse_instruction(binary, module)\n module.insert_global_inst(inst)", "def run_all(self):\n self.formatter.section_start('Scratch Memory Info')\n self.formatter.section_start('Per priority')\n self.analyse_per_priority()\n self.formatter.section_end()\n self.formatter.section_start('Per task')\n self.analyse_per_task()\n self.formatter.section_end()\n self.formatter.section_end()", "def main():\r\n# Checking if argument was provided\r\n if len(sys.argv) <=1:\r\n print_usage()\r\n sys.exit(1)\r\n \r\n for arg in sys.argv:\r\n # Checking if help was called\r\n if arg == \"-h\" or arg == \"--help\":\r\n print_usage()\r\n sys.exit(1)\r\n \r\n # Checking for verbose mode \r\n if arg == \"-v\" or arg == \"--verbose\":\r\n global verbose_flag\r\n verbose_flag=1\r\n\r\n # Checking for input file\r\n if arg == \"-f\" or arg == \"--file\":\r\n global default_input_path\r\n global default_output_path\r\n default_input_path = sys.argv[2]\r\n default_output_path=default_input_path[:-4] + \"_results.txt\"\r\n\r\n #if arg == \"-u\" or arg == \"--url\":\r\n # input_url = sys.argv[2]\r\n\t \r\n if os.name == \"nt\":\r\n os.system('cls')\r\n else:\r\n os.system('clear')\r\n \r\n process_from_file()", "def main():\r\n run_processes('tests.csv', 'labs.csv')", "def run(self, context):\n i = context.skip_ws(0)\n if len(context.history) > 1 and context.history[-2] == \"IsFuncDeclaration\":\n self.check_function_declaration(context)\n if type(context.scope) is not GlobalScope:\n if type(context.scope) == Function and context.scope.multiline == False:\n pass\n else:\n context.new_error(\"PREPROC_GLOBAL\", context.peek_token(0))\n if context.check_token(i, \"DEFINE\") is False:\n return False, 0\n val = context.peek_token(i).value.split(\"define\", 1)[1]\n content = Lexer(val, context.peek_token(i).pos[0])\n tkns = content.get_tokens()\n i = 0\n identifiers = []\n protection = context.filename.upper().split(\"/\")[-1].replace(\".\", \"_\")\n for tkn in tkns:\n if tkn.type == \"ESCAPED_NEWLINE\":\n context.new_error(\"NEWLINE_DEFINE\", tkn)\n elif tkn.type in [\"TAB\", \"SPACE\"]:\n i += 1\n continue\n elif tkn.type == \"IDENTIFIER\" and len(identifiers) == 0:\n if tkn.value.isupper() is False:\n context.new_error(\"MACRO_NAME_CAPITAL\", tkn)\n identifiers.append(tkn)\n tmp = i\n while tmp < len(tkns) - 1 and tkns[tmp].type in [\n \"SPACE\",\n \"TAB\",\n \"IDENTIFIER\",\n ]:\n tmp += 1\n if tmp == (len(tkns) - 1) and context.filetype == \"h\":\n if context.scope.header_protection == 0:\n if identifiers[0].value == protection:\n 
context.scope.header_protection = 1\n elif identifiers[0].value != protection:\n context.new_error(\"HEADER_PROT_NAME\", tkns[1])\n elif (\n context.filetype == \"c\"\n and context.scope.include_allowed == True\n and (\n len(tkns) > tmp + 1\n or (\n len(tkns) == tmp + 1\n and identifiers[0].value != protection\n and context.scope.header_protection == -1\n )\n )\n ):\n context.scope.include_allowed = False\n\n elif tkn.type in [\"IDENTIFIER\", \"STRING\", \"CONSTANT\"]:\n if context.skip_define_error == True:\n continue\n if len(identifiers) == 1:\n if tkn.type == \"IDENTIFIER\" and tkn.value.isupper() is False:\n context.new_error(\"PREPROC_CONSTANT\", tkn)\n identifiers.append(tkn)\n elif len(identifiers) == 0:\n context.new_error(\"INCORRECT_DEFINE\", tkn)\n else:\n context.new_error(\"TOO_MANY_VALS\", tkn)\n elif tkn.type == \"LPARENTHESIS\":\n if context.skip_define_error == True:\n continue\n if len(identifiers) == 0:\n continue\n elif len(identifiers) == 1 and tkns[i - 1].type in [\"SPACE\", \"TAB\"]:\n continue\n else:\n context.new_error(\"PREPROC_CONSTANT\", tkn)\n elif tkn.type in [\"LBRACKET\", \"LBRACE\"]:\n if context.skip_define_error == True:\n continue\n context.new_error(\"PREPROC_CONSTANT\", tkn)\n\n i += 1\n if context.filetype == \"h\" and context.scope.header_protection != 1:\n context.new_error(\"HEADER_PROT_ALL\", context.peek_token(0))\n return False, 0", "def pre_process_asm_file(assembly_file):\r\n line_counter = 0\r\n marker_dictionary = load_constants()\r\n commands_list = list()\r\n for command in assembly_file.readlines():\r\n command = command.split(\"/\")[0] # getting rid of comments\r\n command = \"\".join(command.split()) # getting rid of whitespaces\r\n if command:\r\n if command.startswith('('):\r\n marker_dictionary[command[1:-1]] = line_counter\r\n continue\r\n commands_list.append(command)\r\n line_counter += 1\r\n return commands_list, marker_dictionary", "def main(self):", "def locGlob(): \n #glob = \"From Internal Local Name Space\" # Toggle Comment\n print(glob)\n\n return", "def main(config_file, rows, cols):\n # setup paths\n _, _, params = cf.get_ifg_paths(config_file)\n _postprocess_linrate(rows, cols, params)\n if params[cf.TIME_SERIES_CAL]:\n _postprocess_timeseries(rows, cols, params)", "def perform_symbolization(self): # pragma: no cover\n # pylint: disable=redefined-variable-type\n if os.path.isfile(self.start_location):\n files = [self.start_location]\n else:\n files = self._get_files()\n\n for filename in files:\n print(\"Processing file -- {0}\".format(filename))\n updated_file_text = ''\n updated_file_text = ''\n with open(filename, 'r') as fin:\n for line in fin.readlines():\n new_line = self.replace_id_with_symbol(line)\n\n if not updated_file_text and new_line:\n updated_file_text = new_line\n elif new_line:\n updated_file_text += new_line\n\n with open(filename, 'w') as fout:\n fout.write(updated_file_text)", "def main():\n langs = []\n\n with open(\"sql/07_populate.sql\", 'w', encoding='utf8') as sql:\n sql.write(\"--this file is generated from csv files in data folder\\n\\n\")\n\n langs = write_lang_city(sql)\n write_groups_diets(sql, langs)\n\n with open(\"sql/10_populate_test_data.sql\", 'w', encoding='utf8') as sql:\n sql.write(\"--this file is generated from csv files in moc_data folder\\n\\n\")\n write_test_data(sql)\n # This command sets postgis coordinates based on latitude and longitude\n sql.write(\"UPDATE restaurant SET geo_location = ST_POINT(latitude, longitude);\\n\")\n sql.close()", "def main():\n pass" ]
[ "0.6421695", "0.62486196", "0.61764765", "0.5911357", "0.58984786", "0.57821095", "0.5671686", "0.56631225", "0.5623473", "0.56146", "0.5594075", "0.5566961", "0.5538859", "0.5524859", "0.5524859", "0.5524859", "0.5524859", "0.5524859", "0.55061376", "0.5503649", "0.5500372", "0.54751843", "0.54691607", "0.5443128", "0.5438197", "0.5430443", "0.54030335", "0.5400241", "0.53877634", "0.5383016", "0.5377767", "0.53583056", "0.53565305", "0.53490335", "0.5340465", "0.532839", "0.53280807", "0.5324469", "0.5324469", "0.5324469", "0.53179914", "0.5313972", "0.52855885", "0.5253212", "0.5246176", "0.52226985", "0.5213268", "0.52123475", "0.52050054", "0.5199515", "0.5199515", "0.5199515", "0.5199515", "0.5199515", "0.5199515", "0.5199515", "0.5199515", "0.5199515", "0.5199515", "0.5199515", "0.5199515", "0.5199515", "0.5199515", "0.5199515", "0.5199515", "0.5199515", "0.5199515", "0.5199515", "0.5199515", "0.5199515", "0.5199515", "0.51993364", "0.51925504", "0.5181505", "0.5181031", "0.51674616", "0.5165066", "0.515665", "0.5152739", "0.5149997", "0.5149997", "0.5149997", "0.5138559", "0.5138559", "0.5125472", "0.5124517", "0.51213205", "0.5120609", "0.51170963", "0.51135886", "0.51025486", "0.5092081", "0.5089255", "0.50850415", "0.5079478", "0.50788444", "0.5077099", "0.50761855", "0.5069647", "0.5067391", "0.5066449" ]
0.0
-1
Construct Artillery YAML configuration
def set_yaml_config(self) -> None:
    # LT-248: We can pick Artillery Phase configuration from conf file
    self.yaml_config = {
        "config": {
            "target": self.get_swagger_url(),
            "processor": f"./{self.OUT_FILE}",
            "phases": [
                {
                    "duration": settings.DURATION or 1,
                    "arrivalRate": settings.SPAWN_RATE or 1
                }
            ]
        },
        "scenarios": self.task_set.yaml_flow
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def yamlConfigForParsingPlugins():\n parameters = \"\"\"\njoinPaths: !joinPaths\n - a\n - b\n - \"c\"\nrunPageTemplates: !findRunPageTemplates\n - \"templates\"\nbcrypt: !bcrypt\n bcryptLogRounds: 12\n user: \"pass\"\nbcryptNoUser: !bcrypt\n bcryptLogRounds: 12\n null: null\nsecretKey: !secretKey 12345\nsecretKeyGen: !secretKey null\n \"\"\"\n # Load parameters\n parameters = yaml.load(parameters, Loader = yaml.SafeLoader)\n return parameters", "def create_yaml(self):\n if self._language == PYTHON:\n language_str = 'python'\n package_route = '$(System.DefaultWorkingDirectory)'\n dependencies = self._python_dependencies()\n elif self._language == NODE:\n language_str = 'node'\n package_route = '$(System.DefaultWorkingDirectory)'\n dependencies = self._node_dependencies()\n elif self._language == DOTNET:\n language_str = 'dotnet'\n package_route = '$(System.DefaultWorkingDirectory)/publish_output/s'\n dependencies = self._dotnet_dependencies()\n elif self._language == POWERSHELL:\n language_str = 'powershell'\n package_route = '$(System.DefaultWorkingDirectory)'\n dependencies = self._powershell_dependencies()\n else:\n raise LanguageNotSupportException(self._language)\n\n if self._app_type == WINDOWS:\n platform_str = 'windows'\n yaml = self._generate_yaml(dependencies, 'VS2017-Win2016', language_str, platform_str, package_route)\n else:\n platform_str = 'linux'\n yaml = self._generate_yaml(dependencies, 'ubuntu-16.04', language_str, platform_str, package_route)\n\n with open('azure-pipelines.yml', 'w') as f:\n f.write(yaml)", "def generate_config():\n\n return {\n \"email_subject\": DEFAULT_EMAIL_SUBJECT,\n \"from_email\": DEFAULT_FROM_EMAIL,\n \"to_email\": DEFAULT_TO_EMAIL,\n \"url\": DEFAULT_URL,\n \"start_value\": DEFAULT_START_VALUE,\n \"look_ahead\": DEFAULT_LOOK_AHEAD,\n \"slide_window\": DEFAULT_SLIDE_WINDOW,\n }", "def config():\n if app.args.ui_mode == \"jinja\":\n ui_config = {\n \"p1\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\":\"material\",\n \"lineWrapping\" : True,\n \"mode\": \"yaml\",\n \"indentUnit\": 2,\n \"tabSize\": 2\n },\n \"title\": \"DATA\",\n \"inventory\": bool(app.args.inventory_source),\n \"b1\": {\n \"icon\": None,\n \"show\": False,\n \"text\": None,\n \"url\": None\n }\n },\n \"p2\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\": \"material\",\n \"lineWrapping\" : True,\n \"mode\": \"jinja2\"\n },\n \"title\": \"RENDER\",\n \"b1\": {\n \"icon\": \"create\",\n \"show\": True,\n \"text\": \"Render\",\n \"url\": \"/render\"\n }\n },\n \"p3\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\": \"material\",\n \"lineWrapping\" : True,\n \"mode\": 'text'\n },\n \"title\": \"RESULT\",\n \"b1\": {\n \"icon\": \"link\",\n \"show\": bool(app.args.url),\n \"text\": \"link\"\n }\n }\n }\n elif app.args.ui_mode == \"schema\":\n ui_config = {\n \"p1\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\":\"material\",\n \"lineWrapping\" : True,\n \"mode\": \"yaml\",\n \"indentUnit\": 2,\n \"tabSize\": 2\n },\n \"title\": \"DATA\",\n \"inventory\": bool(app.args.inventory_source),\n \"b1\": {\n \"icon\": \"create\",\n \"show\": True,\n \"text\": \"schema\",\n \"url\": \"/schema\"\n }\n },\n \"p2\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\": \"material\",\n \"lineWrapping\" : True,\n \"mode\": \"yaml\"\n },\n \"title\": \"SCHEMA\",\n \"b1\": {\n \"icon\": \"check\",\n \"show\": True,\n \"text\": \"Validate\",\n \"url\": \"/validate\"\n }\n },\n \"p3\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\": \"material\",\n 
\"lineWrapping\" : True,\n \"mode\": \"yaml\"\n },\n \"title\": \"VALIDATION SUCCESS/ERRORS\",\n \"b1\": {\n \"icon\": \"link\",\n \"show\": bool(app.args.url),\n \"text\": \"link\"\n }\n }\n }\n return jsonify(ui_config)", "def build_configs():", "def user_create_yaml(self):\n pass", "def minimal_config():\n return yaml.round_trip_load(\n textwrap.dedent(\n r\"\"\"\n static_data_config:\n reference:\n path: /path/to/ref.fa\n\n step_config:\n ngs_mapping:\n tools:\n rna: ['star']\n star:\n path_index: /path/to/star/index\n\n data_sets:\n first_batch:\n file: sheet.tsv\n search_patterns:\n - {'left': '*/*/*_R1.fastq.gz', 'right': '*/*/*_R2.fastq.gz'}\n search_paths: ['/path']\n type: matched_cancer\n naming_scheme: only_secondary_id\n \"\"\"\n ).lstrip()\n )", "def get_configured_yaml() -> ModuleType:\n import yaml\n\n from manubot.cite.csl_item import CSL_Item\n\n yaml.add_representer(str, _yaml_str_representer)\n # CSL_Item: pyyaml chokes on dict subclass\n # https://github.com/yaml/pyyaml/issues/142\n # https://stackoverflow.com/a/50181505/4651668\n yaml.add_representer(\n CSL_Item,\n lambda dumper, data: dumper.represent_mapping(\n tag=\"tag:yaml.org,2002:map\", mapping=data.items()\n ),\n )\n return yaml", "def yaml(self):\n raise NotImplementedError", "def configs(self):\n yield \"singleimage\", build_config.BuildConfig()", "def exp_config():\n with open(\n os.path.join(os.path.dirname(os.path.abspath(__file__)), \"experiment.yaml\")\n ) as f:\n exp_config = list(yaml.safe_load_all(f))\n\n for config in exp_config[0]:\n backward.populate_space(config)\n\n return exp_config", "def test_yaml_creation():\n ligand_path = examples_paths()['p-xylene']\n toluene_path = examples_paths()['toluene']\n with mmtools.utils.temporary_directory() as tmp_dir:\n molecules = \"\"\"\n T4lysozyme:\n filepath: {}\n leap: {{parameters: oldff/leaprc.ff14SB}}\"\"\".format(examples_paths()['lysozyme'])\n solvent = \"\"\"\n vacuum:\n nonbonded_method: NoCutoff\"\"\"\n protocol = indent(standard_protocol)\n system = \"\"\"\n system:\n ligand: p-xylene\n receptor: T4lysozyme\n solvent: vacuum\"\"\"\n experiment = \"\"\"\n protocol: absolute-binding\n system: system\"\"\"\n\n yaml_content = \"\"\"\n ---\n options:\n output_dir: {}\n molecules:{}\n p-xylene:\n filepath: {}\n antechamber: {{charge_method: bcc}}\n leap: {{parameters: leaprc.gaff}}\n benzene:\n filepath: {}\n antechamber: {{charge_method: bcc}}\n leap: {{parameters: leaprc.gaff}}\n solvents:{}\n GBSA-OBC2:\n nonbonded_method: NoCutoff\n implicit_solvent: OBC2\n systems:{}\n protocols:{}\n experiments:{}\n \"\"\".format(os.path.relpath(tmp_dir), molecules,\n os.path.relpath(ligand_path), toluene_path,\n solvent, system, protocol, experiment)\n\n # We need to check whether the relative paths to the output directory and\n # for p-xylene are handled correctly while absolute paths (T4lysozyme) are\n # left untouched\n expected_yaml_content = textwrap.dedent(\"\"\"\n ---\n version: '{}'\n options:\n experiments_dir: .\n output_dir: .\n molecules:{}\n p-xylene:\n filepath: {}\n antechamber: {{charge_method: bcc}}\n leap: {{parameters: leaprc.gaff}}\n solvents:{}\n systems:{}\n protocols:{}\n experiments:{}\n \"\"\".format(HIGHEST_VERSION, molecules, os.path.relpath(ligand_path, tmp_dir),\n solvent, system, protocol, experiment))\n expected_yaml_content = expected_yaml_content[1:] # remove first '\\n'\n\n exp_builder = ExperimentBuilder(textwrap.dedent(yaml_content))\n\n # during setup we can modify molecule's fields, so we need\n # to check that it doesn't 
affect the YAML file exported\n experiment_dict = yaml.load(experiment, Loader=yaml.FullLoader)\n exp_builder._db.get_system(experiment_dict['system'])\n\n generated_yaml_path = os.path.join(tmp_dir, 'experiment.yaml')\n exp_builder._generate_yaml(experiment_dict, generated_yaml_path)\n with open(generated_yaml_path, 'r') as f:\n assert yaml.load(f, Loader=yaml.FullLoader) == yank_load(expected_yaml_content)", "def celery_config() -> Dict:\n with open(script_dir + 'config.yml', 'r') as yamlfile:\n cfg = yaml.load(yamlfile, Loader=yaml.SafeLoader)\n celery_cfg = cfg['celery']\n result = {\n 'main': celery_cfg['main'],\n 'broker': celery_cfg['broker_url'],\n 'backend': celery_cfg['backend_url'],\n }\n return result", "def _separate(self):\n s = self.as_yamlstr()\n self._config = yaml.load(s, Loader=yaml.Loader)\n self._comments = self._extract_comments(self._yaml_config)", "def _parse_yaml_configs(args, anon_component_prefix=\"anon_app\"):\n # Configuration files are basically nested dictionaries and the command-line arguments\n # are a list with each element being a dictionary. If the dict in the args has the key\n # 'class', then it is anonymous and we should just give it a sequential unique name to\n # ensure it is run. If, however, it does not, then we should assume that it's a NAMED\n # configuration and so we can actually use that to overwrite/modify the configurations\n # pulled in from a file.\n\n new_configs = {}\n for arg in args:\n try:\n arg = yaml.load(arg)\n except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:\n raise ValueError(\"error parsing manual configuration: %s\\nError:%s\" % (arg, e))\n\n # If this config is anonymous, give it a unique name and add it to configs\n # since it couldn't possibly overwrite another config entry.\n # NOTE: if user specified a 'name' entry directly, we will still take that later on...\n if 'class' in arg:\n # TODO: perhaps register these names somewhere to ensure uniqueness?\n global __scale_client_n_anon_apps_added__\n unique_key = anon_component_prefix + str(__scale_client_n_anon_apps_added__)\n __scale_client_n_anon_apps_added__ += 1\n new_configs[unique_key] = arg\n else:\n try:\n new_configs.update(arg)\n except TypeError as e:\n raise ValueError(\"error in your manual configuration: %s\\n\"\n \"couldn't be interpreted as a dict due to error: %s\" % (arg, e))\n\n return new_configs", "def configuration():", "def manage_config() -> dict:\n required_args = {\"embedding_size\", \"hidden_size\", \"num_layers\", \"corpus_dir\"}\n arg_groups = {\n \"general\": {\"recoding_type\"},\n \"model\": {\"embedding_size\", \"hidden_size\", \"num_layers\", \"dropout\"},\n \"train\": {\"weight_decay\", \"learning_rate\", \"batch_size\", \"num_epochs\", \"clip\", \"print_every\", \"eval_every\",\n \"model_save_path\", \"device\", \"model_name\"},\n \"logging\": {\"log_dir\"},\n \"corpus\": {\"corpus_dir\", \"max_seq_len\"},\n \"recoding\": {\"step_type\", \"num_samples\", \"mc_dropout\", \"prior_scale\", \"hidden_size\", \"weight_decay\",\n \"data_noise\", \"share_anchor\", \"use_cross_entropy\"},\n \"step\": {\"predictor_layers\", \"window_size\", \"step_size\", \"hidden_size\"}\n }\n argparser = init_argparser()\n config_object = ConfigSetup(argparser, required_args, arg_groups)\n config_dict = config_object.config_dict\n\n return config_dict", "def get_valid_config(args):\n source = confuse.YamlSource(args.config)\n config = confuse.RootView([source])\n\n job_template = {\n \"job\": {\n \"name\": str,\n \"dir\": confuse.Optional(\n 
FilenameValidate(\n cwd=str(pathlib.Path(__file__).parent.absolute())),\n default=str(pathlib.Path(__file__).parent.absolute())\n ),\n }\n }\n job_config = config.get(job_template)\n\n logging_template = confuse.Optional(\n confuse.MappingTemplate({\n 'ids': confuse.StrSeq(),\n 'data': confuse.Sequence(\n confuse.Choice(['objectives', 'state', 'variables'])),\n 'timestamped': confuse.Optional(bool, default=True),\n \"to_file\": confuse.Optional(bool, default=True),\n \"to_console\": confuse.Optional(bool, default=False)\n })\n )\n\n sumo_template = {\n \"dir\": FilenameValidate(\n cwd=job_config.job.dir),\n \"gui\": confuse.Optional(bool, default=True),\n \"max_steps\": confuse.Optional(int, default=10e5),\n \"network\": FilenameValidate(relative_to=\"dir\"),\n }\n sumo_config = config.get({\"sumo\": sumo_template})\n sumo_template[\"additional\"] = confuse.Sequence(\n FilenameValidate(cwd=sumo_config.sumo.dir))\n sumo_template[\"route\"] = confuse.Sequence(\n FilenameValidate(cwd=sumo_config.sumo.dir))\n\n tls_template = confuse.Sequence({\n \"id\": str,\n \"controller\": confuse.Choice(\n TLSFactory.get_registered_keys()),\n \"constants\": confuse.MappingValues(\n confuse.OneOf([\n confuse.Number(),\n AllowedContainers(list),\n AllowedContainers(dict),\n FilenameValidate(cwd=job_config.job.dir),\n ExecutableValidate()\n ])\n ),\n \"variables\": confuse.MappingValues(\n confuse.OneOf([\n confuse.Number(),\n AllowedContainers(list)\n ])\n ),\n \"extract\": {\n \"user_data\": confuse.Sequence({\n \"feature\": confuse.Choice(\n [\"count\", \"speed\", \"eta\", \"delay\", \"waiting_time\"]),\n \"user_class\": confuse.Choice(\n [\"bicycle\", \"passenger\", \"pedestrian\", \"bus\", \"truck\", \"moped\"]),\n \"at\": confuse.Choice(\n [\"lane\", \"detector\", \"phase\"]),\n \"mapping\": AllowedContainers(dict)\n }),\n \"tls_data\": confuse.Sequence({\n \"feature\": confuse.Choice(\n [\"elapsed_time\", \"integer_phase\", \"binary_phase\"]),\n \"to_variable\": str\n })\n }\n })\n\n full_template = {\n \"logging\": logging_template,\n \"sumo\": sumo_template,\n \"tls\": tls_template,\n }\n job_template.update(full_template)\n valid_config = config.get(job_template)\n\n # second round of sumo validation\n assert len(valid_config.sumo.route) > 0, \\\n \"No demand definition: sumo.route is an empty list, expected at least one *.rou.xml\"\n \n # second round of logger validation, look if ids are given\n if valid_config.logging:\n if valid_config.logging.ids and valid_config.logging.data:\n output_dir = os.path.join(valid_config.job.dir, \"output\")\n os.makedirs(output_dir, exist_ok=True)\n valid_config.logging.update({\"dir\": output_dir})\n else:\n del valid_config['logging']\n\n return valid_config", "def config():", "def config():", "def _get_yaml_parser():\n # using a function here so settings are always the same\n parser = YAML(typ=\"jinja2\")\n parser.indent(mapping=2, sequence=4, offset=2)\n parser.width = 320\n parser.preserve_quotes = True\n return parser", "def setup_config():\n\n config = configparser.ConfigParser()\n config.read(CONFIG_PATH)\n\n return config", "def __init__(self):\n # Read configuration into dictionary\n self.directories = general.config_directories()\n self.config = general.read_yaml_files(self.directories)", "def configure(self, yaml_file):\n with open(yaml_file, \"r\") as f:\n panorama_conf = yaml.load(f)\n\n # Configuring factories to:\n # - get only title, date and category from article metadata\n # - rename the first 4 tags with the names defined below\n\n 
self.data_factory = DataFactory(\n metadata_columns=panorama_conf[\"metadata_columns\"],\n tag_columns=panorama_conf[\"tag_columns\"],\n )\n self.chart_factory = ChartFactory()\n\n # Configuring the charts if a chart configuration information is available in the conf file\n if \"chart_conf\" in panorama_conf:\n self.chart_factory.chart_conf = panorama_conf[\"chart_conf\"]\n\n # Creating the configurations\n for yaml_conf in panorama_conf[\"confs\"]:\n chart_id = yaml_conf[\"chart_id\"]\n try:\n producer = self._create_producer(yaml_conf[\"producer\"])\n renderer = self._create_renderer(yaml_conf[\"renderer\"], chart_id)\n self.append_conf(\n chart_id=chart_id, producer=producer, renderer=renderer\n )\n except ValueError as err:\n logger.exception(\n \"Error while initializing [%s] conf. -> chart not available.\",\n chart_id,\n )", "def create_boot_config(configuration_manager, credential, storage_uri, password):\n\n config = ConfigParser.SafeConfigParser()\n \n rabbit_dict = {'rabbit_host': 'localhost', \n 'rabbit_port': '5672', \n 'rabbit_use_ssl': 'False',\n 'rabbit_userid': 'user',\n 'rabbit_password': 'password',\n 'rabbit_virtual_host': '/',\n 'amqp_connection_uri': None }\n \n section = 'messaging'\n config.add_section(section) \n for k in rabbit_dict.keys():\n v = configuration_manager.get(k, rabbit_dict[k])\n if v:\n config.set(section, k, v)\n\n section = 'database'\n config.add_section(section)\n config.set(section, 'initial_password', password)\n\n if storage_uri and len(storage_uri) > 0:\n section = 'snapshot'\n config.add_section(section)\n config.set(section, 'snapshot_uri', storage_uri)\n config.set(section, 'swift_auth_url', configuration_manager.get('reddwarf_proxy_swift_auth_url', 'http://0.0.0.0:5000/v2.0'))\n config.set(section, 'swift_auth_user', \"%s:%s\" % (credential['tenant_id'], credential['user_name']))\n config.set(section, 'swift_auth_key', credential['password'])\n config.set(section, 'snapshot_key', configuration_manager.get('snapshot_key',\"changeme\"))\n \n mem_file = StringIO.StringIO()\n config.write(mem_file)\n \n return mem_file.getvalue()", "def create_config(self) -> None:\n pass", "def create_config(self) -> None:\n pass", "def load_args(self):\n\n # retrieve module path\n dir_path = os.path.dirname(os.path.abspath(__file__))\n dir_path = os.path.split(dir_path)[0]\n # get all the default yaml configs with glob\n dir_path = os.path.join(dir_path, 'configs', '*.yml')\n\n # -- From default yapt configuration\n self._defaults_path = {}\n self._defaults_yapt = OmegaConf.create(dict())\n for file in glob.glob(dir_path):\n # split filename from path to create key and val\n key = os.path.splitext(os.path.split(file)[1])[0]\n self._defaults_path[key] = file\n # parse default args\n self._defaults_yapt = OmegaConf.merge(\n self._defaults_yapt, OmegaConf.load(file))\n\n # -- From command line\n self._cli_args = OmegaConf.from_cli()\n if self._cli_args.config is not None:\n self.default_config = self._cli_args.config\n del self._cli_args['config']\n self.console_log.warning(\"override default config with: %s\", self.default_config)\n\n # -- From experiment default config file\n self._default_config_args = OmegaConf.create(dict())\n if self.default_config is not None:\n self._default_config_args = OmegaConf.load(self.default_config)\n\n # -- Merge default args\n self._args = OmegaConf.merge(\n self._defaults_yapt,\n self._default_config_args)\n\n # -- Resolve interpolations to be sure all nodes are explicit\n # self._args = OmegaConf.to_container(self._args, 
resolve=True)\n # self._args = OmegaConf.create(self._args)\n\n # -- make args structured: it fails if accessing a missing key\n OmegaConf.set_struct(self._args, True)", "def configs(self):\n raise NotImplementedError()", "def test_load_config_with_aliases(self):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\"image: bosybux\\n\")\n f.write(\"aliases:\\n\")\n f.write(\" foo: bar\\n\")\n f.write(\" snap: crackle pop\\n\")\n\n config = scuba.config.load_config(\".scuba.yml\")\n assert config.image == \"bosybux\"\n assert len(config.aliases) == 2\n assert config.aliases[\"foo\"].script == [\"bar\"]\n assert config.aliases[\"snap\"].script == [\"crackle pop\"]", "def test_create_namespaced_build_config(self):\n pass", "def build_configuration() -> Config:\n logger.debug('Building configuration.')\n config = Config(roman_url=sanitize_url(get_prop('ROMAN_URL')),\n redis_url=get_prop('REDIS_URL'),\n redis_port=int(get_prop('REDIS_PORT')),\n redis_username=get_prop('REDIS_USERNAME', True),\n redis_password=get_prop('REDIS_PASSWORD', True),\n charon_url=sanitize_url(get_prop('CHARON_URL')))\n logger.debug(f'Used configuration: {config}')\n return config", "def config():\n return Config()", "def config():\n return Config()", "def __init__(self):\n with open(\"conf/gears.yaml\", \"rb\") as stream:\n try:\n self.gears = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)", "def config() -> Config:\n return Config()", "def get_parser():\r\n parser = yamlargparse.ArgumentParser(\r\n prog='train_forcast',\r\n description='configurations realted to training process of forcasting mechanism'\r\n )\r\n parser.add_argument('--info.run_id', default='',\r\n help='the unique identifier for logging and metadata creation')\r\n parser.add_argument('--info.m', default=10,\r\n help='use past m values for prediction')\r\n parser.add_argument('--info.n', default=5,\r\n help='predict next n values')\r\n parser.add_argument('--info.operation_type',\r\n choices=[const.TRAIN_OP, const.DEPLOY_OP],\r\n help='choosing whether to perform training or deployment')\r\n parser.add_argument('--info.model_type',\r\n choices=[const.LIN_REG, const.RAN_FOR_REG, const.DEC_TREE_REG, const.MULT_OP_REG],\r\n help='choosing model type in case of training operation')\r\n parser.add_argument('--info.model_file', default='',\r\n help='the relative path to the stored model file')\r\n parser.add_argument('--info.output_dir', default='output',\r\n help='the relative path to the directory for storing results')\r\n parser.add_argument('--train_test_split.type',\r\n choices=[const.SPLIT_BY_DATE, const.SPLIT_BY_FILES],\r\n help='determines the way in which train-test split should be done')\r\n parser.add_argument('--train_test_split.date', default='',\r\n help='the date string in \\'YYYY-mm-dd\\' format, indicating the date at which split should be made')\r\n parser.add_argument('--train_test_split.train', default='',\r\n help='the relative path to the .tsv file containing train data')\r\n parser.add_argument('--train_test_split.test', default='',\r\n help='the relative path to the .tsv file containing test data')\r\n parser.add_argument('--visualize.train_data', action=yamlargparse.ActionYesNo, default=False,\r\n help='determines if the training visualizations are to be stored')\r\n parser.add_argument('--visualize.train_fname', default='',\r\n help='the relative path to the .pdf file storing train data visualizations')\r\n parser.add_argument('--random_forest_regression.max_depth', default=20,\r\n help='choosing hyperparams for 
random forest')\r\n parser.add_argument('--random_forest_regression.random_state', default=7,\r\n help='choosing hyperparams for random forest')\r\n parser.add_argument('--decison_tree_regression.max_depth', default=20,\r\n help='choosing hyperparams for decision tree')\r\n parser.add_argument('--multi_output_regression.n_estimators', default=100,\r\n help='choosing hyperparams for multioutput regression')\r\n\r\n parser.add_argument('--cfg', action=yamlargparse.ActionConfigFile, required=True)\r\n return parser", "def setup_config(self, args=None):\n self.config_parse(args=args)", "def showconfig():\n print(yaml.dump(CONFIG))", "def load_config(filename):\n AS[\"config\"] = load_yaml_file(filename)", "def make_default_config(project):\n return {\n \"breathe_projects\": {\n project: \"./_doxygen/xml\"\n },\n \"breathe_default_project\": project,\n \"exhale_args\": {\n # required arguments\n \"containmentFolder\": \"./api\",\n \"rootFileName\": \"{0}_root.rst\".format(project),\n \"rootFileTitle\": \"``{0}`` Test Project\".format(project),\n \"doxygenStripFromPath\": \"..\",\n # additional arguments\n \"exhaleExecutesDoxygen\": True,\n \"exhaleDoxygenStdin\": \"INPUT = ../include\"\n }\n }", "def _augment_pipeline_cfg(self):", "def test_yaml_parsable_configuration(self):\n applications = {\n Application(\n name='mysql-hybridcluster',\n image=DockerImage(repository='flocker/mysql', tag='v1.0.0'),\n ports=frozenset(),\n volume=AttachedVolume(\n name='mysql-hybridcluster',\n # Mountpoint will only be available once\n # https://github.com/ClusterHQ/flocker/issues/289 is\n # fixed.\n mountpoint=None)\n ),\n Application(\n name='site-hybridcluster',\n image=DockerImage(repository='flocker/wordpress',\n tag='v1.0.0'),\n ports=frozenset([Port(internal_port=80,\n external_port=8080)])\n )\n }\n expected_applications = {\n b'mysql-hybridcluster': Application(\n name=b'mysql-hybridcluster',\n image=DockerImage(repository='unknown'),\n ports=frozenset(),\n volume=AttachedVolume(\n name=b'mysql-hybridcluster',\n mountpoint=None,\n )\n ),\n b'site-hybridcluster': Application(\n name=b'site-hybridcluster',\n image=DockerImage(repository='unknown'),\n ports=frozenset([Port(internal_port=80,\n external_port=8080)])\n )\n }\n result = configuration_to_yaml(applications)\n config = Configuration(lenient=True)\n apps = config._applications_from_configuration(safe_load(result))\n self.assertEqual(apps, expected_applications)", "def setup() -> Generator[Any, None, None]:\n yield\n config.utils.journal_abbreviations = []\n JournalAbbreviations._abbreviations = {}\n JournalAbbreviations._fullwords = {}", "def create_yml(self):\n fid = open(os.path.join(RESOURCE_PATH,\n '11079419_SNA_SNA.txt'),\n MODE_ASCII_READ)\n\n stream_handle = fid\n\n self.create_parser(stream_handle, True)\n\n particles = self.parser.get_records(1000)\n\n self.particle_to_yml(particles, '11079419_SNA_SNA_telem.yml')\n fid.close()", "def setup(app):\n # Register builder.\n app.add_builder(BeamerBuilder)\n\n # Add setting for allowframebreaks.\n app.add_config_value(\"beamer_allowframebreaks\", True, \"beamer\")\n # Add setting for Beamer theme.\n app.add_config_value(\"beamer_theme\", \"Warsaw\", \"beamer\")\n # Adjust titles upon doctree-resolved.\n app.connect(\"doctree-resolved\", adjust_titles)\n\n return {\n \"version\": \"1.0\",\n \"parallel_read_safe\": True,\n \"parallel_write_safe\": True,\n }", "def test_load_config_image_from_yaml_nested_keys(self):\n with open(\".gitlab.yml\", \"w\") as f:\n f.write(\"somewhere:\\n\")\n f.write(\" 
down:\\n\")\n f.write(\" here: dummian:8.2\\n\")\n\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\"image: !from_yaml .gitlab.yml somewhere.down.here\\n\")\n\n config = scuba.config.load_config(\".scuba.yml\")\n assert config.image == \"dummian:8.2\"", "def default_configs(cls):\n config = super().default_configs()\n config.update(\n {\n \"entry_type\": \"ft.onto.base_ontology.Document\",\n \"model_name\": \"ktrapeznikov/biobert_v1.1_pubmed_squad_v2\",\n \"question\": \"Where do I live\",\n \"max_answer_len\": 15,\n \"cuda_devices\": -1,\n \"handle_impossible_answer\": False,\n }\n )\n return config", "def test_generate_default_daemon_config():\n default_config = alien_wallpaper.daemon.DaemonConfig()\n default_toml = default_config.dumps_toml()\n assert (\n default_toml\n == \"\"\"subreddits = []\ncustom_feeds = []\noutput_directory = \"\"\n\"\"\"\n )", "def setup(self):\n file_under_test = os.path.join(os.curdir, 'application-core',\n 'app.core.config.xml')\n with open(file_under_test) as f:\n config = f.read()\n self.config = objectify.fromstring(config)", "def load_configuration():\n argparser = ArgumentParser(description=\"Zoe application tester - Container Analytics as a Service core component\",\n default_config_files=CONFIG_PATHS,\n auto_env_var_prefix=\"ZOE_MASTER_\",\n args_for_setting_config_path=[\"--config\"],\n args_for_writing_out_config_file=[\"--write-config\"])\n\n argparser.add_argument('--debug', action='store_true', help='Enable debug output')\n argparser.add_argument('--swarm', help='Swarm/Docker API endpoint (ex.: zk://zk1:2181,zk2:2181 or http://swarm:2380)', default='http://localhost:2375')\n argparser.add_argument('--deployment-name', help='name of this Zoe deployment', default='prod')\n\n argparser.add_argument('--dbname', help='DB name', default='zoe')\n argparser.add_argument('--dbuser', help='DB user', default='zoe')\n argparser.add_argument('--dbpass', help='DB password', default='')\n argparser.add_argument('--dbhost', help='DB hostname', default='localhost')\n argparser.add_argument('--dbport', type=int, help='DB port', default=5432)\n\n # Master options\n argparser.add_argument('--api-listen-uri', help='ZMQ API listen address', default='tcp://*:4850')\n argparser.add_argument('--influxdb-dbname', help='Name of the InfluxDB database to use for storing metrics', default='zoe')\n argparser.add_argument('--influxdb-url', help='URL of the InfluxDB service (ex. http://localhost:8086)', default='http://localhost:8086')\n argparser.add_argument('--influxdb-enable', action=\"store_true\", help='Enable metric output toward influxDB')\n argparser.add_argument('--gelf-address', help='Enable Docker GELF log output to this destination (ex. udp://1.2.3.4:1234)', default='')\n argparser.add_argument('--workspace-base-path', help='Path where user workspaces will be created by Zoe. 
Must be visible at this path on all Swarm hosts.', default='/mnt/zoe-workspaces')\n argparser.add_argument('--overlay-network-name', help='Name of the Swarm overlay network Zoe should use', default='zoe')\n\n # API options\n argparser.add_argument('--listen-address', type=str, help='Address to listen to for incoming connections', default=\"0.0.0.0\")\n argparser.add_argument('--listen-port', type=int, help='Port to listen to for incoming connections', default=5001)\n argparser.add_argument('--master-url', help='URL of the Zoe master process', default='tcp://127.0.0.1:4850')\n\n # API auth options\n argparser.add_argument('--auth-type', help='Authentication type (text or ldap)', default='text')\n\n argparser.add_argument('--auth-file', help='Path to the CSV file containing user,pass,role lines for text authentication', default='zoepass.csv')\n\n argparser.add_argument('--ldap-server-uri', help='LDAP server to use for authentication', default='ldap://localhost')\n argparser.add_argument('--ldap-base-dn', help='LDAP base DN for users', default='ou=something,dc=any,dc=local')\n argparser.add_argument('--ldap-admin-gid', type=int, help='LDAP group ID for admins', default=5000)\n argparser.add_argument('--ldap-user-gid', type=int, help='LDAP group ID for users', default=5001)\n argparser.add_argument('--ldap-guest-gid', type=int, help='LDAP group ID for guests', default=5002)\n\n argparser.add_argument('jsonfile', type=FileType(\"r\"), help='Application description')\n\n opts = argparser.parse_args()\n\n opts.gelf_address = '' # For debugging we want to easily look at logs with 'docker logs'\n opts.influxdb_enable = False # don't send metrics for these test runs\n opts.deployment_name = 'zapp-test'\n\n if opts.debug:\n argparser.print_values()\n\n return opts", "def test_config_from_yaml(self):\n\n # Make yaml-file\n path = self.write_temp_file(\"\"\"\nsection1:\n string1:\n string2: string2\n int1: 0\n int2: 1\n float1: 0.0\n float2: 1.1\n boolean1: false\n boolean2: true\n list1:\n - list1item1\n - list1item2\nsection2:\n string2: string2\n int2: 2\n float2: 2.2\n boolean2: false\n list2:\n - list2item1\n\"\"\")\n\n for namespace in [None, 'namespace']:\n config = Config()\n config.load_from_yaml(path, namespace=namespace)\n\n namespace_prefix = '%s.' 
% namespace if namespace is not None else ''\n\n # Test section 1\n self.assert_equal_deep(9, len(config('%ssection1' % namespace_prefix)))\n self.assert_equal_deep(None, config('%ssection1.string1' % namespace_prefix))\n self.assert_equal_deep('string2', config('%ssection1.string2' % namespace_prefix))\n self.assert_equal_deep(0, config('%ssection1.int1' % namespace_prefix))\n self.assert_equal_deep(1, config('%ssection1.int2' % namespace_prefix))\n self.assert_equal_deep(0.0, config('%ssection1.float1' % namespace_prefix))\n self.assert_equal_deep(1.1, config('%ssection1.float2' % namespace_prefix))\n self.assert_equal_deep(False, config('%ssection1.boolean1' % namespace_prefix))\n self.assert_equal_deep(True, config('%ssection1.boolean2' % namespace_prefix))\n self.assert_equal_deep(['list1item1', 'list1item2'], config('%ssection1.list1' % namespace_prefix))\n\n # Test section 2\n self.assert_equal_deep(5, len(config('%ssection2' % namespace_prefix)))\n self.assert_equal_deep('string2', config('%ssection2.string2' % namespace_prefix))\n self.assert_equal_deep(2, config('%ssection2.int2' % namespace_prefix))\n self.assert_equal_deep(2.2, config('%ssection2.float2' % namespace_prefix))\n self.assert_equal_deep(False, config('%ssection2.boolean2' % namespace_prefix))\n self.assert_equal_deep(['list2item1'], config('%ssection2.list2' % namespace_prefix))\n\n # Test section 3\n self.assert_equal(None, config('%ssection3' % namespace_prefix))", "def __init__(self, config):\n\n with open(config) as file:\n self.config = yaml.load(file, Loader=yaml.FullLoader)\n\n self.contents = []\n\n self.templateLoader = jinja2.FileSystemLoader(searchpath=\"./\")\n self.templateEnv = jinja2.Environment(loader=self.templateLoader)", "def create_zoo_config(physical_path, config: dict):\n logging.debug(\"physical_path='{0}', settings={1}\".format(physical_path, config))\n physical_path = os.path.join(physical_path, '.zoo')\n if \"description\" in config:\n config[\"description\"] = Literal(config[\"description\"])\n\n if \"find_installed_command\" in config:\n config[\"find_installed_command\"] = Literal(config[\"find_installed_command\"])\n\n if \"install_command\" in config:\n config[\"install_command\"] = Literal(config[\"install_command\"])\n\n if \"uninstall_command\" in config:\n config[\"uninstall_command\"] = Literal(config[\"uninstall_command\"])\n\n if \"upgrade_command\" in config:\n config[\"upgrade_command\"] = Literal(config[\"upgrade_command\"])\n\n YamlHelper.save(config, physical_path)", "def init_config() -> Config:\n ...", "def default_config_yaml(cls):\n return DEFAULT_CONFIG", "def make_tfx_configs(metadata: Dict) -> Dict:\n system_config = get_config(metadata, \"system_configurations\")\n \n\n # %% pipeline_root\n # TFX produces two types of outputs, files and metadata.\n # - Files will be created under \"pipeline_root\" directory.\n pipeline_root = {\n \"description\": \"\"\"TFX produces two types of outputs, files and metadata.\n Files will be created under 'pipeline_root' directory.\"\"\",\n \"type\": \"string\",\n \"value\": os.path.join(\n system_config[\"gcs_bucket_name\"],\n \"tfx_pipeline_output\",\n metadata[\"pipeline_name\"] + \"_\" + metadata[\"pipeline_version\"],\n ),\n }\n metadata[\"system_configurations\"][\"pipeline_root\"] = pipeline_root\n\n # %% model_serve_dir\n # The last component of the pipeline, \"Pusher\" will produce serving model under\n # model_serve_dir.\n model_serve_dir = {\n \"description\": \"\",\n \"type\": \"string\",\n \"value\": 
os.path.join(pipeline_root[\"value\"], \"serving_model\"),\n }\n metadata[\"system_configurations\"][\"model_serve_dir\"] = model_serve_dir\n\n return metadata", "def apps_yaml(requests_mock, app_git_url, app_metadata, app_logo):\n apps_yaml = {\n \"test\": {\n \"releases\": [f\"git+{app_git_url}\"],\n \"metadata\": app_metadata,\n \"categories\": [\"utilities\"],\n \"logo\": app_logo.url,\n }\n }\n requests_mock.get(app_git_url)\n yield apps_yaml", "def mkconfig():\n basedir = os.path.join(os.path.expanduser('~'), '.strikepackage')\n\n # Try to populate dirs\n defaultdirs = [os.path.join(basedir, leaf)\n for leaf in ['examples', 'keys', 'templates']]\n\n for dirpath in defaultdirs:\n if not os.path.exists(dirpath):\n try:\n os.makedirs(dirpath, 0755)\n except (os.error, IOError) as ex:\n warn(\"Error while creating default directory: {}\".format(ex))\n\n # Try to place example confs if not present\n exdir = os.path.join(basedir, 'examples')\n exfiles = [(os.path.join(exdir, exfile[0]), exfile[1])\n for exfile in [('config.yaml', config_src),\n ('metadata.jinja2', metadata_src),\n ('userdata.jinja2', userdata_src)]]\n for exfile in exfiles:\n if not os.path.isfile(exfile[0]):\n try:\n with open(exfile[1], 'r') as f:\n src = f.read()\n with open(exfile[0], 'w+') as f:\n f.write(src)\n except IOError as ex:\n warn(\"Error writing example file: {}\".format(ex))", "def configure(task):\n r = task.run(\n name=\"Base Configuration\",\n task=template_file,\n template=\"base.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n # r.result holds the result of rendering the template\n config = r.result\n\n r = task.run(\n name=\"Loading extra underlay data\",\n task=load_yaml,\n file=f\"extra_data/{task.host}/underlay.yaml\",\n severity_level=0,\n )\n # r.result holds the data contained in the yaml files\n # we load the data inside the host itself for further use\n task.host[\"underlay\"] = r.result\n\n r = task.run(\n name=\"Loading extra evpn data\",\n task=load_yaml,\n file=f\"extra_data/{task.host}/evpn.yaml\",\n severity_level=0,\n )\n # r.result holds the data contained in the yaml files\n # we load the data inside the host itself for further use\n task.host[\"evpn\"] = r.result\n\n r = task.run(\n name=\"Loading extra vxlan data\",\n task=load_yaml,\n file=f\"extra_data/{task.host}/vxlan.yaml\",\n severity_level=0,\n )\n # r.result holds the data contained in the yaml files\n # we load the data inside the host itself for further use\n task.host[\"vxlan\"] = r.result\n\n r = task.run(\n name=\"Interfaces Configuration\",\n task=template_file,\n template=\"interfaces.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n # we append the generated configuration\n config += r.result\n\n r = task.run(\n name=\"Routing Configuration\",\n task=template_file,\n template=\"routing.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n config += r.result\n\n r = task.run(\n name=\"EVPN Configuration\",\n task=template_file,\n template=\"evpn.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n config += r.result\n\n r = task.run(\n name=\"Role-specific Configuration\",\n task=template_file,\n template=f\"{task.host['role']}.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n # we update our hosts' config\n config += r.result\n\n task.run(\n name=\"Loading Configuration on the device\",\n task=napalm_configure,\n replace=True,\n configuration=config,\n )", "def _setup_pipeline_cfg(self):", "def 
create_yaml_workflow_schema():\n reana_yaml_schema = \\\n '''\n version: 0.4.0\n inputs:\n files:\n - code/helloworld.py\n - inputs/names.txt\n parameters:\n sleeptime: 2\n inputfile: inputs/names.txt\n helloworld: code/helloworld.py\n outputfile: outputs/greetings.txt\n outputs:\n files:\n - outputs/greetings.txt\n workflow:\n type: serial\n specification:\n steps:\n - environment: 'python:2.7'\n commands:\n - python \"${helloworld}\" --sleeptime ${sleeptime} \\\n --inputfile \"${inputfile}\" --outputfile \"${outputfile}\"\n '''\n return reana_yaml_schema", "def __build_configuration_from_yml_file(yaml_content: dict) -> Configuration:\n\n log_level = ConfigurationFactory.__get_log_level(yaml_content)\n dry_run = ConfigurationFactory.__get_dry_run(yaml_content)\n driver_path = ConfigurationFactory.__get_driver_path(yaml_content)\n driver_type = ConfigurationFactory.__get_driver_type(yaml_content)\n hon_home_url = ConfigurationFactory.__get_hon_home_url(yaml_content)\n start_month_tag = ConfigurationFactory.__get_step1_start_month_tag(yaml_content)\n start_month = ConfigurationFactory.__get_step1_start_month(yaml_content)\n start_year = ConfigurationFactory.__get_step1_start_year(yaml_content)\n end_month_tag = ConfigurationFactory.__get_step1_end_month_tag(yaml_content)\n end_month = ConfigurationFactory.__get_step1_end_month(yaml_content)\n end_year = ConfigurationFactory.__get_step1_end_year(yaml_content)\n room_choices = ConfigurationFactory.__get_step1_room_choices(yaml_content)\n\n configuration_info = ConfigurationInfo()\n configuration_info.log_level = log_level\n configuration_info.dry_run = dry_run\n configuration_info.driver_path = driver_path\n configuration_info.driver_type = driver_type\n configuration_info.hon_home_url = hon_home_url\n configuration_info.start_month_tag = start_month_tag\n configuration_info.start_month = start_month\n configuration_info.start_year = start_year\n configuration_info.end_month_tag = end_month_tag\n configuration_info.end_month = end_month\n configuration_info.end_year = end_year\n configuration_info.room_choices = room_choices\n\n return Configuration(configuration_info)", "def parser_create():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-c\", \"--config-file\", type=str, help=\"yaml configuration file name\")\n return parser.parse_args()", "def set_conf_files(application):\n example_dir = \"./docs/examples/configs/example_4\"\n application.config['GROUPS_FILE'] = example_dir + \"/groups.yml\"\n application.config['POLICIES_FILE'] = example_dir + \"/policies.yml\"", "def _yamlMakeInstance(cls):\n return cls()", "def generate_config(self):\n\n # Change crypto-config.yaml and add organizations\n yaml = YAML()\n with open(os.path.join(self.config_path, \"crypto-config-template.yaml\"), \"r\") as crypto_config_file:\n config = yaml.load(crypto_config_file)\n\n config[\"OrdererOrgs\"][0][\"Specs\"] = []\n for orderer_index in range(1, self.num_validators + 1):\n orderer_host, _ = self.experiment.get_peer_ip_port_by_id(orderer_index)\n config[\"OrdererOrgs\"][0][\"Specs\"].append({\n \"Hostname\": \"orderer%d\" % orderer_index,\n \"SANS\": [orderer_host]\n })\n\n config[\"PeerOrgs\"] = []\n for organization_index in range(1, self.num_validators + 1):\n organization_host, _ = self.experiment.get_peer_ip_port_by_id(organization_index)\n organization_config = {\n \"Name\": \"Org%d\" % organization_index,\n \"Domain\": \"org%d.example.com\" % organization_index,\n \"EnableNodeOUs\": True,\n \"Template\": {\n \"Count\": 1,\n \"SANS\": 
[organization_host]\n },\n \"Users\": {\n \"Count\": 1\n }\n }\n config[\"PeerOrgs\"].append(organization_config)\n\n with open(os.path.join(self.config_path, \"crypto-config.yaml\"), \"w\") as crypto_config_file:\n yaml.dump(config, crypto_config_file)\n\n # Change configtx.yaml\n yaml = YAML()\n with open(os.path.join(self.config_path, \"configtx-template.yaml\"), \"r\") as configtx_file:\n config = yaml.load(configtx_file)\n\n config[\"Profiles\"][\"TwoOrgsChannel\"][\"Application\"][\"Organizations\"] = []\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Consortiums\"][\"SampleConsortium\"][\"Organizations\"] = []\n\n for organization_index in range(1, self.num_validators + 1):\n org_admin = \"Org%dMSP.admin\" % organization_index\n org_peer = \"Org%dMSP.peer\" % organization_index\n org_client = \"Org%dMSP.client\" % organization_index\n organization_host, _ = self.experiment.get_peer_ip_port_by_id(organization_index)\n\n organization_config = {\n \"Name\": \"Org%dMSP\" % organization_index,\n \"ID\": \"Org%dMSP\" % organization_index,\n \"MSPDir\": \"crypto-config/peerOrganizations/org%d.example.com/msp\" % organization_index,\n \"Policies\": {\n \"Readers\": {\n \"Type\": \"Signature\",\n \"Rule\": \"OR('%s', '%s', '%s')\" % (org_admin, org_peer, org_client)\n },\n \"Writers\": {\n \"Type\": \"Signature\",\n \"Rule\": \"OR('%s', '%s')\" % (org_admin, org_peer)\n },\n \"Admins\": {\n \"Type\": \"Signature\",\n \"Rule\": \"OR('%s')\" % (org_admin)\n }\n },\n \"AnchorPeers\": [{\n \"Host\": organization_host,\n \"Port\": 7000 + organization_index\n }]\n }\n\n commented_map = CommentedMap(organization_config)\n commented_map.yaml_set_anchor(\"Org%d\" % organization_index, always_dump=True)\n config[\"Organizations\"].append(commented_map)\n config[\"Profiles\"][\"TwoOrgsChannel\"][\"Application\"][\"Organizations\"].append(commented_map)\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Consortiums\"][\"SampleConsortium\"][\"Organizations\"]\\\n .append(commented_map)\n\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"EtcdRaft\"][\"Consenters\"] = []\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"Addresses\"] = []\n\n for organization_index in range(1, self.num_validators + 1):\n organization_host, _ = self.experiment.get_peer_ip_port_by_id(organization_index)\n consenter_port = 7000 + organization_index\n consenter_info = {\n \"Host\": organization_host,\n \"Port\": consenter_port,\n \"ClientTLSCert\": \"crypto-config/ordererOrganizations/example.com/orderers/\"\n \"orderer%d.example.com/tls/server.crt\" % organization_index,\n \"ServerTLSCert\": \"crypto-config/ordererOrganizations/example.com/orderers/\"\n \"orderer%d.example.com/tls/server.crt\" % organization_index\n }\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"EtcdRaft\"][\"Consenters\"].append(consenter_info)\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"Addresses\"].append(\n \"%s:%d\" % (organization_host, consenter_port))\n\n with open(os.path.join(self.config_path, \"configtx.yaml\"), \"w\") as configtx_file:\n round_trip_dump(config, configtx_file, Dumper=RoundTripDumper)", "def load_yaml(self, repo_owner, repo_name):\n # TODO(chunhsiang): for now all the paths including gcs and local sides\n # are set using repo_owner/repo_name (see repo_config.py), meaning the\n # paths returned from `RepoConfig(...)` are related to the specific\n # repo_owner/repo_name.\n # Will update them after finish the config map.\n self.config = 
RepoConfig(repo_owner=repo_owner, repo_name=repo_name)", "def __init__(self, config):\n logging.info(\"Creating footprint\")\n # self.infra = yaml.load(config)\n self.infra = config\n self.footprint_name = self.infra.get(\"footprint\", \"ehw\")\n self.images = self.infra.get(\"images\")\n self.old_images = self.infra.get(\"old_images\", [])\n self.container_name = \"%s-metadata\" % self.footprint_name\n \n self.admin_password = self.infra.get('admin-password')\n self.savefile = self.infra.get(\"footprint\", \"outfile\") + \"-save.yaml\"\n if os.path.exists(self.savefile):\n self.saved_images = yaml.load(open(self.savefile))\n self.footprint_status=self.infra.get(\"footprint_status\", None)\n logging.debug(\"Loaded saved images: %s\" % self.saved_images)\n # sys.exit(0) ", "def config( **kwargs ):", "def parser_setup():\n ap = argparse.ArgumentParser(description=__doc__)\n ap.add_argument(\"-c\", \"--config-dir\", default=\".\",\n help=\"Configuration directory. Contains YAML configuration\"\n \"files.\")\n ap.add_argument(\"-v\", \"--verbose\", action=\"count\", default=1,\n help=\"Print copious debugging info.\")\n ap.add_argument(\"-q\", \"--quiet\", action=\"count\", default=0,\n help=\"Suppress output. -qq to suppress ALL output.\")\n ap.add_argument(\"-p\", \"--profile\", default=\"all\",\n help=\"Dashboard profile to load from dashdef.yml\")\n ap.add_argument(metavar=\"HOST\", nargs=\"*\", dest=\"host_globs\",\n help=\"Host glob.\")\n return ap", "def configure_for_pokered(config=config):\n attrs = {\n \"version\": \"red\",\n\n \"map_dir\": os.path.join(config.path, 'maps/'),\n \"gfx_dir\": os.path.join(config.path, 'gfx/tilesets/'),\n \"to_gfx_name\": red_gfx_name,\n \"block_dir\": os.path.join(config.path, 'gfx/blocksets/'), # not used\n \"block_ext\": '.bst', # not used\n\n \"palettes_on\": False,\n\n \"constants_filename\": os.path.join(config.path, 'constants.asm'),\n\n \"time_of_day\": 1,\n }\n return attrs", "def register(cls):\n yaml.add_constructor(cls.label(), cls.parse_yaml)\n yaml.add_representer(cls, cls.dump_yaml)", "def setup(app) -> Dict[str, Any]:\n app.add_config_value(\"uqbar_book_console_setup\", [], \"env\")\n app.add_config_value(\"uqbar_book_console_teardown\", [], \"env\")\n app.add_config_value(\n \"uqbar_book_extensions\", [\"uqbar.book.extensions.GraphExtension\"], \"env\"\n )\n app.add_config_value(\"uqbar_book_strict\", False, \"env\")\n app.add_config_value(\"uqbar_book_use_black\", False, \"env\")\n app.add_config_value(\"uqbar_book_use_cache\", True, \"env\")\n app.add_config_value(\"uqbar_book_block_options\", {}, \"env\")\n app.add_directive(\"book\", UqbarBookDirective)\n app.add_directive(\"book-defaults\", UqbarBookDefaultsDirective)\n app.add_directive(\"book-import\", UqbarBookImportDirective)\n\n for node_class in [uqbar_book_defaults_block, uqbar_book_import_block]:\n app.add_node(\n node_class,\n html=[skip_node, None],\n latex=[skip_node, None],\n text=[skip_node, None],\n )\n app.connect(\"builder-inited\", on_builder_inited)\n app.connect(\"config-inited\", on_config_inited)\n app.connect(\"doctree-read\", on_doctree_read)\n app.connect(\"build-finished\", on_build_finished)\n return {\n \"version\": uqbar.__version__,\n \"parallel_read_safe\": False,\n \"parallel_write_safe\": True,\n }", "def config(self) -> Dict[str, Any]:", "def load_config(config: str):\n\n def join(loader, node):\n seq = loader.construct_sequence(node)\n return \"/\".join([str(i) for i in seq])\n\n yaml.add_constructor(\"!join\", join)\n with open(config, \"r\") as 
config_file:\n config = yaml.load(config_file, yaml.FullLoader)\n\n return config", "def test_load_config__no_spaces_in_aliases(self):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\"image: bosybux\\n\")\n f.write(\"aliases:\\n\")\n f.write(\" this has spaces: whatever\\n\")\n\n self._invalid_config()", "def configure(self, options, conf):", "def setup_parser_config(subparsers):\r\n parser = subparsers.add_parser('config', help='Freeseer configuration functions')\r\n subparsers = parser.add_subparsers(dest=\"config_service\")\r\n setup_parser_config_reset(subparsers)\r\n setup_parser_config_youtube(subparsers)", "def setup_roles(self, cfg, path):\n\n if cfg and cfg.get(\"meta\"):\n with open(os.path.join(path, \"meta\", \"main.yml\"), \"r\") as f:\n loaded_cfg = yaml.safe_load(f)\n loaded_cfg[\"galaxy_info\"] = cfg[\"meta\"]\n with open(os.path.join(path, \"meta\", \"main.yml\"), \"w\") as f:\n yaml.dump(loaded_cfg, f)", "def test_load_config_image_from_yaml_nested_keys_with_escaped_characters(self):\n with open(\".gitlab.yml\", \"w\") as f:\n f.write(\".its:\\n\")\n f.write(\" somewhere.down:\\n\")\n f.write(\" here: dummian:8.2\\n\")\n\n with open(\".scuba.yml\", \"w\") as f:\n f.write('image: !from_yaml .gitlab.yml \"\\\\.its.somewhere\\\\.down.here\"\\n')\n\n config = scuba.config.load_config(\".scuba.yml\")\n assert config.image == \"dummian:8.2\"", "def _create_config(env_path):\n s2e_yaml = 's2e.yaml'\n version_path = os.path.join(os.path.dirname(__file__), '..', 'dat', 'VERSION')\n\n with open(version_path, 'r', encoding='utf-8') as fp:\n context = {\n 'creation_time': str(datetime.datetime.now()),\n 'version': fp.read().strip(),\n }\n\n render_template(context, s2e_yaml, os.path.join(env_path, s2e_yaml))", "def configure(self):", "def configure(self):", "def configure(self):", "def configure(self):", "def test_construct_2_single_bootstrap(self):\n configerus.new_config(bootstraps=[\"dict\"])", "def main():\n parser = argparse.ArgumentParser(description='Compose a yaml file.')\n parser.add_argument(\n 'root',\n type=argparse.FileType('r'),\n help='The root yaml file to compose.'\n )\n\n args = parser.parse_args()\n\n result = yaml.load(args.root, Loader=ComposeLoader)\n\n print(yaml.dump(result))", "def generate_config(context):\n resources = []\n\n # Create an initial 'STARTED' pubsub notification.\n if 'pubsubTopic' in context.properties:\n resources.extend(\n create_pubsub_notification(\n context,\n depends_on=[],\n status_string='STARTED',\n ))\n\n # Required properties.\n billing_account_id = context.properties['billingAccountId']\n parent_organization = context.properties['parentOrganization']\n project_id = context.properties['projectId']\n\n # Optional properties, with defaults.\n high_security_network = context.properties.get('highSecurityNetwork', False)\n private_ip_google_access = context.properties.get('privateIpGoogleAccess', False)\n storage_bucket_lifecycle = context.properties.get('storageBucketLifecycle', 180)\n billing_account_friendly_name = context.properties.get('billingAccountFriendlyName', billing_account_id)\n # Use a project name if given, otherwise it's safe to fallback to use the\n # project ID as the name.\n project_name = context.properties.get('projectName', project_id)\n labels_obj = context.properties.get('labels', {})\n\n # Save this template's version number and all parameters inputs to the project metadata to keep track of what\n # operations were performed on a project.\n labels_obj.update({\n \"firecloud-project-template-version\" : 
str(FIRECLOUD_PROJECT_TEMPLATE_VERSION_ID)\n })\n\n for k, v in context.properties.items():\n label_k, label_v = satisfy_label_requirements('param--' + str(k), v)\n labels_obj.update({\n label_k: label_v\n })\n\n\n if high_security_network:\n labels_obj.update({\n \"vpc-network-name\" : FIRECLOUD_VPC_NETWORK_NAME,\n \"vpc-subnetwork-name\" : FIRECLOUD_VPC_SUBNETWORK_NAME\n })\n\n if 'parentFolder' in context.properties:\n parent_obj = {\n 'id': context.properties['parentFolder'],\n 'type': 'folder',\n }\n else:\n parent_obj = {\n 'id': context.properties['parentOrganization'],\n 'type': 'organization',\n }\n\n # Create the main project resource.\n resources.append({\n 'type': 'templates/project.py',\n 'name': 'fc-project',\n 'properties': {\n 'activateApis': FIRECLOUD_REQUIRED_APIS,\n 'billingAccountId': billing_account_id,\n 'billingAccountFriendlyName': billing_account_friendly_name,\n 'iamPolicies': create_iam_policies(context),\n 'labels': labels_obj,\n 'name': project_name,\n # The project parent. For FireCloud, this should refer to the\n # firecloud.org (or equivalent) GCP organization ID.\n 'parent': parent_obj,\n 'projectId': project_id,\n # If true, this would remove the default compute egine service\n # account. FireCloud doesn't use this SA, but we're leaving this set\n # to False to avoid changing any legacy behavior, at least initially.\n 'removeDefaultSA': False,\n # Removes the default VPC network for projects requiring stringent\n # network security configurations.\n 'removeDefaultVPC': high_security_network,\n 'createUsageExportBucket': False,\n # Always set up the storage logs and cromwell auth buckets for Firecloud\n 'storageLogsBucket': True,\n 'storageBucketLifecycle': storage_bucket_lifecycle,\n 'cromwellAuthBucket': True\n }\n })\n\n if high_security_network:\n resources.extend(create_high_security_network(context))\n resources.extend(create_firewall(context))\n if private_ip_google_access:\n resources.extend(create_private_google_access_dns_zone(context))\n else:\n resources.extend(create_default_network(context))\n\n if 'pubsubTopic' in context.properties:\n resources.extend(\n create_pubsub_notification(\n context,\n # This is somewhat hacky, but we can't simply collect the name of each\n # collected resource since template call nodes aren't \"real\" resources\n # that can be part of a dependsOn stanza. So instead, we collect the\n # names of all resources that are output by the network (which itself\n # depends on the project). 
It doesn't seem to be possible to concatenate\n # dependsOn arrays within the reference syntax, otherwise we could make\n # this depend explicitly on all resources from the template nodes.\n depends_on='$(ref.fc-network.resourceNames)',\n status_string='COMPLETED'))\n\n return {'resources': resources}", "def get_config():\n\n parser = argparse.ArgumentParser(\n description='ZoomingSloMo or only Slo-Mo training argument parser')\n parser.add_argument('--cfg', default=\"./config.yaml\")\n args, _ = parser.parse_known_args()\n conf = read_yaml(args.cfg)\n\n parser.add_argument('--lmdb-data-gt', type=str, default=\"datasets/\",\n help='Path to HR frames lmdb for training')\n\n parser.add_argument('--lmdb-data-lq', type=str, default=\"datasets/\",\n help='Path to LR frames lmdb for training')\n\n parser.add_argument('--output-dir', type=str, default=\"models/\",\n help='Path to store trained models')\n\n parser.add_argument('--batch-size', type=int, default=\"12\",\n help='Maximum number of iterations for training')\n\n parser.add_argument('--gt-size', type=int, default=128,\n help='Ground truth frame size')\n\n parser.add_argument('--only-slomo', action='store_true', default=False,\n help='If True, network will train for Slo-Mo only (No Zooming)')\n\n args = parser.parse_args()\n\n # Refine config file variables\n conf.data.lmdb_data_gt = args.lmdb_data_gt\n conf.data.lmdb_data_lq = args.lmdb_data_lq\n conf.data.output_dir = args.output_dir\n conf.train.batch_size = args.batch_size\n conf.train.only_slomo = args.only_slomo\n conf.data.gt_size = args.gt_size if not args.only_slomo else args.gt_size // 4\n conf.data.lr_size = args.gt_size // 4\n\n return conf", "def YamlDumper(kg: KubraGen):\n def k(*args, **kwargs):\n return YamlDumperImpl(*args, kg=kg, **kwargs)\n return k", "def config(ctx):\n return", "def generate_yaml() -> None:\n import yaml\n import networkx as nx\n\n def dump(d):\n logger.debug(\"Dumping %s %s\", d[\"type\"], d[\"name\"])\n return \"---\\n\" + yaml.dump(d, default_flow_style=False, indent=4)\n\n g = nx.Graph()\n for eclassifier in mm_root.eClassifiers:\n logger.debug(\"Processing eclassifier %s\", eclassifier.name)\n\n if isinstance(eclassifier, EClass):\n if not _utterances_filename(eclassifier.name):\n logger.warn(\n \"Eclassifier %s does not contain any utterances, skipping\",\n eclassifier.name,\n )\n continue\n\n d = {\"type\": \"intent\", \"name\": eclassifier.name, \"slots\": []}\n for eattribute in eclassifier.eAttributes:\n d_a = _eattribute_to_dict(eattribute)\n type_name = d_a[\"name\"]\n d[\"slots\"].append({\"name\": eattribute.name, \"entity\": type_name})\n if isinstance(eattribute.eType, EEnum):\n # EEnums should be handled later in the loop\n g.add_edge(eclassifier, eattribute.eType)\n continue\n if \"type\" in d_a:\n g.add_node(type_name, intent=False, d=d_a)\n g.add_edge(eclassifier, type_name)\n g.add_node(eclassifier, intent=True, d=d)\n elif isinstance(eclassifier, EEnum):\n g.add_node(eclassifier, intent=False, d=_enum_to_dict(eclassifier))\n else:\n logger.error(\n \"eclassifier %s is of unkown type %s\", eclassifier, type(eclassifier)\n )\n\n for node, data in g.nodes(data=True):\n if not data:\n logger.debug(\"eclassifier %s didn't get any data\", node.name)\n continue\n if data[\"intent\"]:\n others = \"\\n\".join(\n dump(g.nodes[eattribute][\"d\"])\n for eattribute in g.adj[node]\n if len(g.adj[eattribute]) == 1\n )\n d = data[\"d\"]\n with open(\"intent_\" + d[\"name\"] + \".yaml\", \"w\") as f:\n if others:\n print(others, file=f)\n 
print(dump(d).strip(), file=f)\n else:\n if len(g.adj[node]) == 1:\n continue\n d = data[\"d\"]\n with open(\"entity_\" + d[\"name\"] + \".yaml\", \"w\") as f:\n print(dump(d).strip(), file=f)", "def parse_yaml_config(config_file):\n\n def construct_keras_model(loader, node):\n from tensorflow.keras.models import load_model\n\n model_str = loader.construct_scalar(node)\n return load_model(model_str)\n\n yaml.add_constructor('!keras_model', construct_keras_model, Loader=yaml.SafeLoader)\n\n print('Loading configuration from', config_file)\n with open(config_file) as file:\n parsed_config = yaml.safe_load(file)\n return parsed_config", "def config(*subconfig):\n\n with open('configure.yaml', 'r') as stream:\n args = yaml.load(stream)\n\n # Parse command line arguments\n parser = argparse.ArgumentParser(description='')\n parser.add_argument(\n '--node',\n '-n',\n help='The node ID.'\n )\n parser.add_argument(\n '--processes',\n '-p',\n help='The total number of processes.'\n )\n # Store command line arguments in a dict\n cl_args = parser.parse_args()\n cl_args_dict = vars(cl_args)\n # Combine\n args.update(cl_args_dict)\n # Find subconfig if argument is passed\n for s in subconfig:\n try:\n args = args[s]\n except:\n pass\n # Return\n return args", "def web_archive_config():\n\n try:\n auth_check()\n except Exception as e:\n return flask.redirect(str(e))\n\n socket_timeout = flask.request.args.get('ytdl-socket-timeout', '120')\n retries = flask.request.args.get('ytdl-retries', 'infinite')\n output = flask.request.args.get('ytdl-output', '%(uploader_id)s/%(id)s.%(ext)s')\n overwrites = flask.request.args.get('ytdl-overwrites', 'false') == 'true'\n info_json = flask.request.args.get('ytdl-info-json', 'true') == 'true'\n thumbnail = flask.request.args.get('ytdl-thumbnail', 'true') == 'true'\n format = flask.request.args.get('ytdl-format', 'bestvideo[vcodec^=vp]' +\n '+bestaudio[acodec=opus]/bestvideo+bestaudio[acodec=opus]' +\n '/bestvideo+bestaudio/best')\n merge_format = flask.request.args.get('ytdl-merge-format', 'mkv')\n all_subs = flask.request.args.get('ytdl-all-subs', 'true') == 'true'\n sub_format = flask.request.args.get('ytdl-sub-format', 'srt/best')\n convert_subs = flask.request.args.get('ytdl-convert-subs', 'srt')\n\n config = io.BytesIO()\n\n config.write(('--socket-timeout ' + socket_timeout + '\\n').encode('utf-8'))\n config.write(('--retries ' + retries + '\\n').encode('utf-8'))\n config.write(('--output ' + output + '\\n').encode('utf-8'))\n if not overwrites:\n config.write('--no-overwrites\\n'.encode('utf-8'))\n if info_json:\n config.write('--write-info-json\\n'.encode('utf-8'))\n if thumbnail:\n config.write('--write-thumbnail\\n'.encode('utf-8'))\n config.write(('--format ' + format + '\\n').encode('utf-8'))\n config.write(('--merge-output-format ' + merge_format + '\\n').encode('utf-8'))\n if all_subs:\n config.write('--all-subs\\n'.encode('utf-8'))\n config.write(('--sub-format ' + sub_format + '\\n').encode('utf-8'))\n config.write(('--convert-subs ' + convert_subs + '\\n').encode('utf-8'))\n\n config.seek(0)\n\n return flask.Response(config,\n mimetype = 'text/plain',\n headers = { 'Content-Disposition': 'attachment;filename=config.txt' }\n )", "def _init_cli_config() -> None:\n conf_dir = os.path.dirname(CLI_CONFIG_PATH)\n if not os.path.exists(conf_dir):\n os.makedirs(conf_dir)\n with open(CLI_CONFIG_PATH, \"w+\") as f:\n yaml.dump({}, f, default_flow_style=False)", "def config(self):\n pass", "def config(self):\n pass", "def get_market_config():\n\n 
logger.info(\"MarketFlow Configuration\")\n\n # Read the configuration file\n\n full_path = SSEP.join([PSEP, 'config', 'market.yml'])\n with open(full_path, 'r') as ymlfile:\n cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)\n\n # Store configuration parameters in dictionary\n\n specs = {}\n\n # Section: market [this section must be first]\n\n specs['create_model'] = cfg['market']['create_model']\n fractal = cfg['market']['data_fractal']\n try:\n _ = pd.to_timedelta(fractal)\n except:\n logger.info(\"data_fractal [%s] is an invalid pandas offset\",\n fractal)\n specs['data_fractal'] = fractal\n specs['data_history'] = cfg['market']['data_history']\n specs['forecast_period'] = cfg['market']['forecast_period']\n fractal = cfg['market']['fractal']\n try:\n test_interval = pd.to_timedelta(fractal)\n except:\n logger.info(\"fractal [%s] is an invalid pandas offset\",\n fractal)\n specs['fractal'] = fractal\n specs['lag_period'] = cfg['market']['lag_period']\n specs['leaders'] = cfg['market']['leaders']\n specs['predict_history'] = cfg['market']['predict_history']\n specs['schema'] = cfg['market']['schema']\n specs['subschema'] = cfg['market']['subschema']\n specs['api_key_name'] = cfg['market']['api_key_name']\n specs['api_key'] = cfg['market']['api_key']\n specs['subject'] = cfg['market']['subject']\n specs['target_group'] = cfg['market']['target_group']\n\n # Set API Key environment variable\n if specs['api_key']:\n os.environ[specs['api_key_name']] = specs['api_key']\n\n # Create the subject/schema/fractal namespace\n\n sspecs = [specs['subject'], specs['schema'], specs['fractal']]\n space = Space(*sspecs)\n\n # Section: features\n\n try:\n logger.info(\"Getting Features\")\n specs['features'] = cfg['features']\n except:\n logger.info(\"No Features Found\")\n specs['features'] = {}\n\n # Section: groups\n\n try:\n logger.info(\"Defining Groups\")\n for g, m in list(cfg['groups'].items()):\n Group(g, space)\n Group.groups[g].add(m)\n except:\n logger.info(\"No Groups Found\")\n\n # Section: aliases\n\n try:\n logger.info(\"Defining Aliases\")\n for k, v in list(cfg['aliases'].items()):\n Alias(k, v)\n except:\n logger.info(\"No Aliases Found\")\n\n # Section: system\n\n try:\n logger.info(\"Getting System Parameters\")\n specs['system'] = cfg['system']\n except:\n logger.info(\"No System Parameters Found\")\n specs['system'] = {}\n\n # Section: variables\n\n logger.info(\"Defining AlphaPy Variables [phigh, plow]\")\n\n Variable('phigh', 'probability >= 0.7')\n Variable('plow', 'probability <= 0.3')\n\n try:\n logger.info(\"Defining User Variables\")\n for k, v in list(cfg['variables'].items()):\n Variable(k, v)\n except:\n logger.info(\"No Variables Found\")\n\n # Section: functions\n\n try:\n logger.info(\"Getting Variable Functions\")\n specs['functions'] = cfg['functions']\n except:\n logger.info(\"No Variable Functions Found\")\n specs['functions'] = {}\n\n # Log the stock parameters\n\n logger.info('MARKET PARAMETERS:')\n logger.info('api_key = %s', specs['api_key'])\n logger.info('api_key_name = %s', specs['api_key_name'])\n logger.info('create_model = %r', specs['create_model'])\n logger.info('data_fractal = %s', specs['data_fractal'])\n logger.info('data_history = %d', specs['data_history'])\n logger.info('features = %s', specs['features'])\n logger.info('forecast_period = %d', specs['forecast_period'])\n logger.info('fractal = %s', specs['fractal'])\n logger.info('lag_period = %d', specs['lag_period'])\n logger.info('leaders = %s', specs['leaders'])\n logger.info('predict_history = 
%s', specs['predict_history'])\n logger.info('schema = %s', specs['schema'])\n logger.info('subject = %s', specs['subject'])\n logger.info('subschema = %s', specs['subschema'])\n logger.info('system = %s', specs['system'])\n logger.info('target_group = %s', specs['target_group'])\n\n # Market Specifications\n return specs" ]
[ "0.6141873", "0.6054223", "0.6019494", "0.5994903", "0.59923637", "0.5968164", "0.59273314", "0.57849234", "0.5761136", "0.57510424", "0.57198846", "0.5717234", "0.57150394", "0.5667676", "0.5636936", "0.56257606", "0.55977577", "0.55945677", "0.55880404", "0.55880404", "0.55735403", "0.5540175", "0.551129", "0.5499784", "0.5492602", "0.54755855", "0.54755855", "0.5462785", "0.5429664", "0.541362", "0.5405347", "0.5378949", "0.5363287", "0.5363287", "0.5362322", "0.5361343", "0.5360909", "0.5359034", "0.5357278", "0.53515935", "0.5345893", "0.53407294", "0.5339063", "0.5336948", "0.5335996", "0.5335071", "0.53317493", "0.53267956", "0.5320444", "0.53181136", "0.5304861", "0.52956384", "0.5289192", "0.52876836", "0.52843076", "0.52793497", "0.5273701", "0.52641845", "0.52630013", "0.5261905", "0.52606153", "0.5259282", "0.5257387", "0.5254178", "0.5249969", "0.52481234", "0.5245364", "0.52380514", "0.5226954", "0.5226277", "0.5223292", "0.52194", "0.52166325", "0.521645", "0.5214534", "0.5206472", "0.51952374", "0.51933056", "0.51911914", "0.51880443", "0.5187835", "0.51861674", "0.5182964", "0.5182964", "0.5182964", "0.5182964", "0.5169619", "0.5167971", "0.51624244", "0.5159394", "0.5158747", "0.51578176", "0.5154914", "0.51520646", "0.51496774", "0.5143552", "0.5143086", "0.5141152", "0.5141152", "0.51305413" ]
0.69550043
0
Write to YAML and JS files the final constructed configurations
def write_to_file(self, file_name=None, sub_path=None) -> None:
        super().write_to_file(file_name, settings.ARTILLERY_FOLDER)
        self.set_yaml_config()
        self.write_file_to_output(
            settings.ARTILLERY_YAML,
            self.yaml_config,
            append_mode=False,
            project_sub_folder=settings.ARTILLERY_FOLDER
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self):\n for p, c in self.configs_:\n c.write(p)", "def _save_configuration_to_yml(self):\n data = self.get_configuration_data()\n timestamp = self.model.timestamp\n with open(os.path.join(CHECKPOINTS_DIR, timestamp, 'config_{}.yml'.format(timestamp)), 'w') as outfile:\n yaml.dump(dict(data), outfile, default_flow_style=False)", "def write_config(self, config_file):\n \n # write root paths\n \n # write reference data\n \n # write tool paths\n \n pass", "def __write_config(self):\n with open(self.config_file, 'w') as data_file:\n config = {\"ibooks_doc_root\":self.ibooks_doc_root,\n \"library_folder\":self.library_folder,\n \"annotation_folder\":self.annotation_folder,\n \"tmp_dir\":self.tmp_dir\n } \n data = json.dumps(config, ensure_ascii=False)\n data_file.write(data)", "def write(self):\n cfgpath = os.path.join(self.config_dir, CONFIG_FILENAME)\n ofile = open(cfgpath, 'w')\n if ofile:\n log.debug( \"Write config: %s\" % cfgpath )\n cfg = yaml.dump(self.yaml, default_flow_style=False)\n log.debug( \"Config:\\n%s\" % cfg)\n ofile.write(cfg)\n ofile.close()", "def write(self):\n print yaml.dump(self._config, default_flow_style=False),", "def write(filename):\n log.msg(\"Saving configuration information to \\\"\" + filename + \"\\\"\", lvl='i', ss='ss_configfile')\n\n f = open(filename, 'w')\n cp = ConfigParser.SafeConfigParser()\n #a little string hacking because our section names are un-normalized\n #this builds a list of all the sections names\n sectionslst = []\n sections = []\n for k in _loaded.keys():\n sectionslst.append(k.split('.')[0])\n #get unique entries\n sections = _uniquer(sectionslst)\n for sec in sections:\n log.msg(\"\\tCompiling section \\\"\" + sec + \"\\\"\",\n lvl='d3', ss='ss_configfile')\n #make the headers\n cp.add_section(sec)\n #for each item in my dictionary\n #it splits the key in two and uses that for the first and second \"set\" args\n #then it uses the item.value for the 3rd arg\n # from 'section.option:value'\n \n for k in _loaded.items():\n cp.set(str(k[0]).split('.')[0], str(k[0]).split('.')[1], str(k[1]))\n cp.write(f)\n f.close()", "def save():\n with open(CONFIG_FILE, 'w') as f:\n json.dump(config, f, indent=4, sort_keys=True)", "def save_config_file(self):\n wkdir = Path(self.config_dict[\"outputdir\"])\n config_filename = str(wkdir / f\"{self.config_dict['name']}.json\")\n save_config(self.config_dict, config_filename)", "def save():\n print(\"Saving config file..\")\n\n res = yaml.round_trip_dump(_conf, indent=2, block_seq_indent=1)\n\n with open(__config_file, 'w', encoding='utf-8') as stream:\n stream.write(res)", "def save(self):\r\n with open(self.filename, 'w') as f:\r\n if self.pretty:\r\n json.dump(self.__config, f, sort_keys=False,\r\n indent=4, separators=(',', ': '))\r\n else:\r\n json.dump(self.__config, f)", "def write_config(self):\n xshear_conf=XShearConfig(self['run'])\n xshear_conf.write()", "def write(self, filename: str):\n obj = self.to_dict(self)\n config.write(obj, filename)", "def setup_plugins(self, cfg, path):\n\n if cfg:\n with open(path, \"w\") as f:\n print(\"DOCUMENTATION='''\", file=f)\n print(\"---\", file=f)\n for key in cfg:\n print(f\"{key}: {cfg[key]}\", file=f)\n print(\"'''\", file=f)", "def write(self, file=sys.stdout):\n d = self.to_dict()\n if d:\n yaml.dump([d], file, default_flow_style=False)", "def create_yaml_languages():\n with open(join(dirname(__file__), 'languages.yaml'), 'w') as f:\n yaml.dump(list(iter_languages()), f)", "def write_config(self):\r\n obj = [\r\n [self.ip,\r\n self.gate,\r\n 
self.mask,\r\n self.name,\r\n self.time]\r\n ]\r\n with open('config.json', 'wt') as jsonfile:\r\n json.dump(obj, jsonfile)", "def save_configurations(self):\n # Get the file path\n self.data_path = self.data_path_entry.get()\n # Open the file\n with open(self.data_path, 'rb') as file:\n self.log('Opened ' + str(self.data_path))\n # Un-serialize\n info = pickle.load(file)\n # Write the new properties\n self.main_window.overwrite_properties(info)\n\n self.exit()", "def save_config_file(self):\n with open(self.config_file_name, 'w',encoding='utf-8') as outfile:\n json.dump(self._config, outfile,indent=2)", "def write_config(self):\n cfg = {\n 'ALERT_API_KEY':self.api_key,\n 'APP_NAME':self.title,\n 'alertes':self.alertes\n }\n write_conf(self.CONF_FILE,cfg)", "def write(self):\n self.f.write(yaml.safe_dump(self.data, default_flow_style=False, indent=4))", "def createConfig():\n\twith open(configPath, 'w', encoding='utf-8') as file:\n\t\tjson.dump(default_config, file, indent=3)", "def writeConfig(self):\n targetFile = \"%s/%s\" % (self.workingDir, self.merge_pset_file)\n handle = open(targetFile, 'w')\n handle.write(self.mergeConfig())\n handle.close()\n return", "def write_config_file(self):\n for opt, opt_desc in self.opt_dict.iteritems():\n if 'permanent' in opt_desc and opt_desc['permanent'] == True:\n enabled = 'Always'\n else:\n enabled = opt_desc['enabled'].__str__()\n\n self.file_parser.set(opt, 'enabled', enabled)\n self.file_parser.set(opt, 'implementation',\n opt_desc['selected_imp'])\n self.file_parser.set(opt, 'optype', opt_desc['imptype'])\n\n for config, config_desc in self.config_dict.iteritems():\n enabled = config_desc['enabled'].__str__()\n self.file_parser.set(config, 'enabled', enabled)\n\n scratch_file = self.config_filename + '.scratch'\n with open(scratch_file, 'w') as cfile:\n for config in sorted(self.config_dict.keys()):\n self.write_section(cfile, config)\n\n for opt in sorted(self.opt_dict.keys()):\n self.write_section(cfile, opt)\n\n for imp in sorted(self.imp2opt_dict.keys()):\n self.write_section(cfile, imp)\n\n cfile.write(\"\\n\")\n\n os.rename(scratch_file, self.config_filename)", "def write_configs(logconf_dir):\n for name in list_logging_conf():\n conf = load_logging_conf(name)\n with io.open(os.path.join(logconf_dir, name), 'w') as f:\n f.write(json.dumps(conf))", "def writeConfig(self):\n\n qU.writeConfig(quickLogger=self.logger,\n\t\t curveDictionary=self.curveConf,\n\t\t likelihoodDictionary=self.likelihoodConf,\n commonDictionary=self.commonConf,\n analysisDictionary=self.analysisConf)", "def export_configurations():\n pass", "def save(self):\r\n if not self.filename:\r\n raise IOError(errors['NoConfigFileYet'])\r\n self.onSave()\r\n stuff = dict()\r\n for thing in ['aliases', 'triggers']:\r\n stuff[thing] = [] # Populate with (args, kwargs) pairs.\r\n if self.config.get('saving', thing):\r\n for c, o in getattr(self, thing).iteritems():\r\n stuff[thing].append(o.serialise())\r\n stuff['variables'] = dict()\r\n if self.config.get('saving', 'variables'):\r\n for v in self.variables:\r\n if hasattr(self, v):\r\n var = getattr(self, v)\r\n if type(var) in self.basicTypes:\r\n stuff['variables'][v] = var\r\n stuff['config'] = self.config.get_dump()\r\n with open(self.filename, 'w') as f:\r\n json.dump(stuff, f, indent = 1, sort_keys = True) # Finally write the completed dictionary.\r", "def write_config_file(content, filepath):\n if filepath.endswith('.yml') or filepath.endswith('.yaml'):\n return write_yaml(content, filepath)\n return 
write_json(content, filepath)", "def WriteConfig(self):\n config = wx.Config.Get()\n config.DeleteGroup(DEPS_CONFIG)\n config.SetPath(DEPS_CONFIG)\n pos = 0\n for entry in self.array:\n config.Write(\"Dependency%s\" % pos, entry)\n pos += 1\n config.SetPath('..')", "def save_to_config(self) -> None:\n config_path = os.path.join(self.base_path, \"config.json\")\n\n with open(config_path, \"r\") as _json:\n c_dict = json.load(_json)\n\n c_dict[\"mean_similarity_error\"] = self.ME\n c_dict[\"similarity_correlation\"] = self.pearson_corr\n c_dict[\"similarity_spearman_correlation\"] = self.spearman_corr\n\n with open(config_path, \"w\") as _json:\n json.dump(c_dict, _json)", "def save(self):\n with open(self._config, 'w') as f:\n json.dump(self.data, f, indent=2, sort_keys=True)", "def generate(self):\n logger.info(\"Starting yml generation..\")\n if not self.is_generatable_file:\n logger.error(\n f\"[red]Not running file {self.filename} without metadata collector.[/red]\"\n )\n return\n # Collect the wrapped functions with the details.\n self.collect_functions()\n # Make sure when they are ran, only collecting data will be performed.\n if self.metadata_collector:\n self.metadata_collector.set_collect_data(True)\n # Run the functions and by that, collect the data.\n self.run_functions()\n # Write the yml file according to the collected details.\n self.extract_metadata()\n # Make sure the functions are back to normal running state.\n if self.metadata_collector:\n self.metadata_collector.set_collect_data(False)\n # Remove imports from file\n self.remove_collector_imports()", "def save_to_file(self):\n check_path(self.config_path)\n\n with open(self.settings_file, 'w') as settings_file:\n options = self._get_options()\n json.dump(options,\n \t settings_file,\n \t indent=4,\n \t separators=(',', ': '))", "def save(self):\n # Always write out components in alphabetical order for determinism,\n # especially in tests.\n for function_name in sorted(self._components.keys()):\n self._config_parser[_COMPONENTS_SECTION][\n function_name] = self._components[function_name]\n\n with open(str(self._config_filepath), 'w') as f:\n self._config_parser.write(f)", "def saveToFile(self,filename):\n path = os.path.dirname(__file__)+\"/\"+filename\n stream = open(path,\"w\")\n yaml.dump(self.parameters(),stream)", "def write_scripts(self, out, ref, file1, file2):\n for config in self.configurations:\n program_folder = os.path.join(out, self.out)\n config.write_Strelka_script(program_folder, self.path2exe, ref, file1, file2, self.template_config)\n return None", "def save(self):\n try:\n with open(self._filename, 'w') as conf_file:\n conf_file.write(json.dumps(self._data))\n except OSError:\n _LOGGER.exception(\"Can't store config in %s\", self._filename)", "def save_config(self):\n if not os.path.exists(self._conf_dir):\n os.makedirs(self._conf_dir)\n conf_file = os.path.join(self._conf_dir, \"dql.json\")\n with open(conf_file, \"w\") as ofile:\n json.dump(self.conf, ofile, indent=2)", "def config():\n for key, value in JS_FILES.items():\n pn.config.js_files[key] = value\n pn.config.css_files.append(CSS_FILES[\"all\"])", "def run(self):\n write_config(self.filename)\n print('Wrote default config to', self.filename)", "def create_yaml(self):\n if self._language == PYTHON:\n language_str = 'python'\n package_route = '$(System.DefaultWorkingDirectory)'\n dependencies = self._python_dependencies()\n elif self._language == NODE:\n language_str = 'node'\n package_route = '$(System.DefaultWorkingDirectory)'\n dependencies = 
self._node_dependencies()\n elif self._language == DOTNET:\n language_str = 'dotnet'\n package_route = '$(System.DefaultWorkingDirectory)/publish_output/s'\n dependencies = self._dotnet_dependencies()\n elif self._language == POWERSHELL:\n language_str = 'powershell'\n package_route = '$(System.DefaultWorkingDirectory)'\n dependencies = self._powershell_dependencies()\n else:\n raise LanguageNotSupportException(self._language)\n\n if self._app_type == WINDOWS:\n platform_str = 'windows'\n yaml = self._generate_yaml(dependencies, 'VS2017-Win2016', language_str, platform_str, package_route)\n else:\n platform_str = 'linux'\n yaml = self._generate_yaml(dependencies, 'ubuntu-16.04', language_str, platform_str, package_route)\n\n with open('azure-pipelines.yml', 'w') as f:\n f.write(yaml)", "def _save_config_log(self, data):\n config_path = os.path.join(self.runtime.working_dir, '.config')\n with open(config_path, 'w') as f:\n yaml.safe_dump(data, f, default_flow_style=False)", "def write_config():\n\n e = Element(\"Configuration\")\n r = SubElement(e, \"RepositoryList\")\n r = SubElement(r, \"Repository\", name = \"default\")\n SubElement(r, \"Module\").text = args.driver\n SubElement(r, \"TokenLabel\").text = args.token_label\n SubElement(r, \"PIN\").text = args.pin\n ElementTree(e).write(args.write_config)\n args.write_config.flush()", "def save(self):\n with open(self._CONFIG_FILE_PATH, 'w') as config_file:\n json.dump(vars(self), config_file)\n return self._CONFIG_FILE_PATH", "def save_config(self):\n data = json.dumps(self.cfg)\n\n try:\n file = open(self.cfg_file_name, 'w')\n file.write(data)\n except OSError as err:\n print(\"can't save property: {0}\".format(err))\n else:\n file.close()", "def setup_configuration_file(self):\n\n with open(self.config_path, \"w+\") as f_config:\n\n f_config.write(get_configuration_file_form())", "def write_config(self):\n cfg = {\n 'channel':self.channel,\n 'seuil_min':self.seuil_min,\n 'last_level':self.last_level,\n 'last_level_date':self.last_level_date\n }\n write_conf(self.CONF_FILE,cfg)", "def updateconfig(self):\n\n # Initialize the yaml data\n ydata = {\"metadata\": self._metadata, \"nodes\": self._nodes}\n\n # Write the system config file\n filename = self._rootdir + self._metadata[\"system_config_file\"]\n with open(filename, \"w\") as yamlfile:\n yaml.dump(ydata, yamlfile)", "def __writeConfig(self):\n page = None\n\n #TODO: get values of configurations here\n particles = \"#f\" if not base.particleMgrEnabled else \"#t\"\n volume = str(round(base.musicManager.getVolume(), 2))\n mute = \"#f\" if base.AppHasAudioFocus else \"#t\"\n #TODO: add any configuration variable name that you have added\n customConfigVariables = [\n \"\", \"particles-enabled\", \"audio-mute\", \"audio-volume\"]\n if os.path.exists(prcFile):\n # open the config file and change values according to current\n # application settings\n page = loadPrcFile(Filename.fromOsSpecific(prcFile))\n removeDecls = []\n for dec in range(page.getNumDeclarations()):\n # Check if our variables are given.\n # NOTE: This check has to be done to not loose our base or other\n # manual config changes by the user\n if page.getVariableName(dec) in customConfigVariables:\n decl = page.modifyDeclaration(dec)\n removeDecls.append(decl)\n for dec in removeDecls:\n page.deleteDeclaration(dec)\n # NOTE: particles-enabled and audio-mute are custom variables and\n # have to be loaded by hand at startup\n # Particles\n page.makeDeclaration(\"particles-enabled\", particles)\n # audio\n 
page.makeDeclaration(\"audio-volume\", volume)\n page.makeDeclaration(\"audio-mute\", mute)\n else:\n # Create a config file and set default values\n cpMgr = ConfigPageManager.getGlobalPtr()\n page = cpMgr.makeExplicitPage(\"%s Pandaconfig\"%appName)\n # set OpenGL to be the default\n page.makeDeclaration(\"load-display\", \"pandagl\")\n # get the displays width and height\n w = self.pipe.getDisplayWidth()\n h = self.pipe.getDisplayHeight()\n # set the window size in the config file\n page.makeDeclaration(\"win-size\", \"%d %d\"%(w, h))\n # set the default to fullscreen in the config file\n page.makeDeclaration(\"fullscreen\", \"1\")\n # particles\n page.makeDeclaration(\"particles-enabled\", \"#t\")\n # audio\n page.makeDeclaration(\"audio-volume\", volume)\n page.makeDeclaration(\"audio-mute\", \"#f\")\n # create a stream to the specified config file\n configfile = OFileStream(prcFile)\n # and now write it out\n page.write(configfile)\n # close the stream\n configfile.close()", "def save(self, filepath):\n writer = json.dump if Config.isjson(filepath) else yaml.dump\n with open(filepath, 'w') as f:\n writer(dict(self), f)", "def save(self) -> None:\n logger.info(\"Saving to config...\")\n yml.save(self._config, self.configpath)", "def save():\n\t\ttry:\n\t\t\t#paths\n\t\t\tConfig.conf.set('paths', 'source_dir', Config.source_dir)\n\t\t\tConfig.conf.set('paths', 'lyrics_dir', Config.lyrics_dir)\n\n\t\t\t#actions\n\t\t\tConfig.setBool('actions', 'save_to_file', Config.save_to_file)\n\t\t\tConfig.setBool('actions', 'save_to_tag', Config.save_to_tag)\n\n\t\t\t#sources\n\t\t\tConfig.setBool('sources', 'lyric_wikia', Config.lyric_wikia)\n\t\t\tConfig.setBool('sources', 'musix_match', Config.musix_match)\n\t\t\tConfig.setBool('sources', 'lyricsmode', Config.lyricsmode)\n\t\t\tConfig.setBool('sources', 'az_lyrics', Config.az_lyrics)\n\n\t\t\twith open(Config.config_path, 'w') as configfile:\n\t\t\t\tConfig.conf.write(configfile)\n\t\t\treturn True\n\n\t\t# Catch all config parser errors\n\t\texcept BaseConfigParserError as e:\n\t\t\tprint('Unable to save settings to config.')\n\t\t\tprint(e)\n\t\t\treturn False\n\n\t\t# Catch file handling errors\n\t\texcept IOError as e:\n\t\t\tprint('Unable to save settings to config.')\n\t\t\tprint(e)\n\t\t\treturn False", "def configs() -> None:\n config_data = cast(bytes, pkgutil.get_data('DLA', 'config.yml'))\n server_config_data = cast(\n bytes, pkgutil.get_data('DLA.server', 'server_config.yml')\n )\n\n current_path = Path('.')\n\n (current_path / 'config.yml').write_bytes(config_data)\n (current_path / 'server_config.yml').write_bytes(server_config_data)\n\n click.echo('Copied default configuration files.')", "def _convert(self):\n\n json_data = xjson.loads(self.jfile_path)\n\n with io.open(self.yfile_path, 'w', encoding='utf8') as f:\n yaml.dump(json_data, f, default_flow_style=False, allow_unicode=True)", "def _write_yaml(self, parameter_set_files):\n text_list = []\n # Construct the output text\n for parameter_set_file, parameter_set in self.parameter_study.groupby(_set_coordinate_key):\n text = yaml.safe_dump(\n parameter_set.squeeze().to_array().to_series().to_dict()\n )\n text_list.append(text)\n # If no output file template is provided, printing to stdout or single file. 
Prepend set names.\n if not self.provided_output_file_template:\n # If no output file template is provided, printing to stdout or a single file\n # Adjust indentation for syntactically correct YAML.\n prefix = \" \"\n # TODO: split up text prefix change for readability\n text_list = [\"\\n\".join([f\"{prefix}{item}\" for item in text.split('\\n')[:-1]])+\"\\n\" for text in text_list]\n text_list = [f\"{parameter_set_file.name}:\\n{text}\" for parameter_set_file, text in\n zip(parameter_set_files, text_list)]\n output_text = \"\".join(text_list)\n if self.output_file and not self.dryrun:\n self._conditionally_write_yaml(self.output_file, yaml.safe_load(output_text))\n elif self.output_file and self.dryrun:\n sys.stdout.write(f\"{self.output_file.resolve()}\\n{output_text}\")\n else:\n sys.stdout.write(output_text)\n # If output file template is provided, writing to parameter set files\n else:\n for parameter_set_file, text in zip(parameter_set_files, text_list):\n if self.overwrite or not parameter_set_file.is_file():\n # If dry run is specified, print the files that would have been written to stdout\n if self.dryrun:\n sys.stdout.write(f\"{parameter_set_file.resolve()}\\n{text}\")\n else:\n self._conditionally_write_yaml(parameter_set_file, yaml.safe_load(text))", "def save_config(self):\n with open(self.config_file, 'w') as fout:\n json.dump({'name_dict': self._name_dict, 'metric_dict': self._metric_dict, 'credential_path': self.credential_path, 'path_for_worksheet_name': self.path_for_worksheet_name}, fout)", "def create_config(context, target_repoids, debug, test, tasks, on_aws=False):\n context.makedirs(os.path.dirname(DNF_PLUGIN_DATA_PATH), exists_ok=True)\n with context.open(DNF_PLUGIN_DATA_PATH, 'w+') as f:\n config_data = build_plugin_data(\n target_repoids=target_repoids, debug=debug, test=test, tasks=tasks, on_aws=on_aws\n )\n json.dump(config_data, f, sort_keys=True, indent=2)", "def _write_config_file(self, config_data, indent=2):\r\n with open(self.config, 'w') as f:\r\n json.dump(config_data, f, indent=indent)", "def writeConfig(quickLogger, commonDictionary, analysisDictionary = {}, likelihoodDictionary = {}, plotDictionary = {}, curveDictionary = {}):\n \n basename = commonDictionary['base']\n\n config = ConfigParser.RawConfigParser()\n config.read(basename+'.cfg')\n if(not config.has_section('common')):\n config.add_section('common')\n\n for variable, value in commonDictionary.iteritems():\n config.set('common', variable, value)\n quickLogger.info(\"wrote common config to \"+basename+\".cfg.\")\n\n if(analysisDictionary):\n if(config.has_section('quickAnalysis')):\n quickLogger.info(\"quickAnalysis config exists, overwriting...\") \n else:\n config.add_section('quickAnalysis') \n for variable, value in analysisDictionary.iteritems():\n config.set('quickAnalysis', variable, value)\n quickLogger.info(\"wrote quickAnalysis config to \"+basename+\".cfg.\")\n\n if(likelihoodDictionary):\n if(config.has_section('quickLike')):\n quickLogger.info(\"quickLike config exists, overwriting...\") \n else:\n config.add_section('quickLike') \n for variable, value in likelihoodDictionary.iteritems():\n config.set('quickLike', variable, value)\n quickLogger.info(\"wrote quickLikeconfig to \"+basename+\".cfg.\")\n\n if(plotDictionary):\n if(config.has_section('quickPlot')):\n quickLogger.info(\"quickPlot config exists, overwriting...\") \n else:\n config.add_section('quickPlot') \n for variable, value in plotDictionary.iteritems():\n config.set('quickPlot', variable, value)\n 
quickLogger.info(\"wrote quickPlot config to \"+basename+\".cfg.\")\n\n if(curveDictionary):\n if(config.has_section('quickCurve')):\n quickLogger.info(\"quickCurve config exists, overwriting...\") \n else:\n config.add_section('quickCurve') \n for variable, value in curveDictionary.iteritems():\n config.set('quickCurve', variable, value)\n quickLogger.info(\"wrote quickCurve config to \"+basename+\".cfg.\")\n\n with open(basename+'.cfg', 'wb') as configfile:\n config.write(configfile)", "def save_config(conf, save_path):\n with open(os.path.join(save_path), \"w\") as f:\n f.write(yaml.dump({'param': conf}, default_flow_style=False))", "def save_to_yml_file(self):\n yml_filename = self.get_yml_filename()\n\n if os.path.exists(yml_filename) and not self.force:\n logger.warning(\n f\"[red]File {yml_filename} already exists, not writing. To override add --force.[/red]\"\n )\n else:\n if self.force:\n logger.info(\n f\"[yellow]Force flag is used. Overriding {yml_filename} if it exists.[/yellow]\"\n )\n if self.metadata:\n self.metadata.save_dict_as_yaml_integration_file(yml_filename)", "def generate_config(self):\n\n # Change crypto-config.yaml and add organizations\n yaml = YAML()\n with open(os.path.join(self.config_path, \"crypto-config-template.yaml\"), \"r\") as crypto_config_file:\n config = yaml.load(crypto_config_file)\n\n config[\"OrdererOrgs\"][0][\"Specs\"] = []\n for orderer_index in range(1, self.num_validators + 1):\n orderer_host, _ = self.experiment.get_peer_ip_port_by_id(orderer_index)\n config[\"OrdererOrgs\"][0][\"Specs\"].append({\n \"Hostname\": \"orderer%d\" % orderer_index,\n \"SANS\": [orderer_host]\n })\n\n config[\"PeerOrgs\"] = []\n for organization_index in range(1, self.num_validators + 1):\n organization_host, _ = self.experiment.get_peer_ip_port_by_id(organization_index)\n organization_config = {\n \"Name\": \"Org%d\" % organization_index,\n \"Domain\": \"org%d.example.com\" % organization_index,\n \"EnableNodeOUs\": True,\n \"Template\": {\n \"Count\": 1,\n \"SANS\": [organization_host]\n },\n \"Users\": {\n \"Count\": 1\n }\n }\n config[\"PeerOrgs\"].append(organization_config)\n\n with open(os.path.join(self.config_path, \"crypto-config.yaml\"), \"w\") as crypto_config_file:\n yaml.dump(config, crypto_config_file)\n\n # Change configtx.yaml\n yaml = YAML()\n with open(os.path.join(self.config_path, \"configtx-template.yaml\"), \"r\") as configtx_file:\n config = yaml.load(configtx_file)\n\n config[\"Profiles\"][\"TwoOrgsChannel\"][\"Application\"][\"Organizations\"] = []\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Consortiums\"][\"SampleConsortium\"][\"Organizations\"] = []\n\n for organization_index in range(1, self.num_validators + 1):\n org_admin = \"Org%dMSP.admin\" % organization_index\n org_peer = \"Org%dMSP.peer\" % organization_index\n org_client = \"Org%dMSP.client\" % organization_index\n organization_host, _ = self.experiment.get_peer_ip_port_by_id(organization_index)\n\n organization_config = {\n \"Name\": \"Org%dMSP\" % organization_index,\n \"ID\": \"Org%dMSP\" % organization_index,\n \"MSPDir\": \"crypto-config/peerOrganizations/org%d.example.com/msp\" % organization_index,\n \"Policies\": {\n \"Readers\": {\n \"Type\": \"Signature\",\n \"Rule\": \"OR('%s', '%s', '%s')\" % (org_admin, org_peer, org_client)\n },\n \"Writers\": {\n \"Type\": \"Signature\",\n \"Rule\": \"OR('%s', '%s')\" % (org_admin, org_peer)\n },\n \"Admins\": {\n \"Type\": \"Signature\",\n \"Rule\": \"OR('%s')\" % (org_admin)\n }\n },\n \"AnchorPeers\": [{\n \"Host\": 
organization_host,\n \"Port\": 7000 + organization_index\n }]\n }\n\n commented_map = CommentedMap(organization_config)\n commented_map.yaml_set_anchor(\"Org%d\" % organization_index, always_dump=True)\n config[\"Organizations\"].append(commented_map)\n config[\"Profiles\"][\"TwoOrgsChannel\"][\"Application\"][\"Organizations\"].append(commented_map)\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Consortiums\"][\"SampleConsortium\"][\"Organizations\"]\\\n .append(commented_map)\n\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"EtcdRaft\"][\"Consenters\"] = []\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"Addresses\"] = []\n\n for organization_index in range(1, self.num_validators + 1):\n organization_host, _ = self.experiment.get_peer_ip_port_by_id(organization_index)\n consenter_port = 7000 + organization_index\n consenter_info = {\n \"Host\": organization_host,\n \"Port\": consenter_port,\n \"ClientTLSCert\": \"crypto-config/ordererOrganizations/example.com/orderers/\"\n \"orderer%d.example.com/tls/server.crt\" % organization_index,\n \"ServerTLSCert\": \"crypto-config/ordererOrganizations/example.com/orderers/\"\n \"orderer%d.example.com/tls/server.crt\" % organization_index\n }\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"EtcdRaft\"][\"Consenters\"].append(consenter_info)\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"Addresses\"].append(\n \"%s:%d\" % (organization_host, consenter_port))\n\n with open(os.path.join(self.config_path, \"configtx.yaml\"), \"w\") as configtx_file:\n round_trip_dump(config, configtx_file, Dumper=RoundTripDumper)", "def build_configs():", "def save(self):\n file = open(self.path, 'w')\n self.config.write(file)\n file.close()", "def save_config() -> None:\n with open(_config_file, \"w\", newline=\"\") as config_file:\n json.dump(_config, config_file, indent=4)\n config_file.truncate()", "def save_conf(self, name=None):\n \n if name:\n filename = name\n \n else:\n filename = \"conf_\" + str(self.conf[\"device\"]) + \"_\" + datetime.today().strftime('%Y-%m-%d') + \".txt\"\n \n path = \"./source/_0_time_series_class/configuration/\"\n filename = path + filename\n \n with open(filename, \"w\") as file:\n json.dump(self.conf, file)", "def _dump_config_to_yaml_file(self,\n sim_config_params = None,\n sim_inputs = None,\n sim_outputs = None,\n sim_other_vars = None,\n is_aux_yaml = False):\n\n if sim_config_params is None:\n sim_config_params = self.sim_config_params\n if sim_inputs is None:\n sim_inputs = self.sim_inputs\n if sim_outputs is None:\n sim_outputs = self.sim_outputs\n if sim_other_vars is None:\n sim_other_vars = self.sim_other_vars\n\n if not is_aux_yaml:\n config_file = self.sim_config_filepath\n else:\n config_file = self.sim_config_filepath.replace(\".yaml\", \"_EDIT.yaml\")\n\n # Prepare set of unused data ( to be shared with user for editing )\n full_sim_config = {\"config_params\": sim_config_params,\n \"inputs\": sim_inputs,\n \"outputs\": sim_outputs,\n \"other_vars\": sim_other_vars}\n full_sim_data = {\"simulation\": full_sim_config}\n\n # Dump configuration to YAML file for later reuse (or user editing if \"is_aux_yaml==True\")\n with open(config_file, 'w') as file:\n dump = yaml.dump(full_sim_data, sort_keys = False, default_flow_style=False)\n file.write( dump )\n\n # Raise error, and avoid continuing using model\n log = \"\\n[FMU Validator] A YAML file with bonsai required fields, as well as available \"\n log += \"sim variables, has been created at: 
\\n --> '{}'\\n\".format(config_file)\n \n if is_aux_yaml:\n log += \"[FMU Validator] Edit the YAML file, and remove the '_EDIT' nametag to use this model.\\n\"\n \n print(log)\n\n return", "def save(self, config_file: typing.TextIO):\n json.dump(self.to_dict(), config_file, indent=4)", "def write_all(self):\n self.write_config()\n self.write_wq()", "def write_config(self, filename):\n self.config.filename = filename\n self.config.write()", "def set_yaml_config(self) -> None:\n\n # LT-248: We can pick Artillery Phase configuration from conf file\n self.yaml_config = {\n \"config\": {\n \"target\": self.get_swagger_url(),\n \"processor\": f\"./{self.OUT_FILE}\",\n \"phases\": [\n {\n \"duration\": settings.DURATION or 1,\n \"arrivalRate\": settings.SPAWN_RATE or 1\n }\n ]\n },\n \"scenarios\": self.task_set.yaml_flow\n }", "def saveSettings(self):\n self.genFiles.applyData()\n self.genGraph.applyData()", "def write_hass_config(self):\n if not os.path.isdir(hass_output_dir):\n os.mkdir(hass_output_dir)\n # Add the home assistant template dir to path and import the device's domain\n try:\n sys.path.append(hass_template_dir)\n self.yaml_template = __import__(self.hass_template)\n except:\n print('Device {f_name} domain error, cannot write hass config.'.format(**self))\n return()\n # Go through all types of components for domain\n # (i.e. domain light has components light & sensor)\n for c in self.yaml_template.components:\n if not os.path.isdir(os.path.join(hass_output_dir, c)):\n os.mkdir(os.path.join(hass_output_dir, c))\n with open('{dir}/{c}/{name}_{c}.yaml'.format(dir=hass_output_dir, c = c, **self), 'w') as yamlf:\n yamlf.write(self.yaml_template.components[c].format(**self))", "def write_init_file(name, data, path=\"\"):\n\n # find the resource and exclude it from the file\n data = data.copy()\n\n # Removes the Visa resource if needed\n try:\n data.remove(\"Visa_Resource\")\n except:\n pass\n\n if os.path.isfile(os.path.abspath(str(path) + str(name.split(\".\")[0]) + \".yaml\")):\n\n os.remove(os.path.abspath(path + str(name.split(\".\")[0]) + \".yaml\"))\n filename, version = create_new_file(\n str(name.split(\".\")[0]), path, os_file=False, suffix=\".yaml\"\n )\n yaml.dump(data, filename, indent=4)\n close_file(filename)\n\n elif not os.path.isfile(os.path.abspath(path + str(name.split(\".\")[0]) + \".yaml\")):\n\n # directory = path[:len(path) - len(path.split(\"/\")[-1])]\n\n filename, version = create_new_file(\n str(name.split(\".\")[0]), path, os_file=False, suffix=\".yaml\"\n )\n\n yaml.dump(data, filename, indent=4)\n\n close_file(filename)\n\n # Debricated\n # for items in data.items():\n # if type(items[1]) != type([]):\n # string = str(items[0]) + \" = \\\"\" + str(items[1]) + \"\\\"\\n\"\n # os.write(filename, str(string))\n # else:\n # string = str(items[0]) + \" = \\\"\"\n # for i in items[1]:\n # string += str(i).strip(\"'\").strip(\"[\").strip(\"]\") + \",\"\n # string = string[:-1]\n # string += \"\\\"\\n\"\n # print string\n # os.write(filename, string)\n\n else:\n return -1", "def write_config_json(self, config_js: Dict[str, Any]):\n # from_json replaces simulator!\n self.rsimulator.from_json(config_js)\n # new_game replaces state!\n self.new_game()", "def write_config(self):\n logging.debug(\"Writing configuration file: %s\" % self.config_file)\n f = open(self.config_file, \"w\")\n self.config.write(f)\n f.close()", "def run(self):\n make_sure_path_exists(OUT_FOLDER)\n\n if self.config['type'] == 'website':\n make_sure_path_exists(self.config['out_folder'])\n\n\n 
for file in self.config['bodies']:\n if file['type'] == 'content':\n self.pandoc_file(file)\n if self.config['type'] == 'website':\n shutil.copyfile(file['generated'], os.path.join(self.config['out_folder'], os.path.basename(file['source'])))\n\n if self.config['type'] == 'website':\n return\n\n for file in self.config['abstract']:\n self.pandoc_file(file)\n for file in self.config['summary']:\n self.pandoc_file(file)\n\n template = LATEX_JINJA_ENV.get_template(self.config['template_file'])\n\n logging.info('Rendering template')\n out = template.render(**self.config)\n with open(self.config['name'] + self.get_file_extension(), 'w') as file:\n file.write(out)\n\n if not self.args.pandoc:\n logging.info('Rendering latex')\n self.write()\n if not self.args.fast:\n logging.info('Rendering latex, again')\n self.write() # twice for the toc\n\n logging.info('Done!')", "def _save_cfg_to_file(self, server_id, cfg):\n\t\tfile = self.SettingsFolder + '{}.yml'.format(server_id)\n\t\twith open(file, 'w') as f:\n\t\t\tyaml.dump(cfg, f, default_flow_style=False)", "def save_configuration_overrides(self):\n _logging_location = self.configuration_widgets.logging_location_label.text().replace('Logging Location: ', '')\n _output_location = self.configuration_widgets.integrate_location_label.text().replace('Output Location: ', '')\n _DEFAULT_CONFIG = {\n 'loggingLocation': self.configuration_widgets.logging_location_label.text().replace('Logging Location: ', ''),\n 'outputLocation': self.configuration_widgets.integrate_location_label.text().replace('Output Location: ', ''),\n 'loggingStatus': 'True' if self.configuration_widgets.logging_status_checkBox.isChecked() else 'False'\n }\n\n write_json(_DEFAULT_CONFIG)", "def save(config, filename=None):\n filename = add_directory(filename or 'configure.json')\n directory = os.path.dirname(filename)\n if not os.path.exists(directory):\n os.makedirs(directory, 0o700)\n with open(filename, \"w\") as f:\n json.dump(config, f, indent=2, sort_keys=True)", "def generate_write_yaml_to_file(file_name):\n def write_yaml(config):\n with open(file_name, 'w+') as fh:\n fh.write(yaml.dump(config))\n return write_yaml", "def write_config_file():\n\tif not config_parser:\n\t\tprint \"Config module not loaded. 
I don't save anything.\"\n\t\treturn\n\n\tf = file(config_file, \"w\")\n\tconfig_parser.write(f)\n\tf.close()", "def write_settings(settings_path):\n priority = []\n host = '%s' % args.xnat_host if args.xnat_host else ''\n if args.p_order:\n priority = args.p_order.split(\",\")\n p_mod = '{'\n p_proc = '{'\n for ind, project in enumerate(priority):\n if ind == 0:\n p_mod += '\"%s\": [],\\n' % project\n p_proc += '\"%s\": [],\\n' % project\n else:\n # 12 = length of proj_mod = {\n p_mod += '%s\"%s\": [],\\n' % (' ' * 12, project)\n # 13 = length of proj_proc = {\n p_proc += '%s\"%s\": [],\\n' % (' ' * 13, project)\n p_mod = p_mod[:-2] + '}'\n p_proc = p_proc[:-2] + '}'\n else:\n p_mod = '{\"proj1\": [\"module1\", \"...\", \"moduleN\"], \\\n\"proj2\": [\"module1\", \"...\", \"moduleN\"]}'\n p_proc = '{\"proj1\": [\"processor1\", \"...\", \"processorN\"], \\\n\"proj2\": [\"processor1\", \"...\", \"processorN\"]}'\n\n settings_code = SE_TEMPLATE.format(author=args.author,\n email_addr=args.email,\n name=args.name,\n now=str(datetime.now()),\n q_limit=args.q_limit,\n p_order=priority,\n e_opts=args.e_opts,\n p_mod=p_mod,\n p_proc=p_proc,\n host=host)\n with open(settings_path, 'w') as f_obj:\n f_obj.writelines(settings_code)", "def save_config(self):\n\n if not self.__conf.has_section(self.section):\n self.__conf.add_section(self.section)\n\n for key in self._params:\n val = self._params[key]\n self.__conf.set(self.section, key, val)\n\n with open(self.conf_path, 'w') as f:\n self.__conf.write(f)", "def rewrite(self):\n for f in self.files:\n metadata = dict()\n metadata[\"description\"] = f.metadata.get(\"desc\", \"Unknown\")\n metadata[\"script\"] = os.path.basename(f.filename)\n metadata[\"requires\"] = []\n for package, component in f.requires:\n if package == self.key:\n metadata[\"requires\"].append(\"/\" + component)\n else:\n metadata[\"requires\"].append(package + \"/\" + component)\n metadata[\"provides\"] = [ p[1] for p in f.provides ]\n # Resolve symlinks\n real_filename = os.path.realpath(f.filename)\n LOG.info(\"Editing: \" + real_filename)\n new_filename = f.filename + \".new\"\n new = file(new_filename, \"w\")\n new.write(\"/*\\n---\\n\")\n new.write(yaml.dump(metadata))\n new.write(\"\\n...\\n*/\\n\")\n new.write(file(f.filename).read())\n new.close()\n os.rename(new_filename, real_filename)\n\n package_data = dict()\n package_data[\"name\"] = self.key\n package_data[\"sources\"] = []\n package_data[\"version\"] = \"Unknown\"\n package_data[\"copyright\"] = \"Unknown\"\n package_data[\"description\"] = \"Unknown\"\n target_dir = os.path.dirname(self.scripts_json_filename)\n # package.yml is typically in the parent of the scripts.json dir\n if os.path.basename(target_dir) == \"Source\":\n target_dir = os.path.dirname(target_dir)\n target_filename = os.path.join(target_dir, \"package.yml\")\n for f in self.files:\n common = os.path.commonprefix([target_filename, f.filename])\n source_file = f.filename[len(common):]\n package_data[\"sources\"].append(source_file)\n LOG.info(\"Writing: \" + target_filename)\n out = file(target_filename, \"w\")\n out.write(yaml.dump(package_data))\n out.close()", "def save_config(self, filename: str=None):\n if not filename:\n filename = self.config_file\n with open(filename, \"w\") as file_object:\n json.dump(self.config, file_object, indent=4, sort_keys=True)", "def save_config():\n # Order the load flags using load_keys...\n od_load_flags = OrderedDict()\n for k in load_keys:\n od_load_flags[k] = load_flags[k]\n 
pawstools.save_cfg(od_load_flags,cfg_file)", "def testWriteConfigurations(self):\n solution_configurations = resources.VSConfigurations()\n solution_project = resources.VSSolutionProject('name', 'filename', 'guid')\n\n file_writer = writers.VS2017SolutionFileWriter()\n\n file_writer._file = io.BytesIO()\n\n file_writer.WriteConfigurations(solution_configurations, [solution_project])\n\n file_writer._file.seek(0, os.SEEK_SET)\n output_data = file_writer._file.read()\n\n # TODO: add ExtensibilityGlobals\n expected_output_data = (\n b'Global\\r\\n'\n b'\\tGlobalSection(SolutionProperties) = preSolution\\r\\n'\n b'\\t\\tHideSolutionNode = FALSE\\r\\n'\n b'\\tEndGlobalSection\\r\\n'\n b'EndGlobal\\r\\n')\n self.assertEqual(output_data, expected_output_data)", "def write(self, fname=None):\n fname = fname or self.path\n with open(fname, \"w\") as fl:\n yaml.dump(self._as_dict(), fl)\n self.path = Path(fname)", "def write_scripts(self, out, ref, file1, file2):\n for config in self.configurations:\n program_folder = os.path.join(out, self.out)\n config.write_MuTect2_script(program_folder, self.path2exe, ref, file1, file2)\n return None", "def write_locales(config: Config) -> Config:\n strings_rendered = render_strings(reduce_strings(config.root))\n\n destination_files = []\n\n for key, contents in strings_rendered.items():\n destination_file = os.path.join(\n config.destination,\n \"res\",\n key,\n \"description\",\n \"{}.str\".format(config.name)\n )\n\n contents = \"\\n\".join([COMMENT_C + PREFIX, contents])\n\n assert_directories(destination_file, True)\n\n with open(destination_file, \"w\") as f:\n f.write(contents)\n\n destination_files.append(destination_file)\n\n return config", "def _write_js(output_root, classes):\r\n contents = {}\r\n\r\n js_fragments = set()\r\n for class_ in classes:\r\n module_js = class_.get_javascript()\r\n for filetype in ('coffee', 'js'):\r\n for idx, fragment in enumerate(module_js.get(filetype, [])):\r\n js_fragments.add((idx, filetype, fragment))\r\n\r\n for idx, filetype, fragment in sorted(js_fragments):\r\n filename = \"{idx:0=3d}-{hash}.{type}\".format(\r\n idx=idx,\r\n hash=hashlib.md5(fragment).hexdigest(),\r\n type=filetype)\r\n contents[filename] = fragment\r\n\r\n _write_files(output_root, contents, {'.coffee': '.js'})\r\n\r\n return [output_root / filename for filename in contents.keys()]", "def save_config(self, path):\n if os.path.isdir(path):\n path = os.path.join(path, 'config.json')\n print('Save config to {}'.format(path))\n with open(path, 'w', encoding='utf-8') as w:\n w.write(json.dumps(self.to_dict(), indent=2,\n sort_keys=True))", "def save(self):\r\n with open(self.filename, 'wb') as configfile:\r\n self.write(configfile)", "def write_schema_files():\n print(\"\\nStarting to generate Provider JSON Schemas...\\n\")\n\n for name, generator in schema_generators().items():\n schema = generator()\n with open(f\"../provider/{name}.json\", \"w\") as schemafile:\n schemafile.write(json.dumps(schema, indent=2))\n print(f\"Wrote {name}.json\")\n\n print(\"\\nFinished generating Provider JSON Schemas\")", "def save_config(config_path: str, data: dict):\n with open(config_path, 'w') as j:\n dump(data,j)", "def configuration(config):\n create_str_dir(config)\n add_skymap(config)\n save_configuration(config)", "def write_to_json(config: dict, filename: str):\n\n with open(filename, 'w', encoding='utf-8') as f:\n mmengine.dump(config, f, file_format='json')", "def to_yaml(self, path):\n logger.info(\"Serializing Experiment to \" + path)\n\n if 
(callable(self.output_suffix) or callable(self.output_prefix)):\n raise ValueError(\"Cannot serialize function-based suffix/prefix \"\n \"naming schemes as yaml\")\n\n d = self.to_dict()\n\n with open(path, 'w') as yaml_file:\n yaml.dump(d, yaml_file, default_flow_style=False)" ]
[ "0.68511325", "0.66506356", "0.6543113", "0.64862216", "0.6447968", "0.6264091", "0.61741364", "0.6169828", "0.61404073", "0.61322165", "0.6058652", "0.604859", "0.60388285", "0.6029666", "0.60256445", "0.60251164", "0.6015038", "0.60138613", "0.6009589", "0.59702086", "0.5957868", "0.5932503", "0.59307325", "0.5926955", "0.59242827", "0.5921704", "0.5911889", "0.5908241", "0.5903947", "0.58867645", "0.5874287", "0.5856455", "0.58513635", "0.5845609", "0.5842458", "0.58260286", "0.5824816", "0.5809588", "0.58088964", "0.58008933", "0.57995236", "0.57960725", "0.5792436", "0.5789698", "0.57883465", "0.57733554", "0.5772907", "0.5771647", "0.5760573", "0.57584816", "0.57540125", "0.5753418", "0.5741662", "0.5724645", "0.5708248", "0.5697005", "0.5690746", "0.5684634", "0.5683388", "0.56819195", "0.568076", "0.56791735", "0.56743604", "0.5671956", "0.56651944", "0.5663372", "0.5663283", "0.5661166", "0.56459105", "0.56383526", "0.5636618", "0.56226087", "0.56179714", "0.56152123", "0.56121063", "0.56006294", "0.55904496", "0.5585062", "0.55805576", "0.55562574", "0.55555123", "0.5554785", "0.5551516", "0.5534044", "0.55272645", "0.5525579", "0.5523803", "0.55197304", "0.5515373", "0.5511978", "0.5504718", "0.55045986", "0.54924875", "0.5489966", "0.5488955", "0.5481201", "0.54791534", "0.5476553", "0.5473509", "0.54733294" ]
0.5634283
71
Tell if a person is allergic to the given allergen.
def is_allergic_to(self, allergen):
        return allergen in self.list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_allergen(self, is_allergen):\n\n self._is_allergen = is_allergen", "def in_garden(obj):\n print(\"Searching the garden's random objects\")\n return obj in _random_objects", "def allergies(self, allergies):\n\n self.logger.debug(\"In 'allergies' setter.\")\n\n self._allergies = allergies", "def eligiblePresident(age,bornInHomeland):\n return (age>=35) and bornInHomeland", "def is_alld(self):\n g = self.get_gene().get_seq()\n for i in range(1, len(g)):\n if g[i] is 'c':\n return False\n return True", "def is_ligand(self):\n if any(LigandComponentAdaptor().fetch_by_residue_id(r.residue_id) for r in self.Residues):\n return True\n else:\n return False", "def _bot_assigned_bell(self, bell: Bell) -> bool:\n return self._tower.is_bell_assigned_to(bell, self._user_name)", "def is_bothell_student():\n return _is_member('uw_affiliation_bothell-student')", "def satisfies(self, reg):\n ### If no value, there is no need for filtering\n if self.getValues()==['']:\n return True\n affiliation = self.getValues()[0]\n return True if (affiliation == reg.getRepresentationType()[\"organizationRepresentative\"]) else False", "async def get_guardian_email(guardian_id: UUID, angel_name: str) -> str:\n try:\n user = await User.get(id=guardian_id)\n except DoesNotExist:\n return False\n\n angels = await user.fetch_related(\"angels\")\n for angel in angels:\n if angel.name == angel_name:\n return user.email\n return False", "def is_any_mentor_became_human(self):\n for mentor in self.mentors:\n if mentor.humanity_level >= 10:\n print(\"\\033[44m\"+mentor.first_name, mentor.last_name+\" called \"+ mentor.nickname+\" has become human \"\n \"Is ready to deliver to new Codecool facility!\", mentor.first_name, mentor.last_name,\n \"may the Force be with You!\\033[0m\")\n time.sleep(3)\n return True\n return False", "def is_allergic_to(self, item):\n if item in self.list:\n return True\n else:\n return False", "def _user_assigned_bell(self, bell: Bell) -> bool:\n return not self._bot_assigned_bell(bell)", "def is_allc(self):\n g = self.get_gene().get_seq()\n for i in range(1, len(g)):\n if g[i] is 'd':\n return False\n return True", "def all(x) -> bool:\n pass", "def addAllergies(self):\n if int(self.pid)%100 < 85: # no allergies for ~ 85%\n exclusion = NO_ALLERGY.sub({\n 'exclusion':\"no known allergies\",\n 'exclusion_id':\"160244002\",\n }).done()\n self.data.append(SDMX.sub({'models':exclusion}, escape=False).done())\n else: # Sprinkle in some sulfa allergies\n al = DRUG_CLASS_ALLERGY.sub({\n 'reaction': \"skin rash\",\n 'reaction_id': \"271807003\",\n 'category': \"drug allergy\",\n 'category_id': \"416098002\",\n 'allergen': \"sulfonamide antibacterial\",\n 'allergen_id': \"N0000175503\",\n 'severity': \"mild\",\n 'severity_id': \"255604002\",\n }).done()\n self.data.append(SDMX.sub({'models':al}, escape=False).done())\n \n if int(self.pid)%2: # and throw in peanut allergies for every other patient\n al = FOOD_ALLERGY.sub({\n 'reaction': \"anaphylaxis\",\n 'reaction_id': \"39579001\",\n 'category': \"food allergy\",\n 'category_id': \"414285001\",\n 'allergen': \"peanut\",\n 'allergen_id': \"QE1QX6B99R\",\n 'severity': \"severe\",\n 'severity_id': \"24484000\",\n }).done()\n self.data.append(SDMX.sub({'models':al}, escape=False).done())", "def feasible(individual):\n val=0;\n for i in individual:\n if viajes[val][6]==True and False==vehiculos_esp[i]:\n return False\n val+=1\n return True", "def check_any_light_on(bridge):\n for i,group in bridge.get_group().items():\n if group['state']['any_on']:\n return 
True\n return False", "def check_if_group_member(self, organism):\n for key, item in self.phen_dict.items():\n if organism in item:\n self.declare(Organism(name=key))", "def allele_semblable(self, mere):\n Similarite = 0\n for Allele in range(3):\n if self.allele[Allele] in mere.allele and self.allele[Allele] != 0.0:\n Similarite = Similarite + 1\n if Similarite == 2:\n self.informatif = 2", "def is_grad_student():\n return _is_member('uw_affiliation_graduate')", "def is_ligand(cls):\n return LigandComponent.residue_id == cls.residue_id", "def bust(person):\n if person.total > GOAL_TOTAL() and person.aceCount == 0:\n return True\n elif person.total > GOAL_TOTAL() and person.aceCount > 0:\n adjust_ace(person)\n return person.total > GOAL_TOTAL()\n else: # person.total <= GOAL_TOTAL()\n return False", "def enter_night_club(individual):\n if individual.age > LEGAL_DRINKING_AGE:\n print(\"Allowed to enter.\")\n else:\n print(\"Enterance of minors is denited.\")", "def is_monster_lord(self):\n return True", "def wife(backpack):\n print(\"\\nYour wife says: \")\n if \"corn\" in backpack:\n if backpack['corn'][0] < 20:\n print(\"-You need to gather 20 corn cob so get back to work! \")\n enter()\n else:\n print(\"-Ahh you are a bastard but I know your dream...\\nNow go to city and buy your ticket my love :* \")\n enter()\n return True # because of this we can change lvl\n if \"corn\" not in backpack:\n print(\"-Where have u been u f...... drunkard, \\nget back to work and collect 20 corn cobs! \")\n enter()", "def yell():\n ground_description_int = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS]\n if ground_description_int != 12:\n printmessage(\"You yell, but nobody hears you.\", 5, CYAN, 1)\n else:\n printmessage(\"You have found the ranger, amd won the game!\", 5, GREEN, 3)\n die(\"ranger\")", "def any(self) -> bool:", "def is_all(variable):\n\n if isinstance(variable, str):\n return variable in ['all', 'All', 'ALL']\n\n return False", "def __contains__(self, ngram):\n return ngram in self.root", "def gatekeeper():\n\n if user.name in GATEKEEPERS:\n return True\n\n return False", "def check_other(seq_iter):\n\treturn any(filter(has_abba, seq_iter))", "def check(iters):\n\treturn check_hypernet(iters[0]) and check_other(iters[1])", "def isELG(gflux=None, rflux=None, zflux=None, w1flux=None, w2flux=None,\n gfiberflux=None, gsnr=None, rsnr=None, zsnr=None,\n gnobs=None, rnobs=None, znobs=None,\n maskbits=None, south=True, primary=None):\n if primary is None:\n primary = np.ones_like(rflux, dtype='?')\n\n nomask = notinELG_mask(\n maskbits=maskbits, gsnr=gsnr, rsnr=rsnr, zsnr=zsnr,\n gnobs=gnobs, rnobs=rnobs, znobs=znobs, primary=primary)\n\n elglop, elghip = isELG_colors(gflux=gflux, rflux=rflux, zflux=zflux,\n w1flux=w1flux, w2flux=w2flux,\n gfiberflux=gfiberflux, south=south,\n primary=primary)\n\n return elglop & nomask, elghip & nomask", "def an_check(self):\n\t\tfor filles in self.xelt:\n\t\t\t# parcours rapide des branches niveau 1\n\t\t\tif search(r'analytic$', filles.tag):\n\t\t\t\treturn True\n\t\treturn False", "def est_fruit(self): \n if self.age > 20 and self.age <31 and (self.fecondee==True):\n return True\n else:\n return False", "def all_manslaughter(x): \n for elem in x:\n if elem == 'Manslaughter' or elem == 'Voluntary Manslaughter':\n return 1\n return 0", "def all_enter(self):\n return self.num_enters == self.num_workers", "def display_allergens(self):\n return ', '.join(allergens.name for allergens in self.allergens.all()[:3])", "def is_all_visited(self):\n cond = [node.visited if 
node and node.belongs and node.valid else True for node in self.nodes.flatten()]\n return all(cond)", "def compute_allergens(foods):\n\n # Create a dictionary mapping allergens to lists\n # of ingredients that may contain that allergen\n allergen_foods = {}\n for ingredients, allergens in foods:\n for allergen in allergens:\n allergen_foods.setdefault(allergen, []).append(set(ingredients))\n\n # For each allergen, compute the intersection of the lists\n # computed above. This will give us the set of ingredienta\n # that could contain that allergen\n candidate_ingredients = {}\n for allergen in allergen_foods:\n candidate_ingredients[allergen] = set.intersection(*allergen_foods[allergen])\n\n # Repeatedly find an allergen that can only be matched to a single\n # ingredient, and remove that ingredient from the list of candidate\n # ingredients for all the other allergens.\n allergens = {}\n while len(candidate_ingredients) > 0:\n\n for single_allergen, cings in candidate_ingredients.items():\n if len(cings) == 1:\n ingredient = cings.pop()\n allergens[single_allergen] = ingredient\n break\n\n del candidate_ingredients[single_allergen] \n\n for allergen in candidate_ingredients:\n if allergen != single_allergen:\n ingredient = allergens[single_allergen]\n candidate_ingredients[allergen].discard(ingredient)\n\n return allergens", "def check_rpt_status(self) -> bool:\n return self.allele == self.fasta_alt", "def __contains__(self, ngram):\n return ngram in self._ngrams", "def deformer_check(obj, *args):\n deformers = rig.get_deformers(obj)\n if deformers:\n cmds.confirmDialog(title='Deformer Alert!',\n message='Found some deformers on {0}.\\nYou may want to put the softmod\\n early in the '\n 'input list\\n or check \"front of chain\"'.format(obj),\n button=['OK'], defaultButton='OK', cancelButton='OK', dismissString='OK')", "def notifyer(notifyer):\n\n if '@' in notifyer:\n return True\n else:\n return False", "def check_hypernet(seq_iter):\n\treturn not any(filter(has_abba, seq_iter))", "def is_ringing(self) -> bool:", "def all_bees_raised_flag(self):\n pos, com, success = self.perception\n if len(pos) > 0:\n return all(map(lambda x: x[1][\"flag\"] == (self.nr_of_possible_neighbors + 1), com))\n else:\n return True", "def testHealthAssessArthralgia(self):\n attr = self.session.create_visit_attr()\n\n self.util.boolTypeTest(self, attr, \"arthralgia\")\n\n self.util.boolPropertyTest(self, attr, \"arthralgia\")", "def isResidueAssigned(residue):\n\n for atom in residue.atoms:\n if atom.atomSet:\n if atom.atomSet.resonanceSets:\n return True\n \n return False", "def is_gold(self):\n levels_range = [\n 20, # Normal members\n 30, # Gold members.\n 31, # Platinum \n 32, # Builder\n 33, # Contributor \n 35, # Janitor \n 40, # Moderator \n 50 # Admin\n ]\n\n if any([self.user_list(name=self.username, level=i) for i in levels_range[1:]]):\n return True\n else:\n return False", "def send_letter_everyone(d):\n print(\"Letters have been sent to all the donors!!!\")\n d.send_letter_everyone()", "def luck_check(chance):\n return randint(0, 100) < chance", "def is_unhappy(self):\n #checked!#\n ###your code here###\n same=0\n for i in self.home.neighbors:\n if i.occupant!=None:\n if i.occupant.group==self.group:\n same+=1\n happniess=float(same)/len(self.home.neighbors)\n if happniess<self.happiness_threshold:\n return True\n else:\n return False", "def is_offensive(drug_name, bad_words):\n\n for bad_word in bad_words:\n if bad_word in drug_name:\n return True\n return False", "def knownTo(self, observer, 
asName):\n return (asName == \"rune\")", "def test_can_have_a_ring():\n ben = Hobbit(\"Ben\")\n danielle = Hobbit(\"Danielle\")\n stacy = Hobbit(\"Stacy\")\n assert ben.has_ring() is False\n assert stacy.has_ring() is False\n assert danielle.has_ring() is True", "def some(self, func=bool):\n for i in self._:\n if func(i):\n return True\n return False", "def is_undergrad_student():\n return _is_member('uw_affiliation_undergraduate')", "def already_booked(slots, attendees, user_name):\n already_joined = False\n for i in attendees:\n if i[\"email\"] == user_name+'@student.wethinkcode.co.za':\n already_joined = True\n\n if already_joined == True:\n return False\n else:\n return True", "def check_bt(donor, recipient):\n\n donor_int = convert_input_to_int(donor)\n recipient_int = convert_input_to_int(recipient)\n\n result_pac = _particular_antigen_comp(donor_int, recipient_int)\n \n # Resurn False if there is problem\n if -1 in result_pac:\n result = False\n else:\n result = True\n \n return result", "def robbery(x): \n for elem in x:\n if elem == 'Robbery':\n return 1\n return 0", "def all_bees_lowered_flag(self):\n pos, com, success = self.perception\n return all(map(lambda x: x[1][\"flag\"] == 0, com))", "def __contains__(self, arg):\r\n\r\n return arg in self.grfx[0]", "def has_lgr( self , lgr_name ):\n if self._has_lgr( lgr_name ):\n return True\n else:\n return False", "def is_satisfied_by(self, val):", "def hgmallpass_evaluation(input_generator,branches,nlfunction,iden_method,Plot,reference=None):\n input_signal = input_generator.GetOutput()\n allpass = sumpf.modules.ImpulseGenerator(samplingrate=input_signal.GetSamplingRate(),length=len(input_signal)).GetSignal()\n filter_spec_tofind = [allpass,]*branches\n ref_nlsystem = nlsp.HammersteinGroupModel_up(input_signal=input_signal,\n nonlinear_functions=nlsp.nl_branches(nlfunction,branches),\n filter_irs=filter_spec_tofind,\n max_harmonics=range(1,branches+1))\n\n found_filter_spec, nl_functions = iden_method(input_generator,ref_nlsystem.GetOutput(),branches)\n iden_nlsystem = nlsp.HammersteinGroupModel_up(input_signal=input_signal,\n nonlinear_functions=nl_functions,\n filter_irs=found_filter_spec,\n max_harmonics=range(1,branches+1))\n if reference is not None:\n reference = nlsp.change_length_signal(reference,length=len(input_signal))\n ref_nlsystem.SetInput(reference)\n iden_nlsystem.SetInput(reference)\n if Plot is True:\n plot.relabelandplot(sumpf.modules.FourierTransform(ref_nlsystem.GetOutput()).GetSpectrum(),\"Reference System\",show=False)\n plot.relabelandplot(sumpf.modules.FourierTransform(iden_nlsystem.GetOutput()).GetSpectrum(),\"Identified System\",show=True)\n print \"SNR between Reference and Identified output with all pass filters: %r\" %nlsp.snr(ref_nlsystem.GetOutput(),\n iden_nlsystem.GetOutput())", "def all_seen_fun(self):\n return self.get_all_j(self.id) and \\\n (set(self.get_fd_part_j(self.id)) <= (self.all_seen | {self.id}))", "def is_farmers_agent(self, agent_username, farmers_username):\n # Check if it is a real agent and a real farmer\n agent_check = self.check_agent_by_role(agent_username, \"LOAN_OFFICER\")\n farmer_check = self.check_agent_by_role(farmers_username, \"FARMER\")\n if not (agent_check and farmer_check):\n return False\n\n # Check if the agent is incharge of the farmer\n if farmer_check[\"officer_incharge\"] == agent_check[\"officer_id\"]:\n payload = {}\n payload[\"farmer_data\"] = farmer_check\n payload[\"officer_data\"] = agent_check\n return payload\n return False", "def eval_genome_eer(g, 
conf, batch, backprop=False, use_gate=True):\n\n # inputs: batch_size x t x bins\n # outputs: batch_size\n inputs, targets = batch\n # inputs: t x batch_size x bins\n inputs = inputs.transpose(0, 1)\n\n net = neat_local.nn.RecurrentNet.create(g, conf, device=\"cpu\", dtype=torch.float32)\n assert not backprop\n net.reset(len(targets))\n\n contribution = torch.zeros(len(targets))\n norm = torch.zeros(len(targets))\n for input_t in inputs:\n # input_t: batch_size x bins\n\n xo = net.activate(input_t) # batch_size x 2\n score = xo[:, 1]\n confidence = xo[:, 0] if use_gate else torch.ones_like(score)\n contribution += score * confidence # batch_size\n norm += confidence # batch_size\n\n jitter = 1e-8\n prediction = contribution / (norm + jitter) # batch_size\n\n target_scores = prediction[targets == 1].numpy() # select with mask when target == 1\n non_target_scores = prediction[targets == 0].numpy() # select with mask when target == 0\n\n pmiss, pfa = rocch(target_scores, non_target_scores)\n eer = rocch2eer(pmiss, pfa)\n\n return 2 * (.5 - eer)", "def is_all_day(self):\n return self.__is_all_day", "def constrain_everyone(self, value: str) -> bool:\n return self._constrain_presence(\"everyone\", value)", "def allready(antReady) :\n return numNotready(antReady) == 0", "def isspeech(phone):\n return phone not in OTHERS", "def evaluate(self,gene):\n return prng.choice(gene.allele_set)", "def all(self, func=bool):\n return all(map(func, self._))", "def forall(seq,cond):\n for x in seq:\n if not cond(x): return False\n return True", "def forall(seq,cond):\n for x in seq:\n if not cond(x): return False\n return True", "def is_every(self):\n return self._tag == 'every'", "def isProteic(self):\n from MolKit.PDBresidueNames import AAnames\n\n self.AARes = [x for x in self.residues if x.type in AAnames]\n\n water = [x for x in self.residues if x.type in ['HOH', 'WAT']]\n\n if len(self.AARes) and len(self.AARes)+len(water) == len(self.residues):\n return True\n else:\n return False", "def gallbladder(self, gallbladder):\n\n self.logger.debug(\"In 'gallbladder' setter.\")\n\n self._gallbladder = gallbladder", "def __is_belong_to_me(self, iceberg):\n return iceberg.owner.equals(self.__game.get_myself())", "def is_indeed(self) -> bool:\n return self.mukluk > 5", "def done(self, streetlearn):\n return not bool(self._coin_pano_id_set)", "def is_seattle_student():\n return _is_member('uw_affiliation_seattle-student')", "def someone_home(self) -> bool:\n return self._someone_home", "def isGoal(self, state):\n x, y = state\n return (x, y) in self.food.asList()", "def lucky_enough(luck=0):\n return random.randint(0, 99) < luck", "def have_mister(self):\n return bool(self.mister)", "def is_building_eye(self):\r\n pass", "def isGoalState(self, state):\n wds = get_words(state)\n # checks if every word in corpus - USELESS/Possible damage\n # for i in range(len(wds)):\n # if (self.bigramCost(wds[i], self.not_word) >= self.threshold):\n # return False\n for i in range(len(wds)):\n if (wds[i] not in self.fills[i]):\n return False\n return True", "def __call__(self, possibility: object) -> bool:\n if {truth(possibility) for truth in self.truths} == {True}:\n return True\n else:\n return False", "def is_done(self):\n dead = not any(agent.is_alive() for agent in self.agents)\n winner = any(agent.is_winner() for agent in self.agents)\n if dead:\n print 'Your Agent Died'\n return dead\n elif winner:\n print 'Gold Found!\\nOozplorer wins!'\n return winner\n return False", "def sjekkTallISekvens(listeTall, listeSekvens):\n for 
tall in listeSekvens:\n if tall not in listeTall:\n return False\n return True", "def is_fallen(self):\n orientation = self.minitaur_env.minitaur.GetBaseOrientation()\n rot_mat = self.minitaur_env._pybullet_client.getMatrixFromQuaternion(orientation)\n local_up = rot_mat[6:]\n return (np.dot(np.asarray([0, 0, 1]), np.asarray(local_up)) < 0.3)", "def echo(self, foetus):\n Allele_semblable = 0\n for Allele in range(3):\n if self.allele[Allele] in foetus.allele and self.allele[Allele] != 0.0:\n Allele_semblable = Allele\n if Allele_semblable == 0:\n Allele_Echo = self.allele[Allele_semblable + 1]\n for Alleles_foetus in range(3):\n if foetus.allele[Alleles_foetus] - 1 == Allele_Echo:\n foetus.informatif = 3\n elif Allele_semblable == 1:\n Allele_Echo = self.allele[Allele_semblable - 1]\n for Alleles_foetus in range(3):\n if foetus.allele[Alleles_foetus] - 1 == Allele_Echo:\n foetus.informatif = 3", "def contains(self, gi):\n if gi is None:\n return False\n if gi in self.gradual_items:\n return True\n return False", "def __le__(self, other):\n return isinstance(other, GPred) and (self.name == '?' or self.name == other.name)", "def eligible(CGPA:float, Year:int, program:str) -> bool:\n return CGPA >= 2 and Year == (2 or 3) and program == \"CS\"", "def isInAera(self):\n opp = self.get_opponent\n for players in opp:\n if (players.distance(self.playerPos)<10):\n return True\n return False" ]
[ "0.6099033", "0.5558612", "0.5521234", "0.5301362", "0.5294011", "0.5216652", "0.5088434", "0.507583", "0.5070816", "0.50542706", "0.5041537", "0.50312674", "0.49993923", "0.49899283", "0.49749395", "0.49660623", "0.49616873", "0.49227342", "0.49170405", "0.49064264", "0.48977208", "0.48425022", "0.48294932", "0.48152772", "0.48003718", "0.47859085", "0.47855812", "0.4783256", "0.478226", "0.4775519", "0.4773847", "0.47733495", "0.47604233", "0.47344443", "0.4731618", "0.47158363", "0.46895173", "0.46850643", "0.4681282", "0.4676332", "0.46662062", "0.4655639", "0.46517938", "0.46509954", "0.46278223", "0.4626545", "0.46254766", "0.46179903", "0.46007225", "0.45958096", "0.45942637", "0.45921376", "0.45866236", "0.45839074", "0.45687172", "0.45673323", "0.4545003", "0.4542103", "0.453447", "0.45322692", "0.45319396", "0.45316303", "0.45298207", "0.45224497", "0.45210502", "0.4518466", "0.45177415", "0.45160276", "0.45147657", "0.45060903", "0.44918287", "0.44901705", "0.44842628", "0.44819298", "0.4479697", "0.44770727", "0.447639", "0.447639", "0.44732037", "0.44727373", "0.4467713", "0.44587576", "0.44586408", "0.44540873", "0.44528636", "0.4450165", "0.44446155", "0.44354212", "0.4433912", "0.4433395", "0.44299892", "0.44287914", "0.44274122", "0.44269377", "0.4425326", "0.44193587", "0.44167277", "0.44138247", "0.44069698", "0.4404663" ]
0.77161974
0
Initialise an allergy storage with the given allergy score.
def __init__(self, score): self.score = score self.list = [candidate[0] for candidate in self._allergens if candidate[1] & self.score]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _krls_init(self, iALDth=1e-4, iMaxDict=1e3):\n \n dAldKRLS = {} # Initialize dictionary with data for aldkrls algorithm\n \n # Store all the parameters in the dictionary\n dAldKRLS['iALDth'] = iALDth; # ALD threshold\n dAldKRLS['iMaxDt'] = iMaxDict; # Maximum size of the dictionary\n \n dAldKRLS['bInit'] = 0 # Clear 'Initialization done' flag\n \n return dAldKRLS", "def __init__(self, score = 0):\n self.score = score", "def __init__(self, score=0):\n self.score = score", "def __init__(self, db_path):\n self.db_path = db_path\n self.split_ratio = 0\n\n self.training_subjects = []\n self.development_subjects = []\n self.validation_subjects = []\n self.evaluation_subjects = []\n self.testing_subjects = []\n\n self.n_train = 0\n self.n_develop = 0\n self.n_valid = 0\n self.n_eval = 0\n self.n_test = 0", "def __init__(self):\n self._db = db\n # Connect to DB\n self._db.connect()\n # Create tables\n self._db.create_tables([Teachers, Parents, Tutors, Students, Homework, Groups, StudentsGroups, Courses])\n # Create filling entries\n self.__create_dummies()\n self._db.close()", "def __init__(self, fief):\n self.surplus = False\n self.wealth = 0\n self.upkeep = 0\n self.store = fief", "def __init__(self, scoring_function=None, partition_index=0):\n self.trained_data = dict()\n self.scoring_function = scoring_function or diff_score\n self.last_pred = []\n self.partitions = dict()\n self.partition_index = partition_index", "def __init__(self, name, score):\r\n self.name = name\r\n self.score = float(score)", "def __init__(self, idy, name):\n self.idy = idy\n self.name = name\n self.active = True\n self.grades = {}", "def __init__(self):\r\n self.score = 0", "def at_object_creation(self):\n self.db.max_hp = 100 # Set maximum HP to 100\n self.db.hp = self.db.max_hp # Set current HP to maximum\n self.db.spells_known = [] # Set empty spells known list\n self.db.max_mp = 20 # Set maximum MP to 20\n self.db.mp = self.db.max_mp # Set current MP to maximum", "def __init__(self):\n self._yahtzee_count = 0\n self._aces = ScoreBox('Aces')\n self._twos = ScoreBox('Twos')\n self._threes = ScoreBox('Threes')\n self._fours = ScoreBox('Fours')\n self._fives = ScoreBox('Fives')\n self._sixes = ScoreBox('Sixes')\n self._upperSum = 0\n self._upperTotal = 0\n self._bonus = 0\n self._threeOfAKind = ScoreBox('3 of a Kind')\n self._fourOfAKind = ScoreBox('4 of a Kind')\n self._fullHouse = ScoreBox('Full House')\n self._smallStraight = ScoreBox('Small Straight')\n self._largeStraight = ScoreBox('Large Straight')\n self._yahtzee = ScoreBox('Yahtzee')\n self._chance = ScoreBox('Chance')\n self._lowerTotal = 0\n self._grandTotal = 0\n self._upper_section = [self._aces, self._twos, self._threes, self._fours, self._fives, self._sixes]\n self._lower_section = [self._threeOfAKind, self._fourOfAKind, self._fullHouse, self._smallStraight, self._largeStraight, self._yahtzee, self._chance]\n self._boxes = [self._aces, self._twos, self._threes, self._fours, self._fives, self._sixes, self._threeOfAKind,\n self._fourOfAKind, self._fullHouse, self._smallStraight, self._largeStraight, self._yahtzee, self._chance\n ]", "def __init__(self):\n self.TECRDB_compounds_data_dict = {}\n self.TECRDB_compounds_pH7_species_id_dict = {}\n self.TECRDB_compounds_least_H_sid_dict = {}\n self.get_TECRDB_compounds_data()", "def initialize( self, layout, numGhostAgents=1000 ):\n self.data.initialize(layout, numGhostAgents) ##self.data is defined in the Grid() class of game.py REF112.It creates an initial game state from a layout array (see 
layout.py).", "def initialize(self):\n\n db = dict()\n\n db['meta'] = Meta(None)\n db['race'] = Race(None, None, None, None, None)\n db['track'] = Track(None, None)\n db['classes'] = set([])\n db['teams'] = set([])\n db['drivers'] = set([])\n\n self.db = db", "def __init__(self, name, address, phone, badge, salary):\r\n\r\n self.name = name\r\n self.address = address\r\n self.phone = phone\r\n self.badge = badge\r\n self.salary = salary", "def __init__(self, eid: str, name: str, weekly_salary: int):\n pay.SalaryPolicy.__init__(self, weekly_salary)\n super().__init__(eid, name)", "def initialize(self):\r\n self.bucket_array.initialize()", "def _init_global_value_by_governance_score(self):\n context: 'IconScoreContext' = self._context_factory.create(IconScoreContextType.QUERY)\n # Clarifies that This Context does not count steps\n context.step_counter = None\n\n try:\n self._push_context(context)\n # Gets the governance SCORE\n governance_score: 'Governance' = context.get_icon_score(GOVERNANCE_SCORE_ADDRESS)\n if governance_score is None:\n raise ServerErrorException(f'governance_score is None')\n\n # Gets the step price if the fee flag is on\n # and set to the counter factory\n if context.is_service_flag_on(IconServiceFlag.fee):\n step_price = governance_score.getStepPrice()\n else:\n step_price = 0\n\n self._step_counter_factory.set_step_price(step_price)\n\n # Gets the step costs and set to the counter factory\n step_costs = governance_score.getStepCosts()\n\n for key, value in step_costs.items():\n try:\n self._step_counter_factory.set_step_cost(\n StepType(key), value)\n except ValueError:\n # Pass the unknown step type\n pass\n\n # Gets the max step limit and keep into the counter factory\n self._step_counter_factory.set_max_step_limit(\n IconScoreContextType.INVOKE,\n governance_score.getMaxStepLimit(\"invoke\"))\n self._step_counter_factory.set_max_step_limit(\n IconScoreContextType.QUERY,\n governance_score.getMaxStepLimit(\"query\"))\n\n finally:\n self._pop_context()\n\n self._context_factory.destroy(context)", "def initialize(self):\n self.gc1.reset_parameters()\n self.gc2.reset_parameters()\n\n for s in self.scores:\n stdv = 1. / math.sqrt(s.size(1))\n s.data.uniform_(-stdv, stdv)\n for b in self.bias:\n # fill in b with postive value to make\n # score s closer to 1 at the beginning\n b.data.fill_(self.bias_init)\n\n for Dk in self.D_k:\n stdv = 1. 
/ math.sqrt(Dk.size(1))\n Dk.data.uniform_(-stdv, stdv)\n\n for b in self.D_bias:\n b.data.fill_(0)", "def __init__(self, low_score=0, high_score=0):\n self.low_score = low_score\n self.high_score = high_score", "def _initialize(self):\n for s in Subsidiary.all():\n self.__create_stock(s)\n self.get_stock()", "def run_compute_weighted_score_exterior_amenities(self, survey, home_score):\n if survey.wants_patio:\n self.handle_weighted_question('patio_balcony', survey.patio_weight, home_score, home_score.home.patio_balcony)\n if survey.wants_pool:\n self.handle_weighted_question('pool', survey.pool_weight, home_score, home_score.home.pool)\n if survey.wants_gym:\n self.handle_weighted_question('gym', survey.gym_weight, home_score, home_score.home.gym)\n if survey.wants_storage:\n self.handle_weighted_question('storage', survey.storage_weight, home_score, home_score.home.storage)", "def __init__(self, name, fuel, reliability):\n super().__init__(name, fuel)\n self.reliability = reliability", "def run_compute_weighted_score_interior_amenities(self, survey, home_score):\n if survey.wants_furnished:\n self.handle_weighted_question('furnished', survey.furnished_weight, home_score,\n home_score.home.furnished)\n if survey.wants_hardwood_floors:\n self.handle_weighted_question('hardwood_floors', survey.hardwood_floors_weight,\n home_score, home_score.home.hardwood_floors)\n if survey.wants_AC:\n self.handle_weighted_question('air_conditioning', survey.AC_weight, home_score,\n home_score.home.air_conditioning)\n if survey.wants_dishwasher:\n self.handle_weighted_question('dishwasher', survey.dishwasher_weight, home_score,\n home_score.home.dishwasher)\n if survey.wants_dogs:\n self.handle_weighted_question('dogs_allowed', survey.dog_weight, home_score, home_score.home.dogs_allowed,\n can_eliminate=True)\n if survey.wants_cats:\n self.handle_weighted_question('cats_allowed', survey.cat_weight, home_score, home_score.home.cats_allowed,\n can_eliminate=True)", "def __init__(self, objective):\n self.objective = objective\n\n # Initialize players\n # We use three dummy player for the target position\n self.players = {}\n for position in ['landlord', 'landlord_up', 'landlord_down']:\n self.players[position] = DummyAgent(position)\n\n # Initialize the internal environment\n self._env = GameEnv(self.players)\n self.total_round = 0\n self.force_bid = 0\n self.infoset = None", "def initialize(self):\n self.population.initialize()\n self.cache.initialize()\n if self.storage:\n self.storage.initialize()", "def __init__(self, score=None, id=None, external_id=None, unique_key=None, hash_key=None, description=None, narrative=None, regulatory_body=None, customer=None, version=None, regulatory_body_approved=None, regulatory_body_approved_by=None, direction=None, guideline_body=None, guideline_version=None, clinical_significance=None, biomarker_class=None, expression=None, sources=None, no_therapy_available=None, therapeutic_context=None, tier=None, tier_explanation=None, criteria_unmet=None, criteria_met=None, classifications=None, prevalence=None, variant_info=None): # noqa: E501 # noqa: E501\n self._score = None\n self._id = None\n self._external_id = None\n self._unique_key = None\n self._hash_key = None\n self._description = None\n self._narrative = None\n self._regulatory_body = None\n self._customer = None\n self._version = None\n self._regulatory_body_approved = None\n self._regulatory_body_approved_by = None\n self._direction = None\n self._guideline_body = None\n self._guideline_version = None\n 
self._clinical_significance = None\n self._biomarker_class = None\n self._expression = None\n self._sources = None\n self._no_therapy_available = None\n self._therapeutic_context = None\n self._tier = None\n self._tier_explanation = None\n self._criteria_unmet = None\n self._criteria_met = None\n self._classifications = None\n self._prevalence = None\n self._variant_info = None\n self.discriminator = None\n if score is not None:\n self.score = score\n if id is not None:\n self.id = id\n if external_id is not None:\n self.external_id = external_id\n if unique_key is not None:\n self.unique_key = unique_key\n if hash_key is not None:\n self.hash_key = hash_key\n if description is not None:\n self.description = description\n if narrative is not None:\n self.narrative = narrative\n self.regulatory_body = regulatory_body\n self.customer = customer\n self.version = version\n if regulatory_body_approved is not None:\n self.regulatory_body_approved = regulatory_body_approved\n if regulatory_body_approved_by is not None:\n self.regulatory_body_approved_by = regulatory_body_approved_by\n if direction is not None:\n self.direction = direction\n if guideline_body is not None:\n self.guideline_body = guideline_body\n if guideline_version is not None:\n self.guideline_version = guideline_version\n if clinical_significance is not None:\n self.clinical_significance = clinical_significance\n if biomarker_class is not None:\n self.biomarker_class = biomarker_class\n if expression is not None:\n self.expression = expression\n if sources is not None:\n self.sources = sources\n if no_therapy_available is not None:\n self.no_therapy_available = no_therapy_available\n if therapeutic_context is not None:\n self.therapeutic_context = therapeutic_context\n if tier is not None:\n self.tier = tier\n if tier_explanation is not None:\n self.tier_explanation = tier_explanation\n if criteria_unmet is not None:\n self.criteria_unmet = criteria_unmet\n if criteria_met is not None:\n self.criteria_met = criteria_met\n if classifications is not None:\n self.classifications = classifications\n if prevalence is not None:\n self.prevalence = prevalence\n if variant_info is not None:\n self.variant_info = variant_info", "def addAllergies(self):\n if int(self.pid)%100 < 85: # no allergies for ~ 85%\n exclusion = NO_ALLERGY.sub({\n 'exclusion':\"no known allergies\",\n 'exclusion_id':\"160244002\",\n }).done()\n self.data.append(SDMX.sub({'models':exclusion}, escape=False).done())\n else: # Sprinkle in some sulfa allergies\n al = DRUG_CLASS_ALLERGY.sub({\n 'reaction': \"skin rash\",\n 'reaction_id': \"271807003\",\n 'category': \"drug allergy\",\n 'category_id': \"416098002\",\n 'allergen': \"sulfonamide antibacterial\",\n 'allergen_id': \"N0000175503\",\n 'severity': \"mild\",\n 'severity_id': \"255604002\",\n }).done()\n self.data.append(SDMX.sub({'models':al}, escape=False).done())\n \n if int(self.pid)%2: # and throw in peanut allergies for every other patient\n al = FOOD_ALLERGY.sub({\n 'reaction': \"anaphylaxis\",\n 'reaction_id': \"39579001\",\n 'category': \"food allergy\",\n 'category_id': \"414285001\",\n 'allergen': \"peanut\",\n 'allergen_id': \"QE1QX6B99R\",\n 'severity': \"severe\",\n 'severity_id': \"24484000\",\n }).done()\n self.data.append(SDMX.sub({'models':al}, escape=False).done())", "def weighted_setup(self):\r\n\r\n grading_policy = {\r\n \"GRADER\": [{\r\n \"type\": \"Homework\",\r\n \"min_count\": 1,\r\n \"drop_count\": 0,\r\n \"short_label\": \"HW\",\r\n \"weight\": 0.25\r\n }, {\r\n \"type\": 
\"Final\",\r\n \"name\": \"Final Section\",\r\n \"short_label\": \"Final\",\r\n \"weight\": 0.75\r\n }]\r\n }\r\n self.add_grading_policy(grading_policy)\r\n\r\n # set up a structure of 1 homework and 1 final\r\n self.homework = self.add_graded_section_to_course('homework')\r\n self.problem = self.add_dropdown_to_section(self.homework.location, 'H1P1')\r\n self.final = self.add_graded_section_to_course('Final Section', 'Final')\r\n self.final_question = self.add_dropdown_to_section(self.final.location, 'FinalQuestion')", "def __init__(self, *, guesser=False, buzzer=False, split='train', class2index=None):\n\t\tsuper().__init__()\n\t\tif not guesser and not buzzer:\n\t\t\traise ValueError('Requesting a dataset which produces neither guesser or buzzer training data is invalid')\n\n\t\tif guesser and buzzer:\n\t\t\tprint('Using QuizBowlDataset with guesser and buzzer training data, make sure you know what you are doing!')\n\n\t\tself.db = QantaDatabase(split, class2index)\n\t\tself.guesser = guesser\n\t\tself.buzzer = buzzer", "def initialize():\r\n\r\n score['text'] = 'Score: 0'\r\n correct_words.delete(0, 'end')", "def __init__(self, name, address, phone, credit_score):\r\n\r\n self.name = name\r\n self.address = address\r\n self.phone = phone\r\n self.credit_score = credit_score", "def __init__(self,student_id,lname,fname, major='Computer Science',gpa='0.0'):\n super().__init__(lname,fname) # Call init on parent class\n self._student_id = student_id\n self._major = major\n self._gpa = gpa", "def __init__(self, db):\n\n # Add database object\n self.db = db\n\n # Initialize a dictionary to store maps of meg data (oscillation bands)\n self.meg_maps = dict()\n self.bands = dict()\n\n # Initialize a dictionary to store exponent map\n self.exponent_map = dict({'Exponents': np.array([])})\n\n # Initialize booleans that keep track of what is loaded\n self.oscs_loaded = False\n self.exponents_loaded = False", "def __init__(self, name, skill):\n \n super(Student, self).__init__(name)\n self.grades = []\n self.skill = skill", "def initialize(self, scores, word_seq, path):\n for label in self.label_type_map:\n label_prob = self.get_ngram_prob([label])\n lexical_generation_prob = self.get_lexical_generation_prob(word_seq[0], label)\n # scores[0][label] = label_prob * lexical_generation_prob\n scores[0][label] = math.log(lexical_generation_prob)\n path[label] = [label]", "def __int__(self, storage):\r\n self._storage = storage\r\n self.tables={}", "def __init__(self, sampling):\n # check if the equi7grid.data have been loaded successfully\n if Equi7Grid._static_data is None:\n raise ValueError(\"cannot load Equi7Grid ancillary data!\")\n # check if sampling is allowed\n if sampling not in Equi7Grid._static_sampling:\n raise ValueError(\"Sampling {}m is not supported!\".format(sampling))\n\n # initializing\n super(Equi7Grid, self).__init__(sampling, tag='Equi7')\n self.core.projection = 'multiple'", "def __init__(self, game):\n self.rooms = self.load_rooms(f\"data/{game}Rooms.txt\")\n self.current_room = self.rooms[1]\n # use self.over to determine if the game if over\n self.over = 0\n self.load_items(f\"data/{game}Items.txt\")\n self.inventory = Inventory()\n # synonyms\n self.synonyms = {}\n self.load_synonyms(\"data/SmallSynonyms.txt\")", "def __init__(self, name='', fuel=0, reliability=0.0):\n super().__init__(name, fuel)\n self.reliability = reliability", "def init_game(self):\n self.blind_manager = BlindManager(hands_per_level=10,\n bots=self.living_bot_names())", "def __init__(self, warehouse):\n 
assert isinstance(warehouse, Warehouse), 'Please supply existing warehouse.'\n assert Level.levelID <= warehouse.get_levelnr()\n self.levelID = 'P' + str(Level.levelID)\n Level.levelID += 1\n self.home = warehouse\n self.shelves = {}\n warehouse.levels[self] = self.shelves", "def __init__(self, name, sdTable,vars=None, basis=None, uptoDeg=None):\n super(FreeAlgebras,self).__init__(name,sdTable)\n self.__vars = vars\n self.__basis = basis\n self.__uptoDeg = uptoDeg", "def __init__(self, env: BaseAviary):\n self.gravity = env.GRAVITY\n \"\"\"float: gravity, the product of the drone's mass M by acc g.\"\"\"\n self.timestep = env.TIMESTEP\n \"\"\"float: simulation and control timestep.\"\"\"\n self.kf_coeff = env.KF\n \"\"\"float: RPMs to force coefficient.\"\"\"\n self.km_coeff = env.KM\n \"\"\"float: RPMs to torque coefficient.\"\"\"\n ############################################################\n ############################################################\n #### HOMEWORK CODE (START) #################################\n ############################################################\n ############################################################\n self.p_coeff_position = None\n \"\"\"proportional coefficient(s) for position control.\"\"\"\n self.i_coeff_position = None\n \"\"\"integral coefficient(s) for position control.\"\"\"\n self.d_coeff_position = None\n \"\"\"derivative coefficient(s) for position control.\"\"\"\n self.p_coeff_attitude = None\n \"\"\"proportional coefficient(s) for attitude control.\"\"\"\n self.i_coeff_attitude = None\n \"\"\"integral coefficient(s) for attitude control.\"\"\"\n self.d_coeff_attitude = None\n \"\"\"derivative coefficient(s) for attitude control.\"\"\"\n ############################################################\n ############################################################\n #### HOMEWORK CODE (END) ###################################\n ############################################################\n ############################################################\n self.reset()", "def __init__(self, times, hardness):\n self.times = times\n self.hardness = hardness\n self.questions = [\n self.Question(hardness)\n for _ in range(self.times)\n ]\n self.score = 0\n self.score_step = TOTAL_SCORE / self.times", "def __init__(self, agi):\n self.agi = agi\n self.agi.status = \"NEW\"\n self.context = self.agi.variables['agi_context']\n ivrlib.__init__(self)\n self.initLogger()\n self.agi.onClose().addErrback(self.onHangup) #register a callback to clean up on Hangup.\n self.dbtries = 0\n self.entries=0\n self.agi.enrollment = \"None\"\n self.times = None\n self.checkenroll()", "def __init__(\n self, initial_year, people, living_expenses_strategy):\n # Recall that, as a Ledger object, we need to call the\n # superclass initializer and let it know what the first\n # year is so that `this_year` is usable.\n # NOTE Issue #53 removes this requirement.\n super().__init__(initial_year)\n\n self.living_expenses_strategy = living_expenses_strategy\n self.people = people", "def __init__(self):\n # Wipe the db\n self.wipe_db()\n\n # Set some global things\n try:\n dashboard_configuration = DashboardConfiguration(type=\"default\")\n dashboard_configuration.save()\n except IntegrityError:\n dashboard_configuration = DashboardConfiguration.objects.filter(type=\"default\").first()\n\n # Add all players from dataset\n group = self.add_players(dashboard_configuration)\n\n # Add all games from the dataset\n self.add_games()\n\n # Create the games played for this group\n 
self.add_game_played(group)", "def __init__(self, galaxy_instance=None):\n self._galaxy_instance = None\n self._workflows = {}\n # if galaxy_instance exists, complete initialization\n if galaxy_instance:\n self._initialize(galaxy_instance)", "def initialize(self):\n for _ in range(self.configuration.n_pop):\n individual = self.random_edge_toggle_list()\n rating = self.rate_edge_toggle_list(individual)\n self._population.append((individual, rating))\n\n # Set Hall of Fame individual\n self.update_hall_of_fame(self._population)", "def initialize(self, train_ratio=0.8):\n logger.info(\"Initializing CBR case base ...\")\n\n # Separate ratings dataframe between train and test\n train, test = split_data(self.all_ratings.shape[0], train_ratio)\n self.ratings = self.all_ratings.loc[self.all_ratings.index.values[train]]\n self.test_ratings = self.all_ratings.loc[self.all_ratings.index.values[test]]\n\n # Sort test ratings so they are grouped by the user they belong to\n self.test_ratings = self.test_ratings.sort_values(['user_id'])\n\n # Compute global structures\n logger.info(\"Initializing movie popularity ...\")\n self.update_popularity()\n logger.info(\"Initializing mean movie score ...\")\n self.update_mean_movie_rating()\n logger.info(\"Initializing mean user score ...\")\n self.update_mean_user_rating()", "def __init__(self, name, hall_ID, password, monthly_salary,\n rebuild=False, worker_ID=None):\n\n # The rebuild flag, if true, denotes that the object is being made from\n # data already present in the database\n # If False, a new data row is added to the specific table\n if not rebuild:\n self.worker_ID = db.add(\"worker\")\n db.update(\"worker\", self.worker_ID, \"worker_type\", \"M\")\n self.password = password\n else:\n self.worker_ID = worker_ID\n self._password = password\n\n self.monthly_salary = monthly_salary\n worker.Worker.__init__(self, self.worker_ID, name, hall_ID)", "def __init__(self, path):\n\n log.info(\"KenwoodDatabase created at: {}\".format(path))\n\n self.db_path = path\n # Open a file for writing\n self.db_file = open(\n os.path.join(self.db_path, \"kenwood.dap\"), mode='wb')\n\n # Create the empty list of offsets\n self.offsets = []\n for offset in range(constants.end_offsets):\n self.offsets.append(0)\n\n self.subIndex = []\n for sub in range(constants.end_subindex_offsets):\n self.subIndex.append(SubIndexEntry())", "def init(self):\n # IMPORTANT: create a new gob database model entry for this object\n self.gobify()", "def __init__(self, affinity, game_type, game_space, opponent=None):\n \n super().__init__(affinity, game_type, game_space, opponent)", "def __init__(self, index_scores):\n self.index_scores = index_scores", "def _init_storage(self):\n if self._ is None:\n self._ = Parameters(self)", "def __init__(self, eid: str, name: str, hours_worked: int, hours_rate: int):\n pay.HourlyPolicy.__init__(self, hours_worked, hours_rate)\n super().__init__(eid, name)", "def __init__(self, ksize=20, alpha=3, id=None, storage=None):\n self.ksize = ksize\n self.alpha = alpha\n self.log = Logger(system=self)\n self.storage = storage or ForgetfulStorage()\n self.node = Node(id or digest(random.getrandbits(255)))\n self.protocol = KademliaProtocol(self.node, self.storage, ksize)\n self.refreshLoop = LoopingCall(self.refreshTable).start(3600)", "def __init__(self, firstname, lastname):\r\n\t\tsuper().__init__(firstname, lastname)\r\n\t\tself.privelages= Privelages()", "def __init__(self, capacity):\n self.capacity = capacity # Number of buckets in the hash table\n self.storage 
= [None] * capacity\n self.key_count = 0", "def __init__(self, auth, home_id=None):\n self.auth = auth\n self.homedata = None\n self.homestatus = None\n self.room_ids = []\n self.room_status = {}\n self.schedules = []\n self.home_id = home_id\n self.home_name = None\n self.away_temperature = None\n self.hg_temperature = None\n self.boilerstatus = None\n self.setpoint_duration = None", "def __init__(self, auth, home=None):\n self.auth = auth\n self.homedata = None\n self.home_ids = []\n self.home_names = []\n self.room_names = []\n self.schedules = []\n self.home = home\n self.home_id = None", "def __init__(self, udb_path):\n self.udb_path = udb_path\n self.__qmood = DesignMetrics(udb_path=udb_path)\n # Calculating once and using multiple times\n self.DSC = self.__qmood.DSC # Design Size\n self.NOH = self.__qmood.NOH # Hierarchies\n self.ANA = self.__qmood.ANA # Abstraction\n self.MOA = self.__qmood.MOA # Composition, Aggregation\n self.DAM = self.__qmood.DAM # Encapsulation\n self.CAMC = self.__qmood.CAMC # Cohesion, CAM\n self.CIS = self.__qmood.CIS # Messaging\n self.NOM = self.__qmood.NOM # Complexity\n self.DCC = self.__qmood.DCC # Coupling\n self.MFA = self.__qmood.MFA # Inheritance\n self.NOP = self.__qmood.NOP # Polymorphism\n\n # For caching results\n self._reusability = None\n self._flexibility = None\n self._understandability = None\n self._functionality = None\n self._extendability = None\n self._effectiveness = None", "def __init__(self):\r\n\t\twith open(\"eqs.json\") as qData:\r\n\t\t\tself.questions = json.load(qData)\r\n\t\twith open(\"eqsave.json\") as uData:\r\n\t\t\tself.records = json.load(uData)\r\n\t\tself.types = {\"1\": \"Reformer\", \"2\": \"Helper\", \"3\": \"Achiever\", \"4\": \"Individualist\", \"5\": \"Investigator\", \"6\": \"Loyalist\", \"7\": \"Enthusiast\", \"8\": \"Challenger\", \"9\": \"Peacemaker\"}", "def __init__(self, treshold: float = 0.5):\n self.threshold: float = treshold\n self.dice_scores: list = []\n self.iou_scores: list = []\n self.sens_scores: list = []\n self.spec_scores: list = []\n self.accu_scores: list = []", "def construct_score_book(self, items_and_size: List[Tuple[str, float]]) -> None:\n self.score_book = {}\n\n for item, size in items_and_size:\n self.score_book[item] = size", "def __init__(self, caldb_name, irf_name, verbose=False):\n\n self.caldb_path = os.environ['CALDB']\n self.caldb_name = caldb_name\n self.irf = irf_name\n self.verbose = verbose\n\n self.am_ok = True\n\n self._aeff = dict()\n self._psf = dict()\n self._edips = dict()\n\n self._check_available_irfs()\n\n self.input_irf_file_name = '{path:s}/data/cta/{caldb:s}/bcf/{irf:s}/irf_file.fits'.format(path=self.caldb_path,\n caldb=self.caldb_name,\n irf=irf_name)", "def _refresh(self):\n self.__init__(self.db, self.roster_set, self.constraint_fns)\n self.add_objective()\n for fns in self.constraint_fns.keys():\n fns(*self.constraint_fns[fns])", "def setup_new_game(self):\r\n self._player = Player()\r\n self._stats = GameStats(self._bb_settings)\r\n self._scoreboard = Scoreboard(self._bb_settings, self._screen)", "def __init__(self):\n self.default_score = 1500\n self.run_past_matches()", "def __init__(self):\n self.set_recharge(random.randint(1000, 2000) / 10000)\n operator_count = random.randint(1, 3)\n self.operators = [Solder() for _ in range(0, operator_count)]\n list_operators = [i.get_health for i in self.operators]\n self.set_health(sum(list_operators) / len(list_operators))", "def __init__(self, num_of_cheeses, num_of_stools):\n\n self.toah_model = 
TOAHModel(num_of_stools)\n self.toah_model.fill_first_stool(num_of_cheeses)", "def __init__(self, ID, habitatType, anthro):\t\n\t\tself.agentID = ID\n\t\tself.habitatType = int(habitatType)\n\t\tself.humanPresence = bool(np.random.binomial(1,(anthro / 100),1))#False#int(random.choice([True,False]))\n\t\tself.predatorPresence = False\n\t\tself.nestInfo = None\n\t\tself.chickWeight = list()\n\t\tself.nestLocation = list()\n\t\tself.closestNest = 0", "def init(self, grouping_epsg_code: int = None):\n\n # Add any missing schema items or patches.\n _schema.create_schema(\n self._engine, epsg_code=grouping_epsg_code or DEFAULT_EPSG\n )\n\n # If they specified an epsg code, make sure the existing schema uses it.\n if grouping_epsg_code:\n crs_used_by_schema = self.grouping_crs\n if crs_used_by_schema != f\"EPSG:{grouping_epsg_code}\":\n raise RuntimeError(\n f\"\"\"\n Tried to initialise with EPSG:{grouping_epsg_code!r},\n but the schema is already using {crs_used_by_schema}.\n\n To change the CRS, you need to recreate Explorer's schema.\n\n Eg.\n\n # Drop schema\n cubedash-gen --drop\n\n # Create schema with new epsg, and summarise all products again.\n cubedash-gen --init --epsg {grouping_epsg_code} --all\n\n (Warning: Resummarising all of your products may take a long time!)\n \"\"\"\n )\n refresh_also = _schema.update_schema(self._engine)\n\n if refresh_also:\n _refresh_data(refresh_also, store=self)", "def at_object_creation(self):\n\n # in GRAMS mofo, this isn't the 17th century you know!\n self.db.weight = 0\n # In calories, main currency will be calories and bullets\n self.db.cost = 0\n # Not completely decided on implementing this - potenially a L W H or a \"tiny, small \" etc\n # or perhaps volume. Was thinking to stop people putting a chair in a fannypack\n # 10 tiny in one small - Tiny 1\n # 10 Small in one medium - Small - 10\n # 10 Medium in one large - Medium - 100\n # 10 Large - VeryLarge - Large - 1000\n # 10 VeryLarge - Room - 10000 VeryLarge - 10000\n # Room - 100000 Room - 100000\n # Fanny Pack - one Medium, 100 bullets,\n # Regular Back Page - 5 Medium\n # Hiking Pack - 1=2 large\n # Car - vary large, truck 3-4 Very Large to 2-3 rooms\n #\n # To carry something you have be able to carry the WEIGHT and fit in a container\n #\n # we'll assume you can carry 300 by default - Worn items removed from this\n # We'll also assume users can carry ONE item to their maximum weight without a container.. i.e. body\n # Or perhaps we add a \"Carry\" command, anything can be carried in your hands, just one of them\n # Other commands \"Drag\" - might be a Weight * 3 amount\n\n self.db.size = 0\n # some items will have a minimum size object they can fit in\n # self.db.min_size_fit = 0 # Removed - we were going to enforce this - to hard/complicated/saddens game\n\n # many items will have a noise modifier which will effect sneaking\n # We may also have it for weapon attacks.. i.e. firing bow vs gun\n # will it attract more zombies to you than another person\n # Increase chance of random encounters\n self.db.noise_modifier = 0\n\n # Can this be broken to create other parts\n # IDEA: Move this to a \"material_source\" object.. 
an object that we can create and when broken\n # generates further objects for collection\n\n self.db.breakable = 0\n # Hidden indicates this object is hidden in the room\n self.db.hidden_in_room = 0\n\n self.locks.add(\"equip:false()\")", "def __init__(self, fitness_function=None):\n Function.initialize(self, fitness_function)", "def __init__(self, alpha=80, beta=13, gamma=3, spatial_ker_weight=3, bilateral_ker_weight=10):\n self.alpha = alpha\n self.beta = beta\n self.gamma = gamma\n self.spatial_ker_weight = spatial_ker_weight\n self.bilateral_ker_weight = bilateral_ker_weight", "def save_agea(self, data, suffix=''):\n self.title = data.get('title', self.title)\n self.question = data.get('question', self.question)\n self.raw_question = data.get('raw_question',self.raw_question)\n self.raw_solution= data.get('raw_solution',self.raw_solution)\n self.max_attempts = data.get('max_attempts', self.max_attempts)\n # Validate points before saving\n points = data.get('points', self.points)\n # Check that we are an int\n try:\n points = int(points)\n except ValueError:\n raise JsonHandlerError(400, '\"Score to be graded out of\" must be an integer')\n\n # Check that we are positive\n if points < 0:\n raise JsonHandlerError(400, '\"Score to be graded out of\" must be a positive integer')\n self.points = points\n\n # Validate weight before saving\n \n weight = data.get('weight', self.weight)\n # Check that weight is a float.\n if weight:\n try:\n weight = float(weight)\n except ValueError:\n raise JsonHandlerError(400, 'Weight must be a decimal number')\n # Check that we are positive\n if weight < 0:\n raise JsonHandlerError(\n 400, 'Weight must be a positive decimal number'\n )\n self.weight = weight \n submission = self.get_question()\n if submission:\n uploaded_submission = submission.get(\"question\").get(\"filename\", None)\n if uploaded_submission:\n question = self._question_storage_path(self.raw_question['sha1'], self.raw_question['filename'])\n question = os.path.join(IMAGEDIFF_ROOT, question)\n actual=total_marks(question)\n if actual < points:\n raise JsonHandlerError(400, '\"Score to be graded out of\" should be less than equal to the maximum attainable score for the question paper you uploaded')\n \n self.save()\n log.info(self)\n \n #self.weight = data.get('weight', self.max_score())", "def __init__(self, alg, key):\n self.alg = alg\n self.key = key", "def __init__(self):\n self.incidents_models = {}\n self.risks = []\n self.incidents_models = None", "def __init__(self, dbname):\n self.__name__ = 'ram'\n self.__Trace__ = Trace\n self.dbname = dbname\n self.variables_to_tally = [] # A list of sequences of names of the objects to tally.\n self._traces = {} # A dictionary of the Trace objects.\n self.chains = 0\n self._default_chain = -1", "def __init__(__self__, *,\n capacity: Optional[pulumi.Input[int]] = None,\n family: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None):\n if capacity is not None:\n pulumi.set(__self__, \"capacity\", capacity)\n if family is not None:\n pulumi.set(__self__, \"family\", family)\n if name is not None:\n pulumi.set(__self__, \"name\", name)", "def initialize_database():\n # TODO: Refactor the funtime library\n this.db = Store(this.host).create_lib(this.store_name).get_store()", "def __init__(self, kBoundedRing):\n KBoundedQuotientBasis.__init__(self, kBoundedRing, 'dks')\n\n kHLP = kBoundedRing.kHallLittlewoodP()\n self.module_morphism(self._dks_to_khlp_on_basis,codomain=kHLP).register_as_coercion() # morphism from 
dual-k-Schurs to k-bounded-HLP\n kHLP.module_morphism(self._khlp_to_dks_on_basis,codomain=self).register_as_coercion() # morphism from k-bounded-HLP to dual-k-Schurs", "def __init__(self, number_of_cheeses, number_of_stools):\n self.model = TOAHModel(number_of_stools)\n self.model.fill_first_stool(number_of_cheeses)", "def perform_create(self, serializer):\n # required for perform_create(); creates the score object in database\n score = serializer.save()\n\n # trigger update function for engine (bayes update if adaptive)\n log.debug(\"Triggering engine update from score\")\n engine = get_engine()\n engine.update_from_score(score.learner, score.activity, score.score)", "def __init__(self, dualgan:nn.Module, l_adv:float=1., l_rec:float=1., l_idt:float=0.):\n super().__init__()\n store_attr()", "def __init__(self, filename = None, dbalias = None, hltpskey = None ):\n super(HLTPrescalesSetAccess,self).__init__( ConfigType.HLTPS, mainkey = \"prescales\",\n filename = filename, dbalias = dbalias, dbkey = hltpskey )\n self.loader.setQuery([\n \"SELECT HPS_DATA FROM {schema}.HLT_PRESCALE_SET WHERE HPS_ID={dbkey}\" # for current and new db schema\n ])\n self.load()", "def initialize(self):\n for i in range(self.number_of_persons):\n gender = 0 if np.random.random() < self.gender_ratio else 1\n age = np.random.randint(15,65)\n days = np.random.randint(0,365)\n if age >= 15 and age < 35:\n if np.random.random() < self.sexual_activity_high:\n sexual_activity = 1\n self.high_sexual_activity.add(i)\n else:\n sexual_activity = 0\n else:\n sexual_activity = 0\n \n p = Person(i,gender, age, days, sexual_activity)\n self.persons.append(p)\n if gender == 0:\n self.straight_males.add(i)\n else:\n self.females.add(i)\n self.singles.add(i)\n \n age_group = int(np.floor((age+5)/10))-2\n self.age_group[age_group].add(i)\n \n self.number_of_singles = self.number_of_persons", "def Init(self):\r\n print(\"Initiating...\")\r\n if (self.Get_FullScale_Value() == self.FullScaleEnum[0]):\r\n self.gain = 0.00875\r\n elif (self.Get_FullScale_Value() == self.FullScaleEnum[1]):\r\n self.gain = 0.0175\r\n elif (self.Get_FullScale_Value() == self.FullScaleEnum[2]):\r\n self.gain = 0.07\r\n print(\"Gain set to:{0}\".format(self.gain))", "def home_team_score(self, home_team_score):\n\n self._home_team_score = home_team_score", "def __init__(self):\n # The logging object. 
\n # Example: self.log.info(f\"Current value of var: {my_var}\")\n self.log = logging.getLogger()\n\n # Create object of EDH\n self.edh = EDH.EDH()\n\n # Parameters to be used in sale\n self.item_amount = \"$0.01\"\n self.fuel_amount = \"$1.00\"\n self.fuel_grade = \"Diesel 1\"", "def initialize():\n db.connect()\n db.create_tables([Expense], safe=True)", "def db2score(self):\n print(\"db2score\")\n self.score.array_frame_start = self.arrayFrameStart\n self.score.array_frame_end = self.arrayFrameEnd\n self.score.arraySet = self.arraySet\n self.score.arrayGame = self.arrayGame\n self.score.arrayScore = self.arrayScore\n self.score.arrayScoreResult = self.arrayScoreResult\n self.score.arrayFirstSecond = self.arrayFirstSecond\n self.score.arrayServer = self.arrayServer\n\n self.score.arrayPointWinner = self.arrayPointWinner\n self.score.pointWin = self.pointWin\n self.score.arrayPointPattern = self.arrayPointPattern\n self.score.arrayForeBack = self.arrayForeBack\n\n self.score.arrayContactServe = self.arrayContactServe\n self.score.arrayCourt = self.arrayCourt\n\n self.score.playerA = self.playerA\n self.score.playerB = self.playerB\n self.score.number = self.number\n self.score.totalGame = self.totalGame\n self.score.faultFlug = self.faultFlug\n self.score.arrayFault = self.arrayFault\n\n # size = len(self.score.array_frame_start)\n\n self.score.shot_frame = self.shot_frame\n self.score.array_ball_position_shot_x = self.array_ball_position_shot_x\n self.score.array_ball_position_shot_y = self.array_ball_position_shot_y\n self.score.arrayPlayerAPosition_x = self.arrayPlayerAPosition_x\n print(self.score.arrayPlayerAPosition_x)\n self.score.arrayPlayerAPosition_y = self.arrayPlayerAPosition_y\n self.score.arrayPlayerBPosition_x = self.arrayPlayerBPosition_x\n self.score.arrayPlayerBPosition_y = self.arrayPlayerBPosition_y\n self.score.arrayHitPlayer = self.arrayHitPlayer\n self.score.arrayBounceHit = self.arrayBounceHit\n self.score.arrayForeBack = self.arrayForeBack\n self.score.arrayDirection = self.arrayDirection\n\n self.score.array_x1 = self.array_x1\n self.score.array_y1 = self.array_y1\n self.score.array_x2 = self.array_x2\n self.score.array_y2 = self.array_y2\n self.score.array_x3 = self.array_x3\n self.score.array_y3 = self.array_y3\n self.score.array_x4 = self.array_x4\n self.score.array_y4 = self.array_y4\n\n self.score.array_frame_start = self.arrayFrameStart\n self.score.shot_index = self.score.create_index_shot(\n self.score.array_frame_start, self.score.shot_frame\n )\n\n # for i in len(self.score.array_frame_start):\n # self.score.shot_index = [0 for i in range(len(self.array_ball_position_shot))]#あとで変更の必要あり\n\n # self.score.array_ball_position_shot = self.check_size_return_array(\n # self.array_ball_position_shot, size\n # )\n # self.score.arrayPlayerAPosition = self.check_size_return_array(\n # self.arrayPlayerAPosition, size\n # )\n # self.score.arrayPlayerBPosition = self.check_size_return_array(\n # self.arrayPlayerBPosition, size\n # )\n # self.score.arrayHitPlayer = self.check_size_return_array(\n # self.arrayHitPlayer, size\n # )\n # self.score.arrayBounceHit = self.check_size_return_array(\n # self.arrayBounceHit, size\n # )\n # self.score.arrayForeBack = self.check_size_return_array(\n # self.arrayForeBack, size\n # )\n # self.score.arrayDirection = self.check_size_return_array(\n # self.arrayDirection, size\n # )\n\n # self.score.array_x1 = self.check_size_return_array(self.array_x1, size)\n # self.score.array_y1 = self.check_size_return_array(self.array_y1, size)\n 
# self.score.array_x2 = self.check_size_return_array(self.array_x2, size)\n # self.score.array_y2 = self.check_size_return_array(self.array_y2, size)\n # self.score.array_x3 = self.check_size_return_array(self.array_x3, size)\n # self.score.array_y3 = self.check_size_return_array(self.array_y3, size)\n # self.score.array_x4 = self.check_size_return_array(self.array_x4, size)\n # self.score.array_y4 = self.check_size_return_array(self.array_y4, size)\n\n return self.score", "def allergies(self, allergies):\n\n self.logger.debug(\"In 'allergies' setter.\")\n\n self._allergies = allergies", "def __init__(self):\n \n self.mineLatLong = np.array([0.0,0.0]) \n self.theOreBody = OreBodyDataManager()\n self.theMiningSystem = MiningSystemDataManager()\n self.theProcessingSystem = ProcessingSystemDataManager()\n self.theEconomicDataManager = EconomicDataManager()\n self.theInfrastructureManager = InfrastructureDataManager()", "def __init__(self, width, height):\n super().__init__(width, height)\n arcade.set_background_color(arcade.color.SMOKY_BLACK)\n\n self.held_keys = set()\n\n \n # TODO: declare anything here you need the game class to track\n self.ship = Ship()\n self.asteroid_array = []\n self.bullets_list = []\n self.create_asteroids()", "def init2(self):\n self.skill_points = self.count_skill_points()\n self.count_saves()\n self.lives = self.count_lives()\n self.base_attack = fetch_data.get_base_attack(self.BASE_ATTACK_LVLS, self.lvl)" ]
[ "0.5347252", "0.5322024", "0.5263044", "0.5225015", "0.5212653", "0.51022935", "0.5069073", "0.50057083", "0.50024194", "0.49871403", "0.49801096", "0.49767306", "0.49670827", "0.49553522", "0.4951048", "0.49419042", "0.4933451", "0.48849487", "0.48819917", "0.48739007", "0.48611495", "0.48207745", "0.4819583", "0.48166606", "0.48124182", "0.48099658", "0.48068225", "0.47858834", "0.47806332", "0.47776258", "0.47768906", "0.47675836", "0.47626", "0.4757795", "0.4756771", "0.47507244", "0.4746333", "0.4745856", "0.47442418", "0.47353363", "0.47301877", "0.47196212", "0.4703247", "0.46935496", "0.4688852", "0.4682465", "0.46805343", "0.4680533", "0.4677625", "0.4672564", "0.4670606", "0.466747", "0.46664873", "0.4664954", "0.4663438", "0.4652718", "0.46506584", "0.4641497", "0.46346554", "0.46209148", "0.46199855", "0.46122733", "0.4611773", "0.4603301", "0.46032855", "0.4601856", "0.45988312", "0.45963877", "0.45914516", "0.45861325", "0.45818436", "0.45797604", "0.45744628", "0.45739356", "0.4572149", "0.45637655", "0.45609772", "0.45557305", "0.4555295", "0.45534778", "0.45534563", "0.45529458", "0.454799", "0.45448342", "0.4538854", "0.45339584", "0.45320684", "0.45305762", "0.45235822", "0.4518741", "0.45171294", "0.4515892", "0.45144227", "0.45108032", "0.450777", "0.4507342", "0.45045173", "0.4504419", "0.4502397", "0.4501957" ]
0.5045687
7
Gets the header of the explog.yml file. The returned text is as below.
def getExplogHeader(FolderID, NumberofExps): HeaderLines = [] HeaderLines.append("ExpFolderID: {ExpDirID}".format(ExpDirID=FolderID)) HeaderLines.append("NumberofEntries: {NumExps}".format(NumExps=NumberofExps)) HeaderLines.append("ExpEntries: ") return "\n".join(HeaderLines)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_header():\n try:\n yml_iter = cfg.yml_config[\"header\"]\n except:\n # Probably no \"comments\" section in the yml-file.\n return \"\"\n\n return (\"\\n\".join(yml_iter) + \"\\n\\n\") if yml_iter is not None else \"\\n\"", "def _get_compose_header(ctx):\n return get_artifact('compose_header.yml')", "def readHeader():\n with open(\"./header.conf\", \"r\") as fd:\n header = fd.readlines()\n return header", "def get_header():\n title = \"\"\"\n ___ __\n | o _|_ _|_ _ ._ (_ _ ._ _|_ o ._ _ _ ._ _|_ /\\ ._ _. | _ o _\n | \\/\\/ | |_ |_ (/_ | __) (/_ | | |_ | | | | (/_ | | |_ /--\\ | | (_| | \\/ _> | _>\n /\"\"\"\n\n sub_title = \"Get sentiments from your tweets fast and easy!\"\n header = bcolors.HEADER + title + bcolors.ENDC + \"\\n\" + bcolors.WARNING + \"\\t\\t\" + sub_title + bcolors.ENDC + \"\\n\"\n return header", "def title(self):\n for cell in self.markdown_cells():\n m = MARKDOWN_HEADER.match(cell.source)\n if m and len(m.group('level')) == 1:\n return m.group('header').strip()\n return None", "def header_text(self):\n return os.linesep.join(map(str, self.headers))", "def get_heading(self):\n return self.heading[0]", "def getHeader(self):\n return self.data.header", "def get_header(file):\n with open(file, 'r') as f:\n return f.readline()", "def header(self, **args):\n return self.pageConfig['header'] % self.pageConfig", "def get_headers():\n file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),\n '..', 'cfg', 'headers.json'))\n return open_json_file(file_path)", "def getHeader():\n return _HEADER", "def headerstring(self):\n sss = 'IVO LEGEND:\\n'\n sss += ' Created from 152 or 155\\n'\n sss += ' Pct number\\n'\n sss += ' Found in 152 (Y/N)\\n'\n sss += ' Found in 155 (Y/N)\\n'\n sss += ' Ivo serial number\\n'\n sss += ' PEB used for opening\\n'\n sss += ' Opening date/time\\n'\n sss += ' Date/time of first vote\\n'\n sss += ' PEB used for closing\\n'\n sss += ' Closing date/time\\n'\n sss += ' Date/time of last vote\\n'\n sss += ' Number of vote events 152\\n'\n sss += ' Number of vote events 155\\n'\n sss += ' Number of vote events 155 by precinct\\n'\n sss += ' Number of late vote events 152\\n'\n sss += ' Pct numbers\\n'\n sss += ' Ballot styles\\n'\n sss += ' Memory collection times\\n'\n return sss", "def get_header(self):\n return self._header", "def extract_header(message_dict):\n header = message_dict[\"structured_text\"][\"header\"]\n\n return header", "def tsv_header(self):\n return self.tsv_lines[0]", "def title(self):\n return self.header", "def header(self):\n return self[0]", "def header(self):\r\n return self.__header", "def get_title(self):\n title = (None, 7)\n for text, level in self._headers:\n if level < title[1]:\n title = (text, level)\n return title[0]", "def GetIndexFileHeaderText(headerinfo):#{{{\n (dbname, version, ext, prefix) = headerinfo\n indexFileHeaderText = []\n indexFileHeaderText.append(\"DEF_VERSION %s\"%(version))\n indexFileHeaderText.append(\"DEF_DBNAME %s\"%(dbname))\n indexFileHeaderText.append(\"DEF_EXTENSION %s\"%(ext))\n indexFileHeaderText.append(\"DEF_PREFIX %s\"%(prefix))\n return indexFileHeaderText", "def getTableHeader(self, filename):\n hdr = \"\"\n with open(filename, \"r\") as f:\n for line in f:\n if line[0] == \">\":\n hdr += line\n else:\n return hdr", "def header( self ):\n\t\treturn '; '.join( [ '='.join(i) for i in self.items() ] )", "def __header(self, conf):\n result = \"\"\n\n i = conf[\"conf_json\"][0]\n result += \"\"\"\n<a id='toc'></a>\n# %s\n\n**Version:** %s <br/>\n**API URL:** <a 
href=\"%s\">%s</a><br/>\n**Contact:** %s<br/>\n**License:** %s<br/>\n\n\n\n## <a id=\"description\"></a>Description [back to top](#toc)\n\n%s\n\n%s\"\"\" % (\n i[\"title\"],\n i[\"version\"],\n i[\"base\"] + i[\"url\"],\n i[\"base\"] + i[\"url\"],\n i[\"contacts\"],\n i[\"license\"],\n i[\"description\"],\n self.__parameters(),\n )\n # (i[\"title\"], i[\"version\"], i[\"base\"] + i[\"url\"], i[\"base\"] + i[\"url\"], i[\"contacts\"], i[\"contacts\"], i[\"license\"],\n # \"\".join([\"<li>[%s](#%s): %s</li>\" % (op[\"url\"], op[\"url\"], op[\"description\"].split(\"\\n\")[0])\n # for op in self.conf_json[1:]]),\n # i[\"description\"], self.__parameters())\n return markdown(result)", "def read_header(file_path):\n with open(file_path, 'r') as f:\n header = f.readline()\n return header.strip()", "def __getCopyrightHeader():\n\n # Get framework version\n file = open('package.json', 'r')\n version = json.loads(file.read())['version']\n file.close()\n\n # Get header template\n file = open('build/LICENSE.HEADER', 'r')\n header = file.read()\n file.close\n now = datetime.datetime.now()\n header = header.replace('$VERSION', version).replace('$YEAR', str(now.year)).replace('$DATE', now.ctime())\n\n return header", "def header(self):\n return self._header", "def header(self):\n return self._header", "def header(self):\n return self._header", "def generate_header(self, header=None):\n if header is None:\n header = self.header\n\n lines = [self.PREFIX_HEAD + '!b']\n for k, v in header.items():\n if k in ('labels', 'categories'):\n v = ', '.join(v)\n elif k == 'draft':\n v = repr(v)\n lines.append(self.HEADER_FMT % (k, v))\n lines.append(self.PREFIX_END)\n return '\\n'.join([_f for _f in lines if _f]) + '\\n'", "def get_header(filename):\n if not os.path.isfile(filename):\n sys.exit('ERROR: input {} does not exist'.format(filename))\n try:\n hdr = dcm.read_file(filename)\n return hdr\n except:\n sys.exit('ERROR: failed to parse {}'.format(filename))", "def get_heading(self):\n return self.__heading", "def get_heading(self):\n return self.__heading", "def getTextWithHeaders():", "def header(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"header\")", "def header():\n record = cfg.get_current_site_record()\n header = \"{0} ({1})\".format(record['url'], record['id'])\n size = len(header) + 2 + 2\n return \"\"\"{sep}\n# {header} #\n{sep}\"\"\".format(sep='#'*size, header=header)", "def peek_header(self):\n header = None\n if self._headers:\n # returns the last element on the list\n header = self._headers[-1:]\n\n return header", "def description(self):\n return self._hdr", "def description(self):\n return self._hdr", "def heading(self):\n return self._heading", "def generate_header():\n env = getattr(g, 'env', 'unknown')\n return {'X-LOGGLY-TAG': env}", "def mail_header(self):\n return self._hdr", "def first_header():\n return \"\"\"\n<th>Target\n<th>Date\n<th colspan=\"2\">UT\n<th>Exp\n<th>Cycle\n<th>No. 
of\n<th>Filters\n<th>XxY\n<th>Speed\n<th>NX1xNY1\n<th>X1\n<th>Y1\n<th>NX2xNY2\n<th>X2\n<th>Y2\n<th>Grat.\n<th>Slit\n<th>Slit\n<th>ID\n<th>PI\n<th align=\"left\">Comment\n\"\"\"", "def get_headers_for_print(self):\n lines_for_print = []\n for header in self.metadata:\n lines_for_print.append(self.metadata[header])\n lines_for_print.append('\\t'.join(self.header))\n lines_for_print[-1] = '#' + lines_for_print[-1]\n return lines_for_print", "def _extract_headers(self):\n\n with open(self.file_path, \"rt\", encoding=self._encoding) as csv_file:\n for row in csv.reader(csv_file):\n if self._file_headings:\n return [header if header != \"\" else f\"Untitled_{index + 1}\" for index, header in enumerate(row)]\n\n else:\n return [f\"Untitled_{i + 1}\" for i in range(len(row[0]))]", "def get_header(filepath):\n header = None\n for i, x in enumerate(open(filepath)):\n if i == 0:\n header = x\n return(header)", "def header(self):\n if not self._ast:\n return None\n else:\n return self._ast[0]", "def authentication_header():\n with open(KEY_FILE, \"r\") as file:\n header = json.load(file)\n return header", "def GetHeader(self) -> \"char const *\":\n return _itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMD2_GetHeader(self)", "def write_header(self):\n lines = [\"\"]\n\n for key in self._header_keys:\n value = self.get_attr_from_name(key)\n if isinstance(value, list):\n value = \",\".join([f\"{v:.1f}\" for v in value])\n elif isinstance(value, (float)):\n value = f\"{value:.7f}\"\n elif isinstance(value, (int)):\n value = f\"{value:.0f}\"\n\n key = (\n key.replace(\"_\", \" \")\n .title()\n .replace(\" \", \"\")\n .replace(\"MTEdit.\", \"MTEdit:\")\n )\n\n lines.append(f\"${key}={value.capitalize()}\")\n\n return lines", "def get_config_header(_config_global, _debug_log, _dpid, _hardware):\n return ''", "def get_vars_header(lfile):\n # Get variable list\n vlist = []\n with open(lfile) as fh:\n for line in fh:\n vlist.append(line.split()[-1])\n # Get extension\n ext = \".h5\"\n if vlist[0] == \"ASCII\":\n ext = \".csv\"\n elif vlist[0] == \"HDF5\":\n ext = \".h5\"\n else: # Unsupported type\n pass\n return vlist[1:], ext # First line is a header line of sorts", "def header(self, format=None):\n return [\" ID \",\n \"East\",\n \"North\",\n \"TARGET ELEV\",\n \" LENGTH\",\n \" AZ\",\n \" DIP\",\n \"PLAN ELEV\"]", "def GetHeaders(the_file):\n\n data = exifread.process_file(the_file, 'UNDEF', False, False, False)\n return data", "def header(self):\n return encode_as_str([self.unsealed_header(), self.seal_data], sep='`')", "def header(self):\n return encode_as_str([self.unsealed_header(), self.seal_data], sep='`')", "def GetHeader(self) -> \"char const *\":\n return _itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMF2_GetHeader(self)", "def _get_header(self, header):\n if header is None:\n html = self.header()\n else:\n html = header\n return html", "def pp_file_header(self):\n self.separator()\n for item in self.file_header:\n print(item.ljust(27, ' ') + \": {}\".format(self.file_header[item]))\n \n self.separator()", "def section_header(text):\n\n print \"---- %s ----\" % text", "def estimated_heading(self):\n return self._estimates[2].item(0)", "def _horizontal_header(self):\n return self.header()", "def _horizontal_header(self):\n return self.header()", "def as_text(self):\r\n header_text = \"|--- SCHC Fragment Header {}---|\\n\"\r\n return self.base_as_text(header_text)", "def block_header(self):\n return self._current_block[0]", "def print_header_information():\n\t\tprint \"Elijah Molloy\"\n\t\tprint 
\"70-510 - Spring 1 - 2018\"\n\t\tprint \"PROGRAMMING ASSIGNMENT #4\\n\"", "def header(self):\n ...", "def getNexHeader( nexFile ):\r\n\tnexHeader = []\r\n\tnexIn = open(nexFile,'r')\r\n\treadFile = nexIn.read()\r\n\tfor line in readFile.splitlines(True):\r\n\t\tif \"nexus\" in line:\r\n\t\t\tnexHeader.append(line)\r\n\t\tif \"NEXUS\" in line:\r\n\t\t\tnexHeader.append(line)\r\n\t\tif \"begin data\" in line:\r\n\t\t\tnexHeader.append(line)\r\n\t\tif \"dimensions\" in line:\r\n\t\t\tnexHeader.append(line)\r\n\t\tif \"format\" in line:\r\n\t\t\tnexHeader.append(line)\r\n\t\tif \"matrix\" in line:\r\n\t\t\tnexHeader.append(line)\r\n\tnexIn.close()\r\n\treturn nexHeader", "def header(self):\n return \"Step {}: {}\".format(\".\".join(str(e) for e in self._id), self.title)", "def get_reference_header(file):\n\n with open(file, \"r\") as typing_report:\n lines = typing_report.readlines()\n return lines[1].split('\\\\t')[3]", "def get_header():\n str_list = ['specimennumber','speciesid','group','family','genus','species','scientificname', \\\n 'commonname','country','state','county','locality','latitude','longitude', \\\n 'source','accuracy','drainagename','centroidtype','huc8name','huc8', \\\n 'huc10name','huc10','huc12name','huc12','date','year','month','day','status','comments', \\\n 'recordtype','disposal','museumcatnumber','freshmarineintro','references']\n return str_list", "def second_header():\n return \"\"\"\n<th>\n<th>start\n<th>start\n<th>end\n<th>(secs)\n<th>time\n<th>frames\n<th>\n<th>bin\n<th>\n<th>\n<th>\n<th>\n<th>\n<th>\n<th>\n<th>\n<th>width\n<th>angle\n<th>\n<th>\n<th>\n<th>\n\"\"\"", "def get_heading(self):\n raise NotImplementedError()", "def print_header(filename):\n\n date_list = filename[0:10].split('_')\n # Hint: CWB Metadata cannot contain dashes -\n name = 'id=\"{}\"'.format(filename[0:-4].replace('-', '_'))\n date = 'date=\"{}\"'.format('_'.join(date_list))\n year = 'year=\"{}\"'.format(date_list[0])\n month = 'month=\"{}\"'.format(date_list[1])\n day = 'day=\"{}\"'.format(date_list[2])\n\n header = '<text {} {} {} {} {}>'.format(name, date, year, month, day)\n\n print(header)", "def get_subheader(self):\n element = self.driver.find_element(*self.subheader_selector)\n return element.text", "def header(self):\r\n return self.verbose_name", "def GetHeader(self) -> \"char const *\":\n return _itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMF3_GetHeader(self)", "def __getHeader(self):\n\n return (\"\\\\rtf%d\" # RTF version\n \"\\\\%s\" # Character set used in document\n \"\\\\deff%d\" # Index of default font\n \"\\\\deflang%d\\n\" # Default language\n % (self.rtfVersion, self.charset, self.defaultFont,\n self.defaultLang) +\n self.__getFontTable() +\n self.__getColorTable() +\n self.__getListTables() +\n self.__getGenerator())", "def print_header():\n \n print_from_file(\"html/header.html\")", "def header(self, text, level, raw=None):\n return [[MdStyleInstructionCell('h{}'.format(level))] + text]", "def header_huffington(self):\n head = '\\n ^^Polls ^^fetched ^^from ^^[http://elections.huffingtonpost.com/](http://elections.huffingtonpost.com/).\\n\\n'\n head += '***{}***\\n\\n'.format(self.get_greeting())\n head += '.\\n\\n'\n head += '.\\n\\n'\n return head", "def GetHeader(self) -> \"char const *\":\n return _itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMD3_GetHeader(self)", "def get_header(file):\n buffer=''\n for line in open(file).readlines():\n if line[0]=='#': buffer=buffer+line\n else: break\n return buffer", "def headers(self):\n fd = open(self.filename, 
\"r\")\n file_head = fd.readlines()\n fd.close()\n\n zip_heads = zip(file_head[0].split(self.separator),\n file_head[1].split(self.separator))\n\n metric_names = []\n category = \"\"\n for _category, metric in zip_heads:\n # fill empty category names\n if len(_category) is not 0:\n category = _category\n\n metric_names.append(\"%s.%s\" % (category, metric))\n\n return metric_names[:-1]", "def print_header(now):\n global config\n date_time = datetime.datetime.fromtimestamp(now).strftime('%Y-%m-%d %H:%M:%S')\n\n print('*************************************')\n print(f'HTTP LOGS STATISTICS - {date_time}')", "def get_header(self, root):\n header = etree.SubElement(root, \"FileHeader\")\n header.set(\"revMajor\", \"1\")\n header.set(\"revMinor\", \"0\")\n header.set(\"date\", datetime.today().strftime(\"%Y-%m-%dT%H:%M:%S\"))\n header.set(\"description\", \"Generated OpenSCENARIO File\")\n header.set(\"author\", \"QGIS OSCGenerator Plugin\")", "def get_header_separator():\n try:\n yml_iter = cfg.yml_config[\"separator\"]\n except:\n # Probably no \"separator\" section in the yml-file.\n return \" | \"\n return yml_iter", "def split_header_markup(self, source=None):\n if source is None:\n source = self.source\n\n header, markup = self.RE_SPLIT.match(source).groups()\n if not header:\n logging.warning('found no header')\n if not markup:\n logging.warning('markup is empty')\n logging.debug('markup length = %d' % len(markup))\n\n _header = {}\n if header:\n for item in header.split('\\n'):\n m = self.RE_HEADER.match(item)\n if not m:\n continue\n k, v = list(map(type('').strip, m.groups()))\n if k in ('labels', 'categories'):\n v = [_f for _f in [label.strip() for label in v.split(',')] if _f]\n elif k == 'draft':\n v = v.lower() in ('true', 'yes', '1')\n _header[k] = v\n header = _header\n\n logging.debug('header = %r' % header)\n\n return header, markup", "def getHeader(key):", "def getHeader():\n system, host, release, version, machine = os.uname()\n\n return \"# output generated by %s\\n# job started at %s on %s -- %s\\n# pid: %i, system: %s %s %s %s\" %\\\n (\" \".join(sys.argv),\n time.asctime(time.localtime(time.time())),\n host,\n global_id,\n os.getpid(),\n system, release, version, machine)", "def image_header(self):\n\n if not self._image_header:\n path_image_header = os.path.join(\n self._path, f\"ImageSet_{self._image['ImageSetID']}.header\"\n )\n\n # Make sure the ImageInfo file really exists\n if not os.path.exists(path_image_header):\n self.logger.warning(\n \"ImageHeader path doesn't exist: %s\", path_image_header\n )\n return None\n\n self.logger.debug(\"Reading image data from: %s\", path_image_header)\n self._image_header = {}\n with open(path_image_header) as f:\n for line in f:\n parts = line.split(\" = \")\n\n if len(parts) < 2:\n parts = line.split(\" : \")\n\n if len(parts) > 1:\n self._image_header[parts[0].strip()] = (\n parts[1].replace(\";\", \"\").replace(\"\\n\", \"\")\n )\n\n return self._image_header", "def get_header(font_package='helvet'):\n s = '\\\\documentclass{article}\\n'\n s += '\\\\usepackage[usenames,dvipsnames]{xcolor}\\n'\n s += '\\\\usepackage{tikz}\\n'\n s += '\\\\usepackage[active,tightpage]{preview}\\n'\n s += '\\\\usepackage{amsmath}\\n'\n if font_package is not None:\n s += '\\\\usepackage{%s}\\n'%(font_package)\n s += '\\\\usepackage{sfmath}\\n'\n s += '\\\\PreviewEnvironment{tikzpicture}\\n'\n s += '\\\\setlength\\PreviewBorder{5pt}\\n'\n return s", "def show_header():\n return {};", "def _info(self):\n text = ''.join(self._lines)\n rendered_text 
= jinja2.Template(text).render()\n return yaml.load(rendered_text)", "def get_export_header(self):\n\n name = self.get_name()\n\n if (self.name == \"input::nodes\"):\n\n name = \"user-specified\"\n\n grp_string = self.get_grp_string()\n\n if grp_string != \"\":\n\n grp_string = \" \" + grp_string\n\n return \"\\n!*!Label \" + self.path[1] + \" ..\" + grp_string + \" .. \" + name + \"\\n\"", "def parse_header(self):\n bodystart=re.compile(r\"<body>\", re.IGNORECASE).search(self.lines).span()[1]\n oldheader=self.lines[0:bodystart]\n start=re.compile(\"<title>\", re.IGNORECASE).search(oldheader).span()[1]\n finish=re.compile(\"</title>\", re.IGNORECASE).search(oldheader).span()[0]\n titles=oldheader[start:finish].split(\"--\")\n # Duplicate if needed\n if len(titles)==1: titles.append(titles[0])\n self.title, self.header= {}, {}\n for i, lang in enumerate(LANGS):\n self.title[lang]=titles[i]\n self.header[lang]=\"%s%s%s\" % (oldheader[:start], self.title[lang], oldheader[finish:],)", "def get_tfsheader(tfsfile):\n headerdata = pd.read_csv(tfsfile, delim_whitespace=True, nrows=44, index_col=None)\n headerdata.columns = ['AT', 'NAME', 'TYPE', 'VALUE']\n return headerdata[['NAME', 'VALUE']]", "def format_report_header(self):", "def extract_header(tgt_file):\n with open(tgt_file) as tf:\n h_lines = []\n for t_line in tf:\n s_line = t_line.strip().split()\n if len(s_line) < 2:\n h_lines.append(t_line)\n continue\n try:\n # If we have a timestep, this is not a header line\n int(s_line[0])\n break\n except ValueError:\n h_lines.append(t_line)\n return h_lines", "def header(self) -> str:\n value = self.kind\n if self.options:\n value += '; ' + '; '.join(f'{k}={v}' for k, v in self.options.items())\n return value" ]
[ "0.76283085", "0.68693167", "0.6808511", "0.65722984", "0.6551671", "0.654237", "0.639567", "0.6334269", "0.62707156", "0.6265091", "0.62361425", "0.62100714", "0.618234", "0.617842", "0.61768436", "0.61641693", "0.6152005", "0.61499393", "0.61389565", "0.6129883", "0.6125649", "0.6122791", "0.61000955", "0.60989016", "0.6081147", "0.60687083", "0.6067637", "0.6067637", "0.6067637", "0.6063588", "0.6057393", "0.6056954", "0.6056954", "0.6035772", "0.6015955", "0.5997138", "0.59957075", "0.59912884", "0.59912884", "0.5961099", "0.59598875", "0.5955708", "0.59347326", "0.5883376", "0.58788866", "0.5872122", "0.58575606", "0.5835191", "0.58260626", "0.58118767", "0.58016926", "0.5784936", "0.5779954", "0.57787126", "0.5745855", "0.5745855", "0.5743635", "0.573293", "0.5732221", "0.572948", "0.5720786", "0.56883526", "0.56883526", "0.56680846", "0.566595", "0.5665165", "0.5661881", "0.5660433", "0.5647977", "0.56436545", "0.5637314", "0.5624314", "0.56233096", "0.56214637", "0.56201243", "0.5609555", "0.5608784", "0.56080437", "0.56079507", "0.5595723", "0.55890703", "0.5573446", "0.55663383", "0.55605894", "0.5560034", "0.5558624", "0.5557318", "0.55534565", "0.554412", "0.55357724", "0.5527407", "0.552643", "0.5517399", "0.55106664", "0.5508733", "0.5501633", "0.5488094", "0.54856926", "0.5484762", "0.54768777" ]
0.62574416
10
This returns a single entry corresponding to the Directory Entity referred to by FolderEntityData. The returned string is given below (between Start and End) Start
def getFolderEntry(FolderEntityData): if FolderEntityData.Type not in ['IntermediateDir', 'ExperimentDir']: errprint('\nThe given EntityData does not represent the data of a directory') raise ValueError OutputLines = [] OutputLines.append("FolderID : {UID}".format(UID=FolderEntityData.ID)) OutputLines.append("ParentFolderID : {UID}".format(UID=FolderEntityData.ParentID)) OutputLines.append("FolderType : {Type}".format(Type=FolderEntityData.Type)) OutputLines.append("FolderTitle : {Title}".format(Title=FolderEntityData.Title)) OutputLines.append("FolderDescription: |-2") OutputLines += [" "+Line for Line in FolderEntityData.Description.splitlines()] OutputLines.append("") return "\n".join(OutputLines)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getFolderItemName(self) -> unicode:\n ...", "def getFolderPath(self) -> unicode:\n ...", "def directory_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"directory_id\")", "def get(self):\n return self.directory_name", "def metadataDirectory(self):\n guid_hash = utils.sha1hash(self.parentGuid)\n return str(Path('Metadata') / 'TV Shows' / guid_hash[0] / f'{guid_hash[1:]}.bundle')", "def metadataDirectory(self):\n guid_hash = utils.sha1hash(self.grandparentGuid)\n return str(Path('Metadata') / 'TV Shows' / guid_hash[0] / f'{guid_hash[1:]}.bundle')", "def path(self):\n return self._dir_entry.path", "def get_entries(self):\n for irde in self.Entries:\n if irde != None:\n if irde.Name & 0x80000000:\n # Points to a Name object\n name = obj.Object(\"_IMAGE_RESOURCE_DIR_STRING_U\", (irde.Name & 0x7FFFFFFF) + self.sectoffset, vm = self.obj_vm, parent = irde)\n else:\n name = int(irde.Name)\n if irde.DataOffset & 0x80000000:\n # We're another DIRECTORY\n retobj = obj.Object(\"_IMAGE_RESOURCE_DIRECTORY\", (irde.DataOffset & 0x7FFFFFFF) + self.sectoffset, vm = self.obj_vm, parent = irde)\n retobj.sectoffset = self.sectoffset\n else:\n # We're a DATA_ENTRY\n retobj = obj.Object(\"_IMAGE_RESOURCE_DATA_ENTRY\", irde.DataOffset + self.sectoffset, vm = self.obj_vm, parent = irde)\n yield (name, bool(irde.DataOffset & 0x80000000), retobj)", "def get_relative_name(self):\n\t\treturn call_sdk_function('PrlFsEntry_GetRelativeName', self.handle)", "def Dirname(self):\n result = self.Copy()\n\n while 1:\n last_directory = posixpath.dirname(result.last.path)\n if last_directory != \"/\" or len(result) <= 1:\n result.last.path = last_directory\n # Make sure to clear the inode information.\n result.last.inode = None\n\n break\n\n result.Pop(-1)\n\n return result", "def get_path(self):\n definition = self.client._perform_json(\"GET\", \"/project-folders/%s\" % self.project_folder_id)\n parent_id = definition.get(\"parentId\", None)\n if parent_id is not None:\n parent = DSSProjectFolder(self.client, parent_id)\n path = parent.get_path()\n return (\"\" if path == \"/\" else path) + \"/\" + definition.get(\"name\", \"\")\n else:\n return \"/\"", "def folder(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"folder\")", "def _get_folder(self):\n # type: () -> str\n headers = Headers({\"content-type\": \"application/json\", \"accept\": \"application/json\"})\n response = self.connection.api_call(\n \"GET\", [\"v1\", \"resources\", self.id, \"folderpath\"], headers=headers\n )\n\n return response.json().get(\"path\")", "def metadataDirectory(self):\n guid_hash = utils.sha1hash(self.guid)\n return str(Path('Metadata') / 'TV Shows' / guid_hash[0] / f'{guid_hash[1:]}.bundle')", "def folder(self):\n return self._folder", "def entity_path(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"entity_path\")", "def Directory(self) -> str:", "def directory_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"directory_id\")", "def get_folder_short_name_for_location(self, location):\n _method_name = 'get_folder_short_name_for_location'\n _logger.entering(location.get_folder_path(), class_name=_class_name, method_name=_method_name)\n folder_dict = self.__get_dictionary_for_location(location, False)\n result = ''\n if SHORT_NAME in folder_dict:\n result = folder_dict[SHORT_NAME]\n _logger.exiting(class_name=_class_name, method_name=_method_name, result=result)\n return result", "def media_folder_name(self):\n raise NotImplementedError", "def entity_prefix(self):", "def 
get_name(self):\n return self.client._perform_json(\"GET\", \"/project-folders/%s\" % self.project_folder_id).get(\"name\", None)", "def FullDirItems():\n return fulldiritems", "def _get_full_entity(entity: spacy.tokens.Token) -> str:\n entity_string = SpacyEventExtractor._get_chunk(entity)\n\n word = entity\n while True:\n prep, word = SpacyEventExtractor._get_prep_with_word(word)\n if word is None:\n break\n entity_string += \" \" + prep\n return entity_string", "def _parse_dir(self):\r\n type_char = self._grab_type()\r\n user_name = self._grab_unascii() #This gets the user_name field for the DirEntity\r\n self._match(\"\\t\")\r\n selector = self._grab_unascii() #This gets the selector.\r\n self._match(\"\\t\")\r\n host = self._grab_host()\r\n self._match(\"\\t\")\r\n port = self._grab_port()\r\n self._match(\"\\r\\n\")\r\n return DirEntity(type_char, user_name, selector, host, port)", "def GetNTFSFileEntry(self):\n return self._fsntfs_file_entry", "def _getEntityStartKey(entityId):\n return \"%s\\x1D\" % entityId", "def getGroupFolder(self):\n if platform.system()==\"Windows\":\n groupFolder = os.path.join(\"\\\\\\\\ursa\",\"AQOGroupFolder\")\n if platform.system()==\"Linux\":\n groupFolder = os.path.join(\"/media\",\"ursa\",\"AQOGroupFolder\")\n return groupFolder", "def getPath(self):\n uid = str(self._result.uid)\n if not uid.startswith('/zport/dmd'):\n uid = '/zport/dmd/' + uid\n return uid", "def main_entity_of_page(self) -> str:\n return self._main_entity_of_page", "def getEntity(self):\n\n fid = file(self.filename)\n entityre = re.compile(\"entity (\\w+) is\", re.IGNORECASE)\n\n matches = entityre.search(fid.read())\n self.entityname = matches.groups()[0]\n return self.entityname", "def dirname(self):\n _, tail = os.path.split(self.url)\n return self.folder + '/' + tail[:-4]", "def getPrefix(self):\n return( self.id.split('.')[0] )", "def entity_guid(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"entity_guid\")", "def showId(self):\n #Here I'm supposing that the name of the table, and the extent polygon gives a unique mapping.\n try:\n extent = self.geometry.extent\n name = self.grid_name\n res = self.dArea\n string = \"%s:%s:%s:%s\" %(self.parent_id,name,extent,res)\n return string\n except:\n logger.error(\"[biospatial.gbif.taxonomy.GriddedTaxonomy] \\n The total geometry area has not been defined. 
Try running mergeGeometries first\")\n raise Exception(\"Geometry Extent has not been instantiated\")\n return None", "def get_root_folder_details(client):\n try:\n root_folder = client.folder(folder_id='0').get()\n print(f\"The root folder is owned by: {root_folder.owned_by['login']}\")\n items = root_folder.get_items(limit=100, offset=0)\n print('This is the first 100 items in the root folder:')\n for item in items:\n print(\" \" + item.name, item.id)\n\n except Exception as e:\n print(f\"Error has occurred: {e}\")\n return None", "def home_directory(self):\n out = self._call(\"GETHOMEDIRECTORY\")\n return out.json()[\"Path\"]", "def getDataPart( job, data, lineStart ):\n\tposOne = data.find( lineStart )\n\tif posOne != -1:\n\t\tposOne = posOne + len( lineStart )\n\t\tposTwo = data.find( \"\\n\", posOne )\n\t\tsubstring = data[posOne:posTwo]\n\t\treturn substring\n\telse:\n\t\tjob.log(\"\\tCould not find \\\"\" + lineStart + \"\\\"\" )\n\t\treturn None", "def getDataPart( job, data, lineStart ):\n\tposOne = data.find( lineStart )\n\tif posOne != -1:\n\t\tposOne = posOne + len( lineStart )\n\t\tposTwo = data.find( \"\\n\", posOne )\n\t\tsubstring = data[posOne:posTwo]\n\t\treturn substring\n\telse:\n\t\tjob.log(\"\\tCould not find \\\"\" + lineStart + \"\\\"\" )\n\t\treturn None", "def getProjectRootFolder(self) -> ghidra.framework.model.DomainFolder:\n ...", "def name_folder_data(data):\n string = 'results/S0(%s)_sig(%s)_r(%s)_T(%s)_K(%s)' % data\n return string", "def __repr__(self):\n\n return self.datafile.file_path", "def subFolder(self):\r\n return self.__folder", "def directory(self):\n if not self.query[\"dataset\"]:\n raise ValueError(\"At least a dataset must be selected\")\n if not self.query[\"filter\"]:\n if self.query[\"item\"]:\n return \"{}/{}\".format(self.query[\"dataset\"], self.query[\"item\"])\n else:\n return self.query[\"dataset\"]\n else:\n if not self.query[\"item\"]:\n raise ValueError(\"You cannot set a filter without selecting an item\")\n else:\n return \"{}/{}/{}\".format(*self.query.values())", "def debug_guid(pe):\n if hasattr(pe, 'DIRECTORY_ENTRY_DEBUG'):\n for i in pe.DIRECTORY_ENTRY_DEBUG:\n if hasattr(i.entry, 'Signature_Data1'):\n return '{:08x}-{:04x}-{:-4x}-{}-{}{}'.format(\n i.entry.Signature_Data1,\n i.entry.Signature_Data2,\n i.entry.Signature_Data3,\n hex_reverse(i.entry.Signature_Data4, 4),\n hex_reverse(i.entry.Signature_Data5, 4),\n hex_reverse(i.entry.Signature_Data6, 8)\n )\n return None", "def directory(self) -> str:\n return self._values.get(\"directory\")", "def __str__(self):\n basename = []\n for key, val in self.entities.items():\n if key not in ('prefix', 'suffix') and \\\n val is not None:\n _check_key_val(key, val)\n # convert certain keys to shorthand\n if key == 'subject':\n key = 'sub'\n if key == 'session':\n key = 'ses'\n if key == 'acquisition':\n key = 'acq'\n if key == 'processing':\n key = 'proc'\n if key == 'recording':\n key = 'rec'\n basename.append('%s-%s' % (key, val))\n\n if self.suffix is not None:\n basename.append(self.suffix)\n\n basename = '_'.join(basename)\n if self.prefix is not None:\n basename = op.join(self.prefix, basename)\n\n return basename", "def entity_group(self):\n return self.key().entity_group()", "def GetFeedParent(organization, project, folder):\n if organization:\n return 'organizations/{0}'.format(organization)\n if folder:\n return 'folders/{0}'.format(folder)\n return 'projects/{0}'.format(project_util.GetProjectNumber(project))", "def to_string(self):\n def index_to_string(index):\n 
\"\"\"Return printable version of index without any whitespace\"\"\"\n if index:\n s = \"/\".join(index)\n return Quote(s)\n else:\n return \".\"\n\n slist = [\"Volume %d:\" % self.volume_number]\n whitespace = \" \"\n slist.append(\"%sStartingPath %s %s\" %\n (whitespace, index_to_string(self.start_index), (self.start_block or \" \")))\n slist.append(\"%sEndingPath %s %s\" %\n (whitespace, index_to_string(self.end_index), (self.end_block or \" \")))\n for key in self.hashes:\n slist.append(\"%sHash %s %s\" %\n (whitespace, key, self.hashes[key]))\n return \"\\n\".join(slist)", "def displayname(self):\n if self.path.is_dir():\n if (is_uuid(self.path.parts[-1])):\n self.is_uuid_folder = True\n return self.path.name + '/'\n elif is_proj(self.path.parts[-1]):\n return f'{bcolors.BOLD}' + self.path.name + f'{bcolors.ENDC}'\n return self.path.name", "def get(self):\n result = self.session.get(urljoin(self.base_url, self.directory))\n if result.ok:\n data = result.json()\n\n if self.raw:\n return data\n\n if not self.query[\"item\"]:\n return data[\"ids\"]\n else:\n if self.query[\"filter\"] == \"links\":\n return data[\"links\"]\n else:\n return data.get(\"entry\", None)\n else:\n raise RuntimeError(\"Malformed Request\")", "def __repr__(self):\n\n return \"<Entry location=%s>\" % (self.name)", "def __str__(self):\n return '{0}'.format(self.path.name[8:], )", "def showId(self):\n extent = self.getExtent()\n id = self.parent_id\n levels = self.getLevels()\n prefix = settings.NESTED_TAXONOMY_PREFIX\n \n # name = prefix,id,levels,extent\n \n name = '%s:%s:%s:%s' %(prefix,id,levels,extent)\n return name", "def get_datastore_id(options):\n service_instance = get_vc_content(options)\n datastore = options.datastore\n datacenter = get_datacenter(options)\n for item in datacenter.datastoreFolder.childEntity:\n if (item.name == datastore):\n return item._GetMoId()", "def import_dir(self):\n return self._directory(1) # DIRECTORY_ENTRY_IMPORT", "def full_name(self) -> str:\n # return self.separator.join(map(lambda x: x.name, self.path()))\n return self.separator.join(map(lambda x: x.tagged_name, self.path()))", "def directory_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"directory_id\")", "def __str__(self):\n return '{0}'.format(self.path.name[2:])", "def get_folder(self):\n return os.path.join(\n settings.PRIVATE_STORAGE_ROOT, Exam.EXAM_FILES_LOCATION,\n str(self.unique_id)[0:2])", "def getExperimentEntry(ExpEntityData):\n\n # Validate that ExpEntityData actually corresponds to an Experiment Entity\n if ExpEntityData.Type != 'Experiment':\n errprint(\"\\nThe Entity Data does not represent the data of an experiment\")\n raise ValueError\n\n OutputLines = []\n OutputLines.append(\"\")\n OutputLines.append(\"- ID : {ID}\".format(ID=ExpEntityData.ID))\n OutputLines.append(\" Title : {Title}\".format(Title=ExpEntityData.Title))\n OutputLines.append(\" Description: |-2\")\n OutputLines += [\" \"+Line for Line in ExpEntityData.Description.splitlines()]\n OutputLines.append(\"\")\n OutputLines.append(\n \"{0:#<100}\".format(\"## End of Experiment {UID} \".format(UID=ExpEntityData.ID)))\n\n return \"\\n\".join(OutputLines)", "def entity_extract(self, eid):\n fname = os.path.join(\n self.data_dir_base, \"entities\", self.code, \"extracts\", f\"{eid}.txt\"\n )\n if os.path.exists(fname):\n with open(fname) as f:\n return \"\".join(f.readlines())\n return \"\"", "def __str__(self):\r\n if self.root is None:\r\n return 'Folder <root>'\r\n else:\r\n ind = self.pids.index(self.root)\r\n if self.ptypes[ind] 
== 'folder':\r\n return 'Folder (' + self.root + ')'\r\n elif self.ptypes[ind] == 'program':\r\n return 'Program (' + self.root + ')'", "def extent(obj):\n return obj.get('startOffset', -1), obj.get('endOffset', -1)", "def extent(obj):\n return obj.get('startOffset', -1), obj.get('endOffset', -1)", "def folder(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"folder\")", "def entry(self) -> Optional[str]:\n return pulumi.get(self, \"entry\")", "def metadataDirectory(self):\n guid_hash = utils.sha1hash(self.guid)\n return str(Path('Metadata') / 'Movies' / guid_hash[0] / f'{guid_hash[1:]}.bundle')", "def metadataDirectory(self):\n guid_hash = utils.sha1hash(self.guid)\n return str(Path('Metadata') / 'Movies' / guid_hash[0] / f'{guid_hash[1:]}.bundle')", "def prefix(self):\n return str(self.data)", "def getType(self):\n return consts.IMAGE_DATA_DIRECTORY", "def get_mds_fullname(node):\n return str(node.getFullPath()).lower()", "def media_entry_id(self):\n return self.getattr('media_entry_id')", "def entry_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"entry_group_id\")", "def entry_id(self):\n if self.lexid is not None:\n return self.lexid.split('.')[0]\n else:\n return None", "def _blob_folder(self):\r\n\r\n # extend with tenant_id and/or subscription_id if multi-tenant/subscription support required\r\n return f\"{self.cloud_folder}/{self.account_name}/{self.container_name}\"", "def head(self):\n return os.path.splitext(self.path)[0]", "def slipDirLabel(self):\n return '[' + ''.join(map(strIdx, self.dirIdc)) + ']'", "def folder(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"folder\")", "def folder(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"folder\")", "def _getEntityEndKey(entityId):\n return \"%s\\x1E\" % entityId", "def to_line_start(self):\n # type: () -> LineNo\n metadata = self.safely_parse_metadata()\n return metadata[-1][0]", "def dir_name(self):\n return self._dir", "def shpname(self):\n _, tail = os.path.split(self.url)\n return self.folder + ('/' + tail[:-4]) * 2", "def _ComponentFromDirmd(json_data, subpath):\n # If no component exists for the directory, or if METADATA migration is\n # incomplete there will be no component information.\n return json_data.get('dirs', {}).get(subpath,\n {}).get('monorail',\n {}).get('component', '')", "def get_standard_directory(self):\n dirpath = (c.c_char * 256)()\n self._lib.StGetDirectory(c.c_int(1), c.byref(dirpath), 256)\n return dirpath.value.decode('ascii')", "def directory_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"directory_id\")", "def directory_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"directory_id\")", "def directory_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"directory_id\")", "def get_parent_entry(self):\n\t\treturn handle_to_object(call_sdk_function('PrlFsInfo_GetParentEntry', self.handle))", "def get_definition(self):\n return self.client._perform_json(\n \"GET\", \"/projects/%s/managedfolders/%s\" % (self.project_key, self.odb_id))", "def getUniStr(self):\n return(\"%s/%s\"%(Entity.getUniStr(self),self.semanticId))", "def get_full_folder_path(self):\n data_dir_path = os.path.join(settings.MEDIA_ROOT,self.folder)\n return data_dir_path", "def absolute_folder_name(self):\n return 'music_decompose/media/{0}'.format(self.media_folder_name)", "def identify_folder(self, folder):", "def getPath(self):\n return self.__folder", "def _get_root_metadata(self):\n r = self._do_request(\n 'get',\n 
http_server_utils.join_url_components(\n [self._api_drive_endpoint_prefix, 'root']),\n params={'select': 'id,name,fileSystemInfo'})\n return r.json()", "def GetRoot(self):\n\n return self.GetPath()", "def entity_id(self) -> str:\n return self._entity_id" ]
[ "0.5994919", "0.56029546", "0.5287925", "0.52766377", "0.5237696", "0.52345103", "0.5213454", "0.5185664", "0.517336", "0.51614946", "0.51094973", "0.51090777", "0.50988936", "0.50865364", "0.50664777", "0.5054286", "0.50433457", "0.5036197", "0.50105923", "0.4974974", "0.49746954", "0.49649084", "0.49560356", "0.4953408", "0.4924727", "0.49246567", "0.49189046", "0.4891939", "0.48900315", "0.48747793", "0.48613036", "0.48534465", "0.48420897", "0.48250324", "0.482078", "0.4819409", "0.48142925", "0.48103184", "0.48103184", "0.48101658", "0.48056757", "0.48012668", "0.47948527", "0.47929302", "0.47904834", "0.47761944", "0.47760272", "0.47570288", "0.4755332", "0.47535214", "0.47509405", "0.47418928", "0.47399217", "0.47354177", "0.4730052", "0.4728686", "0.4724261", "0.4717584", "0.47166818", "0.47154364", "0.47135174", "0.47128162", "0.47105202", "0.4707579", "0.46991926", "0.46991926", "0.46890032", "0.4676615", "0.46721694", "0.46721694", "0.46603885", "0.4659646", "0.46551678", "0.4648012", "0.4641675", "0.46415097", "0.4629915", "0.46288094", "0.4628476", "0.46211347", "0.46211347", "0.46183124", "0.46167812", "0.4610638", "0.46099824", "0.46095508", "0.46080077", "0.4593399", "0.4593399", "0.4593399", "0.4591363", "0.45901376", "0.45873773", "0.45873445", "0.45812997", "0.4577941", "0.45744", "0.45716584", "0.45685735", "0.45633057" ]
0.7627963
0
This returns a single entry corresponding to the Experiment Entity referred to by ExpEntityData. The returned string is given below (between Start and End) Start
def getExperimentEntry(ExpEntityData): # Validate that ExpEntityData actually corresponds to an Experiment Entity if ExpEntityData.Type != 'Experiment': errprint("\nThe Entity Data does not represent the data of an experiment") raise ValueError OutputLines = [] OutputLines.append("") OutputLines.append("- ID : {ID}".format(ID=ExpEntityData.ID)) OutputLines.append(" Title : {Title}".format(Title=ExpEntityData.Title)) OutputLines.append(" Description: |-2") OutputLines += [" "+Line for Line in ExpEntityData.Description.splitlines()] OutputLines.append("") OutputLines.append( "{0:#<100}".format("## End of Experiment {UID} ".format(UID=ExpEntityData.ID))) return "\n".join(OutputLines)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_full_entity(entity: spacy.tokens.Token) -> str:\n entity_string = SpacyEventExtractor._get_chunk(entity)\n\n word = entity\n while True:\n prep, word = SpacyEventExtractor._get_prep_with_word(word)\n if word is None:\n break\n entity_string += \" \" + prep\n return entity_string", "def entity_description(self, eid):\n entities = self._load_entities()\n return entities[eid][\"description\"]", "def entity_extract(self, eid):\n fname = os.path.join(\n self.data_dir_base, \"entities\", self.code, \"extracts\", f\"{eid}.txt\"\n )\n if os.path.exists(fname):\n with open(fname) as f:\n return \"\".join(f.readlines())\n return \"\"", "def getEntity(self):\n\n fid = file(self.filename)\n entityre = re.compile(\"entity (\\w+) is\", re.IGNORECASE)\n\n matches = entityre.search(fid.read())\n self.entityname = matches.groups()[0]\n return self.entityname", "def getexperimentinfo(expid):\n rdata = {}\n rdata['expId'] = expid\n res = requests.get(scbd_server_address + '/experiments/get_details', json=rdata)\n if res.status_code == 200:\n outstr = ''\n for cres in res.json()['details']:\n outstr += cres[0] + ':' + cres[1] + '<br>'\n # details=res.json()['details']\n return outstr\n return []", "def _getEntityEndKey(entityId):\n return \"%s\\x1E\" % entityId", "def current_entity(self):\n return self.entities[len(self.device_data[CONF_ENTITIES])]", "def _getEntityStartKey(entityId):\n return \"%s\\x1D\" % entityId", "def extent(obj):\n return obj.get('startOffset', -1), obj.get('endOffset', -1)", "def extent(obj):\n return obj.get('startOffset', -1), obj.get('endOffset', -1)", "def main_entity_of_page(self) -> str:\n return self._main_entity_of_page", "def entity_guid(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"entity_guid\")", "def test_entity(self):\n self.request.log(\"Hello World\", entities=(Entity(1337)(12, \"Demo\"),))\n self.request.end()\n entry = self.get_entry()\n assert 'entities' in entry\n assert len(entry['entities']) == 1\n assert entry['entities'][0] == dict(entity=1337, id=12, name=\"Demo\")", "def __str__(self) -> str:\n st = \"<Entity>: \\n{\\n\"\n for k, v in self._keys.items():\n if not isinstance(v, list):\n st += f\"\\t {k} = \\\"{v}\\\"\\n\"\n if self._fixup is not None:\n for k, v in self.fixup.items():\n st += f\"\\t ${k} = \\\"{v}\\\"\\n\"\n\n for out in self.outputs:\n st += f'\\t{out!s}\\n'\n st += \"}\\n\"\n return st", "def _entity_as_text(self):\n return str(self.value)", "def get_description(self):\n return \"It is an Entity.\"", "def exp_metadata(self) -> LabExperiment:\n\n return self._exp_metadata", "def GetEntity(self):\n\t\treturn self.acad.ActiveDocument.Utility.GetEntity()", "def entity_name(self):\n return self.__entity_name", "def eid(self):\n return self._json['coredata']['eid']", "def entity_key(mention):\n return mention.get('entityId')", "def entity_id(self) -> str:\n return self._entity_id", "def entity(self):\n return self._entity", "def entity(self):\n return self._entity", "def __repr__(self) -> str:\n desc: List[str] = []\n if classname := self['classname']:\n desc.append(classname)\n desc.append('Entity')\n if name := self['targetname']:\n desc.append(f'\"{name}\"({classname})')\n else:\n desc.append(classname)\n if hammerid := self['hammerid']:\n desc.append(f'#{hammerid}')\n if origin := self['origin']:\n desc.append(f'@ ({origin})')\n return f'<{\" \".join(desc)}>'", "def entity_path(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"entity_path\")", "def entity_search():\n data = {'EntityType': entity_type}\n 
parameters = data_to_json(data)\n url = base_url + 'ams/entity/search'\n response = make_request(url, parameters)\n r_value = ''\n if response['Status'] == 0:\n r_value = response['Value']['Records']\n return r_value", "def GetEntity(self):\n return self.__entity", "def entity_snippet(response):\n for result in response.results:\n e_set = extract_entities(result.summary)\n result.summary = ' '.join(e_set)\n return response", "def get_entity_name() -> str:\n return \"NewsItemEntity\"", "def experiment_name(self):\n # type: () -> string_types\n return self._experiment_name", "def GetDescription(cls):\n return textwrap.dedent('''\n This trace step includes a diagram of the Ego long. acceleration in the report.\n ''').strip()", "def entity_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"entity_name\")", "def entity_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"entity_name\")", "def entry(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"entry\")", "def entry(self) -> Optional[str]:\n return pulumi.get(self, \"entry\")", "def test_entity_default_name(self):\n self.request.log(\"Hello World\", entities=(Entity(\"entity\")(12),))\n self.request.end()\n entry = self.get_entry()\n assert 'entities' in entry\n assert len(entry['entities']) == 1\n assert entry['entities'][0] == dict(entity=\"entity\", id=12, name=\"12\")", "def _get_tooltip_text(ip_entity):\n return \"<br>\".join(\n str(line)\n for line in [\n ip_entity.Address,\n ip_entity.Location.City or \"Unknown city\",\n ip_entity.Location.CountryCode or \"Unknown country\",\n *(list(ip_entity.AdditionalData.items())),\n ]\n )", "def entity(self, rawbase_name, entity_id):\n url = \"%s/record/%s?id=%s\" % (self.api, rawbase_name, entity_id)\n return self.__get_request(url, 5)", "def get_etag(self): # real signature unknown; restored from __doc__\n return \"\"", "def __str__(self):\n outbuffer = []\n outbuffer.append(\"%d keys in dataset\" % len(self.__quantile))\n outbuffer.append(self.head())\n outbuffer.append(\"...\")\n outbuffer.append(self.tail())\n return \"\\n\".join(outbuffer)", "def entity(self, elem):\n return data.Entity(self, elem)", "def get_entity(endpoint):\n _entity, _id = parser_endpoint(endpoint)\n\n return _entity", "def entity(entity):\n for start, end in entity.spans:\n yield start, end-start", "def __repr__(self):\n return self._metadata.__str__()", "def get_entry_string(self):\n return f\"{self.get_time_string()} {self.mode} {self.radar}\"", "def id(self): # type: () -> str\n return self.inspection['Id']", "def print_info(self):\n print(\"Experiment key: \" + self.key)\n print(\"Experiment name: \" + self.name)\n print(\"Experiment path: \" + self.output_path)\n print(\"Auto-sync activated: \" + str(self.auto_sync))\n print(\"\")\n print(\"Experiment metadata: \")\n print(self.exp_metadata.to_str())", "def get_text(adm, obj):\n return adm['data'][slice(*extent(obj))]", "def get_entity(obj):\n return obj.or_expression.and_expression.cmp_expression.arith_expression. \\\n mul_expression.unary_expression.pow_expression.primary_expression. 
\\\n entity", "def __repr__(self) -> str:\n output = f\"<Dataset(id={self.id}, name={self.dataset_name}\"\n output += f\"organisation name={self.organization_name},n\"\n output += f\"reference period={self.dataset_date}, update frequency={self.update_frequency}, \"\n output += f\"review_date={str(self.review_date)}, last_modified={str(self.last_modified)},\"\n output += f\"updated_by_script={str(self.updated_by_script)}, metadata_modified={str(self.metadata_modified)})>\"\n return output", "def etag(self) -> str:\n return pulumi.get(self, \"etag\")", "def etag(self) -> str:\n return pulumi.get(self, \"etag\")", "def etag(self) -> str:\n return pulumi.get(self, \"etag\")", "def etag(self) -> str:\n return pulumi.get(self, \"etag\")", "def etag(self) -> str:\n return pulumi.get(self, \"etag\")", "def etag(self) -> str:\n return pulumi.get(self, \"etag\")", "def etag(self) -> str:\n return pulumi.get(self, \"etag\")", "def etag(self) -> str:\n return pulumi.get(self, \"etag\")", "def etag(self) -> str:\n return pulumi.get(self, \"etag\")", "def etag(self) -> str:\n return pulumi.get(self, \"etag\")", "def etag(self) -> str:\n return pulumi.get(self, \"etag\")", "def _get_popup_text(ip_entity):\n return \"<br>\".join(\n str(line)\n for line in [\n ip_entity.Address,\n *(list(ip_entity.Location.properties.values())),\n *(list(ip_entity.AdditionalData.items())),\n ]\n )", "def pretty_print_entity(entity: tg.tl.TLObject) -> str:\n\n return bprint.bprint(entity, stream=str, skip_predicate=_bprint_skip_predicate)", "def exp_in(self) -> str:\n if self.inst_in:\n return 'instance:' + self.inst_in + ';' + self.input\n else:\n return self.input", "def get(self):\n return self.__expedition", "def __str__(self):\n s = \"Projection info:\\n\"\n s += \" #instances: \" + str(self.data_ninstances) + \"\\n\"\n s += \" data dimension: \" + str(self.data_dim) + \"\\n\"\n s += \" projection dimension: \" + str(self.projection_dim) + \"\\n\"\n s += \" data: \" + str(self.data[0]) + \"\\n\"\n s += \" \" + str(self.data[1]) + \"...\\n\"\n s += \" projection: \" + str(self.projection[0]) + \"\\n\"\n s += \" \" + str(self.projection[1]) + \"...\"\n return s", "def get_docstring(self, entity_lines):\n entity = ''.join(entity_lines)\n result = re.search(r'(?sm)\\\"\\\"\\\"(.*?)\\\"\\\"\\\"', entity)\n if result is None:\n return ''\n return self._trim(result.groups()[0])", "def details_string(self):\n parts = self.tree.xpath('span[position()=last()]//text()')\n\n return ''.join(parts)", "def entity_data(self, entity_name, limit=10):\n from sagas.ofbiz.entities import OfEntity as e, finder, record_list_df\n # limit = 10\n offset = 0\n result = finder.find_list(entity_name, limit, offset)\n result = record_list_df(entity_name, result, drop_null_cols=True, contains_internal=False)\n print(result)", "def etag(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"etag\")", "def etag(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"etag\")", "def etag(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"etag\")", "def etag(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"etag\")", "def etag(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"etag\")", "def etag(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"etag\")", "def etag(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"etag\")", "def entity_id(self):\n return self._entity_id", "def entity_id(self):\n return self._entity_id", "def entity_id(self):\n return self._entity_id", "def entity_id(self):\n return self._entity_id", 
"def entity_id(self):\n return self._entity_id", "def getFolderEntry(FolderEntityData):\n\n if FolderEntityData.Type not in ['IntermediateDir', 'ExperimentDir']:\n errprint('\\nThe given EntityData does not represent the data of a directory')\n raise ValueError\n\n OutputLines = []\n \n OutputLines.append(\"FolderID : {UID}\".format(UID=FolderEntityData.ID))\n OutputLines.append(\"ParentFolderID : {UID}\".format(UID=FolderEntityData.ParentID))\n OutputLines.append(\"FolderType : {Type}\".format(Type=FolderEntityData.Type))\n OutputLines.append(\"FolderTitle : {Title}\".format(Title=FolderEntityData.Title))\n OutputLines.append(\"FolderDescription: |-2\")\n OutputLines += [\" \"+Line for Line in FolderEntityData.Description.splitlines()]\n OutputLines.append(\"\")\n\n return \"\\n\".join(OutputLines)", "def getExpansion(self, data):\n pass", "def return_entity(self, entity, request, environ, start_response,\n response_headers, status=200, status_msg=\"Success\"):\n response_type = self.content_negotiation(\n request, environ, self.EntryTypes)\n if response_type is None:\n return self.odata_error(\n request, environ, start_response, \"Not Acceptable\",\n 'xml, json or plain text formats supported', 406)\n # Here's a challenge, we want to pull data through the feed by\n # yielding strings just load in to memory at the moment\n if response_type == \"application/json\":\n data = str('{\"d\":%s}' %\n ''.join(entity.generate_entity_type_in_json()))\n else:\n doc = core.Document(root=core.Entry)\n e = doc.root\n e.set_base(str(self.service_root))\n e.set_value(entity)\n data = str(doc)\n data = data.encode('utf-8')\n response_headers.append((\"Content-Type\", str(response_type)))\n response_headers.append((\"Content-Length\", str(len(data))))\n self.set_etag(entity, response_headers)\n start_response(\"%i %s\" % (status, status_msg), response_headers)\n return [data]", "def get_eco_details(self, pgn_data):\n result = eco_mapping['unknown']\n\n try:\n moves = self.get_moves(pgn_data)\n current_sequence = ''\n\n for move in moves:\n half_move = '.'.join([move[0], move[1]])\n current_sequence += half_move\n\n if current_sequence in eco_mapping:\n result = eco_mapping[current_sequence]\n else:\n break\n\n current_sequence = ' '.join([current_sequence, move[2]])\n\n if current_sequence in eco_mapping:\n result = eco_mapping[current_sequence]\n else:\n break\n\n current_sequence += ' '\n except:\n pass\n\n return result", "def E(self):\n return self._properties['E']", "def get_description(self):\n return \"-\".join(\n map(str, (self.release, self.chromosome, self.start, self.reference, self.alternative))\n )", "def extract(self): # type: () -> str\n if self.end():\n return self._src[self._marker :]\n else:\n return self._src[self._marker : self._idx]", "def __str__(self):\n return self._str_hsp_header() + \"\\n\" + self._str_aln()", "def get_formed_entity(self, match, col_name):\n matched_str = match.groups()[0]\n location_start, location_end = match.span()\n formed_entity = {\n \"location_start\": location_start,\n \"location_end\": location_end,\n \"source\": matched_str\n }\n if self.result_columns:\n matched_rows = self.dataframe[self.dataframe[col_name].str.contains(r'(?:^|;){}(?:$|;)'.format(matched_str), regex=True)]\n if self.unique_column_values:\n matched_row = self.get_single_result(matched_rows)\n for _col_name, new_col_name in self.result_columns.items():\n formed_entity[new_col_name] = matched_row[_col_name]\n else:\n formed_entity[\"entities\"] = []\n for _, matched_row in 
matched_rows.iterrows():\n sub_entity = {}\n for _col_name, new_col_name in self.result_columns.items():\n sub_entity[new_col_name] = matched_row[_col_name]\n formed_entity[\"entities\"].append(sub_entity)\n\n formed_entity.update(self.preformed_entity)\n return formed_entity", "def aqGetExpName(self):\n return self._expname", "def showId(self):\n #Here I'm supposing that the name of the table, and the extent polygon gives a unique mapping.\n try:\n extent = self.geometry.extent\n name = self.grid_name\n res = self.dArea\n string = \"%s:%s:%s:%s\" %(self.parent_id,name,extent,res)\n return string\n except:\n logger.error(\"[biospatial.gbif.taxonomy.GriddedTaxonomy] \\n The total geometry area has not been defined. Try running mergeGeometries first\")\n raise Exception(\"Geometry Extent has not been instantiated\")\n return None", "def __str__(self):\n output = ['Tile ID {}'.format(self._tileid)]\n for ex, files in self._exposure_files.items():\n filenames = '- exposure {:08d}\\n'.format(ex)\n for f in files:\n filenames = '{} + {}\\n'.format(filenames, f)\n output.append(filenames)\n\n return '\\n'.join(output)", "def short(self):\n if len(self.entities) == 0:\n return '.'\n elif len(self.entities) == 1:\n return iter(self.entities).next().short()\n else:\n return '*'", "def __getitem__(self, key):\n return self.graph.readExtendedAttributes(self.entityId, key)[0]", "def __repr__(self) -> str:\n if not self.hass:\n return f\"<Entity {self.name}>\"\n\n return super().__repr__()", "def pt_effort(self):\n if not self.effort:\n return \"\"\n\n (effort, effort_unit) = self.effort\n unit_letter = effort_unit[0].upper()\n return f\"PT{effort:d}{unit_letter:s}\"", "def info():\n return r\"\"\"Lin-Yu Tseng and Chun Chen, \"Multiple trajectory search for Large Scale Global Optimization,\" 2008 IEEE Congress on Evolutionary Computation (IEEE World Congress on Computational Intelligence), Hong Kong, 2008, pp. 3052-3059. doi: 10.1109/CEC.2008.4631210\"\"\"", "def get_meta_str(self):\n s = []\n t = \"%-32s: %s\"\n s.append(t % (\"Edition\", self._meta.get(\"edition\", \"---\")))\n s.append(t % (\"Master-table\", self._meta.get(\"master\", \"---\")))\n cc = self._meta.get(\"center\", \"---\")\n cs = self._meta.get(\"subcenter\", \"---\")\n if self._tables is not None:\n cc = self._tables.lookup_codeflag(1033, cc)\n cs = self._tables.lookup_codeflag(1034, cs)\n s.append(t % (\"Centre\", cc))\n s.append(t % (\"Sub-Centre\", cs))\n s.append(t % (\"Update sequence number\", self._meta.get(\"update\", \"---\")))\n s.append(t % (\"Type of data\", (\"observed\" if self._meta.get(\"obs\", 0) else \"other\")))\n dc = self._meta.get(\"cat\", \"---\")\n if self._tables is not None:\n dc = self._tables.lookup_common(dc)\n s.append(t % (\"Data category\", dc))\n s.append(t % (\"International data sub-category\", self._meta.get(\"cat_int\", \"---\")))\n s.append(t % (\"Local data sub-category\", self._meta.get(\"cat_loc\", \"---\")))\n s.append(t % (\"Version number of master table\", self._meta.get(\"mver\", \"---\")))\n s.append(t % (\"Version number of local table\", self._meta.get(\"lver\", \"---\")))\n s.append(t % (\"Most typical time\", self._meta.get(\"datetime\", \"---\")))\n s.append(t % (\"Optional section present\", (\"yes\" if self._meta.get(\"sect2\", False) else \"no\")))\n s.append(t % (\"Compression\", (\"yes\" if self._meta.get(\"comp\", False) else \"no\")))\n s.append(t % (\"Number of data subsets\", self._meta.get(\"subsets\", \"---\")))\n return \"\\n\".join(s)" ]
[ "0.6376407", "0.5688458", "0.56678385", "0.5637462", "0.5474638", "0.5404834", "0.5358794", "0.53286546", "0.5274664", "0.5274664", "0.52555805", "0.5246036", "0.51556396", "0.51428646", "0.51405764", "0.51387566", "0.51039517", "0.5103533", "0.5092884", "0.5080785", "0.50753295", "0.5066855", "0.5056109", "0.5056109", "0.5042214", "0.5036803", "0.5013337", "0.5006676", "0.5000942", "0.500042", "0.49913874", "0.4973963", "0.4909544", "0.4909544", "0.4903674", "0.48994052", "0.48765194", "0.48692822", "0.48606557", "0.48579344", "0.4857307", "0.48453587", "0.48303896", "0.48278096", "0.48216692", "0.47993743", "0.47958586", "0.47924137", "0.47918117", "0.47748196", "0.47612777", "0.4742283", "0.4742283", "0.4742283", "0.4742283", "0.4742283", "0.4742283", "0.4742283", "0.4742283", "0.4742283", "0.4742283", "0.4742283", "0.47382092", "0.4738042", "0.47376662", "0.4736553", "0.47312146", "0.47299483", "0.47281304", "0.4726328", "0.4722655", "0.4722655", "0.4722655", "0.4722655", "0.4722655", "0.4722655", "0.4722655", "0.47183058", "0.47183058", "0.47183058", "0.47183058", "0.47183058", "0.47118053", "0.47117335", "0.47096872", "0.47037864", "0.47037515", "0.4703683", "0.4703167", "0.46994883", "0.4699065", "0.4697599", "0.46969026", "0.46942332", "0.46925518", "0.46919778", "0.46904963", "0.46789613", "0.46778828", "0.4672679" ]
0.81062454
0
a constructor for an EmployeeAccess object
def __init__(self): self.dbconnect = dbConnection.connection
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, employee_id, name, supervisor_id, lft, rgt):\n self.employee_id = employee_id\n self.name = name\n self.supervisor_id = supervisor_id\n self.lft = lft\n self.rgt = rgt", "def __init__(self,name,empid,designation,experience):\n self.name = name\n self.empid = empid\n self.designation = designation\n self.experience = experience\n self.salary = self.cal_sal()", "def __init__(self, **kwargs):\n _declarative_constructor(self, **kwargs)", "def __init__(self, eid: str, name: str):\n self.eid = eid\n self.name = name", "def __init__(self, first_name, last_name, department, age=None, salary=0.0):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age\n self.department = department\n self.salary = salary", "def __init__(self, name, address, phone, badge, salary):\r\n\r\n self.name = name\r\n self.address = address\r\n self.phone = phone\r\n self.badge = badge\r\n self.salary = salary", "def setUp(self):\n\tself.emp = Employee('Lin',10000)\n\tself.emp2 = Employee('Jun',20000)", "def setUp(self):\n self.employee = Employee('Lucas', 'Guerra', 45000)", "def test_access_employee(self):\n # Employee can't see any SO\n with self.assertRaises(AccessError):\n self.order.with_user(self.company_data['default_user_employee']).read()\n # Employee can't edit the SO\n with self.assertRaises(AccessError):\n self.order.with_user(self.company_data['default_user_employee']).write({'team_id': self.company_data['default_sale_team'].id})\n # Employee can't create the SO\n with self.assertRaises(AccessError):\n self.env['sale.order'].with_user(self.company_data['default_user_employee']).create({\n 'partner_id': self.partner_a.id,\n })\n # Employee can't delete the SO\n with self.assertRaises(AccessError):\n self.order.with_user(self.company_data['default_user_employee']).unlink()", "def __init__(self, **kwds):\n raise NotImplementedError", "def __init__(self,fname,lname,salary):\n self.first_name = fname\n self.last_name = lname\n self.salary = salary", "def setUp(self):\n self.my_employee = Employee('knight', 'lee', 10000)", "def __init__(self, first_name, last_name, annual_salary):\n\t\tself.first_name = first_name\n\t\tself.last_name = last_name\n\t\tself.annual_salary = annual_salary", "def __init__(self, **kwargs):\n raise NotImplementedError", "def __init__(self, first_name, last_name, age, title, department):\n super().__init__(first_name, last_name, age, title, department)\n self.privilages = Privileges()", "def setUp(self):\n self.salary = 40000\n self.custom_rise = 7500\n self.employee = Employee(\"Carlos\", \"Zapata\", self.salary)", "def __init__(self, first_name, last_name, age, sex, city):\n\t\tsuper().__init__(first_name, last_name, age, sex, city)\n\t\tself.privileges = Privileges()", "def setUp(self):\n\t\tself.mason = Employee(\"mason\",\"karsevar\",10000)", "def __init__(self, first, last, salary):\n\t\tself.first = first \n\t\tself.last = last \n\t\tself.salary = salary", "def __init__(self, first_name, last_name, address):\n\n self.first_name = first_name\n self.last_name = last_name\n self.address = address", "def __init__(self, enclosures, employees):\n self.enclosures = enclosures\n self.employees = employees\n self.speciesList = [Monkey.getSpeciesInfo(), Gorilla.getSpeciesInfo(), PolarBear.getSpeciesInfo()]", "def __init__(self, first_name, last_name, age):\n\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, name, age):\n\t\tself.name = name\n\t\tself.age = age", "def __init__(self, name, 
age):\n\t\tself.name = name\n\t\tself.age = age", "def __init__(self, name, age):\n\t\tself.name = name\n\t\tself.age = age", "def __init__(self, store_id, name, address, city, state, zipcode, phone):\n self.store_id = store_id # 'id' is already a built-in attribute\n self.name = name\n self.address = address\n self.city = city\n self.state = state\n self.zipcode = zipcode\n self.phone = phone", "def __init__(self, linearExpression, indexingExpression, numericExpression = None):\n \n LinearExpression.__init__(self)\n\n self.linearExpression = linearExpression\n self.indexingExpression = indexingExpression\n self.numericExpression = numericExpression", "def __init__(self,database_access_object):\n #self.db = dbaccess(fn)\n self.db = database_access_object\n self.datastore_to_process = None\n self.datastore_process_index = -1", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def createEmployee(firstName, lastName, ssn, salary):\n employee = Employee(firstName, lastName, ssn, salary)\n # verify\n if firstName != employee.firstName or \\\n lastName != employee.lastName or \\\n ssn != employee.ssn or \\\n salary != employee.salary:\n raise ValueError(\"Failed to initialize Employee\")\n return employee", "def __init__(self, *args, **kwargs):\n raise NotImplementedError", "def __init__(self, first_name, last_name):\n self.first_name = first_name\n self.last_name = last_name\n self.email = \"abc\"\n self.age = 20", "def __init__(self, description, detail, identifier, lifecycle,\n name, query, reference, role, securityLabel, type,):\n self.description = description\n self.detail = detail\n self.identifier = identifier\n self.lifecycle = lifecycle\n self.name = name\n self.query = query\n self.reference = reference\n self.role = role\n self.securityLabel = securityLabel\n self.type = type", "def __init__(self, seqrepo_access: SeqRepoAccess) -> None:\n self.seqrepo_access = seqrepo_access", "def __init__(self, field_names=..., **kwargs) -> None:\n ...", "def __init__(self, espec_izquierda, espec_derecha):\n self._izquierda = espec_izquierda\n self._derecha = espec_derecha\n return", "def __init__(self, first_name, last_name):\r\n\r\n self.first_name = first_name\r\n self.last_name = last_name", "def __init__(self, name):\n self.diary_id = len(DiaryModel.diary)+1\n self.name = name\n self.date_created = datetime.datetime.utcnow()\n self.date_modified = None", "def create_employee(attributes):\n neccessary_keys = [\"empid\", \"gender\", \"sales\", \"bmi\", \"salary\", \"birthday\",\n \"age\"]\n for key in neccessary_keys:\n if not key in 
attributes.keys():\n raise ValueError(\"employee could not be created: {} is missing\".format(key))\n return Employee(attributes[\"empid\"], attributes[\"gender\"],\n attributes[\"sales\"], attributes[\"bmi\"],\n attributes[\"salary\"], attributes[\"birthday\"],\n attributes[\"age\"])", "def setUp(self):\n\n self.user = self.make_user()\n self.employee = Employee.objects.create(\n cpf=\"974.220.200-16\",\n user=self.user,\n departament=Employee.ADMINISTRATION\n )", "def __init__(self, first_name, last_name):\r\n self.first_name = first_name\r\n self.last_name = last_name", "def __init__(self, nombre_depto, id_depto):\n self.nombre_depto = nombre_depto\n self.id_depto = id_depto\n self.empleados = []", "def __init__(self, **kwargs):\n\t\tself._name = None\n\t\tself._username = None\n\t\tself._password = None\n\t\tself._context = self", "def __init__(self, **kwargs):\n\t\tself._name = None\n\t\tself._username = None\n\t\tself._password = None\n\t\tself._context = self", "def __init__(self, **kwargs):\n\t\tself._name = None\n\t\tself._username = None\n\t\tself._password = None\n\t\tself._context = self", "def __init__(self, **kwargs):\n\t\tself._name = None\n\t\tself._username = None\n\t\tself._password = None\n\t\tself._context = self", "def __init__(self, pd_obj, pd_obj_exp):\n super(Exponential, self).__init__()\n self.pd_obj = pd_obj\n self.pd_obj_exp = pd_obj_exp\n self.pd_obj_out = None", "def __init__(self, **kwargs):\n pass", "def __init__(self, **kwargs):\n pass", "def __init__(self, **kwargs):\n pass", "def __init__(self, reference = -1, objtype = None, classmodule = 0, name = ''):\n self.objectreference = reference # the index in the Python storage where the Basic object is stored\n self.objecttype = objtype # ('SF_String', 'DICTIONARY', ...)\n self.classmodule = classmodule # Module (1), Class instance (2)\n self.name = name # '' when no name\n self.internal = False # True to exceptionally allow assigning a new value to a read-only property\n self.localProperties = [] # the properties reserved for internal use (often empty)", "def setUp(self):\n\t\tfirst_name = 'Gerson'\n\t\tlast_name = 'Santos'\n\t\tannual_salary = 5000\n\t\tself.gerson = Employee(first_name, last_name, annual_salary)", "def __init__(self, name, age):\r\n self.name = name\r\n self.age = age", "def __init__(self, exciton_obj=None, dipole_term=False):\n self.exciton_obj = exciton_obj\n self.file_storage = exciton_obj.file_storage\n self.k_grid = exciton_obj.k_grid\n self.a1, self.a2 = exciton_obj.a1, exciton_obj.a2\n self.n_spins = exciton_obj.n_spins\n self.n_orbs = exciton_obj.n_orbs\n self.use_dipole_term = dipole_term", "def __init__(self, linearExpression):\n LinearExpression.__init__(self)\n\n self.linearExpression = linearExpression", "def __init__(self, linearExpression):\n LinearExpression.__init__(self)\n\n self.linearExpression = linearExpression", "def __init__(\n self, user_id: int, data_access_layer: AbstractDataAccessLayer, logger\n ) -> None:\n self._log = logger\n self._dal = data_access_layer\n self._user_id = user_id", "def __init__(self, exclusives_mapping):\r\n if not isinstance(exclusives_mapping, ExclusivesMapping):\r\n raise ValueError('An ExclusivesMapping is required, given %s of type %s'\r\n % (exclusives_mapping, type(exclusives_mapping)))\r\n self._exclusives_mapping = exclusives_mapping", "def __init__(self, first_name, last_name, age):\n\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, birthday, username):\n 
self.first_name = first_name\n self.last_name = last_name\n self.birthday = birthday\n self.username = username\n self.login_attempts = 0\n self.age = self.set_birthday()", "def __init__(self, *, user_data, department) -> None:\n self._user_data: User = user_data\n self._department: str = department", "def __init__(self, dao):\n RAMSTKDataModel.__init__(self, dao)\n\n # Initialize private dictionary attributes.\n\n # Initialize private list attributes.\n\n # Initialize private scalar attributes.\n\n # Initialize public dictionary attributes.\n\n # Initialize public list attributes.\n\n # Initialize public scalar attributes.", "def __init__(self, name, age):\n self.name = name\n self.age = age", "def __init__(self, name, age):\n self.name = name\n self.age = age", "def __init__(self, name, age):\n self.name = name\n self.age = age", "def __init__(self, name, age):\n self.name = name\n self.age = age", "def __init__(self, \n # - Arguments from Person\n given_name=None, initials=None, family_name=None, \n email_address=None,\n # - Other staff-specific arguments\n department=None\n ):\n # - We can use super() to call the parent class' __init__ \n # because there's only one parent class...\n super().__init__(\n given_name, initials, family_name, email_address\n )\n # - But we ALSO need to initialize properties that are \n # members of THIS class\n self.department = department", "def __init__(self, firstname, lastname):\n self.firstname = firstname\n self.lastname = lastname", "def __init__(self, name, hall_ID, password, monthly_salary,\n rebuild=False, worker_ID=None):\n\n # The rebuild flag, if true, denotes that the object is being made from\n # data already present in the database\n # If False, a new data row is added to the specific table\n if not rebuild:\n self.worker_ID = db.add(\"worker\")\n db.update(\"worker\", self.worker_ID, \"worker_type\", \"M\")\n self.password = password\n else:\n self.worker_ID = worker_ID\n self._password = password\n\n self.monthly_salary = monthly_salary\n worker.Worker.__init__(self, self.worker_ID, name, hall_ID)", "def __init__(self, first_name, last_name, age):\n\t\tself.first_name = first_name\n\t\tself.last_name = last_name\n\t\tself.age = age\n\t\tself.login_attempts = 0", "def __init__(self, name, age):\n\t\t# self.name is an object variable\n\t\tself.name = name\n\t\t#error checking\n\t\tif age < 0:\n\t\t\t# the raise keyword is how our programs can raise errors\n\t\t\traise ValueError(\"Age cannot be negative\")\n\t\tself.age = age", "def __init__(self,first_name,last_name,password):\n self.first_name = first_name\n self.last_name = last_name\n self.password = password", "def __init__(self, first_name, last_name, age, gender):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age\n self.gender = gender", "def __init__(self, name, alias=None, columns=None, is_root_table=False):\n self.name = name\n self.alias = alias\n self.columns = columns or []\n self.is_root_table = is_root_table", "def __init__(self, first, last, email, grade):\n self.first_name = first\n self.last_name = last\n self.email = email\n self.grade = grade", "def __init__(self, login, password, organization):\n self.login = login\n self.password = password\n self.organization = organization", "def __init__(self, base, kwargs, events, extensions=[]):\n\n self.base = base\n self.kwargs = kwargs\n self.events = events\n self.extensions = list(extensions)\n\n self.db = OrderedDict()", "def __init__(self):\n self._db = db\n # Connect to DB\n self._db.connect()\n # 
Create tables\n self._db.create_tables([Teachers, Parents, Tutors, Students, Homework, Groups, StudentsGroups, Courses])\n # Create filling entries\n self.__create_dummies()\n self._db.close()", "def __init__(__self__, *,\n access_string: pulumi.Input[str],\n engine: pulumi.Input[str],\n user_id: pulumi.Input[str],\n user_name: pulumi.Input[str],\n authentication_mode: Optional[pulumi.Input['UserAuthenticationModeArgs']] = None,\n no_password_required: Optional[pulumi.Input[bool]] = None,\n passwords: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"access_string\", access_string)\n pulumi.set(__self__, \"engine\", engine)\n pulumi.set(__self__, \"user_id\", user_id)\n pulumi.set(__self__, \"user_name\", user_name)\n if authentication_mode is not None:\n pulumi.set(__self__, \"authentication_mode\", authentication_mode)\n if no_password_required is not None:\n pulumi.set(__self__, \"no_password_required\", no_password_required)\n if passwords is not None:\n pulumi.set(__self__, \"passwords\", passwords)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)", "def __init__(self, filename, exoid, offset=1):\n self.filename = filename\n self.exoid = exoid\n self._o = offset\n\n pass", "def __init__(self, dataset_name, teacher_model, students_model):\n self.data_manager = DataManager(dataset_name)\n self.dataset_name = dataset_name\n self.teacher_model = teacher_model\n self.student_model = students_model", "def get_employee(self, id):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT * FROM employee WHERE employeeID=%s ', (id,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])", "def __init__(self, userId=None, password=None, **kwargs):\n object_cache = ExtendedObjectCache()\n object_cache.setduration(days=10)\n if 'cache_location' in kwargs:\n object_cache.setlocation(kwargs[\"cache_location\"])\n\n self.client = suds.client.Client(self.WSDL_URL,\n cache=object_cache,\n transport=WellBehavedHttpTransport())\n\n self.authentication = self.create('AuthenticationType')\n if userId and password:\n self.authentication.userId = userId\n self.authentication.password = password", "def __init__(self, op, expression1, expression2):\n LinearExpression.__init__(self)\n\n self.op = op\n self.expression1 = expression1\n self.expression2 = expression2", "def __init__(self, name, race, sex, age):\n self.Race = race\n self.Sex = sex\n self.Age = age\n self.Name = name", "def __init__(self) -> None:\n # TODO: Provide the complete constructor for this object", "def __init__(self, *args, **kwargs):\n raise NotImplementedError()", "def __init__(self, student, start_date, day_periods):\n self.student = student\n self.start_date = start_date\n self.day_periods = day_periods\n self.student_name = student.full_name_lastname_first(\n show_middle_name=False)\n self.student_gender= student.gender\n self.student_attendance_record = self.student.attendance", "def __init__(self, modoConsulta = none, dictConsulta = none):\n\n self.__modoConsulta = modoConsulta\n self.__dictConsulta = dictConsulta", "def __init__(self, first_name=\" \", last_name=\" \", company_name=\" \", address=\" \", city=\" \", county=\" \", state_code=\" \", zip_code=0, phone_number=\" \", phone_number_2=\" \", email_address=\" \"):\n \n self.first_name = first_name\n self.last_name = last_name\n self.crm_company_name = 
\"\"\n self.company_name = company_name\n self.address = address\n self.city = city\n self.county = county\n self.state_code = state_code\n self.zip_code = zip_code\n self.phone_number = phone_number\n self.phone_number_2 = phone_number_2\n self.email_address = email_address", "def __init__(self, name, list_countries,list_sectors,list_products,EORA=False):\n self.name = name\n self.m = ConcreteModel()\n self.countries = list_countries\n self.total_countries = len(list_countries)\n self.sectors = list_sectors\n self.products = list_products\n \n if EORA is True:\n self.EORA = True\n else:\n self.EORA = False", "def __init__(\n self, datetime,\n provider, asset_license,\n ext_properties\n ):\n self.ext_properties = ext_properties\n self.license = asset_license\n self.provider = provider\n self.datetime = datetime", "def get_employee(self, employee_id):\n cursor = self.dbconnect.get_cursor()\n\n try:\n cursor.execute('SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external,'\n ' is_admin, is_active FROM employee WHERE LOWER(id)=LOWER(%s)', (employee_id,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])\n\n except:\n self.dbconnect.rollback()\n raise", "def employee(self, employee: object):\n\n self._employee = employee" ]
[ "0.6591341", "0.65801567", "0.616899", "0.61554563", "0.6011643", "0.5887966", "0.58608353", "0.58440644", "0.5821259", "0.581591", "0.5810037", "0.5808263", "0.57878995", "0.57751787", "0.57714325", "0.57638586", "0.57473", "0.5722684", "0.5716975", "0.57147384", "0.5699858", "0.5697087", "0.5690169", "0.5690169", "0.5690169", "0.5679319", "0.567308", "0.56708515", "0.56615996", "0.56615996", "0.56615996", "0.56615996", "0.56615996", "0.56615996", "0.56615996", "0.56615996", "0.5659846", "0.563801", "0.5636149", "0.56263185", "0.56214875", "0.56187296", "0.5611212", "0.56041753", "0.5583925", "0.5583765", "0.55820113", "0.55796194", "0.5578211", "0.5561543", "0.5561543", "0.5561543", "0.5561543", "0.5550509", "0.55489755", "0.55489755", "0.55489755", "0.55475277", "0.5546475", "0.55434895", "0.55355394", "0.55328923", "0.55328923", "0.55277777", "0.5520759", "0.5519804", "0.55071187", "0.5502684", "0.5496235", "0.5490242", "0.5490242", "0.5490242", "0.5490242", "0.5489406", "0.5483116", "0.54830134", "0.54774404", "0.54696465", "0.54684126", "0.54659665", "0.546592", "0.5465163", "0.5454873", "0.54529554", "0.5450361", "0.54476887", "0.54454166", "0.54451144", "0.5432682", "0.5427183", "0.5426779", "0.5413654", "0.54117817", "0.54043233", "0.54041684", "0.5396267", "0.53956276", "0.53955716", "0.5395324", "0.5391231", "0.5387627" ]
0.0
-1
get all the employees out of the database
def get_employees(self): from Employee import Employee cursor = self.dbconnect.get_cursor() cursor.execute('select * from employee') employees = list() for row in cursor: employee = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8]) employees.append(employee) return employees
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self):\n resultado = EmployeeModel.query.all()\n return resultado", "def get_employees(self, active_only):\n cursor = self.dbconnect.get_cursor()\n\n if active_only:\n cursor.execute(\n 'SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external, '\n 'is_admin, is_active FROM employee WHERE is_active = TRUE')\n else:\n cursor.execute(\n 'SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external, '\n 'is_admin, is_active FROM employee')\n\n employees = list()\n for row in cursor:\n obj = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])\n employees.append(obj)\n return employees", "def getEmployees(self):\n return self.employees", "def get(self):\n employees = self.service.get_employees(strategy=selectinload)\n return self.schema.dump(employees, many=True), 200", "def get_employees(self):\n return self.employees", "def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")", "def show_all_employees(self):\n try:\n employees = self.admin_repository.show_all_employees()\n if employees:\n for employee in employees:\n print(\"Employee Id : {}\".format(employee[0]))\n print(\"Name : {}\".format(employee[1]))\n print(\"Email : {}\".format(employee[2]))\n print(\"----------------------------\")\n return True\n else:\n print(\"No records found.\")\n return False\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False", "def employees(self) -> object:\n return self._employees", "def get_employees(cls, strategy=lazyload):\n cls._check_strategy(strategy)\n\n return db.session.query(Employee).options(\n strategy(Employee.department)\n ).all()", "def show_all_employees():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n \n \n ## right now this is ALL users... 
\n \n return render_template(\"employee_display.html\", employees = employees)", "def list(self, request):\n employee = self.controller.retrieve_all_employees()\n serializer = data_serializers.PresentEmployeeDataSerializer(employee, many=True)\n return Response(serializer.data)", "def get_emp_list(self):\n\t\tcondition= ''\n\t\temp_list=[]\n\t\tif self.is_for_all==0:\n\t\t\tif not self.selected_employees:\n\t\t\t\tfrappe.throw(_(\"No employees for the mentioned criteria\"))\n\t\t\t#emp_list = [cstr(d.employee) for d in self.selected_employees]\n\t\t\temp_list = frappe.db.sql_list(\"\"\"\n\t\t\t\tselect\n\t\t\t\t\temployee from `tabAttendance Salary Tool Employee`\n\t\t\t\twhere\n\t\t\t\t\tparent = '%(parent)s' \n\t\t\t\"\"\"%{\"parent\": self.name})\n\t\t\tcondition+= \"\"\" and t1.employee IN %(employees)s \"\"\"\n\t\tif self.is_open_period==0:\n\t\t\tif not self.start_date or not self.end_date:\n\t\t\t\tfrappe.throw(_(\"Satart Date and End Date are Mandatories\"))\n\t\t\tcondition= \"\"\" and attendance_date >= %(start_date)s and attendance_date <= %(end_date)s\"\"\"\n\t\temp_list = frappe.db.sql(\"\"\"\n\t\t\tselect\n\t\t\t\tt1.employee as employee, count(*) as attendance_days\n\t\t\tfrom\n\t\t\t\t`tabAttendance` t1\n\t\t\twhere\n\t\t\t\tt1.attendance_salary_tool is null\n\t\t\t\tand t1.docstatus = 1 and t1.status='Present'\n\t\t\t\t{condition} group by t1.employee order by t1.employee asc\n\t\t\"\"\".format(condition=condition),{\"employees\": tuple(emp_list),\"start_date\": self.start_date,\"end_date\": self.end_date}, as_dict=True)\n\t\treturn emp_list", "def employees(employee_id=None):\n\tif not employee_id:\n\t\temployee_data = _serialize_list(Employee.query.all())\n\telse:\n\t\temployee_data = _serialize_model(Employee.query.filter_by(id=employee_id).first())\n\n\tresp = jsonify(employee_data)\n\treturn resp", "def get_employees_in_department(department_name: str) -> list:\n\n conn = database_connect()\n if(conn is None):\n return None\n cur = conn.cursor()\n\n try:\n # SQL statement and execute\n sql = \"\"\"SELECT Employee.empid, Employee.name\n FROM Employee JOIN EmployeeDepartments USING(empid)\n WHERE EmployeeDepartments.department = %s\"\"\"\n cur.execute(sql, (department_name,))\n\n # Attempt to fetch all rows\n result = cur.fetchall()\n\n if result == None:\n cur.close()\n conn.close()\n return []\n\n employees = []\n for row in result:\n employees.append(\n [row[0], row[1]]\n )\n cur.close()\n conn.close()\n return employees\n except Exception as e:\n print(\"ooo\")\n print(e)\n # If nothing was returned, return empty list\n cur.close()\n conn.close()\n return []\n\n # TODO Dummy Data - Change to be useful!\n # Return the employees in the department.\n # Each \"row\" has: [ empid, name ]\n\n # employees = [\n # [15905, 'Rea Fibbings'],\n # [9438, 'Julia Norville'],\n # [36020, 'Adora Lansdowne'],\n # [98809, 'Nathanial Farfoot'],\n # [58407, 'Lynne Smorthit'],\n # ]\n #\n # return employees", "def employee_works_in(employee_id: int) -> List[str]:\n\n conn = database_connect()\n if(conn is None):\n return None\n cur = conn.cursor()\n\n try:\n # SQL statement and execute\n sql = \"\"\"SELECT department\n FROM EmployeeDepartments\n WHERE EmployeeDepartments.empid = %s\"\"\"\n cur.execute(sql, (employee_id,));\n\n # Attempt to fetch all\n result = cur.fetchall()\n\n if result == None:\n cur.close()\n conn.close()\n return []\n\n departments = []\n for row in result:\n departments.append(\n row[0]\n )\n\n cur.close()\n conn.close()\n return departments\n except Exception as 
e:\n print(\"ddd\")\n print(e)\n # If login failed, return None\n cur.close()\n conn.close()\n return []", "def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)", "def get_employees_directory(self):\n response = requests.get(self._base_url + \"employees/directory\",\n auth=(self._api_key, \"pass\"),\n headers={'Accept': 'application/json'})\n if response.status_code != 200:\n response.raise_for_status()\n emps_json = json.loads(response.text)['employees']\n return {int(e['id']): Employee(e['displayName'],\n e['firstName'],\n e['lastName'],\n e['nickname']) for e in emps_json}", "def list_employees(order_by=\"id\"):\n ret = {}\n status, result = _query(action=\"employees\", command=\"directory\")\n root = ET.fromstring(result)\n for cat in root:\n if cat.tag != \"employees\":\n continue\n for item in cat:\n emp_id = next(iter(item.values()))\n emp_ret = {\"id\": emp_id}\n for details in item:\n emp_ret[next(iter(details.values()))] = details.text\n ret[emp_ret[order_by]] = emp_ret\n return ret", "def query_employee(self, employee_inputs):\n\n query = \"select * from employee where \"\n row_names = [\n \"emp_ID\", \"Region_ID\", \"Emp_Lname\", \"Emp_Mi\", \"Emp_Fname\",\n \"Emp_Hiredate\"\n ]\n filled_attributes = []\n\n row_index = 0\n row_options = []\n for item in employee_inputs:\n if item is not None:\n row_options.append(row_index)\n filled_attributes.append(item)\n row_index += 1\n\n j = 0\n for i in row_options:\n if j == 0:\n query += \"{}='{}' \".format(row_names[i], filled_attributes[j])\n else:\n query += \"and {}='{}' \".format(row_names[i],\n filled_attributes[j])\n j += 1\n\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)", "def populate_employees():\n employees = get_employees()\n\n db.session.bulk_save_objects(employees)\n db.session.commit()", "def lookup_employee():\n unique_names = get_unique_employees()\n while True:\n if len(unique_names) > 1:\n print('Entries found by {} and {}.'.format(\n ', '.join(unique_names[:-1]),\n unique_names[-1]))\n elif len(unique_names) == 1:\n print('Entries found by {}.'.format(unique_names[0]))\n\n search_query = input('Show entries by: ')\n if validate_lookup_employee_format(search_query):\n break\n print('** Please enter a name of alphabetic characters and spaces **')\n return Entry.select().where(Entry.employee_name == search_query)", "def all_employees(request, company_id=None):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n company_super_user = current_employee.isCompanySuperUserOrHigher()\n if company_id:\n company = Company.objects.get(pk=company_id)\n else:\n company = current_employee.company\n if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:\n raise PermissionDenied()\n change_company_form = ChangeCompanyForm(initial=dict(company=company))\n return TemplateResponse(\n request,\n 'all_employees.html',\n {\n 'user': request.user,\n 'company_super_user': company_super_user,\n 'company': company,\n 'change_company_form': change_company_form,\n }\n )", "def employee_list(request):\n response_data = []\n for emp in Employee.objects.all().values(\n 'id', 'first_name', 'last_name', 'age', 'address', 'city',\n 'state', 'country'):\n response_data.append(emp)\n return JsonResponse(response_data, safe=False)", "def gather_employee_entries(self):\n user_inputs = [\n self.emp_lname.get(), self.emp_mi.get(), 
self.emp_fname.get(),\n self.emp_hiredate.get()\n ]\n\n return self.check_input_empty(user_inputs)", "def list(self, request):\n varemployee = employee.objects.all()\n serializer = employeeSerializer(varemployee, many=True)\n return Response(serializer.data)", "def get(self):\n args = self.parser.parse_args()\n date = get_date_or_none(args['date'])\n start_date = get_date_or_none(args['start_date'])\n end_date = get_date_or_none(args['end_date'])\n\n if date:\n employees = self.service.get_employees_by_date_of_birth(\n date, strategy=selectinload\n )\n elif start_date and end_date:\n employees = self.service.get_employees_born_in_period(\n start_date, end_date, strategy=selectinload\n )\n else:\n return self.BAD_DATE_MESSAGE, 400\n\n return self.schema.dump(employees, many=True), 200", "def get_employee():\n\n employee_id = get_employee_input_int('Enter employee ID to get the data ')\n employee = db.get_employee(employee_id)\n if not employee:\n print(\"No employee found with id \", employee_id)\n else:\n payscale = db.get_payScale(employee.grade)\n print('DATA:-> {} {} has grade = {} which gives {} per hours\\n'\n .format(employee.first_name, employee.last_name, employee.grade, payscale.salary))", "def get_birthday_employees(self):\n birthday_employees = []\n\n employees = self.search([\n ('birthday_reminders', '=', True),\n ('birthday', '!=', False),\n ])\n if not employees:\n return birthday_employees\n\n return employees.filtered(lambda x: self.check_emp_birthday(x.birthday))", "def test_api_can_get_all_employees(self):\n res = self.client().get(service_url_emp)\n self.assertEqual(res.status_code, 200)\n self.assertIn('name1', str(res.data))\n self.assertIn('name2', str(res.data))", "def get_employee(self, id):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT * FROM employee WHERE employeeID=%s ', (id,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])", "def available_employees(self,work_trips_by_date):\r\n\r\n employee_list = self.get_updated_list_from_DB('employee')\r\n available_employees_list = []\r\n total_sets = set()\r\n set_list = []\r\n\r\n for i in range(len(work_trips_by_date)):\r\n set_list.append(set(work_trips_by_date[i])) \r\n \r\n total_sets = set_list[0]\r\n \r\n if len(work_trips_by_date) != 1: \r\n for i in range(1,len(set_list)):\r\n total_sets.update(set_list[i])\r\n\r\n for line in employee_list:\r\n if line[0] not in total_sets:\r\n available_employees_list.append(line)\r\n\r\n row_names = ['id', 'name' ,'role' ,'rank'] #return columns\r\n employee_index_list = self.find_index_from_header('employee', row_names)\r\n filtered_available_employees = self.filter_by_header_index(employee_index_list, available_employees_list)\r\n\r\n available_employees_list.pop(0)\r\n\r\n return filtered_available_employees", "def scan_table(self,expression=''):\n response = self.table.query(KeyConditionExpression=Key(\"Employeeid\").eq(int(expression)))\n print(response['Items'])\n df = pd.DataFrame(response['Items'], index=[0])\n print(df.head())\n return df", "def list(self, request):\n teams = self.controller.retrieve_all_teams_employees()\n serializer = data_serializers.PresentTeamEmployeeDataSerializer(teams, many=True)\n return Response(serializer.data)", "def show_all():\n\n QUERY = \"\"\"\n SELECT first_name, last_name, github\n FROM students\n \"\"\"\n\n db_cursor = db.session.execute(QUERY)\n\n rows = db_cursor.fetchall()\n\n return rows", "def 
_get_employee_info() -> List[List[str]]:\n return [\n ['100', 'Dave', 'Team Leader'],\n ['101', 'Ram', 'Developer'],\n ['102', 'Raj', 'Developer'],\n ['103', 'Rahul', 'Tester'],\n ]", "def get_employee_by_name(self, name):\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external,'\n ' is_admin, is_active FROM employee WHERE name=%s', (name,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])", "def query_table(self, expression = ''):\n response = self.table.scan(FilterExpression = Attr(\"Employeeid\").gt(int(expression)))\n df = pd.DataFrame(response['Items'])\n print(df.head(20))\n return df", "def get_people(self):\n cursor = self.cur()\n cursor.execute('SELECT * FROM {tn} '.format(tn=\"person\"))\n all_people = cursor.fetchall()\n return all_people", "def show_employee(emp_id, fields=None):\n ret = {}\n if fields is None:\n fields = \",\".join(\n (\n \"canUploadPhoto\",\n \"department\",\n \"displayName\",\n \"firstName\",\n \"id\",\n \"jobTitle\",\n \"lastName\",\n \"location\",\n \"mobilePhone\",\n \"nickname\",\n \"photoUploaded\",\n \"photoUrl\",\n \"workEmail\",\n \"workPhone\",\n \"workPhoneExtension\",\n )\n )\n\n status, result = _query(action=\"employees\", command=emp_id, args={\"fields\": fields})\n\n root = ET.fromstring(result)\n\n ret = {\"id\": emp_id}\n for item in root:\n ret[next(iter(item.values()))] = item.text\n return ret", "def query_employee_skill(self):\n\n query = \"select Skill_Descrpt, Emp_Fname, Emp_Lname from \" \\\n \"skill, employee, empskill \" \\\n \"where employee.Emp_ID = empskill.Emp_ID \" \\\n \"and skill.Skill_ID = empskill.Skill_ID \"\n\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)", "def get_manager_employees(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n manager_employees = Employee.objects.filter(manager=current_employee, development_plan_type=None).all()\n if manager_employees:\n emp_list=[]\n for emp in manager_employees:\n emp_data={}\n emp_data[\"id\"] = emp.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"manager_id\"] = emp.manager.id\n # emp_data[\"status_questions\"] = emp.status_questions\n # employee_role = EmployeeRole.objects.filter(employee=emp).all()\n # name_role_list = []\n # for obj in employee_role:\n # name_role_list.append(obj.role.name)\n # emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n data = {\"employees:\": emp_list}\n return JsonResponse(status=201, data=data)\n else:\n return JsonResponse(\"The user with id={} isn't a manager for any user\".format(current_employee.user.id),\n status=404)", "def employees(self, employees: object):\n\n self._employees = employees", "def get(self, request):\n varemployee = employee.objects.all()\n serializer = employeeSerializer(varemployee, many=True)\n return Response(serializer.data)", "def get_employee_training(employee_id):\n with sqlite3.connect(Connection.db_path) as conn:\n conn.row_factory = model_factory(TrainingProgramEmployee)\n\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n SELECT\n *\n FROM\n hrapp_trainingprogramemployee te\n WHERE\n te.employee_id = ?\n \"\"\", (employee_id, ))\n\n return db_cursor.fetchall()", "def 
generateEmployees(self):\r\n\r\n # Name\r\n maleNames = ['Perry Lovan', 'Horacio Arvidson', 'Gale Skipworth', 'Joshua Lodge', 'Noble Shutter', 'Kristopher Talor', 'Jarod Harrop', 'Joan Henrichs', 'Wilber Vitiello', 'Clayton Brannum', 'Joel Sennett', 'Wiley Maffei', 'Clemente Flore', 'Cliff Saari', 'Miquel Plamondon', 'Erwin Broadus', 'Elvin Defibaugh', 'Ramon Vaquera', 'Roberto Koval', 'Micah Sumter', 'Wyatt Cambareri', 'Jamal Delarosa', 'Franklyn Hayles', 'Riley Haslett', 'Robt Fincher', 'Abraham Denzer', 'Darius Jude', 'Phillip Sunderman', 'August Kindel', 'Jospeh Mawson', 'Damion Postma', 'Gregorio Pasco', 'Rosendo Downing', 'Chance Plascencia', 'Jewell Pankratz', 'Jerrell Tarrance', 'Michal Bliss', 'Josue Larocque', 'Aaron Harpster', 'Zack Hildebrant', 'Frank Souders', 'Lindsay Bechard', 'Agustin Marks', 'Mathew Fredericksen', 'Ivan Hanline', 'Michael Otto', 'Max Oberlander', 'Ricky Mckellar', 'Bernard Friedt', 'King Lorentzen']\r\n femaleNames = ['Lorretta Vansickle', 'Loura Steimle', 'Neomi Fritz', 'Vernie Vanderveen', 'Dede Poehler', 'Margarete Espinoza', 'Leda Leonardo', 'Fae Strand', 'Nichol Winford', 'Danika Ridgeway', 'Elvira Balentine', 'Sharell Xie', 'Sheree Booker', 'Emely Conine', 'Justina Kleve', 'Pia Maxton', 'Sophia Lark', 'Nilsa Albee', 'Felipa Seman', 'Jeraldine Watkins', 'Susann Sowards', 'Asha Irion', 'Shay Koran', 'Rosio Jahn', 'Rachal Slaven', 'Beryl Byron', 'Jona Lira', 'Margert Strite', 'Talia Beauregard', 'Jacqueline Vella', 'Rolande Mccready', 'Margret Hickerson', 'Precious Confer', 'Evita Nicolai', 'Fredda Groner', 'Laquanda Bracken', 'Alana Saddler', 'Melania Harring', 'Shae Everette', 'Marlyn Mcfalls', 'Madeline Nicols', 'Fonda Webster', 'Fumiko Steffy', 'Virginia Sprinkle', 'Lula Frisch', 'Mari Mulherin', 'Alecia Remillard', 'Jeanna Halderman', 'Ocie Waldrep', 'Theresa Knouse']\r\n\r\n for i in range(self.num_of_employees):\r\n\r\n # Clock in an hour before opening, 6 hours after, or 12 hours after\r\n clockIn = random.choice([7, 13, 19])\r\n\r\n # Clock out after 5 hours, 10 hours, or 15 hours\r\n clockOut = random.choice([13, 19, 23])\r\n while clockOut <= clockIn:\r\n clockOut = random.choice([13, 19, 23])\r\n\r\n # Hourly wage\r\n wage = random.choice([8, 9, 10, 12, 20])\r\n\r\n gender = random.choice(['M', 'F'])\r\n if gender == 'M':\r\n name = random.choice(maleNames)\r\n else:\r\n name = random.choice(femaleNames)\r\n\r\n self.c.execute(\"INSERT INTO Employee (Name, ClockIn, ClockOut, Wage) VALUES (?, ?, ?, ?)\", (name, clockIn, clockOut, wage))\r\n self.conn.commit()\r\n\r\n if self.print_employees:\r\n print(\"\\nName:\", name)\r\n print(\"Clock in:\", clockIn)\r\n print(\"Clock out:\", clockOut)\r\n print(\"Wage:\", wage)", "def employers_get(label=None, page=None, per_page=None): # noqa: E501\n\n\n return query_manager.get_resource(\n label=label,\n page=page,\n per_page=per_page,\n rdf_type_uri=EMPLOYER_TYPE_URI,\n rdf_type_name=EMPLOYER_TYPE_NAME, \n kls=Employer)", "def Fetch_all(self):\n\n try:\n query = \"select * from user\"\n cur = self.con.cursor()\n cur.execute(query)\n for row in cur:\n print(\"User Id : \", row[0])\n print(\"User Name : \", row[1])\n print(\"User Phone : \", row[2])\n print()\n except Exception as e:\n logger.error(\"Error occured at data selection..\", e)", "def get_all(self):\n total_expense_reports = []\n get_count = {\n 'query': {\n 'object': 'EEXPENSES',\n 'select': {\n 'field': 'RECORDNO'\n },\n 'pagesize': '1'\n }\n }\n\n response = self.format_and_send_request(get_count)\n count = 
int(response['data']['@totalcount'])\n pagesize = 2000\n offset = 0\n for i in range(0, count, pagesize):\n data = {\n 'query': {\n 'object': 'EEXPENSES',\n 'select': {\n 'field': [\n 'RECORDNO',\n 'RECORDID',\n 'WHENCREATED',\n 'WHENPOSTED',\n 'TOTALENTERED',\n 'STATE',\n 'TOTALDUE',\n 'DESCRIPTION',\n 'CURRENCY',\n 'BASECURR',\n 'MEMO'\n ]\n },\n 'pagesize': pagesize,\n 'offset': offset\n }\n }\n expense_reports = self.format_and_send_request(data)['data']['EEXPENSES']\n total_expense_reports = total_expense_reports + expense_reports\n offset = offset + pagesize\n return total_expense_reports", "def query_all():\n\tstudents = session.query(Student).all()\n\treturn students", "def load_employees(self):\n empcsv = open('employees.csv','r')\n emp_temp = []\n empcsv = empcsv.readlines()[1:]\n for line in empcsv:\n for i in line.split(','):\n if line == 0:\n pass\n else:\n emp_temp.append(i)\n employee = emp_temp[0::13]\n data_1 = []\n data = []\n for i in emp_temp:\n if i in employee:\n pass\n else:\n data_1.append(i)\n for i in range(26):\n data_temp = data_1[(i * 12):((i + 1) * 12)]\n data.append(data_temp)\n for i in range(len(employee)):\n self.emp_dict[employee[i]] = data[i]\n #print(self.emp_dict)\n for i in self.emp_dict:\n self.emp_dict[i] = [x.replace('\\n', '') for x in self.emp_dict[i]]\n return self.emp_dict", "def get_employee(self):\n employee_ids = self.env['hr.employee'].search([('user_id', '=', self.env.uid)])\n return employee_ids[0] if employee_ids else False", "def get_employee(self, employee_id):\n cursor = self.dbconnect.get_cursor()\n\n try:\n cursor.execute('SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external,'\n ' is_admin, is_active FROM employee WHERE LOWER(id)=LOWER(%s)', (employee_id,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])\n\n except:\n self.dbconnect.rollback()\n raise", "def get_employee(self, name):\n name = name.upper()\n if name in EMPLOYEE_MAP:\n name = EMPLOYEE_MAP[name]\n try:\n int(name)\n emps = Employee.objects.filter(id=name)\n except ValueError:\n if name == 'NN':\n emps = Employee.objects.filter(user__first_name='Nieznany')\n elif Employee.objects.filter(user__username__iexact=name).exists():\n emps = Employee.objects.filter(user__username__iexact=name)\n elif len(name) == 3:\n emps = Employee.objects.filter(user__first_name__istartswith=name[0],\n user__last_name__istartswith=name[1:3],\n status=0)\n else:\n emps = Employee.objects.filter(user__first_name__istartswith=name[0],\n user__last_name__istartswith=name[1:],\n status=0)\n if not emps:\n emps = Employee.objects.filter(user__username__istartswith=name)\n if len(emps) == 1:\n return emps[0]\n elif len(emps) > 1:\n self.stdout.write(self.style.ERROR('Multiple employee matches for {}. Choices are:'\n .format(name)))\n for e in emps:\n self.stdout.write(self.style.ERROR(' -{}'.format(e.user.get_full_name())))\n else:\n raise CommandError('Employee {} does not exists! 
Fix your input file.'.format(name))\n\n return None", "def select_all_persons(conn):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM person\")\n\n rows = cur.fetchall()\n\n return rows # return the rows", "def get_sal_slip_list(self, as_dict=False):\n\t\tcondition= ''\n\t\temp_list=[]\n\t\tif self.is_for_all==0:\n\t\t\tif not self.selected_employees:\n\t\t\t\tfrappe.throw(_(\"No employees for the mentioned criteria\"))\n\t\t\t#emp_list = [cstr(d.employee) for d in self.selected_employees]\n\t\t\temp_list = frappe.db.sql_list(\"\"\"\n\t\t\t\tselect\n\t\t\t\t\temployee from `tabAttendance Salary Tool Employee`\n\t\t\t\twhere\n\t\t\t\t\tparent = '%(parent)s' \n\t\t\t\"\"\"%{\"parent\": self.name})\n\t\t\tcondition+= \"\"\" and t1.employee IN %(employees)s \"\"\"\n\t\tif self.is_open_period==0:\n\t\t\tif not self.start_date or not self.end_date:\n\t\t\t\tfrappe.throw(_(\"Satart Date and End Date are Mandatories\"))\n\t\t\tcondition= \"\"\" and attendance_date >= %(start_date)s and attendance_date <= %(end_date)s\"\"\"\n\t\temp_list = frappe.db.sql(\"\"\"\n\t\t\tselect\n\t\t\t\tt1.name\n\t\t\tfrom\n\t\t\t\t`tabAttendance` t1\n\t\t\twhere\n\t\t\t\tt1.attendance_salary_tool is null\n\t\t\t\tand t1.docstatus = 1 and t1.status='Present'\n\t\t\t\t{condition} group by t1.employee order by t1.employee asc\n\t\t\"\"\".format(condition=condition),{\"employees\": tuple(emp_list),\"start_date\": self.start_date,\"end_date\": self.end_date}, as_dict=as_dict)\n\n\t\treturn emp_list", "def name_get(self):\n res = []\n for employee in self:\n name = employee.name\n name = ' '.join([name or '', employee.middle_name or '', employee.last_name or ''])\n res.append((employee.id, name))\n return res", "def get_employeeOnName(self, name):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT * FROM employee WHERE name=%s ', (name,))\n if (cursor.rowcount != 0):\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])\n else:\n return None", "def show_overview_of_all_employees(self):\n\n print(\"OVERVIEW OF EMPLOYEES\\n\")\n\n employees_ob_list = self.llapi.get_employee_overview()\n \n for employee_ob in employees_ob_list:\n print(employee_ob.print_info_in_line(\"*\"))\n \n print(f\"\\nNAN AIR has {len(employees_ob_list)} employees\")\n\n print(\"\\nB Back\\n\")\n\n action_str = self.choose_action([\"b\"])\n while action_str == False:\n action_str = self.choose_action([\"b\"])\n\n if action_str == \"b\":\n return", "def get_employee_query(where=None):\n query = (\n db.session.query(Employee)\n .outerjoin(Department, Department.id == Employee.department_id)\n .with_entities(\n Department.name.label(\"department_name\"),\n Employee.id,\n Employee.name,\n Employee.date_birth,\n Employee.salary,\n Employee.department_id,\n )\n )\n if where is None:\n return query\n\n return query.filter(where)", "def get_queryset(self, request):\n return models.Employee.objects.exclude(username='root')", "def get_users():\n\n return User.query.all() # [<User user_id=1 fname=Alice lname=Apple>]", "def all_exercises(self):\n with sqlite3.connect(self.db_path) as conn:\n conn.row_factory = lambda cursor, row: Exercises(\n row[0], row[1]\n )\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select e.Name,\n e.Language\n from Exercise e\n \"\"\")\n\n all_exercises = db_cursor.fetchall()\n\n for exercise in all_exercises:\n print(exercise)", "def list(self):\n # Grupos en los que el usuario formo parte\n curso = 
self.get_curso_actual()\n entregadores = identity.current.user.get_entregadores(curso)\n r = cls.select(IN(cls.q.entregador, entregadores), orderBy=-Entrega.q.fecha)\n return dict(records=r, name=name, namepl=namepl, limit_to=identity.current.user.paginador)", "def get(self, id):\n resultado = EmployeeModel.query.filter_by(employee_id=id).first()\n if resultado:\n return resultado\n api.abort(404)", "def filter_employees(self, searchQuery=\"\", researchGroup=\"\", promotor=0, ):\n from Employee import Employee\n try:\n cursor = self.dbconnect.get_cursor()\n\n sql = 'select * from employee e INNER JOIN researchGroup r ON r.groupID=e.researchGroup WHERE ' \\\n 'e.name LIKE %(searchQueryQ)s'\n\n if researchGroup != \"\":\n sql += \"AND r.name = %(researchGroupQ)s\"\n\n if promotor == 1:\n sql += 'AND e.promotor = TRUE'\n if promotor == 2:\n sql += 'AND e.promotor = FALSE'\n\n cursor.execute(sql, dict(searchQueryQ=\"%\" + searchQuery + \"%\", researchGroupQ=researchGroup))\n employees = list()\n for row in cursor:\n employee = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])\n employees.append(employee)\n return employees\n except:\n self.dbconnect.rollback()\n raise Exception('unable to filter employees')", "def get_admins(self):\n from Employee import Employee\n admins = list()\n cursorRoles = self.dbconnect.get_cursor()\n cursorRoles.execute('select * from employeeRoles where role=\\'admin\\'')\n for row in cursorRoles:\n admins.append(self.get_employee(row[0]))\n return admins", "def load_employees(file_path):\n\temployees = []\n\tfor line in open(file_path):\n\t\temployee = Employee.employee_from_insert_stmnt(line)\n\t\tif employee:\n\t\t\temployees.append(employee)\n\treturn employees", "def get_employee_by_name(self, name):\n self.lock.acquire()\n for employee in self.__Session.query(Employee).all():\n if (employee.fname+' '+employee.lname == name):\n result = employee\n self.lock.release()\n return result", "def entity_data(self, entity_name, limit=10):\n from sagas.ofbiz.entities import OfEntity as e, finder, record_list_df\n # limit = 10\n offset = 0\n result = finder.find_list(entity_name, limit, offset)\n result = record_list_df(entity_name, result, drop_null_cols=True, contains_internal=False)\n print(result)", "def all_exercises(self):\n\n with sqlite3.connect(self.db_path) as conn:\n conn.row_factory = lambda cursor, row: Exercise(row [1], row [2])\n\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select e.id,\n e.name,\n e.language\n \n from exercises e\n order by e.language\n \"\"\")\n\n all_exercises = db_cursor.fetchall()\n print('\\n***All Exercises***')\n for exercise in all_exercises:\n print(exercise)", "def get_unique_employees():\n unique_names = []\n\n for entry in Entry.select():\n if entry.employee_name not in unique_names:\n unique_names.append(entry.employee_name)\n\n clear()\n return unique_names", "def _getCadastroEmpregos(self, id_cadastro):\n return self.execSql(\"select_cadastro_empregos\",\n id_cadastro=int(id_cadastro))", "def query_worklog(self, emp_id=None):\n\n query = \"select * from worklog\"\n\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)", "def get_elements(self):\n query = f\"select name, middle_name, last_name, age from `{self.table_id}`\"\n query_job = self.client.query(query)\n clients = []\n for row in query_job:\n print('Name', row['name'], 'middle name:', row['middle_name'], 'last name: ',row['last_name'], 
'age:', row['age'])\n clients.append(Client(row['name'],row['middle_name'],row['last_name'],row['age']))\n return clients", "def get_departments() -> list:\n return Department.query.all()", "def get_employees_by_date_of_birth(cls, date, strategy=lazyload):\n cls._check_strategy(strategy)\n\n employees = db.session.query(Employee).options(\n strategy(Employee.department)\n ).filter_by(\n date_of_birth=date\n ).all()\n return employees", "def employees_earning(table):\n\n product_index = 1\n employee_id_index = 2\n amount_sold_index = 4\n\n person_id_index = 0\n person_name_index = 1\n\n game_index = 0\n price_index = 3\n\n store_table = store.get_table()\n store.check_table(store_table)\n hr_table = hr.get_table('model/hr/persons.csv')\n money_earned = {}\n for person in hr_table:\n person_id = person[person_id_index]\n person_name = person[person_name_index]\n money_earned[person_name] = 0\n for record in table:\n product_id = record[product_index]\n employee_id = record[employee_id_index]\n amount_sold = int(record[amount_sold_index])\n if person_id == employee_id:\n for game in store_table:\n game_id = game[game_index]\n if game_id == product_id:\n game_price = int(game[price_index])\n money_earned[person_name] += int(amount_sold * game_price)\n return money_earned", "def loadEmployees(testList):\n # define an empty employee List\n employeeList = []\n\n for item in testList:\n itemToAdd = None\n if item['type'] == 'employee':\n try:\n itemToAdd = createEmployee(item['firstName'],\n item['lastName'],\n item['SSN'],\n item['salary'])\n except ValueError:\n continue\n\n elif item['type'] == 'manager':\n try:\n itemToAdd = createManager(item['firstName'],\n item['lastName'],\n item['SSN'],\n item['salary'],\n item['title'],\n item['yearBonus'])\n except ValueError:\n continue\n # Add Employee/Manager Object to List\n if itemToAdd != None: # Note : this line will call Employee __eq__ to verify that it is not equal to None\n employeeList.append(itemToAdd)\n\n return employeeList", "def get(self):\n\n users = [marshal(user, user_fields) for user in models.ExerciseUser.select()]\n\n return users", "def list_people():\n\n person_list = []\n for person in person_database:\n person_list.append(person)\n return person_list", "def working_employees(self,work_trips_by_date):\r\n\r\n employee_list = self.get_updated_list_from_DB('employee')\r\n working_employees_list = []\r\n line_list = []\r\n\r\n for i,line in enumerate(work_trips_by_date): \r\n\r\n for line in employee_list:\r\n if line[0] in work_trips_by_date[i]:\r\n working_employees_list.append(line[2]+','+line[6]+','+work_trips_by_date[i][0])\r\n \r\n return working_employees_list", "def get_payees(self):\n # open a cursor object\n cur = self.get_cursor()\n\n # get payees from database\n cur.execute(\"SELECT * FROM payees\")\n payees_data = cur.fetchall()\n\n # convert into a list of payee dictionaries\n payees_list = []\n [payees_list.append({'payee_id': payee[0],\n 'payee_name': payee[1]})\n for payee in payees_data]\n\n # close the cursor\n self.close_cursor()\n\n return payees_list", "def select_all_meetings(self):\n db_connection = DbConnection()\n\n try:\n connection = db_connection.get_connection()\n\n cursor = connection.cursor()\n cursor.execute(self.select_all_sql)\n rows = cursor.fetchall()\n\n cursor.close()\n db_connection.close_connection()\n except Exception:\n raise\n\n else:\n\n return rows", "def filter_by_employee(table, employee_id):\n operations = []\n employee_id_index = 1\n for record in table:\n id = 
record[employee_id_index]\n if id == employee_id:\n operations.append(record)\n return operations", "def read_all():\n # Create the list of users from our data\n users = User.query.order_by(User.first_name).all()\n\n # Serialize the data for the response\n user_schema = UserSchema(many=True)\n data = user_schema.dump(users)\n return data", "def show_all_information():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n all_certs = employee_certification.query.all()\n \n return render_template(\"admin.html\", employees = employees, all_certs = all_certs)", "def get_all_companies_and_people():", "def getELUsers(**kwargs):\n \n for key in kwargs:\n if type(kwargs[key]) == list:\n kwargs[key] = kwargs[key][0]\n \n allELUsers = ELUser.ELUser.all(**kwargs)\n allELUsersDictionaries = [dict(eluser) for eluser in allELUsers if dict(eluser)]\n \n return flask.Response(\n response = json.dumps(allELUsersDictionaries),\n status = 200,\n content_type = 'application/json'\n )", "def get_employeeProjects(self, id):\n from Project import Project\n cursor = self.dbconnect.get_cursor()\n cursor.execute('select project from projectpromotor where employee=%s', (id,))\n\n projectsId = list()\n for row in cursor:\n projectsId.append(row[0])\n\n projects = list()\n for projId in projectsId:\n cursor.execute('select * from project where projectID=%s',\n (projId,)) # returns exactly one row from the table\n row = cursor.fetchone()\n project = Project(row[0], row[1], row[2], row[3])\n\n cursor.execute('select year from projectYearConnection where projectID=%s', (projId,))\n\n years = list()\n for row in cursor:\n years.append(row[0])\n\n project.activeYear = years\n\n projects.append(project)\n\n return projects", "def set_employees_by_id(department_id):\n return Employee.query.filter_by(department_id=department_id)", "def fetch_all(self):\n with self.__connection.cursor() as cursor:\n query = \"SELECT * FROM %s\" % self.__schema\n cursor.execute(query)\n return cursor.fetchall()", "def _get_all_records(self) -> List[DBModelInstance]:\n return self.model.query.all()", "def extension2() -> list:\n\n conn = database_connect()\n if(conn is None):\n return None\n\n cur = conn.cursor()\n\n try:\n # SQL statement and execute\n sql = \"\"\"SELECT ED.department, SUM(R.cost)\n FROM Repair R JOIN Device D USING(deviceID)\n\t JOIN EmployeeDeparments ED ON (D.issuedTo = ED.empId)\n GROUP BY ED.department\n ORDER BY R.cost DESC, ED.department ASC\"\"\"\n cur.execute(sql, ())\n\n # Attempt to fetch first row\n result = cur.fetchall()\n\n # If employee info returns nothing, return none\n if result == None:\n cur.close()\n conn.close()\n return []\n\n costs = []\n for row in result:\n costs.append(\n [row[0], row[1]]\n )\n\n cur.close()\n conn.close()\n return costs\n except Exception as e :\n print(\"ex2\")\n print(e)\n # If something went really wrong\n cur.close()\n conn.close()\n return None", "def employee(self) -> object:\n return self._employee", "def all():\n session = session_maker(\n app.config['MYSQL_USER'], app.config['MYSQL_PASS'], app.config['MYSQL_SERVER_PORT_3306_TCP_ADDR'],\n app.config['MYSQL_SERVER_PORT_3306_TCP_PORT'], app.config['DB'])\n\n print(\n tabulate(\n selection_list_all(session),\n headers=['number', 'sqlid', 'name', 'city', 'state']))", "def get_all_labs():\n return Lab.query.all()", "def get_records(self):\n 
logging.debug('Return all records in table')\n if not self._dbconnect or not self._cursor:\n raise Exception('Invalid call to Context Manager method!')\n\n self._cursor.execute(\"\"\"SELECT * FROM {}\"\"\".format(self._name))\n rows = self._cursor.fetchall()\n\n records = []\n for r in rows:\n record = {'date': r['date'],\n 'time': r['time'],\n 'location': r['location'],\n 'nodeID': r['nodeID']}\n logging.info('{}|{}|{}|{}'.format(r['date'],r['time'],r['location'],r['nodeID']))\n records.append(record)\n\n return records", "def get_users(self):\n res = self.conn.cursor().execute('SELECT id,email,username FROM users')\n return res.fetchall()", "def get_mentee_list():\n # Get db object and users table\n db = get_db()\n users = db.users\n \n # Search database for mentees\n cursor = users.find({\"role\": \"Mentee\"})\n \n context = {'mentees': []}\n \n for document in cursor:\n temp = document\n del temp['_id']\n context['mentees'].append(temp)\n \n context['url'] = \"/api/v1/mentees/\"\n return flask.jsonify(**context)", "def get_employee(employee_id):\n\n employee = Employee.objects.get_or_404(id=employee_id)\n\n return jsonify({\n 'employee': employee\n })" ]
[ "0.7986589", "0.78798616", "0.7810305", "0.7761145", "0.771737", "0.7529015", "0.7422921", "0.722801", "0.7137951", "0.71137774", "0.7091648", "0.7014968", "0.6984071", "0.6906787", "0.6850874", "0.67577934", "0.6755647", "0.67153805", "0.66789347", "0.6564769", "0.65336215", "0.64868057", "0.6434089", "0.6367211", "0.63359034", "0.6327923", "0.62664163", "0.6265159", "0.61977154", "0.6197423", "0.6197247", "0.6153945", "0.6145145", "0.60842437", "0.6084099", "0.6078474", "0.607111", "0.6051459", "0.6030666", "0.6029221", "0.6028791", "0.6010459", "0.60092396", "0.60018516", "0.59813154", "0.59806633", "0.59776014", "0.59696937", "0.5959942", "0.5952042", "0.5949968", "0.5920766", "0.5917323", "0.5917168", "0.591084", "0.5897956", "0.5890603", "0.5884979", "0.5879641", "0.5851665", "0.5839181", "0.5830206", "0.5829004", "0.5818768", "0.5792382", "0.57917666", "0.57642305", "0.57562214", "0.57279354", "0.5725814", "0.5718817", "0.57069653", "0.5702767", "0.56984746", "0.5665086", "0.5662372", "0.5656586", "0.56552947", "0.5639041", "0.5638279", "0.5593978", "0.5592852", "0.55923086", "0.5576849", "0.5575022", "0.55655557", "0.55624104", "0.5552818", "0.55499786", "0.5539786", "0.552395", "0.5513827", "0.5508377", "0.54972917", "0.54838157", "0.54811084", "0.54771143", "0.5476723", "0.5471209", "0.5468092" ]
0.8586788
0
this function gets all the admins from the database
def get_admins(self): from Employee import Employee admins = list() cursorRoles = self.dbconnect.get_cursor() cursorRoles.execute('select * from employeeRoles where role=\'admin\'') for row in cursorRoles: admins.append(self.get_employee(row[0])) return admins
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_admins():\n users = get_users()\n admins = []\n for user in users:\n if user[\"approval_level\"] == \"admin\":\n admins.append(user)\n\n return admins", "def get_admins(name):\n obj = DataService.objects(name=name).first()\n if obj is None:\n return []\n return list(obj.admins)", "def get_admins(self):\n return self.admins_group.user_set.all()", "async def _ad_list(self, ctx):\n admin_list = self.database.get_admins(ctx.guild.id)\n if len(admin_list) > 0:\n out = \"```\"\n for admin in admin_list:\n admin_name = self.bot.get_user(admin.user_id)\n admin_name = str(admin_name) if admin_name is not None else admin.user_id\n out += f\"{admin_name}\\n\"\n out += \"```\"\n await ctx.send(out)\n else:\n await ctx.send(\"This guild currently has no administrators.\")", "def get_admins(self):\n admins = User.objects.filter(Q(groups__name=self.admin_group_name()) | Q(is_superuser=True)).distinct()\n return admins", "async def _ad_all(self, ctx):\n all_admins = self.database.get_all_admins()\n consumed = []\n out = \"```\"\n for admin in all_admins:\n if admin.guild_id not in consumed:\n out += f\"Guild: {self.bot.get_guild(admin.guild_id)}\\n\"\n consumed.append(admin.guild_id)\n admin = self.bot.get_user(admin.user_id)\n admin = str(admin) if admin is not None else admin.user_id\n out += f\" {admin}\\n\"\n if out != \"```\":\n out += \"```\"\n await ctx.send(out)\n else:\n await ctx.send(\"No admins currently\")", "def get_list_of_admins() -> List[User]:\n return DBDiscussionSession.query(User).filter(User.group == Group.ADMIN).all()", "def return_admin_list(request):\n del request\n return return_user_list(Administrador)", "def __update_admin_cache(self):\n\n header = connect(self.__path)\n curs = header.cursor()\n curs.execute(\"SELECT * FROM admins WHERE id IS NOT NULL\")\n data = curs.fetchall()\n newlist = []\n for item in data:\n newlist.append(item[0])\n self.__admins = newlist", "def get_local_admins():\n admin_list = get_users_config()\n response = []\n\n if \"users\" not in admin_list[\"result\"]:\n return response\n\n if isinstance(admin_list[\"result\"][\"users\"][\"entry\"], list):\n for entry in admin_list[\"result\"][\"users\"][\"entry\"]:\n response.append(entry[\"name\"])\n else:\n response.append(admin_list[\"result\"][\"users\"][\"entry\"][\"name\"])\n\n return response", "def admins_index(_):\n return {\"admin_users\": [u.username for u in models.User.admins()]}", "def get_users_admins_list(self, session):\n\n users = session.query(User.chat_id).all()\n return users", "def get_admin_users(self):\r\n try:\r\n users = self.list_all(\"users\")\r\n users_admin = [user for user in users if user[\"role\"] == \"admin\"]\r\n return users_admin\r\n except PDClientError as e:\r\n raise e", "def get_org_admins(self, dataset: Dict) -> List[User]:\n organization_id = dataset[\"organization_id\"]\n orgadmins = list()\n organization = self.organizations[organization_id]\n if \"admin\" in organization:\n for userid in self.organizations[organization_id][\"admin\"]:\n user = self.users.get(userid)\n if user:\n orgadmins.append(user)\n return orgadmins", "def get_admins(self, uid):\n admin_data = self.list_admin_roles(uid)\n admins = []\n for admin in admin_data:\n admins.append(\n ZenossDeviceManagementAdmin(\n self.api_url,\n self.api_headers,\n self.ssl_verify,\n admin\n )\n )\n\n return admins", "def get_drink_admins(self):\n admins = self.group('drink')\n return admins", "def get_admin_users() -> User:\n return User.objects.filter(group__name__contains=\"admin\")", "def 
getPermsOfAdmin(self,request):\n request.needAuthType(request.ADMIN)\n request.checkArgs(\"admin_username\")\n admin_perms=admin_main.getLoader().getAdminByName(request[\"admin_username\"]).getPerms()\n perms_list=self.__getPermsListFromAdminPerms(admin_perms)\n sorted=SortedList(perms_list)\n sorted.sortByPostText('[\"name\"]',0)\n return sorted.getList()", "def admins(message):\n hf.query_users(message, hf.get_users(), \"admin\")", "def get_all_npf_admins(self):\n npf_admins = []\n for user in OrgUser.objects.all():\n u = OcAuth(user.id)\n if u.is_admin_org():\n npf_admins.append(user.user)\n return npf_admins", "def admin_list(message):\n load_users(message._client.users)\n names = list_to_names(user_list.admin_list)\n message.reply('My admins are: {}'.format(\", \".join(names)))", "def get_administrators(self, *args, **kwargs):\n return self.bot.get_chat_administrators(self.id, *args, **kwargs)", "def get_all_biz_admins(self):\n biz_admins = []\n for user in OrgUser.objects.all():\n u = OcAuth(user.id)\n if u.is_admin_biz():\n biz_admins.append(user.user)\n\n return biz_admins", "def admin_edit_admins():\n return user_management_handler(\"show_admin_edit_admins\", \"new_admins\", True)", "def get_all_users_for_admin_purposes(connection):\r\n with connection:\r\n return connection.execute(GET_ALL_USERS).fetchall()[1]", "def get_all_users():", "def getAdmin():", "def organization_get_admins_no_login(self, client, id):\n assert client.get('/organizations/' + id + '/admins',\n headers={}).status == '400 BAD REQUEST'", "def get_for_admin(self, admin):\n if admin.is_superuser:\n return self.get_query_set()\n return self.get_query_set().filter(owners__user=admin)", "def __reloadAdmins(self, admin_id):\n for admin_username in admin_main.getLoader().getAllUsernames():\n try:\n admin_obj=admin_main.getLoader().getAdminByName(admin_username)\n if admin_obj.creator_id == admin_id:\n admin_main.getLoader().loadAdmin(admin_obj.getAdminID())\n else:\n for lock_obj in admin_obj.getLocks():\n if lock_obj.getLockerID()==admin_id:\n admin_main.getLoader().loadAdmin(admin_obj.getAdminID())\n break\n except:\n logException(LOG_DEBUG)", "def get_all_users():\n return jsonify(admin.get_all_users(current_app.scoped_session()))", "def get_admin_list(host):\n users = query(\"$.host.'{host}'.admin\", host=host)\n if isinstance(users, (str, unicode)):\n users = users.replace(', ', ' ').replace(',', ' ').split(' ')\n return users or []", "def test_admin_user_list_all_users(self):\n response = self.client.get(CONSTS.USER_ADMIN_LIST)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data, self.users.data)", "def drinkAdmins(self):\n admins = self.group('drink')\n return admins", "def admin_con():\n user = users.get_current_user()\n if user:\n if users.is_current_user_admin() or is_local_admin():\n admins_query = Admins.query(ancestor = admin_base).order(-Admins.date)\n admins = admins_query.fetch()\n output = template('admin', name=g_name, log_in_out = users.create_logout_url('/'), opt = 'Выход', user = user.nickname(), admins = admins)\n return output\n else:\n redirect('/')\n else:\n redirect('/')", "def admin():\n aaa.require(role='admin', fail_redirect='/sorry_page')\n return dict(\n current_user=aaa.current_user,\n users=aaa.list_users(),\n roles=aaa.list_roles()\n )", "def test_admin_get_all(self):\n response = self.app.get('/api/v3/users', headers=self.admin_header)\n self.assertEqual(response.status_code, 200)", "def get_for_admin(self, admin):\n if 
admin.is_superuser:\n return self.get_queryset()\n return self.get_queryset().filter(owners__user=admin)", "def getAdminData(self):\n return getAdminData(self)", "def show_admin_edit_admins():\n return render_admin_page(\"admin-ea.html\")", "def list_users(self):\n return self.get_admin(\"users\")", "def get_gadm_list():\n cur = g.db.execute('select id_user from user_group where gadm == 1', [uid])\n gadm = [row[0] for row in cur.fetchall()]\n return gadm", "async def autorole_list(self, ctx):\n roles = await self.bot.db.execute(\n \"SELECT role_id FROM autorole WHERE guild_id = %s\",\n ctx.guild.id,\n as_list=True,\n )\n content = discord.Embed(\n title=f\":scroll: Autoroles in {ctx.guild.name}\", color=int(\"ffd983\", 16)\n )\n rows = []\n for role_id in roles:\n rows.append(f\"<@&{role_id}> [`{role_id}`]\")\n\n if not rows:\n rows = [\"No roles have been set up yet!\"]\n\n await util.send_as_pages(ctx, content, rows)", "def get(self):\n DA = DataAccessor()\n students = DA.getStudents()\n admins = DA.getAdmins()\n self.generate('manageUsers.html', {\n 'admins' : admins,\n 'students' : students\n })", "def admin_ids(self):\n # type: () -> List[int]\n return self._admin_ids", "def usersview_admin():\n\n # User objects list which includes list of all users which can be broken down into editors and sponsors\n # get all users\n user_objects=db.session.query(User.id,User.email,User.user_type,User.user_status,User.name,User.organization).\\\n order_by(User.id)\n\n # get a count of the user objects\n user_count = user_objects.count()\n\n # blank list to append to\n user_list=[]\n\n # loop through user objects\n for counter in range(0,user_count):\n user_list.append(user_objects[counter])\n\n # show list of document names\n users = user_list\n\n \"\"\"Logged-in User Dashboard.\"\"\"\n return render_template(\n 'usersview_admin.jinja2',\n users=users\n )", "def administrators(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"administrators\")", "def get_list_of_users(request):\n admin_user_list = AdminUser.objects.order_by('pk')\n paginator = Paginator(admin_user_list, 1) # Show 3 admin per page\n\n page = request.GET.get('page')\n try:\n admin_list = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n admin_list = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 
9999), deliver last page of results.\n admin_list = paginator.page(paginator.num_pages)\n context = {'admin_list': admin_list, 'page': page}\n return render(request, 'users/list_of_users.html', context)", "def user_admin_list_data():\n video = VideoFactory()\n collection = video.collection\n moira_list = factories.MoiraListFactory()\n collection.admin_lists.set([moira_list])\n return SimpleNamespace(video=video, moira_list=moira_list, collection=collection)", "def get_users_admins_name(self, session) -> Tuple[int, str, str]:\n users = (\n session.query(User.chat_id, User.first_name, User.last_name)\n .all()\n )\n return users", "def get_admin(request):\n\n jsonResp = {}\n jsonResp['admin'] = ''\n if User.objects.filter(profile = 'Admin').exists():\n mode = NameSpace.objects.get(ns_id='Human')\n name = User.objects.get(profile = 'Admin',ns_id=mode)\n admin = name.username\n jsonResp['admin'] = admin\n\n return JsonResponse(jsonResp)", "def getAllPerms(self,request):\n request.needAuthType(request.ADMIN)\n request.getAuthNameObj().canDo(\"CHANGE ADMIN PERMISSIONS\")\n all_perms_dic=perm_loader.getLoader().getAllPerms()\n if request.has_key(\"category\"):\n category=request[\"category\"]\n else:\n category=\"all\"\n all_perms_list=self.__getPermsListFromPerms(all_perms_dic,category)\n sorted=SortedList(all_perms_list)\n sorted.sortByPostText('[\"name\"]',0)\n return sorted.getList()", "def get_users(self):\r\n\t\tlogger.debug(\"Fetch users\")\r\n\t\t\r\n\t\treturn login.get_users()", "def user_list():\n if session['user_admin'] == False:\n abort(403)\n\n # Retrieve all Users\n sqa_sess = sqa_session()\n users = sqa_sess.query(User).all()\n\n return render_template('admin/user_list.html', users=users)", "def administrators(self):\n store = self['__store']\n admin_group = store.get(self.get('admin_group_id', None))\n if admin_group:\n return admin_group.name\n return 'nothing'", "def get(self):\n return get_all_users()", "def get(self):\n return get_all_users()", "def get(self):\n return get_all_users()", "def get(self):\n return get_all_users()", "def get_all_users(db):\n return list(db['user'].find())", "def allow_egap_admins(queryset, request):\n if hasattr(request, 'user') and not waffle.flag_is_active(request, EGAP_ADMINS):\n return queryset.exclude(name='EGAP Registration')\n return queryset", "def get_all_volunteers(self):\n volunteers = []\n for user in User.objects.all():\n if not OcAuth(user.id).is_admin():\n volunteers.append(user)\n return volunteers", "def test_get_all_user(self):\n response = self.client().get(AuthTestCase.admin)\n # assert the response code\n self.assertEqual(response.status_code, 200)", "def get_users(self):\n return self.execute(TABELLE['users']['select']['all'])", "def admin_can_view_all_user_accounts(self):\n resp = self.admin_create_user()\n reply = self.admin_create_user2()\n resp = self.admin_login()\n token = resp['token']\n\n resp = self.client.get(\n '/api/v1/users',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertIn('love', str(reply['users'][1]['username']))\n self.assertIn('walker', str(reply['users'][2]['username']))\n self.assertEqual(resp.status_code, 200)", "def get_users():\n return db.fetch_users()", "def get_admin_ids(bot, chat_id):\r\n return [admin.user.id for admin in bot.get_chat_administrators(chat_id)]", "def show_entries_admin():\n books = map(encrypt_book_record, Entries.query.order_by(Entries.title).all())\n if 'adminname' in session and session['adminname'] is not 
None :\n user = User.query.filter_by(username=session['adminname']).first()\n return render_template('show_entries_admin.html', books=books, user=user)\n else :\n return redirect(url_for('login'))", "async def get(\n self, administrator: Optional[bool] = None, term: Optional[str] = None\n ) -> r200[ListAdministratorResponse]:\n\n url_query = self.request.query\n\n try:\n page = int(url_query[\"page\"])\n except (KeyError, ValueError):\n page = 1\n\n try:\n per_page = int(url_query[\"per_page\"])\n except (KeyError, ValueError):\n per_page = 25\n\n return json_response(\n await get_data_from_req(self.request).administrators.find(\n page,\n per_page,\n administrator,\n term,\n )\n )", "def list_admins_by_role(self, uid, role):\n admin_data = self.list_admin_roles(uid)\n admins = []\n for admin in admin_data:\n if admin['role'] == role:\n admins.append(admin)\n\n return admins", "def __reloadUsers(self, admin_id):\n user_main.getUserPool().reloadUsersWithFilter(lambda loaded_user:loaded_user.getBasicUser().getOwnerObj().getAdminID()==admin_id)", "def get_all_groups():\n return jsonify(admin.get_all_groups(current_app.scoped_session()))", "def get_admin_ids(bot, chat_id):\n return [admin.user.id for admin in bot.get_chat_administrators(chat_id)]", "def get_admin_ids(bot, chat_id):\n return [admin.user.id for admin in bot.get_chat_administrators(chat_id)]", "def get_admin_ids(bot, chat_id):\n return [admin.user.id for admin in bot.get_chat_administrators(chat_id)]", "def list_admin_roles(self, uid):\n uid = self._check_uid(uid)\n role_data = self._router_request(\n self._make_request_data(\n 'getAdminRoles',\n data=dict(\n uid=uid\n )\n )\n )\n\n for r in role_data['data']:\n r['uid'] = r['uid'].replace('/zport/dmd/', '', 1)\n\n return role_data['data']", "def show_admins(var, wrapper, message):\n cli, nick, chan, rest = wrapper.client, wrapper.source.name, wrapper.target.name, message # FIXME: @cmd\n\n admins = []\n pl = list_players()\n\n if (wrapper.public and var.LAST_ADMINS and var.LAST_ADMINS +\n timedelta(seconds=var.ADMINS_RATE_LIMIT) > datetime.now()):\n cli.notice(nick, messages[\"command_ratelimited\"].format())\n return\n\n if wrapper.public or (var.PHASE in var.GAME_PHASES or nick in pl):\n var.LAST_ADMINS = datetime.now()\n\n if var.ADMIN_PINGING:\n return\n\n var.ADMIN_PINGING = True\n\n def admin_whoreply(event, var, chan, user):\n if not var.ADMIN_PINGING or chan is not channels.Main:\n return\n\n if is_admin(user.nick): # FIXME: Using the old interface for now; user.is_admin() is better\n if user is not users.Bot and not event.params.away:\n admins.append(user.nick) # FIXME\n\n def admin_endwho(event, var, target):\n if not var.ADMIN_PINGING or target is not channels.Main:\n return\n\n admins.sort(key=str.lower)\n\n msg = messages[\"available_admins\"] + \", \".join(admins)\n\n reply(cli, nick, chan, msg)\n\n var.ADMIN_PINGING = False\n\n who_result.remove(\"who_result\")\n who_end.remove(\"who_end\")\n\n who_result = EventListener(admin_whoreply)\n who_result.install(\"who_result\")\n who_end = EventListener(admin_endwho)\n who_end.install(\"who_end\")\n\n channels.Main.who()", "def test_admin_calendar_user_admin_list(self):\n response = self.client.get(\"/admin/auth/calendaruser/\")\n self.assertEqual(response.status_code, 200)", "def get_admins_by_role(self, uid, role):\n admin_data = self.list_admin_roles(uid)\n admins = []\n for admin in admin_data:\n if admin['role'] == role:\n admins.append(\n ZenossDeviceManagementAdmin(\n self.api_url,\n self.api_headers,\n 
self.ssl_verify,\n admin\n )\n )\n\n return admins", "def get_managers_list(self):\n try:\n role_id = [x[0] for x in self.db_handler.get_roles_list() if x[1] == 'Менеджер'][0]\n staff_by_role = self.db_handler.get_all_staff_by_role_id(role_id)\n\n self.logger.write_to_log('managers list got', 'model')\n\n return staff_by_role\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def get_info_admin(self):\n return self.get_info(\"HS_ADMIN\")", "def update_admin_ids():\n admin_emails_config = Registry.get_config_property(\n 'admin_emails')\n if not admin_emails_config:\n return []\n\n admin_ids = []\n for email in admin_emails_config.value:\n user_id = user_services.get_user_id_from_email(email)\n if user_id is not None:\n admin_ids.append(user_id)\n else:\n raise Exception('Bad admin email: %s' % email)\n return admin_ids", "def get_users(self):\n res = self.conn.cursor().execute('SELECT id,email,username FROM users')\n return res.fetchall()", "def get_users(self):\n return self.get_all_dbusers()", "def GET_adminon(self):\r\n #check like this because c.user_is_admin is still false\r\n if not c.user.name in g.admins:\r\n return self.abort404()\r\n self.login(c.user, admin = True)\r\n\r\n dest = request.referer or '/'\r\n return self.redirect(dest)", "def show_all_information():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n all_certs = employee_certification.query.all()\n \n return render_template(\"admin.html\", employees = employees, all_certs = all_certs)", "def get(self):\n users = User.query.all()\n usersJSON = []\n for u in users:\n usersJSON.append({'id':u.id, 'admin':u.admin})\n return { 'users' : usersJSON }", "def show_all_employees():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n \n \n ## right now this is ALL users... 
\n \n return render_template(\"employee_display.html\", employees = employees)", "def get_all_users(self):\n query = \"SELECT * FROM users\"\n self.cursor.execute(query)\n result = self.cursor.fetchall()\n return result", "def listusers():\n\n try:\n users = User.query.order_by(User.email).all()\n click.echo(\n tabulate(\n [\n [u.username, u.email, \"admin\" if u.is_admin else None]\n for u in users\n ]\n )\n )\n except OperationalError:\n click.echo(\"Tabela de usuários inexistente...\")", "def admin(lti=lti):\n user = db.session.query(User).filter_by(lti_user_id=lti.name).first()\n if user.id==86:\n game = app.extensions['redis'].get('game').decode('utf-8')\n params = app.extensions['redis'].get('params').decode('utf-8')\n return render_template(\"admin.html\", GameClasses=GameClasses, game=game, params=params)\n else:\n raise RequestDenied", "def all_users(self):\n\n cur = self.db.cursor()\n cur.execute(\n \"\"\"SELECT user_id, username, phone, email, role, date_created \n FROM users\"\"\")\n \n user_from_db = cur.fetchall()\n if cur.rowcount >= 1: \n resp = self.serialize_user(user_from_db) \n return resp\n return None", "def get(self):\n users = User.query.all()\n usersJSON = []\n for u in users:\n usersJSON.append({'id': u.id, 'admin': u.admin})\n return {'users': usersJSON}", "def all_users():\n\n users = crud.get_users()\n\n return render_template('all_users.html', users=users)", "def test_admin_user_list_all_users_permission_denied(self):\n self.client.logout()\n self.client.login(\n username=self.invalid_user.username,\n password=self.invalid_user.password\n )\n response = self.client.get(CONSTS.USER_ADMIN_LIST)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def users(self):\n return self.get_data(\"users\")", "def admin(request):\n\n admin = get_admin(request)\n\n # For now, admin panels always appear in ascending order\n\n model_admin_root = admin[\"models\"]\n\n # TODO: Have renderer adapters for panels, so that they can override views\n admin_panels = sorted(model_admin_root.items(), key=lambda pair: pair[1].title)\n rendered_panels = [render_panel(ma, request, name=\"admin_panel\") for id, ma in admin_panels]\n\n return dict(panels=rendered_panels)", "def getResponsibleUsers():", "def get_mentor_list():\n # Get db object and users table\n db = get_db()\n users = db.users\n \n # Search database for mentors\n cursor = users.find({\"role\": \"Mentor\"})\n \n context = {'mentors': []}\n \n for document in cursor:\n temp = document\n del temp['_id']\n context['mentors'].append(temp)\n \n context['url'] = \"/api/v1/mentors/\"\n return flask.jsonify(**context)", "def getAdminContent(self, **params):\n return getAdminContent(self, **params)" ]
[ "0.77166426", "0.76271695", "0.76095897", "0.7580871", "0.75705355", "0.7568923", "0.74572515", "0.7428086", "0.7204757", "0.7203195", "0.7175435", "0.70761865", "0.70348865", "0.70129657", "0.69840354", "0.6977274", "0.69340014", "0.6931243", "0.6885563", "0.6853746", "0.6849007", "0.68253785", "0.67971444", "0.6790082", "0.66530603", "0.6632572", "0.66026497", "0.65907925", "0.6582403", "0.65572435", "0.65320075", "0.6525977", "0.6455852", "0.64496624", "0.63758594", "0.6369238", "0.63516295", "0.6339092", "0.6322953", "0.6312598", "0.6305697", "0.6294123", "0.62181175", "0.62109154", "0.6145108", "0.61322826", "0.6129529", "0.60931855", "0.60654545", "0.60417897", "0.6028936", "0.6024431", "0.599379", "0.59912586", "0.59880066", "0.59832674", "0.59832674", "0.59832674", "0.59832674", "0.59824777", "0.59777033", "0.59701955", "0.5965554", "0.59635675", "0.5962652", "0.59516054", "0.5944245", "0.5925319", "0.5917687", "0.5903646", "0.589661", "0.58935994", "0.5877354", "0.5877354", "0.5877354", "0.5871416", "0.5861752", "0.5849091", "0.5844476", "0.5840722", "0.58341384", "0.5809869", "0.5801252", "0.5797964", "0.57956845", "0.5793882", "0.57789844", "0.5776083", "0.5774574", "0.5759463", "0.574156", "0.5737644", "0.57278925", "0.5727376", "0.5721612", "0.5721112", "0.5717963", "0.57149994", "0.5701254", "0.57005167" ]
0.81468296
0
gets a single employee out of the database by an id
def get_employee(self, id): from Employee import Employee cursor = self.dbconnect.get_cursor() cursor.execute('SELECT * FROM employee WHERE employeeID=%s ', (id,)) row = cursor.fetchone() return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, id):\n resultado = EmployeeModel.query.filter_by(employee_id=id).first()\n if resultado:\n return resultado\n api.abort(404)", "def get(id_: int):\n logger.debug('Retrieving employee by id %i.', id_)\n try:\n query = db.session.query(Employee)\n employee = query.filter(\n Employee.id == id_\n ).scalar()\n if not employee:\n raise Exception(f\"Can't get employee with id {id_}\", )\n except Exception as exception:\n logger.error('An error occurred while retrieving employee with id %i.'\n ' Exception: %s', id_, str(exception))\n db.session.rollback()\n raise\n db.session.commit()\n logger.info('Successfully retrieved employee by id %i.', id_)\n return employee", "def get_employee_by_id(employee_id):\n where = Employee.id == employee_id\n query = get_employee_query(where)\n return query.one()", "def get_employee(self, employee_id):\n cursor = self.dbconnect.get_cursor()\n\n try:\n cursor.execute('SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external,'\n ' is_admin, is_active FROM employee WHERE LOWER(id)=LOWER(%s)', (employee_id,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])\n\n except:\n self.dbconnect.rollback()\n raise", "def get_employee_by_id(self, employee_id):\n employee = self.admin_repository.get_employee_by_id(employee_id)\n if employee:\n print('''Name: {}\\nEmail: {}\\n\n '''.format(employee[0], employee[1]))\n return employee\n else:\n print(\"Invalid Id\")\n return False", "def find_employee_by_id(self,id):\n self.employee_id()\n if id in self.emp_id:\n print(self.emp_id[id])\n return self.emp_id[id]\n else:\n print(\"Employee not found\")", "def get(cls, employee_id):\n employee = EmployeeModel.find_by_id(employee_id)\n if not employee:\n return {'message': 'Employee not found, or you do not have the access'}, 404\n\n return employee.json()", "def employee_get(emp_id):\n try:\n emp = Employee.objects.get(id=emp_id)\n except Employee.DoesNotExist:\n return JsonResponse({\n 'status': False,\n 'message': 'Employee does not exists in database'\n }, status=404)\n _data = {\n 'id': emp.id,\n 'first_name': emp.first_name,\n 'last_name': emp.last_name,\n 'age': emp.age,\n 'city': emp.city.name,\n 'state': emp.state.name,\n 'country': emp.country.name\n }\n return JsonResponse(_data, safe=False)", "def get_employee(employee_id):\n\n employee = Employee.objects.get_or_404(id=employee_id)\n\n return jsonify({\n 'employee': employee\n })", "def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)", "def get_employee():\n\n employee_id = get_employee_input_int('Enter employee ID to get the data ')\n employee = db.get_employee(employee_id)\n if not employee:\n print(\"No employee found with id \", employee_id)\n else:\n payscale = db.get_payScale(employee.grade)\n print('DATA:-> {} {} has grade = {} which gives {} per hours\\n'\n .format(employee.first_name, employee.last_name, employee.grade, payscale.salary))", "def get_employee_by_name(self, name):\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external,'\n ' is_admin, is_active FROM employee WHERE name=%s', (name,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])", "def get_employee(self, name):\n name = name.upper()\n if name in 
EMPLOYEE_MAP:\n name = EMPLOYEE_MAP[name]\n try:\n int(name)\n emps = Employee.objects.filter(id=name)\n except ValueError:\n if name == 'NN':\n emps = Employee.objects.filter(user__first_name='Nieznany')\n elif Employee.objects.filter(user__username__iexact=name).exists():\n emps = Employee.objects.filter(user__username__iexact=name)\n elif len(name) == 3:\n emps = Employee.objects.filter(user__first_name__istartswith=name[0],\n user__last_name__istartswith=name[1:3],\n status=0)\n else:\n emps = Employee.objects.filter(user__first_name__istartswith=name[0],\n user__last_name__istartswith=name[1:],\n status=0)\n if not emps:\n emps = Employee.objects.filter(user__username__istartswith=name)\n if len(emps) == 1:\n return emps[0]\n elif len(emps) > 1:\n self.stdout.write(self.style.ERROR('Multiple employee matches for {}. Choices are:'\n .format(name)))\n for e in emps:\n self.stdout.write(self.style.ERROR(' -{}'.format(e.user.get_full_name())))\n else:\n raise CommandError('Employee {} does not exists! Fix your input file.'.format(name))\n\n return None", "def get_employeeOnName(self, name):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT * FROM employee WHERE name=%s ', (name,))\n if (cursor.rowcount != 0):\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])\n else:\n return None", "def employers_id_get(id): # noqa: E501\n\n\n return query_manager.get_resource(id=id,\n rdf_type_uri=EMPLOYER_TYPE_URI,\n rdf_type_name=EMPLOYER_TYPE_NAME, \n kls=Employer)", "def lookup(cls, id: int):\n record = query_db(\n \"select id, amount, description, user_id from expenses where id = ?\",\n [id],\n one=True,\n )\n if record is None:\n raise NotFound()\n return cls(**record)", "def get(self,id):\r\n person = get_one(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person", "def test_api_can_get_employee_by_id(self):\n res = self.client().get(service_url_emp+'/1')\n self.assertEqual(res.status_code, 200)\n self.assertIn('name1', str(res.data))", "def retrieve(self, request, pk=None):\n employee = self.get_employee_object(pk)\n print(F\"Employee: {employee}\")\n serializer = data_serializers.PresentEmployeeDataSerializer(employee)\n return Response(serializer.data)", "def get_examen(self, id_examen):\n\n self.logger.info(\"\\t[+] get_examen [+]\")\n self.logger.info(f\"\\t[+] id_examen {id_examen} [+]\")\n try:\n return self.examens.select().where(self.examens.columns.id_examen == id_examen).execute()\n except Exception as e:\n self.logger.critical(\"\\t[-] Exception occured [-]\")\n self.logger.critical(\"\\t\" + str(e))\n self.logger.critical(\"\\t[-] Exception occured [-]\")", "def get_employee_training(employee_id):\n with sqlite3.connect(Connection.db_path) as conn:\n conn.row_factory = model_factory(TrainingProgramEmployee)\n\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n SELECT\n *\n FROM\n hrapp_trainingprogramemployee te\n WHERE\n te.employee_id = ?\n \"\"\", (employee_id, ))\n\n return db_cursor.fetchall()", "def getbyid(self, id):\n\n return esd.retrieve(id)", "def get_by_name(name: str):\n logger.debug('Retrieving employee by name %s.', name)\n try:\n query = db.session.query(Employee)\n employee = query.filter(\n Employee.name == name\n ).scalar()\n except Exception as exception:\n logger.error('An error occurred while retrieving employee by name %s.'\n ' Exception: %s', name, str(exception))\n db.session.rollback()\n raise\n db.session.commit()\n 
logger.info('Successfully retrieved employee by name %s.', name)\n return employee", "def get(self,id):\r\n person = get_one_by_persons_id(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person", "def find_by_id(id: int):\n exercise = Exercise.try_find_by_id(id)\n if not exercise:\n raise NotFound(EXERCISE_NOT_FOUND_MSG)\n return exercise", "def get(self, id):\n tmp = userDao.get_one_entry(id)\n return tmp", "def get(self, uuid: str):\n try:\n employee = self.service.get_employee_by_uuid(uuid)\n except ValueError:\n return self.NOT_FOUND_MESSAGE, 404\n return self.schema.dump(employee), 200", "def get_entry_by_id(model, id):\n print(model, id)\n return db.session.query(model).filter_by(id=id).first()", "def is_manager(employee_id: int) -> Optional[str]:\n\n conn = database_connect()\n if(conn is None):\n return None\n cur = conn.cursor()\n\n try:\n # SQL statement and execute\n sql = \"\"\"SELECT Department.name\n FROM Employee JOIN Department ON(Employee.empid = Department.manager)\n WHERE Employee.empid = %s\"\"\"\n cur.execute(sql, (employee_id,))\n\n # Attempt to fetch first row\n result = cur.fetchone()\n\n # If nothing is fetched\n if result == None:\n cur.close()\n conn.close()\n return result\n\n\n cur.close()\n conn.close()\n return result[0]\n except Exception as e:\n # If something went really wrong\n print(\"bbb\")\n print(e)\n cur.close()\n conn.close()\n return None", "def show_employee(emp_id, fields=None):\n ret = {}\n if fields is None:\n fields = \",\".join(\n (\n \"canUploadPhoto\",\n \"department\",\n \"displayName\",\n \"firstName\",\n \"id\",\n \"jobTitle\",\n \"lastName\",\n \"location\",\n \"mobilePhone\",\n \"nickname\",\n \"photoUploaded\",\n \"photoUrl\",\n \"workEmail\",\n \"workPhone\",\n \"workPhoneExtension\",\n )\n )\n\n status, result = _query(action=\"employees\", command=emp_id, args={\"fields\": fields})\n\n root = ET.fromstring(result)\n\n ret = {\"id\": emp_id}\n for item in root:\n ret[next(iter(item.values()))] = item.text\n return ret", "def get_employee(self):\n employee_ids = self.env['hr.employee'].search([('user_id', '=', self.env.uid)])\n return employee_ids[0] if employee_ids else False", "def get_employee_by_uuid(uuid):\n employee = db.session.query(Employee).filter_by(uuid=uuid).first()\n if employee is None:\n raise ValueError('Invalid employee uuid')\n return employee", "def get(self):\n resultado = EmployeeModel.query.all()\n return resultado", "def find(self, id):\n return self._select_one('''\n select\n *\n from\n {table}\n where\n {primary_key} = %s\n '''.format(table=self.__class__._table,\n primary_key=self.__class__._primary_key), [id])", "def get(cls, id):\n\n return cls.query.get(id)", "def get(cls, id):\n\n return cls.query.get(id)", "def getEquipmentByEquipmentId(equipment_id):\r\n db = db_helpers.getDbCon()\r\n cursor = db.cursor()\r\n userEquipmentInsertQuery = \"SELECT * FROM equipment WHERE equipment_id =%s\"\r\n try:\r\n cursor.execute(userEquipmentInsertQuery, (equipment_id,))\r\n equipment = cursor.fetchall()\r\n return equipment\r\n except Exception:\r\n print('Error: OOPs something went wrong while getting the equipment by equipment id!')\r\n finally:\r\n cursor.close()\r\n db.close()", "def get(self, id):\n return Entry.query.filter(Entry.id == id).one()", "def get_by_id(self, id):\n return Entry.all().filter('entry_id = ', id).get()", "def get(id):\n return User.query.filter_by(id=id).first()", "def find_by_id(id):\n query = \"SELECT * FROM parcels WHERE id=%s\"\n return db.find_one(query, (id,))", 
"def get(self, id):\n return self.__model__.query.get(id)", "def get_by_id(cls, id):\n return db.session.query(cls).get(id)", "def get_by_id(cls, id):\n return cls.query().get(id)", "def employees(employee_id=None):\n\tif not employee_id:\n\t\temployee_data = _serialize_list(Employee.query.all())\n\telse:\n\t\temployee_data = _serialize_model(Employee.query.filter_by(id=employee_id).first())\n\n\tresp = jsonify(employee_data)\n\treturn resp", "def find_by_id(db, model, id, *, worker_task=False):\n item = db.query(model).get(id)\n if not item:\n if worker_task:\n raise RuntimeError(\"could not find\") # TODO pick better exception\n raise HTTPError(falcon.HTTP_404, errors={\"id\": \"does not exist\"})\n return item", "def get_details(office_id):\n\n office = OfficeModel()\n office_exists = office.get_one(office_id)\n print(office)\n if office_exists is not None:\n return make_response(jsonify(\n {'status': 200, 'data': office.sub_set()}\n ), 200)\n\n return make_response(jsonify(\n {'status': 404,\n \"error\": 'Office with id {} not found'.format(office_id)}\n ), 404)", "def __get_one_by_id(\n self, table_name: str, id_name: str, db_id: str\n ) -> Mapping[str, Any]:\n with self.table_access_condition:\n conn = self._get_connection()\n c = conn.cursor()\n c.execute(\n f\"\"\"\n SELECT * FROM {table_name}\n WHERE ({id_name} = ?)\n \"\"\",\n (int(db_id),),\n )\n results = c.fetchall()\n if len(results) != 1:\n raise EntryDoesNotExistException(\n f\"Table {table_name} has no {id_name} {db_id}\"\n )\n return results[0]", "def get_person(self, id):\n PERSON = \"\"\"SELECT name FROM Person\n WHERE id = %s\"\"\"\n\n ret = None\n try:\n self.db_cursor.execute(\"\"\"SELECT name, id FROM Person WHERE id = %s\"\"\", (id,))\n self.db_cursor.execute(PERSON, (id,))\n self.db_connection.commit()\n p_attribs = self.db_cursor.fetchall()\n ret = Person()\n ret.name = p_attribs[0][0]\n ret.id = id\n\n except:\n logging.warning(\"DBAdapter: Error- cannot retrieve person: \" + str(id))\n return None\n\n return ret", "def find_employee_id(self,name):\n nam = list(self.emp_id.values())\n val = nam.index(name)\n ids = list(self.emp_id.keys())\n id = ids[val]\n return id", "def get_by_id(cls, id):\n try:\n return cls.objects.get(id=id)\n except(IntegrityError, OperationalError):\n return None", "def find(cls, id=None):\n return cls.query.filter_by(id=id).one_or_none()", "def get(self, _id):", "def fetch_by_id(self, person_id: int) -> PersonModel:\n person_db_model = PersonDBModel.query.get(person_id)\n if not person_db_model:\n raise PersonNotFound(person_id)\n person = PersonModel.from_db(person_db_model)\n self.logger.info(f'Successfully fetched Person {person.first_name} {person.last_name} by ID {person_id}')\n return person", "def employee_works_in(employee_id: int) -> List[str]:\n\n conn = database_connect()\n if(conn is None):\n return None\n cur = conn.cursor()\n\n try:\n # SQL statement and execute\n sql = \"\"\"SELECT department\n FROM EmployeeDepartments\n WHERE EmployeeDepartments.empid = %s\"\"\"\n cur.execute(sql, (employee_id,));\n\n # Attempt to fetch all\n result = cur.fetchall()\n\n if result == None:\n cur.close()\n conn.close()\n return []\n\n departments = []\n for row in result:\n departments.append(\n row[0]\n )\n\n cur.close()\n conn.close()\n return departments\n except Exception as e:\n print(\"ddd\")\n print(e)\n # If login failed, return None\n cur.close()\n conn.close()\n return []", "def fetch_obj(type, id, error=404, new_id=False):\n if id is None:\n abort(error)\n obj_q = 
Session.query(type)\n obj = obj_q.get(int(id))\n #else:\n # obj = obj_q.filter(type.ID==int(id)).first()\n\n if obj is None:\n abort(error)\n return obj", "async def find_by_id(self, _id: int) -> Record:\n conn: Connection\n async with self.db_pool.acquire() as conn:\n return await conn.fetchrow(\n f\"SELECT * FROM {self.table_name} WHERE {self.primary_key}=$1\",\n _id,\n )", "def get_object(id):", "def _get(self, id_: str) -> Union[DBModelInstance, NoReturn]:\n record = self.model.query.get(id_)\n if record:\n return record\n else:\n # raise error to correct handling wrong inputted params\n raise ServiceBadRequest()", "def get_one_by_id(self, object, id):\n self.lock.acquire()\n result = self.__Session.query(object).get(id)\n self.lock.release()\n return result", "def get(cls,id):\n result = execute_query(\"\"\"SELECT * FROM Users Where username = ?\n \"\"\",\n [id])\n try:\n user = User(id,result[0][1])\n except Exception as e:\n return None\n \n return user", "def get_by_id(self, id):\n return self.session.query(User).filter_by(id=id).first()", "def retrieve(self, request, pk=None):\n try:\n team_employee = self.get_team_employee_object(pk)\n serializer = data_serializers.PresentTeamEmployeeDataSerializer(team_employee)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except (\n domain_exceptions.TeamDoesNotExist\n )as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)", "def find_by_id(cls, iid: int):\n return cls.query.filter_by(id=iid).first()", "def get_exercise(name):\n # Get db object and exercises table\n db = get_db()\n exercises = db.exercises\n \n # Search database for exercises with matching name\n cursor = exercises.find({\"name\": str(name)})\n if cursor.count() is 0:\n raise APIException(status_code=404, message='exercise with specified name not found')\n \n context = {}\n for document in cursor:\n temp = document\n temp['exercise_id'] = str(document['_id'])\n del temp['_id']\n context = temp\n \n context['url'] = \"/api/v1/exercises/\" + name + \"/\"\n return flask.jsonify(**context)", "def get_by_id(cls, id):\n e = api.get([key.Key(cls.__name__, id)])\n if e:\n return cls.from_entity(e[0])\n raise ObjectDoesNotExist", "def get_by_id(self, expense_id):\n return self._get_request({}, Expenses.GET_EXPENSE_BY_ID.format(expense_id))", "def by_id(cls, id):\n try:\n return DBSession.query(cls).filter(cls.id == id).one()\n except (NoResultFound, MultipleResultsFound):\n return None", "def get(self):\n employees = self.service.get_employees(strategy=selectinload)\n return self.schema.dump(employees, many=True), 200", "def get_by_id(self, pkId: int):\n if not self.model:\n raise NameError('database model has not been set.')\n if not pkId:\n raise ValueError('invalid primary key value.')\n\n with self.session() as session:\n query = self.get_query(session)\n rec = query.get(pkId)\n return rec", "def get_event_eid(eid):\n return EventModel.query.get_or_404(eid)", "def lookup_employee():\n unique_names = get_unique_employees()\n while True:\n if len(unique_names) > 1:\n print('Entries found by {} and {}.'.format(\n ', '.join(unique_names[:-1]),\n unique_names[-1]))\n elif len(unique_names) == 1:\n print('Entries found by {}.'.format(unique_names[0]))\n\n search_query = input('Show entries by: ')\n if validate_lookup_employee_format(search_query):\n break\n print('** Please enter a name of alphabetic characters and spaces **')\n return Entry.select().where(Entry.employee_name == search_query)", "def add_employee(self, obj):\n cursor = 
self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employee(id, name, email, office, extra_info, picture_location, research_group, '\n 'title, is_external, is_admin, is_active) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);',\n (obj.e_id, obj.name, obj.email, obj.office, obj.extra_info, obj.picture_location, obj.research_group,\n obj.title, obj.is_external, obj.is_admin, obj.is_active))\n\n self.dbconnect.commit()\n return obj\n except:\n self.dbconnect.rollback()\n raise", "def get_hikedetails_by_id(hike_id):\n\n return Hike.query.get(hike_id)", "def employee(self) -> object:\n return self._employee", "def set_employees_by_id(department_id):\n return Employee.query.filter_by(department_id=department_id)", "def get_by_id(oai_data_id):\n try:\n return OaiData.objects.get(pk=str(oai_data_id))\n except ObjectDoesNotExist as exception:\n raise exceptions.DoesNotExist(str(exception))\n except Exception as ex:\n raise exceptions.ModelError(str(ex))", "def get(cls, pk):\n return DBSession().query(cls).get(pk)", "def get(self, id):\n adm = Administration()\n pers = adm.get_person_by_id(id)\n return pers", "def select_single(self, user_id, id=None, name=None):\n self.cur.execute(' \\\n SELECT `id`, \\\n `user_id`, \\\n `name`, \\\n `description`, \\\n `house_id`, \\\n (SELECT `name` FROM `Location` AS `B` WHERE (`B`.`id` = `A`.`house_id`)) AS `house` \\\n FROM `Location` AS `A` \\\n WHERE `user_id` = ? \\\n AND ((`id` = ?) OR (? IS NULL)) \\\n AND ((`name` = ?) OR (? IS NULL))',\n (user_id, id, id, name, name))\n result = self.cur.fetchone()\n return result", "def find_by_id(cls, _id):\n user = cls.query.filter_by(id=_id).first()\n return user", "def get_event_by_id(event_id):\n db = get_db()\n return db.execute((\n 'SELECT id, name, start_time, end_time, location '\n 'FROM event WHERE id=?'),\n (event_id,)).fetchone()", "async def get_one(self, pk):\n\n return await self._expand(await self.db.get_one(pk=pk))", "def getByID(session, id):\n return session.query(User).filter(User.id == id).first()", "def get(self, identifier):\n fields = \",\".join(self.model.get_fields_name())\n query = \"select {0} from {1} where {2}=?\".format(\n fields,\n self.ressource_config[\"table\"],\n self.model.pk_field.name)\n cursor = self.get_connector().cursor()\n cursor.execute(query, (identifier,))\n obj = cursor.fetchone()\n\n if obj:\n fields = self.model.get_fields_name()\n return dict(zip(fields, obj))\n else:\n raise NotFound", "def get(self, query_data=None, id_obj=None):\n if id_obj:\n return self.collection.find_one({'_id': id_obj})\n return self.collection.find_one(query_data)", "def get_one(self, index, *args, **kw):\n person = M.People.query.get(index=index)\n log.debug('person {}'.format(person))\n if(person):\n kw['_id'] = person._id\n return super(PeopleAPIController, self).get_one(*args, **kw)", "def get_object(self, id_):\n return self._objects.get(id_, None)", "def get_single_user(self, id):\n for user in self.users:\n if user['id'] == id:\n return user", "def get_user(id):\n url = 'https://jsonplaceholder.typicode.com/'\n user = requests.get(url + 'users', params={'id': id}).json()\n return user", "def by_id(cls, _id):\n return dbsession.query(cls).filter_by(id=_id).first()", "def by_id(cls, _id):\n return dbsession.query(cls).filter_by(id=_id).first()", "def by_id(cls, _id):\n return dbsession.query(cls).filter_by(id=_id).first()", "def show_department(id_: int):\n\n logger.debug('Routed to /departments/%i', id_)\n titles = ['Name', 'Average Salary', 'Employees', 'E-mail']\n department = 
None\n\n try:\n department = ds.get(id_)\n except IntegrityError:\n logger.error(\"Can't find employee with id %i\", id_)\n abort(404)\n\n logger.info('Get department %s', department.name)\n return render_template('department.html',\n title=f'Department {department.name}',\n table_title=f'Department: {department.name}',\n headers=titles,\n department=department)", "def read_item(id: str, request: Request):\n obj = db.get(id, kind=endpoint_model)\n return obj", "def get_employee_by_name(self, name):\n self.lock.acquire()\n for employee in self.__Session.query(Employee).all():\n if (employee.fname+' '+employee.lname == name):\n result = employee\n self.lock.release()\n return result", "def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")", "def retrieve(self, request, pk=None):\n\n try:\n expense = Expenses.objects.get(pk=pk)\n serializer = ExpenseSerializer(\n expense, context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)", "def get_by_eid(cls, eid):\r\n results = execute_query('g.e(eid)', {'eid':eid})\r\n if not results:\r\n raise cls.DoesNotExist\r\n return Element.deserialize(results[0])", "def getEntityByID (self, ID, fail=False):\n r = self.find(Or(Equal('DPID', ID),Equal(F('ID'), ID)))\n if len(r) == 0:\n if fail:\n raise RuntimeError(\"No entity with ID \" + str(ID))\n else:\n return None\n assert len(r) == 1\n return r[0]" ]
[ "0.8462909", "0.8436909", "0.8286135", "0.8277989", "0.795276", "0.7786417", "0.75786275", "0.7489491", "0.7484681", "0.73151207", "0.7233033", "0.6973724", "0.6964095", "0.6891466", "0.68717116", "0.6806852", "0.66826135", "0.6661457", "0.6648924", "0.6645056", "0.66137886", "0.6545444", "0.6531044", "0.6497339", "0.64534837", "0.64507073", "0.6445847", "0.63906133", "0.635961", "0.63405216", "0.6334209", "0.6313872", "0.628012", "0.6250437", "0.62362814", "0.62362814", "0.62348473", "0.62184465", "0.6214663", "0.62082785", "0.619414", "0.61793065", "0.6171613", "0.6155878", "0.61532867", "0.6094363", "0.6091962", "0.60900545", "0.6072143", "0.60562927", "0.60476786", "0.60374784", "0.6035425", "0.6028484", "0.60119444", "0.6008506", "0.6002725", "0.59647304", "0.5957524", "0.59497255", "0.5941679", "0.5935031", "0.5932459", "0.5924172", "0.5913995", "0.58937013", "0.5889714", "0.5886167", "0.5880429", "0.58759594", "0.58646446", "0.58580476", "0.5857806", "0.5847375", "0.5840064", "0.5839764", "0.5830226", "0.5829853", "0.58294207", "0.5822635", "0.58219", "0.58165395", "0.57990533", "0.57988536", "0.57978576", "0.5788366", "0.5779508", "0.5767256", "0.57645553", "0.5747483", "0.5745626", "0.5745626", "0.5745626", "0.57384104", "0.5734218", "0.57291734", "0.5721869", "0.5716902", "0.57104605", "0.5703787" ]
0.8603124
0
gets a single employee out of the database by a name
def get_employeeOnName(self, name): from Employee import Employee cursor = self.dbconnect.get_cursor() cursor.execute('SELECT * FROM employee WHERE name=%s ', (name,)) if (cursor.rowcount != 0): row = cursor.fetchone() return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8]) else: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_employee_by_name(self, name):\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external,'\n ' is_admin, is_active FROM employee WHERE name=%s', (name,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])", "def get_by_name(name: str):\n logger.debug('Retrieving employee by name %s.', name)\n try:\n query = db.session.query(Employee)\n employee = query.filter(\n Employee.name == name\n ).scalar()\n except Exception as exception:\n logger.error('An error occurred while retrieving employee by name %s.'\n ' Exception: %s', name, str(exception))\n db.session.rollback()\n raise\n db.session.commit()\n logger.info('Successfully retrieved employee by name %s.', name)\n return employee", "def get_employee(self, id):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT * FROM employee WHERE employeeID=%s ', (id,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])", "def get_employee(self, name):\n name = name.upper()\n if name in EMPLOYEE_MAP:\n name = EMPLOYEE_MAP[name]\n try:\n int(name)\n emps = Employee.objects.filter(id=name)\n except ValueError:\n if name == 'NN':\n emps = Employee.objects.filter(user__first_name='Nieznany')\n elif Employee.objects.filter(user__username__iexact=name).exists():\n emps = Employee.objects.filter(user__username__iexact=name)\n elif len(name) == 3:\n emps = Employee.objects.filter(user__first_name__istartswith=name[0],\n user__last_name__istartswith=name[1:3],\n status=0)\n else:\n emps = Employee.objects.filter(user__first_name__istartswith=name[0],\n user__last_name__istartswith=name[1:],\n status=0)\n if not emps:\n emps = Employee.objects.filter(user__username__istartswith=name)\n if len(emps) == 1:\n return emps[0]\n elif len(emps) > 1:\n self.stdout.write(self.style.ERROR('Multiple employee matches for {}. Choices are:'\n .format(name)))\n for e in emps:\n self.stdout.write(self.style.ERROR(' -{}'.format(e.user.get_full_name())))\n else:\n raise CommandError('Employee {} does not exists! 
Fix your input file.'.format(name))\n\n return None", "def get_employee():\n\n employee_id = get_employee_input_int('Enter employee ID to get the data ')\n employee = db.get_employee(employee_id)\n if not employee:\n print(\"No employee found with id \", employee_id)\n else:\n payscale = db.get_payScale(employee.grade)\n print('DATA:-> {} {} has grade = {} which gives {} per hours\\n'\n .format(employee.first_name, employee.last_name, employee.grade, payscale.salary))", "def get_employee(self, employee_id):\n cursor = self.dbconnect.get_cursor()\n\n try:\n cursor.execute('SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external,'\n ' is_admin, is_active FROM employee WHERE LOWER(id)=LOWER(%s)', (employee_id,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])\n\n except:\n self.dbconnect.rollback()\n raise", "def get(id_: int):\n logger.debug('Retrieving employee by id %i.', id_)\n try:\n query = db.session.query(Employee)\n employee = query.filter(\n Employee.id == id_\n ).scalar()\n if not employee:\n raise Exception(f\"Can't get employee with id {id_}\", )\n except Exception as exception:\n logger.error('An error occurred while retrieving employee with id %i.'\n ' Exception: %s', id_, str(exception))\n db.session.rollback()\n raise\n db.session.commit()\n logger.info('Successfully retrieved employee by id %i.', id_)\n return employee", "def lookup_employee():\n unique_names = get_unique_employees()\n while True:\n if len(unique_names) > 1:\n print('Entries found by {} and {}.'.format(\n ', '.join(unique_names[:-1]),\n unique_names[-1]))\n elif len(unique_names) == 1:\n print('Entries found by {}.'.format(unique_names[0]))\n\n search_query = input('Show entries by: ')\n if validate_lookup_employee_format(search_query):\n break\n print('** Please enter a name of alphabetic characters and spaces **')\n return Entry.select().where(Entry.employee_name == search_query)", "def get_employee_by_id(employee_id):\n where = Employee.id == employee_id\n query = get_employee_query(where)\n return query.one()", "def get_employee_by_name(self, name):\n self.lock.acquire()\n for employee in self.__Session.query(Employee).all():\n if (employee.fname+' '+employee.lname == name):\n result = employee\n self.lock.release()\n return result", "def get(self, id):\n resultado = EmployeeModel.query.filter_by(employee_id=id).first()\n if resultado:\n return resultado\n api.abort(404)", "def is_manager(employee_id: int) -> Optional[str]:\n\n conn = database_connect()\n if(conn is None):\n return None\n cur = conn.cursor()\n\n try:\n # SQL statement and execute\n sql = \"\"\"SELECT Department.name\n FROM Employee JOIN Department ON(Employee.empid = Department.manager)\n WHERE Employee.empid = %s\"\"\"\n cur.execute(sql, (employee_id,))\n\n # Attempt to fetch first row\n result = cur.fetchone()\n\n # If nothing is fetched\n if result == None:\n cur.close()\n conn.close()\n return result\n\n\n cur.close()\n conn.close()\n return result[0]\n except Exception as e:\n # If something went really wrong\n print(\"bbb\")\n print(e)\n cur.close()\n conn.close()\n return None", "def get_employee_by_id(self, employee_id):\n employee = self.admin_repository.get_employee_by_id(employee_id)\n if employee:\n print('''Name: {}\\nEmail: {}\\n\n '''.format(employee[0], employee[1]))\n return employee\n else:\n print(\"Invalid Id\")\n return False", "def employee(employee_id):\n # gather data from db about all 
employees\n return render_template(\"employee.html\",\n employee_id=employee_id)", "def get(self, name, user):\n connection = self.connect()\n cursor = connection.cursor()\n cursor.execute(self.sql[\"get\"], {\"name\": name, \"user\": user})\n result = cursor.fetchone()\n if result is not None:\n return result[0].split()\n else:\n raise DoesNotExistException(\n \"Could not find an applicable saved roll with that name.\"\n )", "def get_exercise(name):\n # Get db object and exercises table\n db = get_db()\n exercises = db.exercises\n \n # Search database for exercises with matching name\n cursor = exercises.find({\"name\": str(name)})\n if cursor.count() is 0:\n raise APIException(status_code=404, message='exercise with specified name not found')\n \n context = {}\n for document in cursor:\n temp = document\n temp['exercise_id'] = str(document['_id'])\n del temp['_id']\n context = temp\n \n context['url'] = \"/api/v1/exercises/\" + name + \"/\"\n return flask.jsonify(**context)", "def find_employee_by_id(self,id):\n self.employee_id()\n if id in self.emp_id:\n print(self.emp_id[id])\n return self.emp_id[id]\n else:\n print(\"Employee not found\")", "def get(self):\n resultado = EmployeeModel.query.all()\n return resultado", "def query_by_name(name):\n\tstudent = session.query(Student).filter_by(\n\t\tname=name).first()\n\treturn student", "def get_employee_training(employee_id):\n with sqlite3.connect(Connection.db_path) as conn:\n conn.row_factory = model_factory(TrainingProgramEmployee)\n\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n SELECT\n *\n FROM\n hrapp_trainingprogramemployee te\n WHERE\n te.employee_id = ?\n \"\"\", (employee_id, ))\n\n return db_cursor.fetchall()", "def get_employee(employee_id):\n\n employee = Employee.objects.get_or_404(id=employee_id)\n\n return jsonify({\n 'employee': employee\n })", "def employee_get(emp_id):\n try:\n emp = Employee.objects.get(id=emp_id)\n except Employee.DoesNotExist:\n return JsonResponse({\n 'status': False,\n 'message': 'Employee does not exists in database'\n }, status=404)\n _data = {\n 'id': emp.id,\n 'first_name': emp.first_name,\n 'last_name': emp.last_name,\n 'age': emp.age,\n 'city': emp.city.name,\n 'state': emp.state.name,\n 'country': emp.country.name\n }\n return JsonResponse(_data, safe=False)", "def getByName( self, people_name ):\n qry = \"\"\"SELECT * FROM `%s`.`people` WHERE `name` = \"%s\"; \"\"\" % ( self.db_name, Mysql.escape_string( person_name ) )\n person = Mysql.ex( qry )\n if len( person ) == 0:\n return False\n return person[0]", "def find_employee_id(self,name):\n nam = list(self.emp_id.values())\n val = nam.index(name)\n ids = list(self.emp_id.keys())\n id = ids[val]\n return id", "def by_name(cls, name):\n return dbsession.query(cls).filter_by(_name=str(name)).first()", "def by_name(cls, name):\n return dbsession.query(cls).filter_by(_name=str(name)).first()", "def by_name(cls, name):\n return dbsession.query(cls).filter_by(_name=str(name)).first()", "def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")", "def get_examen(self, id_examen):\n\n self.logger.info(\"\\t[+] get_examen [+]\")\n self.logger.info(f\"\\t[+] id_examen {id_examen} [+]\")\n try:\n return self.examens.select().where(self.examens.columns.id_examen == id_examen).execute()\n except Exception as e:\n self.logger.critical(\"\\t[-] Exception occured [-]\")\n self.logger.critical(\"\\t\" + str(e))\n self.logger.critical(\"\\t[-] Exception occured [-]\")", "def __get_one_by_id(\n 
self, table_name: str, id_name: str, db_id: str\n ) -> Mapping[str, Any]:\n with self.table_access_condition:\n conn = self._get_connection()\n c = conn.cursor()\n c.execute(\n f\"\"\"\n SELECT * FROM {table_name}\n WHERE ({id_name} = ?)\n \"\"\",\n (int(db_id),),\n )\n results = c.fetchall()\n if len(results) != 1:\n raise EntryDoesNotExistException(\n f\"Table {table_name} has no {id_name} {db_id}\"\n )\n return results[0]", "def read_one(lname):\n # Does the person exist in people?\n if lname in PEOPLE:\n person = PEOPLE.get(lname)\n\n # otherwise, nope, not found\n else:\n abort(\n 404, \"Person with last name {lname} not found\".format(lname=lname)\n )\n\n return person", "def get_employees(self):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('select * from employee')\n\n employees = list()\n for row in cursor:\n employee = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])\n employees.append(employee)\n return employees", "def _byname(self, name):\n query = \"\"\"SELECT * \n FROM ppmxl \n WHERE id = '%s';\"\"\" % name\n result = self.corot.query(query)\n return result", "def get_person(self, id):\n PERSON = \"\"\"SELECT name FROM Person\n WHERE id = %s\"\"\"\n\n ret = None\n try:\n self.db_cursor.execute(\"\"\"SELECT name, id FROM Person WHERE id = %s\"\"\", (id,))\n self.db_cursor.execute(PERSON, (id,))\n self.db_connection.commit()\n p_attribs = self.db_cursor.fetchall()\n ret = Person()\n ret.name = p_attribs[0][0]\n ret.id = id\n\n except:\n logging.warning(\"DBAdapter: Error- cannot retrieve person: \" + str(id))\n return None\n\n return ret", "def get(self, name_or_id):\n \n r = self.library.database.get_name(name_or_id)\n\n if not r[0]:\n r = self.library.database.get_id(name_or_id)\n \n return r", "def get(cls, employee_id):\n employee = EmployeeModel.find_by_id(employee_id)\n if not employee:\n return {'message': 'Employee not found, or you do not have the access'}, 404\n\n return employee.json()", "def get_by_name(cls, name):\n return cls.query.filter(cls.name == name).first()", "def get(self, table, entry_name):\n data = Select('*', table, name=entry_name).one()\n if data is None:\n return {\"Error\": f\"{entry_name} not found in {table}. 
\"\n f\"Try this instead: {NS.search._path}/{table}/{entry_name}\"\n }, 404\n elif \"Error\" in str(data):\n return data, 500\n return data", "def get_employee(self):\n employee_ids = self.env['hr.employee'].search([('user_id', '=', self.env.uid)])\n return employee_ids[0] if employee_ids else False", "def get(self):\n employees = self.service.get_employees(strategy=selectinload)\n return self.schema.dump(employees, many=True), 200", "def get_elo_from_db(player: str):\n with open('db.json') as fo:\n data = loads(fo.read())\n\n return data[player]", "def get_entry(self, name):\n return self.__datacatalog.get_entry(name=name)", "def employee_works_in(employee_id: int) -> List[str]:\n\n conn = database_connect()\n if(conn is None):\n return None\n cur = conn.cursor()\n\n try:\n # SQL statement and execute\n sql = \"\"\"SELECT department\n FROM EmployeeDepartments\n WHERE EmployeeDepartments.empid = %s\"\"\"\n cur.execute(sql, (employee_id,));\n\n # Attempt to fetch all\n result = cur.fetchall()\n\n if result == None:\n cur.close()\n conn.close()\n return []\n\n departments = []\n for row in result:\n departments.append(\n row[0]\n )\n\n cur.close()\n conn.close()\n return departments\n except Exception as e:\n print(\"ddd\")\n print(e)\n # If login failed, return None\n cur.close()\n conn.close()\n return []", "def get_employees_in_department(department_name: str) -> list:\n\n conn = database_connect()\n if(conn is None):\n return None\n cur = conn.cursor()\n\n try:\n # SQL statement and execute\n sql = \"\"\"SELECT Employee.empid, Employee.name\n FROM Employee JOIN EmployeeDepartments USING(empid)\n WHERE EmployeeDepartments.department = %s\"\"\"\n cur.execute(sql, (department_name,))\n\n # Attempt to fetch all rows\n result = cur.fetchall()\n\n if result == None:\n cur.close()\n conn.close()\n return []\n\n employees = []\n for row in result:\n employees.append(\n [row[0], row[1]]\n )\n cur.close()\n conn.close()\n return employees\n except Exception as e:\n print(\"ooo\")\n print(e)\n # If nothing was returned, return empty list\n cur.close()\n conn.close()\n return []\n\n # TODO Dummy Data - Change to be useful!\n # Return the employees in the department.\n # Each \"row\" has: [ empid, name ]\n\n # employees = [\n # [15905, 'Rea Fibbings'],\n # [9438, 'Julia Norville'],\n # [36020, 'Adora Lansdowne'],\n # [98809, 'Nathanial Farfoot'],\n # [58407, 'Lynne Smorthit'],\n # ]\n #\n # return employees", "def _eidlookup(eid, splitname=False):\n\n with sqlite3.connect(DB) as conn:\n cursor = conn.cursor()\n if splitname:\n cursor.execute(\"SELECT lastname, firstname FROM players WHERE eid=?\", (eid,))\n else: # grab fullname.\n cursor.execute(\"SELECT fullname FROM players WHERE eid=?\", (eid,))\n row = cursor.fetchone()\n if row:\n if splitname:\n return (\"{0}, {1}\".format(str(row[0]), str(row[1])))\n else:\n return (str(row[0]))\n else:\n return None", "def fetch_user_id(name):\n queryset = run_query(f\"SELECT id from users where name='{name}'\")\n output = queryset.fetchone()\n return output[0] if output else None", "def career():\r\n cursor.execute('SELECT name FROM careers order by RANDOM() limit 1;')\r\n return cursor.fetchone()[0]", "def get_record(self, collection_name, filter):\n\n try:\n self.logger.info('in get_record()')\n collection = self.get_db()[collection_name]\n record = collection.find_one(filter)\n self.logger.info('in get_record()')\n return record\n except Exception as e:\n self.logger.error(f'Error occurred while getting records {e}')", "def get_by_name(*, db_session, 
incident_name: str) -> Optional[Incident]:\n return db_session.query(Incident).filter(Incident.name == incident_name).first()", "def searchByName(database):\n firstname=str(input(\"What is his first name :\"))\n usr,find=getByName(database,firstname)\n if find:\n print(usr)", "def getPlayerIDFromName(name):\n\n # Connect to the database.\n conn, c = main.connect()\n\n # Select the player that matches the name.\n SQL = \"SELECT playerID FROM player WHERE playerName=%s\"\n data = (name, )\n c.execute(SQL, data)\n\n toReturn = c.fetchone()\n\n conn.commit()\n conn.close()\n\n # Only return the first result\n return toReturn[0]", "def employers_id_get(id): # noqa: E501\n\n\n return query_manager.get_resource(id=id,\n rdf_type_uri=EMPLOYER_TYPE_URI,\n rdf_type_name=EMPLOYER_TYPE_NAME, \n kls=Employer)", "def add_employee(self, obj):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employee(id, name, email, office, extra_info, picture_location, research_group, '\n 'title, is_external, is_admin, is_active) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);',\n (obj.e_id, obj.name, obj.email, obj.office, obj.extra_info, obj.picture_location, obj.research_group,\n obj.title, obj.is_external, obj.is_admin, obj.is_active))\n\n self.dbconnect.commit()\n return obj\n except:\n self.dbconnect.rollback()\n raise", "def employers_get(label=None, page=None, per_page=None): # noqa: E501\n\n\n return query_manager.get_resource(\n label=label,\n page=page,\n per_page=per_page,\n rdf_type_uri=EMPLOYER_TYPE_URI,\n rdf_type_name=EMPLOYER_TYPE_NAME, \n kls=Employer)", "def get_employee_by_uuid(uuid):\n employee = db.session.query(Employee).filter_by(uuid=uuid).first()\n if employee is None:\n raise ValueError('Invalid employee uuid')\n return employee", "def get_database(self, name):\n try:\n return [db for db in self.list_databases()\n if db.name == name][0]\n except IndexError:\n raise exc.NoSuchDatabase(\"No database by the name '%s' exists.\" %\n name)", "def get_by_name(self, name):\n ksat = Ksat.query.filter_by(name=name).first()\n\n return ksat", "def db_name():\n\n headers = {\n 'accept': 'text/plain',\n }\n\n try:\n response = requests.get('https://reactome.org/AnalysisService/database/name', headers=headers)\n except ConnectionError as e:\n print(e)\n\n if response.status_code == 200:\n return response.text\n else:\n print('Status code returned a value of %s' % response.status_code)", "def get(self, uuid: str):\n try:\n employee = self.service.get_employee_by_uuid(uuid)\n except ValueError:\n return self.NOT_FOUND_MESSAGE, 404\n return self.schema.dump(employee), 200", "def retrieve(self, request, pk=None):\n employee = self.get_employee_object(pk)\n print(F\"Employee: {employee}\")\n serializer = data_serializers.PresentEmployeeDataSerializer(employee)\n return Response(serializer.data)", "def employee(self) -> object:\n return self._employee", "def get_template_by_name(name, event_id):\n sql = \"\"\"\n select r.*, e.name event_name\n from runsheet_template r\n inner join event e on e.id=r.event_id\n where r.name=:name and e.id=:event_id\n \"\"\"\n data = {'name': name, 'event_id': event_id}\n\n rows = db.session.execute(sql, data)\n if rows.rowcount == 0:\n return\n else:\n return rows.fetchone()", "def get_supplier_address_by_name(supplier_name: str) -> str:\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select address from supplier where name = '{}';\".format(supplier_name)\n cursor.execute(query)\n data = cursor.fetchall()\n db.disconnect()\n return 
data[0][0]", "def by_name(cls, name):\n return cls.all().filter('name =', name).get()", "def query_business_name():\n print()\n business_name = input(\n 'Please enter full business name or type \"back\" or \"quit\": ')\n print()\n if business_name == \"quit\":\n print(\"Goodbye!\")\n sys.exit()\n if business_name == \"back\":\n return \"back\"\n\n business_object = business_col.find_one({\"name\": business_name})\n if business_object is None:\n print(\"No business found with given name.\")\n\n return business_object", "def query_employee_skill(self):\n\n query = \"select Skill_Descrpt, Emp_Fname, Emp_Lname from \" \\\n \"skill, employee, empskill \" \\\n \"where employee.Emp_ID = empskill.Emp_ID \" \\\n \"and skill.Skill_ID = empskill.Skill_ID \"\n\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)", "def get_emp_data(self,employee):\n\t\temp = None\n\t\tfind_by = employee.find_elements_by_tag_name\n\t\tif str(type(employee)) != \"<type 'NoneType'>\" and main.is_desktop():\n\t\t\t# columns = employee.find_elements_by_tag_name(\"td\")\n\t\t\temp = {\n\t\t\t\t'name': find_by('td')[0].text,\n\t\t\t\t'id': find_by('td')[1].text,\n\t\t\t\t'status': find_by('td')[2].text,\n\t\t\t\t'election': find_by('td')[3].text,\n\t\t\t\t'date_changed': find_by('td')[4].text\n\t\t\t}\n\t\telif str(type(employee)) != \"<type 'NoneType'>\":\n\t\t\temp = {\n\t\t\t\t'name': find_by('div')[2].text,\n\t\t\t\t'id': find_by('div')[3].text[13:],\n\t\t\t\t'status': find_by('div')[4].text[8:], #Fail 4:20p, StaleEl\n\t\t\t\t'election': find_by('div')[5].text[17:], #Fail 4:15p, StaleEl\n\t\t\t\t'date_changed': find_by('div')[6].text[14:]\n\t\t\t}\n\n\t\t# raw_input(str(emp))\n\t\treturn emp", "def test_api_can_get_employee_by_id(self):\n res = self.client().get(service_url_emp+'/1')\n self.assertEqual(res.status_code, 200)\n self.assertIn('name1', str(res.data))", "def by_name(cls, name):\n u = cls.all().filter('name =', name).get()\n return u", "def findall(self, name):\n if name in self._dict:\n return self._dict[name]\n else:\n raise PoseError(\"%s is not in database\" % name)", "def getEmployeeRecord(self, employees, records, name, reader):\n # pull the first and last name of the employee\n last, first = name.split(',')\n # clean up, if necessary\n last = last.strip()\n first = first.split()[0].strip()\n # get the next line with the social security number\n text = next(reader)[0]\n # check that it is the right line\n assert text.startswith('SSN: ')\n # extract the obfuscated number\n ssn = text[5:].strip().replace('x', '?')\n\n # go through the employee index looking for a name match\n for eid, fullname in employees.items():\n # if this is the match\n if fullname == (last, first):\n # bail\n break\n # if we get this far, there is no match\n else:\n # complain\n # print('could not match {} {}'.format(first, last))\n # make one up\n eid = ((last, first), ssn)\n\n # attempt to\n try:\n # look up the employee\n employee = records[eid]\n # if that fails\n except KeyError:\n # build the employee record\n employee = Employee(first=first, last=last, ssn=ssn)\n # and attach it\n records[eid] = employee\n\n # grab the next line\n line = next(reader)\n # start parsing paycheck info\n while line:\n # have we reached the summary section?\n if line[0].startswith('Employee Totals:'):\n # swallow this section\n for line in reader:\n # bail if the zeroth field isn't empty; it's the end of the section\n if line[0]: return line\n # ran out of input\n 
break\n # otherwise, this is a paycheck section; extract\n line = self.getEmployeePaycheck(employee=employee, header=line, reader=reader)\n\n # if we get this far, the input was exhausted and we are all done\n return", "def get_player(playerName):\n return players_col.find_one({\"name\": playerName})", "def get_author_by_name(self, name):\n\n cur = self.conn.cursor()\n query = 'SELECT author_id , name FROM author WHERE name = ? '\n cur.execute(query, (name,))\n return row_to_dict_or_false(cur)", "def getEquipmentByEquipmentId(equipment_id):\r\n db = db_helpers.getDbCon()\r\n cursor = db.cursor()\r\n userEquipmentInsertQuery = \"SELECT * FROM equipment WHERE equipment_id =%s\"\r\n try:\r\n cursor.execute(userEquipmentInsertQuery, (equipment_id,))\r\n equipment = cursor.fetchall()\r\n return equipment\r\n except Exception:\r\n print('Error: OOPs something went wrong while getting the equipment by equipment id!')\r\n finally:\r\n cursor.close()\r\n db.close()", "def findUser(username):\n connector = appEngine.connect()\n userId = connector.execute(\"SELECT user.userID FROM user WHERE userName=(?)\", username).fetchone()\n #selectInput = select([user]).where(user.column.userName == username)\n #db.execute(selectInput)\n return userId", "def get_client(self, clientname):\n client = self.dbsession.query(Client).filter_by(clientname=clientname).all()\n if not client:\n return self.create_client({'clientname': clientname})\n else:\n return client[0]", "def get_employee_information(user_name: str, employee_name: str, store_name: str):\n\n user_name = auth.get_username_from_hash(user_name)\n permission_handler.is_permmited_to(user_name=user_name, action=Action.EMPLOYEE_INFO.value,\n store_name=store_name)\n permission_handler.is_working_in_store(employee_name, store_name)\n return user_handler.get_employee_information(employee_name)", "def get(self, name):\n\n if not name in self.store.keys():\n raise NotFoundInDataStore()\n\n return self.store[name][1]", "def select_single(self, user_id, id=None, name=None):\n self.cur.execute(' \\\n SELECT `id`, \\\n `user_id`, \\\n `name`, \\\n `description`, \\\n `house_id`, \\\n (SELECT `name` FROM `Location` AS `B` WHERE (`B`.`id` = `A`.`house_id`)) AS `house` \\\n FROM `Location` AS `A` \\\n WHERE `user_id` = ? \\\n AND ((`id` = ?) OR (? IS NULL)) \\\n AND ((`name` = ?) OR (? 
IS NULL))',\n (user_id, id, id, name, name))\n result = self.cur.fetchone()\n return result", "def get_by_name(name):\n\n result = {}\n status = 404\n print id\n # nodes=Property.query.all()\n obj = Property.query.filter_by(name=name).filter(Property.users.contains(current_user)).first()\n if obj:\n result['prop'] = obj\n status = 200\n\n return result, status", "def getAllWhereNameIs2(table, name, orgName):\n\ttry:\n\t\tcon = sqlite3.connect('PampDb.db')\n\t\tcur = con.cursor()\n\t\tcur.execute(\"SELECT * FROM \" + table + \" WHERE name like'\" + name + \"%' and organisationId like (SELECT organisationId FROM Organisation WHERE name like '\" + orgName + \"' )\")\n\t\tob = cur.fetchall()\n\t\tif not ob:\n\t\t\treturn \"\"\n\t\telse:\n\t\t\tobje = ob[0]\n\t\t\treturn obje\n\t\tcon.commit()\n\t\tcon.close()\n\texcept:\n\t\tprint('Could not run function getAllWhereNameIs2 from DbController')", "def read(id):\n db = core.connect()\n return db[id]", "def get_employees(self, active_only):\n cursor = self.dbconnect.get_cursor()\n\n if active_only:\n cursor.execute(\n 'SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external, '\n 'is_admin, is_active FROM employee WHERE is_active = TRUE')\n else:\n cursor.execute(\n 'SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external, '\n 'is_admin, is_active FROM employee')\n\n employees = list()\n for row in cursor:\n obj = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])\n employees.append(obj)\n return employees", "def get_by_name(name):\n return database.get_all(Domain, name, field=\"name\").all()", "def select_instructor(self, email):\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\"SELECT * FROM instructors WHERE email=?\", (email,))\n return cursor.fetchone()", "def show_employee(emp_id, fields=None):\n ret = {}\n if fields is None:\n fields = \",\".join(\n (\n \"canUploadPhoto\",\n \"department\",\n \"displayName\",\n \"firstName\",\n \"id\",\n \"jobTitle\",\n \"lastName\",\n \"location\",\n \"mobilePhone\",\n \"nickname\",\n \"photoUploaded\",\n \"photoUrl\",\n \"workEmail\",\n \"workPhone\",\n \"workPhoneExtension\",\n )\n )\n\n status, result = _query(action=\"employees\", command=emp_id, args={\"fields\": fields})\n\n root = ET.fromstring(result)\n\n ret = {\"id\": emp_id}\n for item in root:\n ret[next(iter(item.values()))] = item.text\n return ret", "def query_employee(self, employee_inputs):\n\n query = \"select * from employee where \"\n row_names = [\n \"emp_ID\", \"Region_ID\", \"Emp_Lname\", \"Emp_Mi\", \"Emp_Fname\",\n \"Emp_Hiredate\"\n ]\n filled_attributes = []\n\n row_index = 0\n row_options = []\n for item in employee_inputs:\n if item is not None:\n row_options.append(row_index)\n filled_attributes.append(item)\n row_index += 1\n\n j = 0\n for i in row_options:\n if j == 0:\n query += \"{}='{}' \".format(row_names[i], filled_attributes[j])\n else:\n query += \"and {}='{}' \".format(row_names[i],\n filled_attributes[j])\n j += 1\n\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)", "def get_user(name, password):\n collection = get_collection(\"user\")\n user_info = collection.find_one({\"name\": name, \"password\": get_password(name, password)})\n return user_info", "def get(self,id):\r\n person = get_one(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return 
person", "def find_by_name(name):\n return repository.find_by_name(name)", "def get_place(name):\n return session.query(Place).filter_by(name=name).first()", "def fetch_one(q, *params):\n db = Database()\n db.cur.execute(q, params)\n ret = db.cur.fetchone()\n db.con.close()\n return ret", "def get(self, identifier):\n fields = \",\".join(self.model.get_fields_name())\n query = \"select {0} from {1} where {2}=?\".format(\n fields,\n self.ressource_config[\"table\"],\n self.model.pk_field.name)\n cursor = self.get_connector().cursor()\n cursor.execute(query, (identifier,))\n obj = cursor.fetchone()\n\n if obj:\n fields = self.model.get_fields_name()\n return dict(zip(fields, obj))\n else:\n raise NotFound", "def read_db_one(id, tablename = None):\n\n # Set the default tablename\n if tablename is None:\n tablename = config[\"default-table\"]\n\n conn, tunnel = create_db_conn()\n result = None\n\n try:\n cur = conn.cursor()\n cur.execute(\"USE %s\"%(config['db']))\n cur.execute(\"SELECT * FROM %s WHERE id = %d;\"%(tablename,id))\n conn.commit()\n result = cur.fetchone()\n if len(result) == 0:\n result = None\n\n except Exception as e:\n print(\"read_data_list failed\")\n print(e)\n\n conn.close()\n tunnel.close()\n return result", "def company_name():\r\n\r\n cursor.execute('SELECT name from companies \\\r\n order by RANDOM() limit 1;')\r\n return cursor.fetchone()[0]", "def add_employee(self, empl):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employee values(default,%s,%s,%s,%s,%s,%s,%s,%s)',\n (empl.name, empl.email, empl.office, empl.research_group, empl.title, empl.internOrExtern,\n empl.active, empl.promotor))\n cursor.execute('SELECT LASTVAL()')\n eid = cursor.fetchone()[0]\n empl.id = eid\n # get id and return updated object\n self.dbconnect.commit()\n except(Exception, self.dbconnect.get_error()) as error:\n self.dbconnect.rollback()\n raise Exception('\\nUnable to save Employee!\\n(%s)' % (error))", "def _eidnamefetch(eid):\n\n try:\n url = 'http://api.espn.com/v1/sports/football/nfl/athletes/%s?apikey=dha4fmjhb6q36zffzkech2zn' % str(eid)\n req = urllib2.Request(url)\n req.add_header(\"User-Agent\",\"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:17.0) Gecko/17.0 Firefox/17.0\")\n r = urllib2.urlopen(req)\n data = json.loads(r.read())\n data = data['sports'][0]['leagues'][0]['athletes'][0]\n fn = data['fullName']\n return fn\n except Exception, e:\n print \"ERROR: _eidnamefetch :: {0}\".format(e)\n return None", "def get_by_name(self, user_name):\n user = (\n self.session\n .query(tables.User)\n .filter_by(user_name=user_name)\n .first()\n )\n return user", "def get_product(conn, product_id: int) -> str:\n with conn.cursor() as cursor:\n cursor.execute(\"\"\"select name from products\n where id = {0}\"\"\".format(product_id))\n try:\n return cursor.fetchone()[0]\n except TypeError:\n raise errors.StoreError", "def __getitem__(self, dbname):\n return Database(dbname=dbname, connection=self)" ]
[ "0.75500387", "0.7348031", "0.7300037", "0.72360134", "0.7058836", "0.7009213", "0.6654517", "0.66495013", "0.66377455", "0.65564954", "0.6541405", "0.6435797", "0.6376785", "0.6338932", "0.61743134", "0.61031044", "0.60764945", "0.60599095", "0.59950364", "0.5983809", "0.59781575", "0.59689844", "0.5962662", "0.595988", "0.5945464", "0.5945464", "0.5945464", "0.592619", "0.59190995", "0.58895534", "0.58135056", "0.5812104", "0.57835025", "0.57815224", "0.57695645", "0.574096", "0.5716935", "0.5709205", "0.56670266", "0.5666021", "0.56455797", "0.5628562", "0.56272477", "0.5622422", "0.55948955", "0.5573822", "0.5572453", "0.55593467", "0.5543328", "0.55406505", "0.55366915", "0.55338013", "0.5519203", "0.55172104", "0.55000746", "0.5495479", "0.54880387", "0.5487598", "0.54729146", "0.54638714", "0.5458397", "0.5405541", "0.5400826", "0.5397576", "0.5390528", "0.5377836", "0.536198", "0.5350448", "0.5345497", "0.5341344", "0.53383756", "0.5338294", "0.5334212", "0.5334176", "0.533361", "0.53300065", "0.53296345", "0.53237015", "0.53179735", "0.5312327", "0.53017", "0.5298076", "0.5296161", "0.52918214", "0.52843684", "0.5278537", "0.52740365", "0.5265336", "0.5261028", "0.5258384", "0.5256139", "0.52480406", "0.52468735", "0.5238203", "0.52307045", "0.5228003", "0.5227332", "0.5219632", "0.5209712", "0.5193907" ]
0.8025803
0
adds an employee to the database
def add_employee(self, empl): cursor = self.dbconnect.get_cursor() try: cursor.execute('INSERT INTO employee values(default,%s,%s,%s,%s,%s,%s,%s,%s)', (empl.name, empl.email, empl.office, empl.research_group, empl.title, empl.internOrExtern, empl.active, empl.promotor)) cursor.execute('SELECT LASTVAL()') eid = cursor.fetchone()[0] empl.id = eid # get id and return updated object self.dbconnect.commit() except(Exception, self.dbconnect.get_error()) as error: self.dbconnect.rollback() raise Exception('\nUnable to save Employee!\n(%s)' % (error))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_employee(self, obj):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employee(id, name, email, office, extra_info, picture_location, research_group, '\n 'title, is_external, is_admin, is_active) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);',\n (obj.e_id, obj.name, obj.email, obj.office, obj.extra_info, obj.picture_location, obj.research_group,\n obj.title, obj.is_external, obj.is_admin, obj.is_active))\n\n self.dbconnect.commit()\n return obj\n except:\n self.dbconnect.rollback()\n raise", "def add_employee(self, employee):\n self.employees.add(employee)", "def post(self):\n try:\n employee = self.service.add_employee(self.schema, request.json)\n except ValidationError as error:\n return error.messages, 400\n return self.schema.dump(employee), 201", "def add_employee(schema, employee_json):\n employee = schema.load(employee_json, session=db.session)\n db.session.add(employee)\n db.session.commit()\n return employee", "def post(self):\n data = EmployeeRegister.parser.parse_args()\n new_employee_id = str(uuid.uuid4())\n\n while EmployeeModel.find_by_id(new_employee_id):\n # if this id is already in use\n new_employee_id = str(uuid.uuid4())\n\n employee = EmployeeModel(**data, employee_id=new_employee_id)\n employee.save_to_db()\n\n return {\"message\": \"Employee successfully added to the system\"}, 201 # 201 - Created", "def add_employee(self, emp):\n if emp not in self.employees: \n self.employees.append(emp)", "def add_employee():\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n form = SignUp_Form()\n \n\n if form.validate_on_submit():\n try: \n employee = Employee.register(\n username = form.username.data,\n password = form.password.data, \n email = form.email.data, \n first_name = form.first_name.data,\n last_name = form.last_name.data,\n hire_date = form.hire_date.data, \n is_admin = form.is_admin.data,\n )\n\n db.session.add(employee)\n\n db.session.commit()\n except IntegrityError:\n flash(\"Email already in use\", \"danger\")\n return render_template(\"/admin/add_user.html\", form = form)\n\n flash(\"Employee Added!\", \"success\")\n return redirect(\"/administrator\")\n else:\n\n return render_template(\"/admin/add_user.html\", form = form)", "def add_employee(self, first_name, last_name):\n self.switch_main_menu(\"PIM\")\n self.click_menu(\"Add Employee\")\n self.pim = AddEmployee(self.driver)\n self.pim.add_user_employee(first_name, last_name)", "def create_employee(self):\n try:\n name = input(\"Enter name: \")\n if not name.isalpha():\n print(\"Invalid data format. Name should contain only alphabets. 
\")\n return False\n email = input(\"Enter email: \")\n if not InputValidations.validate_email(email):\n return False\n employee = EmployeeModel(name=name, email=email)\n self.admin_repository.create_employee(employee)\n print(\"Employee created successfully!\")\n return True\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False", "def create_employee(self,personal_identity):\r\n new_emp = Employee(*personal_identity)\r\n registration_str = new_emp.get_registration_str()\r\n\r\n return_value = self.save_object_to_DB(\"employee\",registration_str)\r\n return return_value", "def add_employee():\n\n while True:\n first_name = get_user_string(\"Enter your first name\")\n last_name = get_user_string(\"Enter your last name\")\n grade = get_employee_input_int(\"Enter your grade\")\n db.add_employee(first_name, last_name, grade)\n print(\"New employee \" + first_name + \" \" + last_name + \" has been added to the employee table\")\n user_input = input(\"Do you want to add more employees to the table ? (Y/N)\")\n if(str(user_input).upper()) == 'Y':\n continue\n elif (str(user_input).upper()) == 'N':\n break\n else:\n print(\"Invalid Input\\nReturning to the main menu\")\n break", "def post(self):\n employee = Employee(**self.data)\n _dict = Employee.encode(employee)\n\n _id = DatabaseManager.insert(Collection.EMPLOYEES, _dict)\n employee_dict = DatabaseManager.find_document_by_id(\n Collection.EMPLOYEES, _id, True\n )\n return employee_dict", "def add_person():\n # get values from user\n responses = accept_inputs([\"Name\"])\n # insert into db\n query_no_results(\"insert into person (name) values(?)\", [responses[\"Name\"]])\n print(\"New person created\")", "def addUsertoDatabase(self):\r\n self.c.execute(\"\"\"INSERT INTO student_information VALUES (?,?,?)\"\"\",(self.name,self.password,self.budget,))\r\n self.con.commit()\r\n print(\"Added to Database Student..\")", "def add(self):\n\n db.session.add(self)\n db.session.commit()", "def add(self):\n\n db.session.add(self)\n db.session.commit()", "def add_employeeRole(self, id, role):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employeeRoles values(%s,%s)',\n (id, role))\n # get id and return updated object\n self.dbconnect.commit()\n except(Exception, self.dbconnect.get_error()) as error:\n self.dbconnect.rollback()\n raise Exception('\\nUnable to save EmployeeRole!\\n(%s)' % (error))", "def test_new_employee_crud_methods(self):\n response = self.client.get(\n '/employees/', kwargs={'employer_id': self.employee.id})\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(Employee.objects.all()), 1)\n\n # Test that a new employee can be added\n response = self.client.post(\n '/employees/',\n {'name': 'MAdtraxx!!', 'employer': self.employer.id},\n kwargs={'pk': self.employer.id})\n self.assertEqual(response.status_code, 201)\n self.assertEqual(Employee.objects.count(), 2)\n\n # Test that employee info may be edited\n response = self.client.put('/employees/1/',\n {'name': 'Ashley',\n 'employer': self.employer.id},\n kwargs={'employer_id': self.employee.id,\n 'pk': self.employee.id})\n self.assertEqual(response.status_code, 200)", "def createEmployee():\n form = CreateEmployeeForm()\n if form.validate_on_submit():\n hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n staff = Staff(first_name=form.first_name.data, last_name=form.last_name.data, password=hashed_password, \n email=form.email.data, role=form.role.data, 
location=form.location.data)\n db.session.add(staff)\n db.session.commit()\n flash(f'Employee Added To Database', category='Success')\n return redirect(url_for('login'))\n return render_template('new_employee.html', title=\"Register\", form=form)", "def add_user(self, username, password, name, department):\n db = sqlite3.connect(self.name)\n cur = db.cursor()\n cur.execute('SELECT MAX(ID) FROM users')\n maxid = cur.fetchone()[0]\n usid = maxid + 1 if maxid is not None else 0\n date = time.strftime('%Y.%m.%d')\n cur.execute(\n 'INSERT INTO users VALUES (?, ?, ?, ?, ?, ?, ?)',\n (usid, username, password, \"user\", name, department, 28)\n )\n db.commit()\n db.close()", "def register():\n add_employee = True\n form = RegistrationForm()\n if form.validate_on_submit():\n employee = Employee(email=form.email.data,\n username=form.username.data,\n glad_id=form.glad_id.data,\n tel_no=form.tel_no.data,\n role_id=2 , ##form.role_id.data,\n password=form.password.data)\n\n # add employee to the database\n db.session.add(employee)\n db.session.commit()\n flash('You have successfully registered! You may now login.')\n\n # redirect to the login page\n return redirect(url_for('auth.login'))\n\n # load registration template\n return render_template('auth/register.html', form=form, title='Register')", "def action_add(request, employee_id=None):\n if employee_id:\n employee = Employee.objects.get(pk=employee_id)\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n else:\n employee = request.user.employee_user.first()\n if request.method == 'POST':\n form = ActionForm(request.POST)\n if form.is_valid():\n form.save(request.user, employee)\n return HttpResponseRedirect('/action/%d' % form.instance.pk)\n else:\n form = ActionForm()\n return TemplateResponse(\n request,\n 'mus/action_edit.html',\n dict(\n form=form\n )\n )", "def add_entry_to_db(entry):\n db.session.add(entry)\n db.session.commit()", "def post(self, request):\n data = request.data\n skill_data = data.pop('skills')\n Department_name = data.pop('department')\n department = Department.objects.get(name=Department_name)\n manager_name = data.pop('manager')\n manager = Manager.objects.get(name=manager_name)\n Employee = EmployeeDetail.objects.create(department=department, manager=manager, **data)\n Employee.save()\n for skill in skill_data:\n skill_add, create = Skill.objects.get_or_create(name=skill)\n Employee.skills.add(skill_add)\n return Response(\n data=request.data\n )", "def addRecord(self):\n\n ## Saving recorded entries to the CRM and Mailings Database\n print(\"Saving entries to the CRM and Mailings database...\")\n db_connection.executeQuery(\"INSERT INTO dbo.CRM (f_name, l_name, company, address, city, county, state, zip, primary_phone, secondary_phone, email_address) VALUES ('\" + self.first_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.last_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.crm_company_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.address.title() + \"', '\" + self.city.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.county.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.state_code.upper() + \"', '\" + str(self.zip_code) + \"', '\" + self.phone_number + \"', '\" + self.phone_number_2 + \"' , '\" + self.email_address + \"'); COMMIT\")\n db_connection.executeQuery(\"INSERT INTO dbo.Mailings (name, company, address) VALUES ('\" + 
self.first_name.replace(\"\\'\", \"\\'\\'\").title() + \" \" + self.last_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.company_name.replace(\"\\'\", \"\\'\\'\").title() + \"','\" + self.address + \" \" + self.city.title() + \" \" + self.county.title() + \" \" + self.state_code.upper() + \" \" + str(self.zip_code) + \"'); COMMIT\")", "def add_user(self):\n query = \"INSERT INTO users (first_name, last_name, email, password) VALUES (%s, %s, %s, %s)\"\n self.cursor.execute(query,(\n self.first_name, \n self.last_name, \n self.email, \n self.password))", "def insert_employee(self,\n region_name,\n last_name,\n first_name,\n hire_date,\n mi=None):\n\n if self.check_input_type(region_name, \"Region\"):\n if self.check_input_type(hire_date, \"Date\"):\n region_info = self.query_region(region_name)\n region_id = region_info[0][0]\n\n if mi != \"\":\n query_format = \"insert into employee(Region_ID, \" \\\n \"Emp_Lname, Emp_Mi, Emp_Fname, Emp_Hiredate) \" \\\n \"values ((select region_id from region where \" \\\n \"region_id='{}'), '{}', '{}', '{}', '{}')\"\n query = query_format.format(\n region_id, last_name, mi, first_name, hire_date\n )\n else:\n query_format = \"insert into employee(Region_ID, \" \\\n \"Emp_Lname, Emp_Fname, Emp_Hiredate) \" \\\n \"values ((select region_id from region where \" \\\n \"region_id='{}'), '{}', '{}', '{}')\"\n query = query_format.format(\n region_id, last_name, first_name, hire_date\n )\n\n try:\n self.dbCursor.execute(query)\n SuccessMessageWindow(\"Insert success!\")\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)\n finally:\n self.dbConnection.commit()\n else:\n ErrorMessageWindow(\"Date format not valid!\")\n else:\n ErrorMessageWindow(\"Region input not valid!\")", "def save(self, db):\n db.query(\n \"INSERT INTO staff (name) VALUES(:name)\",\n name=self.name\n )", "def add_to_db(name, email_id):\n conn = None\n try:\n conn = connect_to_db()\n cur = conn.cursor()\n # This is the best way that I found to do an 'upsert' in a database agnostic way.\n # Try to update the data first, and if no records get updated, insert them.\n cur.execute(UPDATE_STMT.format(nm=name, em=email_id))\n if cur.rowcount == 0:\n cur.execute(INSERT_STMT.format(nm=name, em=email_id))\n conn.commit()\n print('Successfully added/updated record!')\n except Exception as e:\n print(str(e))\n disconnect_from_db(conn)\n raise e\n finally:\n disconnect_from_db(conn)", "def add():\n name = request.form['name']\n message = request.form['message']\n\n try:\n newcurs = g.conn.execute(\"\"\"INSERT INTO record\n VALUES (%s, %s );\"\"\", name, message)\n newcurs.close()\n except Exception:\n print \"can not write record to database\"\n return redirect('/error')\n\n return render_template(\"index.html\", **locals())", "def add_person():\n # Find the last used PK\n with sqlite3.connect('skeevy.db') as connection:\n cursor = connection.cursor()\n cursor.execute(\"SELECT id FROM person ORDER BY id DESC;\")\n for row in cursor.fetchone():\n last_pk = row\n\n # Auto-increment the primary key for the person table.\n last_pk = last_pk + 1\n\n # Prompt the user for the rest of their information.\n first_name = input(\"Enter your first name: \")\n middle_name = input(\"Enter your middle name: \")\n last_name = input(\"Enter your last name: \")\n suffix_name = input(\"Enter your suffix: \")\n e_mail = input(\"Enter your email: \")\n # Default status of the person is active (1).\n status = 1\n\n # Store the input in a variable.\n person_data = (last_pk, first_name, middle_name, 
last_name, suffix_name,\n e_mail, status)\n\n # Connect and insert the data into the person table.\n with sqlite3.connect('skeevy.db') as connection:\n cursor = connection.cursor()\n cursor.execute(\"INSERT INTO person VALUES(?, ?, ?, ?, ?, ?, ?);\",\n person_data)\n connection.commit()", "def insert(self):\n db.session.add(self)\n db.session.commit()", "def insert(self):\n db.session.add(self)\n db.session.commit()", "def insert(self):\n db.session.add(self)\n db.session.commit()", "def add_to_database():\n db_conn.execute(\"INSERT INTO Fietsenstalling (Naam, Achternaam, Adress, FietsNr, PIN) VALUES \"\n \"(?, ?, ?, ?, ?);\",(Naam, Achternaam, Adress, FietsNr, PIN))\n\n db_conn.commit()", "def create(self, validated_data):\n user_data = validated_data.pop('user')\n user = UserSerializer.create(UserSerializer(), validated_data=user_data)\n employee, created = Employee.objects.update_or_create(user=user,\n employee_id=validated_data.pop('employee_id'),\n location=validated_data.pop('location'),\n avail_start_time= str(validated_data.pop('avail_start_time')),\n avail_end_time= str(validated_data.pop('avail_end_time')))\n return employee", "def add_payee(self, payee_name):\n # [todo] - add check that payee_name is unique\n\n # open a cursor\n cur = self.get_cursor()\n\n self.reset_auto_increment('payees')\n\n # add payee with given name\n add_payee_statement = \"INSERT INTO payees \" + \\\n \"VALUES ('0', '{0}')\".format(payee_name)\n\n cur.execute(add_payee_statement)\n\n # close cursor\n self.close_cursor()", "def AddCost(Cost):\n\n logs.logger.debug(\"Start to add Cost object to the database.\")\n try:\n session.add(Cost)\n session.commit()\n logs.logger.info(\"Add Cost object to the database.\")\n except Exception as e:\n logs.logger.error(e, exc_info=True)", "def addUserEntry(userName):\n connector = appEngine.connect()\n rows = connector.execute('SELECT count(*) FROM user').rowcount\n newUserId = 'u' + str(ceil(time.time()))\n connector.execute('INSERT INTO user(userID,userName) VALUES(?, ?)', (newUserId, userName))", "def remove_employee(self, id):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('DELETE FROM employee WHERE employeeID=%s', (id,))\n self.dbconnect.commit()\n except(Exception, self.dbconnect.get_error()) as error:\n self.dbconnect.rollback()\n raise Exception('\\nUnable to remove Employee!\\n(%s)' % (error))", "def test_employee_creation(self):\n helper = EmployeeHelper(name='Andrew', hired_on='2019-10-01T00:00:00', salary=50000, department_id=1)\n\n # Returned result is an OrderedDict\n result = self.client.execute(helper.get_create_employee_query())['data']['createEmployee']['employee']\n\n self.assertEqual(result['name'], helper.name)\n self.assertEqual(result['hiredOn'], helper.hired_on)\n self.assertEqual(result['salary'], helper.salary)\n self.assertEqual(result['departmentId'], helper.department_id)", "def update_employee(self, obj):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('UPDATE employee '\n 'SET name = %s, email = %s, office = %s, extra_info = %s, picture_location = %s, '\n 'research_group = %s, title = %s, is_external = %s, is_admin = %s, is_active = %s '\n 'WHERE id = %s;',\n (obj.name, obj.email, obj.office, obj.extra_info, obj.picture_location, obj.research_group,\n obj.title, obj.is_external, obj.is_admin, obj.is_active, obj.e_id))\n self.dbconnect.commit()\n except:\n self.dbconnect.rollback()\n raise", "def test_add(self):\n r = main.List.connection()\n main.List.add(r, \"ToDo\", 1, \"Buy apples\", 2, 
\"20.05.2015\")\n task = r.get(\"ToDo\")\n self.assertTrue(task, \"No such entry in DB. Adding failed.\")", "def add_record(self, record):\n logging.debug('Adding new entry to table')\n if not self._dbconnect or not self._cursor:\n raise Exception('Invalid call to Context Manager method!')\n\n date = record.get('date', '')\n time = record.get('time', '')\n location = record.get('location', '')\n node_id = record.get('nodeID', '')\n\n if '' in (date, time, node_id, location):\n raise Exception('Invalid SecuritySystemDB record!')\n\n self._cursor.execute(\"insert into {} values(?, ?, ?, ?)\".format(self._name),\n (date, time, location, node_id))", "def db_add_entry(person):\n db = sh.open(the_phone_book_name, flag='c', writeback=True)\n if person.name in db:\n print(\"Updating existing entry ..... {name}\\n\".format(name=person.name))\n else:\n person.new = True\n print(\"Adding new entry ..... {name}\".format(name=person.name))\n db[person.name.capitalize()] = person.phone\n db.sync()\n db.close()\n db_show_all()", "def add_department():\n form = AddDepartment()\n if request.method == 'POST':\n if form.validate_on_submit():\n new_department = Department(name=form.name.data)\n db.session.add(new_department)\n try:\n db.session.commit()\n except IntegrityError:\n db.session.rollback()\n flash('Department already exists!', 'warning')\n return redirect(url_for('add_department'))\n\n flash(f'Department {form.name.data} created!', 'success')\n return redirect(url_for('home'))\n\n flash('Name not defined.', 'warning')\n return render_template('department/department_add.html', form=form)", "def add_user(self, role, emp_name, username, status, password):\n Log.info(\"Start to add user.\")\n self.click(self.user_add_btn)\n self.wait_unit_el_present(self.add_user_form)\n self.set_combox_value(role, self.user_role_select)\n self.input_text(emp_name, self.emp_name_input)\n self.input_text(username, self.user_name_input)\n self.set_combox_value(status, self.user_status_select)\n self.input_text(password, self.user_password_input)\n self.input_text(password, self.user_confirm_password)\n self.click(self.user_save_btn)\n self.wait_unit_el_present(self.user_table)\n Log.info(\"New user is added.\")", "def add_to_db(ark_obj):\n session = Session()\n session.add(ark_obj)\n session.commit()\n session.close()", "def test_new_employer_crud_methods(self):\n response = self.client.post(\n '/employers/', self.new_employer_data, format='json')\n self.assertEqual(response.status_code, 201)\n self.assertEqual(len(Employer.objects.all()), 2)\n\n # test one employer retrieve\n response = self.client.get('/employers/1/')\n self.assertEqual(response.status_code, 200)\n self.assertIn('Andela', response.data['name'])\n\n # test one employer update\n response = self.client.put('/employers/1/',\n {'name': 'New Employer'})\n self.assertEqual(response.status_code, 200)\n self.assertIn('New Employer', response.data['name'])", "def employee(self, employee: object):\n\n self._employee = employee", "def addJobToDb(self,jobname):\n\t\tsql = \"INSERT INTO hudson_jobs(jobname) VALUES (%s)\"\n\t\tcsr = self.db.cursor()\n\t\tcsr.execute(sql,[jobname])", "def add_new_user_to_db():\n first_name = request.form['first_name']\n last_name = request.form['last_name']\n img_url = request.form['img_url']\n\n new_user = User(first_name=first_name,last_name=last_name, img_url=img_url)\n db.session.add(new_user)\n db.session.commit()\n\n return redirect('/users')", "def insert(self):\n self.getDbRecord().insert()\n\n return", "def 
insert_user(user):\n\n try:\n session.add(user)\n session.commit()\n except Exception as e:\n logger.error(e)", "def add_customer(login, password, name, phone, email):\n with MY_CONNECTION as connection:\n connection.execute(\n \"\"\"\n INSERT INTO Customers\n (login,password,customer_name,phone,email)\n VALUES(?,?,?,?,?)\n \"\"\",\n (login, password, name, phone, email))", "def add_entry():\n if not check_admin_logged() :\n abort(403)\n\n title = request.form['title']\n category = request.form['category']\n buydate = request.form['buydate']\n introduction = request.form['introduction']\n\n if not check_items_in_form(title, category, buydate):\n return redirect(url_for('show_entries_admin'))\n\n new_entry = Entries(title, category, buydate, introduction)\n db.session.add(new_entry)\n\n try :\n db.session.commit()\n except IntegrityError as e :\n flash(e.message)\n return redirect(url_for('show_entries_admin'))\n\n flash(u'成功添加新的条目')\n return redirect(url_for('show_entries_admin'))", "def db_add_and_commit(db_, model):\n from sqlalchemy.exc import OperationalError\n try:\n db_.session.add(model)\n db_.session.commit()\n except OperationalError as e:\n print(str(e))\n exit(1)", "def add_user(name: str, last_name: str, username: str) -> None:\n with connection:\n connection.execute(ADD_USER, (name, last_name, username))", "def AddUser(database):\n name=input(\"Enter the name of the user : \").lower()\n lastname=input(\"Enter the lastname of the user : \").lower()\n\n if f\"{name}_{lastname}\" in database.keys():\n print(\"the user already exists\")\n return\n\n age=int(input(\"Enter the age of the user : \"))\n yearStudy=int(input(\"Enter the year of study of the user : \"))\n fieldStudy=input(\"Enter the field of study of the user : \")\n nbinterest=int(input(\"how many interests does he have? 
: \"))\n interest=[]\n for i in range(nbinterest):\n interest.append(input(\"Enter the interest of the user : \"))\n city=input(\"Enter the city of the user : \") \n database[f\"{name}_{lastname}\"]=User(name,lastname,age,yearStudy,fieldStudy,city,interest)\n saveDatabase(database,database[f\"{name}_{lastname}\"])", "def create(self, request):\n serializer = data_serializers.CreateEmployeeSerializer(data=request.data)\n\n if serializer.is_valid(raise_exception=True):\n request_data = serializer.save()\n print(F\"Request employee Data: {serializer.data}\")\n\n try:\n new_employee = self.controller.create_employee(request_data=request_data)\n serializer = data_serializers.PresentEmployeeDataSerializer(new_employee)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except (domain_exceptions.EmployeeIDIsNotUnique,\n domain_exceptions.WorkArrangementPercentageOutOfRange,\n domain_exceptions.TeamDoesNotExist,\n domain_exceptions.TeamHasALeader,\n domain_exceptions.WorkArrangementPercentageNull\n ) as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def anyadir_empleado(self, empleado):\n self.empleados.append(empleado)", "def add_investment():\n\n company_name = request.args.get('company-name')\n date_of_entry = datetime.datetime.today().strftime('%Y-%m-%d')\n \n input_quantity = request.args.get('quantity')\n quantity = int(str(input_quantity).replace(',', ''))\n \n input_cost = request.args.get('cost')\n cost = int(str(input_cost).replace(',', ''))\n\n date_of_investment = request.args.get('date')\n\n new_inv = Investment(date_of_entry=date_of_entry, \n date_of_investment=date_of_investment,\n company_name=company_name, \n quantity=quantity, \n cost=cost)\n \n db.session.add(new_inv)\n db.session.commit()\n\n user_id = session['user']\n new_inv_id = new_inv.inv_id\n\n\n new_userinv = UserInv(inv_id=new_inv_id,\n user_id=user_id)\n db.session.add(new_userinv)\n db.session.commit()\n\n return jsonify('investment added!')", "def create_person(conn, person, first_name, last_name):\n sql = ''' INSERT INTO person(firstname,lastname)\n VALUES(?,?) 
'''\n cur = conn.cursor() # cursor object\n cur.execute(sql, person)\n # print(str(cur.lastrowid))\n # return cur.lastrowid # returns the row id of the cursor object, the person id\n first_name.set('')\n last_name.set('')\n messagebox.showinfo('Success', 'Person Successfully Added to Database!')", "def add_tag_to_db():\n new_tag = Tag(name=request.form['name'])\n\n db.session.add(new_tag)\n db.session.commit()\n\n flash(f\"Tag '{new_tag.name}' was successfully added\")\n\n return redirect('/tags')", "def add_entry(name, quantity, price):\n\n exists = False\n today = datetime.datetime.today()\n date = clean_date(today.strftime('%m/%d/%Y'))\n\n for product in session.query(Product):\n if product.product_name == name:\n exists = True\n \n if exists == True:\n product = session.query(Product).filter(Product.product_name==name).first()\n product.product_quantity = quantity\n product.product_price = price\n product.date_updated = date\n session.add(product)\n else:\n product = Product(\n product_name = name,\n product_quantity = quantity,\n product_price = price,\n date_updated = date\n )\n session.add(product)\n \n session.commit()", "def setUp(self):\n\n self.user = self.make_user()\n self.employee = Employee.objects.create(\n cpf=\"974.220.200-16\",\n user=self.user,\n departament=Employee.ADMINISTRATION\n )", "def add_record(self, table_name, **kwargs):\n\n if not self.open:\n print(\"Not currently connected to a DB.\")\n return False\n\n\n fields = \", \".join([str(f) for f in kwargs.keys()])\n values = \", \".join([str(v) for v in kwargs.values()])\n q = \"INSERT INTO {tn}({columns}) VALUES ({values})\"\n self.query = q.format(tn=table_name,\n columns=fields,\n values=values)\n\n # try:\n self.cursor.execute(self.query)\n print(\"{}\\n inserted into {} table.\".format(values, table_name))\n return True\n # except Exception as error:\n # print(\"Failed to add {} to {} table.\".format(values, table_name))\n # print(\"SQL Query: \\n{}\\n\".format(self.query))\n # print(\"Exception: \\n{}\".format(error))\n\n # return False", "def change_employee(self, employee):\n cursor = self.dbconnect.get_cursor()\n try:\n if employee.id == None:\n raise Exception('no id given')\n cursor.execute('select * from employee where employeeID=%s', (str(employee.id),))\n if cursor.rowcount == 0:\n raise Exception('no employee found with that id')\n cursor.execute(\n 'update employee set name= %s,email= %s,office= %s,title= %s,INTernORextern= %s,active= %s,promotor= %s where employeeID=%s',\n (employee.name, employee.email, employee.office, employee.title,\n employee.internOrExtern, employee.active, employee.promotor, employee.id))\n self.dbconnect.commit()\n except:\n self.dbconnect.rollback()\n raise Exception('unable to change employee')", "def put(self, id):\n empleadoactualizar = EmployeeModel.query.filter_by(employee_id=id).first()\n if empleadoactualizar:\n reg = api.payload\n empleadoactualizar.employee_id = reg['employee_id']\n empleadoactualizar.name = reg['name']\n empleadoactualizar.age = reg['age']\n empleadoactualizar.position = reg['position']\n empleadoactualizar.fechaingreso = datetime.date.fromisoformat(reg['fechaingreso'])\n db.session.merge(empleadoactualizar)\n db.session.commit()\n return 201\n api.abort(404)", "def __call__(self):\r\n AddNewRecords()", "def save(self)->None:\n database.cursor.execute(\n \"INSERT INTO users(firstname,lastname,othernames,email,phone,username,password,role) VALUES (%s,%s,%s,%s,%s,%s,%s,%s) RETURNING id\", (\n self.first_name,\n self.last_name,\n 
self.other_name,\n self.email,\n self.phone_number,\n self.user_name,\n self.password,\n self.is_admin\n ))\n super().save()", "def assign_employee(id):\r\n check_admin()\r\n\r\n employee = Employee.query.get_or_404(id)\r\n\r\n # prevent admin from being assigned a department or role\r\n if employee.is_admin:\r\n abort(403)\r\n\r\n form = EmployeeAssignForm(obj=employee)\r\n if form.validate_on_submit():\r\n employee.department = form.department.data\r\n employee.role = form.role.data\r\n db.session.add(employee)\r\n db.session.commit()\r\n flash('You have successfully assigned a department and role.')\r\n\r\n # redirect to the roles page\r\n return redirect(url_for('admin.list_employees'))\r\n\r\n return render_template('admin/employees/employee.html',\r\n employee=employee, form=form,\r\n title='Assign Employee')", "def add_hospital(request):\n if request.POST:\n post = request.POST\n name = post.get(\"name\")\n address = post.get(\"address\")\n city = post.get(\"city\")\n state = post.get(\"state\")\n zip = post.get(\"zip\")\n hospital = Hospital.objects.create(\n name=name,\n address=address,\n city=city,\n state=state,\n zip=zip\n )\n\n if hospital:\n return redirect('add_hospital')\n\n return render(request, 'add_hospital.html')", "def create(self):\n db.session.add(self)\n db.session.commit()", "def add_entry(name, title, duration, notes):\n clear()\n print('Entry added to work log!')\n return Entry.create(\n employee_name=name,\n task_title=title,\n time_spent=duration,\n task_notes=notes\n )", "def create():\n data = request.get_json()\n print(\"DATA: \", data)\n db_helper.insert_new_record(data['first_name'], data['last_name'], data['class_period'], data['current_belt'], data['student_teacher_id'])\n result = {'success': True, 'response': 'Done'}\n return jsonify(result)", "def add_user(self, email, name, password):\n insert_command = \"INSERT INTO users(email, name, password, role) VALUES('%s', '%s', '%s');\" % (\n email, name, password)\n try:\n self.cur.execute(insert_command)\n self.cur.execute(\n \"SELECT * FROM users WHERE email = '%s';\" % (email,))\n item = self.cur.fetchone()\n if item:\n return jsonify({\"msg\": \"User successfully created\"}), 201\n except psycopg2.IntegrityError:\n output = {\n 'message': 'Email address already exists: ',\n }\n return jsonify(output), 400", "def test_new_user_is_added(db_session):\n new_user = User(username=\"test\", password=\"test\")\n db_session.add(new_user)\n query = db_session.query(User).all()\n assert len(query) == 1", "def add(self, obj):\n self.getSession().add(obj)\n self.commit() # paranoially\n return obj", "def add(self, obj):\n self.session.add(obj)", "def save_to_db(self):\n db.session.add(self)\n db.session.commit()", "def save_to_db(self):\n db.session.add(self)\n db.session.commit()", "def save_to_db(self):\n db.session.add(self)\n db.session.commit()", "def save_to_db(self):\n db.session.add(self)\n db.session.commit()", "def add_user():\n username = request.json['username']\n email = request.json['email']\n\n user = User(username, email)\n\n db.session.add(user)\n db.session.commit()\n return user_schema.jsonify(user)", "def add_student():\n\n\tprint('You must enter the student as is:\\n'\n\t\t\"'First name', 'middle name', 'Last name', 'major', 'major', 'gpa', id_number, 'minor'\"\n\t\t\" 'minor' graduation year, advisor number\\n For example: 'Kyle', 'Jacob', 'Ranney', 'Insurance'\"\n\t\t\", 'Chemistry', 3.0, 93988, 'Biology', 'NULL', 2016, 2234\\n\")\n\t# use sql insert statement\n\t# become familiar with 
this!\t", "def create_emp(self, name, pos, dept):\n if pos.upper() == 'MANAGER':\n self.create_manager(name, pos, dept)\n elif pos.upper() == 'SENIOR':\n self.create_senior(name, pos, dept)\n elif pos.upper() == 'JUNIOR':\n self.create_junior(name, pos, dept)\n else:\n self.create_trainee(name, pos, dept)", "def add_exercise():\n json_data = request.get_json()\n new_question = json_data.get(\"new_question\")\n new_answer = json_data.get(\"new_answer\")\n user_id = session.get(\"email\")\n try:\n fm.add_exercise(new_question, new_answer, user_id)\n msg = \"Exercise added for user: {}\".format(user_id)\n app.logger.info(msg)\n return jsonify({\"message\": \"add exercise call completed\"})\n except Exception as e:\n msg = \"The question or the answer to be added has exceeded the max char limit\"\n app.logger.error(msg)\n abort(400)", "def insert_record(self):\r\n try:\r\n db.session.add(self)\r\n db.session.commit()\r\n return {\"error\": False, \"id\": self.id}\r\n except exc.SQLAlchemyError as e: # pragma: no cover\r\n # print(e)\r\n # print(sys.exc_info())\r\n db.session.rollback()\r\n return {\"error\": True}\r\n finally:\r\n db.session.close()", "def create(self, values):\n if values.get('country_id', False):\n country = self.env['res.country'].browse(values['country_id'])\n if country.code == 'SA':\n values.update({'is_saudi': True})\n else:\n values.update({'is_saudi': False})\n\n res = super(HrEmployee, self).create(values)\n if values.get('user_id', False):\n self.user_id.write({'employee_id': res})\n return res", "def addNewAuthor(name: str, birth: str):\n if not name or not checkDate(birth):\n abort(400)\n author = Author(name=name, birth=birth)\n db.session.add(author)\n db.session.commit()\n app.logger.info(f\"New author with id: {author.id} added\")", "def _insert_department(self):\n # Insert\n if db_department.idx_department_exists(1) is False:\n record = Department(\n code=general.encode(self.reserved),\n name=general.encode(self.reserved))\n database = db.Database()\n database.add(record, 1102)", "def insert_experience(uid, rid):\n errmsg = []\n\n experience = Experience.query.filter(Experience.uid == uid).filter(Experience.rid == rid).first()\n if experience:\n errmsg.append(\"Experience entry already exists for given user at this restaurant.\")\n\n if not errmsg:\n experience = Experience(uid = uid, rid = rid, experience = 0)\n db.session.add(experience)\n db.session.commit()\n return None\n return errmsg", "def save_to_db(self):\n db.session.add(self)\n db.session.commit()\n # try:\n # db.session.add(self)\n # db.session.commit()\n # except exc.IntegrityError:\n # db.session.rollback()", "def add_user_to_g():\n\n if CURR_USER_KEY in session:\n g.user = Employee.query.get(session[CURR_USER_KEY])\n\n else:\n g.user = None", "def update_employee(employee):\n employee_id = get_employee_input_int(\"Enter the employee id you want to update\")\n newGrade = get_employee_input_int(\"Enter the new grade for \")\n db.update_employee(employee_id, newGrade)\n print(employee.full_name + \"'s grade value has been updated to :-> \", newGrade)", "def employees(self, employees: object):\n\n self._employees = employees", "def add_user(user: dict):\n new_user = [user]\n insert_into_table('users', new_user)", "def add_user():\n\n email = request.form.get(\"email\")\n password = request.form.get(\"password\")\n fname = request.form.get(\"fname\")\n lname = request.form.get(\"lname\")\n language = request.form.get(\"language\")\n\n new_user = User(email=email, password=password,fname=fname,\n 
lname=lname,language=language)\n\n db.session.add(new_user)\n db.session.commit()\n\n return redirect(\"/\")", "def add_entry(db, table, columns, values):\n mycursor = db.cursor()\n\n sql = \"INSERT INTO \" + table + \" (\" + parse_sql_param_from_array(columns) + \") VALUES (\" + parse_sql_param_from_array(values, escape=True) + \")\"\n mycursor.execute(sql)\n\n db.commit()" ]
[ "0.8205563", "0.7556254", "0.74963003", "0.74677837", "0.74195415", "0.71267223", "0.7027136", "0.68783706", "0.6761403", "0.6604914", "0.6410982", "0.63886535", "0.6380282", "0.6344615", "0.6239359", "0.6239359", "0.6229492", "0.6211725", "0.618129", "0.61335653", "0.61314315", "0.61312056", "0.61241335", "0.6122054", "0.606227", "0.6060016", "0.6027342", "0.5945643", "0.59397423", "0.59250766", "0.59024346", "0.5901823", "0.5901823", "0.5901823", "0.5898366", "0.5879391", "0.5872375", "0.5840885", "0.5832554", "0.58237964", "0.57995164", "0.578781", "0.578556", "0.5783671", "0.5757301", "0.57472354", "0.574318", "0.57280487", "0.5725802", "0.5722518", "0.57173795", "0.57072806", "0.56904763", "0.5689696", "0.5677811", "0.56739056", "0.5666836", "0.5666331", "0.565755", "0.56538594", "0.56535226", "0.5652386", "0.5637892", "0.5616796", "0.56109935", "0.5608321", "0.5599795", "0.559346", "0.5587575", "0.5576659", "0.5573863", "0.5570605", "0.55622685", "0.55544364", "0.55439305", "0.55413115", "0.5538688", "0.5537207", "0.55278534", "0.5526406", "0.5522921", "0.5522921", "0.5522921", "0.5522921", "0.55228806", "0.55041206", "0.5500685", "0.54980063", "0.5497766", "0.549505", "0.54937017", "0.5493102", "0.5489898", "0.5479812", "0.547899", "0.5476394", "0.5475282", "0.54718256", "0.54696465", "0.5466053" ]
0.7959675
1
removes an employee from the database
def remove_employee(self, id): cursor = self.dbconnect.get_cursor() try: cursor.execute('DELETE FROM employee WHERE employeeID=%s', (id,)) self.dbconnect.commit() except(Exception, self.dbconnect.get_error()) as error: self.dbconnect.rollback() raise Exception('\nUnable to remove Employee!\n(%s)' % (error))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete():", "def remove():\n\n db_remove()", "def remove(name):\n del person_database[name]", "def remove_data(self):\n db.session.delete(self)\n db.session.commit( )", "def remove():", "def remove(self):", "def delete(self, name):\n if name in self._dict:\n self._dict.pop(name)\n self.save()\n else:\n raise PoseError(\"%s is not in database\" % _name)", "def delete(self):\n ...", "def remove(self):\n db.session.delete(self)\n db.session.commit()", "def deleteRow(self, database):\r\n self.conn = connect(\"database.sqlite\")\r\n self.cur = self.conn.cursor()\r\n self.cur.execute(\r\n f\"DELETE FROM {database} WHERE id=(SELECT MAX(id) FROM {database})\")\r\n self.conn.commit()\r\n self.cur.close()", "def removeAlertFromDb(self):\n sql_query = \"DELETE FROM Alert WHERE symbol='\" + self.symbol + \"' AND cross='\" + self.cross + \"' AND level=\" + str(self.level)\n db.exec_query(sql_query)", "def delete(self):\n pass", "def delete(self):\n pass", "def delete(self):\n pass", "def delete(self):\n pass", "def delete(self):\n\n cursor = self._conn.cursor()\n cursor.execute(\"DELETE FROM saves\")\n self._conn.commit()", "def clean_exam():\n data = Exam.objects.all()\n data.delete()", "def cmd_delete_employee():\r\n id = request.form.get('id', \"\")\r\n confirm = request.form.get(\"confirm\", \"\")\r\n if confirm != \"DELETE\":\r\n flash(f\"Contact '{id}' NOT deleted. Please enter DELETE in the confirm field.\")\r\n return redirect(url_for('main.jobs'))\r\n \r\n index = get_employee_by_id(id)\r\n User.query.filter(User.id == id).delete()\r\n db.session.commit()\r\n\r\n\r\n if index != None:\r\n flash(f\"Employee '{id}' was succesfully deleted!\")\r\n return redirect(url_for('main.employees'))\r\n else:\r\n flash(f\"Employee '{id}' was not found\")\r\n return redirect(url_for('main.employees'))", "def delete_entry_from_db(entry):\n db.session.delete(entry)\n db.session.commit()", "def deleteRow(self):\r\n self.conn = connect('database.sqlite')\r\n self.cur = self.conn.cursor()\r\n self.cur.execute('''\r\n DELETE FROM Clients WHERE id=(SELECT MAX(id) FROM Clients)\r\n ''')\r\n self.conn.commit()\r\n self.cur.close()", "def remove(table, id_):\n\n # your code\n\n key = common.check_for_key(id_,table)\n\n if key == None:\n ui.print_error_message('Key does not exist')\n else:\n table.pop(key)\n data_manager.write_table_to_file('hr/persons.csv', table) \n\n #print(table)\n return table", "def delete(self):\r\n db.session.delete(self)\r\n db.session.commit()", "def delete(self, id):\n empleadoeliminar = EmployeeModel.query.filter_by(employee_id=id).first()\n if empleadoeliminar:\n db.session.delete(empleadoeliminar)\n db.session.commit()\n return 201\n api.abort(404)", "def deletar_empresa(id):\n empresa = Empresa.query.get_or_404(id)\n \n db.session.delete(empresa)\n db.session.commit()\n flash('Empresa deletada com sucesso.')\n\n return redirect(url_for('home.listar_empresas'))", "def delete(self):\r\n s = self.get_session()\r\n s.delete(self)\r\n s.commit()", "def delete_leader(self):", "def _delete (self):\n self._exec ('delete from table_name where id=%(id)s')", "def remove(name, db):\n database = load(db)\n if name in database:\n del database[name]\n pickle.dump(database, open(db, 'wb'))\n print(\"%s removed from %r\" % (name, db))\n else:\n print(\"no such person %r in %r\" % (name, db))\n sys.exit(-1)", "def delete_from_db(self):\n db.session.delete(self)\n db.session.commit()", "def delete_from_db(self):\n db.session.delete(self)\n db.session.commit()", "def DelteUser(database):\n 
firstname=str(input(\"what is the name of the user you want to delete : \"))\n delusr,find =getByName(database,firstname)\n if not find:\n return\n del database[delusr.key]\n for key,usr in database.items():\n if delusr.key in usr.folow:\n usr.folow.remove(delusr.key)\n if delusr.key in usr.folowed:\n usr.folowed.remove(delusr.key)\n \n os.remove(f\"Users/{delusr.key}\")", "def delete_meal():", "def deletePlayers():\n #deletes the contents of table players\n DB().execute(\"DELETE FROM players\", True)", "def delete(self):\n db.session.delete(self)\n db.session.commit()", "def delete(self):\n db.session.delete(self)\n db.session.commit()", "def delete(self):\n db.session.delete(self)\n db.session.commit()", "def delete(self):\n db.session.delete(self)\n db.session.commit()", "def delete(self):\n db.session.delete(self)\n db.session.commit()", "def delete(self):\n db.session.delete(self)\n db.session.commit()", "def delete(self):\n db.session.delete(self)\n db.session.commit()", "def delete(self):\n db.session.delete(self)\n db.session.commit()", "def delete(self):\n db.session.delete(self)\n db.session.commit()", "def delete_record():\n global data_base, table, choice, res, confirmation, output1, place_for_enter, choice_row\n choice_row = choice.get()\n sqlite3_simple_delete_record(data_base, table, choice_row, res)\n output1.delete(1.0, END)\n confirmation.after(1, confirmation.destroy)\n place_for_enter.delete(0, END)", "def delete(self)->None:\n database.cursor.execute(\n \"DELETE FROM {} WHERE id={}\".format(self.table_name, self.id))\n database.connection.commit()", "def Remove(self, e):\n self.reset(unset_namelist=True)", "def delete(self):\n\n\n try:\n db = getDatabase()\n connection = db.connect()\n\n connection.delete(self)\n except Exception as e:\n raise e\n finally:\n db.dispose()", "def del_entry(dbfile):\n\n conn = sqlite3.connect(dbfile)\n c = conn.cursor()\n c.execute(\"\"\"\n DELETE FROM bringatrailer WHERE id = (SELECT MAX(id) FROM bringatrailer)\n \"\"\")\n conn.commit()\n conn.close()", "def __del__(self):\n print(f\"{self.fullname()} deleted from database.\")", "def delete_table(self, name: str) -> None:", "def delete(self)->None:\n database.cursor.execute(\n \"DELETE FROM {} WHERE id = %s\".format(self.table_name), (self.id))\n database.connection.commit()", "def remove_row(self, row_id):", "def delete_self(self):\n self.table.remove((Query().name == self.name))", "def delete_self(self):\n self.table.remove((Query().name == self.name))", "def delete_item(self):\n\n\t\tdb.session.delete(self)\n\t\tdb.session.commit()", "def deletePlayers():\n db, cursor = connect()\n cursor.execute(\"DELETE FROM players\") \n db.commit() \n db.close()", "def deletePlayers():\n DB = connect()\n c = DB.cursor()\n c.execute(\"DELETE FROM tournament\")\n DB.commit()\n DB.close()", "def remove(table, id_):\n\n common.toremoveid(\"inventory/inventory.csv\",data_manager.get_table_from_file(\"inventory/inventory.csv\"),id_)", "def deletePlayers():\n db = connect()\n db_cursor = db.cursor()\n query = \"DELETE FROM players\"\n db_cursor.execute(query)\n db.commit()\n db.close()", "def delete(self, sql):\n try:\n # Execute the SQL command\n self.cursor.execute(sql)\n # Commit your changes in the database\n self.db.commit()\n except:\n # Rollback in case there is any error\n self.db.rollback()", "def unjoin():\n event_id = request.args.get('eventId')\n print(\"-=-=-=-=-\",event_id)\n register = db.session.query(Register).filter(Register.event_id == event_id).first()\n db.session.delete(register)\n 
db.session.commit()\n return redirect('/')", "def remove(self):\r\n\t\tself._delete()", "def removeJob(self):\n job, name = self.getJob() \n answer = tkMessageBox.askyesno(\"Warning\",'Remove this job?')\n if answer == False:\n return \n try: \n self.jobManager.deleteJob(job)\n except:\n print 'job not in database, removing from peat'\n del self.DB.meta.peatsa_jobs[name]\n self.DB.meta.__p__changed = 1\n self.updateJobs()\n return", "def delete_data(self):\n conn = self._connect_DB()\n cur = conn.cursor()\n cur.execute(\"DELETE FROM movie_table;\")\n self._close_connection(conn)", "def delete_order():", "def deletePlayers():\n db = connect()\n c = db.cursor()\n query = (\"DELETE FROM players;\")\n c.execute(query)\n db.commit()\n db.close()", "def delete_entry(self, scenario_info):\n sql = self.delete(\"id\")\n self.cur.execute(sql, (scenario_info[\"id\"],))", "def delete(self, e):\n if self.search(e):\n self.table[hash(e) % len(self.table)].remove(e)\n else:\n raise IndexError(\"Unknown value\")", "def delete_from_db(self):\n db.session.delete(self)\n db.session.commit()\n # try:\n # db.session.delete(self)\n # db.session.commit()\n # except exc.IntegrityError:\n # db.session.rollback()", "def remove(self):\n pass", "def remove(self):\n pass", "def remove(self):\n pass", "def deletePlayers():\n db_conn = connect()\n db_cursor = db_conn.cursor()\n db_cursor.execute(\"delete from players;\")\n db_conn.commit()\n db_conn.close()", "def delete(self):\n query = \"DELETE FROM \" + self.table + \" WHERE \" + self.idfield + \"=%s\"\n dbh = dbstuff.getRW(self.dbh_key)\n try:\n c = dbh.cursor()\n c.execute(query, self.id)\n c.close()\n dbh.commit()\n finally:\n dbstuff.release(dbh,self.dbh_key)", "def tearDown(self):\n with database() as db:\n db.query(\"DELETE FROM persons WHERE person_name = 'test_person_a' OR person_name = 'test_person_b'\")", "def emeventdelete(request):\n if(request.GET):\n eid=request.GET.get(\"id\")\n s=\"delete from tbleventprograms where pId='\"+str(eid)+\"'\"\n try:\n c.execute(s)\n db.commit()\n except:\n pass\n else:\n return HttpResponseRedirect(\"/emevent\")\n return render(request,\"emevent.html\")", "def remove_from_db(id_task_to_rem):\n\t# delete query\n\tquery = \"DELETE FROM tasks WHERE id_task=(%s)\"\n\n\t# connection to database\n\tconnection = pymysql.connect(user=\"root\", password=\"sysadmin\", host=\"localhost\", database=\"todolist\")\n\t# get a cursor\n\tcursor = connection.cursor()\n\n\t# execute query\n\tcursor.execute(query, (id_task_to_rem,))\n\t# commit on DB\n\tconnection.commit()\n\t# close cursor and connection\n\tcursor.close()\n\tconnection.close()", "def remove_employee(self, employee):\n self.employees.remove(employee)", "def delete_employee():\r\n id = request.args.get('id', \"\")\r\n return render_template(\"delete_employee.html\", id=id)", "def delete_players():\n DB = connect()\n c = DB.cursor()\n c.execute(\"DELETE FROM players\")\n DB.commit()\n DB.close()", "def delete(name):\n # Just like adding something, we use the cursor, but instead of INSERT INTO, we write DELETE FROM.\n # WHERE determines which activity the user wants to change\n c.execute(\"DELETE FROM activities WHERE name = (?)\", [name])\n # Now we must commit the changes that happened in the database\n conn.commit()", "def deletePlayers():\n cursor.execute(\"\"\"delete from players\"\"\")", "def delete(no):\n\n conn = sqlite3.connect(\"person_database.bd\")\n c = conn.cursor()\n\n # delete a record\n c.execute(f\"DELETE from person_info WHERE oid= \" + str(no))\n\n 
conn.commit()\n conn.close()", "def removePostFromDb(photo_name):\n connection = sqlite3.connect(homePath + DBname)\n cursor = connection.cursor()\n cursor.execute(\"DELETE FROM photo WHERE photo_name == (?);\", (photo_name,))", "async def __remove(self, ctx, name: discord.Member=None):\n server = ctx.message.server\n author = ctx.message.author\n if name is None:\n name = author\n if server.id not in self.db:\n self.db[server.id] = {}\n if \"bookkeeper\" not in self.db[server.id]:\n self.db[server.id][\"bookkeeper\"] = []\n await self.bot.say(\"Bookkeeper list is currently empty, add new bookkeepers using points keeper add\"\n \" <Discord name or nickname>\")\n self.save_db()\n return\n if name.id not in self.db[server.id][\"bookkeeper\"]:\n await self.bot.say(\"Keeper is not registered, please make sure the name or nickname is correctly spelled. \"\n \"You can check using points keeper list\")\n return\n self.db[server.id][\"bookkeeper\"].remove(name.id)\n self.save_db()", "def deletePlayers():\n dbconnection = connect()\n dbcursor = dbconnection.cursor()\n dbcursor.execute(\"DELETE FROM players\")\n dbconnection.commit()\n dbconnection.close()", "def __delitem__(self, key):\n with SessionContext(self.SessionClass) as session:\n q = session.query(PAW2_DBObject)\n q = q.filter(PAW2_DBObject.key == key)\n assert q.delete(synchronize_session=False) == 1\n session.commit()", "def deleteMatches():\n #deletes the contents of table matches\n DB().execute(\"DELETE FROM matches\", True)", "def delete(self):\n with sqlite3.connect(self.dbpath) as connection: \n cursor = connection.cursor()\n DELETESQL = \"\"\"DELETE FROM accounts WHERE id=:id \"\"\"\n cursor.execute(DELETESQL, {\"id\": self.id})\n self.id = None", "def delete(self, request, pk):\n employee = EmployeeDetail.objects.get(pk=pk)\n employee.delete()\n return Response(\n data=' Entry deleted',\n status=status.HTTP_400_BAD_REQUEST\n )", "def remove(table, id_):\n\n # your code\n\n common.toremoveid(\"store/games.csv\",data_manager.get_table_from_file(\"store/games.csv\"),id_)", "def delete():\n id_num = int(input('Enter the ID number of the item you wish to delete\\n'))\n db_actions.remove(id_num)", "def remove(table, id_):\n\n entry_index = 0\n for entry in table:\n entry_id_ = entry[0]\n if entry_id_ == id_:\n del table[entry_index]\n entry_index += 1\n data_manager.write_table_to_file(\"model/sales/sales.csv\", table)\n return table", "def delete(d):\n conn_obj = mysql.connector.connect(host='localhost',database='mydb',user='root',password='kks')\n cur_obj = conn_obj.cursor()\n cur_obj.execute(\"DELETE FROM book WHERE isbn = %s\",(d,))\n conn_obj.commit()\n conn_obj.close()", "def remove(table, id_):\n\n # 3\n for index in range(len(table)):\n if table[index][0] == id_:\n table.pop(index)\n data_manager.write_table_to_file('hr/persons.csv', table)\n return table", "def delete_entry(self, user, entry):\r\n try:\r\n self.curs.execute(f\"\"\"DELETE FROM {user} WHERE application = ? 
\"\"\", (entry))\r\n except sq.OperationalError:\r\n return self.err_find", "def delete_employee():\n employee_Id_list = db.get_employee_Id_list()\n print(\"The current employee list is \" , employee_Id_list)\n while True:\n delete_id = get_user_string(\"Enter the employee id to be delete\")\n if int(delete_id) in employee_Id_list:\n employee_to_be_deleted = db.get_employee(delete_id)\n db.delete_employee(delete_id)\n print(\"Employee \" + employee_to_be_deleted.full_name + \" has been delete from employee\")\n break\n else:\n print(\"No Id found\")\n continue", "def deletePlayers():\n dbConn = connect()\n c = dbConn.cursor()\n c.execute(\"DELETE FROM player\")\n dbConn.commit()\n dbConn.close()", "def delete_salary_group(db:Session):\n pass", "def eliminarServicio(codigo):\n try:\n conexion.cur.execute('delete from servicios where codigoServicio = ?', (codigo,))\n conexion.conex.commit()\n\n except sqlite3.OperationalError as e:\n print(e)\n conexion.conex.rollback()", "def deletePlayers():\n conn = connect()\n cursor = conn.cursor()\n cursor.execute(\"DELETE FROM players\")\n conn.commit()\n conn.close()" ]
[ "0.67720187", "0.67416126", "0.66954815", "0.6653715", "0.6581915", "0.65473205", "0.65393317", "0.6538599", "0.6497466", "0.6485631", "0.6443768", "0.6437587", "0.6437587", "0.6437587", "0.6437587", "0.64256126", "0.6419886", "0.64053607", "0.6392454", "0.6385016", "0.63654757", "0.63629967", "0.63402784", "0.6314467", "0.630749", "0.62787354", "0.62585425", "0.62583303", "0.6255777", "0.6255777", "0.6240876", "0.6238492", "0.62367237", "0.62351716", "0.62351716", "0.62351716", "0.62351716", "0.62351716", "0.62351716", "0.62351716", "0.62351716", "0.62351716", "0.62135994", "0.6211902", "0.6201505", "0.6191447", "0.6179318", "0.6179213", "0.61764354", "0.6176175", "0.6174653", "0.61533505", "0.61533505", "0.61503386", "0.61447006", "0.6117413", "0.6113937", "0.6110624", "0.61028475", "0.61027503", "0.6101632", "0.61012375", "0.60836446", "0.60823166", "0.6061793", "0.60580873", "0.6056466", "0.60535854", "0.6048774", "0.6048774", "0.6048774", "0.6039979", "0.60366744", "0.603609", "0.60345984", "0.6031343", "0.60220987", "0.6022019", "0.6020992", "0.60198146", "0.6019249", "0.6018146", "0.601006", "0.60064524", "0.5999256", "0.5997877", "0.5994008", "0.5976371", "0.59759074", "0.59690356", "0.5966772", "0.59634763", "0.5962943", "0.59623796", "0.59584117", "0.5954312", "0.59518045", "0.5950779", "0.59458274", "0.59411556" ]
0.6492649
9
filters all the employees in the database
def filter_employees(self, searchQuery="", researchGroup="", promotor=0, ): from Employee import Employee try: cursor = self.dbconnect.get_cursor() sql = 'select * from employee e INNER JOIN researchGroup r ON r.groupID=e.researchGroup WHERE ' \ 'e.name LIKE %(searchQueryQ)s' if researchGroup != "": sql += "AND r.name = %(researchGroupQ)s" if promotor == 1: sql += 'AND e.promotor = TRUE' if promotor == 2: sql += 'AND e.promotor = FALSE' cursor.execute(sql, dict(searchQueryQ="%" + searchQuery + "%", researchGroupQ=researchGroup)) employees = list() for row in cursor: employee = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8]) employees.append(employee) return employees except: self.dbconnect.rollback() raise Exception('unable to filter employees')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_all_employees(self):\n try:\n employees = self.admin_repository.show_all_employees()\n if employees:\n for employee in employees:\n print(\"Employee Id : {}\".format(employee[0]))\n print(\"Name : {}\".format(employee[1]))\n print(\"Email : {}\".format(employee[2]))\n print(\"----------------------------\")\n return True\n else:\n print(\"No records found.\")\n return False\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False", "def show_all_employees():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n \n \n ## right now this is ALL users... \n \n return render_template(\"employee_display.html\", employees = employees)", "def get_employees(self, active_only):\n cursor = self.dbconnect.get_cursor()\n\n if active_only:\n cursor.execute(\n 'SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external, '\n 'is_admin, is_active FROM employee WHERE is_active = TRUE')\n else:\n cursor.execute(\n 'SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external, '\n 'is_admin, is_active FROM employee')\n\n employees = list()\n for row in cursor:\n obj = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])\n employees.append(obj)\n return employees", "def get_emp_list(self):\n\t\tcondition= ''\n\t\temp_list=[]\n\t\tif self.is_for_all==0:\n\t\t\tif not self.selected_employees:\n\t\t\t\tfrappe.throw(_(\"No employees for the mentioned criteria\"))\n\t\t\t#emp_list = [cstr(d.employee) for d in self.selected_employees]\n\t\t\temp_list = frappe.db.sql_list(\"\"\"\n\t\t\t\tselect\n\t\t\t\t\temployee from `tabAttendance Salary Tool Employee`\n\t\t\t\twhere\n\t\t\t\t\tparent = '%(parent)s' \n\t\t\t\"\"\"%{\"parent\": self.name})\n\t\t\tcondition+= \"\"\" and t1.employee IN %(employees)s \"\"\"\n\t\tif self.is_open_period==0:\n\t\t\tif not self.start_date or not self.end_date:\n\t\t\t\tfrappe.throw(_(\"Satart Date and End Date are Mandatories\"))\n\t\t\tcondition= \"\"\" and attendance_date >= %(start_date)s and attendance_date <= %(end_date)s\"\"\"\n\t\temp_list = frappe.db.sql(\"\"\"\n\t\t\tselect\n\t\t\t\tt1.employee as employee, count(*) as attendance_days\n\t\t\tfrom\n\t\t\t\t`tabAttendance` t1\n\t\t\twhere\n\t\t\t\tt1.attendance_salary_tool is null\n\t\t\t\tand t1.docstatus = 1 and t1.status='Present'\n\t\t\t\t{condition} group by t1.employee order by t1.employee asc\n\t\t\"\"\".format(condition=condition),{\"employees\": tuple(emp_list),\"start_date\": self.start_date,\"end_date\": self.end_date}, as_dict=True)\n\t\treturn emp_list", "def get_employees(self):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('select * from employee')\n\n employees = list()\n for row in cursor:\n employee = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])\n employees.append(employee)\n return employees", "def get(self):\n resultado = EmployeeModel.query.all()\n return resultado", "def filter_by_employee(table, employee_id):\n operations = []\n employee_id_index = 1\n for record in table:\n id = record[employee_id_index]\n if id == employee_id:\n operations.append(record)\n return operations", "def get_queryset(self, request):\n return models.Employee.objects.exclude(username='root')", "def lookup_employee():\n 
unique_names = get_unique_employees()\n while True:\n if len(unique_names) > 1:\n print('Entries found by {} and {}.'.format(\n ', '.join(unique_names[:-1]),\n unique_names[-1]))\n elif len(unique_names) == 1:\n print('Entries found by {}.'.format(unique_names[0]))\n\n search_query = input('Show entries by: ')\n if validate_lookup_employee_format(search_query):\n break\n print('** Please enter a name of alphabetic characters and spaces **')\n return Entry.select().where(Entry.employee_name == search_query)", "def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")", "def query_employee(self, employee_inputs):\n\n query = \"select * from employee where \"\n row_names = [\n \"emp_ID\", \"Region_ID\", \"Emp_Lname\", \"Emp_Mi\", \"Emp_Fname\",\n \"Emp_Hiredate\"\n ]\n filled_attributes = []\n\n row_index = 0\n row_options = []\n for item in employee_inputs:\n if item is not None:\n row_options.append(row_index)\n filled_attributes.append(item)\n row_index += 1\n\n j = 0\n for i in row_options:\n if j == 0:\n query += \"{}='{}' \".format(row_names[i], filled_attributes[j])\n else:\n query += \"and {}='{}' \".format(row_names[i],\n filled_attributes[j])\n j += 1\n\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)", "def filter():\n return get_filter_data(db, MyTable)", "def all_employees(request, company_id=None):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n company_super_user = current_employee.isCompanySuperUserOrHigher()\n if company_id:\n company = Company.objects.get(pk=company_id)\n else:\n company = current_employee.company\n if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:\n raise PermissionDenied()\n change_company_form = ChangeCompanyForm(initial=dict(company=company))\n return TemplateResponse(\n request,\n 'all_employees.html',\n {\n 'user': request.user,\n 'company_super_user': company_super_user,\n 'company': company,\n 'change_company_form': change_company_form,\n }\n )", "def get_employees(self):\n return self.employees", "def available_employees(self,work_trips_by_date):\r\n\r\n employee_list = self.get_updated_list_from_DB('employee')\r\n available_employees_list = []\r\n total_sets = set()\r\n set_list = []\r\n\r\n for i in range(len(work_trips_by_date)):\r\n set_list.append(set(work_trips_by_date[i])) \r\n \r\n total_sets = set_list[0]\r\n \r\n if len(work_trips_by_date) != 1: \r\n for i in range(1,len(set_list)):\r\n total_sets.update(set_list[i])\r\n\r\n for line in employee_list:\r\n if line[0] not in total_sets:\r\n available_employees_list.append(line)\r\n\r\n row_names = ['id', 'name' ,'role' ,'rank'] #return columns\r\n employee_index_list = self.find_index_from_header('employee', row_names)\r\n filtered_available_employees = self.filter_by_header_index(employee_index_list, available_employees_list)\r\n\r\n available_employees_list.pop(0)\r\n\r\n return filtered_available_employees", "def getEmployees(self):\n return self.employees", "def get_employee_query(where=None):\n query = (\n db.session.query(Employee)\n .outerjoin(Department, Department.id == Employee.department_id)\n .with_entities(\n Department.name.label(\"department_name\"),\n Employee.id,\n Employee.name,\n Employee.date_birth,\n Employee.salary,\n Employee.department_id,\n )\n )\n if where is None:\n return query\n\n return query.filter(where)", "def name_search(self, cr, uid, name, args=None, operator='ilike', 
context=None, limit=100):\n if context is None:\n context = {}\n if 'emp_hours' in context:\n emp_ids = resolve_o2m_operations(cr, uid, self.pool.get('emp.luggage_transfer.hours'),\n context.get('emp_hours'), [\"employee\"], context)\n args.append(('id', 'not in', [isinstance(d['employee'], tuple) and d['employee'][0] or d['employee'] for d in emp_ids]))\n if 'mission_line' in context:\n emp_ids = resolve_o2m_operations(cr, uid, self.pool.get('hr.employee.mission.line'),\n context.get('mission_line'), [\"employee_id\"], context)\n args.append(('id', 'not in', [isinstance(d['employee_id'], tuple) and d['employee_id'][0] or d['employee_id'] for d in emp_ids]))\n \n if 'illness' in context:\n emp_ids = resolve_o2m_operations(cr, uid, self.pool.get('hr.employee.illness'),\n context.get('illness'), [\"employee_id\"], context)\n args.append(('id', 'not in', [isinstance(d['employee_id'], tuple) and d['employee_id'][0] or d['employee_id'] for d in emp_ids]))\n \n\n if 'same' in context:\n emp_ids = resolve_o2m_operations(cr, uid, self.pool.get('hr.employee.mission.line'),\n context.get('same'), [\"employee_id\"], context)\n args.append(('id', 'in', [isinstance(d['employee_id'], tuple) and d['employee_id'][0] or d['employee_id'] for d in emp_ids]))\n \n \n if 'alternative_setting_id' in context:\n old_ids = super(hr_employee, self).name_search(cr, uid, name, args=args, operator=operator, context={}, limit=limit)\n\n alternative_setting_id = context.get('alternative_setting_id')\n setting_obj = self.pool.get('hr.alternative.setting')\n alternative_setting_id = setting_obj.browse(cr, uid, alternative_setting_id)\n degrees_ids = [\n x.id for x in alternative_setting_id.degrees_ids]\n degrees_ids += degrees_ids\n degrees_ids = tuple(degrees_ids)\n\n departments_ids = [\n x.id for x in alternative_setting_id.departments_ids]\n departments_ids += departments_ids\n departments_ids = tuple(departments_ids)\n\n ex_employees_ids = [\n x.id for x in alternative_setting_id.employees_ids]\n ex_employees_ids += ex_employees_ids\n ex_employees_ids = tuple(ex_employees_ids)\n\n\n old_ids_tuple = [x[0] for x in old_ids] + [x[0] for x in old_ids]\n old_ids_tuple = tuple(old_ids_tuple)\n\n accessed_ids = self.search(cr, uid, [])\n accessed_ids += accessed_ids\n accessed_ids = tuple(accessed_ids)\n\n if not old_ids_tuple:\n old_ids_tuple = (0,0)\n \n if not departments_ids:\n departments_ids = (0,0)\n cr.execute(\n ''' Select emp.id,(SELECT MAX(date) as max_date\n FROM hr_alternative_process_line\n WHERE employee_id=emp.id and state='confirmed')date\n from hr_employee emp\n where emp.degree_id in %s \n and emp.department_id not in %s \n and emp.state = 'approved' \n and emp.payroll_state = 'khartoum' \n and emp.id in %s \n and emp.gender='male' \n and emp.id in %s \n and emp.id not in %s \n order by date NULLS LAST''', (degrees_ids,departments_ids,old_ids_tuple,accessed_ids,ex_employees_ids))\n history = cr.dictfetchall()\n new_ids = []\n while True:\n try:\n new_ids.append( history.pop()['id'] )\n except:\n break\n\n temp = dict(old_ids)\n old_ids = [x for x in old_ids if x[0] in new_ids]\n #new_ids = [x for x in new_ids if x in accessed_ids]\n #print \"..........................temp\",new_ids\n #print \"......................\",[(x, temp.get(x,False) ) for x in new_ids]\n #print \"......................\",sorted(old_ids, key=lambda x :new_ids.index(x[0]))\n return sorted(old_ids, key=lambda x :new_ids.index(x[0]))\n\n return super(hr_employee, self).name_search(cr, uid, name, args=args, operator=operator, 
context=context, limit=limit)", "def build_filters(self, filters=None):\n\n if filters is None:\n filters = {}\n\n orm_filters = super(EmployeeResource, self).build_filters(filters)\n\n if 'role' in filters:\n ids = (Employee.by_assignment_role(filters['role'])\n .values_list('id', flat=True))\n orm_filters['pk__in'] = ids\n\n return orm_filters", "def list(self, request):\n employee = self.controller.retrieve_all_employees()\n serializer = data_serializers.PresentEmployeeDataSerializer(employee, many=True)\n return Response(serializer.data)", "def query_table(self, expression = ''):\n response = self.table.scan(FilterExpression = Attr(\"Employeeid\").gt(int(expression)))\n df = pd.DataFrame(response['Items'])\n print(df.head(20))\n return df", "def search_all_records(self, data: dict, execution_context: dict):", "def gather_employee_entries(self):\n user_inputs = [\n self.emp_lname.get(), self.emp_mi.get(), self.emp_fname.get(),\n self.emp_hiredate.get()\n ]\n\n return self.check_input_empty(user_inputs)", "def get_employees(cls, strategy=lazyload):\n cls._check_strategy(strategy)\n\n return db.session.query(Employee).options(\n strategy(Employee.department)\n ).all()", "def get(self):\n employees = self.service.get_employees(strategy=selectinload)\n return self.schema.dump(employees, many=True), 200", "def get_sal_slip_list(self, as_dict=False):\n\t\tcondition= ''\n\t\temp_list=[]\n\t\tif self.is_for_all==0:\n\t\t\tif not self.selected_employees:\n\t\t\t\tfrappe.throw(_(\"No employees for the mentioned criteria\"))\n\t\t\t#emp_list = [cstr(d.employee) for d in self.selected_employees]\n\t\t\temp_list = frappe.db.sql_list(\"\"\"\n\t\t\t\tselect\n\t\t\t\t\temployee from `tabAttendance Salary Tool Employee`\n\t\t\t\twhere\n\t\t\t\t\tparent = '%(parent)s' \n\t\t\t\"\"\"%{\"parent\": self.name})\n\t\t\tcondition+= \"\"\" and t1.employee IN %(employees)s \"\"\"\n\t\tif self.is_open_period==0:\n\t\t\tif not self.start_date or not self.end_date:\n\t\t\t\tfrappe.throw(_(\"Satart Date and End Date are Mandatories\"))\n\t\t\tcondition= \"\"\" and attendance_date >= %(start_date)s and attendance_date <= %(end_date)s\"\"\"\n\t\temp_list = frappe.db.sql(\"\"\"\n\t\t\tselect\n\t\t\t\tt1.name\n\t\t\tfrom\n\t\t\t\t`tabAttendance` t1\n\t\t\twhere\n\t\t\t\tt1.attendance_salary_tool is null\n\t\t\t\tand t1.docstatus = 1 and t1.status='Present'\n\t\t\t\t{condition} group by t1.employee order by t1.employee asc\n\t\t\"\"\".format(condition=condition),{\"employees\": tuple(emp_list),\"start_date\": self.start_date,\"end_date\": self.end_date}, as_dict=as_dict)\n\n\t\treturn emp_list", "def scan_table(self,expression=''):\n response = self.table.query(KeyConditionExpression=Key(\"Employeeid\").eq(int(expression)))\n print(response['Items'])\n df = pd.DataFrame(response['Items'], index=[0])\n print(df.head())\n return df", "def search_entity(self, name_filter):\n name_filter=name_filter.lower()\n model_reader=oc.delegator.getModelReader()\n names=model_reader.getEntityNames()\n # print(len(names))\n for name in names:\n if name_filter in name.lower():\n print(name)", "def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)", "def get_birthday_employees(self):\n birthday_employees = []\n\n employees = self.search([\n ('birthday_reminders', '=', True),\n ('birthday', '!=', False),\n ])\n if not employees:\n return birthday_employees\n\n return employees.filtered(lambda x: self.check_emp_birthday(x.birthday))", "def 
employee_works_in(employee_id: int) -> List[str]:\n\n conn = database_connect()\n if(conn is None):\n return None\n cur = conn.cursor()\n\n try:\n # SQL statement and execute\n sql = \"\"\"SELECT department\n FROM EmployeeDepartments\n WHERE EmployeeDepartments.empid = %s\"\"\"\n cur.execute(sql, (employee_id,));\n\n # Attempt to fetch all\n result = cur.fetchall()\n\n if result == None:\n cur.close()\n conn.close()\n return []\n\n departments = []\n for row in result:\n departments.append(\n row[0]\n )\n\n cur.close()\n conn.close()\n return departments\n except Exception as e:\n print(\"ddd\")\n print(e)\n # If login failed, return None\n cur.close()\n conn.close()\n return []", "def populate_employees():\n employees = get_employees()\n\n db.session.bulk_save_objects(employees)\n db.session.commit()", "def employees(employee_id=None):\n\tif not employee_id:\n\t\temployee_data = _serialize_list(Employee.query.all())\n\telse:\n\t\temployee_data = _serialize_model(Employee.query.filter_by(id=employee_id).first())\n\n\tresp = jsonify(employee_data)\n\treturn resp", "def employees(self, employees: object):\n\n self._employees = employees", "def test_api_can_get_all_employees(self):\n res = self.client().get(service_url_emp)\n self.assertEqual(res.status_code, 200)\n self.assertIn('name1', str(res.data))\n self.assertIn('name2', str(res.data))", "def get_manager_employees(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n manager_employees = Employee.objects.filter(manager=current_employee, development_plan_type=None).all()\n if manager_employees:\n emp_list=[]\n for emp in manager_employees:\n emp_data={}\n emp_data[\"id\"] = emp.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"manager_id\"] = emp.manager.id\n # emp_data[\"status_questions\"] = emp.status_questions\n # employee_role = EmployeeRole.objects.filter(employee=emp).all()\n # name_role_list = []\n # for obj in employee_role:\n # name_role_list.append(obj.role.name)\n # emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n data = {\"employees:\": emp_list}\n return JsonResponse(status=201, data=data)\n else:\n return JsonResponse(\"The user with id={} isn't a manager for any user\".format(current_employee.user.id),\n status=404)", "def employees(self) -> object:\n return self._employees", "def employee_list(request):\n response_data = []\n for emp in Employee.objects.all().values(\n 'id', 'first_name', 'last_name', 'age', 'address', 'city',\n 'state', 'country'):\n response_data.append(emp)\n return JsonResponse(response_data, safe=False)", "def _check_employee(self):\n\n for record in self:\n\n if record.nik_number:\n # find duplicate nik\n employee_ids = self.search([('id', 'not in', self.ids), ('nik_number', '=', record.nik_number)])\n if employee_ids:\n error_msg = _(\"There is duplicate of Employee Identity Number.\")\n raise ValidationError(error_msg)\n\n # check nik format. 
it required base_indonesia\n if not record._check_nik(record):\n error_msg = _(\"NIK did not match with Company Code.\")\n raise ValidationError(error_msg)\n\n if record.identification_id:\n employee_ids = self.search([('id', 'not in', self.ids), ('identification_id', '=', record.identification_id)])\n if employee_ids:\n error_msg = _(\"There is duplicate of Identification Number.\")\n raise ValidationError(error_msg)\n\n return True", "def list(self, request):\n varemployee = employee.objects.all()\n serializer = employeeSerializer(varemployee, many=True)\n return Response(serializer.data)", "def show_overview_of_all_employees(self):\n\n print(\"OVERVIEW OF EMPLOYEES\\n\")\n\n employees_ob_list = self.llapi.get_employee_overview()\n \n for employee_ob in employees_ob_list:\n print(employee_ob.print_info_in_line(\"*\"))\n \n print(f\"\\nNAN AIR has {len(employees_ob_list)} employees\")\n\n print(\"\\nB Back\\n\")\n\n action_str = self.choose_action([\"b\"])\n while action_str == False:\n action_str = self.choose_action([\"b\"])\n\n if action_str == \"b\":\n return", "def searchRecords(self, filterChoice, keyword):\r\n session = wx.GetApp().session\r\n model = getattr(db, self.modelName)\r\n\r\n result = None\r\n if filterChoice == \"Person\":\r\n qry = session.query(model)\r\n logging.debug(qry)\r\n result = qry.filter(db.Person.full_name.contains('%s' % keyword))\r\n\r\n result = result.all()\r\n\r\n logging.debug(result)\r\n return result", "def _s_filter(cls, arg):\n return cls.query.filter_by(name=arg)", "def filter_all(_):\n return True", "def index(request):\n\n context = {'employees': User.objects.select_related('profile').filter(is_staff=True).order_by('first_name')}\n return render(request, 'Employees/index.html', context)", "def filter_all(cls, **kwargs):\n return cls.query.filter_by(**kwargs).all()", "def set_employees_by_id(department_id):\n return Employee.query.filter_by(department_id=department_id)", "def get_employees_in_department(department_name: str) -> list:\n\n conn = database_connect()\n if(conn is None):\n return None\n cur = conn.cursor()\n\n try:\n # SQL statement and execute\n sql = \"\"\"SELECT Employee.empid, Employee.name\n FROM Employee JOIN EmployeeDepartments USING(empid)\n WHERE EmployeeDepartments.department = %s\"\"\"\n cur.execute(sql, (department_name,))\n\n # Attempt to fetch all rows\n result = cur.fetchall()\n\n if result == None:\n cur.close()\n conn.close()\n return []\n\n employees = []\n for row in result:\n employees.append(\n [row[0], row[1]]\n )\n cur.close()\n conn.close()\n return employees\n except Exception as e:\n print(\"ooo\")\n print(e)\n # If nothing was returned, return empty list\n cur.close()\n conn.close()\n return []\n\n # TODO Dummy Data - Change to be useful!\n # Return the employees in the department.\n # Each \"row\" has: [ empid, name ]\n\n # employees = [\n # [15905, 'Rea Fibbings'],\n # [9438, 'Julia Norville'],\n # [36020, 'Adora Lansdowne'],\n # [98809, 'Nathanial Farfoot'],\n # [58407, 'Lynne Smorthit'],\n # ]\n #\n # return employees", "def filter( self, trans, user, query, column_filter ):\n if column_filter == \"All\":\n return query\n return query.filter( model.Category.name == column_filter )", "def filter( self, trans, user, query, column_filter ):\n if column_filter == \"All\":\n return query\n return query.filter( model.Category.name == column_filter )", "def query_all():\n\tstudents = session.query(Student).all()\n\treturn students", "def test_filter_users_geq(app, add_ten_users):\n with app.app_context():\n 
add_ten_users()\n users = User.query\n users = apply_filter(users, User, {'column': 'id', 'type': 'geq',\n 'value': '5'})\n result = users.all()\n assert len(result) == 6", "def working_employees(self,work_trips_by_date):\r\n\r\n employee_list = self.get_updated_list_from_DB('employee')\r\n working_employees_list = []\r\n line_list = []\r\n\r\n for i,line in enumerate(work_trips_by_date): \r\n\r\n for line in employee_list:\r\n if line[0] in work_trips_by_date[i]:\r\n working_employees_list.append(line[2]+','+line[6]+','+work_trips_by_date[i][0])\r\n \r\n return working_employees_list", "def show_all_information():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n all_certs = employee_certification.query.all()\n \n return render_template(\"admin.html\", employees = employees, all_certs = all_certs)", "def queryset(self, request, queryset):\n # Compare the requested value (either '80s' or 'other')\n # to decide how to filter the queryset.\n\n if self.value() is None:\n return queryset.all()\n\n return queryset.filter(firm__pk=self.value())", "def test_job_filter(self):\n # A Job in the database\n job_filter = {\"job\" : \"EnGiNeEr\"}\n # Search string that returns all of the users\n search_string = \"''\"\n # Search For all users with given filter\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string,\"filter\":json.dumps(job_filter)})\n search_result = json.loads(resp.content)\n for user in search_result:\n self.assertIn(job_filter.title(),user['job'],\"Job filter doesn't run correctly\")", "def get(self, request):\n TODOs = Todotbl.objects.all() if request.user.role == User.ADMIN else Todotbl.objects.filter(user=request.user)\n for filterData in re.split(Filter.separator, request.data[Filter.column]):\n filterDataMake = FilterData()\n # set column to search\n if Filter.column_priority in filterData:\n filterDataMake.columnName = Filter.column_priority\n elif Filter.column_status in filterData:\n filterDataMake.columnName = Filter.column_status\n\n # what search (order by or filtering)\n if Filter.asc in filterData: # order by asc\n filterDataMake.val = Filter.asc\n TODOs = Todotbl.objects.filter(id__in=TODOs).order_by(filterDataMake.columnName).distinct()\n elif Filter.desc in filterData: # order by desc\n filterDataMake.val = Filter.desc\n TODOs = Todotbl.objects.filter(id__in=TODOs).order_by(filterDataMake.columnName).reverse().distinct()\n else:\n if Filter.more in filterData: # if more\n filterDataMake.isMore = True\n elif Filter.less in filterData: # if less\n filterDataMake.isLess = True\n\n if Filter.equal in filterData: # if equal\n filterDataMake.isEqual = True\n if filterDataMake.isMore: # if more equal\n if Filter.column_priority == filterDataMake.columnName: # if more equal in column priority\n TODOs = Todotbl.objects.filter(id__in=TODOs,\n priority__gte=validate_priority(filterData)).distinct()\n elif Filter.column_status == filterDataMake.columnName: # if more equal in column status\n TODOs = Todotbl.objects.filter(id__in=TODOs,\n status__gte=validate_word_status(filterData, '>=')) \\\n .distinct()\n elif filterDataMake.isLess: # if less equal\n if Filter.column_priority == filterDataMake.columnName: # if less equal in column priority\n TODOs = Todotbl.objects.filter(id__in=TODOs,\n priority__lte=validate_priority(filterData)).distinct()\n 
elif Filter.column_status == filterDataMake.columnName: # if less equal in column status\n TODOs = Todotbl.objects.filter(id__in=TODOs,\n status__lte=validate_word_status(filterData,\n '<=')).distinct()\n else: # if equal\n if Filter.column_priority == filterDataMake.columnName: # if equal in column priority\n TODOs = Todotbl.objects.filter(id__in=TODOs,\n priority=validate_priority(filterData)).distinct()\n elif Filter.column_status == filterDataMake.columnName: # if equal in column status\n TODOs = Todotbl.objects.filter(id__in=TODOs,\n status=validate_word_status(filterData, '=')).distinct()\n\n else: # if not equal, only more or less\n if filterDataMake.isMore: # if more\n if Filter.column_priority == filterDataMake.columnName: # if more in column priority\n TODOs = Todotbl.objects.filter(id__in=TODOs,\n priority__gt=validate_priority(filterData)).distinct()\n elif Filter.column_status == filterDataMake.columnName: # if more in column status\n TODOs = Todotbl.objects.filter(id__in=TODOs,\n status__gt=validate_word_status(filterData, '>')).distinct()\n else: # if less\n if Filter.column_priority == filterDataMake.columnName: # if less in column priority\n TODOs = Todotbl.objects.filter(id__in=TODOs,\n priority__lt=validate_priority(filterData)).distinct()\n elif Filter.column_status == filterDataMake.columnName: # if less in column status\n TODOs = Todotbl.objects.filter(id__in=TODOs,\n status__lt=validate_word_status(filterData, '<')).distinct()\n return Response((model_to_dict(TODO) for TODO in TODOs), status=status.HTTP_200_OK)", "def test_filter_rows(self):\n self.insert_row()\n\n instance = Manager.objects().first().run_sync()\n dictionary = instance.to_dict(Manager.name)\n self.assertDictEqual(dictionary, {\"name\": \"Guido\"})", "def _custom_filter(self, query):\r\n return query", "def list(self, request):\n\n records = filter_against_records(request)\n \n if 'faculty_id' in request.query_params:\n faculty = Faculties.objects.filter(id=request.query_params.get('faculty_id'))[0]\n departments = Departments.objects.filter(faculty_id=model_to_dict(faculty)['id'])\n for department in departments:\n education_programs = EducationPrograms.objects.filter(main_department_id=model_to_dict(department)['id'])\n new_records_id = []\n for education_program in education_programs:\n groups = Groups.objects.filter(education_program_id=model_to_dict(education_program)['id'])\n new_records_id = []\n for group in groups:\n for record in records:\n if model_to_dict(group)['id'] == model_to_dict(record)['group_id']:\n new_records_id.append(model_to_dict(record)['id'])\n records = records.filter(id__in=new_records_id)\n\n if 'department_id' in request.query_params:\n department = Departments.objects.filter(id=request.query_params.get('department_id'))[0]\n education_programs = EducationPrograms.objects.filter(main_department_id=model_to_dict(department)['id'])\n new_records_id = []\n for education_program in education_programs:\n groups = Groups.objects.filter(education_program_id=model_to_dict(education_program)['id'])\n new_records_id = []\n for group in groups:\n for record in records:\n if model_to_dict(group)['id'] == model_to_dict(record)['group_id']:\n new_records_id.append(model_to_dict(record)['id'])\n records = records.filter(id__in=new_records_id)\n\n if 'start_year_id' in request.query_params:\n start_year = StartYears.objects.filter(id=request.query_params.get('start_year_id'))[0]\n education_programs = EducationPrograms.objects.filter(start_year_id=model_to_dict(start_year)['id'])\n 
new_records_id = []\n for education_program in education_programs:\n groups = Groups.objects.filter(education_program_id=model_to_dict(education_program)['id'])\n new_records_id = []\n for group in groups:\n for record in records:\n if model_to_dict(group)['id'] == model_to_dict(record)['group_id']:\n new_records_id.append(model_to_dict(record)['id'])\n records = records.filter(id__in=new_records_id)\n\n if 'specialization_id' in request.query_params:\n specialization = Specializations.objects.filter(id=request.query_params.get('specialization_id'))[0]\n education_programs = EducationPrograms.objects.filter(specialization_id=model_to_dict(specialization)['id'])\n new_records_id = []\n for education_program in education_programs:\n groups = Groups.objects.filter(education_program_id=model_to_dict(education_program)['id'])\n new_records_id = []\n for group in groups:\n for record in records:\n if model_to_dict(group)['id'] == model_to_dict(record)['group_id']:\n new_records_id.append(model_to_dict(record)['id'])\n records = records.filter(id__in=new_records_id)\n\n if 'education_level_id' in request.query_params:\n education_level = EducationLevels.objects.filter(id=request.query_params.get('education_level_id'))[0]\n education_programs = EducationPrograms.objects.filter(education_level_id=model_to_dict(education_level)['id'])\n for education_program in education_programs:\n groups = Groups.objects.filter(education_program_id=model_to_dict(education_program)['id'])\n new_records_id = []\n for group in groups:\n for record in records:\n if model_to_dict(group)['id'] == model_to_dict(record)['group_id']:\n new_records_id.append(model_to_dict(record)['id'])\n records = records.filter(id__in=new_records_id)\n\n if 'education_program_id' in request.query_params:\n education_program = EducationPrograms.objects.filter(id=request.query_params.get('education_program_id'))[0]\n groups = Groups.objects.filter(education_program_id=model_to_dict(education_program)['id'])\n new_records_id = []\n for group in groups:\n for record in records:\n if model_to_dict(group)['id'] == model_to_dict(record)['group_id']:\n new_records_id.append(model_to_dict(record)['id'])\n records = records.filter(id__in=new_records_id)\n\n if 'student_id' in request.query_params:\n records = records.filter(student_id=request.query_params.get('student_id'))\n\n \n\n \n \n \n students = Students.objects.all()\n res = []\n for student in students:\n student_records = records.filter(student_id=model_to_dict(student)['id'])\n if len(student_records) > 0:\n res.append(student)\n\n return Response(normalize_students(res))", "def setEmployees(self, employees):\n self.employees = employees", "def get_queryset(self):\n return Person.objects.filter(expiry_date__gt=timezone.now())", "def filter_list(client, args):\n from ..util import print_query\n print_query(client.context.query)", "def all(self):\n print('HELLO')\n return self.__model__.query.all()", "def get_queryset(self):\n if 'email' in self.request.GET:\n if (Person.objects.filter(user__email=self.request.GET['email'])\n .exists()):\n queryset = (Person.objects\n .filter(user__email=self.request.GET['email']))\n else:\n queryset = []\n else:\n queryset = Person.objects.all()\n return queryset", "def get_queryset(self):\n if 'email' in self.request.GET:\n if (Person.objects.filter(user__email=self.request.GET['email'])\n .exists()):\n queryset = (Person.objects\n .filter(user__email=self.request.GET['email']))\n else:\n queryset = []\n else:\n queryset = Person.objects.all()\n return 
queryset", "def test_filter_users_eq(app, add_ten_users):\n with app.app_context():\n add_ten_users()\n users = User.query\n users = apply_filter(users, User, {'column': 'id', 'type': 'eq',\n 'value': '2'})\n result = users.all()\n assert len(result) == 1", "def filterData(records):\n def isInteresting(record):\n if record[VO_ISSUER] in ('/DC=ch/DC=cern/OU=computers/CN=voms.cern.ch', '/DC=ch/DC=cern/OU=computers/CN=lcg-voms.cern.ch'):\n return True\n if record[VO_NAME] in ('atlas', 'cms', 'alice'):\n return True\n if record[USERSN] == '/C=SI/O=SiGNET/O=IJS/OU=F9/CN=Andrej Filipcic':\n return True\n if record[USERSN] in ('aliprod', '/aliprod'):\n return True\n if record[USERSN].startswith(ALIEN_USER_PREFIX):\n return True\n\n return False\n\n return [ r for r in records if isInteresting(r) ]", "def filter_query(self, query, request, resource):\n raise NotImplementedError()", "def filter_query(self, request, query, view):\n raise NotImplementedError('.filter_query() must be implemented.') # pragma: no cover", "def get_employee(self):\n employee_ids = self.env['hr.employee'].search([('user_id', '=', self.env.uid)])\n return employee_ids[0] if employee_ids else False", "def filter(self, **args ):\n query = TXLOG.select('*')\n for key, value in args.items():\n if '__' in key:\n key, op = key.split('__')\n else:\n op = 'eq'\n\n if not key in self.schema:\n raise BadArgument(\"Key %s not a valid argument\" % key )\n\n if not isinstance(value, basestring ):\n value = str(value)\n\n query = query.where({key:value}, self.operators[op])\n\n items = query.list()\n return items", "def step_filter(self, qs):\n return qs", "def query_employee_skill(self):\n\n query = \"select Skill_Descrpt, Emp_Fname, Emp_Lname from \" \\\n \"skill, employee, empskill \" \\\n \"where employee.Emp_ID = empskill.Emp_ID \" \\\n \"and skill.Skill_ID = empskill.Skill_ID \"\n\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)", "def _getData(self, entity, params):\n\n res = []\n entity_code = entity.code\n conn = self._connect(entity)\n try:\n conn.create_function(\"INLIST\", 2, self._inlist)\n\n conn.row_factory = sqlite3.Row\n cursor = conn.cursor()\n\n if not self.exists(entity_code, cursor):\n self.generate_entity(entity)\n\n my_departments = \"\"\n my_users = \"\"\n for column in entity.definition[\"columns\"]:\n if \"entityFilterByDepartment\" in column or column[\"type\"] == \"departmentSelector\":\n my_departments = self.getMyDepartments()\n if \"entityFilterByUser\" in column or column[\"type\"] == \"userSelector\":\n my_users = self.getMyUsers()\n\n # Create columnames for each column in entity metadata. 
Adding too related fields\n columnNames = \"A.id\"\n leftJoin = \"\"\n letter = \"B\"\n thisEntityHaveDepartmentFilter = False\n thisEntityHaveUserFilter = False\n for column in entity.definition[\"columns\"]:\n\n if column[\"type\"] in [\"numeric\", \"text\"]:\n columnNames += f\", A.[{column['field']}]\"\n\n elif column[\"type\"] == \"dateTime\":\n columnNames += f\", strftime('%Y-%m-%d',{column['field']}) as [{column['field']}]\"\n\n elif column[\"type\"] in [\"dropdown\", \"remoteDropdown\"]:\n columnNames += f\", A.[{column['field']}]\"\n columnNames += f\", {letter}.[{column['entityLabel']}] as {letter}_label\"\n leftJoin += f\" LEFT JOIN [{column['entity']}] as {letter} ON {letter}.id = A.{column['field']} \"\n\n if \"entityFilterByDepartment\" in column:\n leftJoin += f' AND ( {letter}.departments is null or INLIST({letter}.departments,\"{my_departments}\") = 1 ) '\n if \"entityFilterByUser\" in column:\n leftJoin += f' AND ( {letter}.users is null or INLIST({letter}.users,\"{my_users}\") = 1 ) '\n\n letter = self.getNextLetter(letter)\n\n elif column[\"type\"] == \"departmentSelector\":\n columnNames += f\", A.[departments]\"\n thisEntityHaveDepartmentFilter = True\n\n elif column[\"type\"] == \"userSelector\":\n columnNames += f\", A.[users]\"\n thisEntityHaveUserFilter = True\n\n elif column[\"type\"] == \"relatedEntity\":\n columnNames += f\", {letter}.[{column['entityLabel']}] as {column.field}\"\n if \"relatedColumnRelation\" in column and column[\"relatedColumnRelation\"]:\n left_on = str(column['relatedColumnRelation']).replace(\n \"#entity#\", \"A\").replace(\"#relatedEntity#\", letter)\n leftJoin += f\" LEFT JOIN [{column['entity']}] as {letter} ON {left_on} \"\n else:\n leftJoin += f\" LEFT JOIN [{column['entity']}] as {letter} ON {letter}.id = A.{column['relatedForeignKey']} \"\n letter = self.getNextLetter(letter)\n\n sortBy = \"A.ID\"\n if \"sortBy\" in params and params[\"sortBy\"]:\n sortBy = f'A.{params[\"sortBy\"]}'\n elif \"sortBy\" in entity.definition and entity.definition[\"sortBy\"]:\n sortBy = f'A.{entity.definition[\"sortBy\"]}'\n where = \"\"\n letter = \"B\"\n\n if thisEntityHaveDepartmentFilter:\n where = f' WHERE ( A.departments is null or INLIST(A.departments,\"{my_departments}\") = 1 ) '\n if thisEntityHaveUserFilter:\n where = f' WHERE ( A.users is null or INLIST(A.users,\"{my_users}\") = 1 ) '\n\n # Add filter for group in related entities\n for column in entity.definition[\"columns\"]:\n if column[\"type\"] in [\"dropdown\", \"remoteDropdown\"] and (\"entityFilterByDepartment\" in column or \"entityFilterByUser\" in column):\n where += \" AND \" if where else \" WHERE \"\n where += f'A.{column[\"field\"]} is null or A.{column[\"field\"]} is not null and {letter}.id is not null '\n letter = self.getNextLetter(letter)\n\n param_list = tuple()\n if \"filters\" in params and params[\"filters\"] and len(params[\"filters\"]) > 0:\n for filter_item in params[\"filters\"]:\n if \"values\" in filter_item and filter_item[\"values\"] and len(filter_item[\"values\"]) > 0:\n if where == \"\":\n where = \" WHERE \"\n else:\n where += \" AND \"\n\n if \".\" in str(filter_item[\"field\"]):\n mm_entity = \"MM\" + str(filter_item[\"field\"]).split(\".\")[0]\n mm_field = str(filter_item[\"field\"]).split(\".\")[1]\n if len(filter_item[\"values\"]) == 1:\n where += f\" {mm_entity}.[{mm_field}] = ?\"\n param_list += (append(filter_item[\"values\"][0]),)\n else:\n where += f\" {mm_entity}.[{mm_field}] IN ({','.join( filter_item['values'])})\"\n\n leftJoin += f\" INNER 
JOIN [{filter_item['field'].split('.')[0]}] as {mm_entity} ON {mm_entity}.{filter_item['relatedManyToManyKey']} = A.id \"\n else:\n if len(filter_item[\"values\"]) == 1:\n if filter_item[\"useLike\"]:\n where += f\" A.[{filter_item['field']}] LIKE ?\"\n param_list += (f\"%{filter_item['values'][0]}%\",)\n else:\n where += f\" A.[{filter_item['field']}] = ?\"\n param_list += (filter_item[\"values\"][0],)\n else:\n if filter_item[\"useLike\"]:\n where += \" ( 1=2 \"\n for filter_value in filter_item[\"values\"]:\n if filter_value:\n where += f\" OR A.[{filter_item['field']}] LIKE ?\"\n param_list += (f\"%{filter_value}%\",)\n where += \" ) \"\n else:\n where += f\" A.[{filter_item['field']}] IN ({','.join( filter_item['values'])})\"\n\n # Add fixed condition\n if \"condition\" in entity.definition and entity.definition[\"condition\"]:\n if where == \"\":\n where = \" WHERE \"\n else:\n where += \" AND \"\n where += entity.definition[\"condition\"]\n\n sql = f\"SELECT {columnNames} FROM {entity_code} as A {leftJoin}\"\n if where != \"\":\n sql += where\n\n sql += f\" ORDER BY {sortBy}\"\n\n if \"fromReg\" in params and params[\"fromReg\"] > 0 and \"toReg\" in params and params[\"toReg\"] > 0:\n sql += F\" LIMIT {params['fromReg']-1}, {params['toReg']-params['fromReg']+1} \"\n\n cursor.execute(sql, param_list)\n for row in cursor:\n dic = {\"id\": row[\"id\"]}\n letter = \"B\"\n\n for column in entity.definition[\"columns\"]:\n\n if column[\"type\"] in [\"numeric\", \"text\", \"dateTime\", \"date\"]:\n dic[column[\"field\"]] = row[column[\"field\"]]\n elif column[\"type\"] in [\"dropdown\", \"remoteDropdown\"]:\n dic[column[\"field\"]] = f\"{row[column['field']]}|-|{row[f'{letter}_label']}\"\n letter = self.getNextLetter(letter)\n elif column[\"type\"] == \"departmentSelector\":\n dic[\"departments\"] = row[\"departments\"]\n elif column[\"type\"] == \"userSelector\":\n dic[\"users\"] = row[\"users\"]\n elif column[\"type\"] == \"relatedEntity\":\n dic[column[\"field\"]] = row[column[\"field\"]]\n letter = self.getNextLetter(letter)\n\n res.append(dic)\n\n finally:\n conn.close()\n\n return res", "def filter_by_schema(self, schema):\n pass", "def get_list_filter(self,table=None,**kwargs):\n # import pdb;pdb.set_trace()\n self.where = '1'\n self.order_by = 'id'\n if not isinstance(table,SqliteTable):\n return\n \n # get the column names for the table\n table_column_names = table.get_column_names()\n \n self._create_filter_session(table.table_name) # ensure it exists\n \n where_list = []\n session_data = session.get(self.HEADER_NAME)\n if session_data and table.table_name in session_data:\n filter_data = session_data[table.table_name][self.FILTERS_NAME]\n for k,v in filter_data.items():\n col = v.get(self.FIELD_NAME)\n val = v.get(self.VALUE)\n kind = v.get(self.TYPE)\n start = v.get(self.DATE_START)\n end = v.get(self.DATE_END)\n if col and (val or start or end):\n \n # if the column name is a physical column in the primary table\n # prepend the column name with the table name to avoid ambiguous column names\n if col in table_column_names and '.' not in col:\n col = table.table_name + '.' 
+ col\n \n if kind == 'date':\n start = iso_date_string(start if start else self.BEGINNING_OF_TIME)\n end = iso_date_string(end if end else self.END_OF_TIME)\n # print(start,end)\n where_list.append(\"\"\"date({col}) >= date('{start}') and date({col}) <= date('{end}')\"\"\".format(col=col,start=start,end=end))\n # print(where_list[-1])\n else:\n where_list.append(\"\"\"{col} LIKE '%{val}%'\"\"\".format(col=col,val=str(val).lower()))\n \n \n # import pdb;pdb.set_trace()\n order_list = []\n for order_data in session_data[table.table_name][self.ORDERS_NAME]:\n for dom_id in order_data.keys():\n col = order_data[dom_id].get(self.FIELD_NAME)\n direction = int(order_data[dom_id].get(self.DIRECTION,0)) #direction will be -1,0 or 1\n if col and direction:\n \n # if the column name is a physical column in the primary table\n # prepend the column name with the table name to avoid ambiguous column names\n # Same as above, but not sure it's really needed in order by...\n if col in table_column_names and '.' not in col:\n col = table.table_name + '.' + col\n\n direction = 'DESC' if direction < 0 else 'ASC'\n collate = ''\n field_type = \"TEXT\"\n try:\n field_type = table.get_column_type(order_data[dom_id]['field_name'])\n except KeyError:\n # the field name may be defined in the query \n pass\n if field_type.lower() == \"text\":\n collate = 'COLLATE NOCASE'\n order_list.append(\"\"\"{col} {collate} {direction}\"\"\".format(col=col,collate=collate,direction=direction))\n \n if where_list:\n self.where = ' and '.join(where_list)\n if order_list:\n self.order_by = ','.join(order_list)\n else:\n self.order_by = table.order_by_col #default order for this table", "def all(self):\n return self.filter()", "def all(self):\n return self.filter()", "def all(self):\n return self.filter()", "def get_all_companies_and_people():", "def test_otoroshi_controllers_adminapi_tcp_service_api_controller_find_all_entities_action(self):\n pass", "def _compute_results(self):\n self.ensure_one()\n Result = self.env['sla.employee.view']\n dom = []\n if self.supplier_category_name:\n if self.supplier_category_name == 'employee':\n dom += [('pay_to', '=', 'employee')]\n elif self.supplier_category_name == 'supplier':\n dom += [('pay_to', '!=', 'employee'),('invoice_id.partner_id.category_id.name', '!=', 'ต่างประเทศ')]\n elif self.supplier_category_name == 'foreign':\n dom += [('pay_to', '!=', 'employee'),('invoice_id.partner_id.category_id.name', '=', 'ต่างประเทศ')]\n if self.user_ids:\n dom += [('voucher_id.validate_user_id', 'in', self.user_ids.ids)]\n if self.source_document_type:\n dom += [('invoice_id.source_document_type', '=',\n self.source_document_type)]\n if self.fiscalyear_start_id:\n dom += [('voucher_id.date', '>=',\n self.fiscalyear_start_id.date_start)]\n if self.fiscalyear_end_id:\n dom += [('voucher_id.date', '<=',\n self.fiscalyear_end_id.date_stop)]\n if self.period_start_id:\n dom += [('voucher_id.date', '>=',\n self.period_start_id.date_start)]\n if self.period_end_id:\n dom += [('voucher_id.date', '<=',\n self.period_end_id.date_stop)]\n if self.date_start:\n dom += [('voucher_id.date', '>=', self.date_start)]\n if self.date_end:\n dom += [('voucher_id.date', '<=', self.date_end)]\n self.results = Result.search(\n dom, order=\"fiscalyear,voucher_number,invoice_number\")", "def get_queryset(self):\n user = self.request.user\n expenses = Expense.objects.filter(\n Q(userexpense__in=user.userexpense_set.all())\n | Q(group__in=user.group_set.all()))\n\n if self.request.query_params.get('q', None) is not None:\n 
expenses = expenses.filter(\n description__icontains=self.request.query_params.get(\n 'q', None))\n return expenses", "def get_queryset(self):\n queryset = Food.objects.all()\n name = self.request.query_params.get('name', None)\n ndb_no = self.request.query_params.get('ndb_no', None)\n if name is not None:\n queryset = queryset.filter(name=name)\n elif ndb_no is not None:\n queryset = queryset.filter(ndb_no=ndb_no)\n return queryset", "def get_employees_by_date_of_birth(cls, date, strategy=lazyload):\n cls._check_strategy(strategy)\n\n employees = db.session.query(Employee).options(\n strategy(Employee.department)\n ).filter_by(\n date_of_birth=date\n ).all()\n return employees", "def load_employees(self):\n empcsv = open('employees.csv','r')\n emp_temp = []\n empcsv = empcsv.readlines()[1:]\n for line in empcsv:\n for i in line.split(','):\n if line == 0:\n pass\n else:\n emp_temp.append(i)\n employee = emp_temp[0::13]\n data_1 = []\n data = []\n for i in emp_temp:\n if i in employee:\n pass\n else:\n data_1.append(i)\n for i in range(26):\n data_temp = data_1[(i * 12):((i + 1) * 12)]\n data.append(data_temp)\n for i in range(len(employee)):\n self.emp_dict[employee[i]] = data[i]\n #print(self.emp_dict)\n for i in self.emp_dict:\n self.emp_dict[i] = [x.replace('\\n', '') for x in self.emp_dict[i]]\n return self.emp_dict", "def get_queryset(self):\n return filterUsersByName( self.request.query_params.get('username', None) )", "def get_queryset(self):\n\n return person_search_qs(self.request)", "async def filter(self, **kwargs):\n\n pass", "def get_queryset(self):\n queryset = DetalleVenta.objects.all()\n fk_venta = self.request.query_params.get('fk_venta')\n \n if fk_venta is not None:\n queryset = queryset.filter(fk_venta=fk_venta)\n return queryset\n return queryset", "def get(self, request):\n varemployee = employee.objects.all()\n serializer = employeeSerializer(varemployee, many=True)\n return Response(serializer.data)", "def filter_data(self):\n self.data = filter_pandas(self.data, self.filters)", "def filter(self, *args, **kwargs):", "def list(self, filter=None, _check_permissions=True, together='', _jsoned=True):\r\n\r\n join = None\r\n if filter:\r\n for k, v in filter.iteritems():\r\n if None in v:\r\n filter[k] = None\r\n query = reduce(and_,\r\n (self.table[field].belongs(value) if type(value) is list else (self.table[field] == value)\r\n for field, value in filter.iteritems()))\r\n else:\r\n query = None\r\n if _check_permissions:\r\n query, join = self.CASQuery(query, verb='list')\r\n fields = (self.table.ALL,) if self.virtual_fields else self.visible_fields\r\n # objects = self.db.executesql(self.db(query)._select(*fields,join=join),as_dict=True)\r\n objects = self.sql(query, *fields, left=join, as_dict=self.virtual_fields)\r\n if self.virtual_fields and objects:\r\n # calcolo tutti i virtual fields\r\n for obj, field in product(objects, [self.table[field] for field in self.virtual_fields]):\r\n obj[field.name] = field.f(obj)\r\n\r\n vn = partial(zip, self.visible_names + self.virtual_fields)\r\n get_vn = itemgetter(*(self.visible_names + self.virtual_fields))\r\n objects = map(dict, map(vn, map(get_vn, objects)))\r\n # print objects\r\n ret = {self.name: dict(results=objects, totalResults=len(objects), )}\r\n if together:\r\n if 'permissions' in together:\r\n ret.setdefault('PERMISSIONS', {}).update(\r\n self.my_perms(ids=map(itemgetter('id'), objects)).get('PERMISSIONS', {}))\r\n\r\n # results = {self.name : objects}\r\n for resource, redest, t, field in 
self.find_model_path(together):\r\n # print resource, redest, field,t\r\n if t == 'm':\r\n if resource in ret:\r\n obs = map(itemgetter('id'), ret[resource]['results'])\r\n ret.setdefault('TOMANY', {})['%s_%s' % (redest, field.name)] = obs\r\n if obs:\r\n ret.update(resource_manager.resource(redest).list(filter={field.name: obs}, _jsoned=False))\r\n elif t == '1':\r\n if resource in ret:\r\n obs = list(set(map(itemgetter(field.name), ret[resource]['results'])))\r\n # ret.setdefault('TOONE',{})['%s_%s' % (resource,field.name)] = obs\r\n if obs:\r\n ret.update(resource_manager.resource(redest).list(filter={'id': obs}, _jsoned=False))\r\n elif t == 'M':\r\n if resource in ret:\r\n first = 0 if field else 1\r\n m2m_idx = '%s/%s|%s' % (resource, redest, first)\r\n obs = map(itemgetter('id'), ret[resource]['results'])\r\n ret.setdefault('MANYTOMANY', {}).setdefault(m2m_idx, []).extend(obs)\r\n if obs:\r\n resource_manager.m2m((resource, redest)).list(resource_manager.resource(redest),\r\n collection=obs)\r\n res = current.response.text\r\n ret.setdefault('m2m', {}).update(res['m2m'])\r\n obs = list(set(map(itemgetter(1 - first), imap(itemgetter('add'), res['m2m'][m2m_idx]))))\r\n # ret.setdefault('TOMANY',{})[redest] = obs\r\n if obs:\r\n res = resource_manager.resource(redest).list(filter=dict(id=obs), _jsoned=False)\r\n ret.update(res)\r\n if self.private_args:\r\n if objects:\r\n ret.update(self.private_args.list(map(itemgetter(self.field_order.index('id')), objects)))\r\n\r\n current.response.text = ret\r\n return ret", "def get_queryset(self):\n qs = super().get_queryset()\n qs.filter(company=self.request.user.company)\n return qs", "def get_unique_employees():\n unique_names = []\n\n for entry in Entry.select():\n if entry.employee_name not in unique_names:\n unique_names.append(entry.employee_name)\n\n clear()\n return unique_names", "def entity_data(self, entity_name, limit=10):\n from sagas.ofbiz.entities import OfEntity as e, finder, record_list_df\n # limit = 10\n offset = 0\n result = finder.find_list(entity_name, limit, offset)\n result = record_list_df(entity_name, result, drop_null_cols=True, contains_internal=False)\n print(result)", "def run(self):\n query = self.query\n\n # count before filtering\n # self.cardinality = query.add_columns(self.columns[0].sqla_expr).count()\n\n self._set_column_filter_expressions()\n self._set_global_filter_expression()\n self._set_sort_expressions()\n self._set_yadcf_data(query)\n\n # apply filters\n query = query.filter(\n *[e for e in self.filter_expressions if e is not None])\n self.filtered_query = deepcopy(query)\n\n # self.cardinality_filtered = query.add_columns(\n # self.columns[0].sqla_expr).count()\n\n # apply sorts\n query = query.order_by(\n *[e for e in self.sort_expressions if e is not None])\n\n # add paging options\n length = int(self.params.get('length'))\n if length >= 0:\n query = query.limit(length)\n elif length == -1:\n pass\n else:\n raise(ValueError(\n 'Length should be a positive integer or -1 to disable'))\n query = query.offset(int(self.params.get('start')))\n\n # add columns to query\n query = query.add_columns(\n *[c.sqla_expr for c in self.columns])\n\n self.filtered_query = self.filtered_query.add_columns(\n *[c.sqla_expr for c in self.columns])\n\n self.query = query\n # fetch the result of the queries\n column_names = [col.mData if col.mData else str(i)\n for i, col in enumerate(self.columns)]\n # self.results = [{k: v for k, v in zip(\n # column_names, row)} for row in query.all()]", "def _filter(self, _model, 
**kwargs):\n return _model.objects.filter(**kwargs)" ]
[ "0.6927609", "0.6896321", "0.6624646", "0.6393193", "0.63894564", "0.6377485", "0.63540703", "0.6346314", "0.62784153", "0.6278148", "0.6203518", "0.6151426", "0.61103714", "0.6102392", "0.60782236", "0.60275084", "0.5902977", "0.58386964", "0.5763383", "0.5734938", "0.57344043", "0.5714729", "0.56883085", "0.56747353", "0.56352276", "0.5619726", "0.56113935", "0.5592339", "0.558879", "0.55742204", "0.5558649", "0.5546032", "0.5538151", "0.5534788", "0.5526068", "0.5483864", "0.54364383", "0.5388059", "0.53847575", "0.53742254", "0.5360875", "0.5359812", "0.53492385", "0.5346312", "0.53222996", "0.5317811", "0.5291449", "0.5272416", "0.522628", "0.522628", "0.5225926", "0.5212637", "0.5196732", "0.5165996", "0.515841", "0.5155461", "0.51343554", "0.5114824", "0.51148134", "0.5113528", "0.5112581", "0.51088643", "0.5104413", "0.5104121", "0.50951236", "0.50951236", "0.509261", "0.50924134", "0.50867945", "0.5086355", "0.5083142", "0.5073535", "0.50688374", "0.50680137", "0.50518143", "0.5048967", "0.5048532", "0.50395083", "0.50395083", "0.50395083", "0.5037753", "0.50357836", "0.5032959", "0.50265455", "0.50215346", "0.5016037", "0.50120485", "0.5005005", "0.5004871", "0.500285", "0.5001941", "0.50015026", "0.49892378", "0.49883312", "0.498405", "0.4983193", "0.49792033", "0.49757028", "0.4968141", "0.49647674" ]
0.6596226
3
adds a role to an employee
def add_employeeRole(self, id, role):
    cursor = self.dbconnect.get_cursor()
    try:
        cursor.execute('INSERT INTO employeeRoles values(%s,%s)', (id, role))
        # get id and return updated object
        self.dbconnect.commit()
    except(Exception, self.dbconnect.get_error()) as error:
        self.dbconnect.rollback()
        raise Exception('\nUnable to save EmployeeRole!\n(%s)' % (error))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_role():\n role = roles.find_or_create_role(request.values.get('role_name', ''))\n user = users.get_or_404(int(request.values.get('user_id', '')))\n if not users.add_role_to_user(user, role):\n return {}, 500\n return {}", "def test_add_role(self):\n pass", "def add_role(role):\n roleOfUser=Role.objects.create(type=role)\n return roleOfUser", "def add_role(email, role):\n from enferno.user.models import Role\n u = User.query.filter(User.email == email).first()\n\n if u is None:\n print('Sorry, this user does not exist!')\n else:\n r = Role.query.filter(Role.name == role).first()\n if r is None:\n print('Sorry, this role does not exist!')\n u = click.prompt('Would you like to create one? Y/N', default='N')\n if u.lower() == 'y':\n r = Role(name=role)\n try:\n db.session.add(r)\n db.session.commit()\n print('Role created successfully, you may add it now to the user')\n except Exception as e:\n db.session.rollback()\n # add role to user\n u.roles.append(r)", "async def add_role(\n client,\n event,\n user: ('user', 'User to add role to'),\n role: ('role', 'The role to give'),\n):\n # Check for permissions\n if not event.user_permissions.can_manage_roles:\n abort('You need `manage roles` permission to invoke this command.')\n \n if not event.guild.cached_permissions_for(client).can_manage_roles:\n abort('I need `manage roles` permission to execute this command.')\n \n if not event.user.has_higher_role_than(role):\n abort('You must have higher role than the role you are trying to give.')\n \n if not client.has_higher_role_than(role):\n abort('I must have higher role than the role you are trying to give.')\n \n # Using `.copy_to` on forms works as well.\n return ADD_ROLE_FORM.copy_with(\n title = f'Add role {role.name} to {user.full_name}',\n custom_id = f'add_role.{user.id}.{role.id}',\n )", "def add_role(self, role):\n if role.name not in [r.name for r in self.roles]:\n return db[self.colNam].find_and_modify(query=dict(_id=self.id), update={'$push': {'roles': role.to_python()}})", "def add_role(self, role, parents=[]):\r\n self._roles.setdefault(role, set())\r\n self._roles[role].update(parents)", "def test_add_role_simple(self):\n pass", "def manage_addRole(self, role_id, title, description, RESPONSE=None,\n REQUEST=None):\n if not role_id:\n message = 'Please+provide+a+Role+ID'\n else:\n self.addRole(role_id, title, description)\n message = 'Role+added'\n\n if RESPONSE is not None:\n RESPONSE.redirect('%s/manage_roles?manage_tabs_message=%s' %\n (self.absolute_url(), message))", "async def addrole(self, ctx, user: discord.Member=None, *, role=None):\r\n if user is None or role is None:\r\n return await ctx.send(\"Incorrect usage! 
*;addrole @user role*\")\r\n r = discord.utils.get(ctx.guild.roles, name=str(role))\r\n if r is None:\r\n return await ctx.send(f'{role} was not found')\r\n try:\r\n await user.add_roles(r)\r\n return await ctx.send(f\"**{str(user)}** has been given the role of **{role}** {self.bot.get_emoji(470063310386233344)}\")\r\n except discord.Forbidden:\r\n return await ctx.send(\"Bot does not have enough permissions to give roles.\")", "def add_role():\n check_admin()\n add_role = True\n\n form = RoleForm()\n if form.validate_on_submit():\n role = Role(title=form.title.data)\n\n try:\n db.session.add(role)\n db.session.commit()\n flash('New role successfully created')\n except:\n flash('Error: Role title already exist')\n\n return redirect(url_for('admin.get_roles'))\n\n return render_template('admin/roles/role.html', form=form, add_role=add_role, title='Add Role')", "async def addrole(self, ctx, rolename, user: discord.Member=None):\n author = ctx.message.author\n channel = ctx.message.channel\n server = ctx.message.server\n\n if user is None:\n user = author\n\n role = self._role_from_string(server, rolename)\n\n if role is None:\n await self.bot.say('That role cannot be found.')\n return\n\n if not channel.permissions_for(server.me).manage_roles:\n await self.bot.say('I don\\'t have manage_roles.')\n return\n\n if author.id == settings.owner:\n pass\n elif not channel.permissions_for(author).manage_roles:\n raise commands.CheckFailure\n\n await self.bot.add_roles(user, role)\n await self.bot.say('Added role {} to {}'.format(role.name, user.name))", "async def command_assign_role(self, context, role: str):\n try:\n await context.author.add_roles(discord.utils.get(\n context.guild.roles, name=role))\n await context.message.add_reaction('👍')\n except Exception as e:\n await context.message.add_reaction('👎')\n await context.send('Role could not be assigned')\n print(f'Errored in command_assign_role.', e)", "def add_role():\n\tcheck_admin()\n\tadd_role = True\n\n\tform = RoleForm()\n\tif form.validate_on_submit():\n\t\trole= Role(name= form.name.data,description=form.description.data)\n\n\t\ttry:\n\t\t\t#add role to the database \n\t\t\tdb.session.add(role)\n\t\t\tdb.session.commit()\n\t\t\tflash('You have successfully added a new role ')\n\t\texcept:\n\t\t\t#incase the role already exists\n\t\t flash(\"Error:the role already exists\")\n\n\t\t#redirect to the roles page\n\t\treturn redirect(url_for('admin.list_roles'))\n\n\t\t#load the role template\n\treturn render_template('admin/roles/role.html', add_role=add_role, form = form,title='Add Role')", "def add_role():\r\n check_admin()\r\n\r\n add_role = True\r\n\r\n form = RoleForm()\r\n if form.validate_on_submit():\r\n role = Role(name=form.name.data,\r\n description=form.description.data)\r\n\r\n try:\r\n # add role to the database\r\n db.session.add(role)\r\n db.session.commit()\r\n flash('You have successfully added a new role.')\r\n except:\r\n # in case role name already exists\r\n flash('Error: role name already exists.',category='error')\r\n\r\n # redirect to the roles page\r\n return redirect(url_for('admin.list_roles'))\r\n\r\n # load role template\r\n return render_template('admin/roles/role.html', add_role=add_role,\r\n form=form, title='Add Role')", "def addRole(self, role):\n self._client.addRole(role)", "def test_add_role_simple_post(self):\n pass", "def add_employee(self, employee):\n self.employees.add(employee)", "def add_role(self, name):\n role = Role.by_name(name)\n if not role:\n role = Role(name)\n db.add(role)\n if not role 
in self.roles:\n self.roles.append(role)", "async def role(ctx, role: discord.Role = None):\n if role is None:\n await ctx.send(\"List of assignable roles: \" + str(allowed_roles))\n if role.name in allowed_roles:\n if not role in ctx.message.author.roles:\n await ctx.message.author.add_roles(role)\n await ctx.send(\"Role added.\")\n else:\n await ctx.message.author.remove_roles(role)\n await ctx.send(\"Role removed.\") \n else:\n await ctx.send(\"That role doesn't exist, or you don't have permission to modify it.\")", "def define_role(self, role):\n\n self._db_manager.create_role(role)", "async def addrole(self, ctx: context.CustomContext):\n\n await ctx.send(\n f\"{config.USER_INTERACTION_REQUIRED} Reply with the name of the role you want to create.\"\n )\n\n role_name = await ctx.converted_input(converter=converter.CaseInsensitiveRole)\n\n if isinstance(role_name, str):\n await ctx.send(\n f\"{config.YES} I will **create a new role** on this server named `{role_name}` for this.\"\n )\n try:\n discord_role = await ctx.guild.create_role(name=role_name)\n except discord.Forbidden:\n raise exceptions.ForbiddenError(\n exceptions.ForbiddenTask.CREATE_ROLE, role_name\n )\n\n else:\n discord_role = role_name\n\n await ctx.send(\n f\"{config.YES} I'll use the **pre-existing role** named `{discord_role.name}` for this.\"\n )\n\n role_join_message = await ctx.input(\n f\"{config.USER_INTERACTION_REQUIRED} Reply with a short message the user should see when they get the role.\"\n )\n\n try:\n await self.bot.db.execute(\n \"INSERT INTO selfrole (guild_id, role_id, join_message) VALUES ($1, $2, $3) \"\n \"ON CONFLICT (guild_id, role_id) DO UPDATE SET join_message = $3\",\n ctx.guild.id,\n discord_role.id,\n role_join_message,\n )\n except asyncpg.UniqueViolationError:\n return await ctx.send(\n f\"{config.NO} `{discord_role.name}` is already a selfrole on this server.\"\n )\n\n await ctx.send(f\"{config.YES} `{discord_role.name}` was added as a selfrole.\")", "def add_role(self, role):\n try:\n self.db_proxy.nameCheck(role.theName, 'role')\n except ARM.ARMException as ex:\n self.close()\n raise ARMHTTPError(ex)\n\n role_params = RoleParameters(\n name=role.theName,\n rType=role.theType,\n sCode=role.theShortCode,\n desc=role.theDescription,\n cProperties=[]\n )\n\n role_id = self.db_proxy.addRole(role_params)\n\n return role_id", "def addRole(self, name, description=\"\"):\n params = {\n \"f\" : \"json\",\n \"rolename\" : name,\n \"description\" : description\n }\n aURL = self._url + \"/roles/add\"\n return self._con.post(path=aURL, postdata=params)", "def addRole(self, role=None, roleName=None, kvDict=None):\n return _modelActionBase(self, instance=role, instanceName=roleName, kvDict=kvDict,\n model=get_model('role'), db=db, action='add', modelType='role')", "def test_edit_role_add_new_role(self):\n # Add node with controller role\n Nodes().nodes_discovered[0].checkbox.click()\n RolesPanel().controller.click()\n Nodes().apply_changes.click()\n time.sleep(1)\n # Add cinder role\n with Nodes() as n:\n n.nodes[0].checkbox.click()\n n.edit_roles.click()\n RolesPanel().cinder.click()\n Nodes().apply_changes.click()\n time.sleep(1)\n with Nodes() as n:\n self.assertIn(ROLE_CONTROLLER, n.nodes[0].roles.text,\n 'Controller role')\n self.assertIn(ROLE_CINDER, n.nodes[0].roles.text,\n 'Cinder role')", "def set_role(userid, role, group, request=None):", "def test_add_role_to_project_member(self):\n pass", "async def add_role_admin(request, role_id):\n required_fields = [\"id\"]\n 
utils.validate_fields(required_fields, request.json)\n\n txn_key, txn_user_id = await utils.get_transactor_key(request)\n proposal_id = str(uuid4())\n batch_list = Role().admin.propose.batch_list(\n signer_keypair=txn_key,\n signer_user_id=txn_user_id,\n proposal_id=proposal_id,\n role_id=role_id,\n next_id=request.json.get(\"id\"),\n reason=request.json.get(\"reason\"),\n metadata=request.json.get(\"metadata\"),\n )\n await utils.send(\n request.app.config.VAL_CONN, batch_list, request.app.config.TIMEOUT\n )\n return json({\"proposal_id\": proposal_id})", "def addUserRole(self, name, role):\n self._client.addUserRole(name, role)", "async def rolemenu_add_role(self,\n interaction: discord.Interaction,\n name: str,\n role: discord.Role,\n emoji: str = None,\n description: str = None):\n doc = await self.db.find_one({\n \"guild_id\": interaction.guild.id,\n \"name\": name\n })\n if not doc:\n return await interaction.response.send_message(\n \"No role menu with that name exists.\", ephemeral=True)\n for role_doc in doc[\"roles\"]:\n if role_doc[\"id\"] == role.id:\n return await interaction.followup.send(\n \"Role is already in the menu.\", ephemeral=True)\n if len(doc[\"roles\"]) >= 25:\n return await interaction.response.send_message(\n \"This role menu is full.\", ephemeral=True)\n await interaction.response.defer(ephemeral=True)\n if role.guild != interaction.guild:\n return await interaction.response.send_message(\n \"This role is not in this server.\")\n if emoji:\n if emoji.startswith(\"<\") and emoji.endswith(\">\"):\n try:\n emoji = int(emoji[1:-1].split(\":\")[2])\n except ValueError:\n return await interaction.followup.send(\"Invalid emoji.\")\n else:\n try:\n message = await interaction.original_message()\n await message.add_reaction(emoji)\n except discord.HTTPException:\n return await interaction.followup.send(\"Invalid emoji.\")\n await self.db.update_one({\"_id\": doc[\"_id\"]}, {\n \"$push\": {\n \"roles\": {\n \"description\": description,\n \"id\": role.id,\n \"emoji\": emoji,\n \"date_added\": datetime.datetime.now(datetime.datetime.u)\n }\n }\n })\n doc = await self.db.find_one({\"_id\": doc[\"_id\"]})\n await interaction.followup.send(f\"Added {role.mention} to the menu.\")\n menu = Menu(self, interaction.guild, doc)\n await menu.update()", "def add_user_to_role(request, username_or_email, role, group_title, event_name):\r\n username_or_email = strip_if_string(username_or_email)\r\n try:\r\n user = _user_from_name_or_email(username_or_email)\r\n except User.DoesNotExist:\r\n return u'<font color=\"red\">Error: unknown username or email \"{0}\"</font>'.format(username_or_email)\r\n\r\n role.add_users(user)\r\n\r\n # Deal with historical event names\r\n if event_name in ('staff', 'beta-tester'):\r\n track.views.server_track(\r\n request,\r\n \"add-or-remove-user-group\",\r\n {\r\n \"event_name\": event_name,\r\n \"user\": unicode(user),\r\n \"event\": \"add\"\r\n },\r\n page=\"idashboard\"\r\n )\r\n else:\r\n track.views.server_track(request, \"add-instructor\", {\"instructor\": unicode(user)}, page=\"idashboard\")\r\n\r\n return '<font color=\"green\">Added {0} to {1}</font>'.format(user, group_title)", "def create_role(self, **kwargs):\n role = self.role_model(**kwargs)\n # noinspection PyUnresolvedReferences\n return self.save(role)", "def create_role(self, role_id, role):\n raise exception.NotImplemented() # pragma: no cover", "def add_role(self, principal, role):\n return permissions.utils.add_local_role(self, principal, role)", "async def addRole(self, ctx, 
*roles_to_add):\n already_present_roles = [] # roles that will be deleted from \"roles_to_add\"\n\n available_roles = open(\"assets/roles.txt\", \"r\").readlines()\n available_roles = [role.lower().strip() for role in available_roles]\n\n output_msg = \"\"\n\n for role_to_add in roles_to_add:\n for role in available_roles:\n if role_to_add.lower() == role:\n output_msg += f\"Failed to add {role_to_add}: role already exists.\\n\"\n already_present_roles.append(role_to_add)\n break\n\n for role in already_present_roles:\n roles_to_add.remove(role)\n\n if roles_to_add:\n with open(\"assets/roles.txt\", \"a\") as f:\n for role in roles_to_add:\n f.write(f\"{role}\\n\")\n output_msg += f\"{role} has been added successfully.\\n\"\n\n await ctx.send(output_msg)", "async def apply_role(self, *, reason: str = None):\n if self.role not in self.member.roles:\n try:\n await self.member.add_roles(self.role, reason=reason)\n except discord.HTTPException:\n pass", "def role_add(role, nodes, node, node_vars, host_vars, extra):\n role_manager = get_role_manager()\n node += nodes\n nodes, node_vars, host_vars, extra_args = _split_vars(\n node, node_vars, host_vars, extra)\n if not nodes:\n raise ArgumentError('No nodes informed')\n\n added_nodes = role_manager.add_role(\n role, hosts_node_map=nodes, host_vars=host_vars,\n node_vars=node_vars, extra_args=extra_args)\n\n print(f\"{len(added_nodes)} nodes were added to role {role}: {', '.join(sorted(added_nodes))}\")\n return 0", "def add(self, user, role=None, roles=None):\n # TODO(adriant): resolve the roles and users into id's\n # user_id = base.getid(user)\n user_id = user\n # role_id = role\n if role:\n params = {\n 'roles': [role]\n }\n elif roles:\n params = {\n 'roles': roles\n }\n\n route = '/openstack/users/%s/roles'\n url = route % (user_id)\n try:\n self._put(url, json=params, response_key=None)\n except exc.HTTPBadRequest as e:\n print(e.message)\n return False\n\n return True", "async def add_role_member(request, role_id):\n required_fields = [\"id\"]\n utils.validate_fields(required_fields, request.json)\n txn_key, txn_user_id = await utils.get_transactor_key(request)\n proposal_id = str(uuid4())\n batch_list = Role().member.propose.batch_list(\n signer_keypair=txn_key,\n signer_user_id=txn_user_id,\n proposal_id=proposal_id,\n role_id=role_id,\n pack_id=request.json.get(\"pack_id\"),\n next_id=request.json.get(\"id\"),\n reason=request.json.get(\"reason\"),\n metadata=request.json.get(\"metadata\"),\n )\n batch_status = await utils.send(\n request.app.config.VAL_CONN,\n batch_list,\n request.app.config.TIMEOUT,\n request.json.get(\"tracker\") and True,\n )\n if request.json.get(\"tracker\"):\n return utils.create_tracker_response(\"batch_status\", batch_status)\n return json({\"proposal_id\": proposal_id})", "def addRole(self, role_id, title='', description=''):\n if self._roles.get(role_id) is not None:\n raise KeyError('Duplicate role: %s' % role_id)\n\n self._roles[role_id] = {'id': role_id, 'title': title,\n 'description': description}", "def create_role(self, **kwargs):\n\n role = self.role_model(**kwargs)\n return self.put(role)", "def test_user_id_role_put(self):\n pass", "def add_employee(self, emp):\n if emp not in self.employees: \n self.employees.append(emp)", "def grant_role(self, role, principal_ids):", "async def addRoles(self, ctx: Context, person: Member, roles: Greedy[Role]):\n roles = remove_dupe_roles(roles)\n\n await person.add_roles(*roles)\n await ctx.send(f\"Adding {roles_str(person, roles)}\")", "def 
setup_test_role(self):\n self.test_role = rand_name('role')\n resp, self.role = self.client.create_role(self.test_role)\n self.roles.append(self.role)", "async def add(ctx, *args: commands.clean_content):\r\n if len(args) < 2:\r\n await ctx.send('Add takes 2+ parameters')\r\n return\r\n\r\n tgt_role = args[-1]\r\n if tgt_role.startswith('@'):\r\n tgt_role = tgt_role[1:]\r\n if not discord.utils.get(ctx.guild.roles, name=tgt_role):\r\n await ctx.send(f'Role {args[-1]} does not exist')\r\n return\r\n\r\n roles = list(args[:-1])\r\n\r\n for index, role in enumerate(roles):\r\n if role.startswith('@'):\r\n role = role[1:]\r\n roles[index] = role\r\n print(role)\r\n if not discord.utils.get(ctx.guild.roles, name=role):\r\n await ctx.send(f'Role {role} does not exist')\r\n return\r\n\r\n docid = db.insert({'guild': ctx.guild.id, 'roles': roles, 'target': tgt_role})\r\n await ctx.send(f'Rule {docid} created')\r\n await update_roles(ctx.guild)\r\n await check_guild_rules(ctx.guild)", "def patch(self, username, role):\n try:\n UserService.add_role_to_user(token_auth.current_user(), username, role)\n return {\"Success\": \"Role Added\"}, 200\n except UserServiceError as e:\n return {\"Error\": str(e).split(\"-\")[1], \"SubCode\": str(e).split(\"-\")[0]}, 403", "def changeRole(self, node, role):", "def createRole():\n if hasRole(): return False\n conn = iamConn()\n role = getArgs().role_name\n conn.create_role(role, assume_role_policy.strip().format(accountId()))\n conn.put_role_policy(role, 'Admin', admin_policy.strip())\n print(\"Role created:\", role)\n return True", "def add_role_to_user(self, user, role):\n user, role = self._prepare_role_modify_args(user, role)\n if role not in user.roles:\n user.roles.append(role)\n # noinspection PyUnresolvedReferences\n self.save(user)\n return True\n\n return False", "def role(self, role):\n\n self._role = int(role)", "def add_user(self, role, emp_name, username, status, password):\n Log.info(\"Start to add user.\")\n self.click(self.user_add_btn)\n self.wait_unit_el_present(self.add_user_form)\n self.set_combox_value(role, self.user_role_select)\n self.input_text(emp_name, self.emp_name_input)\n self.input_text(username, self.user_name_input)\n self.set_combox_value(status, self.user_status_select)\n self.input_text(password, self.user_password_input)\n self.input_text(password, self.user_confirm_password)\n self.click(self.user_save_btn)\n self.wait_unit_el_present(self.user_table)\n Log.info(\"New user is added.\")", "def add_user_role(self, tenant_id, user_id, role_id):\n _url = \"http://\" + self.host_ip + \":35357/v2.0/tenants/\" + \\\n tenant_id + \"/users/\" + user_id + \"/roles/OS-KSADM/\" + role_id\n _headers = {'x-auth-token': self.cloud_admin_info['token_project']}\n _body = None\n response = self.request(\"PUT\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while adding role\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Add user role Failed with status %s \" %\n response.status)\n return response.status\n\n LOG_OBJ.info(\"Role: %s is added to user:%s successfully.\"\n % (role_id, user_id))\n return True", "def grant_role(ctx, address):\n skale = ctx.obj['skale']\n address = to_checksum_address(address)\n skale.schains.grant_role(skale.schains.schain_creator_role(),\n address)\n print('Success')", "def add_keystone_v3_role_to_user_or_group(self, user_id, role_id,\n pro_dom_id, id_flag):\n LOG_OBJ.debug(\"Adding the role.\")\n\n _url = \"http://\" + 
self.host_ip + \":35357/v3/\" + id_flag + \"s/\" + \\\n str(pro_dom_id) + \"/users/\" + str(user_id) + \"/roles/\" + \\\n str(role_id)\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n _body = None\n response = self.request(\"PUT\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while adding role\")\n print (\"No response from Server while adding role\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Adding role Failed with status %s \"\n \"and error : %s\" % (response.status, response.data))\n print (\"Adding role Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n return True", "def add_role():\n\n role_form = RoleForm(request.form)\n\n if role_form.validate_on_submit():\n name = request.form['name']\n\n if not name or name == '' :\n flash(\"No null or empty values are allowed.\",\"warn\")\n return render_template('user/add_edit_role.html', title='Add Role',\n add=True,form=role_form)\n\n else:\n new_role = Role(name = name)\n try:\n correct = True\n db.session.add(new_role)\n db.session.commit()\n except Exception as e:\n # Catch anything unknown\n print(e)\n correct = False\n finally:\n if not correct:\n # Cleanup and show error\n db.session.rollback()\n flash('Error when creating a Role.','error')\n else:\n flash(\"Our Role was created!\",\"success\")\n return redirect(url_for('user_ksat.manage_role'))\n\n return render_template('user/add_edit_role.html', title='Add Role',add=True,form=role_form)", "async def addrole(self, ctx, role: discord.Role):\n guild = ctx.message.guild\n excluded_roles = await self.config.guild(guild).excluded_roles()\n\n for excluded_role in excluded_roles:\n if excluded_role == role.id:\n await ctx.send(\"%s already added to role exclusion list\" % role.name)\n return\n\n excluded_roles.append(role.id)\n await self.config.guild(guild).excluded_roles.set(excluded_roles)\n\n await ctx.send(\"%s added to role exclusion list\" % role.name)", "def append_role(self, user):\n\n tx = self.iroha.transaction(\n [\n self.iroha.command(\n \"AppendRole\",\n account_id=f\"{user.gov_id}@afyamkononi\",\n role_name=user.type,\n )\n ],\n creator_account=f\"{self.creator_account_details.gov_id}@afyamkononi\",\n )\n\n IrohaCrypto.sign_transaction(tx, self.creator_account_details.private_key)\n return self.send_transaction_and_return_status(tx)", "async def addtagrole(self, ctx, _role):\r\n\t\tif _role == 0:\r\n\t\t\tself.settings.ServerConfig(ctx.guild.id, 'TagRole', 0)\r\n\t\t\tawait ctx.send('Tag role set to: {}'.format(0))\r\n\t\telse:\t\r\n\t\t\trole = self.settings.Get(ctx, 'role', _role)\r\n\t\t\tif not role: return await ctx.send('Can\\'t find role: {}'.format(_role))\r\n\r\n\t\t\tself.settings.ServerConfig(ctx.guild.id, 'TagRole', role.id)\r\n\t\t\tawait ctx.send('Tag role set to: {}'.format(role))", "async def addrole(self, ctx, member: discord.Member, role: discord.Role):\n role = discord.utils.get(ctx.guild.roles, id=role.id)\n\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to add this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n 
embed=discord.Embed(\n title=f\"Can not assign *{role}* role using this command.\",\n description=\"For more information run ```.help addrole```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n if role in member.roles:\n return await ctx.channel.send(\n embed=discord.Embed(\n title=f\"*{member}* already has *{role}* Role!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.greyple(),\n )\n )\n\n await member.add_roles(role)\n await ctx.send(\n embed=discord.Embed(\n title=f\"*{role}* has been added to *{member}*\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )", "def set_role(username, role_name=\"\"):\n\tsession = get_session()\n\tdata = {\"username\": username, \"role\": role_name}\n\tsession.post(\"{url}/api/users/set_role\".format(url=get_registry_url()), json=data)", "async def _create_new_role(self, context, name: str, target=GROUP_CATEGORY_NAME, channel=False, color: discord.Color = None):\n # todo - sanitize input, preventing \"-\" specifically\n target_role = get(context.guild.roles, name=target)\n target_position = target_role.position\n\n new_role = await context.guild.create_role(\n name=name, mentionable=True, reason=f\"Role created by {context.author}\")\n\n await context.guild.edit_role_positions(positions={new_role: target_position})", "def add_role(self, name: str) -> Role:\n role = self.find_role(name)\n if role is None:\n try:\n role = self.role_model()\n role.name = name\n self.get_session.add(role)\n self.get_session.commit()\n log.info(const.LOGMSG_INF_SEC_ADD_ROLE.format(name))\n return role\n except Exception as e:\n log.error(const.LOGMSG_ERR_SEC_ADD_ROLE.format(e))\n self.get_session.rollback()\n return role", "async def afterHoursSetRole(self, ctx: Context, role: discord.Role):\n await self.config.guild(ctx.guild).get_attr(KEY_ROLE_ID).set(role.id)\n await ctx.send(f\"Set the After Hours role to {role.name}\")", "def set_keystone_v3_role(self, role_id, role_new_name):\n LOG_OBJ.debug(\"Creating the role.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/roles/\" + str(role_id)\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n\n _role_info = {\"role\": {\n \"name\": role_new_name}}\n _body = json.dumps(_role_info)\n response = self.request(\"PATCH\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while set the role\")\n print (\"No response from Server while set the role\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" Set role Failed with status %s and error : %s\" %\n (response.status, response.data))\n print (\" Set role Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n\n return True", "def add_employee(self, empl):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employee values(default,%s,%s,%s,%s,%s,%s,%s,%s)',\n (empl.name, empl.email, empl.office, empl.research_group, empl.title, empl.internOrExtern,\n empl.active, empl.promotor))\n cursor.execute('SELECT LASTVAL()')\n eid = cursor.fetchone()[0]\n empl.id = eid\n # get id and return updated object\n self.dbconnect.commit()\n except(Exception, self.dbconnect.get_error()) as error:\n self.dbconnect.rollback()\n raise Exception('\\nUnable to save Employee!\\n(%s)' % (error))", "async def add_roles(self, ctx: commands.Context, *roles: discord.Role):\n if not roles:\n return await 
ctx.send_help()\n errored = \"\"\n message = \"\"\n added = []\n already_added = []\n for role in roles:\n if role >= ctx.author.top_role:\n errored += (\n \"{role}: You can't set a role equal to or higher than your own.\\n\".format(\n role=role.name\n )\n )\n continue\n if role >= ctx.guild.me.top_role:\n errored += (\n \"{role}: You can't set a role that's equal to or higher than the \"\n \"bot.\\n\".format(role=role.name)\n )\n continue\n async with self.config.guild(ctx.guild).autoroles() as roles_list:\n if role.id not in roles_list:\n roles_list.append(role.id)\n added.append(role.name)\n else:\n already_added.append(role.name)\n message += errored\n if added:\n message += \"\\nAdded role(s): {roles}\".format(roles=humanize_list(added))\n if already_added:\n message += \"\\nRole(s) already added: {roles}\".format(\n roles=humanize_list(already_added)\n )\n if message:\n for line in pagify(message):\n await ctx.send(line)", "def add_user_role(user_name, tenant_name, role_name, auth_admin_url, admin_token):\n keystone = get_client(auth_admin_url, admin_token)\n role = keystone.roles.create(role_name)\n tenants = keystone.tenants.list()\n my_tenant = [x for x in tenants if x.name==tenant_name][0]\n users = keystone.users.list()\n my_user = [x for x in users if x.name==user_name][0]\n keystone.roles.add_user_role(my_user, role, my_tenant)", "def add_role(self, role_id: str, current_user_id=None):\n if RoleModel.is_valid_role(role_id) and not self.has_role(role_id):\n user_role = UserRoleModel(user_id=self.id, role_id=role_id, lastchange_by=current_user_id)\n self.roles.append(user_role)", "def add_employee(self, first_name, last_name):\n self.switch_main_menu(\"PIM\")\n self.click_menu(\"Add Employee\")\n self.pim = AddEmployee(self.driver)\n self.pim.add_user_employee(first_name, last_name)", "def collection_post(request):\n\n # Our account parameter\n account = request.matchdict['id_account']\n\n # Our JSON parameter, this could be validated\n json = request.json_body\n role = json['role']\n\n # Our admin object\n admin = _get_admin(request)\n\n # Check if the account exists\n if account not in admin.list_accounts():\n request.response.status_int = 404\n return\n\n # Check if the role exists\n if role in admin.list_roles(account):\n request.response.status_int = 409\n return\n\n # Create the role\n admin.add_role(account, role)\n\n # Return appropriately\n request.response.status_int = 201", "def addRoleAccess(self, role, read, write, catalog='*', repository='*'):\n return self._client.addRoleAccess(role, read, write, catalog, repository)", "def add_user_roles(userid:str, *roles):", "def post(self):\n data = request.json\n\n name = data.get('name')\n description = data.get('description')\n role = Role(name=name,\n description=description)\n db.session.add(role)\n db.session.commit()\n\n return None, 201", "def add_admin(self, uid, name, role=None):\n uid = self._check_uid(uid)\n self._router_request(\n self._make_request_data(\n 'addAdminRole',\n data=dict(\n params=dict(\n uid=uid,\n name=name,\n role=role,\n )\n )\n )\n )\n\n return self.get_admin_by_name(uid, name)", "def role_write(self, fail_on_found=False, disassociate=False, **kwargs):\n\n # Get the role, using only the resource data\n data, self.endpoint = self.data_endpoint(kwargs, ignore=['obj'])\n debug.log('Checking if role exists.', header='details')\n response = self.read(pk=None, fail_on_no_results=True,\n fail_on_multiple_results=True, **data)\n role_data = response['results'][0]\n role_id = role_data['id']\n\n # Role 
exists, change display settings to output something\n self.configure_display(role_data, kwargs, write=True)\n\n # Check if user/team has this role\n # Implictly, force_on_exists is false for roles\n obj, obj_type, res, res_type = self.obj_res(kwargs)\n debug.log('Checking if %s already has role.' % obj_type,\n header='details')\n data, self.endpoint = self.data_endpoint(kwargs)\n response = self.read(pk=None, fail_on_no_results=False,\n fail_on_multiple_results=False, **data)\n\n msg = ''\n if response['count'] > 0 and not disassociate:\n msg = 'This %s is already a member of the role.' % obj_type\n elif response['count'] == 0 and disassociate:\n msg = 'This %s is already a non-member of the role.' % obj_type\n\n if msg:\n role_data['changed'] = False\n if fail_on_found:\n raise exc.NotFound(msg)\n else:\n debug.log(msg, header='DECISION')\n return role_data\n\n # Add or remove the user/team to the role\n debug.log('Attempting to %s the %s in this role.' % (\n 'remove' if disassociate else 'add', obj_type), header='details')\n post_data = {'id': role_id}\n if disassociate:\n post_data['disassociate'] = True\n client.post('%s/%s/roles/' % (self.pluralize(obj_type), obj),\n data=post_data)\n role_data['changed'] = True\n return role_data", "def add_employee(self, obj):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employee(id, name, email, office, extra_info, picture_location, research_group, '\n 'title, is_external, is_admin, is_active) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);',\n (obj.e_id, obj.name, obj.email, obj.office, obj.extra_info, obj.picture_location, obj.research_group,\n obj.title, obj.is_external, obj.is_admin, obj.is_active))\n\n self.dbconnect.commit()\n return obj\n except:\n self.dbconnect.rollback()\n raise", "def create_role(self, role_name, description, rights):\n org_admin_resource = self.client.get_resource(self.href_admin)\n role = E.Role(\n E.Description(description), E.RightReferences(), name=role_name)\n if rights is None:\n rights = ()\n for right in tuple(rights):\n right_record = self.get_right(right)\n role.RightReferences.append(\n E.RightReference(\n name=right_record.get('name'),\n href=right_record.get('href'),\n type=EntityType.RIGHT.value))\n return self.client.post_linked_resource(\n org_admin_resource, RelationType.ADD, EntityType.ROLE.value, role)", "def assign_employee(id):\r\n check_admin()\r\n\r\n employee = Employee.query.get_or_404(id)\r\n\r\n # prevent admin from being assigned a department or role\r\n if employee.is_admin:\r\n abort(403)\r\n\r\n form = EmployeeAssignForm(obj=employee)\r\n if form.validate_on_submit():\r\n employee.department = form.department.data\r\n employee.role = form.role.data\r\n db.session.add(employee)\r\n db.session.commit()\r\n flash('You have successfully assigned a department and role.')\r\n\r\n # redirect to the roles page\r\n return redirect(url_for('admin.list_employees'))\r\n\r\n return render_template('admin/employees/employee.html',\r\n employee=employee, form=form,\r\n title='Assign Employee')", "def role(self, role):\n\n self._role = role", "def role(self, role):\n\n self._role = role", "async def add_role_owner(request, role_id):\n required_fields = [\"id\"]\n utils.validate_fields(required_fields, request.json)\n\n txn_key, txn_user_id = await utils.get_transactor_key(request)\n proposal_id = str(uuid4())\n batch_list = Role().owner.propose.batch_list(\n signer_keypair=txn_key,\n signer_user_id=txn_user_id,\n proposal_id=proposal_id,\n role_id=role_id,\n 
next_id=request.json.get(\"id\"),\n reason=request.json.get(\"reason\"),\n metadata=request.json.get(\"metadata\"),\n )\n await utils.send(\n request.app.config.VAL_CONN, batch_list, request.app.config.TIMEOUT\n )\n return json({\"proposal_id\": proposal_id})", "def add_role(profile, instance_profile, role):\n client = boto3client.get(\"iam\", profile)\n params = {}\n params[\"InstanceProfileName\"] = instance_profile\n params[\"RoleName\"] = role\n return client.add_role_to_instance_profile(**params)", "def add_user(self, REQUEST):\n\n role_id = REQUEST.form['role_id']\n country_code = role_id.rsplit('-', 1)[-1]\n user_id = REQUEST.form['user_id']\n agent = self._get_ldap_agent()\n\n if not self._allowed(agent, REQUEST, country_code):\n return None\n if not nfp_can_change_user(self, user_id, no_org=False):\n # This means somebody is manipulating the DOM in order to\n # add a user that belongs to an organisation from another\n # country (the button doesn't normally appear)\n return None\n\n with agent.new_action():\n role_id_list = agent.add_to_role(role_id, 'user', user_id)\n\n role_msg = get_role_name(agent, role_id)\n msg = \"User %r added to role %s. \\n\" % (user_id, role_msg)\n\n # for Eionet Groups roles only, test if the added user is member of a\n # national organisation\n\n if self.is_eionet_group(role_id):\n if not get_national_org(agent, user_id, role_id):\n msg += (\n \"The user you want to add to an Eionet Group does not\"\n \" have a mandatory reference to an organisation for \"\n \"your country. Please corect!\")\n\n IStatusMessage(REQUEST).add(msg, type='info')\n\n log.info(\"%s ADDED USER %r TO ROLE %r\",\n logged_in_user(REQUEST), user_id, role_id_list)\n\n if '-awp-' in role_id:\n return REQUEST.RESPONSE.redirect(self.absolute_url() +\n '/awps?nfp=%s#role_%s' %\n (country_code, role_id))\n\n return REQUEST.RESPONSE.redirect(self.absolute_url() +\n '/nrcs?nfp=%s#role_%s' %\n (country_code, role_id))", "def role_assign(user_id, role_id):\n user = _get_user_or_404(user_id)\n role = _get_role_or_404(role_id)\n initiator_id = g.user.id\n\n authorization_service.assign_role_to_user(\n role.id, user.id, initiator_id=initiator_id\n )\n\n flash_success(\n gettext(\n '%(role_title)s has been assigned to \"%(screen_name)s\".',\n screen_name=user.screen_name,\n role_title=role.title,\n )\n )", "def assign_member(self, project_id, user_id, role_id):\n resp = {}\n path = '/projects/%s/users/%s/roles/%s' % (project_id, user_id, role_id)\n res = self.client.call(path, 'PUT', data='', \n token=self.manager.identity.token) \n \n self.logger.debug('Grant role %s to user %s on project %s' % \n (project_id, user_id, role_id))\n return True", "def main_role_create(\n client: CitusCloudMgmt,\n **opts: tp.Any\n) -> None:\n\n role_id = client.create_role(opts[\"formation\"], opts[\"name\"])\n logger.info(f\"Created new role \\\"name\\\" with id=\\\"{role_id}\\\"\")\n click.echo(role_id)", "async def createRole(self, ctx):\n await self.deleteRole(ctx=ctx, reason=\"Début de partie.\")\n await ctx.guild.create_role(name=self.categoryName)\n await asyncio.sleep(1)\n self.roleForPlayer = discord.utils.get(ctx.guild.roles, name=self.categoryName)\n print(\"Role created.\")\n member = await ctx.guild.fetch_member(bot.user.id)\n await member.add_roles(self.roleForPlayer, reason=\"Début de partie.\")\n for member in ctx.author.voice.channel.members:\n await member.add_roles(self.roleForPlayer, reason=\"Début de partie.\")", "def promoteRole(event,context):\n #given an email and a role promote the user 
to that role\n if 'role' not in event or 'auth_email' not in event or 'auth' not in event or 'user_email' not in event or 'roleValue' not in event:\n ret = {\"statusCode\":400,\"body\":\"missing email , auth or role\"}\n return config.add_cors_headers(ret)\n #check if non emprt string\n if(type(event['roleValue']) != bool):\n ret = {\"statusCode\":400,\"body\":\"Inavalid value for role\"}\n return config.add_cors_headers(ret)\n if len(event['role']) < 1:\n ret = {\"statusCode\":400,\"body\":\"Invalid role\"}\n return config.add_cors_headers(ret)\n updates = {\"$set\":{\"role.\"+event['role']:event['roleValue']}}\n #parse authorization email and user email and make call to update api. If coming from vhx-scheduler most likely will be a director\n request_data = {\n \"auth_email\":event[\"auth_email\"],\n \"user_email\":event[\"user_email\"],\n \"auth\":event[\"auth\"],\n \"updates\":updates\n }\n #make request and return the value lcs gives us\n ret = requests.post(config.BASE_URL +'/update', json = (request_data))\n return config.add_cors_headers(ret.json())", "def role_create(ctx, name, service):\n # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/iam.html#IAM.Client.create_role\n service_code = SERVICE.get(service, None)\n if service_code:\n res = IAM.client().create_role(\n RoleName=name,\n AssumeRolePolicyDocument=service_policy(service_code),\n )\n click.echo(J(res))", "def _add(self, name, permissions):\n data = {\"name\": name, \"permissions\": permissions}\n path = self.router.roles\n return self.request(method=\"put\", path=path, json=data)", "async def createrole(self, ctx, role: str):\n if role.lower() == \"muted\" or role.lower() == \"punished\":\n return await ctx.send(\"Can not create this roles.\")\n \"\"\"Create a new role\"\"\"\n role = await ctx.guild.create_role(name=role)\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Role *{role}* has been created!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )", "async def setjoinrole(self, ctx, role):\r\n guild = ctx.message.guild\r\n role = discord.utils.get(guild.roles, name=role)\r\n functions.updatesql(server=ctx.guild.id, joinrole=role.id)\r\n await ctx.send(embed=discord.Embed(title='Sucsess!', color=discord.Colour.from_rgb(255, 0, 255)))", "def create(self, role):\n model = models.load('Role', role)\n model.account_id = self.account_id\n\n return self.client.create_role(model)", "def set_role(self, user, role):\n obj = self._get_through_object(user)\n obj.role = role if isinstance(role, int) else obj.ROLE_MAP_REV[role]\n obj.save()", "def _add_users_to_role(self, users, rolename):\n role = Role.objects.get(name=rolename, course_id=self.course.id)\n for user in users:\n role.users.add(user)", "def edit_role(role_id, new_name=None, new_arn=None):\n\tsession = get_session()\n\told_data = get_role(role_id)\n\tdata = {}\n\tdata[\"name\"] = new_name or old_data[\"name\"]\n\tdata[\"arn\"] = new_arn or old_data[\"arn\"]\n\tresponse = session.put(\"{url}/api/roles/{role_id}\".format(url=get_registry_url(), role_id=role_id), json=data)\n\treturn response.json()", "async def _toggle_role(self, ctx, selfrole: Selfrole):\n\n if selfrole.role not in ctx.message.author.roles:\n try:\n await ctx.message.author.add_roles(selfrole.role)\n except discord.Forbidden:\n raise exceptions.ForbiddenError(\n exceptions.ForbiddenTask.ADD_ROLE, selfrole.role.name\n )\n\n await ctx.send(f\"{config.YES} {selfrole.join_message}\")\n\n elif selfrole.role in ctx.message.author.roles:\n 
try:\n await ctx.message.author.remove_roles(selfrole.role)\n except discord.Forbidden:\n raise exceptions.ForbiddenError(\n exceptions.ForbiddenTask.REMOVE_ROLE, selfrole.role.name\n )\n\n await ctx.send(\n f\"{config.YES} The `{selfrole.role.name}` role was removed from you.\"\n )" ]
[ "0.7416667", "0.73289865", "0.72949153", "0.72568375", "0.7244213", "0.71598387", "0.7032163", "0.699969", "0.6978381", "0.69690233", "0.6907972", "0.68849903", "0.687573", "0.6826044", "0.6823985", "0.6823678", "0.67929417", "0.67829245", "0.6731767", "0.6684479", "0.66809964", "0.66765994", "0.6669527", "0.6638014", "0.66187716", "0.66142064", "0.6590134", "0.65658325", "0.65430516", "0.65273905", "0.65255606", "0.65152097", "0.6506699", "0.64734226", "0.6471993", "0.64456934", "0.6431948", "0.6399968", "0.63905036", "0.6387741", "0.6362466", "0.63567305", "0.6349136", "0.634321", "0.6340279", "0.63343024", "0.6319188", "0.63035446", "0.6298525", "0.6274279", "0.6261482", "0.623455", "0.6231147", "0.6231071", "0.6230333", "0.6223553", "0.61983573", "0.61964107", "0.61961657", "0.6193398", "0.6176244", "0.6160964", "0.61337304", "0.6131952", "0.61305696", "0.61210805", "0.6116334", "0.61074066", "0.6090444", "0.60793036", "0.60719216", "0.6071206", "0.6050685", "0.6048905", "0.60357606", "0.6026842", "0.6007714", "0.59998524", "0.59983176", "0.5995316", "0.5995123", "0.59921664", "0.59921664", "0.59664494", "0.5963728", "0.59550333", "0.59513575", "0.5945916", "0.59420574", "0.5938791", "0.5932645", "0.59200215", "0.59126836", "0.59058756", "0.5894047", "0.5881833", "0.5872097", "0.5871093", "0.58652943", "0.5861503" ]
0.80732065
0
gets all the roles of an employee
def get_employeeRoles(self, id): cursor = self.dbconnect.get_cursor() cursor.execute('select * from employeeRoles where employee=%s', (id,)) roles = list() for row in cursor: roles.append(row[1]) return roles
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_roles(role):", "def getRoles(self):", "def getRoles(self):\n return [self.getRole(), {\"roleName\":\"policajti\", \"roleTitle\":\"Svestky\"}]", "def roles(self):\n params = {\n \"f\" : \"json\"\n }\n uURL = self._url + \"/roles\"\n return self._con.get(path=uURL, params=params)", "def get_roles(self):\n\t\tif not self.roles:\n\t\t\tself.roles = get_roles(self.name)\n\t\treturn self.roles", "def get_roles(self, principal_id):", "def _get_roles(self):\n return api.tuskar.OvercloudRole.list(self.request)", "def get_roles():\n\n # Get instance of RolesOperations Class\n roles_operations = RolesOperations()\n\n # Call get_roles method\n response = roles_operations.get_roles()\n\n if response is not None:\n\n # Get the status code from response\n print('Status Code: ' + str(response.get_status_code()))\n\n if response.get_status_code() in [204, 304]:\n print('No Content' if response.get_status_code() == 204 else 'Not Modified')\n return\n\n # Get object from response\n response_object = response.get_object()\n\n if response_object is not None:\n\n # Check if expected ResponseWrapper instance is received.\n if isinstance(response_object, ResponseWrapper):\n\n # Get the list of obtained Role instances\n roles_list = response_object.get_roles()\n\n for role in roles_list:\n # Get the DisplayLabel of each Role\n print(\"Role DisplayLabel: \" + str(role.get_display_label()))\n\n # Get the forecastManager User instance of each Role\n forecast_manager = role.get_forecast_manager()\n\n # Check if forecastManager is not None\n if forecast_manager is not None:\n\n # Get the ID of the forecast Manager\n print(\"Role Forecast Manager User-ID: \" + str(forecast_manager.get_id()))\n\n # Get the name of the forecast Manager\n print(\"Role Forecast Manager User-Name: \" + str(forecast_manager.get_name()))\n\n # Get the ShareWithPeers of each Role\n print(\"Role ShareWithPeers: \" + str(role.get_share_with_peers()))\n\n # Get the Name of each Role\n print(\"Role Name: \" + role.get_name())\n\n # Get the Description of each Role\n print(\"Role Description: \" + str(role.get_description()))\n\n # Get the Id of each Role\n print(\"Role ID: \" + str(role.get_id()))\n\n # Get the reporting_to User instance of each Role\n reporting_to = role.get_reporting_to()\n\n # Check if reporting_to is not None\n if reporting_to is not None:\n # Get the ID of the reporting_to User\n print(\"Role ReportingTo User-ID: \" + str(reporting_to.get_id()))\n\n # Get the name of the reporting_to User\n print(\"Role ReportingTo User-Name: \" + str(reporting_to.get_name()))\n\n # Get the AdminUser of each Role\n print(\"Role AdminUser: \" + str(role.get_admin_user()))\n\n # Check if the request returned an exception\n elif isinstance(response_object, APIException):\n # Get the Status\n print(\"Status: \" + response_object.get_status().get_value())\n\n # Get the Code\n print(\"Code: \" + response_object.get_code().get_value())\n\n print(\"Details\")\n\n # Get the details dict\n details = response_object.get_details()\n\n for key, value in details.items():\n print(key + ' : ' + str(value))\n\n # Get the Message\n print(\"Message: \" + response_object.get_message().get_value())", "def get_roles(self):\n return [role.role_id for role in self.roles if role]", "def get_roles():\n check_admin()\n roles = Role.query.all()\n\n return render_template('admin/roles/roles.html', roles=roles, title=\"Roles\")", "def get_roles():\r\n global _roles\r\n return _roles", "def get_roles(self):\n path = \"%s/services/impala/roles\" % 
self.__base_path\n response = self.__session.get(path)\n self.__check_status_code(response.status_code)\n return response.json()", "def get_user_roles(self):\n url = 'userroles'\n result = self.get(url)\n return result.get('userroles', result)", "def get_roles(self, **search_args):\n return self.openbis.get_role_assignments(person=self, **search_args)", "def list(self):\n return self.client.find_all_roles()", "def get_admins(self):\n from Employee import Employee\n admins = list()\n cursorRoles = self.dbconnect.get_cursor()\n cursorRoles.execute('select * from employeeRoles where role=\\'admin\\'')\n for row in cursorRoles:\n admins.append(self.get_employee(row[0]))\n return admins", "def get_roles(self) -> requests.models.Response:\n return self.get('v1/roles')", "def get_roles_list(self):\n try:\n roles = self.db_handler.get_roles_list()\n self.logger.write_to_log('roles got', 'model')\n return roles\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def getRoles():\n return jsonify(listRoles(ROLES_DIR))", "def getAllRoles(self):\n\n # Learn URL of AllRoles service\n url = self.config.get(\"Authorization\",\"allroles\") # http://erra.ccss.cz/g4i-portlet/service/list/roles/en\n logging.debug(\"[LaymanAuthLiferay][getAllRoles] AllRoles url: %s\"% url)\n \n # Request all roles from LifeRay\n import httplib2\n h = httplib2.Http()\n header, content = h.request(url, \"GET\")\n logging.debug(\"[LaymanAuthLiferay][getAllRoles] response header: %s\"% header)\n logging.debug(\"[LaymanAuthLiferay][getAllRoles] response content: %s\"% content)\n\n # Parse the response\n try:\n allRolesJson = json.loads(content)\n logging.debug(\"[LaymanAuthLiferay][getAllRoles] AllRoles reply succesfully parsed\")\n except ValueError,e:\n logging.error(\"[LaymanAuthLiferay][getAllRoles] Cannot parse AllRoles reply: '%s'\"% content)\n raise AuthError(500, \"Cannot parse GET All Roles response [%s] as JSON:%s\"% (content,e)) \n \n roles = allRolesJson[\"roles\"]\n\n # lower() and spaces\n for rr in roles:\n rr[\"roleName\"] = rr[\"roleName\"].lower()\n rr[\"roleName\"] = \"_\".join(rr[\"roleName\"].split(' '))\n\n # Return roles\n logging.debug(\"[LaymanAuthLiferay][getAllRoles] Return roles: %s\"% str(roles))\n return roles", "def get_granted_roles(self):", "def getRoles(context):\n\n pmemb = getToolByName(getSite(), 'portal_membership')\n roles = [role for role in pmemb.getPortalRoles() if role != 'Owner']\n return SimpleVocabulary.fromValues(roles)", "def get(self):\n return self._roles.get(self._id)", "def getRoles(self):\n\t\tpayload = ''\n\t\tif self.Roles:\n\t\t\tif type(self.Roles) != int:\n\t\t\t\tfor x in range(0,len(self.Roles)):\n\t\t\t\t\tpayload += \"%s\" % (self.Roles[x])\n\t\t\t\treturn self.Roles\n\t\t\telse:\n\t\t\t\treturn None", "def list_roles():\n\tsession = get_session()\n\tresponse = session.get(\"{url}/api/roles\".format(url=get_registry_url()))\n\treturn response.json()[\"results\"]", "def test_list_roles(self):\n pass", "def get_roles():\n return config.get_cfg_storage(ID_ROLE)", "def get_all_roles():\n\n cnx,cur = connect_to_db() #get connection with db\n cur.execute(\"SELECT DISTINCT role FROM movie_crew\")\n lst = cur.fetchall()\n cur.close()\n cnx.close()\n return lst", "def get_roles_descriptions():\n\n cnx,cur = connect_to_db() #get connection with db\n cur.execute(\"select distinct role, job_description from department where 
job_description is not null \")\n lst = cur.fetchall()\n cur.close()\n cnx.close()\n return lst", "def list_roles(self, hints):\n raise exception.NotImplemented() # pragma: no cover", "def get_movie_roles(movie_id):\n\n cnx,cur = connect_to_db() #get connection with db\n cur.execute(\"SELECT DISTINCT role FROM movie_crew WHERE movie_id = \" + movie_id )\n lst = cur.fetchall()\n cur.close()\n cnx.close()\n return lst", "def token_auth_get_user_roles(user):\n print(user)\n return user.get_roles()", "def roles(self):\n return self._roles", "def roles(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"roles\")", "def get_emrfs_roles():\n role_list = []\n path_prefix = '/emrfs/'\n aws_api_reponse = iam_client.list_roles(\n PathPrefix=path_prefix,\n )\n get_paginated_results_using_marker(\n aws_api_reponse=aws_api_reponse,\n list=role_list,\n iam_client_call=iam_client.list_roles,\n field_name='Roles',\n client_call_args={'PathPrefix': path_prefix})\n\n return_list = []\n for role in role_list:\n return_list.append(role['RoleName'])\n\n return return_list", "def list_roles(self):\n resp, body = self.get(\"roles\")\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return service_client.ResponseBodyList(resp, body['roles'])", "def main_role_list(\n client: CitusCloudMgmt,\n **opts: tp.Any\n) -> None:\n\n roles = client.list_roles(opts[\"formation\"])\n click.echo(\n tabulate.tabulate(\n [{\"Name\": i.name, \"Id\": i.id_} for i in roles],\n headers=\"keys\",\n ),\n )", "def listRoles(self):\n return self._client.listRoles()", "def listRoleInfo(self):\n return self._roles.values()", "def list(self, **kwargs):\n params = {}\n url = '/openstack/roles?%(params)s' % {\n 'params': parse.urlencode(params, True)\n }\n return self._list(url, 'roles')", "def list(self, **kwargs):\n # TODO(adriant): Look up user by name/id\n url = '/openstack/users/%s/roles' % kwargs['user']\n return self._list(url, 'roles')", "def get_supervisor_roles(user):\n raise Exception(\"Someone needs to fix this method to no longer be dependent on model relationship if they're going to use it!\")", "def test_list_role(self):\n pass", "def getRole(self, desired=None):\n strDes = str(desired)\n logging.debug(\"[LaymanAuthLiferay][getRole]: '%s'\"%strDes)\n if not self.authorised:\n logging.error(\"[LaymanAuthLiferay][getRole] The user is not authorised\")\n raise AuthError(401, \"I am sorry, but you are not authorised\")\n if self.authJson[\"userInfo\"] and self.authJson[\"userInfo\"][\"roles\"]:\n roles = self.authJson[\"userInfo\"][\"roles\"]\n if len(roles) < 1:\n logging.error(\"[LaymanAuthLiferay][getRole] Cannot determine the workspace - Liferay provided empty list of roles\")\n raise AuthError(500,\"Cannot determine the workspace - Liferay provided empty list of roles\") \n\n theRole = roles[0]\n for r in roles:\n if desired == r[\"roleName\"]:\n theRole = r\n\n #lower and spaces\n #theRole[\"roleName\"] = theRole[\"roleName\"].lower()\n #theRole[\"roleName\"] = \"_\".join(theRole[\"roleName\"].split(' '))\n roleName = theRole[\"roleName\"]\n logging.debug(\"[LaymanAuthLiferay][getRole] The role: '%s'\"% roleName)\n return theRole\n else: \n logging.error(\"[LaymanAuthLiferay][getRole] Cannot determine the workspace - Liferay did not provide user's roles\")\n raise AuthError(500,\"Cannot determine the workspace - Liferay did not provide user's roles\")", "def get_principals(self, role_id):", "def roles(self) -> Optional[Sequence['outputs.AssessmentRole']]:\n return pulumi.get(self, 
\"roles\")", "def list_roles(self, name_filter=None):\n if self.resource is None:\n self.resource = self.client.get_resource(self.href)\n\n org_filter = None\n resource_type = 'role'\n if self.client.is_sysadmin():\n resource_type = 'adminRole'\n org_filter = 'org==%s' % self.resource.get('href')\n\n query = self.client.get_typed_query(\n resource_type,\n query_result_format=QueryResultFormat.RECORDS,\n equality_filter=name_filter,\n qfilter=org_filter)\n result = []\n for r in list(query.execute()):\n result.append(\n to_dict(\n r,\n resource_type=resource_type,\n exclude=['org', 'orgName']))\n return result", "def fusion_api_get_roles(self, uri=None, param='', api=None, headers=None):\n return self.roles.get(uri=uri, api=api, headers=headers, param=param)", "def rolenames(self):\n try:\n return self.roles.split(',')\n except Exception:\n return []", "def basic_auth_get_user_roles(login_details):\n user = User.query.filter_by(email=login_details[\"username\"]).first()\n\n return user.get_roles()", "def get_all_roles(self):\n token = self.get_token_auth_header()\n unverified_claims = jwt.get_unverified_claims(token)\n return self.jwt_role_callback(unverified_claims)", "def roles(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"roles\")", "def roles(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"roles\")", "def getRoleInfo(self, role):", "def get_roles_ids(self, principal_id):", "def get_roles(self, principal):\n return permissions.utils.get_local_roles(self, principal)", "def test_ipam_roles_list(self):\n pass", "def getCloudRoleList():\n \n roleList = {}\n\n # get all available roles\n try:\n list = Roletype.objects.all()\n \n # loop through list\n for listInfo in list:\n roleList[listInfo.id] = listInfo\n \n except Exception, e:\n debugException(e)\n \n return roleList", "async def get_user_roles(request):\n\n user_id = request.match_info[\"user_id\"]\n try:\n user_id = int(user_id)\n except (ValueError, TypeError):\n return web.Response(status=400, text=\"Incorrect user_id\")\n\n user = request.cirrina.db_session.query(User).filter_by(id=user_id).first()\n if not user:\n return web.Response(status=404, text=\"User not found\")\n\n data = {\n \"username\": user.username,\n \"user_id\": user.id,\n \"roles\": {\"owner\": [], \"member\": [], \"manager\": []}, # FIXME : use USER_ROLES\n }\n\n roles = (\n request.cirrina.db_session.query(UserRole)\n .filter_by(user_id=user_id)\n .join(Project)\n .filter(UserRole.project_id == Project.id)\n .order_by(Project.name)\n .values(UserRole.role, Project.id, Project.name)\n )\n\n for role in roles:\n data[\"roles\"][role.role].append({\"id\": role.id, \"name\": role.name})\n\n return web.json_response(data)", "def get_user_roles(user=None):\n if user is None:\n user = g.user\n return user.roles", "def roles(self) -> List[str]:\n\n role_list = []\n for spec in self.specs.values():\n role = spec.role()\n if role not in role_list:\n role_list.append(role)\n return role_list", "def roles(self):\n db = self['__store'].db\n my_roles = {\n group_id\n for group_id, in db(\"\"\"\n select distinct\n groups.id\n from `groups`, subgroups\n where\n groups.id = subgroups.group_id\n and subgroup_id = %s\n and groups.type = 'U'\n \"\"\",\n self._id)\n }\n return my_roles", "def test_a_get_all_roles(self):\n print '##----++++ PRUEBA UNITARIA ROL ++++----##'\n print '+++ Obtener todos los roles +++'\n request = self.client.get('/administrarrol', follow_redirects=True)\n 
self.assertNotIn('Sin permisos para administrar roles', request.data, 'No tiene permisos para ver los roles')\n self.assertEqual(request._status, '200 OK', 'Error al obtener roles como '+ TEST_USER)\n print '*-- Obtiene todos los roles -- request result: ' + request._status + ' --*'\n print'*---test 1 rol---*'", "def get_roles(member: discord.Member):\n role_list = []\n for role in member.roles:\n role_list.append(str(role))\n return role_list", "def admin_roles(request):\n user = User.objects.get(username=request.user.username)\n permisos = get_permisos_sistema(user)\n return render_to_response('admin/roles/roles.html',{'user':user,\n 'ver_roles':'Ver roles' in permisos,\n 'crear_rol': 'Crear rol' in permisos,\n 'mod_rol': 'Modificar rol' in permisos,\n 'eliminar_rol': 'Eliminar rol' in permisos},context_instance=RequestContext(request))", "async def roles(self, ctx):\n\n pass", "async def list_roles(self, ctx: commands.Context):\n all_roles = await self.config.guild(ctx.guild).autoroles()\n maybe_not_found = []\n message = \"\"\n for role in all_roles:\n fetched_role = ctx.guild.get_role(role)\n if not fetched_role:\n maybe_not_found.append(role)\n continue\n message += \"- {name} (`{id}`).\\n\".format(name=fetched_role.name, id=fetched_role.id)\n if maybe_not_found:\n clean_list = list(set(all_roles) - set(maybe_not_found))\n await self.config.guild(ctx.guild).autoroles.set(clean_list)\n message += \"\\nSome roles has been removed since I was unable to find them.\"\n if message:\n for line in pagify(message):\n await ctx.send(line)\n else:\n await ctx.send(\"No role has been added.\")", "def roles(self):\r\n return self._roles_str.split(\",\")", "def _get_role(self):\n return self.__role", "def roleNames(self):\n return self._roles", "def roles(self):\n role_ids = self.role_ids\n if role_ids is None:\n roles = None\n else:\n roles = sorted(create_partial_role_from_id(role_id) for role_id in self.role_ids)\n \n return roles", "def roles(self):\n # type: (...) 
-> Set[Role]\n return self._roles", "def get_roles_output(filter: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetRolesResult]:\n ...", "def get_country_roles(country):\n cnx,cur = connect_to_db()\n cur.execute(\"SELECT DISTINCT role FROM locations, movie_crew \"\n \"WHERE locations.country LIKE '%\" + country + \"%' AND locations.movie_id = movie_crew.movie_id\")\n lst = cur.fetchall()\n cur.close()\n cnx.close()\n return lst", "def extract_semantic_roles(self): \n entitySemanticRoleList = [] \n for semanticRole in self.semanticRoleList:\n subject = semanticRole.get('subject', 'NULL')\n eobject = semanticRole.get('object', 'NULL')\n if self.find_in_keywords_and_entities(subject, eobject):\n entitySemanticRoleList.append(semanticRole) \n \n for role in self.watsonLanguageModel['semantic_roles']:\n subject = 'NULL'\n eobject = 'NULL'\n action = 'NULL'\n predicate = 0\n if 'subject' in role:\n subject = role['subject'].get('text')\n predicate += 1\n if 'object' in role:\n eobject = role['object'].get('text')\n predicate += 1\n if 'action' in role:\n action = role['action'].get('text')\n predicate += 1\n if self.find_in_keywords_and_entities(subject, eobject) and (predicate > 2 or (action !='NULL' and eobject != 'NULL')) : \n entitySemanticRoleList.append({'subject':subject, 'action':action, 'object': eobject, 'sentence': role['sentence']})\n\n return entitySemanticRoleList", "def list_role(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_role\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/roles'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1RoleList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def test_list_cluster_role(self):\n pass", "def courses_with_role(self):\r\n return CourseAccessRole.objects.filter(role=self.role, user=self.user)", "def roles(self):\n # TODO: The admin interface only allows a subset of the roles\n # listed in model.py since it uses the OPDS 
representation of\n # the data, and some of the roles map to the same MARC code.\n CODES = Contributor.MARC_ROLE_CODES\n marc_to_role = dict()\n for role in [\n Contributor.ACTOR_ROLE,\n Contributor.ADAPTER_ROLE,\n Contributor.AFTERWORD_ROLE,\n Contributor.ARTIST_ROLE,\n Contributor.ASSOCIATED_ROLE,\n Contributor.AUTHOR_ROLE,\n Contributor.COMPILER_ROLE,\n Contributor.COMPOSER_ROLE,\n Contributor.CONTRIBUTOR_ROLE,\n Contributor.COPYRIGHT_HOLDER_ROLE,\n Contributor.DESIGNER_ROLE,\n Contributor.DIRECTOR_ROLE,\n Contributor.EDITOR_ROLE,\n Contributor.ENGINEER_ROLE,\n Contributor.FOREWORD_ROLE,\n Contributor.ILLUSTRATOR_ROLE,\n Contributor.INTRODUCTION_ROLE,\n Contributor.LYRICIST_ROLE,\n Contributor.MUSICIAN_ROLE,\n Contributor.NARRATOR_ROLE,\n Contributor.PERFORMER_ROLE,\n Contributor.PHOTOGRAPHER_ROLE,\n Contributor.PRODUCER_ROLE,\n Contributor.TRANSCRIBER_ROLE,\n Contributor.TRANSLATOR_ROLE,\n ]:\n marc_to_role[CODES[role]] = role\n return marc_to_role", "def test_ipam_roles_read(self):\n pass", "def list_keystone_v3_roles(self):\n LOG_OBJ.debug(\"List the roles.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/roles\"\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while listing role\")\n print (\"No response from Server while listing role\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" List roles Failed with status %s and error : %s\" %\n (response.status, response.data))\n print (\" List roles Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Roles list : %s \" % output)\n print (\"Roles list : %s \" % output)\n return output['roles']", "def get_roles(self,\n *options, # type: GetRolesOptions\n **kwargs # type: Any\n ):\n # type: (...) 
-> Iterable[RoleAndDescription]\n\n final_args = forward_args(kwargs, *options)\n timeout = final_args.get(\"timeout\", None)\n return list(map(lambda r: RoleAndDescription.load_from_server(r),\n self._admin_bucket.get_roles(timeout).value))", "def _get_role_ids(identity_client, parsed_args):\n role_id = None\n implied_role_id = None\n\n roles = identity_client.roles.list()\n\n for role in roles:\n role_id_or_name = (role.name, role.id)\n\n if parsed_args.role in role_id_or_name:\n role_id = role.id\n elif parsed_args.implied_role in role_id_or_name:\n implied_role_id = role.id\n\n return (role_id, implied_role_id)", "def get_roles(filter: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRolesResult:\n __args__ = dict()\n __args__['filter'] = filter\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('datadog:index/getRoles:getRoles', __args__, opts=opts, typ=GetRolesResult).value\n\n return AwaitableGetRolesResult(\n filter=pulumi.get(__ret__, 'filter'),\n id=pulumi.get(__ret__, 'id'),\n roles=pulumi.get(__ret__, 'roles'))", "def test_get_roles_user(self):\n contrib_as = self.make_assignment(\n self.project, self.user_bob, self.role_contributor\n )\n guest_as = self.make_assignment(\n self.project, self.user_carol, self.role_guest\n )\n roles = self.project.get_roles()\n self.assertIn(contrib_as, roles)\n self.assertIn(guest_as, roles)\n roles = self.project.get_roles(user=self.user_carol)\n self.assertNotIn(contrib_as, roles)\n self.assertIn(guest_as, roles)", "def read_roles():\n roles = defaultdict(list)\n invert_is_a = defaultdict(list)\n with open('relation.tsv','r') as inf:\n for line in inf:\n x = line.strip().split('\\t')\n if x[1] == 'has_role':\n roles[f'CHEBI:{x[3]}'].append(f'CHEBI:{x[2]}')\n elif x[1] == 'is_a':\n child = f'CHEBI:{x[3]}'\n parent = f'CHEBI:{x[2]}'\n invert_is_a[parent].append(child)\n #Now include parents\n ancestors = get_ancestors(invert_is_a)\n for node,noderoles in roles.items():\n if node == 'CHEBI:64663':\n print('hi')\n restroles= []\n for role in noderoles:\n moreroles=ancestors[role]\n restroles += moreroles\n roles[node] += restroles\n return roles", "def getRole(self, desired=None):\n return {\"roleName\":\"hasici\",\n \"roleTitle\":\"Soptici\"}", "def get_manager_employees(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n manager_employees = Employee.objects.filter(manager=current_employee, development_plan_type=None).all()\n if manager_employees:\n emp_list=[]\n for emp in manager_employees:\n emp_data={}\n emp_data[\"id\"] = emp.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"manager_id\"] = emp.manager.id\n # emp_data[\"status_questions\"] = emp.status_questions\n # employee_role = EmployeeRole.objects.filter(employee=emp).all()\n # name_role_list = []\n # for obj in employee_role:\n # name_role_list.append(obj.role.name)\n # emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n data = {\"employees:\": emp_list}\n return JsonResponse(status=201, data=data)\n else:\n return JsonResponse(\"The user with id={} isn't a manager for any user\".format(current_employee.user.id),\n status=404)", "def InspireRoles(self, default=[None]):\n return self.data.get('inspire_roles', default)", "def get_employees(self):\n return self.employees", "def get_roles(self, include_remote=True):\n rbac_service = 
get_rbac_backend().get_service_class()\n result = rbac_service.get_roles_for_user(\n user_db=self, include_remote=include_remote\n )\n return result", "def effective_roles(self):\n # type: (...) -> List[RoleAndOrigins]\n return self._effective_roles", "def list_assigned_keystone_v3_roles(self, **kwargs):\n LOG_OBJ.debug(\"List the roles.\")\n\n url_filter = \"\"\n for argument in kwargs.keys():\n if \"id\" in url_filter:\n url_filter += \"&\"\n if argument in [\"role\", \"user\"]:\n url_filter += argument + \".id=\" + kwargs[argument]\n elif argument in [\"domain\", \"project\"]:\n url_filter += \"scope.\" + argument + \".id=\" + kwargs[argument]\n\n _url = \"http://\" + self.host_ip + \":35357/v3/role_assignments\"\n if url_filter:\n _url += \"?\" + url_filter\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while listing \"\n \"roles assignment\")\n print (\"No response from Server while listing roles assignment\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" List roles assignment is Failed with status %s \"\n \"and error : %s\" % (response.status, response.data))\n print (\" List roles asignment is Failed with status %s \"\n \"and error : %s\" % (response.status, response.data))\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Roles assignment list : %s \" % output)\n print (\"Roles assignment list : %s \" % output)\n return output['role_assignments']", "def test_get_children_role(self):\n root = role_middleware.get_root()\n children = role_middleware.get_children(root.id)\n result = [(ro.name, ro.id) for ro in children]\n print(result)", "def get_role(role_id):\n\n \"\"\"\n example\n role_id = 3409643000000026005\n \"\"\"\n\n # Get instance of RolesOperations Class\n roles_operations = RolesOperations()\n\n # Call get_role method that takes role_id as parameter\n response = roles_operations.get_role(role_id)\n\n if response is not None:\n\n # Get the status code from response\n print('Status Code: ' + str(response.get_status_code()))\n\n if response.get_status_code() in [204, 304]:\n print('No Content' if response.get_status_code() == 204 else 'Not Modified')\n return\n\n # Get object from response\n response_object = response.get_object()\n\n if response_object is not None:\n\n # Check if expected ResponseWrapper instance is received.\n if isinstance(response_object, ResponseWrapper):\n\n # Get the list of obtained Role instances\n roles_list = response_object.get_roles()\n\n for role in roles_list:\n # Get the DisplayLabel of each Role\n print(\"Role DisplayLabel: \" + role.get_display_label())\n\n # Get the forecastManager User instance of each Role\n forecast_manager = role.get_forecast_manager()\n\n # Check if forecastManager is not None\n if forecast_manager is not None:\n\n # Get the ID of the forecast Manager\n print(\"Role Forecast Manager User-ID: \" + str(forecast_manager.get_id()))\n\n # Get the name of the forecast Manager\n print(\"Role Forecast Manager User-Name: \" + forecast_manager.get_name())\n\n # Get the ShareWithPeers of each Role\n print(\"Role ShareWithPeers: \" + str(role.get_share_with_peers()))\n\n # Get the Name of each Role\n print(\"Role Name: \" + role.get_name())\n\n # Get the Description of each Role\n print(\"Role Description: \" + role.get_description())\n\n # Get the Id of 
each Role\n print(\"Role ID: \" + str(role.get_id()))\n\n # Get the reporting_to User instance of each Role\n reporting_to = role.get_reporting_to()\n\n # Check if reporting_to is not None\n if reporting_to is not None:\n # Get the ID of the reporting_to User\n print(\"Role ReportingTo User-ID: \" + str(reporting_to.get_id()))\n\n # Get the name of the reporting_to User\n print(\"Role ReportingTo User-Name: \" + reporting_to.get_name())\n\n # Get the AdminUser of each Role\n print(\"Role AdminUser: \" + str(role.get_admin_user()))\n\n # Check if the request returned an exception\n elif isinstance(response_object, APIException):\n # Get the Status\n print(\"Status: \" + response_object.get_status().get_value())\n\n # Get the Code\n print(\"Code: \" + response_object.get_code().get_value())\n\n print(\"Details\")\n\n # Get the details dict\n details = response_object.get_details()\n\n for key, value in details.items():\n print(key + ' : ' + str(value))\n\n # Get the Message\n print(\"Message: \" + response_object.get_message().get_value())", "def get_role_list(people, role):\n people = ast.literal_eval(people)\n crew = []\n\n for person in people:\n if person['job'] == role:\n crew.append(str(person['name']))\n\n return crew if len(crew) else []", "def user_roles():\n access_token = _request_ctx_stack.top.current_user_token\n message_log(\"Got access token for user roles\")\n user_roles = get_user_roles(access_token)\n return json.dumps(list(user_roles))", "def collection_get(request):\n\n # Our account parameter\n account = request.matchdict['id_account']\n\n # Our admin object\n admin = _get_admin(request)\n\n # Check if the account exists\n if account not in admin.list_accounts():\n request.response.status_int = 404\n return\n\n # Get the roles\n list_roles = admin.list_roles(account)\n\n # Return appropriately\n request.response.status_int = 200\n return {\n 'roles':\n list_roles\n }", "def test03_perm_roles(self):\n print_ln('test16_perm_roles')\n \n try:\n pList = review.find_perms(Perm(obj_name='py-obj*', op_name='*'))\n for perm in pList: \n print_ln(\"Role Perm obj name=\" + perm.obj_name + ', op=' + perm.op_name + ', id=' + perm.obj_id)\n rList = review.perm_roles(perm)\n for role in rList:\n print_ln(\"Assigned role=\" + role, 1)\n except Exception as e:\n self.fail('test16_perm_roles failed, exception=' + e.msg)", "def present_roles(self):\n print(\"User\" + str(self.unique_id) + \": roles=\")\n for group in self._roles:\n print(\"\\tGroup\" + str(group) + \" -> [\"\n + self.get_role_from_type(group, roles_influence) + \", \"\n + self.get_role_from_type(group, roles_neighbors) + \", \"\n + self.get_role_from_type(group, roles_activities) + \", \"\n + self.get_role_from_type(group, roles_attitude) + \"]\")\n print('')" ]
[ "0.7949602", "0.77490324", "0.7219508", "0.7100693", "0.70895886", "0.70715743", "0.6987245", "0.6968574", "0.6919542", "0.6890405", "0.6882044", "0.6865154", "0.6842712", "0.67842984", "0.67706275", "0.67660475", "0.6756659", "0.673916", "0.6718372", "0.6637145", "0.66315967", "0.6626008", "0.6624409", "0.66179127", "0.6606336", "0.6598714", "0.6573045", "0.6572775", "0.6529231", "0.6503123", "0.6472473", "0.6433626", "0.6421189", "0.637404", "0.6357579", "0.6344778", "0.6341852", "0.6335649", "0.6334175", "0.6313716", "0.6308171", "0.6288173", "0.6280025", "0.62700564", "0.6269134", "0.62645847", "0.62488675", "0.624626", "0.6242805", "0.6208971", "0.6204049", "0.62015104", "0.62015104", "0.6197812", "0.6175233", "0.61608213", "0.6155379", "0.6135312", "0.61324036", "0.61290485", "0.61167824", "0.60851544", "0.6064717", "0.60308325", "0.603046", "0.59978753", "0.5996386", "0.59888303", "0.5981494", "0.5972701", "0.596579", "0.5958147", "0.594103", "0.59292734", "0.59252876", "0.59193546", "0.5914341", "0.59102225", "0.59090745", "0.589394", "0.589179", "0.5887753", "0.5872616", "0.58610445", "0.5858364", "0.5856639", "0.5854413", "0.5843137", "0.5832999", "0.58240616", "0.58008665", "0.58001643", "0.5795982", "0.57929975", "0.57929224", "0.579166", "0.57824194", "0.57701796", "0.5765859", "0.576307" ]
0.81541693
0
changes the data of an employee
def change_employee(self, employee): cursor = self.dbconnect.get_cursor() try: if employee.id == None: raise Exception('no id given') cursor.execute('select * from employee where employeeID=%s', (str(employee.id),)) if cursor.rowcount == 0: raise Exception('no employee found with that id') cursor.execute( 'update employee set name= %s,email= %s,office= %s,title= %s,INTernORextern= %s,active= %s,promotor= %s where employeeID=%s', (employee.name, employee.email, employee.office, employee.title, employee.internOrExtern, employee.active, employee.promotor, employee.id)) self.dbconnect.commit() except: self.dbconnect.rollback() raise Exception('unable to change employee')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_employee(self, obj):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('UPDATE employee '\n 'SET name = %s, email = %s, office = %s, extra_info = %s, picture_location = %s, '\n 'research_group = %s, title = %s, is_external = %s, is_admin = %s, is_active = %s '\n 'WHERE id = %s;',\n (obj.name, obj.email, obj.office, obj.extra_info, obj.picture_location, obj.research_group,\n obj.title, obj.is_external, obj.is_admin, obj.is_active, obj.e_id))\n self.dbconnect.commit()\n except:\n self.dbconnect.rollback()\n raise", "def change_employee(self,changed_identity):\r\n\r\n changed_emp = Employee(*changed_identity)\r\n changed_str = changed_emp.get_changes_registration_str()\r\n\r\n return_value = self.change_object_in_DB(\"employee\", changed_str, changed_emp._id) # Bring 'id' seperately, so next function can find line number\r\n return return_value", "def update_employee(employee):\n employee_id = get_employee_input_int(\"Enter the employee id you want to update\")\n newGrade = get_employee_input_int(\"Enter the new grade for \")\n db.update_employee(employee_id, newGrade)\n print(employee.full_name + \"'s grade value has been updated to :-> \", newGrade)", "def setEmployees(self, employees):\n self.employees = employees", "def employee(self, employee: object):\n\n self._employee = employee", "def update(self, request, pk):\n serializer = data_serializers.UpdateEmployeeRequestSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n request_data = serializer.save()\n try:\n new_employee_entity = self.controller.update_employee(request_data=request_data)\n serializer = data_serializers.PresentEmployeeDataSerializer(new_employee_entity)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except (domain_exceptions.EmployeeIDIsNotUnique,\n domain_exceptions.WorkArrangementPercentageOutOfRange,\n domain_exceptions.TeamHasALeader,\n domain_exceptions.ObjectEntityDoesNotExist\n ) as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def employees(self, employees: object):\n\n self._employees = employees", "def make_salaried(self,salary,name):\n id = self.find_employee_id(name)\n if id in self.clsf:\n self.emp_dict[id][5] = \"2\"\n print(\"{}{}\".format(name,\" was successfully changed to be a salaried employee\"))\n self.emp_dict[id][7] = salary\n self.classification()\n return self.emp_dict\n else:\n print(\"Error- employee not found\")\n self.employee_data()", "def put(self, request, pk):\n data = request.data\n data.pop('skills')\n Department_name = data.pop('department')\n department = Department.objects.get(name=Department_name)\n manager_name = data.pop('manager')\n manager = Manager.objects.get(name=manager_name)\n EmployeeDetail.objects.filter(pk=pk).update(department=department, manager=manager, **data)\n return Response(\n data=\"request.data\"\n )", "def update_employee(emp_id, key=None, value=None, items=None):\n if items is None:\n if key is None or value is None:\n return {\"Error\": \"At least one key/value pair is required\"}\n items = {key: value}\n elif isinstance(items, str):\n items = salt.utils.yaml.safe_load(items)\n\n xml_items = \"\"\n for pair in items:\n xml_items += '<field id=\"{}\">{}</field>'.format(pair, items[pair])\n xml_items = \"<employee>{}</employee>\".format(xml_items)\n\n status, result = _query(\n action=\"employees\",\n command=emp_id,\n data=xml_items,\n method=\"POST\",\n )\n\n return 
show_employee(emp_id, \",\".join(items.keys()))", "def put(self, id):\n empleadoactualizar = EmployeeModel.query.filter_by(employee_id=id).first()\n if empleadoactualizar:\n reg = api.payload\n empleadoactualizar.employee_id = reg['employee_id']\n empleadoactualizar.name = reg['name']\n empleadoactualizar.age = reg['age']\n empleadoactualizar.position = reg['position']\n empleadoactualizar.fechaingreso = datetime.date.fromisoformat(reg['fechaingreso'])\n db.session.merge(empleadoactualizar)\n db.session.commit()\n return 201\n api.abort(404)", "def replace_employee(employees, old_employee, new_employee):\n #getting index of the old employee and saving it\n index = employees.index(old_employee)\n #deleting the old employee\n del employees[index] #yes, I remember about \"pop\" built-in function from the lecture, just like this one better :)\n #inserting the new employee to the position of the old one\n employees.insert(index, new_employee)", "def UpdateEmployee():\n staff = current_user\n form = UpdateEmployeeForm()\n if form.validate_on_submit():\n staff.first_name=form.first_name.data.lower()\n staff.last_name=form.last_name.data.lower()\n staff.email=form.email.data\n staff.location=form.location.data\n db.session.commit()\n flash(f'Employee Updated', category='Success')\n elif request.method == 'GET':\n form.first_name.data=staff.first_name.capitalize()\n form.last_name.data=staff.last_name.capitalize()\n form.email.data=staff.email\n form.role.choices=[staff.role]\n form.location.data=staff.location\n return render_template('update_employee.html', title=\"Update Employee\", form=form)", "def make_commissioned(self,salary,commission,name):\n id = self.find_employee_id(name)\n if id in self.clsf:\n self.emp_dict[id][5] = \"3\"\n print(\"{}{}\".format(name,\" was successfully changed to be a commissioned employee\"))\n self.emp_dict[id][7] = salary\n self.emp_dict[id][9] = commission\n self.classification()\n return self.emp_dict\n else:\n print(\"Error- employee not found\")\n self.employee_data()", "def edit_employee(employee_id):\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employee = Employee.query.get_or_404(employee_id)\n form = Edit_User_Form(obj = employee)\n \n #form.location.choices = db.session.query(Location.id, Location.site_name).all()\n \n #form.certs.choices = db.session.query(Certs.id , Certs.cert_name).all()\n\n if form.validate_on_submit():\n \n employee.email = form.email.data, \n employee.first_name = form.first_name.data,\n employee.last_name = form.last_name.data,\n employee.hire_date = form.hire_date.data, \n employee.is_admin = form.is_admin.data\n\n \n db.session.commit()\n \n flash(f\"{employee.first_name} {employee.last_name} has been saved\", \"success\")\n return redirect(\"/administrator\")\n else:\n\n return render_template(\"/admin/edit_user.html\", employee = employee, form = form)", "def update_employee(cls, schema, uuid, employee_json):\n employee = cls.get_employee_by_uuid(uuid)\n if employee is None:\n raise ValueError('Invalid employee uuid')\n employee = schema.load(\n employee_json, session=db.session, instance=employee\n )\n db.session.add(employee)\n db.session.commit()\n return employee", "def update_data():\n pass", "def put(self, uuid: str):\n try:\n employee = self.service.update_employee(\n self.schema, uuid, request.json\n )\n except ValidationError as error:\n return error.messages, 400\n 
except ValueError:\n return self.NOT_FOUND_MESSAGE, 404\n return self.schema.dump(employee), 200", "def put(self, employee_id):\n\n employee = EmployeeModel.find_by_id(employee_id)\n if employee is None:\n return {'message': \"There is no employee with this ID, or your access_token is invalid.\"}, 404\n else:\n \"\"\" check if employee entered the building today\"\"\"\n if WorkdayModel.find_latest_workday(employee.id):\n \"\"\"checking if employee already entered building today\"\"\"\n last_workday = WorkdayModel.find_latest_workday(employee.id)\n\n if last_workday.time_in.day == datetime.today().day:\n last_workday.time_out = datetime.today()\n # calculate hours_worked| .time converts to H:M\n duration = last_workday.time_out - last_workday.time_in\n # duration is a datetime.timedelta\n duration = (datetime.min + duration).time()\n last_workday.hours_worked = duration\n try:\n last_workday.save_to_db()\n except:\n return {'message': 'An error occurred updating worked hours'}, 500\n\n return last_workday.json()\n\n return {'message': 'First use of card, or employee did not start work today'}, 200", "def make_hourly(self,rate,name):\n id = self.find_employee_id(name)\n if id in self.clsf:\n self.emp_dict[id][5] = \"1\"\n print(\"{}{}\".format(name,\" was successfully changed to be an hourly employee\"))\n self.emp_dict[id][8] = rate\n self.classification()\n return self.emp_dict\n else:\n print(\"Error- employee not found\")\n self.employee_data()", "def update(\n self,\n email,\n company_name,\n location,\n job_profile,\n salary,\n username,\n password,\n security_question,\n security_answer,\n notes,\n date_applied,\n status,\n):", "def employee_data(self):\n self.paymethod()\n self.classification()\n for i in self.emp_id:\n if self.clsf[i] == \"Salaried\":\n if self.pymthd[i] == \"Direct Deposit\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][7],self.emp_dict[i][10],\n self.emp_dict[i][11]]\n elif self.pymthd[i] == \"Mailed Check\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][1],self.emp_dict[i][2],\n self.emp_dict[i][3], self.emp_dict[i][4],self.emp_dict[i][7]]\n elif self.clsf[i] == \"Hourly\":\n if self.pymthd[i] == \"Direct Deposit\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][8],self.emp_dict[i][10],\n self.emp_dict[i][11]]\n elif self.pymthd[i] == \"Mailed Check\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][1],self.emp_dict[i][2],\n self.emp_dict[i][3], self.emp_dict[i][4],self.emp_dict[i][8]]\n elif self.clsf[i] == \"Commissioned\":\n if self.pymthd[i] == \"Direct Deposit\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][7],self.emp_dict[i][9],\n self.emp_dict[i][10],self.emp_dict[i][11]]\n elif self.pymthd[i] == \"Mailed Check\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][1],self.emp_dict[i][2],\n self.emp_dict[i][3],self.emp_dict[i][4],self.emp_dict[i][7],self.emp_dict[i][9]]\n else:\n print(\"Error\")\n print(self.emp_data)\n return self.emp_data", "def write(self, vals):\n\n for record in self:\n employee_id = record.env['hr.employee'].browse(record.id)\n\n change_type = change_period = False\n\n if vals.get('contract_type'):\n change_type = True if vals['contract_type'] != employee_id.contract_type else False\n\n if vals.get('contract_period'):\n change_period = True if vals['contract_period'] != employee_id.contract_period else False\n\n if change_type or change_period:\n # _generate_nik parameter is vals\n new_vals = {\n 'company_id': 
record.company_id.id,\n # 'estate_id': record.estate_id.id, extend at estate module\n 'contract_type': vals['contract_type'] if 'contract_type' in vals else record.contract_type,\n 'contract_period': vals['contract_period'] if 'contract_period' in vals else record.contract_period,\n # 'nik_number': record.nik_number,\n 'internship': record.internship,\n 'outsource': record.outsource\n }\n\n vals['nik_number'] = self.generate_nik(new_vals)\n _logger.info(_('Employee %s has new Employee Identity Number %s: ' % (employee_id.name, vals['nik_number'])))\n return super(Employee, self).write(vals)", "def updateEMPStudyData(self, study_id, study_score, web_app_user_id):\n con = self.getMetadataDatabaseConnection()\n con.cursor().callproc('qiime_assets.update_emp_study_data', [study_id, study_score, web_app_user_id])", "def process_employees_salary(self, employees_info: List[List[str]]) -> None:\n pass", "def setData(self,newData):\r\n pass", "def edit_user(self, username, employee, role, status, change_pwd=False, *password):\n self.click(self.user_edit_save_btn)\n self.set_combox_value(role, self.user_role_select)\n self.input_text(employee, self.emp_name_input)\n self.input_text(username, self.user_name_input)\n self.set_combox_value(status, self.user_status_select)\n if change_pwd:\n self.click(self.change_password)\n self.input_text(password, self.user_password_input)\n self.input_text(password, self.user_confirm_password)\n self.click(self.user_edit_save_btn)\n self.wait_unit_el_present(self.user_table)\n Log.info(\"User is edited and saved.\")", "def profile_page(cls, employee_id, logger=None):\n if logger is None:\n logger = cls._logger\n\n database_connection = DatabaseConnection(f\"employees.csv\")\n table = database_connection.table\n employee = Employee(employee_id)\n\n view = table[(table['employee_id']==employee.get_employee_id())]\n logger.log(view)\n\n while True:\n\n choice = input(\n \"Please choose: \"\n \"(1) check data, \"\n \"(2) update first name, \"\n \"(3) update last name, \"\n \"(4) save changes, \"\n \"(5) exit without saving \"\n )\n if choice not in ('1', '2', '3', '4', '5'):\n logger.log(\"Please pick a valid choice\")\n elif choice=='1':\n view = table[(table['employee_id']==employee.get_employee_id())]\n logger.log(view)\n elif choice=='2':\n first_name = input(\"Enter your first name: \")\n employee.set_first_name(first_name)\n elif choice=='3':\n last_name = input(\"Enter your last name: \")\n employee.set_last_name(last_name)\n elif choice=='4':\n table[\n (table['employee_id']==employee.get_employee_id())\n ] = pd.Series(\n {'employee_id': employee.get_employee_id(),\n 'first_name': employee.get_first_name(),\n 'last_name': employee.get_last_name(),\n }\n )\n database_connection.overwrite()\n logger.log(\"Information saved!\")\n else:\n break", "def add_employee(self, emp):\n if emp not in self.employees: \n self.employees.append(emp)", "def test_changedata(self):\n p = model.Person(firstname=\"Tobias\", lastname=\"Thelen\",\n email=\"tthelen@uos.de\", hobbies=[\"singen\",\"springen\",\"fröhlichsein\"])\n id = p.store()\n\n p = model.Person(id=id)\n p['firstname'] = \"Walter\"\n p.store()\n\n p2 = model.Person(id=id)\n self.assertEqual(p2.firstname, \"Walter\")\n self.assertEqual(p2.lastname, \"Thelen\")", "def employee_count(self, employee_count):\n\n self._employee_count = employee_count", "def get_emp_data(self,employee):\n\t\temp = None\n\t\tfind_by = employee.find_elements_by_tag_name\n\t\tif str(type(employee)) != \"<type 'NoneType'>\" and 
main.is_desktop():\n\t\t\t# columns = employee.find_elements_by_tag_name(\"td\")\n\t\t\temp = {\n\t\t\t\t'name': find_by('td')[0].text,\n\t\t\t\t'id': find_by('td')[1].text,\n\t\t\t\t'status': find_by('td')[2].text,\n\t\t\t\t'election': find_by('td')[3].text,\n\t\t\t\t'date_changed': find_by('td')[4].text\n\t\t\t}\n\t\telif str(type(employee)) != \"<type 'NoneType'>\":\n\t\t\temp = {\n\t\t\t\t'name': find_by('div')[2].text,\n\t\t\t\t'id': find_by('div')[3].text[13:],\n\t\t\t\t'status': find_by('div')[4].text[8:], #Fail 4:20p, StaleEl\n\t\t\t\t'election': find_by('div')[5].text[17:], #Fail 4:15p, StaleEl\n\t\t\t\t'date_changed': find_by('div')[6].text[14:]\n\t\t\t}\n\n\t\t# raw_input(str(emp))\n\t\treturn emp", "def add_employee(self, empl):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employee values(default,%s,%s,%s,%s,%s,%s,%s,%s)',\n (empl.name, empl.email, empl.office, empl.research_group, empl.title, empl.internOrExtern,\n empl.active, empl.promotor))\n cursor.execute('SELECT LASTVAL()')\n eid = cursor.fetchone()[0]\n empl.id = eid\n # get id and return updated object\n self.dbconnect.commit()\n except(Exception, self.dbconnect.get_error()) as error:\n self.dbconnect.rollback()\n raise Exception('\\nUnable to save Employee!\\n(%s)' % (error))", "def do_update_data(self, *args):\n print(\"Provide data to update :\")\n id_field = dict()\n id_field['id'] = input(\"Provide id to update :\")\n values = {**id_field, **self.__class__.populate_data()}\n self.connection_obj.update_into_table(**values)\n print(\"Data Update Successful\")", "def update_E(self):", "def edit_employee_hours(employee_id):\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employee = Employee.query.get_or_404(employee_id)\n form = Edit_Hours_Form(obj = employee)\n\n if form.validate_on_submit():\n \n employee.completed = form.completed.data, \n employee.required = form.required.data,\n \n db.session.commit()\n \n flash(f\"{employee.first_name} {employee.last_name} has been saved\", \"success\")\n return redirect(\"/administrator\")\n else:\n\n return render_template(\"/admin/edit_hours.html\", employee = employee, form = form)", "def edit_employee(request, employee_id):\n employee = Employee.objects.get(pk=int(employee_id))\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n\n assert isinstance(employee, Employee)\n assert isinstance(current_employee, Employee)\n\n # if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n # raise PermissionDenied()\n\n if not current_employee.hasAccessTo(employee):\n raise PermissionDenied()\n\n form = EditEmployeeForm(request.user, employee, {\n 'first_name': employee.user.first_name,\n 'last_name': employee.user.last_name,\n 'email': employee.user.email,\n 'manager': employee.manager.id if employee.manager else 0,\n 'language_code': employee.language_code,\n # 'development_plan_type': employee.development_plan_type.id,\n 'is_manager': employee.is_manager\n })\n if 'manager' in form.fields:\n managerQS = Employee.objects.filter(is_manager=True, company__pk=employee.company.pk)\n form.fields['manager'].queryset = managerQS\n # form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter(\n # Q(company__pk=employee.company.pk) | Q(company__isnull=True)\n # )\n is_me = employee.user.pk == request.user.pk\n return TemplateResponse(\n 
request,\n 'mus/edit_employee_form.html',\n {\n 'edit_employee_form': form,\n 'employee_id': employee_id,\n 'me': is_me,\n 'name': employee.user.get_full_name()\n }\n )", "def add_employee(self, employee):\n self.employees.add(employee)", "def add_employee(self, obj):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employee(id, name, email, office, extra_info, picture_location, research_group, '\n 'title, is_external, is_admin, is_active) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);',\n (obj.e_id, obj.name, obj.email, obj.office, obj.extra_info, obj.picture_location, obj.research_group,\n obj.title, obj.is_external, obj.is_admin, obj.is_active))\n\n self.dbconnect.commit()\n return obj\n except:\n self.dbconnect.rollback()\n raise", "def updateEMPSampleData(self, sample_id, sample_score, emp_status, web_app_user_id):\n con = self.getMetadataDatabaseConnection()\n con.cursor().callproc('qiime_assets.update_emp_sample_data', [sample_id, sample_score, emp_status, web_app_user_id])", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def test_update_office(self):\n url = '/api/v1/consultorios/{}/'.format(self.app_client.id)\n\n data = {\n \"hospital\": \"Hospital 2\"\n }\n\n request = self.client.patch(url, data)\n self.assertEqual(request.status_code, status.HTTP_200_OK)", "def update_timesheet(item):\n\tj=json.loads(item)\n\tprint(\"-----------------------garffff---------------------\")\n\tnew_employee=None;\n\ttimesheet=frappe.get_doc(\"Time Sheet\",j[\"name\"])\n\tjarray=[]\n\tfor passed_employee in j['employees']:\n\t\tif 'new' in passed_employee.keys():\n\t\t\t#create employee\n\t\t\tnew_employee=frappe.get_doc({\n\t\t\t\t\"doctype\":\"employee_link_with_time\",\n\t\t\t\t\"employee\":passed_employee['employee']\n\t\t\t});\n\n\t\tjarray.append(passed_employee['employee']);\n\t\tfor employee in timesheet.employees:\n\t\t\tif passed_employee[\"employee\"]==employee.employee:\n\t\t\t\tif \"start\" in passed_employee:\n\t\t\t\t\temployee.start=passed_employee[\"start\"]\n\t\t\t\tif \"end\" in passed_employee:\n\t\t\t\t\temployee.end=passed_employee[\"end\"];\n\tforRemove=[]\n\tfor employee_container in timesheet.employees:\n\t\tif employee_container.employee not in jarray:\n\t\t\tforRemove.append(employee_container)\n\tprint(\"___________REMOVE______________\")\n\tprint(forRemove);\n\tif forRemove:\n\t\tfor remove in forRemove:\n\t\t\ttimesheet.employees.remove(remove)\n\n\tif new_employee is not None:\n\t\ttimesheet.append(\"employees\",new_employee)\n\n\t#handel status\n\ttimesheet.status=j[\"status\"]\n\ttimesheet.save()\n\treturn frappe.get_doc(\"Time Sheet\",j[\"name\"])", "def populate_employees():\n employees = get_employees()\n\n db.session.bulk_save_objects(employees)\n db.session.commit()", "def on_edit(self, dataobj):", "def update_records(self, something):\n print(\"Some logic (not shown) to update database of units\")", "def update_data(self, newData):\r\n self.AllData = newData", "def setUp(self):\n\tself.emp = Employee('Lin',10000)\n\tself.emp2 = Employee('Jun',20000)", "def setUp(self):\n self.salary = 40000\n self.custom_rise = 7500\n self.employee = Employee(\"Carlos\", \"Zapata\", self.salary)", "def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)", "def test_access_employee(self):\n # Employee can't see any SO\n with self.assertRaises(AccessError):\n 
self.order.with_user(self.company_data['default_user_employee']).read()\n # Employee can't edit the SO\n with self.assertRaises(AccessError):\n self.order.with_user(self.company_data['default_user_employee']).write({'team_id': self.company_data['default_sale_team'].id})\n # Employee can't create the SO\n with self.assertRaises(AccessError):\n self.env['sale.order'].with_user(self.company_data['default_user_employee']).create({\n 'partner_id': self.partner_a.id,\n })\n # Employee can't delete the SO\n with self.assertRaises(AccessError):\n self.order.with_user(self.company_data['default_user_employee']).unlink()", "def update(table, id_):\n\n # your code\n key = common.check_for_key(id_,table)\n if key == None:\n ui.print_error_message('Key does not exist')\n else:\n return_inputs = ui.get_inputs(['Name', 'Age'], 'Enter New Values')\n modif_index = key\n\n table[modif_index][NAME] = return_inputs[FIRST_PROP]\n table[modif_index][AGE] = return_inputs[SECOND_PROP]\n data_manager.write_table_to_file('hr/persons.csv', table) \n\n return table", "def edit_register(id):\n add_employee = False\n\n employee = Employee.query.get_or_404(id) #from table\n print('----update 1----')\n form = UpdateForm(obj=employee) #if not 404\n print('----update 2----')\n if form.validate_on_submit():\n employee.email = email=form.email.data\n employee.username=form.username.data\n employee.glad_id=form.glad_id.data\n employee.tel_no=form.tel_no.data\n employee.role_id=form.role_id.data\n employee.password=form.password.data\n\n # UPDATE employee to the database\n print('----update----',employee.role_id)\n db.session.commit()\n flash('You have successfully updated! ')\n\n # # redirect to the login page\n # return redirect(url_for('auth.login'))\n\n # load registration template\n return render_template('auth/register.html', form=form, title='Update')", "def get_employee():\n\n employee_id = get_employee_input_int('Enter employee ID to get the data ')\n employee = db.get_employee(employee_id)\n if not employee:\n print(\"No employee found with id \", employee_id)\n else:\n payscale = db.get_payScale(employee.grade)\n print('DATA:-> {} {} has grade = {} which gives {} per hours\\n'\n .format(employee.first_name, employee.last_name, employee.grade, payscale.salary))", "def modify_user(user_data):\r\n raise NotImplementedError()", "def test_update_record(self):\n pass", "def test_update(self):\n payload = {\n 'name': 'Pecho inclinado',\n 'description': \"New description\",\n 'muscle_group': \"pecho\"\n }\n response = self.client.put(\n '/exercises/{}/'.format(self.exer1.id), data=payload)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(\n Exercise.objects.get(id=self.exer1.id).name, payload['name'])", "def editRecord(self):\n selectedData = self.controller.chooseRecord(\"Enter the record number: \") - 1\n print(self.dto.getRecord()[selectedData].__dict__)\n if self.controller.confirmMsg(\"Do you want to edit this data? 
(y/n): \") == \"y\":\n self.controller.modifyData(self.dto.getRecord()[selectedData])\n print(\"Record edited.\")", "def edit_employee_certifications(employee_id):\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employee = Employee.query.get_or_404(employee_id)\n \n form = Add_Cert_Form(obj = employee)\n \n form.cert.choices = db.session.query(Cert.id , Cert.cert_name).all()\n \n \n if form.validate_on_submit():\n \n cert = Cert.query.get(form.cert.data) \n \n\n if cert.expire:\n received = form.received.data\n year = received.year\n month = received.month\n day = received.day\n\n start_date = datetime(year = year, month = month, day = day)\n change_unit = cert.good_for_unit\n change_time = cert.good_for_time\n \n if change_unit == \"days\": \n delta = timedelta(days = change_time)\n elif change_unit == \"weeks\":\n delta = timedelta(days = change_time * 7)\n elif change_unit == \"months\":\n delta = timedelta(days = change_time * 30)\n else:\n delta = timedelta(days = change_time * 365)\n\n due_date = start_date + delta\n employees = employee_certification(employee_id = employee_id, cert_id = cert.id, received = received, due_date = due_date)\n \n #cert.employees.append(employee))\n #db.session.add(cert)\n #employee.certs.append(dates)\n db.session.add(employees)\n db.session.commit()\n \n flash(f\"{employee.first_name} {employee.last_name} has been saved\", \"success\")\n return redirect(\"/administrator\")\n else:\n\n return render_template(\"/admin/employee_cert.html\", employee = employee, form = form)", "def set_data(self, new_data):\n self.data = new_data", "def setData(self,newData):\r\n self.title.setVal(newData.title),\r\n self.first.setVal(newData.first),\r\n self.middle.setVal(newData.middle),\r\n self.last.setVal(newData.last),\r\n self.suffix.setVal(newData.suffix),\r\n self.phone.setVal(newData.phone),\r\n self.ext.setVal(newData.phoneExt),\r\n self.email.setVal(newData.email),\r\n self.affiliation.setVal(newData.affiliation)\r\n self.fullName.setVal(self.formatName())", "def edit_payee(self, payee_id, new_payee_name):\n # [todo] - add check that new_payee_name is unique\n\n # open a cursor\n cur = self.get_cursor()\n\n edit_payee_statement = \"UPDATE payees \" + \\\n \"SET payee_name='{0}' \".format(new_payee_name) + \\\n \"WHERE payee_id={0}\".format(payee_id)\n\n cur.execute(edit_payee_statement)\n\n # close the cursor\n self.close_cursor()", "def create_employee_from_applicant(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n hr_employee = self.pool.get('hr.employee')\n model_data = self.pool.get('ir.model.data')\n act_window = self.pool.get('ir.actions.act_window')\n emp_id = False\n for applicant in self.browse(cr, uid, ids, context=context):\n address_id = contact_name = False\n if applicant.partner_id:\n address_id = self.pool.get('res.partner').address_get(cr, uid, [applicant.partner_id.id], ['contact'])['contact']\n contact_name = self.pool.get('res.partner').name_get(cr, uid, [applicant.partner_id.id])[0][1]\n if applicant.job_id and (applicant.partner_name or contact_name):\n applicant.job_id.write({'no_of_hired_employee': applicant.job_id.no_of_hired_employee + 1})\n create_ctx = dict(context, mail_broadcast=True)\n\n pes=self.browse(cr,uid,ids)[0]\n coy=pes.partner_name\n\n ##### Susunan Keluarga ayah/ibu #####\n le=self.pool.get('hr_recruit.suskel1')\n 
lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context)\n prod_ids=[] \n for pr in lele:\n prod_ids.append((0,0, {'name':pr.name,'kelamin':pr.kelamin,'kota_id':pr.kota_id.id,'tgl_lahir':pr.tgl_lahir,'type_id':pr.type_id.id,'pekerjaan':pr.pekerjaan,'susunan':pr.susunan}))\n \n ###### Susunan Keluarga Suami/istri #####\n le=self.pool.get('hr_recruit.suskel2')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids1=[] \n for pr in lele:\n prod_ids1.append((0,0, {'susunan':pr.susunan,'name':pr.name,'kelamin':pr.kelamin,'kota_id':pr.kota_id.id,'tgl_lahir':pr.tgl_lahir,'type_id':pr.type_id.id,'pekerjaan':pr.pekerjaan})) \n \n ###### riwayat Pendidikan #######\n le=self.pool.get('hr_recruit.rwt_pend')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids2=[] \n for pr in lele:\n prod_ids2.append((0,0, {'name':pr.name,'jurusan':pr.jurusan.id,'tempat':pr.tempat,'tahun_msk':pr.tahun_msk,'tahun_klr':pr.tahun_klr,'ijazah':pr.ijazah.id})) \n \n ###### bahasa ######\n le=self.pool.get('hr_recruit.bahasa')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids3=[] \n for pr in lele:\n prod_ids3.append((0,0, {'name':pr.name.id,'tulis':pr.tulis.id,'lisan':pr.lisan.id})) \n \n ##### Riwayat Pekerjaan ####\n le=self.pool.get('hr_recruit.rwt_krj')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids4=[] \n for pr in lele:\n prod_ids4.append((0,0, {'no':pr.no,'name':pr.name,'tempat':pr.tempat,'tahun_msk':pr.tahun_msk,'tahun_klr':pr.tahun_klr,'jabatan':pr.jabatan,'gaji':pr.gaji,'alasan':pr.alasan})) \n \n ###### Koneksi Internal #####\n le=self.pool.get('hr_recruit.kon1')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids5=[] \n for pr in lele:\n prod_ids5.append((0,0, {'employee_id':pr.employee_id.name,'alamat':pr.alamat,'job_id':pr.job_id.id,'telepon':pr.telepon})) \n \n ###### Koneksi Eksternal ####\n le=self.pool.get('hr_recruit.kon2')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids6=[]\n for pr in lele: \n prod_ids6.append((0,0, {'name':pr.name,'alamat':pr.alamat,'jabatan':pr.jabatan,'telepon':pr.telepon})) \n\n ####### create Employee ######## \n emp_id = hr_employee.create(cr, uid, {'name': applicant.partner_name or applicant.name,\n 'job_id': applicant.job_id.id,\n 'department_id' : applicant.department_id.id,\n 'address_id2' : applicant.job_id.address_id.id,\n #### informasi Probadi ####\n 'kelamin':applicant.jen_kel,\n 'blood' : applicant.blood,\n 'agama' : applicant.agama_id.id,\n 'birthday' : applicant.tgl_lahir,\n 'place_of_birth' : applicant.kota_id.name,\n 'marital':applicant.status,\n 'sjk_tanggal' : applicant.sjk_tanggal,\n 'mobile_phone':applicant.partner_phone,\n 'country_id' : applicant.country_id.id,\n\n #### Pendidikan ####\n 'type_id':applicant.type_id.id,\n 'bid_id':applicant.bidang_id.id,\n 'jurusan_id':applicant.jurusan_id.id,\n 'pt_id':applicant.pt_id.id,\n 'gelar_id':applicant.gelar_id.id,\n\n #### alamat DOmisili ####\n 'country_id1':applicant.country_id1.id,\n 'prov_id':applicant.prov_id.id,\n 'kab_id' : applicant.kab_id.id,\n 'kec_id':applicant.kec_id.id,\n 'alamat1' : applicant.alamat1,\n 'kodepos' :applicant.kode1,\n 'telp1' : applicant.telp1,\n\n #### kartu identitas ####\n 'jenis_id': applicant.jenis_id,\n 'ktp' : 
applicant.no_id,\n 'tgl_berlaku' : applicant.tgl_berlaku,\n # 'issued_id' : applicant.dikeluarkan.id,\n \n #### Alamat Sesuai KTP #### \n 'country_id2':applicant.country_id2.id,\n 'prov_id2':applicant.prov_id2.id,\n 'kab_id2':applicant.kab_id2.id,\n 'kec_id2':applicant.kec_id2.id,\n 'alamat2' : applicant.alamat2,\n 'kodepos1':applicant.kode2,\n 'telp2' : applicant.telp2,\n \n # 'status': applicant.status,\n #### IDS ####\n 'susunan_kel1_ids' : prod_ids,\n 'susunan_kel2_ids':prod_ids1,\n 'rwt_pend_ids':prod_ids2,\n 'bahasa_ids':prod_ids3,\n 'rwt_krj_ids':prod_ids4,\n 'koneksi1_ids':prod_ids5,\n 'koneksi2_ids':prod_ids6, \n })\n self.write(cr, uid, [applicant.id], {'emp_id': emp_id}, context=context)\n self.pool['hr.job'].message_post(\n cr, uid, [applicant.job_id.id],\n body=_('New Employee %s Hired') % applicant.partner_name if applicant.partner_name else applicant.name,\n subtype=\"hr_recruitment.mt_job_applicant_hired\", context=context)\n else:\n raise osv.except_osv(_('Warning!'), _('You must define an Applied Job and a Contact Name for this applicant.'))\n\n action_model, action_id = model_data.get_object_reference(cr, uid, 'hr', 'open_view_employee_list')\n dict_act_window = act_window.read(cr, uid, [action_id], [])[0]\n if emp_id:\n dict_act_window['res_id'] = emp_id\n dict_act_window['view_mode'] = 'form,tree'\n return dict_act_window", "def test_update__endtoend__3(\n address_book, FieldFactory, UpdateablePersonFactory, browser):\n field_name = FieldFactory(\n address_book, IPerson, 'Bool', u'Ever met').__name__\n UpdateablePersonFactory(address_book, **{field_name: False})\n browser.login('mgr')\n browser.keyword_search(KEYWORD, apply='Update')\n browser.getControl('field').displayValue = ['person -- Ever met']\n browser.getControl('Next').click()\n browser.getControl('yes').click()\n browser.getControl('operation').displayValue = [\n 'replace existing value with new one']\n browser.getControl('Next').click()\n # Update sets the value to 'yes':\n assert '<td>Tester</td><td>yes</td>' in browser.contents_without_whitespace", "def set_active(self, employee_id, active):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('UPDATE employee '\n 'SET is_active = %s '\n 'WHERE id=%s;',\n (active, employee_id))\n self.dbconnect.commit()\n except:\n self.dbconnect.rollback()\n raise", "def assign_employee(id):\r\n check_admin()\r\n\r\n employee = Employee.query.get_or_404(id)\r\n\r\n # prevent admin from being assigned a department or role\r\n if employee.is_admin:\r\n abort(403)\r\n\r\n form = EmployeeAssignForm(obj=employee)\r\n if form.validate_on_submit():\r\n employee.department = form.department.data\r\n employee.role = form.role.data\r\n db.session.add(employee)\r\n db.session.commit()\r\n flash('You have successfully assigned a department and role.')\r\n\r\n # redirect to the roles page\r\n return redirect(url_for('admin.list_employees'))\r\n\r\n return render_template('admin/employees/employee.html',\r\n employee=employee, form=form,\r\n title='Assign Employee')", "def test_new_employee_crud_methods(self):\n response = self.client.get(\n '/employees/', kwargs={'employer_id': self.employee.id})\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(Employee.objects.all()), 1)\n\n # Test that a new employee can be added\n response = self.client.post(\n '/employees/',\n {'name': 'MAdtraxx!!', 'employer': self.employer.id},\n kwargs={'pk': self.employer.id})\n self.assertEqual(response.status_code, 201)\n self.assertEqual(Employee.objects.count(), 2)\n\n # Test 
that employee info may be edited\n response = self.client.put('/employees/1/',\n {'name': 'Ashley',\n 'employer': self.employer.id},\n kwargs={'employer_id': self.employee.id,\n 'pk': self.employee.id})\n self.assertEqual(response.status_code, 200)", "def give_raise(self):\r\n self.salary = 45000", "def reloadData(self):\n self.dto.readFromData()\n print(\"Record reloaded.\")", "def setUp(self):\n self.my_employee = Employee('knight', 'lee', 10000)", "def setUp(self):\n self.employee = Employee('Lucas', 'Guerra', 45000)", "def change_person(self):\n # TODO:\n # if render_person is none: throw error message to click first\n\n info = self.current_person()\n info[\"dob\"] = str(info[\"dob\"])\n # launch module\n self.EditPeople.edit_person(info)\n self.EditPeople.show()", "def updateEMPStudy(self, study_id, study_name, investigation_type, miens_compliant, submit_to_insdc, \n portal_type, study_title, study_alias, pmid, study_abstract, study_description,\n number_samples_collected, number_samples_promised , lab_person,\n lab_person_contact, emp_person, first_contact, most_recent_contact, sample_type, \n has_physical_specimen, has_extracted_data, timeseries, spatial_series,\n principal_investigator, principal_investigator_contact, default_emp_status, funding,\n includes_timeseries):\n con = self.getMetadataDatabaseConnection()\n results = con.cursor().callproc('qiime_assets.emp_study_update', \n [study_id, study_name, investigation_type, miens_compliant, submit_to_insdc, portal_type, \n study_title, study_alias, pmid, study_abstract, study_description,\n number_samples_collected, number_samples_promised , lab_person,\n lab_person_contact, emp_person, first_contact, most_recent_contact, sample_type, \n has_physical_specimen, has_extracted_data, timeseries, spatial_series,\n principal_investigator, principal_investigator_contact, default_emp_status, funding,\n includes_timeseries])", "def save_change(self):\n try:\n self.customer.budget.income = abs(float(self.incomeItem.text()))\n self.incomeItem.setText(f\"{self.customer.budget.income}\")\n fixed_expenses = {\n \"rent\": abs(float(self.listOfExpensesSEK.item(4).text())),\n \"subscription\": abs(float(self.listOfExpensesSEK.item(2).text())),\n \"insurance\": abs(float(self.listOfExpensesSEK.item(3).text())),\n \"others\": abs(float(self.listOfExpensesSEK.item(5).text()))\n }\n variable_expenses = {\n \"food\": abs(float(self.listOfExpensesSEK.item(11).text())),\n \"bills\": abs(float(self.listOfExpensesSEK.item(12).text())),\n \"transportation\": abs(float(self.listOfExpensesSEK.item(13).text())),\n \"hygien\": abs(float(self.listOfExpensesSEK.item(14).text())),\n \"clothes\": abs(float(self.listOfExpensesSEK.item(15).text())),\n \"entertainment\": abs(float(self.listOfExpensesSEK.item(16).text())),\n \"others\": abs(float(self.listOfExpensesSEK.item(17).text()))\n }\n self.customer.budget.set_budget(self.customer.budget.income,\n variable_expenses, fixed_expenses)\n # update instead of set\n DB.update_variable_expenses(self.customer.email, variable_expenses)\n DB.update_fixed_expenses(self.customer.email, fixed_expenses)\n DB.update_income(self.customer.budget.income, self.customer.email)\n total_fix, total_var = self.customer.budget.get_expenses()\n self.listOfExpensesSEK.item(1).setText(total_fix)\n self.listOfExpensesSEK.item(10).setText(total_var)\n self.customer.budget.set_buffert(abs(float(\n self.listOfExpensesSEK.item(20).text()\n )))\n DB.update_buffert(self.customer.email, abs(float(self.listOfExpensesSEK.item(20).text())))\n 
self.label_3.setText(str(self.customer.budget.income -\n self.customer.budget.get_total_expenses())\n )\n except Exception:\n self.popUp.exec_()", "def update(self, bigip, data=None, modify=False):\n self._data['executeAction'] = 'definition'\n super(ApplicationService, self).update(bigip, data=data, modify=modify)", "def update_office_details(cls, office_id):\n\n patch_data = Views.get_data()\n\n cls.check_for_required_fields(fields=['name'], dataDict=patch_data)\n cls.validate_office_name(patch_data['name'])\n office = OfficeModel()\n office_exists = office.get_one(office_id)\n if office_exists is not None:\n ##\n update_data = office.clean_insert_dict(patch_data, False)\n office.update(update_data, office_id)\n\n res = {\"status\": 202, \"data\": office.sub_set()}\n return make_response(jsonify(res), 202) # Accepted\n msg = \"Office with id {} not found\".format(office_id)\n res = jsonify(\n {\"status\": 404, 'error': msg})\n return make_response(res, 404)", "def update(self):\n self.getDbRecord().update()", "def _update(self, course_name: str, newdata: ParseType) -> None:\n\n self.courses[course_name] = newdata", "def updateData(self,d):\n for f in self.fields:\n n = f.name()\n if n in d:\n f.setValue(d[n])", "def _update_internal(self, entity_id, data, commit=True):\n input_data = self.to_model(data)\n self.validate_present(input_data)\n if not input_data:\n raise UnprocessableEntity(\"Can not update using empty data.\")\n entity = db_session.query(self.model).get(entity_id)\n if not entity:\n raise NotFound(\"Could not find any entity with specified parameters.\")\n\n for k, v in input_data.items():\n try:\n setattr(entity, k, v)\n except ValueError as e:\n raise UnprocessableEntity(f\"Could not save value.\", fields=k, what=BAD_VALUE) from e\n\n if commit:\n db_session.commit()\n \n return self.to_obj(entity)", "def add_employee(self, first_name, last_name):\n self.switch_main_menu(\"PIM\")\n self.click_menu(\"Add Employee\")\n self.pim = AddEmployee(self.driver)\n self.pim.add_user_employee(first_name, last_name)", "def create_employee(self,personal_identity):\r\n new_emp = Employee(*personal_identity)\r\n registration_str = new_emp.get_registration_str()\r\n\r\n return_value = self.save_object_to_DB(\"employee\",registration_str)\r\n return return_value", "def update(self, identity, data=None, record=None, **kwargs):\n record.custom_fields = data.get(\"custom_fields\", {})", "def show_employee_edit_form(self, staff_ob, number):\n\n print(self.LENGTH_STAR * \"*\")\n print(f\"EDIT {staff_ob.role.upper()}\\n\")\n\n if number == 1:\n print(self.LENGTH_STAR * \"*\")\n print(f\"You are changing {staff_ob.name}´s address\\nThe current address is: {staff_ob.address}\")\n new_address = self.get_address()\n while new_address == False:\n new_address = self.get_address()\n self.check_action_edit_form(staff_ob, number, new_address)\n\n elif number == 2:\n print(self.LENGTH_STAR * \"*\")\n print(f\"You are changing {staff_ob.name}´s mobile number\\nThe current mobile number is: {staff_ob.mobile_number}\")\n new_mobile_number = self.get_mobile_number()\n while new_mobile_number == False:\n new_mobile_number = self.get_mobile_number\n self.check_action_edit_form(staff_ob, number, new_mobile_number)\n \n elif number == 3:\n print(self.LENGTH_STAR * \"*\")\n print(f\"You are changing {staff_ob.name}´s email\\nThe current the email is: {staff_ob.email}\")\n new_email = self.get_email()\n while new_email == False:\n new_email = self.get_email()\n self.check_action_edit_form(staff_ob, number, 
new_email)\n \n print(f\"\\n{staff_ob.name}'s information successfully changed!\\n\")\n \n return", "def edit_person():\n # get person name from user\n responses = accept_inputs([\"Person's name\"])\n person_name = responses[\"Person's name\"]\n # check for existence\n results = query_with_results(\"select * from person where name = ?\", [person_name])\n if len(results) == 0:\n print(\"No person found with name '%s'.\" % person_name)\n return\n else:\n # get id of person\n id = query_with_results(\"select id from person where name = ?\", [person_name])[0][0]\n # the task exists, so ask the user for the new description\n responses = accept_inputs([\"New name\"])\n # update db\n query_no_results(\"update person set name = ? where id = ?\", [responses[\"New name\"], id])\n print(\"Person with old name '%s' changed to '%s'.\" % (person_name, responses[\"New name\"]))", "def mail_method(self,address,city,state,zip,name):\n id = self.find_employee_id(name)\n if id in self.pymthd:\n self.pymthd[id] = \"Mailed Check\"\n print(\"{}{}\".format(name, \" was successfully changed to Mailed Check\"))\n self.emp_dict[id][1] = address\n self.emp_dict[id][2] = city\n self.emp_dict[id][3] = state\n self.emp_dict[id][4] = zip\n self.emp_dict[id][6] = \"2\"\n return self.pymthd, self.emp_dict\n else:\n print(\"Error- employee not found\")\n self.employee_data()", "def edit_record(self, record):\r\n self.record.editObject(record, id=record['id'])", "def updateJobData(self, jobName):\n self.jobRow.setText(jobName)\n self.updateSelectedLayer()", "def alterar_cliente(self, ID, nome, sobrenome, tel_list, email_list, empresa):\r\n if nome != '':\r\n print(f'Alterando nome para {nome}')\r\n self.clientes[ID].nome = nome.title()\r\n elif sobrenome != '':\r\n print(f'Alterando sobrenome para {sobrenome}')\r\n self.clientes[ID].sobrenome = sobrenome.title()\r\n elif len(tel_list) > 0:\r\n print(f'Alterando telefones para {tel_list}')\r\n self.clientes[ID].tel_list = tel_list\r\n elif len(email_list) > 0:\r\n print(f'Alterando email para {email_list}')\r\n self.clientes[ID].email_list = email_list\r\n elif empresa != '':\r\n print(f'Alterando empresa para {empresa}')\r\n self.clientes[ID].empresa = empresa.title()", "def update_record(self):\n # print(self.get_hours_diff())\n conn = sqlite3.connect(\"LmtPilots.db\")\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM Pilots_hours\")\n rowids = [row[0] for row in cur.execute('SELECT rowid FROM Pilots_hours')]\n cur.executemany('UPDATE Pilots_hours SET total=? 
WHERE id=?', zip(self.get_hours_diff(), rowids))\n conn.commit()\n self.db_model2.select()\n # print(self.get_tot_hours())", "def test_edit_office(self):\n access_token = self.generate_admin_token()\n self.create_office()\n update_data = {\n \"name\":\"Office of the president\",\n \"Type\": \"federal\"\n }\n response=self.client.patch(\n \"api/v2/admin/offices/1\",\n data=json.dumps(update_data),\n headers={\"content-type\":\"application/json\",\n \"Authorization\": f\"Bearer {access_token}\"}\n )\n \n self.assertEqual(response.status_code, 200)", "def main():\n # create a list of test employees and managers\n testList = [\n {'type': 'employee', 'firstName': 'Mickey', 'lastName': 'Mouse', 'SSN': '100-12-3456', 'salary': 1500.00},\n {'type': 'manager', 'firstName': 'Walt', 'lastName': 'Disney', 'SSN': '100-00-0000', 'salary': 5000.00,\n 'title': 'Head Of Disneyland', 'yearBonus': 1000.00},\n {'type': 'employee', 'firstName': 'Donald', 'lastName': 'Duck', 'SSN': '100-65-4321', 'salary': 1000.00},\n {'type': 'manager', 'firstName': 'Minnie', 'lastName': 'Mouse', 'SSN': '999-99-999', 'salary': 10000.00,\n 'title': 'Head Of Mouse HouseHold', 'yearBonus': 15000.00},\n {'type': 'manager', 'firstName': 'Daisy', 'lastName': 'Duck', 'SSN': '100-65-4321', 'salary': 12000.00,\n 'title': 'Head Of Duck HouseHold', 'yearBonus': 10000.00}]\n\n # Define percentRaise (0.1 == 10%)\n percentRaise = 0.1\n\n # Create Employees and Managers Object using the Test data\n employeeList = loadEmployees(testList)\n\n # Sort employee List, which will ustilize Employee's __lt__ and __eq__ methods\n employeeList.sort()\n\n # Loop over Employee and Manager Objects\n print(\"Employees and Manager should be sorted by last name, then first\\n\")\n for employee in employeeList:\n if type(employee) == Manager:\n print(\"Manager:\")\n else:\n print(\"Employee:\")\n # Print Employee or Manager\n print(employee)\n # Give Raise to Employee or Manager\n employee.giveRaise(percentRaise)\n # Print New Salary\n print(\"With %.2f%% Raise, Salary: $%.2f\\n\" % (percentRaise * 100, employee.salary))\n\n # Employee docStrings\n print(\"\\nEmployee docstring for each method\")\n print(\"Employee.__doc__=\" + Employee.__doc__)\n print(\"Employee.__init__.__doc__=\" + Employee.__init__.__doc__)\n print(\"Employee.giveRaise.__doc__=\" + Employee.giveRaise.__doc__)\n print(\"Employee.__str__.__doc__=\" + Employee.__str__.__doc__)\n print(\"Employee.__eq__.__doc__=\" + Employee.__eq__.__doc__)\n print(\"Employee.__lt__.__doc__=\" + Employee.__lt__.__doc__)\n\n print(\"\\nManger docstring for each method\")\n print(\n \"Since Manager inherits from Employee, several of the methods ('giveRaise', '__eq__' and '__lt__') and the corresponding docstring will originate from the Employee class\\n\")\n print(\"Manager.__doc__=\" + Manager.__doc__)\n print(\"Manager.__init__.__doc__=\" + Manager.__init__.__doc__)\n print(\"Manager.giveRaise.__doc__=\" + Manager.giveRaise.__doc__)\n print(\"Manager.__str__.__doc__=\" + Manager.__str__.__doc__)\n print(\"Manager.__eq__.__doc__=\" + Manager.__eq__.__doc__)\n print(\"Manager.__lt__.__doc__=\" + Manager.__lt__.__doc__)", "def update(self, request, pk=None):\n exp = Experiment.objects.get(pk=pk)\n serializer = ExperimentSerializer(exp, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return send_response(request.method, serializer)", "def set_data(self, name, new_data):\n if not self.writable:\n return None\n\n try:\n new_data = np.array(new_data) #Convert to array data\n except Exception:\n 
raise Exception('set_data failed to convert type %s data to array'%type(new_data))\n\n \n if not isinstance(name, basestring):\n raise TypeError('set_data() can only accept string keys.')\n \n \n if name in self._stringmap:\n raise AttributeError('%s cannot correspond to a string name already present in \\\n in the underlying dataframe.'%name)\n \n if name in self._reserved:\n raise NameError('%s conflicts with pandasplotdata reserved names: \"%s\"'%(name, ','.join(self._reserved)))\n \n \n if name in self._extras:\n self.data_changed = {'changed':[name]} \n else:\n self.data_changed = {'added':[name]} \n\n self._extras[name]=new_data\n\n return name #THIS MUST BE HERE (why?)", "def edit_employee_locations(employee_id):\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employee = Employee.query.get_or_404(employee_id)\n \n form = Add_Loc_Form(obj = employee)\n \n form.location.choices = db.session.query(Location.id, Location.site_name).all()\n \n \n if form.validate_on_submit():\n \n location = Location.query.get(form.location.data) \n employee.locations.append(location)\n db.session.add(employee)\n \n db.session.commit()\n\n \n\n flash(f\"{employee.first_name} {employee.last_name} has been saved\", \"success\")\n return redirect(\"/administrator\")\n else:\n \n return render_template(\"/admin/employee_cert.html\", employee = employee, form = form)", "def mark_attendance(employee):\r\n # loads date from computer\r\n today = datetime.datetime.now()\r\n mark = today.strftime(\"%d/%m/%Y %H:%M\")\r\n # adds to attendance list in object\r\n employee.attendance.append(mark)\r\n return employee.attendance", "def update_meal():" ]
[ "0.7302678", "0.718177", "0.7112663", "0.7027216", "0.7005247", "0.68823713", "0.6850561", "0.67905223", "0.6775437", "0.6432232", "0.6327525", "0.6219686", "0.61889017", "0.6159903", "0.61581737", "0.6071851", "0.60707927", "0.597288", "0.5945182", "0.58981615", "0.5867029", "0.5863868", "0.58178544", "0.5784019", "0.5756484", "0.57548106", "0.5739588", "0.5733408", "0.5715754", "0.57060283", "0.57011396", "0.56917787", "0.5688112", "0.5685853", "0.56651855", "0.56519413", "0.565176", "0.5633432", "0.56097513", "0.5600553", "0.5595472", "0.5595472", "0.5595472", "0.5595472", "0.55362713", "0.5534443", "0.55148166", "0.5476295", "0.54522896", "0.54396933", "0.54275435", "0.5414082", "0.5396589", "0.53875476", "0.538521", "0.5382033", "0.5381409", "0.53686655", "0.53461987", "0.5333831", "0.53239274", "0.5318164", "0.5314957", "0.53141963", "0.53059405", "0.5303647", "0.53034645", "0.52939475", "0.5265239", "0.5264362", "0.5262535", "0.5260386", "0.52503985", "0.5237464", "0.5237425", "0.5232367", "0.5230321", "0.5223314", "0.5223081", "0.52135104", "0.52011055", "0.52006537", "0.51978284", "0.51945686", "0.51868", "0.51858", "0.51808107", "0.517457", "0.51696897", "0.51688343", "0.51683235", "0.5157271", "0.51554894", "0.51533955", "0.51474875", "0.5145731", "0.5141457", "0.5140933", "0.514042", "0.5139971" ]
0.7703579
0
Get all the projects of an employee. IMPORTANT: not all fields will be completed, only the fields in the project table and the activeYears field.
def get_employeeProjects(self, id):
    from Project import Project
    cursor = self.dbconnect.get_cursor()
    cursor.execute('select project from projectpromotor where employee=%s', (id,))
    projectsId = list()
    for row in cursor:
        projectsId.append(row[0])
    projects = list()
    for projId in projectsId:
        cursor.execute('select * from project where projectID=%s', (projId,))
        # returns exactly one row from the table
        row = cursor.fetchone()
        project = Project(row[0], row[1], row[2], row[3])
        cursor.execute('select year from projectYearConnection where projectID=%s', (projId,))
        years = list()
        for row in cursor:
            years.append(row[0])
        project.activeYear = years
        projects.append(project)
    return projects
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_projects():\n if current_user.get_id() is None:\n return\n with database.engine.begin() as connection:\n result = connection.execute(select(\n [models.projects.c.project_id, models.projects.c.name, models.projects.c.path, models.projects.c.creation_date, models.projects.c.user_id, func.count(models.objects.c.object_id).label('object_count')])\n .select_from(models.projects.outerjoin(models.objects))\n .where(and_(models.projects.c.active == True, models.projects.c.user_id == current_user.id))\n .group_by(models.projects.c.project_id)\n .order_by(models.projects.c.project_id))\n projects = [dict(row) for row in result]\n for project in projects:\n user = models.User.query.filter_by(\n id=project['user_id']).first()\n if user:\n project['email'] = user.email\n return projects", "def get_projects(self):\n rps = self.start_date\n\n return Project.objects.filter(\n Q(active=True)\n & Q(\n Q(start_date__lte=rps)\n | Q(\n Q(start_date__gte=rps)\n & Q(start_date__lte=datetime.datetime.now().date())\n )\n | Q(start_date__isnull=True)\n )\n & Q(\n Q(end_date__gte=rps)\n | Q(end_date__isnull=True)\n )\n )", "def get_projects(self):\n res = self.conn.cursor().execute(\"SELECT * FROM projects\")\n return res.fetchall()", "def get_projects(self, *criterion):\n from wkcdd.models.helpers import get_project_list\n return get_project_list([self.id], *criterion)", "def get_projects():\n return Project.query.all()", "def selectable_projects():\n\n db = current.db\n s3db = current.s3db\n\n # Lookup projects with provider self-registration\n ptable = s3db.project_project\n ttable = s3db.project_project_tag\n join = ttable.on((ttable.project_id == ptable.id) & \\\n (ttable.tag == \"APPLY\") & \\\n (ttable.value == \"Y\") & \\\n (ttable.deleted == False))\n query = (ptable.deleted == False)\n rows = db(query).select(ptable.id,\n ptable.name,\n join = join,\n )\n projects = {row.id: row.name for row in rows}\n return projects", "def get_projects(self, include_stats, is_active_val=None):\n\n # read all kinds of project info and computed counts from the db\n # into a pandas data frame\n projects_df = self._read_projects_df_from_db(\n include_stats=include_stats)\n\n # if an active value has been provided, look only at project records\n # that have that active value. NB this has to be a test against None,\n # not against \"false-ish\" (if not is_active_val)\n if is_active_val is not None:\n is_active_val_mask = projects_df[p.IS_ACTIVE_KEY] == is_active_val\n filtered_df = projects_df.loc[is_active_val_mask]\n projects_df = filtered_df\n\n if include_stats:\n # cut stats columns out into own df (w same index as projects one)\n stats_keys = p.get_computed_stats_keys()\n stats_df = projects_df[stats_keys].copy()\n projects_df = projects_df.drop(stats_keys, axis=1)\n\n # within computed stats columns (ONLY--does not apply to\n # descriptive columns from the project table, where None is\n # a real, non-numeric value), NaN and None (which pandas treats as\n # interchangeable :-| ) should be converted to zero. Everything\n # else should be cast to an integer; for some weird reason pandas\n # is pulling in counts as floats\n stats_df = stats_df.fillna(0).astype(int)\n\n stats_dict = stats_df.to_dict(orient='index')\n\n result = []\n # NB: *dataframe*'s to_dict automatically converts numpy data types\n # (e.g., numpy.bool_, numpy.int64) to appropriate python-native data\n # types, but *series* to_dict does NOT do this automatic conversion\n # (at least, as of this writing). 
Be cautious if refactoring the below\n projects_dict = projects_df.to_dict(orient='index')\n for k, v in projects_dict.items():\n if include_stats:\n v[p.COMPUTED_STATS_KEY] = stats_dict[k]\n result.append(p.Project.from_dict(v))\n\n return result", "def _get_open_projects_info():\n projects = Project.objects.filter(project_open=True).order_by(\"created_at\")\n projects_sum_hours = []\n for project in projects:\n time_entries_pro_project = TimeEntry.objects.filter(project=project)\n used_hours = _sum_hours(time_entries_pro_project)\n hours_percent = _calculate_hours_percent(used_hours, project.stimated_hours)\n projects_sum_hours.append(\n {\n \"hours_percent_number\": hours_percent,\n \"hours_percent\": f\"{hours_percent}%\",\n \"worked_hours\": used_hours,\n \"project\": project,\n }\n )\n return projects_sum_hours", "def db_projects():\n return [{\"name\": \"IT\"}, {\"name\": \"Financial\"}, {\"name\": \"Failed\"}]", "def get_all_projects(engine): \n # Query db\n# sql = (\"SELECT a.project_id, \"\n# \" b.o_number, \"\n# \" a.project_name, \"\n# \" a.project_description \"\n# \"FROM nivadatabase.projects a, \"\n# \" nivadatabase.projects_o_numbers b \"\n# \"WHERE a.project_id = b.project_id \"\n# \"ORDER BY a.project_id\")\n sql = (\"SELECT project_id, \"\n \" project_name, \"\n \" project_description \"\n \"FROM nivadatabase.projects \"\n \"ORDER BY project_id\")\n df = pd.read_sql(sql, engine)\n\n return df", "def get_all_projects(self, org):\n return [proj for proj in Project.objects.filter(org=org)]", "def projects(self, request, pk=None):\n\n obj = self.get_object()\n try:\n query = models.Project.objects.filter(\n subject=obj.subject,\n assign=obj\n )\n serializer = self.get_serializer(query, many=True)\n\n id = self.request.query_params.get('id')\n\n if id:\n query = get_object_or_404(\n models.Project,\n id=id,\n assign=obj\n )\n return self.filtering(request, query)\n\n return Response(serializer.data)\n except:\n raise except_handler.ActionDecor()", "def get_projects(session):\n cursuses = [1, 21] # cursus ids from which to get the projects\n project_names = []\n\n for cursus in cursuses:\n # Get all the projects from 1 cursus, very slow process because projects endpoint contains\n # a lot of information\n projects = get_all_pages(session, f'/cursus/{cursus}/projects', 100, {'filter[exam]': False})\n for project in projects:\n # Create dictionary containing project id and project name ans set in bigger dict\n project_names.append({'id': project['id'], 'name': project['name']})\n\n return project_names", "def active_projects(self):\n return self.projects.filter(active=True)", "def getprojects(self):\n resp = self.conn.request('GET', self.URLS['allprojects'], dict(api_key=self.api_key))\n data = resp.data.decode('utf-8')\n jdata = json.loads(data)['projects']\n # Convert nested JSON documents\n for project_index in range(len(jdata)):\n for field in ('options_json', 'templates_json'):\n jdata[project_index][field] = json.loads(jdata[project_index][field])\n # Pass project details dictionaries to constructors, return array\n return [PhProject(self, project) for project in jdata]", "def get_projects(self):\n session = self.session_factory()\n results = [row.project for row in session.query(PipelineRun.project.distinct().label('project')).all()]\n session.close()\n return results", "def test_get_projects_expanded(self):\n pass", "def _get_projects(current_project_name):\n projects = []\n\n unique_project_changes = Change.objects.order_by().values(\n 'project_name').distinct()\n for 
change in unique_project_changes:\n projects.append(change['project_name'])\n\n # sort alphabetically\n projects.sort()\n\n # insert 'all' option as it should be present always\n projects.insert(0, PROJECT_ALL)\n\n # if current_project_name is valid, make it the first element in list so\n # that it shows up as selected in project choice drop down\n if current_project_name != PROJECT_ALL and current_project_name in projects:\n projects.remove(current_project_name)\n projects.insert(0, current_project_name)\n elif current_project_name != PROJECT_ALL:\n logging.error(\"Currently selected project %s not found in any changes.\"\n \" Removing from list.\", current_project_name)\n logging.debug(\"Returning list of projects: %r\", projects)\n return projects", "def get_projects(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/projects\").json()", "def projects(self):\r\n return p.Projects(self)", "def get_projects(selection):\n project=[]\n\n # project with keyname\n\n if 'project' in selection.facets:\n project+=selection.facets['project']\n\n\n # project without keyname\n\n # WARNING\n #\n # The code below uses sdinference and query the database to retrieve ESGF parameters.\n # Doing those things here may raise circular dependencies\n # as well as making the whole thing very complex.\n #\n # We do this to make this syntax work (i.e. project value without key)\n # synda search GeoMIP\n #\n # Note that this syntax always works (i.e. load the project level default file), even without this code.\n # synda search project=GeoMIP\n #\n #\n pending_projects=sdearlystreamutils.get_facet_values_early([selection.facets],'project',extract_item=True) # project without keyname or project as part of an identifier.\n\n li=pending_projects+project\n\n li=list(set(li)) # remove duplicate\n\n return li", "def get_all_project_records():\r\n records = flask.request.db_api.get_all_project_record()\r\n return flask.jsonify(records=records)", "def getProjectsQueryForEvalForOrgs(org_keys):\n query = getProjectsQueryForOrgs(org_keys)\n query.filter(\n 'status IN', [project_model.STATUS_ACCEPTED, 'failed', 'completed'])\n return query", "def get_projects(self):\n projects = []\n page = 1\n while not len(projects) % 100:\n projects += self._get('/projects?{0}'.format(urllib.urlencode({'per_page': 100, 'page': page})))\n if not projects:\n break\n page += 1\n return projects", "def get_projects_of_user(self, user_id):\n res = self.conn.cursor().execute(\"\"\"SELECT * FROM projects p JOIN users_projects up \n ON p.id = up.project_id \n WHERE owner=? 
OR up.user_id=?\n GROUP BY p.id\n ORDER BY last_update DESC\"\"\", (user_id, user_id,))\n return res.fetchall()", "def project_list(cursor):\n query = \"SELECT * FROM projects\"\n try:\n cursor.execute(query, {},)\n except Exception as e:\n on_error(e)\n else:\n projects = cursor.fetchall()\n raise Return((projects, None))", "def query_project(self, project_query_options):\n\n query = \"select * from project where \"\n row_names = [\"Proj_ID\", \"Cus_ID\", \"Emp_ID\", \"Proj_Date\",\n \"Proj_Descrpt\", \"Proj_EstDateSt\", \"Proj_EstDateEnd\",\n \"Proj_EstBudget\", \"Proj_ActDateSt\",\n \"Proj_ActDateEnd\", \"Proj_ActCost\"]\n\n entries = project_query_options\n options_index = []\n arguments = []\n\n index = 0\n for item in entries:\n if item is not None:\n arguments.append(item)\n options_index.append(index)\n index += 1\n\n count = 0\n for arg in arguments:\n if count == 0:\n query = query + \"{}='{}' \".format(\n row_names[options_index[count]],\n arg)\n else:\n query = query + \"and {}='{}' \".format(\n row_names[options_index[count]],\n arg)\n count += 1\n\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)", "def get_projects(cls):\n projects = []\n for project in Project.select():\n project_dict = {\n \"id\": project.id,\n \"title\": project.title,\n \"link\": project.link,\n \"description\": project.description\n }\n projects.append(project_dict)\n return projects", "def get_public_projects_query():\n return Q(access_policy=AccessPolicy.OPEN)", "def get_reanalysis_projects_by_accession(self, accession):\n request_url = self.api_base_url + \"projects/reanalysis/\" + accession\n headers = {\"Accept\": \"application/JSON\"}\n response = Util.get_api_call(request_url, headers)\n return response.json()", "def projects(self):\n sql = \"\"\"SELECT project\n FROM barcodes.sample\n LEFT JOIN barcodes.project_sample_sets USING (sample_set_id)\n LEFT JOIN barcodes.project USING (project_id)\n WHERE sample_id = %s\n UNION\n SELECT project\n FROM barcodes.project_samples\n LEFT JOIN barcodes.project USING (project_id)\n WHERE sample_id = %s\n \"\"\"\n with pm.sql.TRN:\n pm.sql.TRN.add(sql, [self.id, self.id])\n projects = pm.sql.TRN.execute_fetchflatten()\n return None if not projects else projects", "def get_all_project_record(deleted=False):\n session = get_session()\n query = session.query(models.ProjectAccountRecord).\\\n filter_by(deleted=deleted).\\\n all()\n\n return query", "def project_list(ctx, parent_project_id, output_format, columns):\n data = ctx.obj.get_projects(parent_project_id=parent_project_id)\n if output_format == 'table':\n column_names = columns.split(',')\n output_table(column_names, data['project'])\n elif output_format == 'json':\n output_json_data(data)", "def getProjects(self):\n catalog = plone.api.portal.get_tool('portal_catalog')\n path = '{}/projects'.format('/'.join(plone.api.portal.get().getPhysicalPath()))\n query = dict(portal_type='Project', sort_on='sortable_title', path=path)\n result = list()\n for brain in catalog(**query):\n result.append((brain.getId, brain.Title))\n return result", "def get_projects(self):\n return self.jira.projects()", "def get_project_list(self, dummy_project):\n # TODO: domain scope 403 is probably to do with faulty keystone policy config -- revise?\n if not self._projects:\n self._projects = self._get_keystone_client(dummy_project).projects.list()\n\n return self._projects", "def open_projects(request):\n return 
Project.objects.prefetch_related('task_set').filter(user=request.user, open=True)", "def do_projects(self, arg):\n args = shlex.split(arg)\n limit = 10\n from_date = to_date = ''\n if args:\n limit = 0\n try:\n from_date, to_date = helpers.parse_date_parameters(args)\n except ValueError, msg:\n print(msg)\n return\n projects = self.db.get_projects_with_activity_field(\n from_date, to_date, limit=limit)\n refined = map(lambda x: [\n x['pid'], x['name'],\n '[Active]' if x['active'] else '[closed]',\n datetime.datetime.strftime(x['created'], '%c').decode('utf8'),\n x['description']], projects)\n print(tabulate(refined, ['ID', 'Project', 'Activity', 'Created',\n 'Description']))", "def get_credentialed_projects_query(user):\n dua_signatures = DUASignature.objects.filter(user=user)\n\n completed_training = (\n Training.objects.get_valid()\n .filter(user=user)\n .values_list(\"training_type\")\n )\n not_completed_training = TrainingType.objects.exclude(pk__in=completed_training)\n required_training_complete = ~Q(required_trainings__in=not_completed_training)\n\n accepted_data_access_requests = DataAccessRequest.objects.filter(\n requester=user, status=DataAccessRequest.ACCEPT_REQUEST_VALUE\n )\n\n contributor_review_with_access = Q(\n access_policy=AccessPolicy.CONTRIBUTOR_REVIEW\n ) & Q(data_access_requests__in=accepted_data_access_requests)\n\n credentialed_with_dua_signed = Q(\n access_policy=AccessPolicy.CREDENTIALED\n ) & Q(duasignature__in=dua_signatures)\n\n query = required_training_complete & (\n contributor_review_with_access | credentialed_with_dua_signed\n )\n return query", "def _get_projects(project_ids):\n if _ALL in project_ids:\n return projects_lib.get_all()\n return projects_lib.get_selective(project_ids)", "def get_projects_accessible_through_events(user):\n events_all = Event.objects.filter(Q(host=user) | Q(participants__user=user))\n\n active_events = set(events_all.filter(end_date__gte=datetime.now()))\n\n accessible_datasets = EventDataset.objects.filter(event__in=active_events, is_active=True)\n\n accessible_projects_ids = []\n for event_dataset in accessible_datasets:\n if has_access_to_event_dataset(user, event_dataset):\n accessible_projects_ids.append(event_dataset.dataset.id)\n\n query = Q(id__in=accessible_projects_ids)\n return query", "def get_created_projects(self):\n project_ouessant1 = Project.objects.get(name='Ouessant Tidal Power Phase I')\n project_ouessant2 = Project.objects.get(name='Ouessant Tidal Power Phase II')\n project_liaoning = Project.objects.get(\n name='Liaoning Linghai China Resource Power Wind Power Wind Farm'\n )\n return [project_ouessant1, project_ouessant2, project_liaoning]", "def test_returns_all_projects_if_difficulty_set_to_all(self):\n # Arrange\n self.test_project_2.private = False\n # Set difficulty of test_project_2 to easy.\n self.test_project_2.difficulty = ProjectDifficulty.EASY.value\n self.test_project_2.save()\n self.test_project_1.difficulty = ProjectDifficulty.MODERATE.value\n self.test_project_1.save()\n test_project_4 = Project.clone(self.test_project_2.id, self.test_author.id)\n test_project_4.status = ProjectStatus.PUBLISHED.value\n test_project_4.difficulty = ProjectDifficulty.CHALLENGING.value\n test_project_4.save()\n # Act\n response = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"difficulty\": \"ALL\"},\n )\n # Assert\n self.assertEqual(response.status_code, 200)\n # User is only permitted for test_project 1, 2 and 4, since test_project_3 is DRAFT.\n 
self.assertEqual(len(response.json[\"results\"]), 3)\n self.assertNotIn(\n self.test_project_3.id, [i[\"projectId\"] for i in response.json[\"results\"]]\n )", "def getProjectsForOrgs(org_keys, limit=1000):\n q = getProjectsQueryForOrgs(org_keys)\n return q.fetch(limit)", "def projects_view(request):\n\n # The projects to be displayed. Only the ones in which the logged in user is involved\n projects = request.user.projets.all().order_by('name')\n return render(request, 'projects.html', locals())", "def _get_projects(filters):\n # First order the objects, so separate that out\n orders_query = [o for o in filters if o['type']=='order']\n # Filter objects next, so separate those out\n filters_query = [f for f in filters if f['type']=='filter']\n\n projects = Project.objects.all()\n # We need a dictonary to pass to Django's filter function\n query_dict = {}\n # Order the projects based on the ordering queries\n for orders in orders_query:\n projects = projects.order_by(orders['property'])\n # create the dictonary based on the filtering queries\n for filters in filters_query:\n # First, if we want to filter by user, find the user\n if filters['property'] =='user':\n try:\n user_p = UserProfile.objects.get(email=filters['value'])\n query_dict[filters['property']] = user_p\n except UserProfile.DoesNotExist:\n raise Http404(\"User does not exist\")\n # Second, if the filter is by tags, change the query phrase\n # to 'tags__tag_name' - this is because tags is a ManyToManyField\n # and we want to search by the tag_name property of Tag objects\n elif filters['property'] == 'tags':\n filters['property'] = 'tags__tag_name'\n query_dict[filters['property']] = filters['value']\n else:\n # Make a dictionary, property: value, and you can pass it to filter fn\n query_dict[filters['property']] = filters['value']\n projects = projects.filter(**query_dict)\n return projects", "def get_accessible_projects(user):\n query = Q(deprecated_files=False)\n\n query &= get_public_projects_query()\n\n if user.is_authenticated:\n query |= get_restricted_projects_query(user)\n\n if user.is_credentialed:\n query |= get_credentialed_projects_query(user)\n\n query |= get_projects_accessible_through_events(user)\n\n return PublishedProject.objects.filter(query).distinct()", "def projects(request):\n return Project.objects.prefetch_related('task_set').filter(user=request.user)", "def get_all_projects(self, scope):\n url = \"{0}/{1}/{2}\".format(self.keystone_server_url, DEFAULT_KEYSTONE_API_VERSION, \"projects\")\n headers = {'X-Auth-Token': scope.auth_token}\n try:\n r = self._make_request_with_auth_fallback(url, headers)\n return r['projects']\n\n except Exception as e:\n self.warning('Unable to get projects: %s', e)\n raise e\n\n return None", "def test_get_projects_returns_projects(fc: fetcher.Fetcher):\n projects = fc.get_projects()\n assert isinstance(projects, list)\n assert isinstance(projects[0], models.Project)", "def get_projects(self, _is_simple=False):\n req_url = f\"{self.url}/projects\"\n if _is_simple:\n req_url += \"?simple=true\"\n ret = requests.get(req_url, headers = self.req_header)\n return ret.json()", "def find_projects(self, project_name: Optional[str] = None) -> List[Project]:\n with self.table_access_condition:\n conn = self._get_connection()\n c = conn.cursor()\n c.execute(\n \"\"\"\n SELECT * from projects\n WHERE (?1 IS NULL OR project_name = ?1)\n \"\"\",\n (project_name,),\n )\n rows = c.fetchall()\n return [\n Project(self, str(r[\"project_id\"]), row=r, _used_new_call=True)\n for r in rows\n 
]", "def project_list(self):\n try:\n ids = self.request[api.DATA][api.DATA][\"ids\"]\n return self._get_keystone_projects(ids)\n except Exception as e:\n LOG.exception(\"Error occurred: %s\" % e)", "def test_get_projects_filters(fc: fetcher.Fetcher, test_project_name):\n projects = fc.get_projects(test_project_name)\n assert isinstance(projects, list)\n assert len(projects) == 1\n assert projects[0].name == test_project_name", "def test_get_projects(self):\n pass", "def projects():\n response = jsonify(projects_service.get_top_level_projects_ids())\n return response", "def open_projects_user(user):\n return Project.objects.prefetch_related('task_set').filter(user=user, open=True)", "def all(cls):\n projects_url = 'https://www.pivotaltracker.com/services/v5/projects'\n root = _perform_pivotal_get(projects_url)\n if root is not None:\n return [Project.from_json(project_node) for project_node in root]", "def get_create_projects(target, proposal_ref, proposal_code='lb'):\n\n # Note that in the loader this is based on information in the PROPOSALS and VISITS files\n # TODO Multiple Visits can be defined in a file apparently - future improvement.\n # TODO NB LIne above in delete_users - redundant if using ISPYB??.\n # For the online loader it comes from the proposal_ref\n\n projects = []\n # The first word is the ISPY proposal/visit name that is used as the title of the project.\n # It can be set to OPEN in which case there are no users.\n visit = proposal_ref.split()[0]\n # If the visit is not prefixed by the proposal code\n # (typically a 2-letter sequence like \"lb\") then prefix it.\n if visit[0].isdigit():\n visit = f\"{proposal_code}{visit}\"\n project = Project.objects.get_or_create(title=visit)[0]\n projects.append(project)\n\n # If not open then delete users for the project and re-add them based on supplied fed-ids.\n delete_users(project)\n\n # Update project_id on target.\n target.project_id.add(project)\n\n # Remaining words in proposal_ref (if any) must be fedid's which are used to find users information.\n num_users = 0\n for fedid in proposal_ref.split()[1:]:\n user = User.objects.get_or_create(username=fedid, password=\"\")[0]\n project.user_id.add(user)\n num_users += 1\n if num_users == 0:\n project.open_to_public = True\n\n target.upload_progess = 10.00\n target.save()\n\n return projects", "def list_projects(arn=None, nextToken=None):\n pass", "def get_projects(self, page_size, page, sort_direction, sort_conditions):\n request_url = self.api_base_url + \"projects?\" + \"pageSize=\" + str(page_size) + \"&page=\" + str(page) + \"&sortDirection=\" + sort_direction + \"&sortConditions=\" + sort_conditions\n headers = {\"Accept\": \"application/JSON\"}\n response = Util.get_api_call(request_url, headers)\n return response.json()", "def get_projects(self):\n if not self.validate():\n raise SettingCustomVisionAccessFailed\n return self.get_trainer_obj().get_projects()", "def get_all_projects():\n return jsonify(admin.get_all_projects(current_app.scoped_session()))", "def get_projects(self):\n response = self.request(verb=requests.get, address=\"projects\")\n # FIXME: if no results, must we raise an exception?\n return response[\"results\"] if \"results\" in response else response", "def test_get_project_list_with_projects(self):\n # Add two test projects.\n projects = [\n add_project(title='1', description='1'),\n add_project(title='2', description='2'),\n ]\n\n result = get_project_list()\n result_projects = result['projects'].object_list\n\n # Make sure two test projects are 
retrieved.\n for project in projects:\n self.assertTrue(project in result_projects)\n self.assertEqual(len(result_projects), len(projects))\n self.assertIsNone(result['tag'])\n self.assertFalse(result['filtered'])", "def get_projects(self):\n return self._gitlab.owned_projects(per_page=1000)", "def list_projects():\n with BMI(_username, _password, constants.BMI_ADMIN_PROJECT) as bmi:\n ret = bmi.list_projects()\n if ret[constants.STATUS_CODE_KEY] == 200:\n table = PrettyTable(\n field_names=[\"Id\", \"Name\", \"Provision Network\"])\n projects = ret[constants.RETURN_VALUE_KEY]\n for project in projects:\n table.add_row(project)\n click.echo(table.get_string())\n else:\n click.echo(ret[constants.MESSAGE_KEY])", "def index():\n active = None\n projects = Projects.query.all()\n tasks = Tasks.query.all()\n\n if len(projects) == 1:\n projects[0].active = True\n active = projects[0].project_id\n db.session.commit()\n\n if projects:\n for project in projects:\n if project.active:\n active = project.project_id\n if not active:\n projects[0].active = True\n active = projects[0].project_id\n else:\n projects = None\n\n if projects:\n return render_template('clamytoe.html', tasks=tasks, projects=projects, active=active)\n else:\n return render_template('clamytoe.html', tasks=tasks, active=active)", "def getProjectsQueryForOrgs(org_keys):\n query = getProjectsQuery()\n query.filter('org IN', org_keys)\n return query", "def get_user_projects(username):\n\n tx = cypher_transaction()\n query = \"\"\"\n MATCH (p:project)-[:OWNED_BY]->(u:user {username:{uname}})\n RETURN p\n \"\"\"\n tx.append(query, parameters={'uname': username})\n results = _first(tx.commit())\n projects = []\n for r in results:\n proj, = r.values\n print(\"* {0}\".format(proj['name']))\n projects.append(proj)\n return projects", "def gather_project_entries(self):\n\n user_inputs = [\n self.customer_name.get(), self.proj_date.get(),\n self.proj_descrpt.get(), self.proj_estdatest.get(),\n self.proj_estdateend.get(), self.proj_estbudget.get(),\n self.proj_actdatest.get(), self.proj_actdateend.get(),\n self.proj_actcost.get()\n ]\n\n return self.check_input_empty(user_inputs)", "def getProjectsQueryForEval(keys_only=False, ancestor=None, **properties):\n q = getProjectsQuery(keys_only, ancestor, **properties)\n q.filter('status IN', [project_model.STATUS_ACCEPTED, 'failed', 'completed'])\n return q", "def all_projects(request):\n\n game_projects = GameProject.objects.all()\n profile = get_object_or_404(Profile, user=request.user)\n query = None\n\n if 'q' in request.GET:\n query = request.GET['q']\n if not query:\n messages.error(request, \"You didn't enter any search criteria!\")\n return redirect(reverse('all_projects'))\n\n queries = Q(title__icontains=query) | Q(description__icontains=query) \\\n | Q(owner__user__username__icontains=query)\n game_projects = game_projects.filter(queries)\n\n for game_project in game_projects:\n game_project.total_amount = 0\n for order in Order.objects.filter(\n game_project=game_project).filter(status='PA'):\n game_project.total_amount += order.donation_item.amount\n\n template = 'gameproject/all_projects.html'\n context = {\n 'game_projects': game_projects,\n 'profile': profile,\n 'search_term': query\n }\n\n return render(request, template, context)", "def get_projects():\n data = sql.list_projects()\n names = [(d['id'], d['name']) for d in data]\n return names", "def projects(request):\n projects = (\n Project.objects.visible()\n .visible_for(request.user)\n 
.prefetch_related(\"latest_translation__user\")\n .order_by(\"name\")\n )\n\n if not projects:\n return render(request, \"no_projects.html\", {\"title\": \"Projects\"})\n\n return render(\n request,\n \"projects/projects.html\",\n {\"projects\": projects, \"top_instances\": projects.get_top_instances()},\n )", "def getProjects(self):\n\n return self.__projects", "def select_approved_projects(self):\r\n print \"Selecting approved projects... \"\r\n global ANNUAL_BUDGET\r\n \r\n projects_citizens_sorted = sorted(self.projects_for_vote, key=lambda project:project.units, reverse=True)\r\n projects_reps_sorted = sorted(self.projects_for_vote, key=lambda project:project.p_units, reverse=True)\r\n budget_sum = 0\r\n \r\n for p in projects_citizens_sorted:\r\n budget_sum += p.budget\r\n if budget_sum <= ANNUAL_BUDGET/2:\r\n self.projects_approved.append(p)\r\n\r\n budget_sum = 0\r\n for p in projects_reps_sorted:\r\n if p not in self.projects_approved:\r\n budget_sum += p.budget\r\n if budget_sum <= ANNUAL_BUDGET/2:\r\n self.projects_approved.append(p)\r\n\r\n\r\n \r\n# raw_input(\"select_approved_projects - antes\")\r\n for p in projects_citizens_sorted:\r\n print p\r\n print \"\\nReps\\n\"\r\n for p in projects_reps_sorted:\r\n print p\r\n print \"\\nApproved\\n\"\r\n for p in self.projects_approved:\r\n print p\r\n\r\n raw_input(\"select_approved_projects - depois\")", "def test_list_project_request(self):\n pass", "def query_projects(request):\n try:\n filters = request.data\n except AttributeError:\n filters = FILTER\n projects = _get_projects(filters)\n projects_as_json = serializers.serialize('json', projects)\n return HttpResponse(json.dumps(projects_as_json), content_type='json')", "def get_projects(self):\n return conf.projects", "def project_all(request, format=None):\n if request.method == 'GET':\n projects = Project.objects.all().order_by('key')\n serializer = ProjectSerializer(projects, many=True)\n return Response(serializer.data)", "def get_project_ids(self, node=None, name=None):\n project_ids = []\n queries = []\n # Return all project_ids in the data commons if no node is provided or if node is program but no name provided\n if name == None and ((node == None) or (node == \"program\")):\n print(\"Getting all project_ids you have access to in the data commons.\")\n if node == \"program\":\n print(\n \"Specify a list of program names (name = ['myprogram1','myprogram2']) to get only project_ids in particular programs.\"\n )\n queries.append(\"\"\"{project (first:0){project_id}}\"\"\")\n elif name != None and node == \"program\":\n if isinstance(name, list):\n print(\n \"Getting all project_ids in the programs '\" + \",\".join(name) + \"'\"\n )\n for program_name in name:\n queries.append(\n \"\"\"{project (first:0, with_path_to:{type:\"program\",name:\"%s\"}){project_id}}\"\"\"\n % (program_name)\n )\n elif isinstance(name, str):\n print(\"Getting all project_ids in the program '\" + name + \"'\")\n queries.append(\n \"\"\"{project (first:0, with_path_to:{type:\"program\",name:\"%s\"}){project_id}}\"\"\"\n % (name)\n )\n elif isinstance(node, str) and isinstance(name, str):\n print(\n \"Getting all project_ids for projects with a path to record '\"\n + name\n + \"' in node '\"\n + node\n + \"'\"\n )\n queries.append(\n \"\"\"{project (first:0, with_path_to:{type:\"%s\",submitter_id:\"%s\"}){project_id}}\"\"\"\n % (node, name)\n )\n elif isinstance(node, str) and name == None:\n print(\n \"Getting all project_ids for projects with at least one record in the node '\"\n + node\n + 
\"'\"\n )\n query = \"\"\"{node (first:0,of_type:\"%s\"){project_id}}\"\"\" % (node)\n df = pd.json_normalize(self.sub.query(query)[\"data\"][\"node\"])\n project_ids = project_ids + list(set(df[\"project_id\"]))\n if len(queries) > 0:\n for query in queries:\n res = self.sub.query(query)\n df = pd.json_normalize(res[\"data\"][\"project\"])\n project_ids = project_ids + list(set(df[\"project_id\"]))\n my_ids = sorted(project_ids, key=str.lower)\n print(my_ids)\n return my_ids", "def test_returns_easy_projects_if_difficulty_set_to_easy(self):\n # Arrange\n self.test_project_2.private = False\n # Set difficulty of test_project_2 to easy.\n self.test_project_2.difficulty = ProjectDifficulty.EASY.value\n self.test_project_2.save()\n # Act\n response = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"difficulty\": \"EASY\"},\n )\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json[\"results\"]), 1)\n self.assertEqual(\n response.json[\"results\"][0][\"projectId\"], self.test_project_2.id\n )", "def _get_filtered_projects(filters):\n projects_itr = (projects_lib.get_filtered(f) for f in filters)\n return itertools.chain.from_iterable(projects_itr)", "def _get_project_by_manager(userid):\n return Project.objects.filter(project_open=True, manager=userid).order_by(\n \"created_at\"\n )", "def get_queryset(self):\n queryset = Project.objects.filter(contributor__user=self.request.user.pk)\n return queryset", "def test_get_project_list_with_no_projects(self):\n result = get_project_list()\n self.assertQuerysetEqual(result['projects'].object_list, [])\n self.assertIsNone(result['tag'])\n self.assertFalse(result['filtered'])", "def request_project_list(event_id):\n is_moar = bool(request.args.get('moar', type=bool))\n host_url = request.host_url\n return get_project_list(event_id, host_url, is_moar)", "def getPastProjects(self)->list:\n returnList=[]\n for i in range(0,randint(1, 10)):\n randumProjectId = randint(0, 109)\n if randumProjectId not in returnList:\n returnList.append(randumProjectId)\n\n return returnList", "def test_list_project(self):\n pass", "def getSubProjects(self):\n logger.debug(\"Func: getSubProjects\")\n\n return self._subProjectsList", "def personnel_projects_table(personnel_search_table_selected_indices, selected_funding, selected_year, rows):\n personnel_outcome_data, personnel_name = personnel_outcomes_helper(personnel_search_table_selected_indices, selected_funding, selected_year, rows)\n if personnel_outcome_data is not None:\n personnel_outcome_data = personnel_outcome_data[[\"type\", \"pub_title\"]]\n return personnel_outcome_data.to_dict('records')\n\n return pd.DataFrame(data=None, columns=[\"type\", \"pub_title\"]).to_dict('records') # None cannot be passed back as it will cause an error.", "def test_projects_get(self):\n response = self.client.open('/project-tracker/projects',\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def projects(self):\n campaigns = self.campaigns.all()\n return Project.published_objects.filter(campaigns__in=campaigns)", "def projects(self):\n ret_val = []\n params = {\"fields\": Project.FIELDS}\n projects = self._request(\"get\", \"projects\", params=params)\n\n for project in projects:\n ret_val.append(Project(project))\n\n return ret_val", "def get(self):\n opts = PROJECTS_OPTS_PARSER.parse_args()\n filters = PROJECT_FILTERS_PARSER.parse_args()\n filters = clean_attrs(filters)\n\n query = 
Project.query\n\n if not current_user.is_authenticated():\n query = query.filter_by(public=True)\n\n if opts['order'] == 'recent':\n query = (\n query.\n join(Project.jobs, isouter=True).\n group_by(Project).\n order_by(sql_func.max(Job.create_ts).desc().nullslast())\n )\n\n if filters:\n query = query.filter(*[\n getattr(Project, field) == value\n for field, value in filters.items()\n ])\n\n marshaler = dict(items=ALL_LIST_ROOT_FIELDS['items'])\n values = dict(items=query.all())\n\n args = PROJECT_LIST_PARSER.parse_args()\n\n if args['meta']:\n marshaler['meta'] = ALL_LIST_ROOT_FIELDS['meta']\n values['meta'] = {'total': query.count()}\n values['meta'].update(Project.get_status_summary(filters))\n\n if args['latest_job']:\n marshaler['items'] = ITEMS_MARSHALER_LATEST_JOB\n\n return marshal(values, marshaler)", "def test_list_projects(self):\n pass", "def test_list_projects(self):\n pass", "def all(cls):\r\n projects_url = 'https://www.pivotaltracker.com/services/v3/projects'\r\n response = _perform_pivotal_get(projects_url)\r\n\r\n root = ET.fromstring(response.text)\r\n if root is not None:\r\n return [Project.from_node(project_node) for project_node in root]", "def getProjectsQuery(keys_only=False, ancestor=None, **properties):\n q = db.Query(project_model.GSoCProject, keys_only=keys_only)\n\n if ancestor:\n q.ancestor(ancestor)\n\n for k, v in properties.items():\n q.filter(k, v)\n\n return q" ]
[ "0.6504841", "0.6427155", "0.6327203", "0.6314313", "0.6288553", "0.62257266", "0.62068164", "0.6192607", "0.6165686", "0.6157085", "0.6130755", "0.61147606", "0.61008775", "0.60764706", "0.60260594", "0.60055494", "0.5962676", "0.5942284", "0.5927183", "0.58933854", "0.5890941", "0.58865345", "0.5879308", "0.5842099", "0.5814713", "0.58113974", "0.58000505", "0.5764027", "0.57557017", "0.5742416", "0.5730648", "0.5727785", "0.571063", "0.57023937", "0.57007754", "0.5685758", "0.5681323", "0.5676576", "0.5672552", "0.56645554", "0.5662469", "0.564539", "0.5632567", "0.56148803", "0.56056124", "0.56011075", "0.5590909", "0.5580417", "0.5563645", "0.55562484", "0.5541988", "0.5532553", "0.55295193", "0.55270255", "0.5517101", "0.5512093", "0.5509482", "0.5509075", "0.5504822", "0.5503071", "0.54932064", "0.54915476", "0.54908633", "0.54896533", "0.54868907", "0.54696715", "0.5467915", "0.5453434", "0.54428744", "0.54331917", "0.5429449", "0.5422451", "0.5413298", "0.5405996", "0.53946525", "0.53913933", "0.53850204", "0.53676784", "0.5346834", "0.5331806", "0.5325214", "0.5311058", "0.5306964", "0.529319", "0.52846515", "0.5282084", "0.5279548", "0.52656794", "0.5264632", "0.52622217", "0.52621794", "0.5260515", "0.5243937", "0.52431", "0.5234916", "0.52326167", "0.52205855", "0.52205855", "0.52198136", "0.5214898" ]
0.72116566
0
Relative Strength Index is a momentum indicator measuring the magnitude of recent price changes. 70 is overbought; 30 is oversold.
def RelativeStrengthIndex(self, timeperiod=14): return ta.RSI(self.data.close,timeperiod)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculateRelativeStrengthIndex(self, series, interval=14):\n\n if not isinstance(series, pd.Series):\n raise TypeError('Pandas Series required.')\n\n if not isinstance(interval, int):\n raise TypeError('Interval integer required.')\n\n if(len(series) < interval):\n raise IndexError('Pandas Series smaller than interval.')\n\n diff = series.diff(1).dropna()\n\n sum_gains = 0 * diff\n sum_gains[diff > 0] = diff[diff > 0]\n avg_gains = sum_gains.ewm(com=interval-1, min_periods=interval).mean()\n\n sum_losses = 0 * diff\n sum_losses[diff < 0] = diff[diff < 0]\n avg_losses = sum_losses.ewm(\n com=interval-1, min_periods=interval).mean()\n\n rs = abs(avg_gains / avg_losses)\n rsi = 100 - 100 / (1 + rs)\n\n return rsi", "def relative_strength(prices, n):\n\n deltas = np.diff(prices)\n seed = deltas[:n+1] # takes the last 1 price differences? 12 market days?\n up = seed[seed>=0].sum()/n\n down = -seed[seed<0].sum()/n\n rs = up/down\n rsi = np.zeros_like(prices)\n rsi[:n] = 100. - 100./(1.+rs)\n\n for i in range(n, len(prices)):\n delta = deltas[i-1] # cause the diff is 1 shorter\n\n if delta>0:\n upval = delta\n downval = 0.\n else:\n upval = 0.\n downval = -delta\n\n up = (up*(n-1) + upval)/n\n down = (down*(n-1) + downval)/n\n\n rs = up/down\n rsi[i] = 100. - 100./(1.+rs)\n\n return rsi", "def seriesResistance(self):\n return 13.38 * math.pow(self.concentration, -0.8397)", "def resistance(stock):\n output= stock_max(stock)-(stock_max(stock)*.05)\n return output", "def relative_range(self):\n self.calculate_relative_mags()\n string = '{:.0f}-{:.0f}Hz: {:.5f}'\n s_ind = self.get_bin(self.s_freq)\n e_ind = self.get_bin(self.e_freq)\n lst = self.rel_mags[s_ind:e_ind+1]\n return sum(lst)/len(lst)", "def relative_energy_drift(x_pred, x_true, t=-1):\n energy_pred = total_energy(x_pred[t])\n energy_true = total_energy(x_true[t])\n return (energy_pred-energy_true) / energy_true", "def get_strength(self):\n return 10 - self.get_agility()", "def delta(self):\r\n return 1 - xl.Refractive_Index_Re(self.compound, self.energy, self.density)", "def calculateR(sapienses: list) -> float:\n r = 0\n for i in sapienses:\n r = r + i.numberInfected\n r=r/I0\n r = r*S/(S+R+D)\n return r", "def PV_ExpsMaint(t):\n if t > last_t:\n return 0\n else:\n return - prj_exps_Maint(t) + PV_ExpsMaint(t + 1) / (1 + DiscRate(t))", "def front_column_model_p_gain():", "def get_duct_linear_heat_loss_coefficient() -> float:\n return 0.49", "def calc_calories(gpx_track, wt = 175, activity='Run'):", "def VarianceOfAbsAcceleration(self):\n H = []\n for i in range(len(self.omega_range)):\n \"\"\"Calculation of the Transmission matrix H\"\"\"\n H.append(np.linalg.inv((-self.omega_range[i] ** 2 * self.M\n - 1j * self.omega_range[i] * self.C\n + self.K)))\n \"\"\"squared absolute of the transmission matrix H multiplied with the diagonal of the mass matrix M (M*I)\"\"\"\n FRFacc = [H[wincr].dot(np.diagonal(self.M)) * self.omega_range[wincr] ** 2 for wincr in\n range(len(self.spectrum))]\n Habs2 = [(np.abs(np.ones(len(vector), dtype=float) - vector) ** 2) for vector in FRFacc]\n PSDexc = self.spectrum\n \"\"\"Response of all DOFs as PSD\"\"\"\n RespPSD = [Habs2[wincr] * PSDexc[wincr] for wincr in range(len(self.spectrum))]\n AccPSD = [abs(RespPSD[wincr] + 0*PSDexc[wincr]) for wincr in range(len(self.spectrum))]\n \"\"\"The variance of the response can be obtained with the integral of the response PSD. 
\n integral(PSD_response)\"\"\"\n variance = (np.trapz(AccPSD, self.omega_range, axis=0))\n return variance", "def strength(self) -> float:\n ...", "def adjust(self, rating, series):\n return series[0] - self.expect(rating, series[1])", "def adjust(self, rating, series):\n return series[0] - self.expect(rating, series[1])", "def compute_gain(loudness, renormalize_loudness):\n gain = []\n for i in range(len(loudness)):\n delta_loudness = renormalize_loudness[i] - loudness[i]\n gain.append(np.power(10.0, delta_loudness / 20.0))\n return gain", "def calc_gain(s, i):\n return math.sqrt((i + s) / (6 * s))", "def rate_position(current, target):\n return (target[0] - current[0]) ** 2 + (target[1] - current[1]) ** 2", "def silverman(n: int, ess: float) -> float:\n\n return (ess * (n + 2) / 4) ** (-1 / (n + 4))", "def delta(self):\n return 1 - xl.Refractive_Index_Re(self.compound, self.energy, self.density)", "def addMomentumIndicators(self):\n\n if not isinstance(self.df, pd.DataFrame):\n raise TypeError('Pandas DataFrame required.')\n\n if not 'close' in self.df.columns:\n raise AttributeError(\"Pandas DataFrame 'close' column required.\")\n\n if not self.df['close'].dtype == 'float64' and not self.df['close'].dtype == 'int64':\n raise AttributeError(\n \"Pandas DataFrame 'close' column not int64 or float64.\")\n\n if not 'ema12' in self.df.columns:\n self.df['ema12'] = self.df.close.ewm(span=12, adjust=False).mean()\n\n if not 'ema26' in self.df.columns:\n self.df['ema26'] = self.df.close.ewm(span=26, adjust=False).mean()\n\n if not self.df['ema12'].dtype == 'float64' and not self.df['ema12'].dtype == 'int64':\n raise AttributeError(\n \"Pandas DataFrame 'ema12' column not int64 or float64.\")\n\n if not self.df['ema26'].dtype == 'float64' and not self.df['ema26'].dtype == 'int64':\n raise AttributeError(\n \"Pandas DataFrame 'ema26' column not int64 or float64.\")\n\n # calculate relative strength index\n self.df['rsi14'] = self.calculateRelativeStrengthIndex(\n self.df['close'], 14)\n # default to midway-50 for first entries\n self.df['rsi14'] = self.df['rsi14'].fillna(50)\n\n # calculate moving average convergence divergence\n self.df['macd'] = self.df['ema12'] - self.df['ema26']\n self.df['signal'] = self.df['macd'].ewm(span=9, adjust=False).mean()\n\n # calculate on-balance volume (obv)\n self.df['obv'] = np.where(self.df['close'] > self.df['close'].shift(1), self.df['volume'], \n np.where(self.df['close'] < self.df['close'].shift(1), -self.df['volume'], self.df.iloc[0]['volume'])).cumsum()\n\n # obv change percentage\n self.df['obv_pc'] = self.df['obv'].pct_change() * 100\n self.df['obv_pc'] = np.round(self.df['obv_pc'].fillna(0), 2)", "def wind_heat_transfer_coefficient(self) -> float:\n\n return 3.8 + 2 * self.wind_speed\n # return 4.5 + 2.9 * self.wind_speed", "def relative_rate(self):\n return _TestA_swig.cleanslate_sptr_relative_rate(self)", "async def _refresh_rsi(self, pair: str):\n\n source = self.adjusted_close_values[pair][-config['rsi_size']:]\n deltas = common.math.diff(source)\n\n n = config['rsi_window']\n seed = deltas[:n + 1]\n seed_ups = [value for value in seed if value >= 0]\n seed_downs = [value for value in seed if value < 0]\n up = sum(seed_ups) / n\n down = -sum(seed_downs) / n\n\n try:\n rs = up / down\n except ZeroDivisionError:\n rs = 0\n\n rsi = [0] * len(source)\n rsi[:n] = [100.0 - 100.0 / (1.0 + rs) for _ in range(n)]\n\n for i in range(n, len(source)):\n delta = deltas[i - 1]\n\n if delta > 0:\n upval = delta\n downval = 0.0\n else:\n upval = 0.0\n downval = 
-delta\n\n up = (up * (n - 1) + upval) / n\n down = (down * (n - 1) + downval) / n\n\n try:\n rs = up / down\n except ZeroDivisionError:\n rs = 0\n\n rsi[i] = 100.0 - 100.0 / (1.0 + rs)\n\n self.relative_strength_indexes[pair] = rsi", "def performance_vs_index(self, index='SPY', dateIni='Ini', dateFin='Fin'):\n if dateFin == 'Fin':\n dateFin = self.data.index[-1]\n if dateIni == 'Ini':\n dateIni = self.data.index[0]\n portfolioGains = round(self.data.loc[self.data.index[-1], 'Profit/Loss%'], 2)\n else:\n pData = self.data.loc[dateIni:dateFin]\n pData.loc[:,'Profit/Loss'] = pData['Gains'].cumsum()\n pData.loc[:,'Profit/Loss%'] = pData['Profit/Loss'] / pData['Invested'] * 100\n portfolioGains = round(pData.loc[pData.index[-1], 'Profit/Loss%'], 2)\n indexData = yf.Ticker(index).history(start=dateIni, end=dateFin)\n indexData['Var%'] = (indexData.Close - indexData.Close[0]) / indexData.Close[0] * 100\n indexGains = round(indexData.loc[indexData.index[-1], 'Var%'], 2)\n return portfolioGains, indexGains, portfolioGains - indexGains", "def calc(self,index,counter_values):\n gr = self.grSign * self.grPitch['Value'].value\n m = self.mSign * self.mPitch['Value'].value\n \n offsetG,offsetM = self.checkOffset()\n beta = self.toRadians(gr) - (math.pi/2.0) - offsetG\n theta = (math.pi/2.0) - (self.toRadians(m)) - offsetM\n alpha = (2.0*theta) + beta\n numerator = (math.sin(alpha) + math.sin(beta))\n denominator = (self.DiffrOrder * self.look_at_grx())\n wavelength = numerator / denominator\n \n if wavelength == 0.0:\n energy_physicalmot = 0.0\n else:\n energy_physicalmot = self.hc / wavelength\n #if self.FixedM2Pit: \n Cff = math.cos(beta)/math.cos(alpha)\n if energy_physicalmot < 0 :\n #warning: wavelength se vuelve negativo ... ??????\n energy_physicalmot = energy_physicalmot *(-1) \n \n # Real Energy is equal to the energy calculated by the encoders\n # minus an offset that depends on the same energy calculated by the \n # encoders:\n # E_physicalmot = Ereal + offset\n # with offset = a*Ereal + b\n # This implies that: Ereal = (Ephysicalmot - b)/(1+a) \n a_coeff = self.EnergyDP.a_offset_coeff\n b_coeff = self.EnergyDP.b_offset_coeff\n numerator = energy_physicalmot - b_coeff\n denominator = 1 + a_coeff\n energy = numerator / denominator\n \n if index == 1:\n return energy\n elif index == 2:\n return Cff", "def calc(self,index, counter_values):\n try:\n angles = self.ik220_dev.read_attribute('Angles').value\n if index == 9:\n return sum(angles[:3])/3.0 # Excluded channel 4 of grating pitch encoder because of problems of Homing in the last header of the RON grating encoder.\n elif index == 10:\n return sum(angles[4:6])/2.0 # Modified from 4 channels to 2 channels because of problems of Homing in the 2 last headers of the RON mirror3 encoder.\n else:\n return angles[index - 1]\n except:\n return 1e-100", "def williams_variable_accum_dist(close, open, high, low, volume):\n range = high - low", "def forward_price(S, t, r):\n return S / np.exp(-r * t)", "def get_blr_strength(self,\n blr: BoundedLinearRegressions) -> float:\n # Return 0 if slopes are different signs.\n if blr.minima_regression.slope * blr.maxima_regression.slope < 0:\n return 0\n\n # Return 0 if not enough data.\n if len(blr.candles) <= 3:\n return 0\n\n # Find high and low prices of the trendline period.\n high_price = max([candle.high for candle in blr.candles])\n low_price = max([candle.low for candle in blr.candles])\n\n # Find start and end of the period.\n start_moment = max([candle.moment for candle in blr.candles])\n end_moment = 
min([candle.moment for candle in blr.candles])\n\n # Take signal strength to be the average of the two slopes.\n minima_slope_pct = abs(blr.minima_regression.y_of_x(end_moment) - blr.minima_regression.y_of_x(start_moment)) \\\n / max(0.01, high_price - low_price)\n maxima_slope_pct = abs(blr.maxima_regression.y_of_x(end_moment) - blr.maxima_regression.y_of_x(start_moment)) \\\n / max(0.01, high_price - low_price)\n signal_strength = (minima_slope_pct + maxima_slope_pct) / 2.0\n\n # Scale down signal strength.\n signal_strength = min(1, signal_strength / 5.0)\n\n # Ensure the signal strength has the correct sign.\n if blr.minima_regression.slope < 0:\n signal_strength += -1\n\n return signal_strength", "def calcSmoothedGains(series, shortWind, longWind):\n shortMa = series.rolling(str(shortWind)+'min').mean().shift(-shortWind)\n longMa = series.rolling(str(longWind)+'min').mean().shift(-longWind)\n\n # Calc Buy hold and Sell signals\n buySellRatio = longMa - shortMa\n return buySellRatio", "def update_relative_weight(self):\n self.relative_weight = 1\n # Add up all of the historical cpu datapoints (higher CPU = more weight)\n for i in self.cpu_datapoints:\n self.relative_weight += i\n # Multiply by the status value (so VMs with red alarm have most weight)\n self.relative_weight *= (self.heartbeat_status * 10)", "def referenceIllum(temp, wavelength):\n ct=temp\n if ct <= 0:\n return 0\n if ct < 4000:\n return planckian(ct, wavelength)\n if ct < 5000:\n p=planckian(ct, wavelength)\n d=dseries(ct, wavelength)\n return p+(d-p)*(ct-4000)/1500.0\n return dseries(ct, wavelength)", "def rel_agreement_index(self) -> float:\n a = ((self.predicted - self.true) / self.true) ** 2\n b = np.abs(self.predicted - np.mean(self.true))\n c = np.abs(self.true - np.mean(self.true))\n e = ((b + c) / np.mean(self.true)) ** 2\n return float(1 - (np.sum(a) / np.sum(e)))", "def relative_momentum(self, prices, lookback, n_selection, long_only=False):\n returns = prices.pct_change(periods=lookback).fillna(0)\n rank = returns.rank(axis=1, ascending=False)\n long_signal = (rank <= n_selection).applymap(self.bool_converter)\n short_signal = -(rank >= len(rank.columns) - n_selection + 1).applymap(self.bool_converter)\n if long_only == True:\n signal = long_signal\n else:\n signal = long_signal + short_signal\n return signal", "def get_resistance(self):\n adc = ADC(self.pin)\n value = adc.read()\n if value == 0:\n return -1\n\n return (4095./value - 1.) 
* self.RLOAD # ESP32 maksimi, ESP8266:lle arvo on 1023", "def getRPSA(ChargeSA):\n temp=0.0\n for i in ChargeSA:\n temp=temp+i[2]\n if temp == 0.0:\n return 0.0\n return getPSA(ChargeSA)/temp", "def RSI(self, window):\n\n close = self.df.close\n\n delta = close.diff().dropna()\n\n self.df[\"change\"] = delta\n\n u = delta * 0\n d = u.copy()\n u[delta > 0] = delta[delta > 0]\n d[delta < 0] = -delta[delta < 0]\n\n u[u.index[window - 1]] = np.mean(u[:window]) # first value is sum of avg gains\n u = u.drop(u.index[: (window - 1)])\n\n d[d.index[window - 1]] = np.mean(d[:window]) # first value is sum of avg losses\n d = d.drop(d.index[: (window - 1)])\n\n rs = (\n pd.DataFrame.ewm(u, com=window - 1, adjust=False).mean()\n / pd.DataFrame.ewm(d, com=window - 1, adjust=False).mean()\n )\n rsi = 100 - 100 / (1 + rs)\n\n self.df[\"rsi\"] = rsi\n\n return rsi", "def gbce_index(self):\n stocks_vwsp = [Stock.get_instance().get_stock_by_symbol(tr.symbol).vwsp for tr in Trade.get_instance()]\n try:\n return (reduce(operator.mul, stocks_vwsp, 1)) ** (1.0/len(stocks_vwsp))\n except ZeroDivisionError:\n return 0.0", "def rsi(df, lag):\n\n def avg_gain():\n gains = [\n df[i][\"c\"] - df[i - 1][\"c\"] if df[i][\"c\"] >= df[i - 1][\"c\"] else 0.0\n for i in range(1, len(df))\n ]\n avg_gain = [sum(gains[:lag]) / float(lag)]\n [avg_gain.append(((avg_gain[-1] * 13) + gain) / 14.0) for gain in gains[lag:]]\n return avg_gain\n\n def avg_loss():\n losses = [\n abs(df[i][\"c\"] - df[i - 1][\"c\"]) if df[i][\"c\"] < df[i - 1][\"c\"] else 0.0\n for i in range(1, len(df))\n ]\n avg_loss = [sum(losses[:lag]) / float(lag)]\n [avg_loss.append(((avg_loss[-1] * 13) + loss) / 14.0) for loss in losses[lag:]]\n return avg_loss\n\n gains = avg_gain()\n losses = avg_loss()\n\n raw_rsi = [\n round(100 - (100 / (1 + (gains[i] / losses[i]))), 2) for i in range(len(gains))\n ]\n df = df[-1 * len(raw_rsi) :]\n\n return [raw_rsi[i] for i in range(len(df))]", "def loss(actual: float, expect: float) -> float:\n return -(expect * math.log10(actual)\n + (1 - expect) * math.log10(1 - actual))", "def linear_momentum(self):\r\n return self.mass * self.vel", "def rsi_apply_nb(ts, window, ewm, adjust, cache_dict):\n h = hash((window, ewm))\n roll_up, roll_down = cache_dict[h]\n rs = roll_up / roll_down\n return 100 - 100 / (1 + rs)", "def _calculate_strehl(self):\n\n self.strehl = np.exp(-1*((2*np.pi/self.science_wavelength)*self.high_order_wfe)**2)", "def calc_relhum(dewpt,t):\n\n relhum=100.*(np.exp((const.es_Abolton*dewpt)/(const.es_Bbolton+dewpt))/np.exp((const.es_Abolton*t)/(const.es_Bbolton+t)))\n return relhum", "def estimate(self, reps):\n return self.onerm / MaxCalc.coefficients[reps - 1]", "def efficiency_cal(self):\n Temp = 0\n for i in self.supplyseries:\n for j in self.demandseries:\n if(self.shortestpathij(i, j) == None):\n continue\n Temp += 1/self.shortestpathij(i, j)\n \n self.efficiency = 1/(self.supplynum*self.demandnum)*Temp", "def dynamic(self):\n # FrostIndexChangeRate=-(1-Afrost)*FrostIndex - Tavg*exp(-0.04*Kfrost*SnowCover/SnowWaterEquivalent);\n\n FrostIndexChangeRate = -(1 - self.var.Afrost) * self.var.FrostIndex - self.var.Tavg * \\\n np.exp(-0.04 * self.var.Kfrost * self.var.SnowCover / self.var.SnowWaterEquivalent)\n # FrostIndexChangeRate=self.var.AfrostIndex - self.var.Tavg* pcraster.exp(self.var.Kfrost*self.var.SnowCover*self.var.InvSnowWaterEquivalent)\n # Rate of change of frost index (expressed as rate, [degree days/day])\n # CHANGED 9 September 2004:\n # - first term should be negative\n # - second term 
should be subtracted, not added!!\n\n self.var.FrostIndex = np.maximum(self.var.FrostIndex + FrostIndexChangeRate * self.var.DtDay, 0)\n # frost index in soil [degree days]\n # based on Molnau and Bissel (1983, A Continuous Frozen Ground Index for Flood\n # Forecasting. In: Maidment, Handbook of Hydrology, p. 7.28, 7.55)\n # if Tavg is above zero, FrostIndex will stay 0\n # if Tavg is negative, FrostIndex will increase with 1 per degree C per day\n # Exponent of 0.04 (instead of 0.4 in HoH): conversion [cm] to [mm]!\n # Division by SnowDensity because SnowDepth is expressed as equivalent water\n # depth(always less than depth of snow pack)\n # SnowWaterEquivalent taken as 0.100 (based on density of 100 kg/m3) (Handbook of Hydrology, p. 7.5)\n # Afrost, (daily decay coefficient) is taken as 0.97 (Handbook of Hydrology,\n # p. 7.28)\n # Kfrost, (snow depth reduction coefficient) is taken as 0.57 [1/cm],\n # (HH, p. 7.28)", "def percentageChange(self):\n try:\n curPrice = self.dailyData[-1].currentPrice\n closePrice = self.historicData[-1].closePrice\n except IndexError: # Just return zero when no historic or dailyData is available yet\n return 0.0\n return (curPrice - closePrice)/closePrice * 100", "def theoretical_effective(dataset):\n return float(sum(dataset))/len(dataset)", "def __straightness_correction(self):\n self.elapsed_ticks_left, self.elapsed_ticks_right = \\\n read_enc_ticks(self.initial_ticks_left, self.initial_ticks_right)\n\n print(\"L: \" + str(self.elapsed_ticks_left) + \"\\tR: \" + str(self.elapsed_ticks_right))\n\n # Handle invalid encoder readings\n if self.elapsed_ticks_left < 0 and self.elapsed_ticks_right < 0:\n print(\"Bad encoder reading\")\n return (0, 0)\n if self.elapsed_ticks_left > self.elapsed_ticks_right:\n print(\"Right slow\")\n return (-get_inc(self.speed), get_inc(self.speed))\n elif self.elapsed_ticks_left < self.elapsed_ticks_right:\n print(\"Left slow\")\n return (get_inc(self.speed), -get_inc(self.speed))\n else:\n print(\"Equal\")\n return (0, 0)", "def support(stock):\n output= stock_min(stock)+(stock_min(stock)*.05)\n return output", "def twr_ret(self) -> float:\n if float(self.tsdf.iloc[0]) == 0.0:\n raise Exception('First data point == 0.0')\n return float(((self.tsdf.iloc[-1] / self.tsdf.iloc[0]) ** (1 / self.length) - 1) * self.periods_in_a_year)", "def dilutionneeded(self) -> float:\n return self.stock*1.0/self.final", "def spread(self):\n if self._ticks:\n return self._ticks[-1][2] - self._ticks[-1][1]\n else:\n candles = None\n if self._candles.get(Instrument.TF_SEC):\n candles = self._candles[Instrument.TF_SEC]\n elif self._candles.get(60):\n candles = self._candles[Instrument.TF_MIN]\n\n if candles:\n return candles[-1].spread\n\n # or another way to query it\n return 0.0", "def power(self):\r\n return self.model * self.percent / 100", "def calc_mard(df):\n df = add_error_fields(df)\n\n abs_relative_difference_in_measurement_range = df.loc[\n df[\"withinMeasRange\"], \"absRelDiff\"\n ]\n\n return np.mean(abs_relative_difference_in_measurement_range)", "def CalculateSpeedIndex(self):\n time_completeness_list = self.GetTimeCompletenessList()\n prev_completeness = 0.0\n speed_index = 0.0\n prev_time = time_completeness_list[0][0]\n for time, completeness in time_completeness_list:\n # Add the incemental value for the interval just before this event.\n elapsed_time = time - prev_time\n incompleteness = (1.0 - prev_completeness)\n speed_index += elapsed_time * incompleteness\n\n # Update variables for next iteration.\n prev_completeness = 
completeness\n prev_time = time\n return speed_index", "def relative_rate(self):\n return _spacegrant_swig.invert_bit_sptr_relative_rate(self)", "def net_position(self):\n average_price = 0\n sum = 0\n for transaction in self.transactions:\n average_price += abs(transaction[0]/transaction[1])\n sum += transaction[1]\n\n average_price /= len(self.transactions) \n average_price *= sum\n \n return average_price", "def get_resistance(self):\n adc = ADC(self.pin)\n value = adc.read()\n if value == 0:\n return -1\n\n return (4095./value - 1.) * self.RLOAD", "def showAverageGainWon(self) :\n averageGainWon = 0\n for level in self.level_history :\n averageGainWon += level.profit\n averageGainWon = averageGainWon/len(self.level_history)\n Scenario.messageGetAverageGainWon(averageGainWon)", "def get_lift(self):\n return 0.0", "def chaikan_accum_dist(open,high,low,close,volume):\n return ((close - open)/range) * volume\n return volume * ((close-low)) - (high-close))/(high-low)", "def fuel_prediction(self):\n\n return 0", "def acceleration(p,s,damp=0, v=0):\n return -p * s - damp*v", "def fix_mix(orig_wts, mvn_asset_rets, spending_rate, rebal_freq=None):\r\n n_scenarios, time_steps, n_assets = mvn_asset_rets.shape\r\n wealth_index = np.zeros((int(time_steps/12), n_scenarios))\r\n for scenario in range(n_scenarios):\r\n asset_rets = mvn_asset_rets[scenario]\r\n cum_pf_rets_component_wise = orig_wts # Initial weight adopted for first time step\r\n if rebal_freq is None:\r\n for period in range(time_steps):\r\n cum_pf_rets_component_wise = cum_pf_rets_component_wise * asset_rets[period]\r\n if period % 12 == 0:\r\n cum_pf_rets_component_wise = cum_pf_rets_component_wise * (1-spending_rate)\r\n wealth_index[int(period/12), scenario] = np.sum(cum_pf_rets_component_wise)\r\n else:\r\n for period in range(time_steps):\r\n cum_pf_rets_component_wise = cum_pf_rets_component_wise * asset_rets[period]\r\n if period % rebal_freq == 0:\r\n cum_pf_rets_component_wise = np.sum(\r\n cum_pf_rets_component_wise) * orig_wts # Rebalnce occurs at the end of the period\r\n if period % 12 == 0:\r\n cum_pf_rets_component_wise = cum_pf_rets_component_wise * (1 - spending_rate)\r\n wealth_index[int(period / 12), scenario] = np.sum(cum_pf_rets_component_wise)\r\n return wealth_index", "def MAE_rel(self):\n try:\n return(self.MAE / self.price_open)\n except:\n return", "def rmsle(self) -> float:\n return float(np.sqrt(np.mean(np.power(np.log1p(self.predicted) - np.log1p(self.true), 2))))", "def ohms(self):\n # Rwb = Rwiper + Rtotal * (counts / 256)\n # Rwa = Rwiper + Rtotal * ((256 - counts) / 256)\n g = 0\n rtotal=0.0\n reach=[]\n for chan in self.get_channel_list(self.nchans):\n self.rwa[chan] = float( 256 - self.vals[chan] ) / 256.0\n self.rwb[chan] = float( self.vals[chan] ) / 256.0\n self.rwa[chan] *= self.Rtotal\n self.rwb[chan] *= self.Rtotal \n self.rwa[chan] += self.Rwiper\n self.rwb[chan] += self.Rwiper", "def stEnergy(frame):\n return np.sum(frame ** 2) / np.float64(len(frame))", "def stEnergy(frame):\n return numpy.sum(frame ** 2) / numpy.float64(len(frame))", "def soundspeed(temp,pres):\n g_p = liq_g(0,1,temp,pres)\n g_tt = liq_g(2,0,temp,pres)\n g_tp = liq_g(1,1,temp,pres)\n g_pp = liq_g(0,2,temp,pres)\n csqinv = (g_tp**2/g_tt - g_pp) / g_p**2\n c = csqinv**(-.5)\n return c", "def stock_price_summary(price_changes):\n\n gains = 0.0\n losses = 0.0\n\n for change in price_changes:\n if change > 0:\n gains += change\n elif change < 0:\n losses += change\n\n return (math.floor(gains*100)/100, 
math.ceil(losses*100)/100)", "def get_S_r(self):\n\n S_r = np.sum((self.eta_model - self.eta_exp) ** 2.)\n\n return S_r", "def relative_rate(self):\n return _add_vector_swig.add_vector_2_cpp_sptr_relative_rate(self)", "def prevalence_index(self):\n return _div(abs(self.TP - self.TN), self.grand_total)", "def _modulation_index(average_amplitudes: np.ndarray) -> float:\n average_amplitudes = average_amplitudes.astype(np.float64)\n try:\n assert np.all(average_amplitudes > 0), \\\n \"Envelope-derived amplitudes must be positive.\"\n except AssertionError as err:\n raise ValueError(str(err))\n # normalize to something probability-like\n P = normalize(average_amplitudes)\n # computed KL distance: log(N)-H(P), and normalize with log(N)\n return 1.0 - shannon_entropy(P) / np.log(P.size)", "def viral_loss_rate(self):\n return (\n self.viral_decay_rate\n + self.ventilation_rate\n + self.viral_surface_deposition\n )", "def score(self):\n self.set_idx()\n if self.idx:\n diffs = self.diffs()\n weights = self.weights\n return np.sum(weights * diffs) / np.sum(weights)\n else:\n return 0.0", "def index_of_refraction(self):\n return self.microsphere.index_of_refraction(self.wavelength)", "def rsi(df, periods = 14, ema = True):\n close_delta = df['Close'].diff()\n\n # Make two series: one for lower closes and one for higher closes\n up = close_delta.clip(lower=0)\n down = -1 * close_delta.clip(upper=0)\n \n if ema == True:\n\t # Use exponential moving average\n ma_up = up.ewm(com = periods - 1, adjust=True, min_periods = periods).mean()\n ma_down = down.ewm(com = periods - 1, adjust=True, min_periods = periods).mean()\n else:\n # Use simple moving average\n ma_up = up.rolling(window = periods, adjust=False).mean()\n ma_down = down.rolling(window = periods, adjust=False).mean()\n \n rsi = ma_up / ma_down\n rsi = 100 - (100/(1 + rsi))\n return rsi", "def overall_sensitivity(self):\n if self.mod1:\n s = torch.max(torch.max(self.weight, -1)[0], -1)[0].item()\n else:\n s = torch.max(torch.sqrt(torch.sum(self.weight * self.weight, -1)))[0].item()\n s *= np.sqrt(2. 
/ np.e)\n return s", "def SuperTrend(df, period, multiplier, ohlc=['open', 'high', 'low', 'close']):\n\n ATR(df, period, ohlc=ohlc) \n atr = 'ATR_' + str(period) \n st = 'ST_' + str(period) + '_' + str(multiplier) \n stx = 'STX_' + str(period) + '_' + str(multiplier) \n \"\"\" \n SuperTrend Algorithm : \n BASIC UPPERBAND = (HIGH + LOW) / 2 + Multiplier * ATR \n BASIC LOWERBAND = (HIGH + LOW) / 2 - Multiplier * ATR \n FINAL UPPERBAND = IF( (Current BASICUPPERBAND < Previous FINAL UPPERBAND) or (Previous Close > Previous FINAL UPPERBAND)) \n THEN (Current BASIC UPPERBAND) ELSE Previous FINALUPPERBAND) \n FINAL LOWERBAND = IF( (Current BASIC LOWERBAND > Previous FINAL LOWERBAND) or (Previous Close < Previous FINAL LOWERBAND)) \n THEN (Current BASIC LOWERBAND) ELSE Previous FINAL LOWERBAND) \n SUPERTREND = IF((Previous SUPERTREND = Previous FINAL UPPERBAND) and (Current Close <= Current FINAL UPPERBAND)) THEN \n Current FINAL UPPERBAND \n ELSE \n IF((Previous SUPERTREND = Previous FINAL UPPERBAND) and (Current Close > Current FINAL UPPERBAND)) THEN \n Current FINAL LOWERBAND \n ELSE \n IF((Previous SUPERTREND = Previous FINAL LOWERBAND) and (Current Close >= Current FINAL LOWERBAND)) THEN \n Current FINAL LOWERBAND \n ELSE \n IF((Previous SUPERTREND = Previous FINAL LOWERBAND) and (Current Close < Current FINAL LOWERBAND)) THEN \n Current FINAL UPPERBAND \n \"\"\" \n # Compute basic upper and lower bands \n df['basic_ub'] = (df[ohlc[1]] + df[ohlc[2]]) / 2 + multiplier * df[atr] \n df['basic_lb'] = (df[ohlc[1]] + df[ohlc[2]]) / 2 - multiplier * df[atr]\n\n # Compute final upper and lower bands \n df['final_ub'] = 0.00 \n df['final_lb'] = 0.00 \n for i in range(period, len(df)): \n df['final_ub'].iat[i] = df['basic_ub'].iat[i] if df['basic_ub'].iat[i] < df['final_ub'].iat[i - 1] or df['Close'].iat[i - 1] > df['final_ub'].iat[i - 1] else df['final_ub'].iat[i - 1] \n df['final_lb'].iat[i] = df['basic_lb'].iat[i] if df['basic_lb'].iat[i] > df['final_lb'].iat[i - 1] or df['Close'].iat[i - 1] < df['final_lb'].iat[i - 1] else df['final_lb'].iat[i - 1] \n # Set the Supertrend value \n df[st] = 0.00 \n for i in range(period, len(df)): \n df[st].iat[i] = df['final_ub'].iat[i] if df[st].iat[i - 1] == df['final_ub'].iat[i - 1] and df['Close'].iat[i] <= df['final_ub'].iat[i] else 0\n df['final_lb'].iat[i] if df[st].iat[i - 1] == df['final_ub'].iat[i - 1] and df['Close'].iat[i] > df['final_ub'].iat[i] else 0\n df['final_lb'].iat[i] if df[st].iat[i - 1] == df['final_lb'].iat[i - 1] and df['Close'].iat[i] >= df['final_lb'].iat[i] else 0\n df['final_ub'].iat[i] if df[st].iat[i - 1] == df['final_lb'].iat[i - 1] and df['Close'].iat[i] < df['final_lb'].iat[i] else 0.00 \n # Mark the trend direction up/down \n df[stx] = np.where((df[st] > 0.00), np.where((df[ohlc[3]] < df[st]), 'down', 'up'), np.NaN)\n\n # Remove basic and final bands from the columns \n df.drop(['basic_ub', 'basic_lb', 'final_ub', 'final_lb'], inplace=True, axis=1) \n df.fillna(0, inplace=True)\n\n return df", "def refractive_index_glass(wavelength, type='soda-lime-low-iron'):\n\n if type.lower() == 'soda-lime-low-iron':\n wavelength = wavelength / 1000\n n = 1.5130 - 0.003169 * wavelength ** 2 + 0.003962 * wavelength ** -2 + 0 * 1j\n\n # n[wavelength < 0.3] = n[wavelength < 0.3] + 1j*0\n elif type.upper() == 'BK7':\n wavelength = wavelength / 1000\n n = np.sqrt(1 + \\\n (1.03961212 * wavelength ** 2) / (\n wavelength ** 2 - 0.00600069867) + \\\n (0.231792344 * wavelength ** 2) / (\n wavelength ** 2 - 0.0200179144) + \\\n (1.01046945 * wavelength 
** 2) / (\n wavelength ** 2 - 103.560653)\n )\n\n return n", "def winRate(DF):\r\n df = DF[\"return\"]\r\n pos = df[df>1]\r\n neg = df[df<1]\r\n return (len(pos) / len(pos + neg)) * 100", "def effectiveness(self):\n self._effectiveness = 0.20 * self.ANA + 0.20 * self.DAM + 0.20 * self.MOA + 0.20 * self.MFA + 0.20 * self.NOP\n return round(self._effectiveness, 5)", "def smape(self) -> float:\n _temp = np.sum(2 * np.abs(self.predicted - self.true) / (np.abs(self.true) + np.abs(self.predicted)))\n return float(100 / len(self.true) * _temp)", "def coefficient(self) -> float:\n ...", "def calculate(self) -> float:", "def getWNSA1(ChargeSA):\n temp = 0.0\n for i in ChargeSA:\n temp = temp+i[2]\n if temp == 0.0:\n return 0.0\n\n return getPNSA1(ChargeSA)*temp/1000", "def P(lag):\n N = len(SP)\n ratios = SP[lag:N]/SP[0:N-lag]\n P = 100.*(ratios-1.)\n return P", "def quick_ratio(self):\n return (\n self.current_assets - self.inventory_net) / self.current_liabilities", "def opamp_gain(R1, Rf):\n R1, Rf = map(_normalizevalue, (R1, Rf))\n gain = 1 + (Rf/R1)\n return gain", "def arithmetic_ret(self) -> float:\n return float(np.log(self.tsdf).diff().mean() * self.periods_in_a_year)", "def get_index(self):\n return (np.sqrt(self.dielectric))", "def water_evapotranspiration_flux(evap):\n return evap * (-1)", "def momentum_indicators(df):\n p = \"mom_\"\n high, low, close = convert_df_to_features(df, False)\n # AO\n i = AwesomeOscillatorIndicator(high, low, 10, 70)\n df[p + \"ao_10_log\"] = list(map(lambda x: uf.log_abs(x,\n zeros=True), i.awesome_oscillator()))\n # PPO\n i = PercentagePriceOscillator(close, 40, 20, 10)\n df[p + \"ppo_10_signal_log\"] = list(\n map(lambda x: uf.log_abs(x * 1000), i.ppo_signal()))\n i = PercentagePriceOscillator(close, 120, 60, 30)\n df[p + \"ppo_30_hist\"] = i.ppo_hist()\n # ROC\n i = ROCIndicator(close, 50)\n df[p + \"roc_50_log\"] = list(map(lambda x: uf.log_abs(x *\n 100, zeros=True), i.roc()))\n # RSI\n i = RSIIndicator(close, 30)\n df[p + \"rsi_30\"] = i.rsi()\n # SR\n i = StochasticOscillator(close, high, low, 54, 9)\n df[p + \"sr_9_signal\"] = i.stoch_signal()\n # SRSI\n i = StochRSIIndicator(close, 90, 15, 15)\n df[p + \"srsi_15_k\"] = i.stochrsi_k()\n i = StochRSIIndicator(close, 180, 30, 30)\n df[p + \"srsi_30\"] = i.stochrsi()\n i = StochRSIIndicator(close, 60, 10, 10)\n df[p + \"srsi_10_d\"] = i.stochrsi()\n # TSI\n i = TSIIndicator(close, 40, 20)\n df[p + \"tsi_20_log\"] = list(map(lambda x: uf.log_abs(x *\n 100, zeros=True), i.tsi()))\n # WR\n i = WilliamsRIndicator(high, low, close, 50)\n df[p + \"wr_50\"] = i.williams_r()\n return df" ]
[ "0.6922888", "0.6838217", "0.6272428", "0.61821586", "0.5993364", "0.5774363", "0.5760759", "0.5744267", "0.57409", "0.5716144", "0.5701222", "0.57010186", "0.56948054", "0.5676221", "0.56436366", "0.5643337", "0.5643337", "0.5614214", "0.55853677", "0.55841684", "0.5581936", "0.55774474", "0.5549964", "0.55470043", "0.55348694", "0.55343586", "0.5533847", "0.5531412", "0.5526525", "0.55263186", "0.5493273", "0.5482266", "0.54673034", "0.5458768", "0.545713", "0.5454392", "0.5451182", "0.5443221", "0.54420775", "0.54127336", "0.5408959", "0.5406839", "0.53961647", "0.5394579", "0.5388058", "0.5382208", "0.53796476", "0.5378219", "0.53748035", "0.5354535", "0.5348853", "0.5347424", "0.53418684", "0.5340064", "0.5338784", "0.53383714", "0.5332317", "0.5322452", "0.53219575", "0.53211683", "0.5315042", "0.53123313", "0.5310711", "0.5307556", "0.53035724", "0.52998245", "0.5288882", "0.52831435", "0.5282047", "0.52793336", "0.5273504", "0.5270666", "0.52699125", "0.5269398", "0.5267078", "0.5266856", "0.5265758", "0.5265314", "0.5260696", "0.5244107", "0.524261", "0.52365017", "0.52307725", "0.5230171", "0.5226024", "0.5224373", "0.5221757", "0.5213", "0.52128583", "0.5208712", "0.5207482", "0.52067375", "0.5206396", "0.5199626", "0.5194419", "0.51835144", "0.5181048", "0.51791275", "0.5175227", "0.5172125" ]
0.655698
2
MACD: Moving Average Convergence/Divergence
def MovingAverageConvergenceDivergence(self, fastperiod=12, slowperiod=26,signalperiod=9): df = pd.DataFrame() df['macd'], df['signal'], df['history'] = ta.MACDFIX(self.data.close, 9) return df[-30:]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def computeMACD(x, slow=26, fast=12):\n emaslow = ExpMovingAverage(x, slow)\n emafast = ExpMovingAverage(x, fast)\n return emaslow, emafast, emafast - emaslow", "def computeMACD(x, slow=26, fast=12):\n emaslow = ExpMovingAverage(x, slow)\n emafast = ExpMovingAverage(x, fast)\n return (emaslow, emafast, emafast - emaslow)", "def MACD(DF,a,b,c):\n df = DF.copy()\n df[\"MA_Fast\"]=df[\"Adj Close\"].ewm(span=a,min_periods=a).mean()\n df[\"MA_Slow\"]=df[\"Adj Close\"].ewm(span=b,min_periods=b).mean()\n df[\"MACD\"]=df[\"MA_Fast\"]-df[\"MA_Slow\"]\n df[\"Signal\"]=df[\"MACD\"].ewm(span=c,min_periods=c).mean()\n df.dropna(inplace=True)\n return df", "def calc_mard(df):\n df = add_error_fields(df)\n\n abs_relative_difference_in_measurement_range = df.loc[\n df[\"withinMeasRange\"], \"absRelDiff\"\n ]\n\n return np.mean(abs_relative_difference_in_measurement_range)", "def compute(dm,do):\n mae = MV.average(MV.absolute(MV.subtract(dm,do)))\n return float(mae)", "def macd(df):\n if not isinstance(df, pd.DataFrame):\n raise ValueError(\"df must by a Data Frame\")\n \n df['Percent Change'] = df['Adj Close'].pct_change()\n exp1 = df['Adj Close'].ewm(span=12, adjust=False).mean()\n exp2 = df['Adj Close'].ewm(span=26, adjust=False).mean()\n macd = exp1 - exp2\n macd_signal = macd.ewm(span=9, adjust=False).mean()\n return macd, macd_signal", "def macd(df, close_price_col_name=\"Close\"):\r\n\t# Add column to store value of MACD\r\n\tdf['Dif'] = df[close_price_col_name].ewm(span=12).mean() - df[close_price_col_name].ewm(span=26).mean()\r\n\r\n\t# Plot\r\n\tplt.plot(df[close_price_col_name], label=\"Closing price\")\r\n\tplt.plot(df[\"Dif\"], label=\"Moving Average Convergence/Divergence (MACD)\")\r\n\tplt.title(\"Visualization of Moving Average Convergence/Divergence\")\r\n\tplt.xlabel(\"Date\")\r\n\tplt.ylabel(\"Closing price\")\r\n\tplt.legend(loc='upper left')\r\n\tplt.show()\r\n\r\n\tdel df[\"Dif\"] # delete the WMA column for re-graphing\r", "def m_average_fun(self, dx=df.dx):\n\n mx = df.assemble(\n self.material._Ms_dg * df.dot(self._m, df.Constant([1, 0, 0])) * dx)\n my = df.assemble(\n self.material._Ms_dg * df.dot(self._m, df.Constant([0, 1, 0])) * dx)\n mz = df.assemble(\n self.material._Ms_dg * df.dot(self._m, df.Constant([0, 0, 1])) * dx)\n volume = df.assemble(self.material._Ms_dg * dx)\n\n return np.array([mx, my, mz]) / volume", "def mad(v):\n return np.mean(np.abs(v - np.mean(v)))", "def compute_MAE(e):\n\n return np.mean(np.abs(e))", "def moving_avg_COVID19(self):\r\n \r\n # Calculate moving weekly averages and range. 
Use the total range of the pandemic\r\n \r\n # First month of outbreak (that we have data for)\r\n first_month = min(self.day_mort_gov[0].month,self.day_m_google[0].month)\r\n # First day of outbreak\r\n first_day = min(self.day_mort_gov[0].day,self.day_m_google[0].day)\r\n self.outbreak_start_date = datetime.datetime(2020,first_month,first_day)\r\n # Last day of data\r\n last_data_month = max(self.day_mort_gov[-1].month,self.day_m_google[-1].month)\r\n last_data_day = max(self.day_mort_gov[-1].month,self.day_m_google[-1].month)\r\n self.outbreak_last_data_date = datetime.datetime(2020,last_data_day,last_data_day)\r\n \r\n self.num_days_outbreak = (self.outbreak_last_data_date-self.outbreak_start_date).days\r\n \r\n\r\n \r\n # Get days and data on days\r\n self.outbreak_obs_days = np.zeros(self.num_days_outbreak ,dtype=datetime.datetime)\r\n self.outbreak_obs_days[0] = self.outbreak_start_date\r\n self.t_outbreak = np.arange(0,self.num_days_outbreak,step=1)\r\n self.R_data_daily = np.nan*np.ones(self.num_days_outbreak )\r\n self.m_data_daily = np.nan*np.ones(self.num_days_outbreak )\r\n \r\n for day in range(0,self.num_days_outbreak):\r\n \r\n if day > 0:\r\n \r\n self.outbreak_obs_days[day] = self.outbreak_obs_days[day-1] + datetime.timedelta(days=1)\r\n \r\n for day2 in range(0,len(self.day_mort_gov)):\r\n \r\n if (self.outbreak_obs_days[day].day == self.day_mort_gov[day2].day and self.outbreak_obs_days[day].month == self.day_mort_gov[day2].month):\r\n\r\n self.R_data_daily[day] = self.R_per_day_gov[day2]\r\n \r\n \r\n break\r\n \r\n for day3 in range(0,len(self.day_m_google)):\r\n \r\n if (self.outbreak_obs_days[day].day == self.day_m_google[day3].day and self.outbreak_obs_days[day].month == self.day_m_google[day3].month):\r\n \r\n self.m_data_daily[day] = self.m_google[day3]\r\n \r\n \r\n break\r\n \r\n \r\n # Get weekly sets\r\n \r\n # Firstly we find weeks\r\n self.num_weeks_outbreak = 0\r\n\r\n for day in range(0,self.num_days_outbreak):\r\n\r\n if self.outbreak_obs_days[day].weekday() == 0:\r\n \r\n \r\n if day + 7 < self.num_days_outbreak-1:\r\n \r\n self.num_weeks_outbreak = self.num_weeks_outbreak + 1\r\n \r\n # Next find specific date for week\r\n self.outbreak_obs_weekly = np.zeros(self.num_weeks_outbreak,dtype=datetime.datetime)\r\n self.R_week_50 = np.nan*np.ones(self.num_weeks_outbreak)\r\n self.R_week_95 = np.nan*np.ones((2,self.num_weeks_outbreak))\r\n self.m_week_50 = np.nan*np.ones(self.num_weeks_outbreak)\r\n self.m_week_95 = np.nan*np.ones((2,self.num_weeks_outbreak))\r\n \r\n \r\n week = 0\r\n \r\n for day in range(0,self.num_days_outbreak):\r\n \r\n if self.outbreak_obs_days[day].weekday() == 0:\r\n \r\n \r\n if day + 7 < self.num_days_outbreak-1:\r\n self.outbreak_obs_weekly[week] = self.outbreak_obs_days[day] + (self.outbreak_obs_days[day+7] - self.outbreak_obs_days[day])/2\r\n self.R_week_95[0,week] = np.percentile(self.R_data_daily[day:day+8],5)\r\n self.R_week_95[1,week] = np.percentile(self.R_data_daily[day:day+8],95) \r\n \r\n self.R_week_50[week] = np.percentile(self.R_data_daily[day:day+8],50)\r\n self.R_week_95[0,week] = np.percentile(self.R_data_daily[day:day+8],5)\r\n self.R_week_95[1,week] = np.percentile(self.R_data_daily[day:day+8],95) \r\n self.m_week_95[0,week] = np.percentile(self.m_data_daily[day:day+8],5)\r\n self.m_week_95[1,week] = np.percentile(self.m_data_daily[day:day+8],95) \r\n self.m_week_50[week] = np.percentile(self.m_data_daily[day:day+8],50) \r\n \r\n week = week + 1\r\n \r\n \r\n \r\n # Get Monthly sets\r\n # Firstly we find 
weeks\r\n \r\n self.num_months_outbreak = 0\r\n \r\n current_month = -1\r\n\r\n for day in range(0,self.num_days_outbreak):\r\n\r\n if self.outbreak_obs_days[day].month > current_month:\r\n \r\n current_month = self.outbreak_obs_days[day].month\r\n num_days_in_month = (datetime.datetime(2020,current_month+1,1))-datetime.datetime(2020,current_month,1) \r\n self.num_months_outbreak = self.num_months_outbreak + 1\r\n \r\n \r\n # Next find specific date for week\r\n self.outbreak_obs_months = np.zeros(self.num_months_outbreak,dtype=datetime.datetime)\r\n \r\n self.R_month_50 = np.nan*np.ones(self.num_months_outbreak)\r\n self.R_month_95 = np.nan*np.ones((2,self.num_months_outbreak))\r\n self.m_month_50 = np.nan*np.ones(self.num_months_outbreak)\r\n self.m_month_95 = np.nan*np.ones((2,self.num_months_outbreak))\r\n \r\n \r\n current_month = -1\r\n month = 0\r\n \r\n for day in range(0,self.num_days_outbreak):\r\n \r\n if self.outbreak_obs_days[day].month > current_month: \r\n \r\n current_month = self.outbreak_obs_days[day].month\r\n dmonth = datetime.datetime(2020,current_month+1,1)-datetime.datetime(2020,current_month,1)\r\n self.outbreak_obs_months[month] = self.outbreak_obs_days[day] + (datetime.datetime(2020,current_month+1,1)-datetime.datetime(2020,current_month,1))/2\r\n num_days_in_month = min(day+dmonth.days,self.num_days_outbreak)\r\n self.R_month_95[0,month] = np.nanpercentile(self.R_data_daily[day:num_days_in_month],5)\r\n self.R_month_95[1,month] = np.nanpercentile(self.R_data_daily[day:num_days_in_month],95)\r\n self.R_month_50[month] = np.nanpercentile(self.R_data_daily[day:num_days_in_month],50)\r\n self.R_month_95[0,month] = np.nanpercentile(self.R_data_daily[day:num_days_in_month],5)\r\n self.R_month_95[1,month] = np.nanpercentile(self.R_data_daily[day:num_days_in_month],95) \r\n self.m_month_95[0,month] = np.nanpercentile(self.m_data_daily[day:num_days_in_month],5)\r\n self.m_month_95[1,month] = np.nanpercentile(self.m_data_daily[day:num_days_in_month],95) \r\n self.m_month_50[month] = np.nanpercentile(self.m_data_daily[day:num_days_in_month],50) \r\n \r\n month = month + 1\r\n \r\n return", "def monthly_avg(dacycle,avg):\n \n if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:\n raise IOError,'Choice of averaging invalid'\n\n analysisdir = dacycle['dir.analysis']\n\n daydir = os.path.join(analysisdir , 'data_%s_daily'%avg)\n monthdir = os.path.join(analysisdir,'data_%s_monthly'%avg)\n\n if not os.path.exists(monthdir):\n print \"Creating new output directory \" + monthdir\n os.makedirs(monthdir)\n\n\n files = os.listdir(daydir) # get daily files\n files = [f for f in files if '-' in f and f.endswith('.nc')]\n\n if len(files) < 28:\n print 'No month is yet complete, skipping monthly average'\n return\n\n fileinfo = {}\n for filename in files: # parse date from each of them\n date=datetime.datetime.strptime(filename.split('.')[-2],'%Y-%m-%d')\n fileinfo[filename] = date\n\n years = [d.year for d in fileinfo.values()] # get actual years\n months = set([d.month for d in fileinfo.values()]) # get actual months\n \n sd = datetime.datetime(min(years),1,1)\n ed = datetime.datetime(max(years)+1,1,1)\n\n while sd < ed: \n\n nd = sd + relativedelta(months=+1)\n\n ndays_in_month = (nd-sd).days\n \n avg_files = [os.path.join(daydir,k) for k,v in fileinfo.iteritems() if v < nd and v >= sd]\n \n if len(avg_files) != ndays_in_month: # only once month complete \n #print 'New month (%02d) is not yet complete, skipping monthly average'%(sd.month)\n 
pass\n else:\n targetfile = os.path.join(monthdir,'%s_fluxes.%s.nc'%(avg,sd.strftime('%Y-%m')))\n if not os.path.exists(targetfile):\n print \"New month (%02d) is complete, I have %d days for the next file\"%(sd.month,ndays_in_month)\n command = ['ncra','-O']+ avg_files + [targetfile]\n status = subprocess.check_call(command)\n else:\n pass\n\n sd = nd", "def calculate_decay_metric(discont_lst, city_data_lst, roots_lst):\n \n\n discont_indices = get_discont_indices(city_lst) #indices of discontinuities in the city data\n \n real_disconts = []\n for i in range(len(discont_indices) - 1): #for each discontinuity except for the last one (we'll take care of it below)\n upper = discont_indices[i + 1] \n lower = discont_indices[i]\n real_disconts += [upper - lower] # add to real disconts length between the i-th and the i+1-th discontinuity\n \n \n real_disconts += [len(city_data_lst) - 1 - discont_indices[len(discont_indices) - 1]] # the last discont len\n \n\n \"\"\"(2) Creatingthe ideal disconts based on the ideal decay coefficients\"\"\"\n \n ideal_disconts = [] \n decay_coeff = roots_lst[len(discont_indices) + 1] #decay coefficient that generates our ideal geometric discontinuity distribution\n \n for k in range(1, len(discont_indices) + 2): #for each number in the list of \n ideal_disconts += [len(discont_lst) * decay_coeff**k] #add ideal discont to list\n \n\n \"\"\"(3) calculates the final average of the pairwise differences between the ideal distribution\n of discontinuities and the real distribution of discontinuities\"\"\"\n \n pairwise_diffs = 0\n for j in range(len(ideal_disconts)): #for each j indexing the number of ideal discontinuities \n pairwise_diffs += abs(real_disconts[j] - ideal_disconts[j]) #calculates difference between the indexes of ideal and real discontinuities\n\n return pairwise_diffs / (len(discont_indices) + 1) #returns pairwise differences normalized by number of discontinuities\n \n \n \"\"\"Calculates the decay metric over each city dataset in the sample.\n \n inputs:\n - discont_data array of discontinuity data for the cities in question\n - city_data: array of raw patch data for cities in question\n - root_lst: sufficiently large list of roots for the number of discontinuities in question \n \n \"\"\"\n \n def decay_metric(discont_lst, city_data, root_lst):\n outer = [] #outer layer indexed by city\n i = 0\n while i < len(city_data): \n inner = []\n j = 0\n while j < len(nice_lst[i]): #inner layer indexed by time\n inner += [calculate_decay_metric(discont_data[i][j], city_data[i][j], root_lst)] #calculate decay metric\n j += 1\n i += 1\n outer += [inner]\n return np.array(final) #convert to np array and return", "def mavg_cr(params : Dict[str, int], dataParams : Dict[str, str], logger : logging = None) -> pd.DataFrame:\n \"\"\" params.S : short period \"\"\"\n \"\"\" params.L : long period \"\"\"\n \"\"\" Long when short mvag cross over the long mvag from below, vice versa. 
\"\"\"\n if logger is None:\n logger = setup_logger(\"mavg_logger\")\n\n data = pd.DataFrame()\n try:\n exch, sym, freq, resample_to = dataParams[\"exch\"], dataParams[\"sym\"], dataParams[\"freq\"], dataParams[\"resample_to\"]\n data = load_symbol(exch, sym, freq, resample_to)\n except KeyError as e:\n logger.error(f\"Couldn't load data for strategy mavg with data {dataParams}, {e}.\")\n else:\n logger.info(\"Loaded data for strategy mavg {params}.\")\n\n try:\n S, L, D = params[\"S\"], params[\"L\"], params[\"D\"]\n except KeyError as e:\n logger.error(f\"No defined S/L in mavg, {e}.\")\n else:\n if \"close\" in data.columns:\n close = pd.DataFrame({\"close\" : data[\"close\"]})\n s_avg = close.rolling(window=S, min_periods=int(S/2)).mean()\n l_avg = close.rolling(window=L, min_periods=int(L/2)).mean()\n data[\"short\"] = s_avg[\"close\"]\n data[\"long\"] = l_avg[\"close\"]\n data[\"signal\"] = 0.0\n data.loc[data[\"short\"].shift(D) > data[\"long\"].shift(D), \"signal\"] = 1.0\n data.loc[data[\"short\"].shift(D) < data[\"long\"].shift(D), \"signal\"] = -1.0\n\n return data", "def get_vwmacd(data):\n if data is None:\n raise EmptyDataError('[!] Invalid data value')\n\n result = TA.VW_MACD(data)\n if result is None:\n raise IndicatorException\n return result", "def calculate_clim_anoms(var, var_dates):\n d_counts=[]\n var_clim = np.zeros_like(var)\n var_climstd = np.zeros_like(var)\n for m in range(1,13): #for each month\n mo_ind = (var_dates[1,:]==m)\n day_options = np.unique(var_dates[2,mo_ind])\n \n #print(day_options) #for diagnostics \n for d in range(0,np.size(day_options)): #for each possible day\n d_ind = (mo_ind) & (var_dates[2,:]==day_options[d])\n\n var_days = var[:,:,d_ind]\n var_daysav = np.nanmean(var_days,2)\n var_daysstd = np.nanstd(var_days,2)\n \n var_clim[:,:,d_ind] = np.transpose(np.tile(var_daysav,(np.sum(d_ind),1,1)),(1,2,0))\n var_climstd[:,:,d_ind] = np.transpose(np.tile(var_daysstd,(np.sum(d_ind),1,1)),(1,2,0))\n \n d_counts.append(np.sum(d_ind)) #this is just for diagnostics\n \n var_anom = var - var_clim\n var_anom_scaled = var_anom/var_climstd\n \n return var_anom, var_anom_scaled;", "def MAD(X):\n return np.median(np.abs(X - np.median(X)))", "def moving_average(data, beta):\n avg = 0\n maverages = []\n for i in range(len(data)):\n avg = avg * beta + (1 - beta) * data[i]\n maverages.append(avg / (1 - (beta ** (i + 1))))\n return maverages", "def moving_average_convergence(x, nslow=8, nfast=4):\n NO2_emafast,NO2_emaslow,NO2_macd = opensensor.getMovingAverageConvergence(x, nslow, nfast)\n return pandas.Series(NO2_macd)", "def calc_dda(self, feedrate, spm):\n\n second_const = 60\n micro_second_const = 1000000\n #dda = micro_second_const / (feedrate * spm)\n dda = second_const * micro_second_const / (feedrate * spm) #Assuming feedrate in mm/min\n return dda", "def Moving_Average_System(signal,M = 10):\t\t\t\t\t\t\t\t# Function of Moving Average System using Ideal Delay System\n\tp,q,s = M,signal.shape[0]- M,signal.shape[0]\n\tsignal_new = np.zeros(s+M)\n\t\n\tfor i in range(M+1):\n\t\tsignal_new[M-i:M-i+s] += Signal_Ideal_Delay(signal,d=i)[0]\n\t\t\n\tsignal_new = signal_new/(M + 1)\t\t\n\ttime = np.arange(0,s+M)\n\t\n\treturn signal_new,time", "def calculate_mae(e):\n return np.mean(np.abs(e))", "def moving_average_convergence(data, nslow=26, nfast=12):\n\ttry:\n\t\tx = data['Adj Close'].tolist()\n\texcept:\n\t\tx = data.tolist()\n\n\temaslow = moving_average(x, nslow, type='exponential')\n\temafast = moving_average(x, nfast, type='exponential')\n\treturn emafast 
- emaslow", "def estimate_bpm(D):\n if len(D) < 2*ignore:\n return 0\n else:\n return 1/np.mean(np.diff(D))*60", "def compute_EMA(self, series, num_days=50):\n temp = series.copy().reset_index(drop=True) # DO NOT MODIFY THE ORIGINAL DATAFRAME!\n smoothing_factor = 2/(num_days+1)\n EMA_prev = 0.0\n for idx in range(len(temp)):\n EMA_current = (temp[idx]*smoothing_factor)+EMA_prev*(1-smoothing_factor)\n # update values for next iteration\n temp[idx] = EMA_current\n EMA_prev = EMA_current \n return temp", "def MACD_mod(df,nl=12,nh=26,nsig=9):\n # Get just the adjusted close\n close = df['Adj Close']\n mal = close.ewm(span=nl).mean()\n mah = close.ewm(span=nh).mean()\n macd = mal-mah\n sig = macd.ewm(span=nsig).mean()\n \n df['MACD'] = macd-sig\n return df", "def strategies(df, days, MA=False, EMA=False, MACD=False):\r\n if MA == True:\r\n # simple moving average\r\n df[\"MA\"] = df[\"Adj Close\"].rolling(window=days).mean()\r\n\r\n if EMA == True:\r\n # exponential moving average\r\n df[\"EMA\"] = df[\"Adj Close\"].ewm(span=days).mean()\r\n\r\n if MACD == True:\r\n # exponential moving average\r\n df[\"EMA_26\"] = df[\"EUR/USD.Adjusted\"].ewm(span=26).mean()\r\n df[\"EMA_126\"] = df[\"EUR/USD.Adjusted\"].ewm(span=126).mean()\r\n\r\n return df", "def strategies(df, days, MA=False, EMA=False, MACD=False):\r\n if MA == True:\r\n # simple moving average\r\n df[\"MA\"] = df[\"Adj Close\"].rolling(window=days).mean()\r\n\r\n if EMA == True:\r\n # exponential moving average\r\n df[\"EMA\"] = df[\"Adj Close\"].ewm(span=days).mean()\r\n\r\n if MACD == True:\r\n # exponential moving average\r\n df[\"EMA_26\"] = df[\"EUR/USD.Adjusted\"].ewm(span=26).mean()\r\n df[\"EMA_126\"] = df[\"EUR/USD.Adjusted\"].ewm(span=126).mean()\r\n\r\n return df", "def compute_mae(e):\n return np.mean(np.abs(e))", "def calculate_msd(self, ensemble=False):\n\n print('Calculating MSD...', end='', flush=True)\n start = timer.time()\n self.msd = timeseries.msd(self.z_interpolated.T[..., np.newaxis], 0, ensemble=ensemble, nt=self.nt).T\n print('Done in %.3f seconds' % (timer.time() - start))", "def DAM(self):\n return self.get_class_average(self.DAM_class_level)", "def mom(x):\n with mp.extradps(5):\n x = _validate_x_bounds(x, low=0, high=1,\n strict_low=True, strict_high=True)\n M1 = _mean(x)\n M2 = _mean([t**2 for t in x])\n c = (M1 - M2) / (M2 - M1**2)\n a = M1*c\n b = (1 - M1)*c\n return a, b", "def test_period_average():\n\n time_point = datetime(2012, 12, 31)\n period = 25\n spy = DEFAULT_ASSET_FACTORY.make_asset(\"SPY\")\n\n weatherman = weathermen.period_average(CALENDAR)\n forecast = weatherman(DEFAULT_ASSET_FACTORY, time_point, period)\n\n assert is_close(forecast.cagr(spy), .152)", "def MACD(df_dict, a=12 ,b=26, c=9):\n for df in df_dict:\n df_dict[df][\"ma_fast\"] = df_dict[df][\"close\"].ewm(span=a, min_periods=a).mean()\n df_dict[df][\"ma_slow\"] = df_dict[df][\"close\"].ewm(span=b, min_periods=b).mean()\n df_dict[df][\"macd\"] = df_dict[df][\"ma_fast\"] - df_dict[df][\"ma_slow\"]\n df_dict[df][\"signal\"] = df_dict[df][\"macd\"].ewm(span=c, min_periods=c).mean()\n df_dict[df].drop([\"ma_fast\",\"ma_slow\"], axis=1, inplace=True)", "def total_smear(self, DM):\n return sqrt((1000.0*self.obs.dt)**2.0 +\n (1000.0*self.obs.dt*self.downsamp)**2.0 +\n self.BW_smearing**2.0 +\n self.sub_smearing**2.0 +\n self.chan_smear(DM)**2.0)", "def DW_cal(data, data_sm):\n n = len(data)\n numerator = 0\n denominator = 0\n for i in range(n):\n if i == 0:\n numerator = numerator + 0\n else:\n numerator = numerator + ((data[i] - 
data_sm[i]) - (data[i-1] - data_sm[i-1]))**2\n denominator = denominator + (data[i] - data_sm[i])**2\n return numerator/denominator*n/(n - 1)", "def _transfer_adp(self):\n toleratedAtoms = []\n for atom in self['exp'].atoms:\n tolerated = atom.transfer_adp()\n if tolerated:\n toleratedAtoms.append(tolerated)\n for atom in toleratedAtoms:\n atom.averageADP()", "def get_MAE(prediction, actual_values):\n prediction_vector = prediction.flatten()\n diffs = np.abs(prediction_vector - actual_values)\n\n return np.mean(diffs)", "def ma(df, close_price_col_name=\"Close\", ma_col_name=\"MA\"):\r\n\r\n\t# Check N positive integer\r\n\twhile True:\r\n\r\n\t\tN = input(\"Please input period for moving average model (a positive integer (recommend: 10, 20, 50, 100, or 200 )): \")\r\n\r\n\t\ttry:\r\n\t\t\tif int(N) > 0:\r\n\t\t\t\tbreak\r\n\r\n\t\t\telif \".\" in N:\r\n\t\t\t\tprint(\"Please enter a positive integer, not a float \")\r\n\t\t\t\tcontinue\r\n\r\n\t\t\telif int(N) < 0:\r\n\t\t\t\tprint(\"Please enter a positive integer, not a negative one \")\r\n\t\t\t\tcontinue\r\n\r\n\t\texcept ValueError:\r\n\t\t\tprint(\"Please input a positive integer, not a string\")\r\n\t\t\tcontinue\r\n\r\n\t# Add column to store value of MA\r\n\tdf[ma_col_name] = df[close_price_col_name].rolling(window=int(N), min_periods=0).mean()\r\n\r\n\t# Plot\r\n\tplt.plot(df[close_price_col_name], label=\"Closing price\")\r\n\tplt.plot(df[ma_col_name], label=\"Moving average \" + N + \" days\")\r\n\tplt.title(\"Visualization of Moving Average \" + N + \" days\")\r\n\tplt.xlabel(\"Date\")\r\n\tplt.ylabel(\"Closing price\")\r\n\tplt.legend(loc='upper left')\r\n\tplt.show()\r\n\r\n\tdel df[ma_col_name] # delete the MA column for re-graphing\r", "def mae(actual, predicted):\n rms = np.abs(actual-predicted)\n\n # Returning the sqaure root of the root mean square\n return float(rms.mean())", "def addMovingAverages(self):\n\n if not isinstance(self.df, pd.DataFrame):\n raise TypeError('Pandas DataFrame required.')\n\n if not 'close' in self.df.columns:\n raise AttributeError(\"Pandas DataFrame 'close' column required.\")\n\n if not self.df['close'].dtype == 'float64' and not self.df['close'].dtype == 'int64':\n raise AttributeError(\n \"Pandas DataFrame 'close' column not int64 or float64.\")\n\n # calculate cumulative moving average\n self.df['cma'] = self.df.close.expanding().mean()\n\n # calculate exponential moving averages\n self.df['ema12'] = self.df.close.ewm(span=12, adjust=False).mean()\n self.df['ema26'] = self.df.close.ewm(span=26, adjust=False).mean()\n\n # calculate simple moving averages\n self.df['sma20'] = self.df.close.rolling(20, min_periods=1).mean()\n self.df['sma50'] = self.df.close.rolling(50, min_periods=1).mean()\n self.df['sma200'] = self.df.close.rolling(200, min_periods=1).mean()", "def std_mc_acc(ground_truth, prediction):\n y_ok = prediction == ground_truth\n acc = []\n for unique_y in np.unique(ground_truth):\n acc.append(np.sum(y_ok[ground_truth == unique_y]) * 1.0 / np.sum(ground_truth == unique_y))\n return np.mean(acc)", "def test_deconvolve_to_motor_error(self):\n tau = 50.0\n mrate = 50.0\n Mrate = 100.0\n\n tmax = 50.0\n dt = 0.1\n\n self.rule.tau = tau\n self.rule.min_rate = mrate\n self.rule.max_rate = Mrate\n self.rule.compress_rates = False\n self.rule.gain = 1\n self.rule.tau_deconv1 = tau\n\n self.motor.error_fct = lambda _: np.ones(self.Nsrc)\n\n M = simulation.StateMonitor(self.rule, 'out')\n\n sim = simulation.Simulation(self.source, self.motor, self.rule, M, dt=dt)\n sim.run(tmax)\n \n # 
the output should be almost constant\n self.assertAlmostEqual(np.std(M.out)/np.mean(M.out), 0)", "def test_KDA_approximate_montecarlo_convergence(testdata_cbma_full):\n est_a = KDA(null_method=\"approximate\")\n n_iters = 10\n est_e = KDA(null_method=\"montecarlo\", n_iters=n_iters)\n res_a = est_a.fit(testdata_cbma_full)\n res_e = est_e.fit(testdata_cbma_full)\n # Get smallest p-value above 0 from the montecarlo estimator; above this,\n # the two should converge reasonably closely.\n min_p = 1 / n_iters\n p_idx = res_e.maps[\"p\"] > min_p\n p_approximate = res_a.maps[\"p\"][p_idx]\n p_montecarlo = res_e.maps[\"p\"][p_idx]\n # Correlation must be near unity and mean difference should be tiny\n assert np.corrcoef(p_approximate, p_montecarlo)[0, 1] > 0.98\n assert (p_approximate - p_montecarlo).mean() < 1e-3", "def MACD(df_dict, a=12, b=26, c=9):\r\n for df in df_dict:\r\n df_dict[df][\"ma_fast\"] = df_dict[df][\"close\"].ewm(span=a, min_periods=a).mean()\r\n df_dict[df][\"ma_slow\"] = df_dict[df][\"close\"].ewm(span=b, min_periods=b).mean()\r\n df_dict[df][\"macd\"] = df_dict[df][\"ma_fast\"] - df_dict[df][\"ma_slow\"]\r\n df_dict[df][\"signal\"] = df_dict[df][\"macd\"].ewm(span=c, min_periods=c).mean()\r\n df_dict[df].drop([\"ma_fast\", \"ma_slow\"], axis=1, inplace=True)", "def get_convergence_episode(self):\n values = self.stats['return_stats']['episode_totals']\n _, y, (y_lower, _) = self._moving_average(\n values, window=_ROLLING_WINDOW, p=_CONFIDENCE_LEVEL)\n # The convergence is established as the first time the average return\n # is above the lower bounds of the final return.\n first_episode = max(np.argmax(y >= y_lower[-1]), 1)\n return first_episode", "def MACD(prices, slow, fast, signal):\r\n emaslow = expMovingAverage(prices, slow)\r\n emafast = expMovingAverage(prices, fast)\r\n emasignal = expMovingAverage(prices, signal )\r\n return emaslow, emafast, emafast - emaslow, emasignal", "def compute_mape(self, data):\n return float(100) / len(self.ground_truth) * np.sum(np.abs((self.ground_truth.value - data.value) / self.ground_truth.value))", "def mad(X,c=0.6744897501960817,**kwargs):\n \n s = np.median(np.abs(X - np.median(X,axis=0)),axis=0)/c\n s = np.array(s).reshape(-1)\n # statsmodels.robust.mad is not as flexible toward matrix input, \n # sometimes throws a value error in ufunc\n return s", "def compute_advantage_montecarlo(V, s, ss, r, absorbing, gamma):\n r = r.squeeze()\n q = np.zeros(len(r))\n v = V(s).squeeze()\n\n q_next = V(ss[-1]).squeeze().item()\n for rev_k in range(len(r)):\n k = len(r) - rev_k - 1\n q_next = r[k] + gamma * q_next * (1. 
- absorbing[k])\n q[k] = q_next\n\n adv = q - v\n return q[:, np.newaxis], adv[:, np.newaxis]", "def calc_mad(a,b):\n comb = a + b\n idx = np.array(range(len(a)))[~np.isnan(comb)]\n a1=a[idx]\n b1=b[idx]\n N = len(a1)\n mad = np.sum(np.abs(a1-b1))/N\n return mad", "def SimpleMovingAverage(self, timeperiod = 14): \r\n return ta.SMA(self.data.close,timeperiod)", "def indicator_logic(self, candle):\n # Initialize variables\n sma, upper, lower = 2, -1.0, -1.0 # 'sma' = 2 is clever way to generate 'a favor' e 'contra'\n\n # Append close to moving average\n self.ma.append(candle.close[self.up])\n\n # Check if there are enough candles to calculate moving average\n if len(self.ma) == self.period:\n\n # Initialize upper and lower values for when there is a valid moving average\n upper, lower = 0.0, 0.0\n\n # Calculates moving average\n avg = sum(self.ma) / self.period\n\n # Tells if current close is above moving average\n sma = 1 if candle.close[self.up] > avg else 0\n\n # Calculates standard deviation\n std = pstdev(self.ma)\n\n # Calculates difference between current candle and moving average\n diff = candle.close[self.up] - avg\n\n # Transform difference to standard deviations\n if diff > 0 and std != 0:\n # Value of above\n upper = diff / std\n elif diff < 0 and std != 0:\n # Value if below\n lower = -diff / std\n\n # Returns values\n return sma, upper, lower", "def get_macd(data):\n if data is None:\n raise EmptyDataError('[!] Invalid data value')\n\n result = TA.MACD(data)\n if result is None:\n raise IndicatorException\n return result", "def update_and_calculate(self, value):\r\n retval = -1\r\n diff = abs(self.ewma - value)\r\n if self.n >= 5: # only calculate meandevs if collected > 5 data pts.\r\n if self.ewmmd > 0:\r\n meandevs = diff/self.ewmmd\r\n else:\r\n meandevs = diff/.00001\r\n retval = meandevs\r\n \r\n # update ewma/ewmmd\r\n self.n += 1\r\n if self.n > 1:\r\n if self.n > 2:\r\n self.ewmmd = (.125*diff) + (.875*self.ewmmd)\r\n else:\r\n self.ewmmd = diff\r\n self.ewma = (.125*value) + (.875*self.ewma)\r\n else:\r\n self.ewma = value\r\n return retval", "def monthly_avgs(HC):\n monthlies = np.zeros((int(len(HC)/12),12))\n counter_m = 0 #keeps track of years\n counter_n = 0 #keeps track of months\n for i in range(len(HC)):\n if counter_n<12:\n monthlies[counter_m,counter_n] = HC[i]\n counter_n += 1\n else:\n counter_m += 1\n monthlies[counter_m,0] = HC[i]\n counter_n = 1\n monthly_avgs = np.zeros((12))\n months = np.zeros((12))\n for i in range(12):\n monthly_avgs[i] = np.mean(monthlies[:,i])\n months[i] = i+1\n \n return months, monthly_avgs", "def get_evmacd(data):\n if data is None:\n raise EmptyDataError('[!] Invalid data value')\n\n result = TA.EV_MACD(data)\n if result is None:\n raise IndicatorException\n return result", "def compute_momentum_signals(self, series):\n temp = series.copy().reset_index(drop=True) # DO NOT MODIFY THE ORIGINAL DATAFRAME!\n t1 = self.compute_EMA(temp, num_days=12)\n t2 = self.compute_EMA(temp, num_days=26)\n MACD = t1-t2\n signal_line = self.compute_EMA(MACD, num_days=9)\n return MACD, signal_line", "def calculateLatestThreeDayMA(self, closingPrices):\n return ((closingPrices[0]+closingPrices[1]+closingPrices[2])/3)", "def mch_approximation( samples, dlamda ):\n dE = calc_e(samples,dlamda)\n dE -= dE.min()\n ZFraction = 1. 
/ np.mean(np.exp(-dE))\n predsisj = pair_corr( samples, weights=np.exp(-dE)/len(dE) )[1] * ZFraction \n assert not (np.any(predsisj<-1.00000001) or\n np.any(predsisj>1.000000001)),\"Predicted values are beyond limits, (%1.6f,%1.6f)\"%(predsisj.min(),\n predsisj.max())\n return predsisj", "def sma(self, normalize=False, window=20):\n adj_close = self.daily['Adj Close']\n if normalize: adj_close = self.normalize(adj_close)\n sma = adj_close.rolling(window).mean()\n return sma", "def cal_beam_MADMFD(infile):\n\n data = np.loadtxt(infile)\n maxfdensity = data[:,8]\n mad_maxfdensity = round(median_absolute_deviation(maxfdensity), 3)\n \n return mad_maxfdensity", "def RMSD(ccdata1, ccdata2):\n natom = ccdata1.natom\n rmsd = 0.0\n maxdiff = 0.0\n for i in range(natom):\n diff = norm(ccdata1.atomcoords[0][i] - ccdata2.atomcoords[0][i])\n rmsd += diff\n if diff > maxdiff:\n maxdiff = diff\n\n rmsd /= natom\n\n return rmsd, maxdiff", "def mad(errors):\n med = np.median(errors)\n return np.median(np.abs(errors - med))", "def calc_conf(deviation, tolerance, mape):\n return (1 - ((mape / 100) * (deviation/tolerance))) * 100", "def _epsilon(vds) -> np.ndarray:\n return vds[\"rhod_tot\"] / vds[\"rho\"]", "def smape(actual: np.ndarray, predicted: np.ndarray):\n return np.mean(np.divide(np.abs(actual - predicted), (np.abs(actual) + np.abs(predicted) + np.finfo(float).eps) * 0.5))", "def calcDVavg(supplyvol, demandvol):\n dvavg = (supplyvol - demandvol)/(0.5 * (supplyvol + demandvol))\n return dvavg", "async def update_mas(self, pair: str):\n\n for window in config['ma_windows']:\n try:\n num = self.last_update_nums[pair]\n source = self.adjusted_close_values[pair]\n ma = self.source_close_value_mas[pair][window]\n source_len = len(source)\n\n for index in range(source_len - num, source_len):\n average = sum(source[index - window:index]) / window\n ma.append(average)\n\n truncate = len(ma) - self.min_tick_length\n if truncate > 60:\n del ma[:truncate]\n\n self.close_value_mas[pair][window] = ma\n\n except IndexError:\n self.log.error('Cannot update MA {} for {} with data length of {}!',\n window, pair, len(self.adjusted_close_values[pair]))\n\n for window in config['vdma_windows']:\n try:\n num = self.last_update_nums[pair]\n source = self.base_24hr_volumes[pair][1]\n ma = self.volume_deriv_mas[pair][window]\n source_len = len(source)\n\n for index in range(source_len - num, source_len):\n average = sum(source[index - window:index]) / window\n ma.append(average)\n\n truncate = len(ma) - self.min_tick_length\n if truncate > 60:\n del ma[:truncate]\n\n except IndexError:\n self.log.error('Cannot update VDMA {} for {} with data length of {}!',\n window, pair, len(self.base_24hr_volumes[pair][1]))\n\n self.log.debug('{} Updated moving averages.', pair, verbosity=1)", "def nmaef(actual: np.ndarray, predicted: np.ndarray):\n mage = np.mean(np.fabs(predicted - actual))\n avg_m = np.mean(predicted)\n avg_o = np.mean(actual)\n\n # df_pos == M >= O -> avg(M)/avg(O) - 1\n # df_neg == M < O -> 1 - avg(O)/avg(M)\n if avg_m >= avg_o:\n return mage / avg_o\n else:\n return mage / avg_m", "def test_compare_averages_asymptotics(self):\n # load models\n models = [pybamm.lead_acid.LOQS(), pybamm.lead_acid.Full()]\n\n # load parameter values (same for all models)\n param = models[0].default_parameter_values\n param.update({\"Current function [A]\": 1})\n for model in models:\n param.process_model(model)\n\n # set mesh\n var_pts = {\"x_n\": 10, \"x_s\": 10, \"x_p\": 10}\n\n # discretise models\n for model in models:\n geometry 
= model.default_geometry\n param.process_geometry(geometry)\n mesh = pybamm.Mesh(geometry, model.default_submesh_types, var_pts)\n disc = pybamm.Discretisation(mesh, model.default_spatial_methods)\n disc.process_model(model)\n\n # solve model\n solutions = []\n t_eval = np.linspace(0, 3600 * 17, 100)\n for model in models:\n solution = pybamm.CasadiSolver().solve(model, t_eval)\n solutions.append(solution)\n\n # test averages\n comparison = StandardOutputComparison(solutions)\n comparison.test_averages()", "def yearly_avg(dacycle,avg):\n\n if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:\n raise IOError,'Choice of averaging invalid'\n\n analysisdir = dacycle['dir.analysis']\n monthdir = os.path.join(analysisdir , 'data_%s_monthly'%avg )\n yeardir = os.path.join(analysisdir,'data_%s_yearly'%avg)\n\n if not os.path.exists(yeardir):\n print \"Creating new output directory \" + yeardir\n os.makedirs(yeardir)\n\n files = os.listdir(monthdir) # get monthly files\n files = [f for f in files if '-' in f and f.endswith('.nc')]\n\n if not files:\n print \"No full year finished yet, skipping yearly average...\"\n return\n\n fileinfo = {}\n for filename in files:\n date=datetime.datetime.strptime(filename.split('.')[-2],'%Y-%m')\n fileinfo[filename] = date\n\n years = set([d.year for d in fileinfo.values()])\n\n sd = datetime.datetime(min(years),1,1)\n ed = datetime.datetime(max(years)+1,1,1)\n\n while sd < ed: \n\n nd = sd + relativedelta(years=+1)\n \n avg_files = [os.path.join(monthdir,k) for k,v in fileinfo.iteritems() if v < nd and v >= sd]\n \n if not len(avg_files) == 12 : \n print \"Year %04d not finished yet, skipping yearly average...\"%sd.year\n else:\n targetfile = os.path.join(yeardir,'%s_fluxes.%s.nc'%(avg,sd.strftime('%Y')))\n \n if not os.path.exists(targetfile):\n print \"Year %04d is complete, I have 12 months for the next file\"%sd.year\n command = ['ncra','-O']+ avg_files + [targetfile]\n status = subprocess.check_call(command)\n\n sd = nd", "def mean_annual_cycle(data):\n ntime, nlat, nlon = data.shape\n # reshape from [nmonth,nlat,nlon] to [nyear,12,nlat,nlon]\n work = MA.reshape(data,(-1,12,nlat,nlon))\n # compute mean annual cycle\n mean_data = MA.average(work,0)\n return mean_data", "def calc_mae(y: np.ndarray, y_hat: np.ndarray) -> float:\n return np.mean(np.abs(y - y_hat))", "def calcAdl(self):\n adl = []\n for i,j in enumerate(self.stock_data['Close']):\n #Calculating money flow measure\n mfm_nom = ((j-self.stock_data['Low'][i])-(self.stock_data['High'][i]))\n mfm_deno = self.stock_data['High'][i]-self.stock_data['Low'][i]\n mfm = mfm_nom/mfm_deno\n #Calculating money flow volume\n mfv = mfm*self.stock_data['Volume'][i]\n #Calculating accumulated distributin line\n if not adl:\n print(mfm)\n adl.append(mfv)\n else:\n print(adl)\n adl.append(mfv+adl)\n def_dates, def_points, k = self.angle(self.dates, adl, self.type) \n return k", "def ewma(y, alpha):\n avg = np.zeros(len(y))\n avg[0] = y[0]\n for i in range(1, len(y)):\n avg[i] = alpha * y[i] + (1 - alpha) * avg[i - 1]\n\n return avg", "def calc_meandiff(sig):\n\n return np.mean(np.diff(sig))", "def test_realistic_mean_dose(self):\n\n for struct, data in self.test_structs.items():\n dvh = DVH(data[\"doses\"], data[\"volumes\"])\n diff = dvh.mean_dose - data[\"monaco_dvh_mean_dose\"]\n self.assertLessEqual(abs(diff), 1)", "def calculateAverage(self): \n if not self.lastTransferAverage: \n size=[0,0,0,0]\n for i in range(0,4):\n if self.lastTransferredChannel & (1 << i):\n 
size[i]=self.lastNbrSamplesPerSeg\n self.lastAverageArray = [zeros(size[0]),zeros(size[1]),zeros(size[2]),zeros(size[3])]\n nbrSamp=self.lastNbrSamplesPerSeg\n for i in range(0,4):\n if self.lastTransferredChannel & (1 << i):\n nbrSeg=self.lastNbrSegmentsArray[i]\n for j in range (0,nbrSamp):\n for k in range(0,nbrSeg): \n self.lastAverageArray[i][j]+=self.lastWaveformArray[i][k*nbrSamp+j]\n self.lastAverageArray[i][j]/=nbrSeg\n self.lastAverageCalculated=True\n else: print \"NOn averaged data are not available\"", "def EMA(df, base, target, period, alpha=False):\n\n con = pd.concat([df[:period][base].rolling(window=period).mean(), df[period:][base]])\n\n if (alpha == True):\n # (1 - alpha) * previous_val + alpha * current_val where alpha = 1 / period\n df[target] = con.ewm(alpha=1 / period, adjust=False).mean()\n else:\n # ((current_val - previous_val) * coeff) + previous_val where coeff = 2 / (period + 1)\n df[target] = con.ewm(span=period, adjust=False).mean()\n\n df[target].fillna(0, inplace=True)\n return df", "def WCA_SA(targetMDG, WCAresult):\n hill_climbers = []\n for i in range(NUM_Population):\n hill_climbers.append(SimulatedAnnealing(targetMDG, WCAresult))\n\n completed_climbers = []\n completed_max_climbers = []\n\n # k: int, number of neighbors to be considered\n k = 20\n i = 0\n not_increased = 0\n max_score = 0\n Temperature = 20\n\n while True:\n for climber in hill_climbers[:]:\n result = climber.climb_with_annealing(k, Temperature)\n if not result:\n completed_climbers.append(climber)\n hill_climbers.remove(climber)\n max_completed_climber = SimulatedAnnealing(targetMDG)\n max_completed_climber.result = climber.max_result\n max_completed_climber.update_score()\n completed_max_climbers.append(max_completed_climber)\n\n total_climbers = hill_climbers + completed_climbers + completed_max_climbers\n total_climbers.sort()\n print(\"Iteration \", i, \": \", total_climbers[-1].score)\n\n if total_climbers[-1].score - max_score != 0:\n not_increased = 0\n else:\n not_increased += 1\n\n if len(hill_climbers) == 0 or not_increased == 10:\n break\n i += 1\n max_score = total_climbers[-1].score\n if Temperature > 0:\n Temperature -= 0.5\n\n total_climbers = hill_climbers + completed_climbers + completed_max_climbers\n total_climbers.sort()\n\n max_climber = total_climbers[-1]\n\n print(\"TurboMQ = \", max_climber.score)\n for c in max_climber.result: # print all clusters which are not singleton\n if 1 != len(c.get_nodes()):\n print(c.get_nodes())\n\n max_climber.remove_empty_cluster()\n return max_climber.result", "def _ewma_computation(self, period=50, column='adj_close'):\n return pd.Series(\n self.ohlcv[column].ewm(ignore_na=False, min_periods=period - 1,\n span=period).mean())", "def __calculate_moving_average(self,df):\n for m in Config.moving_average_conf:\n if m['type']=='simple':\n df[m['label']]=round(df[Config.PRICE_COL].rolling(window=m['days']).mean(),2)\n else:\n df[m['label']]=round(df[Config.PRICE_COL].ewm(span=m['days'], adjust=False).mean(),2)\n\n return df", "def mae(predicted, actual):\n #maybe make some assertions, assume have same length & in right order\n interm_total = 0\n for i in range(len(predicted)):\n interm_total += abs(predicted[i] - actual[i])\n return interm_total / len(predicted)", "def MA(self, symbol, period, shift):\n data = self.get_latest_bars_values(symbol, 'Close', period + shift)\n if len(data[0:-shift]) != 0:\n return np.mean(data[0:-shift])\n return 0", "def mean_variance_analysis(df):\n rets = np.log(df['close']/df['close'].shift(1))\n\n 
std = rets.std()* 252\n\n annualized_returns = rets.mean() * 252\n\n print(f'The annualized returns of the stock is {annualized_returns}, and the standard deviation of the stock is {std}')", "def calc_damage_moments(m_edge,freq,fos=2):\n\n d = 0.\n R = 0.5 #root cylinder radius\n I = 0.25*np.pi*(R**4-(R-0.08)**4)\n\n sigma = m_edge*R/I\n\n #find the peak stresses\n pp = scipy.signal.find_peaks(sigma)[0]\n pn = scipy.signal.find_peaks(-sigma)[0]\n p = np.append(0,pp)\n p = np.append(p,pn)\n p = np.append(p,len(sigma)-1)\n p = np.sort(p)\n peaks = np.zeros(len(p))\n vv = np.arange(len(sigma))\n v = np.zeros(len(p))\n\n for i in range(len(p)):\n peaks[i] = sigma[p[i]]\n v[i] = vv[p[i]]\n\n #rainflow counting\n array = rainflow(peaks)\n\n alternate = array[0,:]/2.\n mean = array[1,:]\n count = array[3,:]\n\n # Goodman correction\n # su = 3450000.\n # su = 4590000.\n # su = 596000.\n su = 535000.\n # su = 459000.\n mar = alternate/(1.-mean/su)\n\n npts = len(mar)\n\n # plt.plot(count,mar,'o')\n # plt.show()\n\n #damage calculations\n n = np.zeros(npts)\n m = 10.\n fos = 1.15\n # fos = 1.75\n for i in range(npts):\n # Nfail = 10.**(((-mar[i]*fos)/su+1.)/0.1)\n Nfail = ((su)/(mar[i]*fos))**m\n n[i] = Nfail\n mult = 20.*365.*24.*6.*freq\n\n d += count[i]*mult/Nfail\n # if count[i]*mult/Nfail > 0.02:\n # print Nfail\n\n\n # plt.plot(count,n,'o')\n # plt.show()\n\n\n return d", "def mse(predicted, actual):\n diff = predicted - actual\n return np.average(diff * diff, axis=0)", "def mch_approximation(samples, dlamda):\n dE = calc_e(samples, dlamda)\n ZFraction = len(dE) / np.exp(logsumexp(-dE))\n predsisj = pair_corr(samples, weights=np.exp(-dE)/len(dE), concat=True) * ZFraction \n assert not (np.any(predsisj < -1.00000001) or\n np.any(predsisj>1.000000001)),\"Predicted values are beyond limits, (%1.6f,%1.6f)\"%(predsisj.min(),\n predsisj.max())\n return predsisj", "def DM_for_newparams(self, dDM, downsamp):\n other_smear = sqrt((1000.0*self.obs.dt)**2.0 +\n (1000.0*self.obs.dt*downsamp)**2.0 +\n BW_smear(dDM, self.obs.BW, self.obs.f_ctr)**2.0 +\n self.sub_smearing**2.0)\n return 0.001*other_smear/self.obs.chanwidth*0.0001205*self.obs.f_ctr**3.0", "def duty_cycle(self):\n diff = np.diff(self.lc.time)\n t = np.median(diff)\n std = np.std(diff)\n mask = diff > (t + 3 * std)\n return (1 - np.sum(diff[mask]) / np.sum(diff))", "def timeseriesCVscore(self, params):\n errors = []\n\n # values = series.values\n values = self.train_ts\n self.alpha, self.beta, self.gamma = params\n\n # set the number of folds for cross-validation\n tscv = TimeSeriesSplit(n_splits=3)\n\n # iterating over folds, train model on each, forecast and calculate error\n for train, test in tscv.split(values):\n\n self.train = values[train]\n self.test = values[test]\n self.triple_exponential_smoothing()\n predictions = self.result[-len(self.test) :]\n actual = values[test]\n error = mape(list(actual), predictions)\n errors.append(error)\n\n # print \"error: \"\n # print errors\n return np.mean(np.array(errors))", "def calc_meanad(sig):\n m = np.mean(sig)\n diff = [abs(x-m) for x in sig]\n\n return np.mean(diff)", "def test_dual_averaging(self):\n\n # we need to wrap the gradient in a namedtuple as we optimize for a target\n # acceptance probability in the context of HMC.\n f = lambda x: (x - 1) ** 2\n grad_f = jax.jit(jax.grad(f))\n\n # Our target gradient is 0. 
we increase the rate of convergence by\n # increasing the value of gamma (see documentation of the algorithm).\n init, update, final = optimizers.dual_averaging(gamma=0.3)\n unpdate_fn = self.variant(update)\n\n da_state = init(3)\n for _ in range(100):\n x = jnp.exp(da_state.log_x)\n g = grad_f(x)\n da_state = unpdate_fn(da_state, g)\n\n self.assertAlmostEqual(final(da_state), 1.0, delta=1e-1)", "def moving_average(data, temporal_window=100):\n window = np.ones(temporal_window) / temporal_window\n return np.convolve(data, window, 'valid')", "def gain_loss_month_daily(current_data):\n months = current_data.groupby(pd.Grouper(freq='MS'))\n losses_monthly = pd.DataFrame(0, columns=current_data.columns, index=['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',\n 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'])\n for mo in months:\n # Per month per year, sum daily gains/losses\n monthly_sum = np.zeros((mo[1].shape[1],))\n for i in range(mo[1].shape[0] - 1):\n # loop over all days in month, except last day\n monthly_sum = monthly_sum + (mo[1].iloc[i+1] - mo[1].iloc[i])\n mon_str = str(mo[0].month_name()[:3])\n for index, val in monthly_sum.items():\n if val < 0:\n losses_monthly.loc[mon_str, index] = losses_monthly.loc[mon_str, index] + 1\n\n return losses_monthly", "def mad(self):\n observations_raw = input(\"Observations: \").split()\n observations = [int(elem) for elem in observations_raw]\n n = len(observations)\n mean = sum(observations)/n\n deviations = [xi - mean for xi in observations]\n abs_deviations = [abs(xi) for xi in deviations]\n mad = sum(abs_deviations)/n\n print(f\"Mean Absolute Deviation is: {mad}\")\n return mad", "def agc(data,dt,wagc=1.0):\n nt = data.shape[0] # number of time samples\n iwagc = int(wagc/dt/2) # half window size in samples\n data_orig = np.copy(data) # copy of input data\n d = data_orig # copy of input data\n nwin = iwagc # nwin is #samples in RMS computation\n sum = 0\n\n # compute initial window for first datum\n sum = np.apply_along_axis(lambda m: np.sum(m[0:iwagc]**2.0), axis=0, arr=d)\n with np.errstate(divide='ignore', invalid='ignore'):\n d[0,:] = np.true_divide(data_orig[0,:],np.sqrt(sum/nwin))\n d[0,d[0,:] == np.inf] = 0\n d[0,:] = np.nan_to_num(d[0,:])\n\n # The value tmp gets subtracted each time the moving window moves 1 sample\n # forward\n tmp = data_orig[0]**2.0\n\n # ramping on\n # Add a squared sample and increase window length nwin each iteration\n for t in range(0,iwagc+1):\n sum = sum + data_orig[t+iwagc,:]**2.0\n nwin += 1\n with np.errstate(divide='ignore', invalid='ignore'):\n d[t,:] = np.true_divide(data_orig[t,:],np.sqrt(sum/nwin))\n d[t,d[t,:] == np.inf] = 0\n d[t,:] = np.nan_to_num(d[t,:])\n\n\t# middle range -- full rms window\n # Add and subtract a squared sample\n for t in range(iwagc+1,nt-iwagc):\n sum = sum + data_orig[t+iwagc,:]**2.0 - tmp\n tmp = data_orig[t-iwagc,:]**2.0\n with np.errstate(divide='ignore', invalid='ignore'):\n d[t,:] = np.true_divide(data_orig[t,:],np.sqrt(sum/nwin))\n d[t,d[t,:] == np.inf] = 0\n d[t,:] = np.nan_to_num(d[t,:])\n\n # ramping off\n # Subtract a squared sample and decrease window length nwin each iteration\n for t in range(nt-iwagc,nt):\n sum = sum - tmp\n tmp = data_orig[t-iwagc,:]**2.0\n nwin -= 1\n with np.errstate(divide='ignore', invalid='ignore'):\n d[t,:] = np.true_divide(data_orig[t,:],np.sqrt(sum/nwin))\n d[t,d[t,:] == np.inf] = 0\n d[t,:] = np.nan_to_num(d[t,:])\n\n return d", "def EMA_tick(n_periods, current_value, previous_ema):\n\n most_recent_weight = 2 / (n_periods + 1)\n return 
(current_value - previous_ema) * most_recent_weight + previous_ema", "def __calc_mmd_maxconc(self,event):\n \n # Use smoothed data\n if self.particle_mode:\n data = np.log10(gaussian_filter(self.par_data,self.smooth,mode='constant'))\n dpdp,tt = np.meshgrid(self.par_diam,self.par_time)\n points = np.concatenate((tt.flatten()[np.newaxis].T,\n dpdp.flatten()[np.newaxis].T,\n data.flatten()[np.newaxis].T),\n axis=1)\n if self.ion_mode:\n data = np.log10(gaussian_filter(self.ion1_data,self.smooth,mode='constant'))\n dpdp,tt = np.meshgrid(self.ion1_diam,self.ion1_time)\n points = np.concatenate((tt.flatten()[np.newaxis].T,\n dpdp.flatten()[np.newaxis].T,\n data.flatten()[np.newaxis].T),\n axis=1)\n\n # Transform polygon perimeter to path\n try:\n banana_perimeter = Path(np.array(list(zip(self.polyx,self.polyy))))\n except ValueError:\n print (\"No polygon found\")\n return\n\n # Eliminate nans and infs from dndlogdp\n points = np.delete(points,np.argwhere((np.isnan(points[:,2]))|(np.isinf(points[:,2]))),axis=0)\n banana_points = points[banana_perimeter.contains_points(points[:,[0,1]]),:]\n\n if len(banana_points)==0:\n print (\"Found no points inside polygon.\")\n return\n \n # Grouping the size distribution data points\n if self.particle_mode:\n pre_sorted_banana_points = [banana_points[banana_points[:,1]==x,:] for x in self.par_diam if x in banana_points[:,1]]\n if self.ion_mode:\n pre_sorted_banana_points = [banana_points[banana_points[:,1]==x,:] for x in self.ion1_diam if x in banana_points[:,1]]\n \n sorted_banana_points = [x[x[:,0].argsort()] for x in pre_sorted_banana_points]\n \n for i in range(0,len(sorted_banana_points)):\n x = sorted_banana_points[i][:,0] - self.mintime\n y = sorted_banana_points[i][:,2]\n a=np.max(y)\n mu=np.mean(x)\n sigma=np.std(x)\n try:\n params,pcov = curve_fit(self.__gaus,x,y,p0=[a,mu,sigma])\n if ((params[1]>=x.max()) | (params[1]<=x.min())):\n print (\"Peak outside range. Skipping %f\" % (sorted_banana_points[i][0,1]))\n else:\n self.mmd_dp = np.append(self.mmd_dp,sorted_banana_points[i][0,1])\n self.mmd_time = np.append(self.mmd_time,params[1] + self.mintime)\n except:\n print (\"Diverges. Skipping %f\" % (sorted_banana_points[i][0,1]))\n\n # Plot the result on ax\n self.mmd_plot.set_data(self.mmd_time,self.mmd_dp)\n plt.draw()" ]
[ "0.6501676", "0.64562273", "0.6412754", "0.61223274", "0.6114551", "0.6047385", "0.5973728", "0.5865776", "0.5864788", "0.5819191", "0.575621", "0.5704251", "0.5700242", "0.5680569", "0.56672055", "0.56551254", "0.56424147", "0.56197876", "0.5551434", "0.5538618", "0.553018", "0.5524272", "0.55204755", "0.55169195", "0.55136293", "0.5503769", "0.5502352", "0.5502352", "0.54956526", "0.5494738", "0.5493412", "0.5485446", "0.5477621", "0.547242", "0.5470582", "0.54685545", "0.5453453", "0.54482836", "0.54319024", "0.54152817", "0.5413906", "0.5403734", "0.54027456", "0.5396397", "0.53873426", "0.5376697", "0.53706723", "0.53703725", "0.5368071", "0.5364864", "0.536144", "0.5341053", "0.53298247", "0.5326495", "0.5325919", "0.5324508", "0.53139824", "0.5309572", "0.53077006", "0.53021", "0.5298943", "0.52949864", "0.5292898", "0.52826995", "0.5277422", "0.527673", "0.5275845", "0.52610576", "0.5259331", "0.5255731", "0.52503854", "0.5242219", "0.5240853", "0.52405316", "0.5239378", "0.523607", "0.523042", "0.5226924", "0.52255255", "0.5218266", "0.5217291", "0.5216931", "0.5214187", "0.5206701", "0.5206681", "0.5206148", "0.5205319", "0.5201994", "0.5195133", "0.5185624", "0.5183696", "0.5180811", "0.5180196", "0.5178867", "0.5172748", "0.51720566", "0.51592207", "0.5157288", "0.51496404", "0.51477635" ]
0.56003624
18
The Simple Moving Average (SMA) is calculated by adding the price of an instrument over a number of time periods and then dividing the sum by the number of time periods. The SMA is basically the average price of the given time period, with equal weighting given to the price of each period. Simple Moving Average SMA = ( Sum ( Price, n ) ) / n
def SimpleMovingAverage(self, timeperiod = 14): return ta.SMA(self.data.close,timeperiod)
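Illustrative sketch only, not part of the dataset row above: the query defines SMA = Sum(Price, n) / n and the paired document wraps TA-Lib's ta.SMA, so the following plain-pandas version shows the same equal-weighted calculation in a self-contained form. The closing-price Series, the function name simple_moving_average, and the 14-period default are assumptions chosen to mirror the snippet above.

# Assumed, self-contained equivalent of SMA = Sum(Price, n) / n.
import pandas as pd

def simple_moving_average(close: pd.Series, timeperiod: int = 14) -> pd.Series:
    # Sum the last `timeperiod` prices and divide by `timeperiod`;
    # rolling(...).mean() does exactly that and leaves NaN until
    # a full window of prices is available.
    return close.rolling(window=timeperiod).mean()

# Usage example: a 3-period SMA over a small price series.
prices = pd.Series([10.0, 11.0, 12.0, 13.0, 14.0])
print(simple_moving_average(prices, timeperiod=3))
# -> NaN, NaN, 11.0, 12.0, 13.0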
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SMA(serie, n):\r\n\r\n return serie.rolling(window=n).mean()", "def get_SMA(values, window=20):\n\treturn values.rolling(window, center=False).mean()", "def sma(matrix, interval):\n\n # declare empty SMA numpy array\n s = np.zeros((matrix.shape[0] - interval))\n\n # calculate the value of each point in the Simple Moving Average array\n for t in range(0, s.shape[0]):\n s[t] = np.sum(matrix[t:t + interval])/interval\n\n return s", "def sma(matrix, interval):\n\n # declare empty SMA numpy array\n s = np.zeros((matrix.shape[0] - interval))\n\n # calculate the value of each point in the Simple Moving Average array\n for t in range(0, s.shape[0]):\n s[t] = np.sum(matrix[t:t + interval])/interval\n\n return s", "def add_simple_moving_average(smas, n, data):\n total = sum([data[-1-i] for i in range(n)])\n smas.append(total/n)", "def SMA(self, n=PERIOD_7, **kwargs):\n\n prices = self.df.close\n\n sma = prices.rolling(n, min_periods=MIN_PERIOD).mean()\n\n self.df[\"sma_\" + str(n)] = sma\n\n return sma", "def get_sma(self,period):\n #df=pandas.DataFrame()\n sma=self.close.rolling(period).mean()\n return sma", "def sma(self) -> float:\n return self._sma", "def get_moving_average(close, span):\n i = SMAIndicator(close, window=span)\n return i.sma_indicator()", "def sma(self, normalize=False, window=20):\n adj_close = self.daily['Adj Close']\n if normalize: adj_close = self.normalize(adj_close)\n sma = adj_close.rolling(window).mean()\n return sma", "def SMA(df, base, target, period):\n\n df[target] = df[base].rolling(window=period).mean()\n df[target].fillna(0, inplace=True)\n\n return df", "def arima_sma(prices, signal, name):\n\n sma_window = signal['params']['sma_window']\n sma_close = talib.SMA(prices['close'], sma_window).to_numpy()[:, None]\n signal['data'] = arima(sma_close, signal['params']['arima_window'], name)", "def test_sma(self):\n periods = 200\n sma_qufilab = qufilab.sma(self.close, periods)\n sma_talib = talib.SMA(self.close, periods)\n np.testing.assert_allclose(sma_qufilab, sma_talib, rtol = self.tolerance)", "def sma(y, n):\n N = len(y) - n\n if n < 0:\n raise ValueError(\"Input doesn't contain enough data for moving average.\")\n\n out = [y[i:i+n].mean() for i in range(len(y) - n)]\n out = np.array(out)\n\n return out", "def get_sma(data):\n if data is None:\n raise EmptyDataError('[!] 
Invalid data value')\n\n result = TA.SMA(data)\n if result is None:\n raise IndicatorException\n return result", "def moving_average(data, beta):\n avg = 0\n maverages = []\n for i in range(len(data)):\n avg = avg * beta + (1 - beta) * data[i]\n maverages.append(avg / (1 - (beta ** (i + 1))))\n return maverages", "def moving_average(a, n=3) :\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n", "def ma(df, close_price_col_name=\"Close\", ma_col_name=\"MA\"):\r\n\r\n\t# Check N positive integer\r\n\twhile True:\r\n\r\n\t\tN = input(\"Please input period for moving average model (a positive integer (recommend: 10, 20, 50, 100, or 200 )): \")\r\n\r\n\t\ttry:\r\n\t\t\tif int(N) > 0:\r\n\t\t\t\tbreak\r\n\r\n\t\t\telif \".\" in N:\r\n\t\t\t\tprint(\"Please enter a positive integer, not a float \")\r\n\t\t\t\tcontinue\r\n\r\n\t\t\telif int(N) < 0:\r\n\t\t\t\tprint(\"Please enter a positive integer, not a negative one \")\r\n\t\t\t\tcontinue\r\n\r\n\t\texcept ValueError:\r\n\t\t\tprint(\"Please input a positive integer, not a string\")\r\n\t\t\tcontinue\r\n\r\n\t# Add column to store value of MA\r\n\tdf[ma_col_name] = df[close_price_col_name].rolling(window=int(N), min_periods=0).mean()\r\n\r\n\t# Plot\r\n\tplt.plot(df[close_price_col_name], label=\"Closing price\")\r\n\tplt.plot(df[ma_col_name], label=\"Moving average \" + N + \" days\")\r\n\tplt.title(\"Visualization of Moving Average \" + N + \" days\")\r\n\tplt.xlabel(\"Date\")\r\n\tplt.ylabel(\"Closing price\")\r\n\tplt.legend(loc='upper left')\r\n\tplt.show()\r\n\r\n\tdel df[ma_col_name] # delete the MA column for re-graphing\r", "def EMA_tick(n_periods, current_value, previous_ema):\n\n most_recent_weight = 2 / (n_periods + 1)\n return (current_value - previous_ema) * most_recent_weight + previous_ema", "def ema(s, n):\r\n\r\n ema = []\r\n j = 1\r\n\r\n #get n sma first and calculate the next n period ema\r\n sma = sum(s[:n]) / n\r\n multiplier = 2 / float(1 + n)\r\n ema.append(sma)\r\n\r\n #EMA(current) = ( (Price(current) - EMA(prev) ) x Multiplier) + EMA(prev)\r\n ema.append(( (s[n] - sma) * multiplier) + sma)\r\n\r\n #now calculate the rest of the values\r\n for i in s[n+1:]:\r\n tmp = ( (i - ema[j]) * multiplier) + ema[j]\r\n j = j + 1\r\n ema.append(tmp)\r\n\r\n return ema", "def moving_average(self, a, n=3):\n ret = np.nancumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n", "def moving_average(data, period, type='simple'):\n\ttry:\n\t\tx = np.asarray(data['Adj Close'])\n\texcept:\n\t\tx = np.asarray(data)\n\n\tif type == 'simple':\n\t\tweights = np.ones(period)\n\telse:\n\t\tweights = np.exp(np.linspace(-1., 0., period))\n\n\tweights /= weights.sum()\n\n\ta = np.convolve(x, weights, mode='full')[:len(x)]\n\ta[:period] = a[period]\n\treturn a", "def moving_average(a, n=3) :\r\n a = a.ravel()\r\n a = np.concatenate(([a[0]]*(n-1),a)) # repeating first values\r\n ret = np.cumsum(a, dtype = float)\r\n ret[n:] = ret[n:] - ret[:-n]\r\n ret=ret[n - 1:] / n\r\n return ret", "def simple_moving_average(n, data):\n result = []\n for m in range(n-1, len(data)):\n total = sum([data[m-i] for i in range(n)])\n result.append(total/n)\n return result", "def moving_average(a, n=5):\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n ret[n-1:] *= 1 / n\n ret[:n-1] *= 1 / np.arange(1, n)\n return ret", "def TAS(px, high, low, w=10, n=3):\r\n\r\n minn = low.rolling(window=w).min() # min de minimos\r\n maxx = high.rolling(window=w).max() # max de maximos\r\n\r\n k = 100 * (px 
- minn) / (maxx - minn)\r\n d = SMA(k, n)\r\n return k, d", "def compute_EMA(self, series, num_days=50):\n temp = series.copy().reset_index(drop=True) # DO NOT MODIFY THE ORIGINAL DATAFRAME!\n smoothing_factor = 2/(num_days+1)\n EMA_prev = 0.0\n for idx in range(len(temp)):\n EMA_current = (temp[idx]*smoothing_factor)+EMA_prev*(1-smoothing_factor)\n # update values for next iteration\n temp[idx] = EMA_current\n EMA_prev = EMA_current \n return temp", "def WMA(serie, n=10):\r\n wg = np.arange(1, n+1)\r\n wma = serie.rolling(n).apply(lambda x: np.dot(x, wg)/wg.sum(), raw=True)\r\n\r\n return wma", "def EMA(serie, n):\r\n\r\n ewm = serie.ewm(n, adjust=False).mean()\r\n ewm[0:n] = [np.nan]*n\r\n return ewm", "def SMA(A: pd.DataFrame, n) -> pd.DataFrame:\r\n At = pivot_table(A)\r\n for i in range(len(At.columns)):\r\n At.iloc[:, i] = talib.SMA(At.iloc[:, i], n)\r\n res = stack_table(At)\r\n return res", "def moving_average(a, n: int = 3) -> np.array:\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n", "def calculateLatestThreeDayMA(self, closingPrices):\n return ((closingPrices[0]+closingPrices[1]+closingPrices[2])/3)", "def moving_average(sig, n=100):\n window = deque(maxlen=n) # last n scores\n sig_ma = []\n for i in range(len(sig)):\n window.append(sig[i])\n sig_ma.append(np.mean(window))\n return sig_ma", "def Moving_Average_System(signal,M = 10):\t\t\t\t\t\t\t\t# Function of Moving Average System using Ideal Delay System\n\tp,q,s = M,signal.shape[0]- M,signal.shape[0]\n\tsignal_new = np.zeros(s+M)\n\t\n\tfor i in range(M+1):\n\t\tsignal_new[M-i:M-i+s] += Signal_Ideal_Delay(signal,d=i)[0]\n\t\t\n\tsignal_new = signal_new/(M + 1)\t\t\n\ttime = np.arange(0,s+M)\n\t\n\treturn signal_new,time", "def moving_averages(ts_init, window):\n ts_init = pd.Series(ts_init)\n if len(ts_init) % 2 == 0:\n ts_ma = ts_init.rolling(window, center=True).mean()\n ts_ma = ts_ma.rolling(window=2, center=True).mean()\n ts_ma = np.roll(ts_ma, -1)\n else:\n ts_ma = ts_init.rolling(window, center=True).mean()\n return ts_ma", "def wma(df, close_price_col_name=\"Close\", wma_col_name=\"WMA\"):\r\n\r\n\t# Check N positive integer\r\n\twhile True:\r\n\r\n\t\tN = input(\"Please input period for moving average model (a positive integer (recommend: 10, 20, 50, 100, or 200 )): \")\r\n\r\n\t\ttry:\r\n\t\t\tif int(N) > 0:\r\n\t\t\t\tbreak\r\n\r\n\t\t\telif \".\" in N:\r\n\t\t\t\tprint(\"Please enter a positive integer, not a float \")\r\n\t\t\t\tcontinue\r\n\r\n\t\t\telif int(N) < 0:\r\n\t\t\t\tprint(\"Please enter a positive integer, not a negative one \")\r\n\t\t\t\tcontinue\r\n\r\n\t\texcept ValueError:\r\n\t\t\tprint(\"Please input a positive integer, not a string\")\r\n\t\t\tcontinue\r\n\r\n\t# Add column to store value of WMA\r\n\tdf[wma_col_name] = df[close_price_col_name].ewm(span=int(N)).mean()\r\n\r\n\t# Plot\r\n\tplt.plot(df[close_price_col_name], label=\"Closing price\")\r\n\tplt.plot(df[wma_col_name], label=\"Exponential Weighted Moving Average \" + N + \" days\")\r\n\tplt.title(\"Visualization of Exponential Weighted Moving Average \" + N + \" days\")\r\n\tplt.xlabel(\"Date\")\r\n\tplt.ylabel(\"Closing price\")\r\n\tplt.legend(loc='upper left')\r\n\tplt.show()\r\n\r\n\tdel df[wma_col_name] # delete the WMA column for re-graphing\r", "def calculate_ma(rates: [[]], size: int, current: int = 0):\n total = current\n stop = size if current == 0 else size - 1\n i = 0\n while i < stop:\n total += rates[i][0]\n i += 1\n return total / size", "def get_average_sales(data):\n 
print(\"Calculating stock data...\\n\")\n avg_sales = []\n for list in data:\n int_list_avg = sum(int(item) for item in list) / len(list)\n avg_plus_extra = round(int_list_avg * 1.1)\n avg_sales.append(avg_plus_extra)\n\n return avg_sales", "def MA(self, symbol, period, shift):\n data = self.get_latest_bars_values(symbol, 'Close', period + shift)\n if len(data[0:-shift]) != 0:\n return np.mean(data[0:-shift])\n return 0", "def ema(self, s, n):\n s = np.array(s).astype(float)\n ema = []\n j = 1\n\n # get n sma first and calculate the next n period ema\n sma = sum(s[:n]) / n\n multiplier = 2 / float(1 + n)\n ema[:0] = [sma] * n\n\n # EMA(current) = ( (Price(current) - EMA(prev) ) x Multiplier) + EMA(prev)\n ema.append(( (s[n] - sma) * multiplier) + sma)\n\n # now calculate the rest of the values\n for i in s[n + 1:]:\n tmp = ( (i - ema[j]) * multiplier) + ema[j]\n ema.append(tmp)\n j = j + 1\n\n # print \"ema length = \" + str(len(ema))\n return ema", "def mean(series):\n return fsum(series) / len(series)", "def moving_average(data, temporal_window=100):\n window = np.ones(temporal_window) / temporal_window\n return np.convolve(data, window, 'valid')", "def MACD(prices, slow, fast, signal):\r\n emaslow = expMovingAverage(prices, slow)\r\n emafast = expMovingAverage(prices, fast)\r\n emasignal = expMovingAverage(prices, signal )\r\n return emaslow, emafast, emafast - emaslow, emasignal", "def dist_sma(self, a):\r\n\r\n return self.logunif(a, self.arange.to(\"AU\").value)", "def dist_sma(self, a):\n\n return self.uniform(a, self.arange.to(\"AU\").value)", "def compute_MA(self, series, long_term=True):\n temp = series.copy().reset_index(drop=True) # DO NOT MODIFY THE ORIGINAL DATAFRAME!\n if long_term:\n lag = 200\n else:\n lag = 50\n assert len(temp)>lag, 'Not enough data points in this timeseries!'\n for idx in range(lag, len(temp)):\n temp[idx] = series[idx-lag:idx].mean()\n temp[:lag] = None\n return temp", "def gen_sma(self, n):\n n = self.gen_input_check(n)\n ar = self.arange.to(\"AU\").value\n a = np.random.uniform(low=ar[0], high=ar[1], size=n) * u.AU\n\n return a", "def mov_mean_std(ts, m):\n\n if m <= 1:\n raise ValueError(\"Query length must be longer than one\")\n\n ts = ts.astype(\"float\")\n\n # Add zero to the beginning of the cumsum of ts\n s = np.insert(np.cumsum(ts), 0, 0)\n\n # Add zero to the beginning of the cumsum of ts ** 2\n s_sq = np.insert(np.cumsum(ts ** 2), 0, 0)\n seg_sum = s[m:] - s[:-m]\n seg_sum_sq = s_sq[m:] - s_sq[:-m]\n return seg_sum / m, np.sqrt(seg_sum_sq / m - (seg_sum / m) ** 2)", "def moving_average_forecast(series, window_size):\n\tforecast= []\n\tfor time in range(len(series)- window_size):\n\t\tforecast.append(series[time:time + window_size].mean())\n\treturn np.array(forecast)", "def ar1_moving_average_time_series(series, length=1):\n\n # just in case the index isn't already datetime type\n series.index = pd.to_datetime(series.index)\n\n ar1 = []\n ar1_se = []\n index = []\n\n for i in range(len(series) - length ):\n #print(series[i:(length + i)])\n param, se = get_AR1_parameter_estimate(series[i:(length + i)])\n ar1.append(param)\n ar1_se.append(se)\n index.append(series.index[length + i])\n\n ar1_name = series.name+\"_ar1\"\n ar1_se_name = series.name+\"_ar1_se\"\n\n ar1_df = pd.DataFrame()\n ar1_df[ar1_name] = pd.Series(ar1)\n ar1_df[ar1_se_name] = pd.Series(ar1_se)\n ar1_df.index = index\n\n return ar1_df", "def calculateLatestFifteenDayMA(self, closingPrices):\n fifteenDayMovingAverage = 0\n for i in range(3, 18):\n fifteenDayMovingAverage = 
fifteenDayMovingAverage + \\\n closingPrices[i]\n fifteenDayMovingAverage = fifteenDayMovingAverage/15\n return fifteenDayMovingAverage", "def mean(a_series):\n return float(sum(a_series) / max(len(a_series) * 1.0, 1.0))", "def moving_average(data, width):\n return np.convolve(data, np.ones(width), 'same') / width", "def ema(self, candles, n):\n\n\t s = [] # array(s)\n\t for candle in candles:\n\t \ts.append(candle.close)\n\n\t ema = []\n\t j = 1\n\n\t #get n sma first and calculate the next n period ema\n\t sma = sum(s[:n]) / n\n\t multiplier = 2 / float(1 + n)\n\t ema.append(sma)\n\n\t #EMA(current) = ( (Price(current) - EMA(prev) ) x Multiplier) + EMA(prev)\n\t ema.append(( (s[n] - sma) * multiplier) + sma)\n\n\t #now calculate the rest of the values\n\t for i in s[n+1:]:\n\t tmp = ( (i - ema[j]) * multiplier) + ema[j]\n\t j = j + 1\n\t ema.append(tmp)\n\n\t return ema", "def get_ssma(data):\n if data is None:\n raise EmptyDataError('[!] Invalid data value')\n\n result = TA.SSMA(data)\n if result is None:\n raise IndicatorException\n return result", "def get_smma(data):\n if data is None:\n raise EmptyDataError('[!] Invalid data value')\n\n result = TA.SMMA(data)\n if result is None:\n raise IndicatorException\n return result", "def SMAPE(pred, true):\n denom = torch.abs(true) + torch.abs(pred)\n smape = torch.where(denom == 0., torch.zeros_like(true), torch.abs(pred - true) / denom)\n mean_smape = smape.mean()\n return mean_smape * 200.", "def sma(self, sma: float):\n\n self._sma = sma", "def moving_average_panda(data_set, periods=4, drop_initial_data=True):\n\n data_set['MA_Col1'] = data_set.iloc[:, 0].rolling(window=periods).mean()\n data_set['MA_Col2'] = data_set.iloc[:, 1].rolling(window=periods).mean()\n data_set['MA_Col3'] = data_set.iloc[:, 2].rolling(window=periods).mean()\n data_set['MA_Col4'] = data_set.iloc[:, 3].rolling(window=periods).mean()\n data_set['MA_Col5'] = data_set.iloc[:, 4].rolling(window=periods).mean()\n if drop_initial_data:\n data_set.drop(['1', '2', '3', '4', '5'], axis=1, inplace=True)\n data_set.drop(range(periods), inplace=True)\n\n return data_set", "def stock_average(stock):\n closing_price=stock['Close']\n average=stats.mean(closing_price)\n return average", "def moving_average(x, n, type='simple'):\n x = np.asarray(x)\n if type == 'simple':\n weights = np.ones(n)\n else:\n weights = np.exp(np.linspace(-1., 0., n))\n\n weights /= weights.sum()\n\n a = np.convolve(x, weights, mode='full')[:len(x)]\n a[:n] = a[n]\n return a", "def __calculate_moving_average(self,df):\n for m in Config.moving_average_conf:\n if m['type']=='simple':\n df[m['label']]=round(df[Config.PRICE_COL].rolling(window=m['days']).mean(),2)\n else:\n df[m['label']]=round(df[Config.PRICE_COL].ewm(span=m['days'], adjust=False).mean(),2)\n\n return df", "def average(ls):\n\n if len(ls) == 0:\n return 0.0\n\n sm = sum(ls)\n return sm / len(ls)", "def smape(actual: np.ndarray, predicted: np.ndarray):\n return np.mean(np.divide(np.abs(actual - predicted), (np.abs(actual) + np.abs(predicted) + np.finfo(float).eps) * 0.5))", "def MM_n(N, data):\n out = np.zeros(len(data))\n\n for j in range(N):\n out[j] = np.average(data[:j+1])\n for (j,d) in enumerate(data[N-1:]):\n out[j+N-1] = np.average(data[j:j+N])\n\n return out", "def EMA(self, n=SHORT_TERM_PERIOD):\n prices = self.df.close\n\n ema = prices.ewm(span=n, adjust=False).mean()\n\n self.df[\"ema\"] = ema\n\n return ema", "def moving_average(data, window_size):\n window= np.ones(int(window_size))/float(window_size)\n return np.convolve(data, 
window, 'same')", "def rolling_ewma(data_frame, spn, minp):\r\n # span = 2 / (1 + periods)\r\n return pd.ewma(data_frame['close'], span=spn, min_periods = minp)", "def compute_MAE(e):\n\n return np.mean(np.abs(e))", "def ewm(dataArray):\r\n\r\n # normalized = np.zeros(dataArray.shape)\r\n starting_means = np.mean(dataArray[:init_block_size])\r\n starting_var = np.var(dataArray[:init_block_size])\r\n averages = np.copy(starting_means)\r\n variances = np.copy(starting_var)\r\n\r\n for i in range(0, len(dataArray)):\r\n # for the first samples, there are not enough previous samples to warrant an exponential weighted averaging\r\n # simply substract the true average of the first samples\r\n if i < init_block_size:\r\n dataArray[i] = (dataArray[i] - starting_means) / np.maximum(eps, np.sqrt(starting_var))\r\n else:\r\n #update the rolling mean and variance\r\n averages = 0.999 * averages + 0.001 * dataArray[i]\r\n variances = 0.999 * variances + 0.001 * (np.square(dataArray[i] - averages))\r\n\r\n dataArray[i] = (dataArray[i] - averages) / np.maximum(eps, np.sqrt(variances)) \r\n\r\n return dataArray", "def addMovingAverages(self):\n\n if not isinstance(self.df, pd.DataFrame):\n raise TypeError('Pandas DataFrame required.')\n\n if not 'close' in self.df.columns:\n raise AttributeError(\"Pandas DataFrame 'close' column required.\")\n\n if not self.df['close'].dtype == 'float64' and not self.df['close'].dtype == 'int64':\n raise AttributeError(\n \"Pandas DataFrame 'close' column not int64 or float64.\")\n\n # calculate cumulative moving average\n self.df['cma'] = self.df.close.expanding().mean()\n\n # calculate exponential moving averages\n self.df['ema12'] = self.df.close.ewm(span=12, adjust=False).mean()\n self.df['ema26'] = self.df.close.ewm(span=26, adjust=False).mean()\n\n # calculate simple moving averages\n self.df['sma20'] = self.df.close.rolling(20, min_periods=1).mean()\n self.df['sma50'] = self.df.close.rolling(50, min_periods=1).mean()\n self.df['sma200'] = self.df.close.rolling(200, min_periods=1).mean()", "def MACD(DF,a,b,c):\n df = DF.copy()\n df[\"MA_Fast\"]=df[\"Adj Close\"].ewm(span=a,min_periods=a).mean()\n df[\"MA_Slow\"]=df[\"Adj Close\"].ewm(span=b,min_periods=b).mean()\n df[\"MACD\"]=df[\"MA_Fast\"]-df[\"MA_Slow\"]\n df[\"Signal\"]=df[\"MACD\"].ewm(span=c,min_periods=c).mean()\n df.dropna(inplace=True)\n return df", "def sma(client, symbol, range=\"6m\", col=\"close\", periods=None):\n if periods is None:\n periods = [30]\n periods = tolist(periods)\n\n df = client.chartDF(symbol, range)\n\n build = {col: df[col].values}\n for per in periods:\n build[\"sma-{}\".format(per)] = t.EMA(df[col].values.astype(float), per)\n return pd.DataFrame(build)", "def moving_average(position, step_size, avg_quantity):\n print(\"1\")\n avg_disp = int(math.floor(avg_quantity / 2))\n start_frame = step_size + avg_disp + 1\n end_frame = len(position) - avg_disp\n moving_avg = []\n for i in range(start_frame, end_frame + 1):\n position_avg = 0\n for j in range(i - 1 - avg_disp, i + avg_disp):\n position_avg += position[j]\n position_1 = position_avg / (avg_disp * 2 + 1)\n \n moving_avg.append(position_1)\n print(\"2\")\n return start_frame, end_frame, moving_avg", "def EMA_ST(df, base, target, period, alpha=False):\r\n\r\n con = pd.concat([df[:period][base].rolling(window=period).mean(), df[period:][base]])\r\n \r\n if (alpha == True):\r\n # (1 - alpha) * previous_val + alpha * current_val where alpha = 1 / period\r\n df[target] = con.ewm(alpha=1 / period, adjust=False).mean()\r\n else:\r\n # 
((current_val - previous_val) * coeff) + previous_val where coeff = 2 / (period + 1)\r\n df[target] = con.ewm(span=period, adjust=False).mean()\r\n \r\n df[target].fillna(0, inplace=True)\r\n return df", "def calc_smape(y: np.ndarray, y_hat: np.ndarray) -> float:\n pass", "def _moving_average(self, series=None, window_length=None, train_subwindow_len=None):\n import numpy as np\n\n moving_averages = []\n iter_length = len(series) - window_length\n for i in range(0, iter_length):\n ma_current = np.mean(series[i:i + window_length])\n moving_averages.append(ma_current)\n\n # Moving average shrinkes w.r.t the actual series based on the moving average window. Hence, to keep the\n # length of the moving average equal to the series, we append proxy padding which are the moving averages\n # from the closest representative training sub-window.\n moving_averages_padded = moving_averages[(train_subwindow_len - (\n window_length // 2)):train_subwindow_len] + moving_averages + moving_averages[-train_subwindow_len:-(\n train_subwindow_len - (window_length // 2))]\n\n return moving_averages_padded", "def aveVolumeCalc(ins, date):\n cal = ins.Currency().Calendar()\n enddate = cal.AdjustBankingDays(date, 0)\n startdate = cal.AdjustBankingDays(date, AVERAGING_PERIOD)\n\n prices=[]\n histprices = acm.FPrice.Select(\"instrument = %s and market = '%s' \\\n and day > '%s' and day <='%s'\" % \n (ins.Oid(), DAILY_MARKET, startdate, enddate))\n \n for price in histprices:\n settle = price.Settle()\n if settle >= 0:\n prices.append(settle)\n \n #upgrade 2013 fix for failure during run - acm.Math().AverageOf seems buggy\n try:\n avgprice = (sum(prices)/len(prices))\n except ZeroDivisionError:\n avgprice = 0\n \n #avgprice = acm.Math().AverageOf(prices, None)\n \n #Overwrite today's price if you find it \n newPrice = acm.FPrice.Select01(\"instrument = %s and market = '%s' and day = %s\" % \n (ins.Oid(), THREE_MONTH_MARKET, enddate),\n 'NaN')\n if not newPrice:\n newPrice = acm.FPrice()\n newPrice.Instrument(ins)\n newPrice.Day(enddate)\n newPrice.Market(THREE_MONTH_MARKET)\n newPrice.Currency(ins.Currency())\n\n newPrice.Settle(avgprice)\n try:\n newPrice.Commit()\n print 'INFO: %s price for %s was created on %s' %(THREE_MONTH_MARKET, ins.Name(), date)\n except Exception, err:\n print 'ERROR: %s price for %s did not commit: %s' %(THREE_MONTH_MARKET, ins.Name(), str(err))\n \n return newPrice", "def adv(prices: pd.Series, volume, d: int) -> pd.Series:\n\n if isinstance(prices.index, pd.MultiIndex):\n return (prices * volume).groupby(level=1).rolling(d).mean().droplevel(0).sort_index()\n else:\n return (prices * volume).rolling(d).mean()", "def SMAPE(y_true, y_pred):\n return smape(y_true, y_pred) / 2", "def calc_first_mms_records(pair, data):\n\n mms_20_final = 0\n mms_50_final = 0\n mms_200_final = 0\n m20 = []\n m50 = []\n m200 = []\n\n for rec in data.get(\"candles\"):\n # mms 20\n m20.append(rec.get(\"close\"))\n if len(m20) == 20:\n mms_20_final = float(numpy.mean(m20))\n if len(m20) > 20:\n del m20[0]\n mms_20_final = float(numpy.mean(m20))\n\n # mms 50\n m50.append(rec.get(\"close\"))\n if len(m50) == 50:\n mms_50_final = float(numpy.mean(m50))\n if len(m50) > 50:\n del m50[0]\n mms_50_final = float(numpy.mean(m50))\n\n # mms 200\n m200.append(rec.get(\"close\"))\n if len(m200) == 200:\n mms_200_final = float(numpy.mean(m200))\n if len(m200) > 200:\n del m200[0]\n mms_200_final = float(numpy.mean(m200))\n\n record_mms = MmsVariation.objects.create(\n pair=pair,\n 
timestamp=datetime.fromtimestamp(rec.get(\"timestamp\")),\n mms_20=mms_20_final,\n mms_50=mms_50_final,\n mms_200=mms_200_final,\n )\n record_mms.save()", "def compute_sta(stim, rho, num_timesteps):\n \n sta = np.zeros((num_timesteps,))\n\n # This command finds the indices of all of the spikes that occur after 300 ms into the recording.\n spike_times = rho[num_timesteps:].nonzero()[0] + num_timesteps\n\n # Fill in this value. Note that you should not count spikes that occur before 300 ms into the recording.\n num_spikes = np.count_nonzero(rho[num_timesteps:]) # 53583\n\n # Compute the spike-triggered average of the spikes found. To do this, compute the average of all of the vectors\n # starting 300 ms (exclusive) before a spike and ending at the time of the event (inclusive). Each of these vectors\n # defines a list of samples that is contained within a window of 300 ms before each spike. The average of these\n # vectors should be completed in an element-wise manner.\n # i call this 'buffer time'\n buffer_time = 300 # ms\n sampling_period = 2\n # Therefore there are 300/2 = num timesteps seconds to ignore!\n for spike_time in spike_times:\n sta += stim[spike_time - num_timesteps + 1:spike_time + 1]\n\n assert len(spike_times) == num_spikes # Making sure im not crazy.\n sta /= num_spikes\n\n return sta", "def price_average(lst):\n\n return sum(lst) / len(lst)", "def execQ10():\n frame = pan.DataFrame(data, columns=['Product', 'Price', 'Period'])\n banana = frame[(dFrame.Series_title_1 == \"Bananas, 1kg\") & (dFrame.Period >= 2013.01) & (dFrame.Period < 2014.01)]\n average = banana['Price'].mean()\n return average", "def mean_variance_analysis(df):\n rets = np.log(df['close']/df['close'].shift(1))\n\n std = rets.std()* 252\n\n annualized_returns = rets.mean() * 252\n\n print(f'The annualized returns of the stock is {annualized_returns}, and the standard deviation of the stock is {std}')", "def average(self, returns):\r\n return returns.mean() * self.day", "def execQ9():\n frame = pan.DataFrame(data, columns=['Product', 'Price', 'Period'])\n banana = frame[(dFrame.Series_title_1 == \"Bananas, 1kg\") & (dFrame.Period >= 2012.01) & (dFrame.Period < 2013.01)]\n average = banana['Price'].mean()\n return average", "def movmean(A, k, discard_endpoints=True, std=False):\n\n\tk1 = k[0]\n\tk2 = k[1]\n\t\n\tnew_array = []\n\tfor i in range(len(A)):\n\t\tlow = i-k1\n\t\thigh = i+k2+1\n\t\tif low < 0:\n\t\t\tif discard_endpoints:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tlow = 0\n\n\t\tif high > len(A):\n\t\t\tif discard_endpoints:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\thigh = len(A)\n\n\t\tthis = A[low:high]\n\t\tif std:\n\t\t\tto_append = np.std(this, ddof=1)\n\t\telse:\n\t\t\tto_append = np.mean(this)\n\t\tnew_array.append(to_append)\n\treturn np.array(new_array)", "def sinc_mean_function(x):\n return np.sin(24*x ) / (12*x) + 2", "def mv(series, window=30):\n if not isinstance(series, pd.Series):\n raise ValueError('expect input pandas series dataframe, but get %s'%(type(series)))\n\n return series.rolling(window=window, min_periods=1).mean()", "def avg_amplitude_spectro(Sxx_ampli):\r\n\r\n # average the amplitude spectrogram taking the PSD for energy conservation\r\n S_ampli_mean = np.sqrt(np.mean(Sxx_ampli**2, axis=1))\r\n\r\n return S_ampli_mean", "def mean_wave_period(F, f, df):\n return np.sum(F * df) / np.sum(F * f * df)", "def movavg(ave_list, length, value):\n ave_list.append(value)\n if length < len(ave_list):\n del ave_list[0]\n value = sum(ave_list)\n return value / len(ave_list)", "def 
get_average(data):\n average = sum(data) / len(data)\n\n return average", "def RSI(df,window):\n # Window length for moving average\n window_length = window\n\n # Dates\n start = '2014-02-31'\n end = '2019-12-31'\n\n # Get data\n data = df\n # Get just the adjusted close\n close = data['Adj Close']\n # Get the difference in price from previous step\n delta = close.diff()\n # Get rid of the first row, which is NaN since it did not have a previous \n # row to calculate the differences\n delta = delta[1:] \n\n # Make the positive gains (up) and negative gains (down) Series\n up, down = delta.copy(), delta.copy()\n up[up < 0] = 0\n down[down > 0] = 0\n\n # Calculate the EWMA\n roll_up1 = up.ewm(span=window_length).mean()\n roll_down1 = down.abs().ewm(span=window_length).mean()\n\n # Calculate the RSI based on EWMA\n RS1 = roll_up1 / roll_down1\n RSI1 = 100.0 - (100.0 / (1.0 + RS1))\n\n # Calculate the SMA\n #roll_up2 = up.rolling(window_length).mean()\n #roll_down2 = down.abs().rolling(window_length).mean()\n\n # Calculate the RSI based on SMA\n #RS2 = roll_up2 / roll_down2\n #RSI2 = 100.0 - (100.0 / (1.0 + RS2))\n\n # Compare graphically\n #plt.figure(figsize=(8, 6))\n #RSI1.plot()\n #RSI2.plot()\n #plt.legend(['RSI via EWMA', 'RSI via SMA'])\n #plt.show()\n df['RSI'] = RSI1\n return df", "def obtain_monthly_mean(data=pd.DataFrame()):\n return data.resample(\"MS\").mean()", "def stddev_from_moving_average(timeseries):\r\n series = pandas.Series([x[1] for x in timeseries])\r\n expAverage = pandas.stats.moments.ewma(series, com=50)\r\n stdDev = pandas.stats.moments.ewmstd(series, com=50)\r\n\r\n return abs(series.iget(-1) - expAverage.iget(-1)) > 3 * stdDev.iget(-1)", "def standardize_ts(a, scale=1.0):\n stds = np.std(a, axis=0, keepdims=True)\n stds[stds==0] = 1\n return (a - np.mean(a, axis=0, keepdims=True))/(scale*stds)", "def average(data):\r\n sum =0\r\n for i in data:\r\n sum+=i\r\n return sum/len(data)", "def _get_mean(self, sums, step):\n\n return sums/step" ]
[ "0.7370013", "0.73070514", "0.72123134", "0.72123134", "0.71889323", "0.71044785", "0.69947493", "0.6878451", "0.6826726", "0.67439926", "0.6694565", "0.6685296", "0.6657218", "0.6633062", "0.6631047", "0.6587248", "0.6556056", "0.6541206", "0.6492296", "0.64602447", "0.64139616", "0.64041024", "0.6383691", "0.6383059", "0.63778275", "0.63331485", "0.6326961", "0.63199466", "0.6312502", "0.63074315", "0.62883997", "0.62861407", "0.62489724", "0.62448204", "0.61740315", "0.6156367", "0.615364", "0.6143905", "0.6141203", "0.6140302", "0.6111639", "0.6105259", "0.60975266", "0.60880053", "0.6085004", "0.6083677", "0.60485715", "0.6031658", "0.60270095", "0.60028815", "0.5981092", "0.5980416", "0.5952859", "0.5944471", "0.5941722", "0.59092695", "0.58862746", "0.5884395", "0.5878335", "0.586721", "0.58606666", "0.58578", "0.58549535", "0.5849494", "0.5838971", "0.5827394", "0.5819184", "0.5818194", "0.5810713", "0.5810699", "0.580814", "0.58045495", "0.5794157", "0.57907724", "0.57829154", "0.5727449", "0.57176965", "0.5716896", "0.5716855", "0.5712899", "0.5661197", "0.5660801", "0.5653265", "0.56524396", "0.5643835", "0.5635104", "0.5632598", "0.5629049", "0.5624502", "0.56114084", "0.5589772", "0.5589304", "0.558616", "0.5585977", "0.55841255", "0.557449", "0.55743253", "0.55707645", "0.5550667", "0.55492735" ]
0.8110601
0
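The negatives closing the record above all revolve around moving-average computations in pandas (the long-deprecated pd.ewma call, Series.ewm with span or alpha, and plain rolling means). As a reading aid only, not part of the dataset, here is a minimal Python sketch of the span/alpha relation those snippets quote; the sample prices are invented for illustration.

import pandas as pd

def ema(series: pd.Series, period: int) -> pd.Series:
    # With span=period and adjust=False, pandas applies the recursion
    # ema_t = alpha * x_t + (1 - alpha) * ema_{t-1}, where alpha = 2 / (period + 1).
    return series.ewm(span=period, adjust=False).mean()

def ema_manual(series: pd.Series, period: int) -> pd.Series:
    # The same recursion written out by hand, seeded with the first observation.
    alpha = 2.0 / (period + 1)
    out = [float(series.iloc[0])]
    for x in series.iloc[1:]:
        out.append(alpha * float(x) + (1.0 - alpha) * out[-1])
    return pd.Series(out, index=series.index)

prices = pd.Series([10.0, 10.5, 10.2, 10.8, 11.1])   # made-up example data
assert (ema(prices, 3) - ema_manual(prices, 3)).abs().max() < 1e-9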
Average True Range Is a lagging indicator, used to provide insights into volatility.
def AverageTrueRange(self, timeperiod = 14): return ta.ATR(self.data.high, self.data.low, self.data.close, timeperiod)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def average_true_range(self, period=14):\n tr = self._true_range_computation(period=period * 2)\n return pd.Series(tr.rolling(center=False, window=period,\n min_periods=period - 1).mean(),\n name='{} day ATR Ticker: {}'.format(period,\n self.ticker)).tail(\n period)", "def should_average(self):\n return self._should_average", "def rollingAvg( lag, oldSet ):\r\n\r\n newSet = []\r\n\r\n # insert lag-1 number of nans at beginning of list\r\n for i in range(0, lag - 1):\r\n newSet.append(Decimal('nan'))\r\n\r\n # calculate new values for list\r\n for i in range((lag - 1), len(oldSet)):\r\n sum = 0\r\n for j in range(lag):\r\n sum += oldSet[i - j]\r\n\r\n avg = sum / Decimal(lag)\r\n newSet.append(Decimal(avg))\r\n\r\n return newSet", "def atr(df, lag, normalize=False):\n\n def _true_range(window):\n divisor = (1.0 * float(not normalize)) + ((float(normalize) * window[-1][\"c\"]))\n\n tr1 = window[-1][\"h\"] - window[-1][\"l\"]\n tr2 = window[-1][\"h\"] - window[-2][\"c\"]\n tr3 = window[-1][\"l\"] - window[-2][\"c\"]\n return max(tr1, tr2, tr3) / divisor\n\n def _sma(window):\n avg = round(reduce(lambda a, b: a + b, window) / len(window), 2)\n return avg\n\n tr = [_true_range(df[i : i + 2]) for i in range(len(df) - 1)]\n return [_sma(tr[i : i + lag + 1]) for i in range(len(tr) - lag)]", "def get_avg_range(range_array):\n # Average the ranges\n range_count = 0\n range_accum = 0.0\n\n if range_array:\n # Accumulate the data\n for beam in range(len(range_array)):\n if range_array[beam] > 0.0 and not Ensemble.is_bad_velocity(range_array[beam]):\n range_count += 1\n range_accum += range_array[beam]\n\n if range_count > 0:\n return range_accum / range_count\n else:\n return 0.0", "def AverageTrueRangeStopLoss(self, timeperiod = 14, multiplier = 2):\r\n stopLoss = ta.ATR(self.data.high, self.data.low, self.data.close, timeperiod)\r\n \r\n plus_dm = ta.PLUS_DM(self.data.high,self.data.low, timeperiod)\r\n minus_dm = ta.MINUS_DM(self.data.high,self.data.low, timeperiod)\r\n \r\n if plus_dm > minus_dm:\r\n stopLoss = self.data.close - multiplier * stopLoss\r\n else:\r\n stopLoss = self.data.close + multiplier * stopLoss\r\n \r\n\r\n stopLoss.dropna(inplace=True) \r\n \r\n return stopLoss", "def conditional_mean(self, gp):\n raise NotImplementedError", "def indicator_logic(self, candle):\n # Initialize variables\n sma, upper, lower = 2, -1.0, -1.0 # 'sma' = 2 is clever way to generate 'a favor' e 'contra'\n\n # Append close to moving average\n self.ma.append(candle.close[self.up])\n\n # Check if there are enough candles to calculate moving average\n if len(self.ma) == self.period:\n\n # Initialize upper and lower values for when there is a valid moving average\n upper, lower = 0.0, 0.0\n\n # Calculates moving average\n avg = sum(self.ma) / self.period\n\n # Tells if current close is above moving average\n sma = 1 if candle.close[self.up] > avg else 0\n\n # Calculates standard deviation\n std = pstdev(self.ma)\n\n # Calculates difference between current candle and moving average\n diff = candle.close[self.up] - avg\n\n # Transform difference to standard deviations\n if diff > 0 and std != 0:\n # Value of above\n upper = diff / std\n elif diff < 0 and std != 0:\n # Value if below\n lower = -diff / std\n\n # Returns values\n return sma, upper, lower", "def __call__(self, x):\n return np.mean(self.observations <= x)", "def average_age(self, start=1, end=None):\n picks = self.pick_set.filter(number__gte=start)\n if end is not None:\n picks = picks.filter(number__lte=end)\n\n dt = datetime.date(self.year, 
1, 1)\n ages = [e.player.age(dt) for e in picks]\n ages = [e for e in ages if e]\n average = sum(ages) / len(ages)\n return average", "def step_change(data, span=10, lag=1):\n moving_average = data.ewm(span=span).mean()\n lagged = pd.Series(np.append(np.repeat(np.nan, lag), moving_average[:len(moving_average)-lag]))\n diffs = data[lag:] - lagged\n pct_diff = diffs/moving_average\n max_diff = max(pct_diff)\n mean_diff = np.mean(pct_diff)\n return moving_average, diffs, pct_diff, max_diff, avg_diff", "def average(self,start_window, end_window):\n query = f\"select avg(age) from `{self.table_id}` where timestamp between {start_window} and {end_window}\"\n query_job = self.client.query(query)\n return query_job.result", "def _get_average(self):\n norm = 1.0\n for pos, idx in enumerate(self.idx):\n norm *= (self.high[pos] - self.low[pos])\n return 1.0/norm", "def slg_average(df,start_year,end_year,bat_met,player_name):\n base_fields = ['AB','HR','X3B','X2B','SLG']\n emp_list = check_base_fields(df,base_fields)\n\n if not emp_list:\n df['X1B'] = round(df['SLG']*df['AB'] - (4*df['HR'] + 3*df['X3B'] + 2*df['X2B']),0)\n return round((df['X1B'].sum(axis = 0) + df['X2B'].sum(axis = 0) * 2 + df['X3B'].sum(axis = 0) * 3 + df['HR'].sum(axis = 0) * 4) / df['AB'].sum(axis = 0),3)\n\n else:\n df = original_dataframe(start_year,end_year,bat_met+emp_list,player_name)\n df['X1B'] = round(df['SLG']*df['AB'] - (4*df['HR'] + 3*df['X3B'] + 2*df['X2B']),0)\n SLG = round((df['X1B'].sum(axis = 0) + df['X2B'].sum(axis = 0) * 2 + df['X3B'].sum(axis = 0) * 3 + df['HR'].sum(axis = 0) * 4) / df['AB'].sum(axis = 0),3)\n del df['X1B']\n return SLG", "def mean_error_rate(y_true, y_interval):\n _check_interval_array(y_interval)\n\n wrong_intervals = ((y_true < y_interval[:, 0]) | (y_true > y_interval[:, 1])).sum()\n\n return wrong_intervals / y_true.shape[0]", "def average(self):\n return (self.current + self.last) / 2.0", "def _get_acc(logits: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n predictions = logits > 0\n target_bool = target > 0.5\n return (predictions == target_bool).float().mean()", "def mean_in_range(arr, args):\n mn = np.mean(arr)\n res = mn > args[0] and mn < args[1]\n return ct.Result(res, 'mean_in_range')", "def calc_meanadiff(sig):\n\n return np.mean(abs(np.diff(sig)))", "def load_average(self):\n return _favg(self.load_samples)", "def get_average_MAE(true_pred_df): \n age_group = true_pred_df.groupby('y_true')\n \n mae_average = []\n for age, age_data in age_group:\n mae_average.append(np.mean(age_data.mae))\n \n return mae_average", "def mean_vol(df):\n return df.tail(5)['volume'].mean(), df.tail(20)['volume'].mean()", "def moving_avg(df, key, lag):\n\n def _sma(key, window):\n values = list(map(lambda w: w[key], window))\n avg = round(reduce(lambda a, b: a + b, values) / len(values), 2)\n return avg\n\n return [_sma(key, df[i : i + lag + 1]) for i in range(len(df) - lag)]", "def soft_acc(self, y_true, y_pred):\n try:\n score= backend.mean(backend.abs(y_true - y_pred) <= self.prediction_tolerance)\n except Exception:\n pass\n return score", "def take_one_averaged(self):\n self.na.set_center_frequency(6.160574e9)\n self.na.set_span(10e6)\n self.na.set_power(-5, 1)\n self.na.set_ifbw(1e3)\n\n self.na.set_query_timeout(40e3)\n set_format = self.na.set_format('polar')\n print \"set_format returned: \", set_format\n self.na.set_trigger_source(\"manual\")\n self.na.set_averages(10)\n self.na.set_trigger_average_mode()\n\n self.na.clear_averages(channel=1)\n self.na.trigger_single(channel=1)\n fpts, 
xs, ys = self.na.read_data()\n #\n plt.figure()\n plt.plot(fpts, xs)\n plt.plot(fpts, ys)\n plt.show()", "def mean(vals):", "def test_avg_l(self):\n u_spec = leabra.UnitSpec(g_bar_e=0.3, g_bar_l=0.3, g_bar_i=1.0)\n u = leabra.Unit(spec=u_spec)\n\n for _ in range(20):\n u.add_excitatory(1.0)\n u.calculate_net_in()\n u.cycle('minus')\n\n self.assertEqual(u.avg_l, 0.40)\n u.spec.update_avg_l(u)\n self.assertTrue(np.allclose(0.52, u.avg_l, rtol=0.1, atol=0.1))\n #TODO: verify that 0.52 is the value of emergent\n\n for _ in range(100):\n u.spec.update_avg_l(u)\n self.assertTrue(np.allclose(1.64, u.avg_l, rtol=0.1, atol=0.1))\n #TODO: verify that 1.64 is the value of emergent", "def mean_average_position():\n pass", "def averaged_risk(self):\n return self._averaged_risk", "def averaged_risk(self):\n return self._averaged_risk", "def gen_sensor_reward(self,MAX_UNCERTAINTY,window_size,window_lag):\n\n for i in range(0, len(self.tracker_object.tracks)):\n unormalized_uncertainty = np.sum(self.tracker_object.tracks[i].p_k_k.diagonal())\n self.uncertainty[i].append((1.0 / MAX_UNCERTAINTY) * unormalized_uncertainty)\n\n\n this_uncertainty = []\n [this_uncertainty.append(self.uncertainty[x][-1]) for x in range(0, len(self.tracker_object.tracks))]\n\n self.avg_uncertainty.append(np.mean(this_uncertainty))\n\n if len(self.avg_uncertainty) < window_size + window_lag:\n self.reward.append(0)\n else:\n current_avg = np.mean(self.avg_uncertainty[-window_size:])\n prev_avg = np.mean(self.avg_uncertainty[-(window_size + window_lag):-window_lag])\n if current_avg < prev_avg or self.avg_uncertainty[-1] < .1:\n # if current_avg < prev_avg:\n self.reward.append(1)\n else:\n self.reward.append(0)", "def absorbance( self, lmin=0, lmax=0 ):\n A = self.prop[\"SPEC\"][:,1]\n if lmax>0:\n m = np.vstack( (self.wavelength(), A) ).T # matrix w lambda and absorbance\n m = m[ m[:,0] >= lmin ] # slice by wavelength...\n m = m[ m[:,0] <= lmax ]\n return np.average( m[:,1] ) # scalar\n return A # array", "def average(self, start, end):\n return self.integrate(start, end) / (end - start)", "def forward_avg(array_in):\n return (array_in[:-1] + array_in[1:]) * 0.5", "def get_info_gain(true_rows, false_rows, current_impurity):\n avg_impurity = (len(true_rows)/(len(true_rows)+len(false_rows))) * get_gini(true_rows) + \\\n (len(false_rows)/(len(true_rows)+len(false_rows))) * get_gini(false_rows)\n return current_impurity - avg_impurity", "def demo_one_filter():\n data = [1.3, 2.7, 0.8, 4.1, 4.3, -0.1]\n avg = np.mean(data)\n print \"average value is:\", avg\n\n # create iterator that filters to keep only above average data\n above_avg_iter = filter(lambda x: x > avg, data) # returns iterator for data above the avg\n\n print \"values strictly above average are:\", list(above_avg_iter)", "def conditional_mean(self, F):\n raise NotImplementedError", "def angle_error(self, y_true, y_pred):\n diff = self.angle_difference(K.argmax(y_true), K.argmax(y_pred))\n return K.mean(K.cast(K.abs(diff), K.floatx()))", "def learning_rate_range():\n # Lower and upper bounds\n #######\n lower_bound = 0.1 \n upper_bound = 1e-6\n #######\n return lower_bound, upper_bound", "def mean(self):\n return self.x_guessed()", "def average_age():\n df = pd.read_csv(config.META_FQN, sep=\"\\t\")\n ages = []\n for _, row in df.iterrows():\n if row[\"asr_test\"]:\n age = row[\"Age_ses1\"]\n if not math.isnan(age):\n ages.append(age)\n\n print(\"------ Age ------\")\n print_stats(ages)", "def get_average_age(self):\n return np.mean([agent.age for agent in 
self.agents])", "def force_averaging(self):\n return self._force_averaging", "def age_avg_adopt():\n \n import numpy as np\n import pandas as pd\n import matplotlib.pyplot as plt\n\n train = pd.read_csv('./data/train.csv')\n \n # Convert age from months to years\n train.loc[train['Age'] > -1, 'Age'] = (train['Age']//12)\n \n # Divide by dog (Type = 1) and cat (Type = 2)\n dog_df = train.loc[train['Type'] == 1, ['State','Type', 'Age', 'AdoptionSpeed']]\n cat_df = train.loc[train['Type'] == 2, ['State','Type', 'Age', 'AdoptionSpeed']]\n \n dog_max_age = max(dog_df.loc[:, 'Age'])\n dog_min_age = min(dog_df.loc[:, 'Age'])\n \n cat_max_age = max(cat_df.loc[:, 'Age'])\n cat_min_age = min(cat_df.loc[:, 'Age'])\n \n dog_age_labels = []\n dog_avg = []\n \n cat_age_labels = [] \n cat_avg = []\n \n\n # Find dog average adoption speed by age\n for i in range(dog_min_age, dog_max_age + 1) :\n \n count = (dog_df.Age == i).sum()\n if(count > 0) :\n dog_avg.append(dog_df.loc[dog_df['Age'] == i, ['AdoptionSpeed']].mean()[0])\n dog_age_labels.append(i)\n\n # Plot bar graphs\n yticks_index = list(range(5))\n \n plt.figure(num = None, figsize=(6.5,4.5),dpi=80, facecolor = 'w', edgecolor='k')\n index = np.arange(len(dog_age_labels))\n \n index = index[0:13]\n dog_age_labels = dog_age_labels[0:13]\n dog_avg = dog_avg[0:13]\n \n plt.bar(index, dog_avg)\n plt.xlabel('Age in Years')\n plt.xticks(index, dog_age_labels)\n plt.ylabel('Adoption Speed')\n plt.yticks(yticks_index)\n plt.title('Dog Average Adoption Speed for Each Age')\n plt.savefig('bardogAvg.png', bbox_inches='tight')\n\n # Find cat average adoption speed by age\n for i in range(cat_min_age, cat_max_age + 1) :\n \n count = (cat_df.Age == i).sum()\n if(count > 0) :\n cat_avg.append(cat_df.loc[cat_df['Age'] == i, ['AdoptionSpeed']].mean()[0])\n cat_age_labels.append(i)\n\n plt.figure(num = None, figsize=(6,4),dpi=80, facecolor = 'w', edgecolor='k')\n index = np.arange(len(cat_age_labels))\n \n \n index = index[0:11]\n cat_age_labels = cat_age_labels[0:11]\n cat_avg = cat_avg[0:11]\n \n plt.bar(index, cat_avg)\n plt.xlabel('Age in Years')\n plt.xticks(index, cat_age_labels)\n plt.ylabel('Adoption Speed')\n plt.yticks(yticks_index)\n plt.title('Cat Average Adoption Speed for Each Age')\n plt.savefig('barcatAvg.png', bbox_inches='tight')", "def get_mean(self):\n self.meanval = np.mean(self.adulist)", "def boundary(gap, min_tags_in_window, average):\n\tassert min_tags_in_window >= 1;\n\ttemp = 0;\n\tfor i in range(0, min_tags_in_window): temp += poisson(i, average);\n\ttemp = pow(temp, gap+1); \n\treturn temp*temp; # start & end ", "def belief_mean(self) -> types.StatesTorch:\n return self._belief_mean", "def compute_lagged_ma(df, lag=48, window=48, min_periods=48):\n lagged_ma = (\n df[[\"store\", \"sales\"]]\n .groupby(\"store\")[\"sales\"]\n .rolling(window, min_periods=min_periods)\n .sum()\n .groupby(level=0)\n .shift(lag)\n .droplevel(0)\n )\n lagged_open = (\n df[[\"store\", \"open\"]]\n .groupby(\"store\")[\"open\"]\n .rolling(window, min_periods=min_periods)\n .sum()\n .groupby(level=0)\n .shift(lag)\n .droplevel(0)\n )\n return lagged_ma / lagged_open", "def lag(self):\n self._assert_counted_at_lag()\n return self._lag", "def get_average_survival(self):\n return np.mean(self.survival_rates)", "def moving_average_filter(val, filtered_val_prev, zeta):\n filtered_val = (1-zeta)*filtered_val_prev + zeta*val\n return filtered_val", "def bollinger_lband_indicator(close, n=20, ndev=2, fillna=False):\n df = pd.DataFrame([close]).transpose()\n mavg = 
close.rolling(n).mean()\n mstd = close.rolling(n).std()\n lband = mavg - ndev * mstd\n df['lband'] = 0.0\n df.loc[close < lband, 'lband'] = 1.0\n lband = df['lband']\n if fillna:\n lband = lband.replace([np.inf, -np.inf], np.nan).fillna(0)\n return pd.Series(lband, name='bbilband')", "def _get_mean(self):\n mu = self._get_conditional_negative_energy()\n return sigmoid(mu)", "def gavg(idata):\n\t\n\twgt1=np.cos(np.deg2rad(idata.lat))*(idata*0+1)\n\tga=(wgt1*idata).sum(dim=['lat','lon'])/wgt1.sum(dim=['lat','lon'])\n\n\treturn ga", "def angle_error_regression(y_true, y_pred):\n return K.mean(angle_difference(y_true * 360, y_pred * 360))", "def mean(self):\n return self.vmean", "def AvgFilter(self, number):\n\n alpha = (self.k-1)/self.k\n avg = alpha*self.prevAvg + (1-alpha)*number\n\n self.prevAvg = avg\n self.k = self.k + 1\n\n return avg", "def dif_avg(u_beam):\n u = np.sort(u_beam)[::-1]\n# print(u)\n ind = u.shape[0]//100*5\n top5 = np.mean(u[:ind])\n# bottom5 = np.mean(u[-ind:])\n mean_wo_top5 = np.mean(u[ind:])\n return top5/mean_wo_top5", "def isendofheated(self,lag):\n kmax = self.n\n v1 = self.v1\n v2 = self.v2\n for k in range(kmax-1):\n if lag[k+1]>=(v2+v1)/(v2-v1) * lag[k]:\n return False\n return True", "def advantages(self, gamma, lam):\n values = self.value_predictions()\n result = np.zeros([self.num_steps, self.batch_size], dtype=np.float32)\n current = np.zeros([self.batch_size], dtype=np.float32)\n for t in range(self.num_steps - 1, -1, -1):\n delta = self.rews[t] - values[t]\n delta += (1 - self.dones[t + 1]) * gamma * values[t + 1]\n current *= gamma * lam\n current += delta\n result[t] = current\n current *= (1 - self.dones[t])\n return result", "def rsi(df, lag):\n\n def avg_gain():\n gains = [\n df[i][\"c\"] - df[i - 1][\"c\"] if df[i][\"c\"] >= df[i - 1][\"c\"] else 0.0\n for i in range(1, len(df))\n ]\n avg_gain = [sum(gains[:lag]) / float(lag)]\n [avg_gain.append(((avg_gain[-1] * 13) + gain) / 14.0) for gain in gains[lag:]]\n return avg_gain\n\n def avg_loss():\n losses = [\n abs(df[i][\"c\"] - df[i - 1][\"c\"]) if df[i][\"c\"] < df[i - 1][\"c\"] else 0.0\n for i in range(1, len(df))\n ]\n avg_loss = [sum(losses[:lag]) / float(lag)]\n [avg_loss.append(((avg_loss[-1] * 13) + loss) / 14.0) for loss in losses[lag:]]\n return avg_loss\n\n gains = avg_gain()\n losses = avg_loss()\n\n raw_rsi = [\n round(100 - (100 / (1 + (gains[i] / losses[i]))), 2) for i in range(len(gains))\n ]\n df = df[-1 * len(raw_rsi) :]\n\n return [raw_rsi[i] for i in range(len(df))]", "def average(data):\n return np.average(data)", "def av(self, data):\n ts_ = self.ts(data)\n if 'year' not in ts_.coords:\n return ts_\n return ts_.mean('year')", "def _get_mean(self):\n return self._get_conditional_negative_energy()", "def trend_indicators(df):\n p = \"trend_\"\n high, low, close = convert_df_to_features(df, False)\n\n # ADX\n i = ADXIndicator(high, low, close, window=40)\n df[p + \"adx_40_neg\"] = i.adx_neg()\n # ARN\n i = AroonIndicator(close, window=50)\n df[p + \"arn_50\"] = i.aroon_indicator()\n # CCI\n i = CCIIndicator(high, low, close, window=70)\n df[p + \"cci_70\"] = i.cci()\n # DPO\n i = DPOIndicator(close, window=100)\n df[p +\n \"dpo_100_log\"] = list(map(lambda x: uf.log_abs(x, zeros=True), i.dpo()))\n # KST\n i = KSTIndicator(close)\n df[p + \"kst_sig_log\"] = list(map(lambda x: uf.log_abs(x,\n zeros=True), i.kst_sig()))\n # MACD\n i = MACD(close, 12, 16, 34)\n df[p + \"macd_12_signal_log\"] = list(\n map(lambda x: uf.log_abs(x, zeros=True), i.macd_signal()))\n # SMA\n i = 
SMAIndicator(close, window=50) # 50\n\n sma_50_rate = uf.get_rate(i.sma_indicator())\n df[p + \"sma_50_rate_log\"] = list(\n map(lambda x: uf.log_abs(x, zeros=True), sma_50_rate))\n\n sma_50_diff = list(map(lambda s, c: uf.none_subtraction(\n s, c), i.sma_indicator(), close))\n df[p + \"sma_50_diff_log\"] = list(\n map(lambda x: uf.log_abs(x, zeros=True), sma_50_diff))\n\n i = SMAIndicator(close, window=200)\n\n sma_200_diff = list(\n map(lambda s, c: uf.none_subtraction(s, c), i.sma_indicator(), close))\n sma_200_diff_o_close = list(\n map(lambda s, c: s / c, sma_200_diff, close))\n df[p + \"sma_200_diff_o_close_log\"] = list(\n map(lambda x: uf.log_abs(x * 100, zeros=True), sma_200_diff_o_close))\n # STC\n i = STCIndicator(close, 100, 200, 50)\n df[p + \"stc_50_2\"] = i.stc()\n # TRIX\n i = TRIXIndicator(close, window=20)\n df[p + \"trix_20_log\"] = list(map(lambda x: uf.log_abs(x * 1000), i.trix()))\n # VI\n i = VortexIndicator(high, low, close, window=50)\n df[p + \"vi_50_amp\"] = list(map(lambda x: uf.log_abs(x *\n 1000, zeros=True), i.vortex_indicator_diff()))\n\n return df", "def get_mean(self, X):\n raise NotImplementedError", "def _compute_mean_std(self, history, window=28):\n history = np.array(history[-window - 1: -1])\n decay_weights = [self.decay ** a for a in range(len(history), 0, -1)]\n weighted = history * decay_weights\n mean = weighted.mean()\n std = weighted.std()\n return mean, std", "def ATR(stockData , ticker, n):\n start = dt.datetime(2020, 1, 1)\n data = pdr.get_data_yahoo(ticker, start)\n\n high_low = data['High'] - data['Low']\n high_close = np.abs(data['High'] - data['Close'].shift())\n low_close = np.abs(data['Low'] - data['Close'].shift())\n\n ranges = pd.concat([high_low, high_close, low_close], axis=1)\n true_range = np.max(ranges, axis=1)\n\n atr = true_range.rolling(n).sum() / n\n\n\n # Returns the Average True Range dataframe / with the dates.\n return atr", "def low_abundance(self, cutoff=10):\n data = json.loads(self.profile)\n\n checks = [mean(v) > cutoff for _, v in data[\"data\"].items()]\n\n return not any(checks)", "def get_indicator(self, window_size=5):\n self._indicator = pd.DataFrame(self._stock['Close'].rolling(window_size).mean())\n return self._indicator", "def gamma(self, predictor):\n pred = predictor(self.X)\n pred_mean = pred.mean()\n\n self.tags[_PREDICTION] = pred\n expect_event = self.tags.groupby(_EVENT).mean()\n expect_group_event = self.tags.groupby(\n [_EVENT, _GROUP_ID]).mean()\n\n neg = expect_group_event.loc[(expect_group_event.index.get_level_values(_GROUP_ID) == 0.0)].groupby([_EVENT]).mean()\n pos = expect_group_event.loc[(expect_group_event.index.get_level_values(_GROUP_ID) == 1.0)].groupby([_EVENT]).mean()\n # print(pos)\n expect_group_event.loc[('label=1.0', 1), 'pred'] = (pred_mean + self.delta[1] * (pos.loc['label=1.0', 'pred'] - pos.loc['label=0.0', 'pred'])) / 2\n expect_group_event.loc[('label=0.0', 1), 'pred'] = (pred_mean + self.delta[1] * (pos.loc['label=0.0', 'pred'] - pos.loc['label=1.0', 'pred'])) / 2\n\n expect_group_event.loc[('label=1.0', 0), 'pred'] = (pred_mean + self.delta[0] * (neg.loc['label=1.0', 'pred'] - neg.loc['label=0.0', 'pred'])) / 2\n expect_group_event.loc[('label=0.0', 0), 'pred'] = (pred_mean + self.delta[0] * (neg.loc['label=0.0', 'pred'] - neg.loc['label=1.0', 'pred'])) / 2\n\n expect_event = expect_group_event.groupby(_EVENT).mean()\n expect_group_event[_DIFF] = expect_group_event[_PREDICTION] - expect_event[_PREDICTION]\n\n # expect_group_event[_DIFF] = expect_group_event[_PREDICTION] - 
expect_event[_PREDICTION]\n g_unsigned = expect_group_event[_DIFF]\n g_signed = pd.concat([g_unsigned, -g_unsigned],\n keys=[\"+\", \"-\"],\n names=[_SIGN, _EVENT, _GROUP_ID])\n self._gamma_descr = str(expect_group_event[[_PREDICTION, _DIFF]])\n return g_signed", "def mean_interval(self, name, alpha=_alpha, **kwargs):\n data = self.get(name,**kwargs)\n return ugali.utils.stats.mean_interval(data,alpha)", "def _individual_old_age_insurance(self, total_salary):\n return self.individual_old_age_insurance_rate * total_salary", "def gmean_diff(self) -> float:\n sim_log = np.log1p(self.predicted)\n obs_log = np.log1p(self.true)\n return float(np.exp(gmean(sim_log) - gmean(obs_log)))", "def get_rolling_mean(values, window = 20):\n\treturn values.rolling(window, center=False).mean()", "def avgtr(self):\n return np.diff(self.trtimes).mean()", "def _get_mean(self):\n return (0.485, 0.456, 0.406)", "def __call__(self, y_pred, y_true):\n dy = y_pred - y_true\n n = self.quantiles.size()[0]\n qs = self.quantiles.reshape((n,) + (1,) * max(len(dy.size()) - 2, 0))\n l = torch.where(dy >= 0.0, (1.0 - qs) * dy, (-qs) * dy)\n if self.mask:\n l = torch.where(y_true == self.mask, torch.zeros_like(l), l)\n return l.mean()", "def compute_lagged_ewma(\n df, lag=48, com=None, span=None, halflife=None, alpha=None, min_periods=50\n):\n lagged_ma = (\n df[[\"store\", \"sales\"]]\n .assign(sales=lambda x: x[\"sales\"].replace(0, np.nan))\n .groupby(\"store\")[\"sales\"]\n .ewm(\n com=com,\n span=span,\n halflife=halflife,\n alpha=alpha,\n min_periods=min_periods,\n ignore_na=True,\n )\n .mean()\n .groupby(level=0)\n .shift(lag)\n .droplevel(0)\n )\n return lagged_ma", "def calc_meanad(sig):\n m = np.mean(sig)\n diff = [abs(x-m) for x in sig]\n\n return np.mean(diff)", "def air_range(self) -> Union[int, float]:\n return self.air_weapon and self.air_weapon.range", "def cummean(a):\r\n return np.true_divide(np.cumsum(a), range(1, len(a) + 1))", "def flat_accuracy(valid_tags, pred_tags):\n\n return (np.array(valid_tags) == np.array(pred_tags)).mean()", "def accuracy_with_gap(y_true, y_pred, gap):\n true_predictions = 0\n for i in range(len(y_pred)):\n if abs(y_pred[i] - y_true[i]) <= gap:\n true_predictions += 1\n return true_predictions/len(y_true)", "def find_starts(config, data):\n\n trigger = butter_bandpass_filter(\n data, config.bandpass_lower, config.bandpass_upper,\n config.sampling_rate, 6)\n trigger = np.absolute(trigger)\n trigger = butter_lowpass_filter(\n trigger, config.lowpass_freq, config.sampling_rate, 6)\n\n # transient = 0.0005\n # start_idx = int(transient * config.sampling_rate)\n start_idx = 0\n average = np.average(trigger[start_idx:])\n maximum = np.max(trigger[start_idx:])\n minimum = np.min(trigger[start_idx:])\n middle = (np.max(trigger[start_idx:]) - min(trigger[start_idx:])) / 2\n if average < 1.1 * middle:\n print()\n print(\"Adjusting average to avg + (max - avg) / 2\")\n average = average + (maximum - average) / 2\n offset = -int(config.trigger_offset * config.sampling_rate)\n\n if config.trigger_rising:\n trigger_fn = lambda x, y: x > y\n else:\n trigger_fn = lambda x, y: x < y\n\n # The cryptic numpy code below is equivalent to looping over the signal and\n # recording the indices where the trigger crosses the average value in the\n # direction specified by config.trigger_rising. 
It is faster than a Python\n # loop by a factor of ~1000, so we trade readability for speed.\n trigger_signal = trigger_fn(trigger, average)[start_idx:]\n starts = np.where((trigger_signal[1:] != trigger_signal[:-1])\n * trigger_signal[1:])[0] + start_idx + offset + 1\n if trigger_signal[0]:\n starts = np.insert(starts, 0, start_idx + offset)\n\n # plt.plot(data)\n # plt.plot(trigger*100)\n # plt.axhline(y=average*100)\n # plt.show()\n\n return starts, trigger, average", "def ram_average(self):\n return _favg(self.ram_samples)", "def __LAI(NDVI, vegt_cover):\n\n LAI_1 = np.log(-(vegt_cover - 1)) / -0.45\n LAI_1[LAI_1 > 8] = 8.0\n LAI_2 = (9.519 * np.power(NDVI, 3) + 0.104 * np.power(NDVI, 2) +\n 1.236 * NDVI - 0.257)\n\n LAI = (LAI_1 + LAI_2) / 2.0 # Average LAI\n LAI[LAI < 0.001] = 0.001\n return LAI", "def showAverageGainWon(self) :\n averageGainWon = 0\n for level in self.level_history :\n averageGainWon += level.profit\n averageGainWon = averageGainWon/len(self.level_history)\n Scenario.messageGetAverageGainWon(averageGainWon)", "def get_moving_average(close, span):\n i = SMAIndicator(close, window=span)\n return i.sma_indicator()", "def avgX(self):\n return np.mean(self.getx())", "def mean_STD(self,counter):\n \n \n pass", "def _getBaselineThresh(self):\n print('Calculating 10% baseline')\n self.baseline = obrienBaseline.obrienBaseline(\n self.d['dos1rate'], timeWidth=5.0, \n cadence=0.1)\n self.peak_std = ( (self.d['dos1rate'][self.peakInd]/10 - \n self.baseline[self.peakInd]/10)/ \n np.sqrt(self.d['dos1rate'][self.peakInd]/10))\n return", "def get_avg_points(self):\n pass", "def get_avg_trend(y, filter='ewm', a=0.015, verbose =1, resample_interval='60s', fill_missing=False, title= '' , note= ''):\n\n # Two-way EWMA averaging\n ts_mean1, ts_std1 = smoothing(y, filter=filter, a=a)\n\n reversed_y = y.iloc[::-1]\n ts_mean2, ts_std2 = smoothing(reversed_y, filter=filter,a=a)\n ts_mean2 = ts_mean2.iloc[::-1]\n ts_std2 = ts_std2.iloc[::-1]\n\n ts_mean = (ts_mean1 + ts_mean2)/2\n ts_std = (ts_std1 + ts_std2)/2\n\n\n # Resample the daily trend by calculating the median of a resampling slice. 
mean can also be used.\n trend = ts_mean.resample(resample_interval).mean()\n ts_std = ts_std.resample(resample_interval).mean()\n\n # Fill up the missing trend samples if exist, by propagating the last valid\n if fill_missing: #rolling filter introduce Nan at the head or tail..\n trend.fillna(method='ffill', inplace=True, limit=2) #fill the end\n trend.fillna(method='bfill', inplace=True, limit=2) #fill the start\n\n\n\n if verbose>=1:\n t = title if title is not None else 'Average Trend'\n\n fig = plt.gcf()\n\n plt.plot(y[::1+y.shape[0]// 2000], alpha=.5)\n ax = trend.plot()\n ax.fill_between(trend.index, trend - 2 * ts_std, trend + 2 * ts_std,\n alpha=.25)\n ax.legend(['Orignal', 'Trend', 'std'])\n plt.text(ax.get_xlim()[0], ax.get_ylim()[0] + 50, note)\n plt.title(t)\n plt.show()\n\n import matplotlib.dates as mdates\n ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))\n plt.tight_layout()\n\n fig.savefig('./output/trends/'+t + '.pdf')\n plt.close(fig)\n\n return trend", "def _mean_diff(x, y):\n return np.mean(x) - np.mean(y)", "def calc_mean(sig):\n # m = mean(sig)\n return np.mean(sig)", "def mean(self):\n return self.cond_proba.mean", "def moving_average(data, temporal_window=100):\n window = np.ones(temporal_window) / temporal_window\n return np.convolve(data, window, 'valid')", "def volatility_indicators(df):\n p = \"volatility_\"\n high, low, close = convert_df_to_features(df, False)\n\n # ATR\n atr = AverageTrueRange(high, low, close, 14)\n df[p + \"atr_14\"] = atr.average_true_range()\n df[p + \"atr_o_close\"] = list(map(lambda a,\n c: a / c, df[p + \"atr_14\"], close))\n # BB\n bb = BollingerBands(close, window=10, window_dev=2)\n df[p + \"bb_wband_10\"] = bb.bollinger_wband()\n\n bb = BollingerBands(close, window=100, window_dev=2)\n df[p + \"bb_pband_100\"] = bb.bollinger_pband()\n\n bb = BollingerBands(close, window=200, window_dev=2)\n df[p + \"bb_wband_200\"] = bb.bollinger_wband()\n\n bb = BollingerBands(close, window=20, window_dev=2)\n df[p + \"bb_hband_o_close\"] = list(map(lambda l,\n c: (l - c) / c, bb.bollinger_hband(), close))\n\n # DC\n dc = DonchianChannel(high, low, close, window=50)\n df[p + \"dc_pband_50\"] = dc.donchian_channel_pband()\n dc = DonchianChannel(high, low, close, window=10)\n df[p + \"dc_wband_10\"] = dc.donchian_channel_wband()\n # KC\n kc = KeltnerChannel(high, low, close, window=50)\n df[p + \"pband_50\"] = kc.keltner_channel_pband()\n kc = KeltnerChannel(high, low, close, window=20)\n df[p + \"wband_20\"] = kc.keltner_channel_wband()\n # UI\n ui = UlcerIndex(close, window=30)\n df[p + \"ui_30\"] = ui.ulcer_index()\n return df", "def gamma(self, predictor):\n pred = predictor(self.X)\n self.tags[_PREDICTION] = pred\n expect_event = self.tags.groupby(_EVENT).mean()\n expect_group_event = self.tags.groupby(\n [_EVENT, _GROUP_ID]).mean()\n\n num_grp = len(self.error_rate)\n tprs = [0 for _ in range(num_grp)]\n # print(expect_group_event)\n for i in range(num_grp):\n tprs[i] = expect_group_event.loc[(expect_group_event.index.get_level_values(_GROUP_ID) == i)].groupby([_EVENT]).mean()\n expect_group_event.loc[('label=1', i), 'pred'] = (1 - self.error_rate[i][0]) * tprs[i].loc['label=1', 'pred'] + self.error_rate[i][0] * tprs[i].loc['label=0', 'pred']\n expect_group_event.loc[('label=0', i), 'pred'] = (1 - self.error_rate[i][1]) * tprs[i].loc['label=0', 'pred'] + self.error_rate[i][1] * tprs[i].loc['label=1', 'pred']\n\n # neg = expect_group_event.loc[(expect_group_event.index.get_level_values(_GROUP_ID) == 0.0)].groupby([_EVENT]).mean()\n 
# pos = expect_group_event.loc[(expect_group_event.index.get_level_values(_GROUP_ID) == 1.0)].groupby([_EVENT]).mean()\n\n # expect_group_event.loc[('label=1.0', 1), 'pred'] = (1 - self.error_rate[1][0]) * pos.loc['label=1.0', 'pred'] + self.error_rate[1][1] * pos.loc['label=0.0', 'pred']\n # expect_group_event.loc[('label=0.0', 1), 'pred'] = (1 - self.error_rate[1][1]) * pos.loc['label=0.0', 'pred'] + self.error_rate[1][0] * pos.loc['label=1.0', 'pred']\n\n # expect_group_event.loc[('label=1.0', 0), 'pred'] = (1 - self.error_rate[0][0]) * neg.loc['label=1.0', 'pred'] + self.error_rate[0][1] * neg.loc['label=0.0', 'pred']\n # expect_group_event.loc[('label=0.0', 0), 'pred'] = (1 - self.error_rate[0][1]) * neg.loc['label=0.0', 'pred'] + self.error_rate[0][0] * neg.loc['label=1.0', 'pred']\n\n expect_event = expect_group_event.groupby(_EVENT).mean()\n expect_group_event[_DIFF] = expect_group_event[_PREDICTION] - expect_event[_PREDICTION]\n\n # expect_group_event[_DIFF] = expect_group_event[_PREDICTION] - expect_event[_PREDICTION]\n g_unsigned = expect_group_event[_DIFF]\n g_signed = pd.concat([g_unsigned, -g_unsigned],\n keys=[\"+\", \"-\"],\n names=[_SIGN, _EVENT, _GROUP_ID])\n self._gamma_descr = str(expect_group_event[[_PREDICTION, _DIFF]])\n return g_signed" ]
[ "0.6600089", "0.6148675", "0.6051582", "0.59976155", "0.57290894", "0.5724756", "0.5678576", "0.56369007", "0.55565417", "0.5463608", "0.5455246", "0.5452944", "0.54398197", "0.5430366", "0.5412434", "0.53751826", "0.5327749", "0.5327182", "0.5326094", "0.52998155", "0.52757585", "0.52621", "0.5249347", "0.5246109", "0.5239227", "0.5238913", "0.52334976", "0.52284217", "0.52228266", "0.52228266", "0.5218736", "0.5218178", "0.5217545", "0.52149194", "0.5202403", "0.51936007", "0.5183227", "0.51738185", "0.5168707", "0.5159302", "0.51430225", "0.51364064", "0.5136299", "0.512962", "0.51135284", "0.5106295", "0.5104564", "0.51022875", "0.5079198", "0.5075643", "0.5072617", "0.5052022", "0.5039352", "0.50357074", "0.5032805", "0.5023554", "0.5021878", "0.50209004", "0.50206774", "0.50203544", "0.50193274", "0.5011841", "0.50080687", "0.50050175", "0.5002423", "0.50018674", "0.50000685", "0.4999536", "0.49885947", "0.49867088", "0.49854943", "0.4983263", "0.49830496", "0.49753264", "0.49698424", "0.49689937", "0.49661228", "0.4965047", "0.49603522", "0.49583504", "0.49581888", "0.49508366", "0.49453616", "0.49398616", "0.49376535", "0.49330312", "0.49309635", "0.49250388", "0.49125448", "0.4910416", "0.49038148", "0.49036574", "0.49028146", "0.48962948", "0.48955396", "0.48929453", "0.48899376", "0.4888026", "0.48847646", "0.48843673" ]
0.6382062
1
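The record above ranks a thin wrapper around ta.ATR against the Average True Range query. As a hedged illustration only, not part of the dataset, a pandas-only equivalent of that calculation is sketched below; note that TA-Lib smooths the true range with Wilder's method, whereas this sketch, like several of the negatives above, uses a plain rolling mean.

import pandas as pd

def average_true_range(high: pd.Series, low: pd.Series,
                       close: pd.Series, period: int = 14) -> pd.Series:
    # True range: the largest of high-low, |high - prev_close|, |low - prev_close|.
    prev_close = close.shift(1)
    tr = pd.concat([high - low,
                    (high - prev_close).abs(),
                    (low - prev_close).abs()], axis=1).max(axis=1)
    # Average the true range over the lookback period.
    return tr.rolling(period).mean()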
Average True Range Is a lagging indicator, used to provide insights into volatility.
def AverageTrueRangeStopLoss(self, timeperiod = 14, multiplier = 2): stopLoss = ta.ATR(self.data.high, self.data.low, self.data.close, timeperiod) plus_dm = ta.PLUS_DM(self.data.high,self.data.low, timeperiod) minus_dm = ta.MINUS_DM(self.data.high,self.data.low, timeperiod) if plus_dm > minus_dm: stopLoss = self.data.close - multiplier * stopLoss else: stopLoss = self.data.close + multiplier * stopLoss stopLoss.dropna(inplace=True) return stopLoss
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def average_true_range(self, period=14):\n tr = self._true_range_computation(period=period * 2)\n return pd.Series(tr.rolling(center=False, window=period,\n min_periods=period - 1).mean(),\n name='{} day ATR Ticker: {}'.format(period,\n self.ticker)).tail(\n period)", "def AverageTrueRange(self, timeperiod = 14):\r\n \r\n return ta.ATR(self.data.high, self.data.low, self.data.close, timeperiod)", "def should_average(self):\n return self._should_average", "def rollingAvg( lag, oldSet ):\r\n\r\n newSet = []\r\n\r\n # insert lag-1 number of nans at beginning of list\r\n for i in range(0, lag - 1):\r\n newSet.append(Decimal('nan'))\r\n\r\n # calculate new values for list\r\n for i in range((lag - 1), len(oldSet)):\r\n sum = 0\r\n for j in range(lag):\r\n sum += oldSet[i - j]\r\n\r\n avg = sum / Decimal(lag)\r\n newSet.append(Decimal(avg))\r\n\r\n return newSet", "def atr(df, lag, normalize=False):\n\n def _true_range(window):\n divisor = (1.0 * float(not normalize)) + ((float(normalize) * window[-1][\"c\"]))\n\n tr1 = window[-1][\"h\"] - window[-1][\"l\"]\n tr2 = window[-1][\"h\"] - window[-2][\"c\"]\n tr3 = window[-1][\"l\"] - window[-2][\"c\"]\n return max(tr1, tr2, tr3) / divisor\n\n def _sma(window):\n avg = round(reduce(lambda a, b: a + b, window) / len(window), 2)\n return avg\n\n tr = [_true_range(df[i : i + 2]) for i in range(len(df) - 1)]\n return [_sma(tr[i : i + lag + 1]) for i in range(len(tr) - lag)]", "def get_avg_range(range_array):\n # Average the ranges\n range_count = 0\n range_accum = 0.0\n\n if range_array:\n # Accumulate the data\n for beam in range(len(range_array)):\n if range_array[beam] > 0.0 and not Ensemble.is_bad_velocity(range_array[beam]):\n range_count += 1\n range_accum += range_array[beam]\n\n if range_count > 0:\n return range_accum / range_count\n else:\n return 0.0", "def conditional_mean(self, gp):\n raise NotImplementedError", "def indicator_logic(self, candle):\n # Initialize variables\n sma, upper, lower = 2, -1.0, -1.0 # 'sma' = 2 is clever way to generate 'a favor' e 'contra'\n\n # Append close to moving average\n self.ma.append(candle.close[self.up])\n\n # Check if there are enough candles to calculate moving average\n if len(self.ma) == self.period:\n\n # Initialize upper and lower values for when there is a valid moving average\n upper, lower = 0.0, 0.0\n\n # Calculates moving average\n avg = sum(self.ma) / self.period\n\n # Tells if current close is above moving average\n sma = 1 if candle.close[self.up] > avg else 0\n\n # Calculates standard deviation\n std = pstdev(self.ma)\n\n # Calculates difference between current candle and moving average\n diff = candle.close[self.up] - avg\n\n # Transform difference to standard deviations\n if diff > 0 and std != 0:\n # Value of above\n upper = diff / std\n elif diff < 0 and std != 0:\n # Value if below\n lower = -diff / std\n\n # Returns values\n return sma, upper, lower", "def __call__(self, x):\n return np.mean(self.observations <= x)", "def average_age(self, start=1, end=None):\n picks = self.pick_set.filter(number__gte=start)\n if end is not None:\n picks = picks.filter(number__lte=end)\n\n dt = datetime.date(self.year, 1, 1)\n ages = [e.player.age(dt) for e in picks]\n ages = [e for e in ages if e]\n average = sum(ages) / len(ages)\n return average", "def step_change(data, span=10, lag=1):\n moving_average = data.ewm(span=span).mean()\n lagged = pd.Series(np.append(np.repeat(np.nan, lag), moving_average[:len(moving_average)-lag]))\n diffs = data[lag:] - lagged\n pct_diff = diffs/moving_average\n 
max_diff = max(pct_diff)\n mean_diff = np.mean(pct_diff)\n return moving_average, diffs, pct_diff, max_diff, avg_diff", "def average(self,start_window, end_window):\n query = f\"select avg(age) from `{self.table_id}` where timestamp between {start_window} and {end_window}\"\n query_job = self.client.query(query)\n return query_job.result", "def _get_average(self):\n norm = 1.0\n for pos, idx in enumerate(self.idx):\n norm *= (self.high[pos] - self.low[pos])\n return 1.0/norm", "def slg_average(df,start_year,end_year,bat_met,player_name):\n base_fields = ['AB','HR','X3B','X2B','SLG']\n emp_list = check_base_fields(df,base_fields)\n\n if not emp_list:\n df['X1B'] = round(df['SLG']*df['AB'] - (4*df['HR'] + 3*df['X3B'] + 2*df['X2B']),0)\n return round((df['X1B'].sum(axis = 0) + df['X2B'].sum(axis = 0) * 2 + df['X3B'].sum(axis = 0) * 3 + df['HR'].sum(axis = 0) * 4) / df['AB'].sum(axis = 0),3)\n\n else:\n df = original_dataframe(start_year,end_year,bat_met+emp_list,player_name)\n df['X1B'] = round(df['SLG']*df['AB'] - (4*df['HR'] + 3*df['X3B'] + 2*df['X2B']),0)\n SLG = round((df['X1B'].sum(axis = 0) + df['X2B'].sum(axis = 0) * 2 + df['X3B'].sum(axis = 0) * 3 + df['HR'].sum(axis = 0) * 4) / df['AB'].sum(axis = 0),3)\n del df['X1B']\n return SLG", "def mean_error_rate(y_true, y_interval):\n _check_interval_array(y_interval)\n\n wrong_intervals = ((y_true < y_interval[:, 0]) | (y_true > y_interval[:, 1])).sum()\n\n return wrong_intervals / y_true.shape[0]", "def average(self):\n return (self.current + self.last) / 2.0", "def _get_acc(logits: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n predictions = logits > 0\n target_bool = target > 0.5\n return (predictions == target_bool).float().mean()", "def mean_in_range(arr, args):\n mn = np.mean(arr)\n res = mn > args[0] and mn < args[1]\n return ct.Result(res, 'mean_in_range')", "def calc_meanadiff(sig):\n\n return np.mean(abs(np.diff(sig)))", "def load_average(self):\n return _favg(self.load_samples)", "def get_average_MAE(true_pred_df): \n age_group = true_pred_df.groupby('y_true')\n \n mae_average = []\n for age, age_data in age_group:\n mae_average.append(np.mean(age_data.mae))\n \n return mae_average", "def mean_vol(df):\n return df.tail(5)['volume'].mean(), df.tail(20)['volume'].mean()", "def moving_avg(df, key, lag):\n\n def _sma(key, window):\n values = list(map(lambda w: w[key], window))\n avg = round(reduce(lambda a, b: a + b, values) / len(values), 2)\n return avg\n\n return [_sma(key, df[i : i + lag + 1]) for i in range(len(df) - lag)]", "def soft_acc(self, y_true, y_pred):\n try:\n score= backend.mean(backend.abs(y_true - y_pred) <= self.prediction_tolerance)\n except Exception:\n pass\n return score", "def mean(vals):", "def take_one_averaged(self):\n self.na.set_center_frequency(6.160574e9)\n self.na.set_span(10e6)\n self.na.set_power(-5, 1)\n self.na.set_ifbw(1e3)\n\n self.na.set_query_timeout(40e3)\n set_format = self.na.set_format('polar')\n print \"set_format returned: \", set_format\n self.na.set_trigger_source(\"manual\")\n self.na.set_averages(10)\n self.na.set_trigger_average_mode()\n\n self.na.clear_averages(channel=1)\n self.na.trigger_single(channel=1)\n fpts, xs, ys = self.na.read_data()\n #\n plt.figure()\n plt.plot(fpts, xs)\n plt.plot(fpts, ys)\n plt.show()", "def test_avg_l(self):\n u_spec = leabra.UnitSpec(g_bar_e=0.3, g_bar_l=0.3, g_bar_i=1.0)\n u = leabra.Unit(spec=u_spec)\n\n for _ in range(20):\n u.add_excitatory(1.0)\n u.calculate_net_in()\n u.cycle('minus')\n\n self.assertEqual(u.avg_l, 0.40)\n 
u.spec.update_avg_l(u)\n self.assertTrue(np.allclose(0.52, u.avg_l, rtol=0.1, atol=0.1))\n #TODO: verify that 0.52 is the value of emergent\n\n for _ in range(100):\n u.spec.update_avg_l(u)\n self.assertTrue(np.allclose(1.64, u.avg_l, rtol=0.1, atol=0.1))\n #TODO: verify that 1.64 is the value of emergent", "def mean_average_position():\n pass", "def averaged_risk(self):\n return self._averaged_risk", "def averaged_risk(self):\n return self._averaged_risk", "def gen_sensor_reward(self,MAX_UNCERTAINTY,window_size,window_lag):\n\n for i in range(0, len(self.tracker_object.tracks)):\n unormalized_uncertainty = np.sum(self.tracker_object.tracks[i].p_k_k.diagonal())\n self.uncertainty[i].append((1.0 / MAX_UNCERTAINTY) * unormalized_uncertainty)\n\n\n this_uncertainty = []\n [this_uncertainty.append(self.uncertainty[x][-1]) for x in range(0, len(self.tracker_object.tracks))]\n\n self.avg_uncertainty.append(np.mean(this_uncertainty))\n\n if len(self.avg_uncertainty) < window_size + window_lag:\n self.reward.append(0)\n else:\n current_avg = np.mean(self.avg_uncertainty[-window_size:])\n prev_avg = np.mean(self.avg_uncertainty[-(window_size + window_lag):-window_lag])\n if current_avg < prev_avg or self.avg_uncertainty[-1] < .1:\n # if current_avg < prev_avg:\n self.reward.append(1)\n else:\n self.reward.append(0)", "def absorbance( self, lmin=0, lmax=0 ):\n A = self.prop[\"SPEC\"][:,1]\n if lmax>0:\n m = np.vstack( (self.wavelength(), A) ).T # matrix w lambda and absorbance\n m = m[ m[:,0] >= lmin ] # slice by wavelength...\n m = m[ m[:,0] <= lmax ]\n return np.average( m[:,1] ) # scalar\n return A # array", "def average(self, start, end):\n return self.integrate(start, end) / (end - start)", "def forward_avg(array_in):\n return (array_in[:-1] + array_in[1:]) * 0.5", "def get_info_gain(true_rows, false_rows, current_impurity):\n avg_impurity = (len(true_rows)/(len(true_rows)+len(false_rows))) * get_gini(true_rows) + \\\n (len(false_rows)/(len(true_rows)+len(false_rows))) * get_gini(false_rows)\n return current_impurity - avg_impurity", "def demo_one_filter():\n data = [1.3, 2.7, 0.8, 4.1, 4.3, -0.1]\n avg = np.mean(data)\n print \"average value is:\", avg\n\n # create iterator that filters to keep only above average data\n above_avg_iter = filter(lambda x: x > avg, data) # returns iterator for data above the avg\n\n print \"values strictly above average are:\", list(above_avg_iter)", "def conditional_mean(self, F):\n raise NotImplementedError", "def angle_error(self, y_true, y_pred):\n diff = self.angle_difference(K.argmax(y_true), K.argmax(y_pred))\n return K.mean(K.cast(K.abs(diff), K.floatx()))", "def learning_rate_range():\n # Lower and upper bounds\n #######\n lower_bound = 0.1 \n upper_bound = 1e-6\n #######\n return lower_bound, upper_bound", "def mean(self):\n return self.x_guessed()", "def average_age():\n df = pd.read_csv(config.META_FQN, sep=\"\\t\")\n ages = []\n for _, row in df.iterrows():\n if row[\"asr_test\"]:\n age = row[\"Age_ses1\"]\n if not math.isnan(age):\n ages.append(age)\n\n print(\"------ Age ------\")\n print_stats(ages)", "def get_average_age(self):\n return np.mean([agent.age for agent in self.agents])", "def force_averaging(self):\n return self._force_averaging", "def age_avg_adopt():\n \n import numpy as np\n import pandas as pd\n import matplotlib.pyplot as plt\n\n train = pd.read_csv('./data/train.csv')\n \n # Convert age from months to years\n train.loc[train['Age'] > -1, 'Age'] = (train['Age']//12)\n \n # Divide by dog (Type = 1) and cat (Type = 2)\n dog_df = 
train.loc[train['Type'] == 1, ['State','Type', 'Age', 'AdoptionSpeed']]\n cat_df = train.loc[train['Type'] == 2, ['State','Type', 'Age', 'AdoptionSpeed']]\n \n dog_max_age = max(dog_df.loc[:, 'Age'])\n dog_min_age = min(dog_df.loc[:, 'Age'])\n \n cat_max_age = max(cat_df.loc[:, 'Age'])\n cat_min_age = min(cat_df.loc[:, 'Age'])\n \n dog_age_labels = []\n dog_avg = []\n \n cat_age_labels = [] \n cat_avg = []\n \n\n # Find dog average adoption speed by age\n for i in range(dog_min_age, dog_max_age + 1) :\n \n count = (dog_df.Age == i).sum()\n if(count > 0) :\n dog_avg.append(dog_df.loc[dog_df['Age'] == i, ['AdoptionSpeed']].mean()[0])\n dog_age_labels.append(i)\n\n # Plot bar graphs\n yticks_index = list(range(5))\n \n plt.figure(num = None, figsize=(6.5,4.5),dpi=80, facecolor = 'w', edgecolor='k')\n index = np.arange(len(dog_age_labels))\n \n index = index[0:13]\n dog_age_labels = dog_age_labels[0:13]\n dog_avg = dog_avg[0:13]\n \n plt.bar(index, dog_avg)\n plt.xlabel('Age in Years')\n plt.xticks(index, dog_age_labels)\n plt.ylabel('Adoption Speed')\n plt.yticks(yticks_index)\n plt.title('Dog Average Adoption Speed for Each Age')\n plt.savefig('bardogAvg.png', bbox_inches='tight')\n\n # Find cat average adoption speed by age\n for i in range(cat_min_age, cat_max_age + 1) :\n \n count = (cat_df.Age == i).sum()\n if(count > 0) :\n cat_avg.append(cat_df.loc[cat_df['Age'] == i, ['AdoptionSpeed']].mean()[0])\n cat_age_labels.append(i)\n\n plt.figure(num = None, figsize=(6,4),dpi=80, facecolor = 'w', edgecolor='k')\n index = np.arange(len(cat_age_labels))\n \n \n index = index[0:11]\n cat_age_labels = cat_age_labels[0:11]\n cat_avg = cat_avg[0:11]\n \n plt.bar(index, cat_avg)\n plt.xlabel('Age in Years')\n plt.xticks(index, cat_age_labels)\n plt.ylabel('Adoption Speed')\n plt.yticks(yticks_index)\n plt.title('Cat Average Adoption Speed for Each Age')\n plt.savefig('barcatAvg.png', bbox_inches='tight')", "def get_mean(self):\n self.meanval = np.mean(self.adulist)", "def boundary(gap, min_tags_in_window, average):\n\tassert min_tags_in_window >= 1;\n\ttemp = 0;\n\tfor i in range(0, min_tags_in_window): temp += poisson(i, average);\n\ttemp = pow(temp, gap+1); \n\treturn temp*temp; # start & end ", "def belief_mean(self) -> types.StatesTorch:\n return self._belief_mean", "def compute_lagged_ma(df, lag=48, window=48, min_periods=48):\n lagged_ma = (\n df[[\"store\", \"sales\"]]\n .groupby(\"store\")[\"sales\"]\n .rolling(window, min_periods=min_periods)\n .sum()\n .groupby(level=0)\n .shift(lag)\n .droplevel(0)\n )\n lagged_open = (\n df[[\"store\", \"open\"]]\n .groupby(\"store\")[\"open\"]\n .rolling(window, min_periods=min_periods)\n .sum()\n .groupby(level=0)\n .shift(lag)\n .droplevel(0)\n )\n return lagged_ma / lagged_open", "def lag(self):\n self._assert_counted_at_lag()\n return self._lag", "def get_average_survival(self):\n return np.mean(self.survival_rates)", "def moving_average_filter(val, filtered_val_prev, zeta):\n filtered_val = (1-zeta)*filtered_val_prev + zeta*val\n return filtered_val", "def bollinger_lband_indicator(close, n=20, ndev=2, fillna=False):\n df = pd.DataFrame([close]).transpose()\n mavg = close.rolling(n).mean()\n mstd = close.rolling(n).std()\n lband = mavg - ndev * mstd\n df['lband'] = 0.0\n df.loc[close < lband, 'lband'] = 1.0\n lband = df['lband']\n if fillna:\n lband = lband.replace([np.inf, -np.inf], np.nan).fillna(0)\n return pd.Series(lband, name='bbilband')", "def _get_mean(self):\n mu = self._get_conditional_negative_energy()\n return sigmoid(mu)", "def 
gavg(idata):\n\t\n\twgt1=np.cos(np.deg2rad(idata.lat))*(idata*0+1)\n\tga=(wgt1*idata).sum(dim=['lat','lon'])/wgt1.sum(dim=['lat','lon'])\n\n\treturn ga", "def angle_error_regression(y_true, y_pred):\n return K.mean(angle_difference(y_true * 360, y_pred * 360))", "def isendofheated(self,lag):\n kmax = self.n\n v1 = self.v1\n v2 = self.v2\n for k in range(kmax-1):\n if lag[k+1]>=(v2+v1)/(v2-v1) * lag[k]:\n return False\n return True", "def mean(self):\n return self.vmean", "def AvgFilter(self, number):\n\n alpha = (self.k-1)/self.k\n avg = alpha*self.prevAvg + (1-alpha)*number\n\n self.prevAvg = avg\n self.k = self.k + 1\n\n return avg", "def dif_avg(u_beam):\n u = np.sort(u_beam)[::-1]\n# print(u)\n ind = u.shape[0]//100*5\n top5 = np.mean(u[:ind])\n# bottom5 = np.mean(u[-ind:])\n mean_wo_top5 = np.mean(u[ind:])\n return top5/mean_wo_top5", "def advantages(self, gamma, lam):\n values = self.value_predictions()\n result = np.zeros([self.num_steps, self.batch_size], dtype=np.float32)\n current = np.zeros([self.batch_size], dtype=np.float32)\n for t in range(self.num_steps - 1, -1, -1):\n delta = self.rews[t] - values[t]\n delta += (1 - self.dones[t + 1]) * gamma * values[t + 1]\n current *= gamma * lam\n current += delta\n result[t] = current\n current *= (1 - self.dones[t])\n return result", "def rsi(df, lag):\n\n def avg_gain():\n gains = [\n df[i][\"c\"] - df[i - 1][\"c\"] if df[i][\"c\"] >= df[i - 1][\"c\"] else 0.0\n for i in range(1, len(df))\n ]\n avg_gain = [sum(gains[:lag]) / float(lag)]\n [avg_gain.append(((avg_gain[-1] * 13) + gain) / 14.0) for gain in gains[lag:]]\n return avg_gain\n\n def avg_loss():\n losses = [\n abs(df[i][\"c\"] - df[i - 1][\"c\"]) if df[i][\"c\"] < df[i - 1][\"c\"] else 0.0\n for i in range(1, len(df))\n ]\n avg_loss = [sum(losses[:lag]) / float(lag)]\n [avg_loss.append(((avg_loss[-1] * 13) + loss) / 14.0) for loss in losses[lag:]]\n return avg_loss\n\n gains = avg_gain()\n losses = avg_loss()\n\n raw_rsi = [\n round(100 - (100 / (1 + (gains[i] / losses[i]))), 2) for i in range(len(gains))\n ]\n df = df[-1 * len(raw_rsi) :]\n\n return [raw_rsi[i] for i in range(len(df))]", "def average(data):\n return np.average(data)", "def av(self, data):\n ts_ = self.ts(data)\n if 'year' not in ts_.coords:\n return ts_\n return ts_.mean('year')", "def _get_mean(self):\n return self._get_conditional_negative_energy()", "def trend_indicators(df):\n p = \"trend_\"\n high, low, close = convert_df_to_features(df, False)\n\n # ADX\n i = ADXIndicator(high, low, close, window=40)\n df[p + \"adx_40_neg\"] = i.adx_neg()\n # ARN\n i = AroonIndicator(close, window=50)\n df[p + \"arn_50\"] = i.aroon_indicator()\n # CCI\n i = CCIIndicator(high, low, close, window=70)\n df[p + \"cci_70\"] = i.cci()\n # DPO\n i = DPOIndicator(close, window=100)\n df[p +\n \"dpo_100_log\"] = list(map(lambda x: uf.log_abs(x, zeros=True), i.dpo()))\n # KST\n i = KSTIndicator(close)\n df[p + \"kst_sig_log\"] = list(map(lambda x: uf.log_abs(x,\n zeros=True), i.kst_sig()))\n # MACD\n i = MACD(close, 12, 16, 34)\n df[p + \"macd_12_signal_log\"] = list(\n map(lambda x: uf.log_abs(x, zeros=True), i.macd_signal()))\n # SMA\n i = SMAIndicator(close, window=50) # 50\n\n sma_50_rate = uf.get_rate(i.sma_indicator())\n df[p + \"sma_50_rate_log\"] = list(\n map(lambda x: uf.log_abs(x, zeros=True), sma_50_rate))\n\n sma_50_diff = list(map(lambda s, c: uf.none_subtraction(\n s, c), i.sma_indicator(), close))\n df[p + \"sma_50_diff_log\"] = list(\n map(lambda x: uf.log_abs(x, zeros=True), sma_50_diff))\n\n i = 
SMAIndicator(close, window=200)\n\n sma_200_diff = list(\n map(lambda s, c: uf.none_subtraction(s, c), i.sma_indicator(), close))\n sma_200_diff_o_close = list(\n map(lambda s, c: s / c, sma_200_diff, close))\n df[p + \"sma_200_diff_o_close_log\"] = list(\n map(lambda x: uf.log_abs(x * 100, zeros=True), sma_200_diff_o_close))\n # STC\n i = STCIndicator(close, 100, 200, 50)\n df[p + \"stc_50_2\"] = i.stc()\n # TRIX\n i = TRIXIndicator(close, window=20)\n df[p + \"trix_20_log\"] = list(map(lambda x: uf.log_abs(x * 1000), i.trix()))\n # VI\n i = VortexIndicator(high, low, close, window=50)\n df[p + \"vi_50_amp\"] = list(map(lambda x: uf.log_abs(x *\n 1000, zeros=True), i.vortex_indicator_diff()))\n\n return df", "def get_mean(self, X):\n raise NotImplementedError", "def _compute_mean_std(self, history, window=28):\n history = np.array(history[-window - 1: -1])\n decay_weights = [self.decay ** a for a in range(len(history), 0, -1)]\n weighted = history * decay_weights\n mean = weighted.mean()\n std = weighted.std()\n return mean, std", "def ATR(stockData , ticker, n):\n start = dt.datetime(2020, 1, 1)\n data = pdr.get_data_yahoo(ticker, start)\n\n high_low = data['High'] - data['Low']\n high_close = np.abs(data['High'] - data['Close'].shift())\n low_close = np.abs(data['Low'] - data['Close'].shift())\n\n ranges = pd.concat([high_low, high_close, low_close], axis=1)\n true_range = np.max(ranges, axis=1)\n\n atr = true_range.rolling(n).sum() / n\n\n\n # Returns the Average True Range dataframe / with the dates.\n return atr", "def low_abundance(self, cutoff=10):\n data = json.loads(self.profile)\n\n checks = [mean(v) > cutoff for _, v in data[\"data\"].items()]\n\n return not any(checks)", "def gamma(self, predictor):\n pred = predictor(self.X)\n pred_mean = pred.mean()\n\n self.tags[_PREDICTION] = pred\n expect_event = self.tags.groupby(_EVENT).mean()\n expect_group_event = self.tags.groupby(\n [_EVENT, _GROUP_ID]).mean()\n\n neg = expect_group_event.loc[(expect_group_event.index.get_level_values(_GROUP_ID) == 0.0)].groupby([_EVENT]).mean()\n pos = expect_group_event.loc[(expect_group_event.index.get_level_values(_GROUP_ID) == 1.0)].groupby([_EVENT]).mean()\n # print(pos)\n expect_group_event.loc[('label=1.0', 1), 'pred'] = (pred_mean + self.delta[1] * (pos.loc['label=1.0', 'pred'] - pos.loc['label=0.0', 'pred'])) / 2\n expect_group_event.loc[('label=0.0', 1), 'pred'] = (pred_mean + self.delta[1] * (pos.loc['label=0.0', 'pred'] - pos.loc['label=1.0', 'pred'])) / 2\n\n expect_group_event.loc[('label=1.0', 0), 'pred'] = (pred_mean + self.delta[0] * (neg.loc['label=1.0', 'pred'] - neg.loc['label=0.0', 'pred'])) / 2\n expect_group_event.loc[('label=0.0', 0), 'pred'] = (pred_mean + self.delta[0] * (neg.loc['label=0.0', 'pred'] - neg.loc['label=1.0', 'pred'])) / 2\n\n expect_event = expect_group_event.groupby(_EVENT).mean()\n expect_group_event[_DIFF] = expect_group_event[_PREDICTION] - expect_event[_PREDICTION]\n\n # expect_group_event[_DIFF] = expect_group_event[_PREDICTION] - expect_event[_PREDICTION]\n g_unsigned = expect_group_event[_DIFF]\n g_signed = pd.concat([g_unsigned, -g_unsigned],\n keys=[\"+\", \"-\"],\n names=[_SIGN, _EVENT, _GROUP_ID])\n self._gamma_descr = str(expect_group_event[[_PREDICTION, _DIFF]])\n return g_signed", "def get_indicator(self, window_size=5):\n self._indicator = pd.DataFrame(self._stock['Close'].rolling(window_size).mean())\n return self._indicator", "def _individual_old_age_insurance(self, total_salary):\n return self.individual_old_age_insurance_rate * 
total_salary", "def mean_interval(self, name, alpha=_alpha, **kwargs):\n data = self.get(name,**kwargs)\n return ugali.utils.stats.mean_interval(data,alpha)", "def gmean_diff(self) -> float:\n sim_log = np.log1p(self.predicted)\n obs_log = np.log1p(self.true)\n return float(np.exp(gmean(sim_log) - gmean(obs_log)))", "def get_rolling_mean(values, window = 20):\n\treturn values.rolling(window, center=False).mean()", "def avgtr(self):\n return np.diff(self.trtimes).mean()", "def _get_mean(self):\n return (0.485, 0.456, 0.406)", "def __call__(self, y_pred, y_true):\n dy = y_pred - y_true\n n = self.quantiles.size()[0]\n qs = self.quantiles.reshape((n,) + (1,) * max(len(dy.size()) - 2, 0))\n l = torch.where(dy >= 0.0, (1.0 - qs) * dy, (-qs) * dy)\n if self.mask:\n l = torch.where(y_true == self.mask, torch.zeros_like(l), l)\n return l.mean()", "def compute_lagged_ewma(\n df, lag=48, com=None, span=None, halflife=None, alpha=None, min_periods=50\n):\n lagged_ma = (\n df[[\"store\", \"sales\"]]\n .assign(sales=lambda x: x[\"sales\"].replace(0, np.nan))\n .groupby(\"store\")[\"sales\"]\n .ewm(\n com=com,\n span=span,\n halflife=halflife,\n alpha=alpha,\n min_periods=min_periods,\n ignore_na=True,\n )\n .mean()\n .groupby(level=0)\n .shift(lag)\n .droplevel(0)\n )\n return lagged_ma", "def calc_meanad(sig):\n m = np.mean(sig)\n diff = [abs(x-m) for x in sig]\n\n return np.mean(diff)", "def air_range(self) -> Union[int, float]:\n return self.air_weapon and self.air_weapon.range", "def cummean(a):\r\n return np.true_divide(np.cumsum(a), range(1, len(a) + 1))", "def flat_accuracy(valid_tags, pred_tags):\n\n return (np.array(valid_tags) == np.array(pred_tags)).mean()", "def accuracy_with_gap(y_true, y_pred, gap):\n true_predictions = 0\n for i in range(len(y_pred)):\n if abs(y_pred[i] - y_true[i]) <= gap:\n true_predictions += 1\n return true_predictions/len(y_true)", "def find_starts(config, data):\n\n trigger = butter_bandpass_filter(\n data, config.bandpass_lower, config.bandpass_upper,\n config.sampling_rate, 6)\n trigger = np.absolute(trigger)\n trigger = butter_lowpass_filter(\n trigger, config.lowpass_freq, config.sampling_rate, 6)\n\n # transient = 0.0005\n # start_idx = int(transient * config.sampling_rate)\n start_idx = 0\n average = np.average(trigger[start_idx:])\n maximum = np.max(trigger[start_idx:])\n minimum = np.min(trigger[start_idx:])\n middle = (np.max(trigger[start_idx:]) - min(trigger[start_idx:])) / 2\n if average < 1.1 * middle:\n print()\n print(\"Adjusting average to avg + (max - avg) / 2\")\n average = average + (maximum - average) / 2\n offset = -int(config.trigger_offset * config.sampling_rate)\n\n if config.trigger_rising:\n trigger_fn = lambda x, y: x > y\n else:\n trigger_fn = lambda x, y: x < y\n\n # The cryptic numpy code below is equivalent to looping over the signal and\n # recording the indices where the trigger crosses the average value in the\n # direction specified by config.trigger_rising. 
It is faster than a Python\n # loop by a factor of ~1000, so we trade readability for speed.\n trigger_signal = trigger_fn(trigger, average)[start_idx:]\n starts = np.where((trigger_signal[1:] != trigger_signal[:-1])\n * trigger_signal[1:])[0] + start_idx + offset + 1\n if trigger_signal[0]:\n starts = np.insert(starts, 0, start_idx + offset)\n\n # plt.plot(data)\n # plt.plot(trigger*100)\n # plt.axhline(y=average*100)\n # plt.show()\n\n return starts, trigger, average", "def ram_average(self):\n return _favg(self.ram_samples)", "def __LAI(NDVI, vegt_cover):\n\n LAI_1 = np.log(-(vegt_cover - 1)) / -0.45\n LAI_1[LAI_1 > 8] = 8.0\n LAI_2 = (9.519 * np.power(NDVI, 3) + 0.104 * np.power(NDVI, 2) +\n 1.236 * NDVI - 0.257)\n\n LAI = (LAI_1 + LAI_2) / 2.0 # Average LAI\n LAI[LAI < 0.001] = 0.001\n return LAI", "def showAverageGainWon(self) :\n averageGainWon = 0\n for level in self.level_history :\n averageGainWon += level.profit\n averageGainWon = averageGainWon/len(self.level_history)\n Scenario.messageGetAverageGainWon(averageGainWon)", "def get_moving_average(close, span):\n i = SMAIndicator(close, window=span)\n return i.sma_indicator()", "def avgX(self):\n return np.mean(self.getx())", "def _getBaselineThresh(self):\n print('Calculating 10% baseline')\n self.baseline = obrienBaseline.obrienBaseline(\n self.d['dos1rate'], timeWidth=5.0, \n cadence=0.1)\n self.peak_std = ( (self.d['dos1rate'][self.peakInd]/10 - \n self.baseline[self.peakInd]/10)/ \n np.sqrt(self.d['dos1rate'][self.peakInd]/10))\n return", "def get_avg_points(self):\n pass", "def mean_STD(self,counter):\n \n \n pass", "def get_avg_trend(y, filter='ewm', a=0.015, verbose =1, resample_interval='60s', fill_missing=False, title= '' , note= ''):\n\n # Two-way EWMA averaging\n ts_mean1, ts_std1 = smoothing(y, filter=filter, a=a)\n\n reversed_y = y.iloc[::-1]\n ts_mean2, ts_std2 = smoothing(reversed_y, filter=filter,a=a)\n ts_mean2 = ts_mean2.iloc[::-1]\n ts_std2 = ts_std2.iloc[::-1]\n\n ts_mean = (ts_mean1 + ts_mean2)/2\n ts_std = (ts_std1 + ts_std2)/2\n\n\n # Resample the daily trend by calculating the median of a resampling slice. 
mean can also be used.\n trend = ts_mean.resample(resample_interval).mean()\n ts_std = ts_std.resample(resample_interval).mean()\n\n # Fill up the missing trend samples if exist, by propagating the last valid\n if fill_missing: #rolling filter introduce Nan at the head or tail..\n trend.fillna(method='ffill', inplace=True, limit=2) #fill the end\n trend.fillna(method='bfill', inplace=True, limit=2) #fill the start\n\n\n\n if verbose>=1:\n t = title if title is not None else 'Average Trend'\n\n fig = plt.gcf()\n\n plt.plot(y[::1+y.shape[0]// 2000], alpha=.5)\n ax = trend.plot()\n ax.fill_between(trend.index, trend - 2 * ts_std, trend + 2 * ts_std,\n alpha=.25)\n ax.legend(['Orignal', 'Trend', 'std'])\n plt.text(ax.get_xlim()[0], ax.get_ylim()[0] + 50, note)\n plt.title(t)\n plt.show()\n\n import matplotlib.dates as mdates\n ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))\n plt.tight_layout()\n\n fig.savefig('./output/trends/'+t + '.pdf')\n plt.close(fig)\n\n return trend", "def _mean_diff(x, y):\n return np.mean(x) - np.mean(y)", "def calc_mean(sig):\n # m = mean(sig)\n return np.mean(sig)", "def mean(self):\n return self.cond_proba.mean", "def moving_average(data, temporal_window=100):\n window = np.ones(temporal_window) / temporal_window\n return np.convolve(data, window, 'valid')", "def gamma(self, predictor):\n pred = predictor(self.X)\n self.tags[_PREDICTION] = pred\n expect_event = self.tags.groupby(_EVENT).mean()\n expect_group_event = self.tags.groupby(\n [_EVENT, _GROUP_ID]).mean()\n\n num_grp = len(self.error_rate)\n tprs = [0 for _ in range(num_grp)]\n # print(expect_group_event)\n for i in range(num_grp):\n tprs[i] = expect_group_event.loc[(expect_group_event.index.get_level_values(_GROUP_ID) == i)].groupby([_EVENT]).mean()\n expect_group_event.loc[('label=1', i), 'pred'] = (1 - self.error_rate[i][0]) * tprs[i].loc['label=1', 'pred'] + self.error_rate[i][0] * tprs[i].loc['label=0', 'pred']\n expect_group_event.loc[('label=0', i), 'pred'] = (1 - self.error_rate[i][1]) * tprs[i].loc['label=0', 'pred'] + self.error_rate[i][1] * tprs[i].loc['label=1', 'pred']\n\n # neg = expect_group_event.loc[(expect_group_event.index.get_level_values(_GROUP_ID) == 0.0)].groupby([_EVENT]).mean()\n # pos = expect_group_event.loc[(expect_group_event.index.get_level_values(_GROUP_ID) == 1.0)].groupby([_EVENT]).mean()\n\n # expect_group_event.loc[('label=1.0', 1), 'pred'] = (1 - self.error_rate[1][0]) * pos.loc['label=1.0', 'pred'] + self.error_rate[1][1] * pos.loc['label=0.0', 'pred']\n # expect_group_event.loc[('label=0.0', 1), 'pred'] = (1 - self.error_rate[1][1]) * pos.loc['label=0.0', 'pred'] + self.error_rate[1][0] * pos.loc['label=1.0', 'pred']\n\n # expect_group_event.loc[('label=1.0', 0), 'pred'] = (1 - self.error_rate[0][0]) * neg.loc['label=1.0', 'pred'] + self.error_rate[0][1] * neg.loc['label=0.0', 'pred']\n # expect_group_event.loc[('label=0.0', 0), 'pred'] = (1 - self.error_rate[0][1]) * neg.loc['label=0.0', 'pred'] + self.error_rate[0][0] * neg.loc['label=1.0', 'pred']\n\n expect_event = expect_group_event.groupby(_EVENT).mean()\n expect_group_event[_DIFF] = expect_group_event[_PREDICTION] - expect_event[_PREDICTION]\n\n # expect_group_event[_DIFF] = expect_group_event[_PREDICTION] - expect_event[_PREDICTION]\n g_unsigned = expect_group_event[_DIFF]\n g_signed = pd.concat([g_unsigned, -g_unsigned],\n keys=[\"+\", \"-\"],\n names=[_SIGN, _EVENT, _GROUP_ID])\n self._gamma_descr = str(expect_group_event[[_PREDICTION, _DIFF]])\n return g_signed", "def 
volatility_indicators(df):\n p = \"volatility_\"\n high, low, close = convert_df_to_features(df, False)\n\n # ATR\n atr = AverageTrueRange(high, low, close, 14)\n df[p + \"atr_14\"] = atr.average_true_range()\n df[p + \"atr_o_close\"] = list(map(lambda a,\n c: a / c, df[p + \"atr_14\"], close))\n # BB\n bb = BollingerBands(close, window=10, window_dev=2)\n df[p + \"bb_wband_10\"] = bb.bollinger_wband()\n\n bb = BollingerBands(close, window=100, window_dev=2)\n df[p + \"bb_pband_100\"] = bb.bollinger_pband()\n\n bb = BollingerBands(close, window=200, window_dev=2)\n df[p + \"bb_wband_200\"] = bb.bollinger_wband()\n\n bb = BollingerBands(close, window=20, window_dev=2)\n df[p + \"bb_hband_o_close\"] = list(map(lambda l,\n c: (l - c) / c, bb.bollinger_hband(), close))\n\n # DC\n dc = DonchianChannel(high, low, close, window=50)\n df[p + \"dc_pband_50\"] = dc.donchian_channel_pband()\n dc = DonchianChannel(high, low, close, window=10)\n df[p + \"dc_wband_10\"] = dc.donchian_channel_wband()\n # KC\n kc = KeltnerChannel(high, low, close, window=50)\n df[p + \"pband_50\"] = kc.keltner_channel_pband()\n kc = KeltnerChannel(high, low, close, window=20)\n df[p + \"wband_20\"] = kc.keltner_channel_wband()\n # UI\n ui = UlcerIndex(close, window=30)\n df[p + \"ui_30\"] = ui.ulcer_index()\n return df" ]
[ "0.6598846", "0.6380403", "0.61479884", "0.6053863", "0.59975517", "0.57305396", "0.56783205", "0.56357294", "0.5556505", "0.54646796", "0.54572976", "0.54534125", "0.5439065", "0.54316556", "0.5413024", "0.53748345", "0.5328688", "0.53281933", "0.532751", "0.5298861", "0.5276126", "0.52620775", "0.52498454", "0.5245364", "0.52393454", "0.52372473", "0.52346116", "0.52283305", "0.52222675", "0.52222675", "0.5219627", "0.52181226", "0.5217166", "0.5214979", "0.5202445", "0.51938576", "0.5182558", "0.51730984", "0.5170766", "0.51592714", "0.51444066", "0.5137173", "0.5136289", "0.5130368", "0.5113341", "0.5108027", "0.5103707", "0.51036614", "0.5081907", "0.50756335", "0.50734913", "0.50522405", "0.5039208", "0.5036116", "0.5032336", "0.50241345", "0.5022719", "0.5021959", "0.50216365", "0.50212914", "0.5020454", "0.5012194", "0.50078523", "0.5005015", "0.5002091", "0.500163", "0.50000215", "0.49984404", "0.49891222", "0.49869305", "0.4985341", "0.49847245", "0.49837297", "0.4976249", "0.4969697", "0.49690664", "0.4965743", "0.4964145", "0.49619767", "0.4960118", "0.4959184", "0.49502698", "0.4945782", "0.49397805", "0.4938272", "0.49331594", "0.49314588", "0.49243787", "0.4911544", "0.49097884", "0.49033308", "0.49032405", "0.49028748", "0.48965", "0.4896099", "0.48944712", "0.48893356", "0.4888413", "0.4885933", "0.48854032" ]
0.5725855
6
100 * LOG10( SUM(ATR(1), n) / ( MaxHi(n) - MinLo(n) ) ) / LOG10(n) n = User defined period length. LOG10(n) = base-10 LOG of n ATR(1) = Average True Range (Period of 1) SUM(ATR(1), n) = Sum of the Average True Range over past n bars MaxHi(n) = The highest high over past n bars
def ChoppinessIndex(self, timeperiod = 14): return ta.C
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ATR(df, n):\n HML = abs(df['High'] - df['Low'])\n HMPC= abs(df['High'] - df['Close'].shift(1))\n LMPC= abs(df['Low'] - df['Close'].shift(1))\n TR = pd.concat([HML, HMPC, LMPC], axis=1).max(axis=1, skipna=False)\n return TR.rolling(n).mean()", "def ATR(stockData , ticker, n):\n start = dt.datetime(2020, 1, 1)\n data = pdr.get_data_yahoo(ticker, start)\n\n high_low = data['High'] - data['Low']\n high_close = np.abs(data['High'] - data['Close'].shift())\n low_close = np.abs(data['Low'] - data['Close'].shift())\n\n ranges = pd.concat([high_low, high_close, low_close], axis=1)\n true_range = np.max(ranges, axis=1)\n\n atr = true_range.rolling(n).sum() / n\n\n\n # Returns the Average True Range dataframe / with the dates.\n return atr", "def trend_extremum(data):\n if data[0] < data[-1]:\n argmin = data[0]\n argmax = data[-1]\n\n if argmax + argmin:\n return (argmax - argmin) / (argmax + argmin)\n\n elif data[0] > data[-1]:\n argmin = data[-1]\n argmax = data[0]\n\n if argmax + argmin:\n return (argmin - argmax) / (argmax + argmin)\n\n return 0.0", "def trend(data):\n argmin = np.argmin(data)\n argmax = np.argmax(data)\n\n divider = (data[argmax] + data[argmin])\n\n if divider == 0.0:\n return 0.0\n\n if argmin < argmax:\n return (data[argmax] - data[argmin]) / (data[argmax] + data[argmin])\n elif argmin > argmax:\n return (data[argmin] - data[argmax]) / (data[argmin] + data[argmax])\n\n return 0.0", "def AverageTrueRange(self, timeperiod = 14):\r\n \r\n return ta.ATR(self.data.high, self.data.low, self.data.close, timeperiod)", "def relative_range(self):\n self.calculate_relative_mags()\n string = '{:.0f}-{:.0f}Hz: {:.5f}'\n s_ind = self.get_bin(self.s_freq)\n e_ind = self.get_bin(self.e_freq)\n lst = self.rel_mags[s_ind:e_ind+1]\n return sum(lst)/len(lst)", "def atr(data, n, high_col='High', low_col='Low', \n close_col='Close', vol_col='Volume', fillna=False):\n close = data[close_col]\n high = data[high_col]\n low = data[low_col]\n \n cs = close.shift(1)\n tr = high.combine(cs, max) - low.combine(cs, min)\n tr = ema(tr, n)\n if fillna:\n tr = tr.replace([np.inf, -np.inf], np.nan).fillna(0)\n return pd.Series(tr, name='atr')", "def set_atr_periods(self, periods: int = 100):\n h, l, c_prev = self.data.High, self.data.Low, pd.Series(self.data.Close).shift(1)\n tr = np.max([h - l, (c_prev - h).abs(), (c_prev - l).abs()], axis=0)\n atr = pd.Series(tr).rolling(periods).mean().bfill().values\n self.__atr = atr", "def TAS(px, high, low, w=10, n=3):\r\n\r\n minn = low.rolling(window=w).min() # min de minimos\r\n maxx = high.rolling(window=w).max() # max de maximos\r\n\r\n k = 100 * (px - minn) / (maxx - minn)\r\n d = SMA(k, n)\r\n return k, d", "def EMA_tick(n_periods, current_value, previous_ema):\n\n most_recent_weight = 2 / (n_periods + 1)\n return (current_value - previous_ema) * most_recent_weight + previous_ema", "def dishlist_avg_cal(n:list)->float:\r\n all_cal = dishlist_cal(n)\r\n return sum(all_cal)/len(all_cal)", "def ATR(df, period, ohlc=['open', 'high', 'low', 'close']): \n atr = 'ATR_' + str(period)\n\n # Compute true range only if it is not computed and stored earlier in the df \n if not 'TR' in df.columns: \n df['h-l'] = df[ohlc[1]] - df[ohlc[2]] \n df['h-yc'] = abs(df[ohlc[1]] - df[ohlc[3]].shift()) \n df['l-yc'] = abs(df[ohlc[2]] - df[ohlc[3]].shift()) \n df['TR'] = df[['h-l', 'h-yc', 'l-yc']].max(axis=1) \n df.drop(['h-l', 'h-yc', 'l-yc'], inplace=True, axis=1)\n\n # Compute EMA of true range using ATR formula after ignoring first row \n EMA(df, 'TR', atr, period, alpha=True) 
\n return df", "def SuperTrend(df, period, multiplier, ohlc=['open', 'high', 'low', 'close']):\n\n ATR(df, period, ohlc=ohlc) \n atr = 'ATR_' + str(period) \n st = 'ST_' + str(period) + '_' + str(multiplier) \n stx = 'STX_' + str(period) + '_' + str(multiplier) \n \"\"\" \n SuperTrend Algorithm : \n BASIC UPPERBAND = (HIGH + LOW) / 2 + Multiplier * ATR \n BASIC LOWERBAND = (HIGH + LOW) / 2 - Multiplier * ATR \n FINAL UPPERBAND = IF( (Current BASICUPPERBAND < Previous FINAL UPPERBAND) or (Previous Close > Previous FINAL UPPERBAND)) \n THEN (Current BASIC UPPERBAND) ELSE Previous FINALUPPERBAND) \n FINAL LOWERBAND = IF( (Current BASIC LOWERBAND > Previous FINAL LOWERBAND) or (Previous Close < Previous FINAL LOWERBAND)) \n THEN (Current BASIC LOWERBAND) ELSE Previous FINAL LOWERBAND) \n SUPERTREND = IF((Previous SUPERTREND = Previous FINAL UPPERBAND) and (Current Close <= Current FINAL UPPERBAND)) THEN \n Current FINAL UPPERBAND \n ELSE \n IF((Previous SUPERTREND = Previous FINAL UPPERBAND) and (Current Close > Current FINAL UPPERBAND)) THEN \n Current FINAL LOWERBAND \n ELSE \n IF((Previous SUPERTREND = Previous FINAL LOWERBAND) and (Current Close >= Current FINAL LOWERBAND)) THEN \n Current FINAL LOWERBAND \n ELSE \n IF((Previous SUPERTREND = Previous FINAL LOWERBAND) and (Current Close < Current FINAL LOWERBAND)) THEN \n Current FINAL UPPERBAND \n \"\"\" \n # Compute basic upper and lower bands \n df['basic_ub'] = (df[ohlc[1]] + df[ohlc[2]]) / 2 + multiplier * df[atr] \n df['basic_lb'] = (df[ohlc[1]] + df[ohlc[2]]) / 2 - multiplier * df[atr]\n\n # Compute final upper and lower bands \n df['final_ub'] = 0.00 \n df['final_lb'] = 0.00 \n for i in range(period, len(df)): \n df['final_ub'].iat[i] = df['basic_ub'].iat[i] if df['basic_ub'].iat[i] < df['final_ub'].iat[i - 1] or df['Close'].iat[i - 1] > df['final_ub'].iat[i - 1] else df['final_ub'].iat[i - 1] \n df['final_lb'].iat[i] = df['basic_lb'].iat[i] if df['basic_lb'].iat[i] > df['final_lb'].iat[i - 1] or df['Close'].iat[i - 1] < df['final_lb'].iat[i - 1] else df['final_lb'].iat[i - 1] \n # Set the Supertrend value \n df[st] = 0.00 \n for i in range(period, len(df)): \n df[st].iat[i] = df['final_ub'].iat[i] if df[st].iat[i - 1] == df['final_ub'].iat[i - 1] and df['Close'].iat[i] <= df['final_ub'].iat[i] else 0\n df['final_lb'].iat[i] if df[st].iat[i - 1] == df['final_ub'].iat[i - 1] and df['Close'].iat[i] > df['final_ub'].iat[i] else 0\n df['final_lb'].iat[i] if df[st].iat[i - 1] == df['final_lb'].iat[i - 1] and df['Close'].iat[i] >= df['final_lb'].iat[i] else 0\n df['final_ub'].iat[i] if df[st].iat[i - 1] == df['final_lb'].iat[i - 1] and df['Close'].iat[i] < df['final_lb'].iat[i] else 0.00 \n # Mark the trend direction up/down \n df[stx] = np.where((df[st] > 0.00), np.where((df[ohlc[3]] < df[st]), 'down', 'up'), np.NaN)\n\n # Remove basic and final bands from the columns \n df.drop(['basic_ub', 'basic_lb', 'final_ub', 'final_lb'], inplace=True, axis=1) \n df.fillna(0, inplace=True)\n\n return df", "def avg_pressure(start, end):\n return round((start + end) / 2, 2)", "def average_energy(power,events,borders,eventName):\n event_consider = events[events['eventName']==eventName].reset_index(drop=True)\n average = 0\n i = 0\n count = 0\n minValue = 10000\n maxValue = 0\n minAverage = 10000\n maxAverage = 0 \n while(i<len(event_consider)):\n date = time.mktime(datetime.strptime(event_consider['time'][i], \"%Y-%m-%d %H:%M:%S\").timetuple())\n start = str(datetime.fromtimestamp(date+borders[0]))\n end = 
str(datetime.fromtimestamp(date+borders[1]))\n values = power[(power['time']>=start)&(power['time']<=end)]['value']\n sum_values = sum(values)\n tot_values = len(values)\n if tot_values>0:\n if values.max() > maxValue:\n maxValue = values.max()\n if values.min() < minValue:\n minValue = values.min()\n if sum_values/tot_values > maxAverage:\n maxAverage = sum_values/tot_values\n if sum_values/tot_values < minAverage:\n minAverage = sum_values/tot_values\n average = average + sum_values/tot_values\n count += 1\n i += 1\n if count>0:\n average = average / count\n print(\"number of\", eventName ,\"in groudtruth and power=\",count)\n print(\"minValue=\",minValue,\"maxValue=\",maxValue)\n print(\"minAverage=\",minAverage,\"maxAverage=\",maxAverage)\n print(\"Final Av=\",average)\n return average\n else:\n print(\"Not values found in the range\")", "def mean_period(data):\n peaks = len(find_peaks(data, height=0)[0])\n return len(data) / peaks if peaks > 0 else len(data)", "def calc_stat_values(self):", "def mean(vals):", "def get_mean(data, n=-1):\n \n return round((sum(data)/n),1)", "def avg(values):\n return sum(values) / float(len(values))", "def aiclike(timeSeries, params, distribution):\n if distribution == 'pareto':\n nloglval = -(timeSeries.shape[0] * np.log(params['mu']) + timeSeries.shape[0] * params['mu'] * np.log(params['xmin']) - (params['xmin']+1) * np.sum(np.log(timeSeries)))\n return nloglval\n \n elif distribution == 'lognormal':\n nloglval = np.sum(np.log(timeSeries * params['sigma'] * np.sqrt(2*np.pi)) + (np.log(timeSeries) - params['mu'])**2 / (2 * params['sigma']**2))\n return nloglval\n \n elif distribution == 'normal':\n nloglval = np.sum(np.log( params['sigma'] * np.sqrt(2*np.pi) ) + (timeSeries - params['mu'])**2 / (2 * params['sigma']**2))\n return nloglval\n \n elif distribution == 'exponential':\n nloglval = np.sum(params['lambda'] * timeSeries - np.log(params['lambda']))\n return nloglval\n \n elif distribution == 'boundedpl':\n nloglval = -len(timeSeries) * np.log( (params['mu'] - 1) / (np.min(timeSeries)**(1 - params['mu']) - np.max(timeSeries)**(1 - params['mu']))) + params['mu'] * np.sum(np.log(timeSeries))\n return nloglval", "def atr(df, lag, normalize=False):\n\n def _true_range(window):\n divisor = (1.0 * float(not normalize)) + ((float(normalize) * window[-1][\"c\"]))\n\n tr1 = window[-1][\"h\"] - window[-1][\"l\"]\n tr2 = window[-1][\"h\"] - window[-2][\"c\"]\n tr3 = window[-1][\"l\"] - window[-2][\"c\"]\n return max(tr1, tr2, tr3) / divisor\n\n def _sma(window):\n avg = round(reduce(lambda a, b: a + b, window) / len(window), 2)\n return avg\n\n tr = [_true_range(df[i : i + 2]) for i in range(len(df) - 1)]\n return [_sma(tr[i : i + lag + 1]) for i in range(len(tr) - lag)]", "def baseline(data):\n weights = weighting(data)\n return np.inner(weights,data['clicks'])/weights.sum()", "def H(cur_bid, cur_bidder_value, active_values):\r\n return (np.log(cur_bidder_value - cur_bid) -\r\n sum(np.log(np.array(active_values) - cur_bid)) / (len(active_values) - 1.0))", "def rangeLog(min, max, n):\n\n logmin, logmax = log(min), log(max)\n return np.exp( np.arange( logmin, logmax, (logmax-logmin)/float(n) ) )", "def dishlist_avg(n:list)->float:\r\n all_prices = dishlist_prices(n)\r\n return sum(all_prices)/len(all_prices)", "def get_low_high(a):\n\n return stats.pareto.cdf((10000,1000000),a)", "def ATR(df, period, ohlc=['Open', 'High', 'Low', 'Close']):\r\n \r\n atr = 'ATR_' + str(period)\r\n\r\n # Compute true range only if it is not computed and stored earlier in the 
df\r\n if not 'TR' in df.columns:\r\n df['h-l'] = df[ohlc[1]] - df[ohlc[2]]\r\n df['h-yc'] = abs(df[ohlc[1]] - df[ohlc[3]].shift())\r\n df['l-yc'] = abs(df[ohlc[2]] - df[ohlc[3]].shift())\r\n \r\n df['TR'] = df[['h-l', 'h-yc', 'l-yc']].max(axis=1)\r\n \r\n df.drop(['h-l', 'h-yc', 'l-yc'], inplace=True, axis=1)\r\n\r\n # Compute EMA of true range using ATR formula after ignoring first row\r\n EMA_ST(df, 'TR', atr, period, alpha=True)\r\n \r\n return df", "def average_true_range(self, period=14):\n tr = self._true_range_computation(period=period * 2)\n return pd.Series(tr.rolling(center=False, window=period,\n min_periods=period - 1).mean(),\n name='{} day ATR Ticker: {}'.format(period,\n self.ticker)).tail(\n period)", "def dif_avg(u_beam):\n u = np.sort(u_beam)[::-1]\n# print(u)\n ind = u.shape[0]//100*5\n top5 = np.mean(u[:ind])\n# bottom5 = np.mean(u[-ind:])\n mean_wo_top5 = np.mean(u[ind:])\n return top5/mean_wo_top5", "def calc(self,index, counter_values):\n try:\n angles = self.ik220_dev.read_attribute('Angles').value\n if index == 9:\n return sum(angles[:3])/3.0 # Excluded channel 4 of grating pitch encoder because of problems of Homing in the last header of the RON grating encoder.\n elif index == 10:\n return sum(angles[4:6])/2.0 # Modified from 4 channels to 2 channels because of problems of Homing in the 2 last headers of the RON mirror3 encoder.\n else:\n return angles[index - 1]\n except:\n return 1e-100", "def azAverage(self,rads,vals,nbins=50):\n\t\ttry:\n\t\t\tavVals = []\n\t\t\tbins = np.linspace(0,self.annulus,nbins)\n\t\t\tfor i, bin in enumerate(bins[:-1]):\n\t\t\t\tav = np.max(vals[(rads>bins[i]) & (rads<=bins[i+1])])\n\t\t\t\tavVals.append(av)\n\t\texcept:\n\t\t\t#if bin size is too small, and some bins have no particles, make bins bigger\n\t\t\tnbins=25\t\t\t\n\t\t\tavVals = []\n\t\t\tbins = np.linspace(0,self.annulus,nbins)\n\t\t\tfor i, bin in enumerate(bins[:-1]):\n\t\t\t\ttry:\n\t\t\t\t\tav = np.max(vals[(rads>bins[i]) & (rads<=bins[i+1])])\n\t\t\t\texcept:\n\t\t\t\t\tav = 0\n\t\t\t\tavVals.append(av)\n\t\treturn bins[:-1], avVals", "def mean(a_series):\n return float(sum(a_series) / max(len(a_series) * 1.0, 1.0))", "def trend_indicators(df):\n p = \"trend_\"\n high, low, close = convert_df_to_features(df, False)\n\n # ADX\n i = ADXIndicator(high, low, close, window=40)\n df[p + \"adx_40_neg\"] = i.adx_neg()\n # ARN\n i = AroonIndicator(close, window=50)\n df[p + \"arn_50\"] = i.aroon_indicator()\n # CCI\n i = CCIIndicator(high, low, close, window=70)\n df[p + \"cci_70\"] = i.cci()\n # DPO\n i = DPOIndicator(close, window=100)\n df[p +\n \"dpo_100_log\"] = list(map(lambda x: uf.log_abs(x, zeros=True), i.dpo()))\n # KST\n i = KSTIndicator(close)\n df[p + \"kst_sig_log\"] = list(map(lambda x: uf.log_abs(x,\n zeros=True), i.kst_sig()))\n # MACD\n i = MACD(close, 12, 16, 34)\n df[p + \"macd_12_signal_log\"] = list(\n map(lambda x: uf.log_abs(x, zeros=True), i.macd_signal()))\n # SMA\n i = SMAIndicator(close, window=50) # 50\n\n sma_50_rate = uf.get_rate(i.sma_indicator())\n df[p + \"sma_50_rate_log\"] = list(\n map(lambda x: uf.log_abs(x, zeros=True), sma_50_rate))\n\n sma_50_diff = list(map(lambda s, c: uf.none_subtraction(\n s, c), i.sma_indicator(), close))\n df[p + \"sma_50_diff_log\"] = list(\n map(lambda x: uf.log_abs(x, zeros=True), sma_50_diff))\n\n i = SMAIndicator(close, window=200)\n\n sma_200_diff = list(\n map(lambda s, c: uf.none_subtraction(s, c), i.sma_indicator(), close))\n sma_200_diff_o_close = list(\n map(lambda s, c: s / c, sma_200_diff, close))\n 
df[p + \"sma_200_diff_o_close_log\"] = list(\n map(lambda x: uf.log_abs(x * 100, zeros=True), sma_200_diff_o_close))\n # STC\n i = STCIndicator(close, 100, 200, 50)\n df[p + \"stc_50_2\"] = i.stc()\n # TRIX\n i = TRIXIndicator(close, window=20)\n df[p + \"trix_20_log\"] = list(map(lambda x: uf.log_abs(x * 1000), i.trix()))\n # VI\n i = VortexIndicator(high, low, close, window=50)\n df[p + \"vi_50_amp\"] = list(map(lambda x: uf.log_abs(x *\n 1000, zeros=True), i.vortex_indicator_diff()))\n\n return df", "def relative_strength(prices, n):\n\n deltas = np.diff(prices)\n seed = deltas[:n+1] # takes the last 1 price differences? 12 market days?\n up = seed[seed>=0].sum()/n\n down = -seed[seed<0].sum()/n\n rs = up/down\n rsi = np.zeros_like(prices)\n rsi[:n] = 100. - 100./(1.+rs)\n\n for i in range(n, len(prices)):\n delta = deltas[i-1] # cause the diff is 1 shorter\n\n if delta>0:\n upval = delta\n downval = 0.\n else:\n upval = 0.\n downval = -delta\n\n up = (up*(n-1) + upval)/n\n down = (down*(n-1) + downval)/n\n\n rs = up/down\n rsi[i] = 100. - 100./(1.+rs)\n\n return rsi", "def average(self, n=0):\n assert n >= 0\n for key in self.value_history:\n values = np.array(self.value_history[key][-n:])\n nums = np.array(self.n_history[key][-n:])\n avg = np.sum(values * nums) / np.sum(nums)\n self.output[key] = avg", "def element_effective_area(freq_hz):\n freqs = np.array([0.05e9, 0.07e9, 0.11e9, 0.17e9, 0.25e9, 0.35e9, 0.45e9,\n 0.55e9, 0.65e9])\n a_eff = np.array([1.8791, 1.8791, 1.8694, 1.3193, 0.6080, 0.2956, 0.2046,\n 0.1384, 0.0792])\n f_cut = 2\n f1 = interp1d(np.log10(freqs[:f_cut+1]), np.log10(a_eff[:f_cut+1]),\n kind='slinear')\n f2 = interp1d(np.log10(freqs[f_cut:]), np.log10(a_eff[f_cut:]),\n kind='cubic')\n if freq_hz <= freqs[f_cut]:\n return 10**f1(np.log10(freq_hz))\n else:\n return 10**f2(np.log10(freq_hz))", "def _get_average(self):\n norm = 1.0\n for pos, idx in enumerate(self.idx):\n norm *= (self.high[pos] - self.low[pos])\n return 1.0/norm", "def calmar_ratio(returns, period=DAILY):\n\n temp_max_dd = max_drawdown(returns=returns)\n if temp_max_dd < 0:\n temp = annual_return(\n returns=returns,\n period=period\n ) / abs(max_drawdown(returns=returns))\n else:\n return np.nan\n\n if np.isinf(temp):\n return np.nan\n\n return temp", "def atr(self, n, array=False, length=None):\n if array:\n if length is not None:\n result = talib.ATR(self.high[-length:], self.low[-length:], self.close[-length:], n)\n else:\n result = talib.ATR(self.high, self.low, self.close, n)\n return result\n else:\n l = n+1\n result = talib.ATR(self.high[-l:], self.low[-l:], self.close[-l:], n)\n return result[-1]", "def _baseline_value(self):\n t = self['primary']\n return np.median(t.data[:int(10e-3/t.dt)])", "def percent_change(ts, ax=-1):\r\n ts = np.asarray(ts)\r\n\r\n return (ts / np.expand_dims(np.mean(ts, ax), ax) - 1) * 100", "def amplogwidth(arr, factor=2):\n\n log = np.ma.log10(np.sqrt(np.sum(arr**2, axis=-1))) # logarithms of amplitudes\n mean = log.mean() # means of logarithms of amplitudes\n std = log.std() # standard deviation of logarithms of amplitudes\n\n return mean - factor*std, mean + factor*std", "def mean_height(data):", "def mean_height(data):", "def calcHistogram(Tech_res):\n A_Hist = np.histogram(Tech_res.A_mean, Tech_res.hist_bins)\n top, bin_list, x_steps = list(A_Hist[0]), list(A_Hist[1]), []\n for n in range(np.shape(bin_list)[0]-1):\n x_steps.append((bin_list[n+1] + bin_list[n]) / 2)\n Tech_res.update_tech_meas(Hist_tops=top, Hist_steps=x_steps)\n return", "def 
learning_rate_range():\n # Lower and upper bounds\n #######\n lower_bound = 0.1 \n upper_bound = 1e-6\n #######\n return lower_bound, upper_bound", "def logistic(x_0, max_value, midpoint, steepness):\n return max_value / (1 + math.exp(-(x_0 - midpoint) / steepness))", "def get_average_in_range(list, low, high):\n track = 0\n val = 0\n for num in list:\n if num >= low and num < high:\n val += num\n track += 1\n if track == 0:\n return 0\n return val / track", "def average(values):\n\treturn sum(values)/len(values)", "def arelfreq(a,numbins=10,defaultreallimits=None):\r\n h,l,b,e = histogram(a,numbins,defaultreallimits)\r\n h = N.array(h/float(a.shape[0]))\r\n return h,l,b,e", "def NMAE(ratings, range):\n\n Rmax = max(range)\n Rmin = min(range)\n\n return MAE(ratings, range) / (Rmax - Rmin)", "def arithmetic_ret(self) -> float:\n return float(np.log(self.tsdf).diff().mean() * self.periods_in_a_year)", "def cagr(B, A, n):\n if B < 0: B = 0\n return (math.pow(B / A, 1 / n) - 1) * 100", "def annual_series(events):\n annually_series = pd.Series(data=events[COL.MAX_OVERLAPPING_SUM].values,\n index=events[COL.START].values,\n name=COL.MAX_OVERLAPPING_SUM).resample('AS').max()\n annually_series = annually_series.sort_values(ascending=False).reset_index(drop=True)\n\n mean_sample_rainfall = annually_series.mean()\n sample_size = annually_series.count()\n\n x = -np.log(np.log((sample_size + 0.2) / (sample_size - (annually_series.index.values + 1.0) + 0.6)))\n x_mean = x.mean()\n\n w = ((x * annually_series).sum() - sample_size * mean_sample_rainfall * x_mean) / \\\n ((x ** 2).sum() - sample_size * x_mean ** 2)\n u = mean_sample_rainfall - w * x_mean\n\n return {'u': u, 'w': w}", "def calcrange(data, log=False):\n xmin, xmax = None, None\n for x in data:\n if not log or x > 0.:\n if xmin is None or x < xmin: xmin = x\n if xmax is None or x > xmax: xmax = x\n\n if xmin is None and xmax is None:\n if log:\n return 0.1, 1.\n else:\n return 0., 1.\n else:\n return xmin, xmax", "def integer_hist(a, int_range=None, open_range=False, relative=False):\n data = np.round(a).flatten()\n if int_range:\n values = np.arange(int(int_range[0]), int(int_range[1])+1)\n else:\n values = np.arange(int(data.min()), int(data.max())+1)\n N = values.size\n if relative:\n count = np.empty(N, 'd')\n else:\n count = np.empty(N, 'l')\n for bin, c in enumerate(values):\n if open_range and bin == N - 1:\n count[bin] = (data >= c).sum()\n else:\n count[bin] = (data == c).sum()\n if relative:\n count /= count.sum()\n return values, count", "def get_HRF(duration,TR,peak):\n n = int(np.ceil(duration/TR))\n x = np.linspace(0,duration,n)\n h = np.zeros(n)\n h[x<peak] = x[x<peak]/peak\n h[x>=peak] = (x[x>=peak]-duration)/(peak-duration)\n h = h/np.sum(h)\n return h", "def std_alp_dm_line(m):\n return 10.**(-7.5 + 0.5 * np.log10(m))", "def calculateRelativeStrengthIndex(self, series, interval=14):\n\n if not isinstance(series, pd.Series):\n raise TypeError('Pandas Series required.')\n\n if not isinstance(interval, int):\n raise TypeError('Interval integer required.')\n\n if(len(series) < interval):\n raise IndexError('Pandas Series smaller than interval.')\n\n diff = series.diff(1).dropna()\n\n sum_gains = 0 * diff\n sum_gains[diff > 0] = diff[diff > 0]\n avg_gains = sum_gains.ewm(com=interval-1, min_periods=interval).mean()\n\n sum_losses = 0 * diff\n sum_losses[diff < 0] = diff[diff < 0]\n avg_losses = sum_losses.ewm(\n com=interval-1, min_periods=interval).mean()\n\n rs = abs(avg_gains / avg_losses)\n rsi = 100 - 100 / (1 + rs)\n\n 
return rsi", "def mean(values):\r\n return sum(values) / float(len(values))", "def arithmetic_ret_func(self, months_from_last: int = None, from_date: dt.date = None, to_date: dt.date = None,\n periods_in_a_year_fixed: int = None) -> float:\n earlier, later = self.calc_range(months_from_last, from_date, to_date)\n if periods_in_a_year_fixed:\n time_factor = periods_in_a_year_fixed\n else:\n fraction = (later - earlier).days / 365.25\n how_many = self.tsdf.loc[earlier:later].count(numeric_only=True)\n time_factor = how_many / fraction\n return float(np.log(self.tsdf.loc[earlier:later]).diff().mean() * time_factor)", "def calc_x_day_avg(data, x=3):\n pass", "def _calculate_period(vals):\n\tif len(vals) < 4:\n\t\treturn None\n\t# if self.firmware['major'] < 16:\n\t# \treturn ((vals[3] << 24) | (vals[2] << 16) | (vals[1] << 8) | vals[0]) / 12e6\n\t# else:\n\treturn self._calculate_float(vals)", "def mean(series):\n return fsum(series) / len(series)", "def gentrends(x, window=1/3.0, charts=True): #from https://github.com/dysonance/Trendy/blob/master/trendy.py\r\n\r\n import numpy as np\r\n import pandas as pd\r\n\r\n x = np.array(x)\r\n\r\n if window < 1:\r\n window = int(window * len(x))\r\n\r\n max1 = np.where(x == max(x))[0][0] # find the index of the abs max\r\n min1 = np.where(x == min(x))[0][0] # find the index of the abs min\r\n\r\n # First the max\r\n if max1 + window > len(x):\r\n max2 = max(x[0:(max1 - window)])\r\n else:\r\n max2 = max(x[(max1 + window):])\r\n\r\n # Now the min\r\n if min1 - window < 0:\r\n min2 = min(x[(min1 + window):])\r\n else:\r\n min2 = min(x[0:(min1 - window)])\r\n\r\n # Now find the indices of the secondary extrema\r\n max2 = np.where(x == max2)[0][0] # find the index of the 2nd max\r\n min2 = np.where(x == min2)[0][0] # find the index of the 2nd min\r\n\r\n # Create & extend the lines\r\n maxslope = (x[max1] - x[max2]) / (max1 - max2) # slope between max points\r\n minslope = (x[min1] - x[min2]) / (min1 - min2) # slope between min points\r\n a_max = x[max1] - (maxslope * max1) # y-intercept for max trendline\r\n a_min = x[min1] - (minslope * min1) # y-intercept for min trendline\r\n b_max = x[max1] + (maxslope * (len(x) - max1)) # extend to last data pt\r\n b_min = x[min1] + (minslope * (len(x) - min1)) # extend to last data point\r\n maxline = np.linspace(a_max, b_max, len(x)) # Y values between max's\r\n minline = np.linspace(a_min, b_min, len(x)) # Y values between min's\r\n\r\n # OUTPUT\r\n trends = np.transpose(np.array((x, maxline, minline)))\r\n if charts is True:\r\n trends = pd.DataFrame(trends, index=np.arange(0, len(x)),\r\n columns=['Data', 'Max Line', 'Min Line'])\r\n import matplotlib as mpl\r\n mpl.use('Agg')\r\n import matplotlib.pyplot as plt\r\n fig = plt.figure()\r\n the_plot = fig.add_subplot(111)\r\n the_plot.plot(trends)\r\n outputStream = io.BytesIO()\r\n plt.savefig(outputStream)\r\n\r\n return outputStream", "def find_mean(values):\n return sum(values) / len(values)", "def confint(arr):\n res=[[],[],[]]\n #r=hpd(arr)\n r=(sap(arr,2.5),sap(arr,97.5))\n res[0]=r[0]\n res[1]=arr.mean(0)\n res[2]=r[1]\n return np.array(res)", "def average(values):\n return sum(values) / len(values)", "def average(values):\n return sum(values) / len(values)", "def mean_value( values ):\n return sum( values ) / len( values )", "def dist_sma(self, a):\r\n\r\n return self.logunif(a, self.arange.to(\"AU\").value)", "def empirical_distribution(data, x, n):\n i = 0\n while i < n and data[i] <= x:\n i+=1\n return float(i)/n", "def avarage_for_group(data: 
Dict[int, int]) -> float:\n values = data.values()\n summary = sum(values)\n return summary // len(data)", "def mean_wave_period(F, f, df):\n return np.sum(F * df) / np.sum(F * f * df)", "def normalize_series(series):\n return (series - series.mean()) / (series.max() - series.min())", "def BHS_standard(err):\n \n leq5 = 0\n leq10 = 0\n leq15 = 0\n \n for i in range(len(err)):\n \n if(abs(err[i])<=5):\n leq5 += 1\n leq10 += 1\n leq15 += 1\n \n elif(abs(err[i])<=10): \n leq10 += 1\n leq15 += 1\n \n elif(abs(err[i])<=15): \n leq15 += 1\n \n \n \n return (leq5*100.0/len(err), leq10*100.0/len(err), leq15*100.0/len(err))", "def average(self, start, end):\n return self.integrate(start, end) / (end - start)", "def mean_variance_analysis(df):\n rets = np.log(df['close']/df['close'].shift(1))\n\n std = rets.std()* 252\n\n annualized_returns = rets.mean() * 252\n\n print(f'The annualized returns of the stock is {annualized_returns}, and the standard deviation of the stock is {std}')", "def item_um(n):\n if n <= 0.250:\n return 0\n elif n > 0.250 and n <= 0.500:\n return 1\n elif n > 0.500 and n <= 0.750:\n return 2\n elif n > 0.750 and n <= 1.000:\n return 3", "def EstimatePeriod(response):\n #is a bit shoddy, requires long time periods to produce consistent results\n \n \n roots = np.array([])\n for i in range(len(response[1])):\n try:\n if response[1][i] == 0:\n roots = np.append(roots, response[0][i])\n \n #tests for sign change\n elif response[1][i] * response[1][i+1] < 0:\n roots = np.append(roots, response[0][i])\n \n else:\n pass\n \n except IndexError:\n pass\n \n #from root(N) = t_0 + N*T/2, and sum of series in N. NB a divsion by N is\n #implicit in the mean\n roots = 2 * (roots - roots[0])\n period = 2 * np.mean(roots)/(len(roots) + 1)\n \n #could add error calculation in future\n return period", "def compute_log(tx, index_log, mean=[], std=[]):\n tx_new = np.log10(3+abs(tx[:,index_log]))\n return standardize(tx_new,mean,std)", "def mean(self) -> float:\n points = np.concatenate(\n [\n [self.t_min],\n -np.logspace(-5, -1, 5)[::-1],\n np.logspace(-5, -1, 5),\n [self.t_max],\n ]\n )\n\n mean = 0.0\n for left, right in zip(points[:-1], points[1:]):\n integral, _ = integrate.quad(self.cdf, left, right, limit=500)\n mean += right * self.cdf(right) - left * self.cdf(left) - integral\n\n return mean", "def tail_avg(timeseries):\r\n try:\r\n t = (timeseries[-1][1] + timeseries[-2][1] + timeseries[-3][1]) / 3\r\n return t\r\n except IndexError:\r\n return timeseries[-1][1]", "def period(self) -> int:", "def indicator_logic(self, candle):\n # Initialize variables\n sma, upper, lower = 2, -1.0, -1.0 # 'sma' = 2 is clever way to generate 'a favor' e 'contra'\n\n # Append close to moving average\n self.ma.append(candle.close[self.up])\n\n # Check if there are enough candles to calculate moving average\n if len(self.ma) == self.period:\n\n # Initialize upper and lower values for when there is a valid moving average\n upper, lower = 0.0, 0.0\n\n # Calculates moving average\n avg = sum(self.ma) / self.period\n\n # Tells if current close is above moving average\n sma = 1 if candle.close[self.up] > avg else 0\n\n # Calculates standard deviation\n std = pstdev(self.ma)\n\n # Calculates difference between current candle and moving average\n diff = candle.close[self.up] - avg\n\n # Transform difference to standard deviations\n if diff > 0 and std != 0:\n # Value of above\n upper = diff / std\n elif diff < 0 and std != 0:\n # Value if below\n lower = -diff / std\n\n # Returns values\n return sma, upper, lower", 
"def rythm_hist(duration_ohe):\n assert duration_ohe.ndim == 2\n return duration_ohe.sum(axis=0) / duration_ohe.sum()", "def calc_mard(df):\n df = add_error_fields(df)\n\n abs_relative_difference_in_measurement_range = df.loc[\n df[\"withinMeasRange\"], \"absRelDiff\"\n ]\n\n return np.mean(abs_relative_difference_in_measurement_range)", "def get_frequency_array(self):\n\t\treturn np.logspace(np.log10(self.converted_range[0]), np.log10(\n\t\t\tself.converted_range[1]), num=129)[:self.maximum_frequency]", "def mean(values):\n total = sum(values)\n len_values = len(values)\n return total/len_values", "def __len__(self):\n return 9 # logsfr_ratios has 6 bins", "def _get_mean(self, sums, step):\n\n return sums/step", "def set_period_limits(self): # function[Tmin, Tmax] = setTlim(obj)\n\n x_min = self.tf.minimum_period\n x_max = self.tf.maximum_period\n\n Tmin = 10 ** (np.floor(np.log10(x_min) * 2) / 2)\n if (np.log10(x_min) - np.log10(Tmin)) < 0.15:\n Tmin = 10 ** (np.log10(Tmin) - 0.3)\n\n Tmax = 10 ** (np.ceil(np.log10(x_max) * 2) / 2)\n if (np.log10(Tmax) - np.log10(x_max)) < 0.15:\n Tmax = 10 ** (np.log10(Tmax) + 0.3)\n return Tmin, Tmax", "def calc_calories(gpx_track, wt = 175, activity='Run'):", "def get_avg_range(range_array):\n # Average the ranges\n range_count = 0\n range_accum = 0.0\n\n if range_array:\n # Accumulate the data\n for beam in range(len(range_array)):\n if range_array[beam] > 0.0 and not Ensemble.is_bad_velocity(range_array[beam]):\n range_count += 1\n range_accum += range_array[beam]\n\n if range_count > 0:\n return range_accum / range_count\n else:\n return 0.0", "def chaikan_accum_dist(open,high,low,close,volume):\n return ((close - open)/range) * volume\n return volume * ((close-low)) - (high-close))/(high-low)", "def mean(data):\n n = len(data)\n if n < 1:\n return 0\n return sum(data)/float(n)", "def findSpaceLength(Histogram, High):\n summ=0\n length=0\n number=0\n for kol in Histogram:\n if kol==0:\n length+=1\n elif kol>0 and length>0:\n if length<High:\n summ+=length\n length=0\n number+=1\n else:length=0\n if number<>0: return max(summ/number, (1/5)*High) ## in a case if there is no space in line\n else: return (1/5)*High", "def make_line_hist(x,minValue,maxValue,nbins=10,logscale=False):\n if logscale:\n freqs,bins = np.histogram(x,bins=np.logspace(np.log10(minValue),np.log10(maxValue),nbins),normed=True) \n else:\n freqs,bins = np.histogram(x,bins=np.linspace(minValue,maxValue,nbins),normed=True)\n \n \n plt.plot((bins[:-1]+bins[1:])/2,freqs)\n return freqs", "def average(self, n=0):\n assert n >= 0\n for key in self.val_history:\n values = np.array(self.val_history[key][-n:])\n nums = np.array(self.n_history[key][-n:])\n if values.shape == nums.shape:\n avg = np.sum(values * nums) / np.sum(nums)\n else:\n avg = np.mean(values, axis=0).tolist()\n self.output[key] = avg\n self.ready = True", "def avgtr(self):\n return np.diff(self.trtimes).mean()" ]
[ "0.63604623", "0.629991", "0.6296152", "0.61950475", "0.605709", "0.5966973", "0.5952976", "0.59012014", "0.5847547", "0.5821448", "0.5785486", "0.575912", "0.5732089", "0.5712802", "0.5695987", "0.5627515", "0.5604428", "0.5594196", "0.55866754", "0.55238754", "0.5521845", "0.55140394", "0.55064607", "0.5499636", "0.54889363", "0.54695255", "0.5457199", "0.5453494", "0.54465485", "0.5440207", "0.5436622", "0.543558", "0.54314435", "0.54310393", "0.5430343", "0.5426857", "0.5422738", "0.5410754", "0.5409839", "0.5384239", "0.53822625", "0.53803635", "0.53762734", "0.5339141", "0.5339141", "0.533886", "0.5326426", "0.53207445", "0.53162307", "0.53122836", "0.53099126", "0.530938", "0.5308678", "0.5307263", "0.53025156", "0.5300026", "0.52923423", "0.5290937", "0.5289408", "0.528854", "0.52866644", "0.5281173", "0.5280362", "0.5280222", "0.5278238", "0.5277475", "0.52645075", "0.5262951", "0.52601045", "0.52601045", "0.5246939", "0.52359074", "0.5231856", "0.52299255", "0.52288604", "0.5228723", "0.52247596", "0.5222062", "0.52218366", "0.5214392", "0.5210173", "0.5209518", "0.5204436", "0.5202755", "0.51987994", "0.5189416", "0.51865506", "0.5186014", "0.5182548", "0.5181866", "0.51777905", "0.51758945", "0.5171375", "0.5165459", "0.51537436", "0.51524824", "0.5152034", "0.51517135", "0.51485294", "0.5144807", "0.5142898" ]
0.0
-1
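The document field in the row above is truncated (`return ta.C`), so the original implementation is not recoverable from the source. Purely as an illustration of the formula quoted in that row's query, here is a minimal pandas sketch; the function name, the separate `high`/`low`/`close` series layout, and the default period are assumptions, not part of the dataset row.

```python
import numpy as np
import pandas as pd

def choppiness_index(high: pd.Series, low: pd.Series, close: pd.Series, n: int = 14) -> pd.Series:
    # ATR(1) is just the true range of each bar:
    # max(high - low, |high - prev_close|, |low - prev_close|)
    prev_close = close.shift(1)
    true_range = pd.concat(
        [high - low, (high - prev_close).abs(), (low - prev_close).abs()], axis=1
    ).max(axis=1)
    atr1_sum = true_range.rolling(n).sum()   # SUM(ATR(1), n)
    max_hi = high.rolling(n).max()           # MaxHi(n)
    min_lo = low.rolling(n).min()            # MinLo(n)
    # 100 * LOG10( SUM(ATR(1), n) / (MaxHi(n) - MinLo(n)) ) / LOG10(n)
    return 100 * np.log10(atr1_sum / (max_hi - min_lo)) / np.log10(n)
```

Readings near 100 indicate a choppy, range-bound market, while readings near 0 indicate a strong trend.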
Schaff Trend Cycle (STC) STC indicator is a forward-looking leading indicator combining moving averages (MACD) with an oscillator (stochastic).
def Schaff(self, shortPeriod=23, longPeriod=50): shortEMAClose = ta.EMA(self.data.close, timeperiod=shortPeriod) longEMAClose = ta.EMA(self.data.close, timeperiod=longPeriod) macdClose = shortEMAClose - longEMAClose shortEMALow = ta.EMA(self.data.low, timeperiod=shortPeriod) longEMALow = ta.EMA(self.data.low, timeperiod=longPeriod) macdLow = shortEMALow - longEMALow shortEMAHigh = ta.EMA(self.data.high, timeperiod=shortPeriod) longEMAHigh = ta.EMA(self.data.high, timeperiod=longPeriod) macdHigh = shortEMAHigh - longEMAHigh fastk, fastd = ta.STOCHF(macdHigh, macdLow, macdClose, fastk_period=10, fastd_period=10, fastd_matype=0) return 100 * ((macdClose - fastk) / (fastd - fastk))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def one_transition_spectrum_cd(self,tr):\n \n\n ta = tr[\"ta\"] # TimeAxis\n rr = tr[\"rr\"] # transition dipole strength\n om = tr[\"om\"] # frequency - rwa\n gg = tr[\"gg\"] # natural broadening (constant or time dependent)\n fwhm = tr[\"fwhm\"] # Additional gaussian broadening of the spectra\n sgm = fwhm/(2*numpy.sqrt(2*numpy.log(2)))\n \n # CD and fluorescence can be calculated in this step\n # TODO if rotatory strength defined calculate also circular dichroism spectra\n # TOOD calculate fluorescence spectra (for fluorescence there should be a switch because it should be calculated only for the first transition) \n \n \n if self.system._has_system_bath_coupling:\n# ct = tr[\"ct\"] # correlation function\n \n # convert correlation function to lineshape function\n #gt = self._c2g(ta,ct.data)\n gt = tr[\"gt\"]\n # calculate time dependent response\n at = numpy.exp(-gt -1j*om*ta.data)\n else:\n # calculate time dependent response\n at = numpy.exp(-1j*om*ta.data) \n# plt.figure()\n# plt.title(\"Absorption\")\n# plt.plot(ta.data,numpy.real(at))\n# plt.plot(ta.data,numpy.imag(at))\n \n \n if len(gg) == 1:\n gam = gg[0]\n rt = numpy.exp(gam*ta.data)\n at *= rt\n #print(\"Constant: \", rt[20], len(at))\n else:\n rt = numpy.exp((gg)*ta.data) \n at *= rt\n #print(\"Time dependent: len = \", rt[20], len(rt))\n \n if fwhm!=0.0:\n gauss = numpy.exp(-2*(numpy.pi**2)*(sgm**2)*(ta.data**2))\n at *= gauss\n \n # Fourier transform the result\n ft = rr*numpy.fft.hfft(at)*ta.step\n ft = numpy.fft.fftshift(ft)\n # invert the order because hfft is a transform with -i\n ft = numpy.flipud(ft) \n # cut the center of the spectrum\n Nt = ta.length #len(ta.data) \n return ft[Nt//2:Nt+Nt//2]", "def CTMCtoStormpy(h):\n\tstate_labelling = _buildStateLabeling(h)\n\ttransition_matrix = deepcopy(h.matrix)\n\te = array([h.e(s) for s in range(h.nb_states)])\n\ttransition_matrix /= e[:,newaxis]\n\ttransition_matrix = st.build_sparse_matrix(transition_matrix)\n\tcomponents = st.SparseModelComponents(transition_matrix=transition_matrix,\n\t\t\t\t\t\t\t\t\t\t state_labeling=state_labelling,\n\t\t\t\t\t\t\t\t\t\t rate_transitions=True)\n\tcomponents.exit_rates = e\n\tctmc = st.storage.SparseCtmc(components)\n\treturn ctmc", "def new_tsno(self, spm, t0, ccon):\n\n cp = cp_ice(t0)\n tdif = ccon / (spm * cp)\n tsno = tdif + FREEZE\n\n return tsno", "def swcfsrf(fsns, fsnsc):\n var = fsns - fsnsc\n var.long_name = \"Surface shortwave cloud forcing\"\n return var", "def fldsc(ts, flnsc):\n var = 5.67e-8 * ts**4 - flnsc\n var.units = \"W/m2\"\n var.long_name = \"Clearsky Surf LW downwelling flux\"\n return var", "def circuitSat(C):", "def scout(self, historical_candles):\n # Get required candles, EMA and MACD require close price, and ATR requires open, high, low, close prices\n close_prices = get_candle_values_as_list(historical_candles, \"close_price\")\n open_prices = get_candle_values_as_list(historical_candles, \"open_price\")\n high_prices = get_candle_values_as_list(historical_candles, \"high_price\")\n low_prices = get_candle_values_as_list(historical_candles, \"low_price\")\n\n # Get indicators\n macd_hist = f_macd(close_prices, MACD_WINDOW_SLOW, MACD_WINDOW_FAST, MACD_WINDOW_SIGNAL)\n atr = f_atr(high_prices, low_prices, close_prices, ATR_WINDOW) * ATR_MULTIPLIER\n ohlc4 = f_ohlc4(open_prices, high_prices, low_prices, close_prices, ATR_WINDOW)\n\n # Chandelier exit\n long_stop = max(ohlc4) - atr[-1]\n short_stop = min(ohlc4) + atr[-1]\n\n # For the first iteration, set the previous long stop\n if not 
self.long_stop_prev:\n self.long_stop_prev = long_stop\n\n if close_prices[-2] > self.long_stop_prev:\n long_stop = max(long_stop, self.long_stop_prev)\n\n # For the first iteration, set the previous short stop\n if not self.short_stop_prev:\n self.short_stop_prev = short_stop\n\n if ohlc4[-2] < self.short_stop_prev:\n short_stop = min(short_stop, self.short_stop_prev)\n\n if macd_hist[-1] > 0:\n position = \"long\"\n elif macd_hist[-1] < 0:\n position = \"short\"\n else:\n position = \"neutral\"\n\n\n\n # Useful for dumping data into excel for testing\n # print(\"+++++++++++++\")\n # print(\"Open High Low Close\")\n # for i in range(len(close_prices)):\n # print(f\"{open_prices[i]} {high_prices[i]} {low_prices[i]} {close_prices[i]}\")\n # print(\"--------------\")\n #\n # print(\"MACD_HIST\")\n # for p in macd_hist:\n # print(p)\n #\n # print(\"ATR\")\n # for p in atr:\n # print(p)\n #\n # print(\"OHLC4\")\n # for p in ohlc4:\n # print(p)\n #\n # print(\"long_stop\")\n # print(long_stop)\n #\n # print(\"short_stop\")\n # print(short_stop)\n\n\n\n\n print(f\"macd_hist = {macd_hist[-1]}\")\n print(f\"long_stop_prev = {self.long_stop_prev}\")\n print(f\"short_stop_prev = {self.short_stop_prev}\")\n print(f\"long_stop = {long_stop}\")\n print(f\"short_stop = {short_stop}\")\n print(f\"recommended position = {position}\")\n\n # Set the stop values for next iteration\n self.long_stop_prev = long_stop\n self.short_stop_prev = short_stop\n\n return position", "def swcf(fsntoa, fsntoac):\n var = fsntoa - fsntoac\n var.long_name = \"TOA shortwave cloud forcing\"\n return var", "def plot_estimation(stc_est, stc_signal, initial_time=1.5, surface='inflated'):\n brain = stc_est.plot(hemi='both', subject='sample', initial_time=initial_time, surface=surface)\n hemi = ['lh', 'rh'][config.signal_hemi]\n vert = stc_signal.vertices[config.signal_hemi][0]\n brain.add_foci([vert], coords_as_verts=True, hemi=hemi)\n return brain", "def scout(self, historical_candles):\n # Get required candles, EMA and MACD require close price, and ATR requires open, high, low, close prices\n close_prices = get_candle_values_as_list(historical_candles, \"close_price\")\n open_prices = get_candle_values_as_list(historical_candles, \"open_price\")\n high_prices = get_candle_values_as_list(historical_candles, \"high_price\")\n low_prices = get_candle_values_as_list(historical_candles, \"low_price\")\n volumes = get_candle_values_as_list(historical_candles, \"volume\")\n\n # Calculate rsi\n rsi = f_rsi(close_prices, RSI_WINDOW)\n\n # Calculte MACD indicator\n macd_line, macd_signal, macd_histogram = f_macd(close_prices, SLOW_WINDOW, FAST_WINDOW, SIGNAL_WINDOW)\n\n # Set up conditions - Buy\n # Check MACD histogram has just changed from negative to positive\n buy_condition_1 = macd_histogram[-2] < 0 and macd_histogram[-1] > 0\n\n # Check rsi is below the cutoff\n buy_condition_2 = rsi[-1] <= RSI_CUTOFF\n\n # Check MACD line is below zero\n buy_condition_3 = macd_line[-1] < 0\n\n # Set up conditions - Sell\n # Check MACD histogram has just changed from positive to negative\n sell_condition_1 = macd_histogram[-2] > 0 and macd_histogram[-1] < 0\n\n # Check MACD line is above zero\n sell_condition_2 = macd_line[-1] > 0\n\n # Set the strategy recommended action (by default, do nothing)\n action = \"none\"\n\n # Check buy conditions and set buy flag if met\n if buy_condition_1 and buy_condition_2 and buy_condition_3:\n action = \"buy\"\n\n # Check sell conditions and set sell flag if met\n if sell_condition_1 and sell_condition_2:\n action 
= \"sell\"\n\n # Print strategy data\n print(\"MACD RSI strategy data\")\n print(\"Buy condition 1: macd_histogram[-2] < 0 and macd_histogram[-1] > 0\")\n print(f\" macd_histogram[-2] = {macd_histogram[-2]}\")\n print(f\" macd_histogram[-1] = {macd_histogram[-1]}\")\n print(f\" Condition 1 met?: {buy_condition_1}\")\n print(f\"Buy condition 2: rsi[-1] <= {RSI_CUTOFF}\")\n print(f\" rsi[-1] = {rsi[-1]}\")\n print(f\" Buy condition 2 met?: {buy_condition_2}\")\n print(\"Buy condition 3: macd_line[-1] < 0\")\n print(f\" macd_line[-1] = {macd_line[-1]}\")\n print(f\" Buy condition 3 met?: {buy_condition_3}\")\n print(\"Sell condition 1: macd_histogram[-2] > 0 and macd_histogram[-1] < 0\")\n print(f\" macd_histogram[-2] = {macd_histogram[-2]}\")\n print(f\" macd_histogram[-1] = {macd_histogram[-1]}\")\n print(f\" Sell condition 1 met?: {sell_condition_1}\")\n print(\"Sell condition 2: macd_line[-1] > 0\")\n print(f\" macd_line[-1] = {macd_line[-1]}\")\n print(f\" Sell condition 2 met?: {sell_condition_2}\")\n\n return action", "def AlphaCyc(ss, train):\n\n if ss.Win != 0:\n ss.Win.PollEvents() # this is essential for GUI responsiveness while running\n viewUpdt = ss.TrainUpdt.value\n if not train:\n viewUpdt = ss.TestUpdt.value\n\n # update prior weight changes at start, so any DWt values remain visible at end\n # you might want to do this less frequently to achieve a mini-batch update\n # in which case, move it out to the TrainTrial method where the relevant\n # counters are being dealt with.\n if train:\n ss.Net.WtFmDWt()\n\n ss.Net.AlphaCycInit(train)\n ss.Time.AlphaCycStart()\n for qtr in range(4):\n for cyc in range(ss.Time.CycPerQtr):\n ss.Net.Cycle(ss.Time)\n ss.Time.CycleInc()\n if ss.ViewOn:\n if viewUpdt == leabra.Cycle:\n if cyc != ss.Time.CycPerQtr-1: # will be updated by quarter\n ss.UpdateView(train)\n if viewUpdt == leabra.FastSpike:\n if (cyc+1)%10 == 0:\n ss.UpdateView(train)\n ss.Net.QuarterFinal(ss.Time)\n ss.Time.QuarterInc()\n if ss.ViewOn:\n if viewUpdt <= leabra.Quarter:\n ss.UpdateView(train)\n if viewUpdt == leabra.Phase:\n if qtr >= 2:\n ss.UpdateView(train)\n\n if train:\n ss.Net.DWt()\n if ss.ViewOn and viewUpdt == leabra.AlphaCycle:\n ss.UpdateView(train)", "def start_step_sweep(self):\n self.write(\":SOUR:SWE:CONT:STAT ON\")", "def _calculate_c_change(self, s, ts):\n cc = self.c_change\n cs = self.c_stock\n criterium = (cs[:,0]==s) & (cs[:,1]==ts)\n nowtarget = numpy.where(criterium)[0]\n criterium = (cs[:,0]==s) & (cs[:,1]==ts-1)\n prevtarget = numpy.where(criterium)[0]\n if len(nowtarget) > 0 and len(prevtarget)>0:\n stepinf = numpy.array([[s, ts, 0., 0., 0., 0., 0., 0., 0., 0.]],\n dtype=numpy.float32)\n self.c_change = numpy.append(cc, stepinf, axis=0)\n self.c_change[-1, 2:] = cs[nowtarget, 2:] - cs[prevtarget, 2:]", "def TST_C2ST(S,N1,N_per,alpha,model_C2ST, w_C2ST, b_C2ST,device,dtype):\r\n np.random.seed(seed=1102)\r\n torch.manual_seed(1102)\r\n torch.cuda.manual_seed(1102)\r\n N = S.shape[0]\r\n f = torch.nn.Softmax()\r\n output = f(model_C2ST(S).mm(w_C2ST) + b_C2ST)\r\n pred_C2ST = output.max(1, keepdim=True)[1]\r\n STAT = abs(pred_C2ST[:N1].type(torch.FloatTensor).mean() - pred_C2ST[N1:].type(torch.FloatTensor).mean())\r\n STAT_vector = np.zeros(N_per)\r\n for r in range(N_per):\r\n ind = np.random.choice(N, N, replace=False)\r\n # divide into new X, Y\r\n ind_X = ind[:N1]\r\n ind_Y = ind[N1:]\r\n # print(indx)\r\n STAT_vector[r] = abs(pred_C2ST[ind_X].type(torch.FloatTensor).mean() - pred_C2ST[ind_Y].type(torch.FloatTensor).mean())\r\n S_vector = 
np.sort(STAT_vector)\r\n threshold = S_vector[np.int(np.ceil(N_per * (1 - alpha)))]\r\n threshold_lower = S_vector[np.int(np.ceil(N_per * alpha))]\r\n h = 0\r\n if STAT.item() > threshold:\r\n h = 1\r\n # if STAT.item() < threshold_lower:\r\n # h = 1\r\n return h, threshold, STAT", "def cir_st_disc(s_0, alpha, b, sigma, k: int= 20, delta: float=1, T= 1, seed= None):\n np.random.seed(seed)\n random.seed(seed)\n\n # Instance stock price ts and append it first value\n s_t = []\n s_t_neg= []\n s_t.append(s_0)\n s_t_neg.append(s_0)\n \n k= int(T/delta)\n \n for i in range(k):\n s_t_value= np.maximum(s_t[-1], 0) # because a price can't be negative, so we enforce our processus\n # to be positive or 0\n\n # We generata a normal number\n # cuz' (W_{t+1} - W_{t} follow a N(0, delta)\n epsilon= np.random.normal()\n\n mu= alpha*(b - s_t_value)*delta\n sigma_s_t= sigma * np.sqrt(s_t_value * delta)\n\n d_s_t= mu + sigma_s_t*epsilon\n d_s_t_neg= mu - sigma_s_t*epsilon\n\n s_t.append(s_t_value + d_s_t)\n s_t_neg.append(s_t_value + d_s_t_neg)\n\n return np.array(s_t), np.array(s_t_neg)", "def read_s_and_c(self):\n speed = self._previous_speed\n cadence = self._previous_cadence\n for conn, svc in zip(self.cyc_connections, self.cyc_services):\n if not conn.connected:\n speed = cadence = 0\n continue\n values = svc.measurement_values\n if not values:\n if self._cadence_failed >= 3 or self._speed_failed >= 3:\n if self._cadence_failed > 3:\n cadence = 0\n if self._speed_failed > 3:\n speed = 0\n continue\n if not values.last_wheel_event_time:\n continue\n speed = self._compute_speed(values, speed)\n if not values.last_crank_event_time:\n continue\n cadence = self._compute_cadence(values, cadence)\n\n if speed:\n speed = str(speed)[:8]\n if cadence:\n cadence = str(cadence)[:8]\n\n return speed, cadence", "def TF_startSweep(self):\n self.write(self.headStr('TF')+'TSTGL')", "def temp_separation_tac(T_c, T_s, fc, T_air, r_ah, r_s, r_x):\n T_ac = fc.expression(\n '((T_air / r_ah) + (T_s / r_s) + (T_c / r_x)) / '\n '((1 / r_ah) + (1 / r_s) + (1 / r_x))',\n {'r_ah': r_ah, 'r_s': r_s, 'r_x': r_x, 'T_c': T_c, 'T_s': T_s,\n 'T_air': T_air})\n return T_ac", "def ata_sct_temperature_history(self) -> SmartSsdAtaSctTemperatureHistory:\n return self._ata_sct_temperature_history", "def SCB(t,init,rhoS,deltaSC,rhoC,deltaCB,rhoB):\n\n #Declaring useful parameters\n [S0,C0,B0]=init\n lambdaS=deltaSC-rhoS #Introducing global decay rates lambda.\n lambdaC=deltaCB-rhoC\n lambdaB=-rhoB\n\n #Checking for eigenvalues equalitites\n thresh=1e-5 #threshold difference for considering two eignevalues as equal\n bSC=(lambdaC-lambdaS)*(abs(lambdaC-lambdaS)>=thresh)\n bSB=(lambdaB-lambdaS)*(abs(lambdaB-lambdaS)>=thresh)\n bCB=(lambdaB-lambdaC)*(abs(lambdaB-lambdaC)>=thresh)\n\n #S has always the same expression\n S=S0*np.exp(-lambdaS*t)\n\n #there are two cases for C\n if bSC!=0:\n c2=deltaSC*S0/bSC; c1=C0-c2\n C=c1*np.exp(-lambdaC*t)+c2*np.exp(-lambdaS*t)\n\n #there are three subcases for B in that case\n if bCB==0:\n b2=deltaCB*c1; b3=deltaCB*c2/bSB; b1=B0-b3\n B=(b1+b2*t)*np.exp(-lambdaB*t)+b3*np.exp(-lambdaS*t)\n\n elif bSB==0:\n b2=deltaCB*c1/bCB; b3=deltaCB*c2; b1=B0-b2\n B=(b1+b3*t)*np.exp(-lambdaB*t)+b2*np.exp(-lambdaC*t)\n\n else:\n b2=deltaCB*c1/bCB; b3=deltaCB*c2/bSB; b1=B0-b2-b3\n B=b1*np.exp(-lambdaB*t)+b2*np.exp(-lambdaC*t)+b3*np.exp(-lambdaS*t)\n\n else:\n c2=deltaSC*S0\n c1=C0\n C=(c1+c2*t)*np.exp(-lambdaS*t)\n\n #there are two subcases for B in that case\n if bCB!=0:\n b3=deltaCB*c2/bSB; b2=(deltaCB*c1-b3)/bSB; b1=B0-b2\n 
B=b1*np.exp(-lambdaB*t)+(b2+b3*t)*np.exp(-lambdaC*t)\n\n else:\n b1=B0; b2=deltaCB*c1; b3=deltaCB*c2/2\n B=(b1+b2*t+b3*t**2)*np.exp(-lambdaB*t)\n\n return(np.vstack((S,C,B)))", "def stft(db,istart=0,istop=86400,nh=2**8,tstep=2**7,ng=1,df=1.0,nfbins=2**9):\r\n \r\n #get length of input time series if there is two columns\r\n if type(fx) is list:\r\n fx=np.array(fx)\r\n try:\r\n fn,fm=fx.shape\r\n if fm<fn:\r\n fm,fn=fx.shape\r\n except ValueError:\r\n fn=fx.shape[0]\r\n fm=1\r\n if fm>1:\r\n fx=fx.reshape(fn)\r\n else:\r\n fx=fx.reshape(fn)\r\n #make a hanning window to minimize aliazing and Gibbs effect of short time \r\n #windows\r\n h=normalizeL2(np.hanning(nh))\r\n #make a hanning window to smooth in frequency domain\r\n if ng!=1:\r\n if np.remainder(ng,2)!=1:\r\n ng=ng-1\r\n print 'ng forced to be odd as ng-1'\r\n else:\r\n pass\r\n g=normalizeL2(np.hanning(ng))\r\n else:\r\n pass\r\n #make time step list\r\n tlst=np.arange(start=0,stop=fn-nh+1,step=tstep)\r\n #make a frequency list for plotting exporting only positive frequencies\r\n df=float(df)\r\n flst=np.fft.fftfreq(nfbins,1/df)[0:nfbins/2] #get only positive frequencies\r\n #initialize the TFD array\r\n tfarray=np.zeros((nfbins/2,len(tlst)),dtype='complex128')\r\n \r\n fa=sps.hilbert(dctrend(fx))\r\n \r\n for place,ii in enumerate(tlst):\r\n fxwin=fa[ii:ii+nh]*h\r\n #get only positive frequencies\r\n FXwin=np.fft.fft(padzeros(fxwin,npad=nfbins))[:nfbins/2]\r\n #smooth in frequency plane\r\n if ng!=1:\r\n FXwin=np.convolve(padzeros(FXwin,npad=len(FXwin)+ng-1),g,'valid')\r\n else:\r\n pass\r\n #pull out only positive quadrant, flip array for plotting\r\n tfarray[:,place]=FXwin[::-1]\r\n \r\n return tfarray,tlst,flst", "def stochastics( dataframe, low, high, close, k, d ):\n\n if not isinstance(dataframe, pd.DataFrame):\n raise ValueError(\"dataframe must by a Data Frame\")\n\n df = dataframe.copy()\n\n # Set minimum low and maximum high of the k stoch\n low_min = df[low].rolling( window = k ).min()\n high_max = df[high].rolling( window = k ).max()\n\n # Fast Stochastic\n df['k_fast'] = 100 * (df[close] - low_min)/(high_max - low_min)\n df['d_fast'] = df['k_fast'].rolling(window = d).mean()\n\n # Slow Stochastic\n df['k_slow'] = df[\"d_fast\"]\n df['d_slow'] = df['k_slow'].rolling(window = d).mean()\n\n return df", "def S_c(incident, target, T, M_b):\n # Them constants though\n pi = np.pi\n r_e = const.value('classical electron radius') * 100.0 # Convert 1m = 100cm\n m_e = const.value('electron mass energy equivalent in MeV')\n N_A = const.value('Avogadro constant')\n\n # Currently the incident and target are specified in Z number. 
incident is assumed to be fully ionized\n z = incident\n Z = target\n Z_target = Z_eff(target)\n I_target = I(target)\n\n # M_b is specified in AMU\n M_b = M_b * const.value('atomic mass constant energy equivalent in MeV')\n\n def T_above_zero(T):\n first = 4 * (z ** 2) * pi * (r_e ** 2) * m_e\n second = N_A * Z_target # TODO: Take M_m from a Pyne material\n third = 1.0 / beta_squared(T, M_b)\n logpart = (2 * m_e * beta_squared(T, M_b) * gamma_squared(T, M_b)) / (I_target)\n fourth = np.log(logpart) - beta_squared(T, M_b) + beta_squared(T, M_b)\n \n return first * second * third * fourth\n\n return np.piecewise(T, [T<=0.0, T>0], [0.0, T_above_zero])", "def calc_seff(Teff, S0, coeffs):\n a, b, c, d = coeffs\n T = Teff - 5780.\n return S0 + a*T + b*T**2 + c*T**3 + d*T**4", "def robuststftL(fx,alpha=.325, nh=2**8,tstep=2**5,df=1.0,nfbins=2**10):\r\n \r\n #get length of input time series \r\n nfx=len(fx)\r\n \r\n #compute time shift list\r\n mlst=np.arange(start=-nh/2+1,stop=nh/2+1,step=1,dtype='int')\r\n #compute time locations to take STFT\r\n tlst=np.arange(start=0,stop=nfx-nh+1,step=tstep)\r\n \r\n #make a frequency list for plotting exporting only positive frequencies\r\n flst=np.fft.fftfreq(nfbins,1/df)\r\n flstc=flst[nfbins/2:]\r\n #Note: these are actually the negative frequencies but works better for\r\n #calculations\r\n flstp=flst[0:nfbins/2]\r\n \r\n #make time window and normalize\r\n sigmanh=nh/(6*np.sqrt(2*np.log(2)))\r\n h=sps.gaussian(nh,sigmanh)\r\n h=h/sum(h)\r\n \r\n #create an empty array to put the tf in and initialize a complex value\r\n tfarray=np.zeros((nfbins/2,len(tlst)),dtype='complex128')\r\n \r\n #take the hilbert transform of the signal to make complex and remove\r\n #negative frequencies\r\n fa=sps.hilbert(dctrend(fx))\r\n fa=fa/fa.std()\r\n \r\n #make a frequency list for plotting exporting only positive frequencies\r\n flst=np.fft.fftfreq(nfbins,1/df)[nfbins/2:]#get only positive frequencies\r\n \r\n #create list of coefficients\r\n a=np.zeros(nh)\r\n a[(nh-2)*alpha:alpha*(2-nh)+nh-1]=1./(nh*(1-2*alpha)+4*alpha)\r\n \r\n for tpoint,nn in enumerate(tlst):\r\n #calculate windowed correlation function of analytic function\r\n fxwin=h*fa[nn:nn+nh]\r\n for fpoint,mm in enumerate(flstc):\r\n fxelement=fxwin*np.exp(1j*2*np.pi*mlst*mm/df)\r\n fxreal=np.sort(fxelement.real)[::-1]\r\n fximag=np.sort(fxelement.imag)[::-1]\r\n tfpoint=sum(a*(fxreal+1j*fximag))\r\n if tfpoint==0.0:\r\n tfarray[fpoint,tpoint]=1E-10\r\n else:\r\n tfarray[fpoint,tpoint]=tfpoint\r\n #normalize tfarray\r\n tfarray=(4.*nh*df)*tfarray\r\n \r\n return tfarray,tlst,flstp", "def _c2c_cost(sclst, eclst):\n def _c2c(point):\n _c_sum = 0\n for pt in eclst.points:\n _c_sum += point.frequency(pt)\n return _c_sum\n return int(sum(map(_c2c, sclst.points)))", "def get_stochd(data):\n if data is None:\n raise EmptyDataError('[!] 
Invalid data value')\n\n result = TA.STOCHD(data)\n if result is None:\n raise IndicatorException\n return result", "def __init__(self, S, f, num_t_sens, num_t_insens):\n # Number of states \n self.S = S \n self.f = f\n self.t_sens = num_t_sens\n self.t_insens = num_t_insens\n \n self.name = 'CRF'", "def k_Sw07(wind_second_moment, temp_C):\n\n U2 = wind_second_moment\n\n Sc = schmidt_number(temp_C)\n k = (0.27 * U2) * (660 / Sc) ** 0.5\n\n return k", "def short_circ():\n \n set_mode(mode_cv)\n time.sleep(.250)\n set_CV_volts(0.1)\n time.sleep(.250)\n \n sc_vals = get_input_values()\n sc_data_point = data_point(sc_vals)\n jsc = sc_data_point[4]\n print('Short circuit current: ', jsc)\n write_data_tofile(sc_data_point)\n\n return jsc", "def was(cps):\n # Head of unit\n mask = cps['tc1_p'] > 0\n cps_valid = cps[mask]\n rand = np.random.uniform(size=len(cps_valid))\n new_vals = np.exp(12. + 1. * rand)\n new_vals = np.where(new_vals < 200000., 200000., new_vals)\n cps.loc[mask, 'wasp'] = new_vals\n # spouse of unit\n mask = cps['tc1_s'] > 0\n cps_valid = cps[mask]\n rand = np.random.uniform(size=len(cps_valid))\n new_vals = np.exp(12. + 1. * rand)\n new_vals = np.where(new_vals < 200000., 200000., new_vals)\n cps.loc[mask, 'wass'] = new_vals", "def OneModeCoherentHD(Ns,t,nth,shots):\n s1 = (1+1j)*np.zeros(shots)\n\n alpha = np.sqrt(Ns/4)\n \n for i in range(shots):\n prog= sf.Program(1)\n \n with prog.context as q:\n \n sf.ops.Coherent(alpha) | q[0] # State preparation\n sf.ops.ThermalLossChannel(t,nth) | q[0] # Thermal loss channel mimicing target\n \n sf.ops.MeasureHD | q[0] # Het. Msmnt of signal 1\n\n\n # Need to run twice because of bug in the bosonic backend in dealing with repeated HD measurements\n \n eng = sf.Engine(\"bosonic\")\n results = eng.run(prog)\n \n \n #Collecting the samples\n samples = results.all_samples\n \n #Creating the measurement records\n s1[i] = samples[0][0]\n \n # Interation over number of shots is done, outputing the records\n \n return s1", "def get_stoch(data):\n if data is None:\n raise EmptyDataError('[!] 
Invalid data value')\n\n result = TA.STOCH(data)\n if result is None:\n raise IndicatorException\n return result", "def tra(s_c, point, system):\n try:\n s_c = s_c[0]\n except:\n pass\n #print(\"Input is already a float.\")\n #print(\"The given conmutation point is: {}\".format(s_c))\n u0_curve = sir.CurveSegment(point.s0, point.i0, 0, system)\n sc, ic = u0_curve._curve_sol(system.imax)\n #print(\"The intersection point is: {}\".format(sc))\n if s_c >= sc:\n #print(\"I'ma do it with only two thingamajigs.\")\n Tu = sir.CurveSegment(point.s0, point.i0, 0, system, s_c)\n Tu.get_time()\n i_c = system._curve(s_c, point.s0, point.i0, 0)\n Tc = sir.CurveSegment(s_c, i_c, system.umax, system)\n send, iend = Tc.curve_intersection(system.tau)\n Tc = sir.CurveSegment(s_c, i_c, system.umax, system, send)\n Tc.get_time()\n #print(\"Tu: {}\".format(Tu.time))\n #print(\"Tc: {}\".format(Tc.time))\n #print(Tu.time + Tc.time)\n return sir.Trajectory(Tu, Tc)\n else:\n #print(\"I'ma have to do it with three thingamajigs.\")\n Tu = sir.CurveSegment(point.s0, point.i0, 0, system, sc)\n Tu.get_time()\n Ts = sir.LineSegment(sc, s_c, system)\n Ts.get_time()\n Tc = sir.CurveSegment(s_c, system.imax, system.umax, system)\n send, iend = Tc.curve_intersection(system.tau)\n Tc = sir.CurveSegment(s_c, system.imax, system.umax, system, send)\n Tc.get_time()\n #print(\"Tu: {}\".format(Tu.time))\n #print(\"Ts: {}\".format(Ts.time))\n #print(\"Tc: {}\".format(Tc.time))\n #print(Tu.time + Ts.time + Tc.time)\n return sir.Trajectory(Tu, Ts, Tc)", "def get_current_s(self):\n return 1 if self.ff_states[0] else 0", "def sendTcScoe(self, tcPacket):\n LOG_INFO(\"EDEN.Client.sendTcScoe\", \"EDEN\")\n tcScoePDU = EGSE.EDENPDU.TCscoe()\n tcScoePDU.setCCSDSpacket(tcPacket)\n self.sendPDU(tcScoePDU)", "def plot_distance(stc_est, stc_signal, D, surface='inflated'):\n import surfer # Import here so other parts of the code can be used without graphics\n peak = stc_est.get_peak(vert_as_index=True)[0]\n peak_hemi = 0 if peak < len(stc_est.vertices[0]) else 1\n true_hemi = config.signal_hemi\n\n est_vert = np.hstack(stc_est.vertices)[peak]\n true_vert = stc_signal.vertices[true_hemi][0]\n\n brain = surfer.Brain('sample', hemi='both', surf=surface)\n brain.add_data(D[peak, :len(stc_est.vertices[0])], vertices=stc_est.vertices[0],\n hemi='lh', transparent=True)\n brain.add_data(D[peak, len(stc_est.vertices[0]):], vertices=stc_est.vertices[1],\n hemi='rh', transparent=True)\n brain.add_foci([est_vert], coords_as_verts=True, hemi='lh' if peak_hemi == 0 else 'rh', color='red')\n brain.add_foci([true_vert], coords_as_verts=True, hemi='lh' if true_hemi == 0 else 'rh', color='green')\n return brain", "def calc_csls(self, sess):\n good_pairs = self.generate_dictionary(sess)\n eval_dict = {self.src_ph: good_pairs[0], self.tgt_ph: good_pairs[1]}\n cos_mean = sess.run(self.csls_subgraphs[\"CSLS_Criteria\"], feed_dict=eval_dict)\n print(\"CSLS Score is \" + str(cos_mean))\n\n # Drop LR only after the second drop in CSLS\n if cos_mean < self.best_cos_score:\n self.drop_lr = True & self.second_drop\n self.second_drop = True\n\n # Save model whenever cos score is better than saved score\n if cos_mean > self.best_cos_score:\n self.save_model = True\n else:\n self.save_model = False\n\n # Update best cos score\n if cos_mean > self.best_cos_score:\n self.best_cos_score = cos_mean\n self.drop_lr = False", "def ctof(temp):\n return temp * 9/5 + 32 # functions should be surrounded by 2 blank lines", "def stft(y, sr, n_fft=400, hop_t=0.010, win_t=0.025, 
window=\"hamming\",\n preemphasis=0.97):\n if preemphasis > 1e-12:\n y = y - preemphasis * np.concatenate([[0], y[:-1]], 0)\n hop_length = int(sr * hop_t)\n win_length = int(sr * win_t)\n return librosa.core.stft(y, n_fft=n_fft, hop_length=hop_length, \n win_length=win_length, window=window)", "def one_transition_spectrum_abs(self,tr):\n \n\n ta = tr[\"ta\"] # TimeAxis\n dd = tr[\"dd\"] # transition dipole strength\n om = tr[\"om\"] # frequency - rwa\n gg = tr[\"gg\"] # natural broadening (constant or time dependent)\n fwhm = tr[\"fwhm\"] # Additional gaussian broadening of the spectra\n sgm = fwhm/(2*numpy.sqrt(2*numpy.log(2)))\n \n # CD and fluorescence can be calculated in this step\n # TODO if rotatory strength defined calculate also circular dichroism spectra\n # TOOD calculate fluorescence spectra (for fluorescence there should be a switch because it should be calculated only for the first transition) \n \n \n if self.system._has_system_bath_coupling:\n# ct = tr[\"ct\"] # correlation function\n \n # convert correlation function to lineshape function\n #gt = self._c2g(ta,ct.data)\n gt = tr[\"gt\"]\n # calculate time dependent response\n at = numpy.exp(-gt -1j*om*ta.data)\n else:\n # calculate time dependent response\n at = numpy.exp(-1j*om*ta.data) \n# plt.figure()\n# plt.title(\"Absorption\")\n# plt.plot(ta.data,numpy.real(at))\n# plt.plot(ta.data,numpy.imag(at))\n \n \n if len(gg) == 1:\n gam = gg[0]\n rt = numpy.exp(gam*ta.data)\n at *= rt\n #print(\"Constant: \", rt[20], len(at))\n else:\n rt = numpy.exp((gg)*ta.data) \n at *= rt\n #print(\"Time dependent: len = \", rt[20], len(rt))\n \n if fwhm!=0.0:\n gauss = numpy.exp(-2*(numpy.pi**2)*(sgm**2)*(ta.data**2))\n at *= gauss\n \n # Fourier transform the result\n ft = dd*numpy.fft.hfft(at)*ta.step\n ft = numpy.fft.fftshift(ft)\n # invert the order because hfft is a transform with -i\n ft = numpy.flipud(ft) \n # cut the center of the spectrum\n Nt = ta.length #len(ta.data) \n return ft[Nt//2:Nt+Nt//2]", "def calc_s11(rs):\n freq = []\n resist = []\n react = []\n\n for f in rs:\n fs = f.split(\" \")\n fs = list(filter(None, fs))\n freq.append(float(fs[0]))\n resist.append(float(fs[5]))\n react.append(float(fs[6]))\n\n #print('freq',freq,'resist',resist,'react',react)\n\n refc = []\n s11 = []\n ts11 = 0\n for i in range(0,len(freq)):\n refc.append((resist[i] + 1j*react[i]- 50)/((resist[i]) + 1j*react[i] + 50));\n\n if refc[i]==0:\n s11.append(0)\n else:\n s11.append(20*math.log(abs(refc[i]),10))\n\n ts11 += s11[i]\n\n #print(s11)\n return (ts11, s11)", "def fun_cnoise_Stim(self, t_stim = 10*s, sexp = 0, cutf = 0, do_csd = 1, t_qual = 0, freq_used = np.array([]), K_mat_old = np.array([]), inh_factor = [1], onf = None, equi = 0):\n self.barrier() # wait for other nodes\n \n filename = str(self.pickle_prefix) + \"_results_pop_cnoise.p\"\n filepath = self.data_dir + \"/\" + filename\n \n if self.id == 0: print \"- filepath:\", filepath \n \n if self.do_run or (os.path.isfile(filepath) is False):\n\n tstart = 0; \n fs = 1 / self.dt # sampling rate \n fmax = fs / 2 # maximum frequency (nyquist)\n \n t_noise = arange(tstart, t_stim, self.dt) # create stimulus time vector, make sure stimulus is even!!!\n\n #print self.syn_ex_dist\n #print self.syn_inh_dist\n #exit()\n \n if (self.syn_ex_dist == []):\n for nt in range(self.n_celltypes): # loop over all cells\n #print \"nt\", nt\n if hasattr(self.cells[nt][0], 'input_vec'):\n self.syn_ex_dist.append([1] * len(self.cells[nt][0].input_vec)) # default ex for all by default!!!\n else: \n 
self.syn_ex_dist.append([1] * self.n_syn_ex[nt]) # default ex for all by default!!!\n \n #print self.syn_ex_dist\n \n if (self.syn_ex_dist[0] == []):\n nemax = 1\n else:\n nemax = max([item for sublist in self.syn_ex_dist for item in sublist])\n \n if (self.syn_inh_dist == []): # and (any(self.n_syn_inh) > 0)\n for nt in range(self.n_celltypes): # loop over all cells\n self.syn_inh_dist.append([0] * self.n_syn_inh[nt]) # default no inh for all by default!!!\n \n #print self.syn_inh_dist\n #exit()\n \n if (self.syn_inh_dist[0] == []):\n nimax = 0\n else:\n nimax = max([item for sublist in self.syn_inh_dist for item in sublist]) \n \n #print \"self.syn_inh_dist, self.syn_ex_dist\", self.syn_inh_dist, self.syn_ex_dist\n \n n_noise = max([nemax,nimax]) # number of noise sources\n #print n_noise,nemax,nimax\n # create reproduceable input\n noise_data = []\n\n for nj in range(n_noise):\n \n if self.id == 0: # make sure all have the same signal !!!\n if len(freq_used) == 0: \n noise_data0 = create_colnoise(t_noise, sexp, cutf, self.seed+nj, onf = onf)\n else:\n noise_data0, _, _, _ = create_multisines(t_noise, freq_used) # create multi sine signal\n else:\n noise_data0 = np.empty(len(t_noise), dtype=np.float64)\n\n noise_data0 = self.broadcast(noise_data0, fast = True) \n \n noise_data.append(noise_data0)\n noise_data0 = [] \n \n noise_data_points = len(noise_data[0]) \n\n # Create signal weight vector inh_factor if it is not fully given\n if len(noise_data) > len(inh_factor):\n inh_factor = [inh_factor[0]] * len(noise_data) \n print \"inh_factor:\", inh_factor\n\n #if equi:\n #pass\n # tstop = t_stim\n \n if max(self.n_syn_ex) == 0: # this means current input\n \n self.set_IStim() # sets amp\n \n if self.fluct_s != []:\n if self.fluct_s[self.a_celltype[0]] > 0:\n if self.id == 0: print \"- adding i fluct\"\n self.connect_fluct()\n \n for i, m in enumerate(self.method_interpol):\n if \"syn\" in m: self.method_interpol[i] = \"syn \" + str(self.syn_tau1/ms) + \"/\" + str(self.syn_tau2/ms) + \"ms\"\n if \"bin\" in m: self.method_interpol[i] = \"bin \" + str(self.bin_width/ms) + \"ms\"\n \n stimulus = []\n for nj in range(len(noise_data)):\n stimulus0, t, t_startstop = construct_Stimulus(noise_data[nj], fs, self.amp[self.a_celltype[0]], ihold = 0, delay_baseline = self.delay_baseline) # , tail_points = 0\n stimulus.append(stimulus0)\n tstop = t[-1]\n \n self.set_IPlay2(stimulus, t)\n if self.id == 0: print \"- starting colored noise transfer function estimation! with amp = \" + str(np.round(self.amp[self.a_celltype[0]],4)) + \", ihold = \" + str(np.round(self.ihold[self.a_celltype[0]],4)) + \", ihold_sigma = \" + str(np.round(self.ihold_sigma,4)) + \", dt = \" + str(self.dt) + \" => maximum frequency = \" + str(fmax) + \"\\r\" \n \n else:\n\n self.give_freq = False\n ihold = self.set_i(self.ihold) # just sets amp, ihold should not change! 
\n\n if 'gsyn_in' not in self.method_interpol: \n pass\n else:\n self.g_syn_ex = [1]*len(self.N)\n \n \n if ((self.fluct_g_e0 != []) or (self.fluct_g_i0 != [])):\n if ((self.fluct_g_e0[self.a_celltype[0]] > 0) or (self.fluct_g_i0[self.a_celltype[0]] > 0)):\n if self.id == 0: print \"- adding g fluct\"\n self.connect_gfluct(E_i=-65)\n \n stimulus = []\n for nj in range(len(noise_data)):\n stimulus0, t, t_startstop = construct_Stimulus(noise_data[nj], fs, amp=1, ihold = 0, tail_points = 0, delay_baseline = self.delay_baseline) # self.amp\n stimulus.append(stimulus0)\n \n noise_data = [] \n tstop = t[-1]\n \n if self.N[self.a_celltype[0]] > 1:\n self.set_IStim(ihold = [0]*self.n_celltypes, ihold_sigma = [0]*self.n_celltypes, random_start = True, tstart_offset = 1)\n if self.id == 0: print \"- add random start\"\n \n #print \"Enter Synplay()\"\n self.set_SynPlay(stimulus, t, t_startstop = t_startstop) \n #print \"Exit Synplay()\"\n\n if self.id == 0: print \"- starting colored noise transfer function estimation with synaptic input! with amp = \" + str(np.round(self.amp,4)) + \", ihold = \" + str(np.round(self.ihold,4)) + \", ihold_sigma = \" + str(np.round(self.ihold_sigma,4)) + \", dt = \" + str(self.dt) + \" => maximum frequency = \" + str(fmax) + \"\\r\" \n \n amp_vec = []\n mag_vec = [] \n pha_vec = []\n freq_used = []\n ca = []\n SNR_mat = []\n VAFf_mat = []\n Qual_mat = []\n CF_mat = [] \n VAF_mat = []\n stim = []\n stim_re_mat = []\n resp_mat = []\n current_re = []\n ihold1 = []\n tk = []\n K_mat = []\n gsyn_in = []\n fmean = []\n fmax = [] \n fmstd = [] \n fcvm = [] \n fmeanA = []\n fmaxA = [] \n fmstdA = [] \n fcvmA = [] \n t_all_vec_input_sorted = []\n id_all_vec_input_sorted = []\n \n if (self.id == 0) and (max(self.n_syn_ex) > 0):\n print range(self.n_celltypes), np.shape(self.t_all_vec_input)\n for l in range(self.n_celltypes): \n ie = argsort(self.t_all_vec_input[l]) \n t_all_vec_input_sorted.append( self.t_all_vec_input[l][ie] )\n id_all_vec_input_sorted.append( self.id_all_vec_input[l][ie].astype(int) )\n \n #if (self.id == 0): \n # print self.g_syn_ex\n # print np.array(self.g_syn_ex)>= 0\n \n #print \"g_syn_ex:\",self.g_syn_ex\n if np.array(np.array(self.g_syn_ex)>= 0).any():\n \n if hasattr(self.cells[self.a_celltype[0]][0], 'get_states') and equi:\n print \"- Equilibrate!\"\n self.run(tstop, do_loadstate = False)\n m = md5.new()\n cell_exe_new = self.cell_exe[0]\n m.update(cell_exe_new)\n filename = './states_' + self.celltype[0] + '_' + m.hexdigest() + '_Population.b'\n self.cells[self.a_celltype[0]][0].get_states(filename)\n else:\n self.run(tstop, do_loadstate = False)\n \n i_startstop = []\n \n results = self.get(t_startstop, i_startstop) \n time = results.get('time')\n current = results.get('current') \n voltage = results.get('voltage') \n fmean = results.get('fmean') \n gsyn = results.get('gsyn') \n freq_times = results.get('freq_times')\n spike_freq = results.get('spike_freq')\n t_all_vec_vec = results.get('t_all_vec_vec')\n id_all_vec_vec = results.get('id_all_vec_vec')\n gsyns = results.get('gsyns')\n gsyn_in = results.get('gsyn_in')\n \n fmax = results.get('fmax')\n fmstd = results.get('fmstd')\n fcvm = results.get('fcvm')\n \n fmeanA = results.get('fmeanA') \n fmaxA = results.get('fmaxA')\n fmstdA = results.get('fmstdA')\n fcvmA = results.get('fcvmA')\n \n fbaseA = results.get('fbaseA') \n fbase = results.get('fbase')\n fbstdA = results.get('fbstdA')\n \n \n else: # do not run, analyse input!!!\n \n time = t\n voltage = []\n for l in range(self.n_celltypes): \n 
voltage.append(np.zeros(len(t)))\n current = []\n \n freq_times = []\n spike_freq = []\n gsyn = []\n gsyn_in = []\n \n t_all_vec_vec = []\n id_all_vec_vec = []\n \n fmean = []\n fmax = []\n fmstd = []\n fcvm = []\n fstdm = []\n \n fmeanA = []\n fmaxA = []\n fmstdA = []\n fcvmA = []\n fbaseA = []\n fbase = []\n fbstdA = []\n \n if self.id == 0:\n \n current = self.n_train_ex\n \n #t_all_vec = self.t_all_vec_input\n #id_all_vec = self.id_all_vec_input\n\n #ie = argsort(t_all_vec) \n #t_all_vec_vec.append( t_all_vec[ie] )\n #id_all_vec_vec.append( id_all_vec[ie].astype(int) )\n \n t_all_vec_vec = t_all_vec_input_sorted\n id_all_vec_vec = id_all_vec_input_sorted\n \n freq_times = arange(0, tstop, self.bin_width)\n spike_freq = np.zeros(len(freq_times))\n \n for j in self.a_celltype:\n \n [num_spikes, _] = neuronpy.util.spiketrain.get_histogram(t_all_vec_vec[j], bins = freq_times)\n\n if self.tau2_ex[0] > 0:\n spike_freq = np.concatenate((zeros(1),num_spikes)) \n print \"NOSYN TEST: start convolution with Ksyn\"\n Ksyn = syn_kernel(arange(0,10*self.tau2_ex[0],self.bin_width), self.tau1_ex[0], self.tau2_ex[0]) \n Ksyn = np.concatenate((zeros(len(Ksyn)-1),Ksyn))\n spike_freq = np.convolve(Ksyn, spike_freq, mode='same')\n print \"NOSYN TEST: convolution finished\"\n else:\n\n if isinstance(self.factor_celltype[j], ( int, long ) ):\n f = self.factor_celltype[j] \n else:\n f = self.factor_celltype[j][0] \n \n spike_freq = spike_freq + f * np.concatenate((zeros(1),num_spikes)) / self.bin_width\n\n fmean.append(self.fmean_input)\n fmax.append(self.fmax_input) \n fmstd.append(self.fmstd_input) \n fcvm.append(self.fcvm_input) \n fstdm.append(self.fstdm_input)\n\n if self.no_fmean == True:\n fmean.append(ihold)\n \n #plt.figure('spike_freq') \n #plt.plot(freq_times, spike_freq)\n #plt.savefig(\"./figs/Pub/Spike_freq_\" + str(self.pickle_prefix) + \".pdf\", dpi = 300, transparent=True) # save it \n #plt.clf()\n \n fmeanA = fmean[0]\n fmaxA = fmax[0]\n fmstdA = fmstd [0] \n fcvmA = fcvm[0]\n fstdmA = fstdm[0]\n \n \n if self.id == 0: \n \n if any([i<0 for i in inh_factor]):\n \n p0 = []\n inhf_idx = []\n for i, inhf in enumerate(inh_factor):\n if inhf < 0: \n p0.append(0) \n inhf_idx.append(i)\n \n plsq = fmin(self.residuals_compute_Transfer, p0, args=(stimulus, spike_freq, freq_times, t, noise_data_points, gsyn, gsyn_in, do_csd, t_qual, K_mat_old, t_startstop, inh_factor))\n p = plsq\n \n ip = 0\n for i in inhf_idx:\n inh_factor[i] = p[ip]\n ip += 1\n \n\n print \"Final inh_factor: \", inh_factor\n \n \n results = self.compute_Transfer(stimulus, spike_freq = spike_freq, freq_times = freq_times, \n t = t, noise_data_points = noise_data_points, gsyn = gsyn, gsyn_in = gsyn_in, \n do_csd = do_csd, t_qual = t_qual, K_mat_old = K_mat_old, t_startstop = t_startstop, inh_factor=inh_factor)\n \n mag_vec, pha_vec, ca, freq, freq_used, fmean_all = results.get('mag_mat'), results.get('pha_mat'), results.get('ca_mat'), results.get('freq'), results.get('freq_used'), results.get('fmean') \n SNR_mat, VAFf_mat, Qual_mat, CF_mat, VAF_mat = results.get('SNR_mat'), results.get('VAFf_mat'), results.get('Qual_mat'), results.get('CF_mat'), results.get('VAF_mat') \n stim, resp_mat, stim_re_mat, tk, K_mat = results.get('stim'), results.get('resp_mat'), results.get('stim_re_mat'), results.get('tk'), results.get('K_mat') \n \n \n self.barrier() # wait for other nodes\n \n \n if self.id == 0:\n \n if t_qual > 0:\n #print t_startstop[0], t_startstop[0]/self.dt, (t_startstop[0]+t_qual)/self.dt\n current_re = 
current[int(t_startstop[0]/self.dt):int((t_startstop[0]+t_qual)/self.dt)]\n current_re = current_re[int(len(K_mat[self.a_celltype[0]])):int(len(current_re))-int(len(K_mat[self.a_celltype[0]]))]\n \n if len(self.i_holdrs) > 0:\n ihold1 = self.i_holdrs[self.a_celltype[0]][0]\n else:\n ihold1 = []\n \n for l in range(len(self.method_interpol)): # unwrap \n pha_vec[l,:] = unwrap(pha_vec[l,:] * (pi / 180)) * (180 / pi) # unwrap for smooth phase\n \n # only return fraction of actual signal, it is too long!!! \n if time[-1] > self.tmax: \n imax = -1*int(self.tmax/self.dt)\n time = time[imax:]; current = current[imax:]; gsyn = gsyn[imax:]; gsyn_in = gsyn_in[imax:]\n for n in range(self.n_celltypes): \n voltage[n] = voltage[n][imax:]\n \n if freq_times != []: \n if freq_times[-1] > self.tmax:\n imax2 = where(freq_times > self.tmax)[0][0] # for spike frequency \n freq_times = freq_times[0:imax2]; spike_freq = spike_freq[0:imax2] \n \n bvec = [\"_syn\" in st for st in self.method_interpol]\n if np.any(bvec):\n # normalize synaptic integration with others \n mag_vec[1,:]= mag_vec[0,0]*mag_vec[1,:]/mag_vec[1,0] \n \n if self.id == 0: print \"start pickle\"\n \n results = {'freq_used':freq_used, 'amp':amp_vec,'mag':mag_vec,'pha':pha_vec,'ca':ca,'voltage':voltage,'tk':tk,'K_mat':K_mat, 'ihold1': ihold1, 't_startstop':t_startstop, #'stimulus':stimulus,\n 'current':current,'t1':time,'freq_times':freq_times,'spike_freq':spike_freq, 'stim':stim, 'stim_re_mat':stim_re_mat, 'resp_mat':resp_mat, 'current_re':current_re, 'gsyn_in':gsyn_in, 'fmeanA':fmeanA, 'fmaxA':fmaxA, 'fmstdA':fmstdA, 'fcvmA':fcvmA, 'fbaseA':fbaseA, 'fbase':fbase, 'fbstdA':fbstdA,\n 'fmean':fmean,'method_interpol':self.method_interpol, 'SNR':SNR_mat, 'VAF':VAFf_mat, 'Qual':Qual_mat, 'CF':CF_mat, 'VAFs':VAF_mat, 'fmax':fmax, 'fmstd':fmstd, 'fcvm':fcvm, 'inh_factor':inh_factor, 't_all_vec_vec':t_all_vec_vec, 'id_all_vec_vec':id_all_vec_vec} \n \n if self.id == 0:\n if self.dumpsave == 1:\n pickle.dump( results, gzip.GzipFile( filepath, \"wb\" ) )\n print \"pickle done\" \n \n \n if self.plot_train:\n \n for a in self.a_celltype:\n\n #i_start = mlab.find(t_all_vec_vec[a] >= 0)[0]\n #i_stop = mlab.find(t_all_vec_vec[a] >= 5)[0]\n \n #t_all_cut = t_all_vec_vec[a][i_start:i_stop]\n #id_all_cut = id_all_vec_vec[a][i_start:i_stop]\n \n t_all_cut = t_all_vec_vec[a]\n id_all_cut = id_all_vec_vec[a]\n \n f_start_in = mlab.find(t_all_cut >= 0) \n f_stop_in = mlab.find(t_all_cut <= 10) \n \n f_start = f_start_in[0] \n f_stop = f_stop_in[-1]+1 \n use_spikes = t_all_cut[f_start:f_stop]\n use_id = id_all_cut[f_start:f_stop]\n \n plt.figure('results_train') \n ax99 = plt.subplot(1,1,1)\n ax99.plot(use_spikes,use_id,'|', ms=2)\n plt.text(0.5, 1.1, r'CF=' + str(round(fmean,1)) + ',fmax=' + str(round(fmax,1)) + ',fmstd=' + str(round(fmstd,1)), transform=ax99.transAxes, fontsize=10, va='center', ha='center')\n plt.savefig(\"./figs/Pub/Train_\" + str(self.pickle_prefix) + \"_cell\" + str(a) + \"_N\" + str(self.N[a]) + \".pdf\", dpi = 300, transparent=True) # save it \n \n plt.clf()\n \n if len(t_all_cut) > 0:\n \n tbin = 100*ms\n tb = np.arange(0,t[-1],tbin)\n [all_rate, _] = neuronpy.util.spiketrain.get_histogram(t_all_cut, bins = tb)\n all_rate = np.concatenate((np.zeros(1),all_rate)) / self.N[a] / tbin\n \n plt.figure('results_train2') \n plt.plot(tb,all_rate)\n plt.savefig(\"./figs/Pub/PSTH_\" + str(self.pickle_prefix) + \"_cell\" + str(a) + \"_N\" + str(self.N[a]) + \".pdf\", dpi = 300, transparent=True) # save it \n plt.clf()\n \n plt.figure('results_noise') 
\n plt.plot(time,current)\n plt.savefig(\"./figs/Pub/Noise_\" + str(self.pickle_prefix) + \"_cell\" + str(a) + \"_N\" + str(self.N[a]) + \".pdf\", dpi = 300, transparent=True) # save it \n plt.clf()\n \n \n if self.plot_input:\n \n if len(t_all_vec_input_sorted[0]) > 0:\n \n i_start = mlab.find(t_all_vec_input_sorted[0] >= 0)[0]\n i_stop = mlab.find(t_all_vec_input_sorted[0] >= 5)[0]\n \n t_all_cut = t_all_vec_input_sorted[0][i_start:i_stop]\n id_all_cut = id_all_vec_input_sorted[0][i_start:i_stop]\n \n plt.figure('results_input') \n ax99 = plt.subplot(1,1,1)\n ax99.plot(t_all_cut,id_all_cut,'|', ms=2)\n plt.text(0.5, 1.1, r'fmean=' + str(round(self.fmean_input,1)) + ',fmax=' + str(round(self.fmax_input,1)) + ',fmstd=' + str(round(self.fmstd_input,1)) + ',fcvm=' + str(round(self.fcvm_input,1)) + ',fstdm=' + str(round(self.fstdm_input,1)), transform=ax99.transAxes, fontsize=10, va='center', ha='center')\n plt.savefig(\"./figs/Pub/Input_\" + str(self.pickle_prefix) + \"_N\" + str(self.N[self.a_celltype[0]]) + \".pdf\", dpi = 300, transparent=True) # save it \n plt.clf()\n \n\n else:\n \n if self.id == 0:\n results = pickle.load( gzip.GzipFile( filepath, \"rb\" ) )\n \n #print results\n #print {key:np.shape(value) for key,value in results.iteritems()}\n \n if self.minimal_dir: # save only info needed for plot\n \n print {key:np.shape(value) for key,value in results.iteritems()}\n \n if \"Fig6_pop_transfer_grc_syngr_nsyn4_cn_a1_noisesynlow_inhlow_adjfinh_varih_N100_CFo6.0_results_pop_cnoise.p\" in filename:\n results['ca'] = [] \n results['resp_mat'] = []\n results['stim'] = []\n results['current'] = []\n results['tk'] = []\n results['K_mat'] = []\n results['freq_times'] = []\n results['spike_freq'] = []\n results['stim_re_mat'] = []\n results['current_re'] = []\n results['t_all_vec_vec'] = []\n results['id_all_vec_vec'] = [] \n results['gsyn_in'] = []\n \n elif (\"Fig8_pop_transfer_none_synno_cn_cutf30_a1_noisesynlow_ih20_varih_N100_CFo-1_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_none_synno_cn_cutf30_a10_noisesynlow_ih20_varih_N100_CFo-1_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf30_a1_noisesynlow_inhlow_adjfinh_varih_varinhn_N100_CFo9.0_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf30_a10_noisesynlow_inhlow_adjfinh_varih_varinhn_N100_is0.14_CFo9.0_results_pop_cnoise.p\" in filename) \\\n :\n\n results['ca'] = [] \n results['resp_mat'] = []\n results['current'] = []\n results['tk'] = []\n results['K_mat'] = []\n results['voltage'] = [] \n results['current_re'] = []\n results['t_all_vec_vec'] = []\n results['id_all_vec_vec'] = []\n results['t1'] = []\n results['gsyn_in'] = []\n \n elif (\"Fig8_pop_transfer_none_synno_cn_cutf30_a1_noisesynlow_ih20_varih_N50_twopop_CFo-1_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_none_synno_cn_cutf30_a10_noisesynlow_ih20_varih_N50_twopop_CFo-1_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf30_a1_noisesynlow_inhlow_adjfinh_varih_varinhn_N50_twopop_CFo9.0_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf30_a10_noisesynlow_inhlow_adjfinh_varih_varinhn_N50_is0.14_twopop_CFo9.0_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf5_a1_noisesynlow_inhlow_adjfinh_varih_varinhn_N100_CFo14.0_results_pop_cnoise.p\" in filename) \\\n or 
(\"Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf5_a1_noisesynlow_inhlow_adjfinh_varih_varinhn_N50_twopop_CFo14.0_results_pop_cnoise.p\" in filename) \\\n :\n \n results['ca'] = [] \n results['resp_mat'] = []\n results['current'] = []\n results['tk'] = []\n results['K_mat'] = []\n results['voltage'] = [] \n results['current_re'] = []\n results['t_all_vec_vec'] = []\n results['id_all_vec_vec'] = []\n results['t1'] = []\n results['gsyn_in'] = []\n results['freq_times'] = []\n results['spike_freq'] = []\n \n elif (\"Fig4_pop_transfer_grc_cn_addn100_N[100]_CF[40]_amod[1]_results_pop_cnoise.p\" in filename) \\\n or (\"Fig4_pop_transfer_grc_cn_addn1_N[100]_CF[40]_amod[1]_results_pop_cnoise.p\" in filename) \\\n or (\"Fig4b_pop_transfer_grc_lowcf_cn_twopop_N[50, 50]_CF[0.0055, 0.0055]_amod[None, None]_results_pop_cnoise.p\" in filename) \\\n or (\"Fig4b_pop_transfer_grc_lowcf_cn_N[100]_CF[0.0055]_amod[None]_results_pop_cnoise.p\" in filename) \\\n or (\"Fig4b_pop_transfer_grc_lowcf_slownoise_cn_twopop_N[50, 50]_CF[0.0051, 0.0051]_amod[None, None]_results_pop_cnoise.p\" in filename) \\\n or (\"Fig4b_pop_transfer_grc_lowcf_slownoise_cn_N[100]_CF[0.0051]_amod[None]_results_pop_cnoise.p\" in filename) \\\n :\n \n results['ca'] = [] \n results['resp_mat'] = []\n results['current'] = []\n results['tk'] = []\n results['K_mat'] = []\n results['voltage'] = [] \n results['t_all_vec_vec'] = []\n results['id_all_vec_vec'] = []\n results['t1'] = []\n results['gsyn_in'] = []\n results['freq_times'] = []\n results['spike_freq'] = []\n \n elif (\"Fig2_pop_transfer_\" in filename) \\\n :\n \n results['ca'] = [] \n results['resp_mat'] = []\n results['current'] = []\n results['t1'] = []\n results['voltage'] = [] \n results['freq_times'] = []\n results['spike_freq'] = []\n results['current_re'] = []\n results['t_all_vec_vec'] = []\n results['id_all_vec_vec'] = []\n results['gsyn_in'] = []\n \n else:\n results['ca'] = [] \n results['resp_mat'] = []\n results['stim'] = []\n results['current'] = []\n results['tk'] = []\n results['K_mat'] = []\n results['t1'] = []\n results['voltage'] = [] \n results['freq_times'] = []\n results['spike_freq'] = []\n results['stim_re_mat'] = []\n results['current_re'] = []\n results['t_all_vec_vec'] = []\n results['id_all_vec_vec'] = []\n results['gsyn_in'] = []\n\n print {key:np.shape(value) for key,value in results.iteritems()}\n\n pickle.dump( results, gzip.GzipFile( self.minimal_dir + \"/\" + filename, \"wb\" ) ) \n \n else:\n results = {'freq_used':[], 'amp':[],'mag':[],'pha':[],'ca':[],'voltage':[], 'tk':[],'K_mat':[], 'ihold1':[], 't_startstop':[], #'stimulus':[],\n 'current':[],'t1':[],'freq_times':[],'spike_freq':[], 'stim':[], 'stim_re_mat':[], 'current_re':[], 'gsyn_in':[], 'fmeanA':[], 'fmaxA':[], 'fmstdA':[], 'fcvmA':[], 'fbaseA':[], 'fbase':[], 'fbstdA':[],\n 'fmean':[],'method_interpol':self.method_interpol, 'SNR':[], 'VAF':[], 'Qual':[], 'CF':[], 'VAFs':[], 'fmax':[], 'fmstd':[], 'fcvm':[], 'inh_factor':[], 't_all_vec_vec':[], 'id_all_vec_vec':[]} \n \n if self.id == 0: \n\n if self.plot_train: \n\n for a in self.a_celltype:\n \n t1 = results.get('t1') \n voltage = results.get('voltage') \n fmean = results.get('fmean') \n fmax = results.get('fmax') \n fmstd = results.get('fmstd') \n \n \n if results.has_key('t_all_vec_vec'):\n \n if len(results['t_all_vec_vec']) > 0: \n t_all_vec_vec = results.get('t_all_vec_vec') \n id_all_vec_vec = results.get('id_all_vec_vec') \n \n t_all_cut = t_all_vec_vec[a]\n id_all_cut = id_all_vec_vec[a]\n \n f_start_in = mlab.find(t_all_cut >= 0) \n 
f_stop_in = mlab.find(t_all_cut <= 10) \n \n f_start = f_start_in[0] \n f_stop = f_stop_in[-1]+1 \n use_spikes = t_all_cut[f_start:f_stop]\n use_id = id_all_cut[f_start:f_stop]\n \n plt.figure('results_train') \n ax97 = plt.subplot(1,1,1)\n ax97.plot(use_spikes,use_id,'|', ms=6)\n plt.text(0.5, 1.1, r'CF=' + str(round(fmean,1)) + ',fmax=' + str(round(fmax,1)) + ',fmstd=' + str(round(fmstd,1)), transform=ax97.transAxes, fontsize=10, va='center', ha='center')\n plt.savefig(\"./figs/Pub/Train_\" + str(self.pickle_prefix) + \"_cell\" + str(a) + \"_N\" + str(self.N[a]) + \".pdf\", dpi = 300, transparent=True) # save it \n\n \n plt.figure('results_voltage') \n ax99 = plt.subplot(2,1,1)\n ax99.plot(t1,voltage[a])\n \n t_noise = arange(0, t_stim, self.dt)\n noise_data = create_colnoise(t_noise, sexp, cutf, 50, onf = onf)\n stimulus, t, t_startstop = construct_Stimulus(noise_data, 1/self.dt, amp=1, ihold = 0, tail_points = 0, delay_baseline = self.delay_baseline) \n ax98 = plt.subplot(2,1,2)\n ax98.plot(t[0:10/self.dt],stimulus[0:10/self.dt],color='k')\n \n plt.text(0.5, 1.1, r'CF=' + str(round(fmean,1)) + ',fmax=' + str(round(fmax,1)) + ',fmstd=' + str(round(fmstd,1)), transform=ax99.transAxes, fontsize=10, va='center', ha='center')\n plt.savefig(\"./figs/Pub/Voltage_\" + str(self.pickle_prefix) + \"_cell\" + str(a) + \"_N\" + str(self.N[a]) + \".pdf\", dpi = 300, transparent=True) # save it \n plt.show()\n plt.clf()\n \n if (self.id == 0) and (do_csd == 1):\n Qual = results.get('Qual') \n for i, ii in enumerate(self.method_interpol):\n print \"\\n[QUAL:] Interpol:\", ii, \"SNR0:\", Qual[i,0,0], \"SNR_cutff:\", Qual[i,0,1], \"SNR_mean:\", Qual[i,0,2], \"\\n VAF0:\", Qual[i,1,0], \"VAF_cutff:\", Qual[i,1,1], \"VAF_mean:\", Qual[i,1,2], \"\\n CF(subtracted):\", Qual[i,2,0], \"VAF(subtracted):\", Qual[i,2,1] \n \n VAF = results.get('VAF')\n freq_used = results.get('freq_used') \n iend = mlab.find(freq_used >= self.xmax)[0] \n print 'm(VAF)=' + str(np.mean(VAF[1][0,0:iend])) \n \n self.barrier() # wait for other nodes\n \n return results", "def tocsc(self):\n return self.tocsr().tocsc()", "def stoch_fix():\n df = stoch_test_data()\n return df", "def slow_stochastic(df):\n stochs = stochastics( df, 'Low', 'High', 'Close', 14, 3 )\n slow_k = stochs['k_slow']\n return slow_k", "def stft_scipy(data_in, ch_idx):\n\n res = stft(data_in[ch_idx, :], fs=self.fs, nperseg=self.nfft, window=self.window,\n detrend=self.detrend, noverlap=self.noverlap, padded=False,\n return_onesided=False, boundary=None)\n\n return res[2]", "def C_T(self):\n return self.generic_getter(\n get_sound_speed_temperature, \"C_T\", \"convert_sound_speed\"\n )", "def compute_scl(self):\n \n # We can initialize run-once stuff here\n interval = 5.0 # time between samples\n ch = 0 # channel index\n\n # ----------- Process loop for acquiring secondary data ---------------\n while self.run_state.value:\n\n # 1. Snapshot current data buffer (last 90 seconds)\n data,times = self.data_snapshot([90, 90])\n\n # 2. Calculate the desired metric and grab the current time-stamp\n if len(data[0])>0:\n new_value = np.mean(data[0])\n else:\n new_value = 0\n time_stamp = lsl.local_clock()\n\n # 3. Update secondary buffer \n self.push_sample_secondary(ch,time_stamp,new_value) \n\n # 4. 
Sleep until its time to calculate another value \n time.sleep(interval)\n # ---------------------------------------------------------------------", "def _excitonic_coft(self,SS,AG,n):\n \n # SystemBathInteraction\n sbi = AG.get_SystemBathInteraction()\n # CorrelationFunctionMatrix\n cfm = sbi.CC\n \n c0 = AG.monomers[0].get_egcf((0,1))\n Nt = len(c0)\n \n ct = numpy.zeros((Nt),dtype=numpy.complex128)\n\n # electronic states corresponding to single excited states\n elst = numpy.where(AG.which_band == 1)[0]\n for el1 in elst:\n for el2 in elst:\n if cfm.cpointer[el1-1,el2-1] == 0:\n continue\n coft = cfm.get_coft(el1-1,el2-1) \n for kk in AG.vibindices[el1]:\n for ll in AG.vibindices[el2]:\n ct += ((SS[kk,n]**2)*(SS[ll,n]**2)*coft)\n return ct", "def sdc(\n frames: np.ndarray,\n sample_rate: int,\n n_mfcc: int = 20,\n dct_type: int = 2,\n lifter: int = 0,\n *,\n kwargs={},\n):\n mfcc_features = mfcc(frames, sample_rate, n_mfcc,\n dct_type, lifter, **kwargs)\n mfcc_delta = librosa.feature.delta(mfcc_features)\n mfcc_delta2 = librosa.feature.delta(mfcc_features, order=2)\n return np.concatenate((mfcc_delta, mfcc_delta2), axis=1)", "def read_stc(filename):\n fid = open(filename, 'rb')\n\n stc = dict()\n\n fid.seek(0, 2) # go to end of file\n file_length = fid.tell()\n fid.seek(0, 0) # go to beginning of file\n\n # read tmin in ms\n stc['tmin'] = float(np.fromfile(fid, dtype=\">f4\", count=1))\n stc['tmin'] /= 1000.0\n\n # read sampling rate in ms\n stc['tstep'] = float(np.fromfile(fid, dtype=\">f4\", count=1))\n stc['tstep'] /= 1000.0\n\n # read number of vertices/sources\n vertices_n = int(np.fromfile(fid, dtype=\">I4\", count=1))\n\n # read the source vector\n stc['vertices'] = np.fromfile(fid, dtype=\">I4\", count=vertices_n)\n\n # read the number of timepts\n data_n = int(np.fromfile(fid, dtype=\">I4\", count=1))\n\n if ((file_length / 4 - 4 - vertices_n) % (data_n * vertices_n)) != 0:\n raise ValueError('incorrect stc file size')\n\n # read the data matrix\n stc['data'] = np.fromfile(fid, dtype=\">f4\", count=vertices_n * data_n)\n stc['data'] = stc['data'].reshape([data_n, vertices_n]).T\n\n # close the file\n fid.close()\n return stc", "def at_SES(self):\n\t t = self.t\n\t dM = self.dM\n\t rLbl = transLabel(t,self.P,self.t0,self.tdur*2)\n\n\t # Hack. K2 doesn't have seasons\n\t q = np.zeros(t.size) - 1\n\t season = np.mod(q,4)\n\t dtype = [('ses',float),('tnum',int),('season',int)]\n\t dM.fill_value = np.nan\n\t rses = np.array(list(zip(dM.filled(),rLbl['tLbl'],season)),dtype=dtype )\n\t rses = rses[ rLbl['tLbl'] >= 0 ]\n\n\t # If no good transits, break out\n\t if rses.size==0:\n\t return\n\n\t self.add_dset('rLbl',rLbl,\n\t description='Transit/continuum labeled (see transLabel doc string')\n\t self.add_dset('SES',rses,\n\t description='Record array with single event statistic')\n\t self.add_attr('num_trans',rses.size,\n\t description='Number of good transits')\n\n\t # Median SES, even/odd\n\t for sfx,i in zip(['even','odd'],[0,1]):\n\t medses = np.median( rses['ses'][rses['tnum'] % 2 == i] ) \n\t self.add_attr('SES_%s' % sfx, medses,\n\t description='Median SES %s' % sfx)\n\n\t # Median SES, different seasons\n\t for i in range(4):\n\t medses = -99 #Hack\n\t self.add_attr('SES_%i' % i, medses,\n\t description='Median SES [Season %i]' % i )", "def at_SES(self):\n\t t = self.t\n\t dM = self.dM\n\t rLbl = transLabel(t,self.P,self.t0,self.tdur*2)\n\n\t # Hack. 
K2 doesn't have seasons\n\t q = np.zeros(t.size) - 1\n\t season = np.mod(q,4)\n\t dtype = [('ses',float),('tnum',int),('season',int)]\n\t dM.fill_value = np.nan\n\t rses = np.array(list(zip(dM.filled(),rLbl['tLbl'],season)),dtype=dtype )\n\t rses = rses[ rLbl['tLbl'] >= 0 ]\n\n\t # If no good transits, break out\n\t if rses.size==0:\n\t return\n\n\t self.add_dset('rLbl',rLbl,\n\t description='Transit/continuum labeled (see transLabel doc string')\n\t self.add_dset('SES',rses,\n\t description='Record array with single event statistic')\n\t self.add_attr('num_trans',rses.size,\n\t description='Number of good transits')\n\n\t # Median SES, even/odd\n\t for sfx,i in zip(['even','odd'],[0,1]):\n\t medses = np.median( rses['ses'][rses['tnum'] % 2 == i] ) \n\t self.add_attr('SES_%s' % sfx, medses,\n\t description='Median SES %s' % sfx)\n\n\t # Median SES, different seasons\n\t for i in range(4):\n\t medses = -99 #Hack\n\t self.add_attr('SES_%i' % i, medses,\n\t description='Median SES [Season %i]' % i )", "def saccades(x, VFAC=5, MINDUR=8, SAMPLING=1000):\n MINDUR = MINDUR / 1000 * SAMPLING\n\n v = vecvel(x, SAMPLING=SAMPLING)\n\n # compute threshold\n medx = np.median(v[:, 0])\n msdx = np.sqrt(np.median((v[:, 0] - medx) ** 2))\n medy = np.median(v[:, 1])\n msdy = np.sqrt(np.median((v[:, 1] - medx) ** 2))\n\n if msdx < 1e-10:\n msdx = np.sqrt(np.mean(v[:, 0] ** 2) - (np.mean(v[:, 0])) ** 2)\n if msdx < 1e-10:\n raise ValueError(\"msdx<realmin in saccades\")\n if msdy < 1e-10:\n msdy = np.sqrt(np.mean(v[:, 1] ** 2) - (np.mean(v[:, 1])) ** 2)\n if msdy < 1e-10:\n raise ValueError(\"msdy<realmin in saccades\")\n\n radiusx = VFAC * msdx\n radiusy = VFAC * msdy\n\n radius = [radiusx, radiusy]\n\n # apply test criterion as threshold\n test = (v[:, 0] / radiusx) ** 2 + (v[:, 1] / radiusy) ** 2\n indx = np.where(test > 1)[0]\n\n # Find saccades\n N = len(indx) - 1\n nsac = 0\n sac = []\n dur = 1\n a = 0\n k = 0\n\n while k < N:\n\n if indx[k + 1] - indx[k] == 1:\n dur += 1\n\n else:\n if dur >= MINDUR:\n nsac += 1\n b = k\n sac.append(np.hstack((indx[a], indx[b], [0] * 5)))\n\n a = k + 1\n dur = 1\n\n k = k + 1\n\n if dur >= MINDUR:\n nsac += 1\n b = k\n sac.append(np.hstack((indx[a], indx[b], [0] * 5)))\n\n if nsac > 0:\n # compute peak velocity and horizontal and vertical components\n saccades = []\n for s in sac:\n # onset and offset\n a = s[0]\n b = s[1]\n\n # idx = a:b\n\n # saccade peak velocity\n vpeak = np.max(np.sqrt(v[a:b, 0] ** 2 + v[a:b, 1] ** 2))\n s[2] = vpeak\n\n # saccade vector\n dx = x.iloc[b, 0] - x.iloc[a, 0]\n dy = x.iloc[b, 1] - x.iloc[a, 1]\n s[3:5] = [dx, dy]\n\n # saccade amplitude\n minx = np.min(x.iloc[a:b, 0])\n maxx = np.max(x.iloc[a:b, 0])\n miny = np.min(x.iloc[a:b, 1])\n maxy = np.max(x.iloc[a:b, 1])\n\n ix1 = np.argmin(x.iloc[a:b, 0])\n ix2 = np.argmax(x.iloc[a:b, 0])\n iy1 = np.argmin(x.iloc[a:b, 1])\n iy2 = np.argmax(x.iloc[a:b, 1])\n\n dX = np.sign(ix2 - ix1) * (maxx - minx)\n dY = np.sign(iy2 - iy1) * (maxy - miny)\n\n s[5:7] = [dX, dY]\n\n saccades.append(s)\n\n out = [saccades, radius]\n\n else:\n out = [None, None]\n\n return out", "def ccdtemp(n=2):\n temp = camera.GetTemperature()\n camera.status.update()\n mesg, f1, f2, f3, f4 = camera.GetTemperatureStatus()\n print \"Sensor Temp=%6.1f, TargetTemp=%6.1f, AmbientTemp=%6.1f, CoolerVolts=%6.2f\" % (f1,f2,f3,f4)\n return temp", "def convertC(TF):\r\n TC = 5.0/9.0*(TF - 32.0)\r\n return TC", "def C_S(self):\n return self.generic_getter(\n get_sound_speed_entropy, \"C_S\", \"convert_sound_speed\"\n )", "def 
tcs2(self):\n S = self.M.allocState({})\n self.M.propagate(S, 0, 1)\n\n # set initial beam data\n S.ref_IonZ = self.refIonZ\n S.IonZ = self.IonZ\n\n S.moment0 = self.BC0\n S.moment1 = self.ENV0\n\n S.ref_IonEk = self.refIonEk\n \n S.phis = S.moment0[PS_S,:]\n S.IonEk = S.moment0[PS_PS,:]*MeVtoeV + S.ref_IonEk\n\n ### Reconstract data\n U = collections.OrderedDict()\n T = collections.OrderedDict()\n \n U,T = self.save(U, T, S)\n\n\n #S.clng = self.clng\n\n fin = len(self.M)\n\n H = self.M.propagate(S, 1, fin, observe=range(fin))\n \n ### Reconstract data\n U = collections.OrderedDict()\n T = collections.OrderedDict()\n\n for i in range(fin-1):\n U,T = self.save(U, T, H[i][1])\n\n return U,T", "def boot_strap_back_ccy( sdate,\r\n ccy,\r\n instruments,\r\n Day_Counter ):\r\n convention = BS_Con[ccy]\r\n years_days_cash = convention[\"Cash_Day\"]\r\n swap_freq = convention[\"Swap_Freq\"]\r\n Day_Counter.set_convention(convention[\"Swap\"])\r\n instruments = BS_TF.check_instruments( instruments )\r\n \r\n sdate = str_2_date( sdate )\r\n \r\n flag = False\r\n if BS_TF.check_if_last_day_of_month( sdate ):\r\n flag = True\r\n \"\"\" Sdate stands for the begin of bootstrapping \r\n Inputs structure instruments contains:\r\n 1\\cash rates list of tuple and number of contracts \r\n 2\\future rates list of tuple and number of contracts\r\n 3\\swap rates list of tuple and number of contracts\r\n Structure of rate list looks like (time,rates)\r\n \"\"\"\r\n Cash_Rate = instruments[\"cash\"][0]\r\n Cash_Num = len(Cash_Rate)\r\n \"\"\" NOTE: inputs of futures rate should have one start row \r\n with date and the discount factor is interpolated from \r\n cash rate\r\n \"\"\"\r\n Swap_Rate = instruments[\"swap\"][0]\r\n units = 100\r\n\r\n discount_curve = []\r\n ans_curve = []\r\n discount_curve.append( [sdate,1] )\r\n ans_curve.append([sdate,1])\r\n \"\"\" Remeber par swap key dates location in discount_curve\r\n \"\"\"\r\n \r\n for i in range( 0, int(Cash_Num) ):\r\n \"\"\" Begin bootstrapping from Cash rates\r\n \"\"\"\r\n yearfrac = (Cash_Rate[i][0]-sdate).days/years_days_cash\r\n DF = 1.0/(1.0+Cash_Rate[i][1]/units*yearfrac)\r\n discount_curve.append([Cash_Rate[i][0],DF])\r\n if (Cash_Rate[i][0]-sdate).days <= 200:\r\n ans_curve.append([Cash_Rate[i][0],DF])\r\n\r\n Swap_Rate = BS_TF.augument_by_frequency( Swap_Rate, int(12/swap_freq) )\r\n \r\n \"\"\" Only do interpolation for \r\n the first three swaps \r\n 0.5y, 1y and 1.5y\r\n \"\"\"\r\n \"\"\" Pre-calculate the sum of discount \r\n factors of 0.5y, 1y and 1.5y based \r\n on the current discount curve we have \r\n \"\"\"\r\n sum_df = 0\r\n swap_frequency = relativedelta( months = int(12/swap_freq) )\r\n \"\"\" Move cur_date back to do bootstrapping\r\n \"\"\"\r\n cur_date = sdate\r\n for i in range( 1, len(discount_curve) ):\r\n while cur_date+swap_frequency < Swap_Rate[0][0] \\\r\n and cur_date >= discount_curve[i-1][0] \\\r\n and cur_date < discount_curve[i][0]:\r\n nxt_date = cur_date+swap_frequency\r\n if flag:\r\n nxt_date = BS_TF.last_day_of_month(cur_date+swap_frequency)\r\n yearfrac = Day_Counter.yearfrac( cur_date, nxt_date )\r\n DF = BS_TF.interpolation_act( nxt_date,\r\n discount_curve[i-1][0],\r\n\t\t\t\t\t\t discount_curve[i-1][1],\r\n\t\t\t\t\t\t\t\t\t discount_curve[i][0],\r\n\t\t\t\t\t\t\t\t\t discount_curve[i][1] )\r\n sum_df += DF*yearfrac\r\n ans_curve.append([nxt_date,DF])\r\n cur_date += swap_frequency\r\n if flag:\r\n cur_date = BS_TF.last_day_of_month(cur_date)\r\n \r\n cur_date = Swap_Rate[0][0]\r\n \r\n for i in range( 0, 
len(Swap_Rate) ):\r\n# if sum_df == 0:\r\n# print(\"Warning Cannot get correct 0.5y, 1y and 1.5y discount factors...\")\r\n# print(\"Current Date:\"+str(cur_date))\r\n# print(ccy)\r\n# print(ans_curve)\r\n \"\"\" Sum of previous discount \r\n factors stored in \"sum_df\"\r\n \"\"\"\r\n nxt_date = cur_date+swap_frequency\r\n if flag:\r\n nxt_date = BS_TF.last_day_of_month(nxt_date)\r\n yearfrac = Day_Counter.yearfrac( cur_date, nxt_date )\r\n rates = Swap_Rate[i][1]\r\n cur_DF = (100-sum_df*rates)/(100+rates*yearfrac)\r\n discount_curve.append([cur_date,cur_DF])\r\n ans_curve.append([cur_date,cur_DF])\r\n sum_df += cur_DF*yearfrac\r\n cur_date += swap_frequency\r\n if flag:\r\n cur_date = BS_TF.last_day_of_month(cur_date)\r\n \r\n sorted_discount_curve = sorted( ans_curve, key = lambda tup: tup[0] )\r\n return sorted_discount_curve", "async def cmd_santaclock(self, **_):\n td = (dt(2019, 12, 25, 0, 0) - dt.utcnow()).total_seconds()\n if td < 0:\n return \"Christmas already happened...Gotta wait a bit more for presents. Enjoy the snow! Unless you live in the south where climate change prevents snow now.\"\n d = divmod(td, 86400)\n h = divmod(d[1], 3600)\n m = divmod(h[1], 60)\n s = int(m[1])\n return \":christmas_tree: **Santa Clock Says:** Santa is `{} days, {} hours, {} minutes, and {} seconds` away :christmas_tree:\".format(\n str(int(d[0])), str(int(h[0])), str(int(m[0])), str(s)\n )", "def TB(t,init,rhoS,deltaSC,rhoC,deltaCB,rhoB):\n\n y=SCB(t,init,rhoS,deltaSC,rhoC,deltaCB,rhoB)\n T=np.sum(y,axis=0)\n Y=np.vstack((T,y[2]))\n return(Y)", "def test_set_sT(self):\n s = State(substance=\"water\")\n s.sT = Q_(7496.2021523754065, \"J/(kg*K)\"), Q_(400.0, \"K\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.sT[1], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.sT[0], Q_(7496.2021523754065, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.u, Q_(2547715.3635084038, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(7496.2021523754065, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(2009.2902478486988, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(1509.1482452129906, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(1.801983936953226, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(2730301.3859201893, \"J/kg\")) # type: ignore\n assert s.x is None", "def spin_state(c, dt, T, t0=0.0, t1=-1.):\n\n profile = c.sum(0)\n edge = fitting.find_edge(profile)\n \n I = int(round(T/float(dt)))\n i0 = edge + int(round(t0/float(dt)))\n y = np.empty((c.shape[0],))\n for i, slot in enumerate(c):\n y[i] = slot[i0:i0+I].sum()\n if t1 >= 0:\n i1 = edge + int(round(t1/float(dt))) \n y1 = np.empty((c.shape[0],))\n for i, slot in enumerate(c):\n y1[i] = slot[i1:i1+I].sum()\n y = y/y1*y1.mean()\n return y, profile, edge", "def at_s2ncut(self):\n\n\t # Notch out the transit and recompute\n\t fmcut = self.fm.copy()\n\t fmcut.fill_value=0\n\t # Widen by twice the transit duration\n\t tmask = self.rLbl['tRegLbl'] >= 0\n\t tmask = np.convolve(\n\t tmask.astype(float),\n\t np.ones(self.header['tdurcad'] * 2),\n\t mode='same'\n\t )\n\t tmask = tmask.astype(bool)\n\t fmcut.mask = fmcut.mask | tmask\n\t grid = tfind.Grid(self.t,fmcut)\n\n\n\t pgram_params = [\n\t dict(Pcad1=self.Pcad - 1, Pcad2=self.Pcad + 1, twdG = [self.header['tdurcad']])\n\t ]\n\t pgram = grid.periodogram(pgram_params,mode='max')\n\t idxmax = pgram.s2n.idxmax()\n\n\t 
dkeys = 's2ncut s2ncut_t0 s2ncut_mean'.split()\n\t pkeys = 's2n t0 mean'.split()\n\n\t for dkey,pkey in zip(dkeys,pkeys):\n\t self.add_attr(dkey,pgram.ix[idxmax,pkey])", "def at_s2ncut(self):\n\n\t # Notch out the transit and recompute\n\t fmcut = self.fm.copy()\n\t fmcut.fill_value=0\n\t # Widen by twice the transit duration\n\t tmask = self.rLbl['tRegLbl'] >= 0\n\t tmask = np.convolve(\n\t tmask.astype(float),\n\t np.ones(self.header['tdurcad'] * 2),\n\t mode='same'\n\t )\n\t tmask = tmask.astype(bool)\n\t fmcut.mask = fmcut.mask | tmask\n\t grid = tfind.Grid(self.t,fmcut)\n\n\n\t pgram_params = [\n\t dict(Pcad1=self.Pcad - 1, Pcad2=self.Pcad + 1, twdG = [self.header['tdurcad']])\n\t ]\n\t pgram = grid.periodogram(pgram_params,mode='max')\n\t idxmax = pgram.s2n.idxmax()\n\n\t dkeys = 's2ncut s2ncut_t0 s2ncut_mean'.split()\n\t pkeys = 's2n t0 mean'.split()\n\n\t for dkey,pkey in zip(dkeys,pkeys):\n\t self.add_attr(dkey,pgram.ix[idxmax,pkey])", "def stochasticModel(x, H, N, stocf):\n\thN = N/2+1 \t\t# positive size of fft\n\tNo2 = N/2\t\t\t\t\t\t\t# half of N\n\tif (hN*stocf < 3): # raise exception if decimation factor too small\n\t\traise ValueError(\"Stochastic decimation factor too small\")\n\t\t\n\tif (stocf > 1): # raise exception if decimation factor too big\n\t\traise ValueError(\"Stochastic decimation factor above 1\")\n\t\n\tif (H <= 0): # raise error if hop size 0 or negative\n\t\traise ValueError(\"Hop size (H) smaller or equal to 0\")\n\t\t\n\tif not(isPower2(N)): # raise error if N not a power of twou\n\t\traise ValueError(\"FFT size (N) is not a power of 2\")\n\t\t\n\tw = hanning(N) # analysis/synthesis window\n\tx = np.append(np.zeros(No2),x) # add zeros at beginning to center first window at sample 0\n\tx = np.append(x,np.zeros(No2)) # add zeros at the end to analyze last sample\n\tpin = No2 # initialize sound pointer in middle of analysis window \n\tpend = x.size - No2 # last sample to start a frame\n\ty = np.zeros(x.size) # initialize output array\n\twhile pin<=pend: \n\t#-----analysis----- \n\t\txw = x[pin-No2:pin+No2]*w # window the input sound\n\t\tX = fft(xw) # compute FFT\n\t\tmX = 20 * np.log10(abs(X[:hN])) # magnitude spectrum of positive frequencies\n\t\tstocEnv = resample(np.maximum(-200, mX), hN*stocf) # decimate the mag spectrum \n\t#-----synthesis-----\n\t\tmY = resample(stocEnv, hN) # interpolate to original size\n\t\tpY = 2*np.pi*np.random.rand(hN) # generate phase random values\n\t\tY = np.zeros(N, dtype = complex)\n\t\tY[:hN] = 10**(mY/20) * np.exp(1j*pY) # generate positive freq.\n\t\tY[hN:] = 10**(mY[-2:0:-1]/20) * np.exp(-1j*pY[-2:0:-1]) # generate negative freq.\n\t\tfftbuffer = np.real(ifft(Y)) # inverse FFT\n\t\ty[pin-No2:pin+No2] += w*fftbuffer # overlap-add\n\t\tpin += H \t\t\t\t\t # advance sound pointer\n\ty = np.delete(y, range(No2)) # delete half of first window which was added \n\ty = np.delete(y, range(y.size-No2, y.size)) # delete half of last window which was added \n\treturn y", "def _stft(d, coordinate=None, options=None):\n default_options = {'window': 'hann',\n 'nperseg': 256,\n 'noverlap': None,\n 'nfft': None,\n 'detrend': False,\n 'return_onesided': True,\n 'boundary': 'zeros',\n 'padded': True\n }\n\n _options = flap.config.merge_options(default_options, options, data_source=d.data_source, section='STFT')\n\n window=_options['window']\n nperseg=_options['nperseg']\n noverlap=_options['noverlap']\n nfft=_options['nfft']\n detrend=_options['detrend']\n return_onesided=_options['return_onesided']\n boundary=_options['boundary']\n 
padded=_options['padded']\n\n if (noverlap is None):\n noverlap = nperseg // 4 * 3 # the default setting for stft - it should contain 4x the information\n\n if (d.data is None):\n raise ValueError(\"Cannot do spectral analysis without data.\")\n if (coordinate is None):\n c_names = d.coordinate_names()\n try:\n c_names.index('Time') # this will fail if Time coordinate doesn't exist\n _coordinate = 'Time'\n except ValueError:\n raise ValueError(\"No coordinate is given for spectrum calculation and no Time coordinate found.\")\n else:\n _coordinate = coordinate\n try:\n coord_obj = d.get_coordinate_object(_coordinate)\n except Exception as e:\n raise e\n\n if (len(coord_obj.dimension_list) != 1):\n raise ValueError(\"STFT calculation is possible only along coordinates changing along one dimension.\")\n if (not coord_obj.mode.equidistant):\n raise ValueError(\"STFT calculation is possible only along equidistant coordinates.\")\n\n proc_dim = coord_obj.dimension_list[0]\n # the dimension to transform along is the only element in this ccordinate's dimension list\n\n fs = 1 / coord_obj.step[0] # sampling frequency is the step of time axis\n\n f_ax, t_ax, stft = signal.stft(d.data, fs=fs, window=window, nperseg=nperseg, noverlap=noverlap,\n nfft=nfft, detrend=detrend, return_onesided=return_onesided,\n boundary=boundary, padded=padded, axis=proc_dim)\n # all parameters are taken from inputs, except the axis to transform along\n\n d_out = type(d)(data_array=stft,\n # error=out_err, # error calculation not yet implemented\n coordinates=d.coordinates,\n exp_id=d.exp_id,\n data_unit=flap.coordinate.Unit(\"Spectral density\"))\n\n # Finding all coordinates which have common dimension with the converted one.\n # These will be deleted.\n del_coord_list = []\n for c in d_out.coordinates:\n try:\n c.dimension_list.index(proc_dim)\n del_coord_list.append(c.unit.name)\n except ValueError:\n pass\n for c in del_coord_list:\n d_out.del_coordinate(c)\n\n # create new coordinate axes from stft return axes (everything is equidistant)\n # Frequency coordinate is parallel with the newly created dimension\n c_f = flap.coordinate.Coordinate(name='Frequency',\n unit='Hz',\n mode=flap.coordinate.CoordinateMode(equidistant=True),\n shape=[],\n start=f_ax[0],\n step=(f_ax[-1] - f_ax[0]) / len(f_ax),\n dimension_list=[proc_dim])\n\n c_t = flap.coordinate.Coordinate(name='Time',\n unit='s',\n mode=flap.coordinate.CoordinateMode(equidistant=True),\n shape=[],\n start=t_ax[0] + coord_obj.start,\n step=(t_ax[-1] - t_ax[0]) / len(t_ax), # *np.prod(stft.shape[:-2]),\n # has to increase step if non-1d data (due to scipy's interpretation of fs)\n dimension_list=[len(stft.shape) - 1])\n\n d_out.add_coordinate_object(c_t, index=0)\n d_out.add_coordinate_object(c_f, index=0)\n return d_out", "def _insert_CD_state(self, alpha, clear=True):\n # Defines a new thermodynamic state based on the neighboring state\n neighbor_ind = [alpha < p['alpha']\n for p in self.data['CD'].protocol].index(True) - 1\n params_n = self.system.paramsFromAlpha(\n alpha, params_o=self.data['CD'].protocol[neighbor_ind])\n\n # For sampling importance resampling,\n # prepare an augmented matrix for pymbar calculations\n # with a new thermodynamic state\n (u_kln_s, N_k) = self._u_kln(self.data['CD'].Es, self.data['CD'].protocol)\n (K, L, N) = u_kln_s.shape\n\n u_kln_n = self._u_kln(self.data['CD'].Es, [params_n])[0]\n L += 1\n N_k = np.append(N_k, [0])\n\n u_kln = np.zeros([K, L, N])\n u_kln[:, :-1, :] = u_kln_s\n for k in range(K):\n u_kln[k, -1, :] = 
u_kln_n[k, 0, :]\n\n # Determine SIR weights\n weights = self.run_MBAR(u_kln, N_k, augmented=True)[1][:, -1]\n weights = weights / sum(weights)\n\n # Resampling\n # Convert linear indices to 3 indicies: state, cycle, and snapshot\n cum_N_state = np.cumsum([0] + list(N_k))\n cum_N_cycle = [np.cumsum([0] + [self.data['CD'].Es[k][c]['MM'].shape[0] \\\n for c in range(len(self.data['CD'].Es[k]))]) for k in range(len(self.data['CD'].Es))]\n\n def linear_index_to_snapshot_index(ind):\n state_index = list(ind < cum_N_state).index(True) - 1\n nis_index = ind - cum_N_state[state_index]\n cycle_index = list(nis_index < cum_N_cycle[state_index]).index(True) - 1\n nic_index = nis_index - cum_N_cycle[state_index][cycle_index]\n return (state_index, cycle_index, nic_index)\n\n def snapshot_index_to_linear_index(state_index, cycle_index, nic_index):\n return cum_N_state[state_index] + cum_N_cycle[state_index][\n cycle_index] + nic_index\n\n # Terms to copy\n if self.args.params['CD']['pose'] > -1:\n # Pose BPMF\n terms = ['MM',\\\n 'k_angular_ext','k_spatial_ext','k_angular_int'] + scalables\n else:\n # BPMF\n terms = ['MM', 'site'] + scalables\n\n CD_Es_s = []\n confs_s = []\n for c in range(len(self.data['CD'].Es[0])):\n CD_Es_c = dict([(term, []) for term in terms])\n confs_c = []\n for n_in_c in range(len(self.data['CD'].Es[-1][c]['MM'])):\n if (cum_N_cycle[-1][c] == 0):\n (snapshot_s,snapshot_c,snapshot_n) = linear_index_to_snapshot_index(\\\n np.random.choice(range(len(weights)), size = 1, p = weights)[0])\n else:\n snapshot_c = np.inf\n while (snapshot_c > c):\n (snapshot_s,snapshot_c,snapshot_n) = linear_index_to_snapshot_index(\\\n np.random.choice(range(len(weights)), size = 1, p = weights)[0])\n for term in terms:\n CD_Es_c[term].append(\\\n np.copy(self.data['CD'].Es[snapshot_s][snapshot_c][term][snapshot_n]))\n if self.args.params['CD']['keep_intermediate']:\n # Has not been tested:\n confs_c.append(\\\n np.copy(self.data['CD'].confs['samples'][snapshot_s][snapshot_c]))\n for term in terms:\n CD_Es_c[term] = np.array(CD_Es_c[term])\n CD_Es_s.append(CD_Es_c)\n confs_s.append(confs_c)\n\n # Insert resampled values\n self.data['CD'].protocol.insert(neighbor_ind + 1, params_n)\n self.data['CD'].Es.insert(neighbor_ind + 1, CD_Es_s)\n self.data['CD'].confs['samples'].insert(neighbor_ind + 1, confs_s)\n self.data['CD'].confs['replicas'].insert(neighbor_ind+1, \\\n np.copy(self.data['CD'].confs['replicas'][neighbor_ind]))\n\n if clear:\n self._clear_f_RL()", "def smethod(fx,L=11,nh=2**8,tstep=2**7,ng=1,df=1.0,nfbins=2**10,sigmaL=None):\r\n \t\r\n df=float(df)\r\n \r\n if type(fx) is list:\r\n fx=np.array(fx)\r\n try:\r\n fn,fm=fx.shape\r\n if fm>fn:\r\n fm,fn=fx.shape\r\n except ValueError:\r\n fn=len(fx)\r\n fm=1\r\n if fm>1:\r\n print 'computing cross spectra'\r\n #compute the analytic signal of function f and dctrend\r\n #fa=sps.hilbert(dctrend(fx[0]))\r\n #fb=sps.hilbert(dctrend(fx[1]))\r\n fa=fx[0]\r\n fb=fx[1]\r\n fa=fa.reshape(fn)\r\n fb=fb.reshape(fn)\r\n pxa,tlst,flst=stft(fa,nh=nh,tstep=tstep,ng=ng,df=df,nfbins=nfbins)\r\n pxb,tlst,flst=stft(fb,nh=nh,tstep=tstep,ng=ng,df=df,nfbins=nfbins)\r\n pxx=pxa*pxb.conj()\r\n else:\r\n #compute the analytic signal of function f and dctrend\r\n #fa=sps.hilbert(dctrend(fx))\r\n fa=fx\r\n fa=fa.reshape(fn)\r\n fb=fa\r\n pxx,tlst,flst=stft(fa,nh=nh,tstep=tstep,ng=ng,df=df,nfbins=nfbins)\r\n# pxb=pxa\r\n\r\n #make an new array to put the new tfd in\r\n tfarray=abs(pxx)**2\r\n #get shape of spectrogram\r\n nf,nt=tfarray.shape\r\n #create a list of 
frequency shifts\r\n Llst=np.arange(start=-L/2+1,stop=L/2+1,step=1,dtype='int')\r\n #create a frequency gaussian window\r\n if sigmaL==None:\r\n sigmaL=L/(1*np.sqrt(2*np.log(2)))\r\n p=sps.gaussian(L,sigmaL)\r\n #make a matrix of windows\r\n pm=np.zeros((L,nt))\r\n for kk in range(nt):\r\n pm[:,kk]=p\r\n \r\n #loop over frequency and calculate the s-method \r\n for ff in range(L/2,nf-L/2):\r\n tfarray[ff,:]=tfarray[ff,:]+2*np.real(np.sum(pm*pxx[ff+Llst,:]*\r\n pxx[ff-Llst,:].conj(),axis=0))\r\n tfarray=tfarray/L\r\n \r\n return tfarray,tlst,flst,pxx", "def TwoModeSqueezedHD(Ns,t,nth,shots):\n \n s1 = (1+1j)*np.zeros(shots)\n s2 = (1+1j)*np.zeros(shots)\n \n r = np.arcsinh(np.sqrt(Ns/2))\n \n for i in range(shots):\n prog= sf.Program(2)\n \n with prog.context as q:\n \n sf.ops.S2gate(r,0) | (q[0],q[1]) # State preparation\n sf.ops.ThermalLossChannel(t,nth) | q[0] # Thermal loss channel mimicing target\n \n sf.ops.MeasureHD | q[0] # Het. Msmnt of signal 1\n sf.ops.MeasureHD | q[1] # Het. Msmnt of signal 2\n\n # Need to run twice because of bug in the bosonic backend in dealing with repeated HD measurements\n \n eng = sf.Engine(\"bosonic\")\n results = eng.run(prog)\n eng = sf.Engine(\"bosonic\")\n results = eng.run(prog)\n \n #Collecting the samples\n samples = results.all_samples\n \n #Creating the measurement records\n s1[i] = samples[0][0]\n s2[i] = samples[1][0]\n \n # Interation over number of shots is done, outputing the records\n \n return s1,s2", "def _ssc(pars, nu):\n\t(log10_gamma_max, redshift, delta, log10_R, log10_B, log10_Norm, index, log10_gamma_c) = pars\n\n\t# define from the input parameters the dictionary to be feeded to the model\n\t# we neglect the time-dependent part for now\n\ttime_grid = dict(time_min=0, time_max=3, time_bins=50, time_inj=2)\n\t# gamma grid\n\tgamma_grid = dict(log10_gamma_min=2, log10_gamma_max=log10_gamma_max, gamma_bins=50)\n\t# emission region, again time dependent part is ignored\n\temission_region = dict(log10_R=log10_R, R_unit='cm', delta=delta,\n\t\t\t\t\t\t log10_B=log10_B, B_unit='G', t_esc=1.5, z=redshift)\n\t# injected spectrum\n\tinjected_spectrum = dict(type='ExponentialCutoffPowerLaw',\n\t\t\t\t\t\t\t log10_Norm=log10_Norm,\n\t\t\t\t\t\t\t Norm_unit='cm-3',\n\t\t\t\t\t\t\t index=index,\n\t\t\t\t\t\t\t log10_gamma_c=log10_gamma_c)\n\n\t# dump into a tmp yaml file\n\twith open('tmp_config.yaml', 'w') as yaml_file:\n\t\tyaml.dump({'time_grid': time_grid,\n\t\t\t\t 'gamma_grid': gamma_grid,\n\t\t\t\t 'emission_region': emission_region,\n\t\t\t\t 'injected_spectrum': injected_spectrum},\n\t\t\t\t yaml_file, default_flow_style=False)\n\n\n\t# initialize the ssc model\n\tmodel = BaseModel('tmp_config.yaml')\n\n\t# define the base electron population for now just as the injected one\n\tgamma = model.gamma\n\tN_e = model.N_e_inj(gamma)\n\n\t# test synchrotron\n\tsyn = Synchrotron(model)\n\tic = InverseCompton(model)\n\n\tobs_nu = nu * u.Hz\n\t# de - boosting, for intrinsic values\n\tnu = obs_nu / model.blob.delta\n\n\t# transform to energy\n\tE = const.h * obs_nu\n\n\tsyn_flux = syn.flux(nu, N_e, self_absorption=True)\n\tic_flux = ic.flux(nu, N_e, ebl_absorption=True)\n\n\tsed = (E**2*(syn_flux + ic_flux)).to('erg cm-2 s-1')\n\n\treturn sed.value", "def test_csc():\n c=14\n assert {'diff':EF.csc(c).der, 'value': EF.csc(c).val}=={'diff':0, 'value': 1/math.sin(c)}", "def scc (self, reset=False):\n if reset or (self.__scc is None):\n self.tarjan(reset)\n return self.__scc", "def LdsSts():\n\n global Asm\n\n if dec.Asm.Mnemonic == 'LDS':\n # LDS, 
get register first, then the address\n reg = GetReg() << 4\n\n if not assem.MoreParameters():\n # Oops, only the reigster was given\n errors.DoError('missoper', False)\n # Write dummy words\n target.CodeWord(0)\n target.CodeWord(0)\n return\n\n value = assem.EvalExpr()[0]\n\n else:\n # STS, get address first, then the register\n value = assem.EvalExpr()[0]\n\n if not assem.MoreParameters():\n # Oops, only the address is given\n errors.DoError('missoper', False)\n # Write dummy words\n target.CodeWord(0)\n target.CodeWord(0)\n return\n\n reg = GetReg() << 4\n\n if dec.Asm.AVR_Family != 1 and dec.Asm.AVR_Family != 5:\n # Normal behaviour of these instructions\n target.CodeWord(dec.Asm.Instructions[dec.Asm.Mnemonic][3] + reg)\n target.CodeWord(value)\n else:\n # ATtiny or Reduced Core behaviour\n if dec.Asm.Instructions[dec.Asm.Mnemonic][3] == 0x9000:\n # It's LDS\n opcode = 0xa000\n else:\n # It's STS\n opcode = 0xa800\n value = (value & 0x0F) + ((value & 0x70) << 4)\n target.CodeWord(opcode + reg + value)\n dec.Asm.Timing = '1'\n\n NoMore()", "def dS_dt(self, species_conc, t):\n ret = self.model.species_volume_prefactor * numpy.dot(self.model.N, self.flux(species_conc, t))\n # handle rate rules\n for var in self.model.rate_rules:\n f = self.model.rate_rules[var].replace(Model.TIME_VARIABLE, str(t))\n f = self.model.rate_rules[var].replace(Model.TIME_VARIABLE, str(t))\n species2conc = dict(zip(self.model.ode_variables, species_conc))\n species2conc['math'] = globals()['math']\n # rate = eval( f, species2conc, self._external_species_conc )\n rate = eval(f, self.model.external_species_concentrations, species2conc)\n if self.model.species_2_position.has_key(var):\n ret[self.model.species_2_position[var]] = rate\n else:\n l = ret.tolist()\n l.append(rate)\n ret = numpy.array(l)\n return ret", "def set_temps_cuisson(self, tc: int):\n self.temps_cuisson = tc", "def _STDP(self, fired):\n if not np.any(fired): return\n\n expy = self.g_max * np.exp(-self.last_fired / self.tau_syn)\n self.S[fired, :] += self.alpha_plus * expy.reshape((1,-1))\n self.S[:, fired] -= self.alpha_minus * expy.reshape((-1,1))\n np.clip(self.S[fired,:], -self.g_max, self.g_max,\n out=self.S[fired,:])\n np.clip(self.S[:,fired], -self.g_max, self.g_max,\n out=self.S[:,fired])\n # This doesn't prevent itself from changing signs!", "def Tt(s_c, point, system):\n Tx = tra(s_c, point, system)\n Tx.get_time()\n return Tx.time", "def calc_saturation_curves(self):\n HEOS = CP.AbstractState(self.additional_backend, self.fluid)\n PCSAFT = CP.AbstractState(self.backend, self.fluid)\n self.dictL, self.dictV = {}, {}\n for Q, dic in zip([0, 1], [self.dictL, self.dictV]):\n # rhomolar, smolar, hmolar, T, p, umolar = [], [], [], [], [], []\n rhomolar, T, p = [], [], []\n for _T in np.logspace(np.log10(HEOS.keyed_output(CP.iT_triple)), np.log10(HEOS.keyed_output(CP.iT_critical)), 500):\n try:\n PCSAFT.update(CP.QT_INPUTS, Q, _T)\n # print('T', PCSAFT.T())\n # print('p', PCSAFT.p())\n # print('rhomolar', PCSAFT.rhomolar())\n if (PCSAFT.p() < 0): raise ValueError('P is negative:' + str(PCSAFT.p()))\n PCSAFT.T(), PCSAFT.p(), PCSAFT.rhomolar()\n # PCSAFT.hmolar(), PCSAFT.smolar(), PCSAFT.umolar()\n\n T.append(PCSAFT.T())\n p.append(PCSAFT.p())\n rhomolar.append(PCSAFT.rhomolar())\n # hmolar.append(PCSAFT.hmolar())\n # smolar.append(PCSAFT.smolar())\n # umolar.append(PCSAFT.umolar())\n except ValueError as VE:\n myprint(1, 'satT error:', VE, '; T:', '{T:0.16g}'.format(T=_T), 'T/Tc:', _T / HEOS.keyed_output(CP.iT_critical))\n\n 
dic.update(dict(T=np.array(T),\n P=np.array(p),\n Dmolar=np.array(rhomolar)))\n # Hmolar=np.array(hmolar),\n # Smolar=np.array(smolar)))\n # Umolar=np.array(umolar)))", "def multi_taper_csd(s, Fs=2 * np.pi, NW=None, BW=None, low_bias=True,\r\n adaptive=False, sides='default', NFFT=None):\r\n # have last axis be time series for now\r\n N = s.shape[-1]\r\n M = int(np.product(s.shape[:-1]))\r\n\r\n if BW is not None:\r\n # BW wins in a contest (since it was the original implementation)\r\n norm_BW = np.round(BW * N / Fs)\r\n NW = norm_BW / 2.0\r\n elif NW is None:\r\n # default NW\r\n NW = 4\r\n # (else BW is None and NW is not None) ... all set\r\n Kmax = int(2 * NW)\r\n\r\n # if the time series is a complex vector, a one sided PSD is invalid:\r\n if (sides == 'default' and np.iscomplexobj(s)) or sides == 'twosided':\r\n sides = 'twosided'\r\n elif sides in ('default', 'onesided'):\r\n sides = 'onesided'\r\n\r\n # Find the direct spectral estimators S_k(f) for k tapered signals..\r\n # don't normalize the periodograms by 1/N as normal.. since the taper\r\n # windows are orthonormal, they effectively scale the signal by 1/N\r\n spectra, eigvals = tapered_spectra(\r\n s, (NW, Kmax), NFFT=NFFT, low_bias=low_bias\r\n )\r\n NFFT = spectra.shape[-1]\r\n K = len(eigvals)\r\n # collapse spectra's shape back down to 3 dimensions\r\n spectra.shape = (M, K, NFFT)\r\n\r\n # compute the cross-spectral density functions\r\n last_freq = NFFT / 2 + 1 if sides == 'onesided' else NFFT\r\n\r\n if adaptive:\r\n w = np.empty((M, K, last_freq))\r\n nu = np.empty((M, last_freq))\r\n for i in range(M):\r\n w[i], nu[i] = utils.adaptive_weights(\r\n spectra[i], eigvals, sides=sides\r\n )\r\n else:\r\n weights = np.sqrt(eigvals).reshape(K, 1)\r\n\r\n csd_pairs = np.zeros((M, M, last_freq), 'D')\r\n for i in range(M):\r\n if adaptive:\r\n wi = w[i]\r\n else:\r\n wi = weights\r\n for j in range(i + 1):\r\n if adaptive:\r\n wj = w[j]\r\n else:\r\n wj = weights\r\n ti = spectra[i]\r\n tj = spectra[j]\r\n csd_pairs[i, j] = mtm_cross_spectrum(ti, tj, (wi, wj), sides=sides)\r\n\r\n csdfs = csd_pairs.transpose(1,0,2).conj()\r\n csdfs += csd_pairs\r\n diag_idc = (np.arange(M), np.arange(M))\r\n csdfs[diag_idc] /= 2\r\n csdfs /= Fs\r\n \r\n if sides == 'onesided':\r\n freqs = np.linspace(0, Fs / 2, NFFT / 2 + 1)\r\n else:\r\n freqs = np.linspace(0, Fs, NFFT, endpoint=False)\r\n\r\n return freqs, csdfs", "def swclockcorr(met,met_tolerance=50,):\n import os\n import numpy as np\n from astropy.io import fits\n # uncorrected date and time\n times=swtime2JD(met,useFtool=False)\n date = times[3][:10]\n time = times[3][11:19]\n # get file with corrections\n caldb = os.getenv(\"CALDB\")\n command=\"quzcif swift sc - - clock \"+date+\" \"+time+\" - > quzcif.out\"\n os.system(command)\n f = open(\"quzcif.out\")\n result = True\n try:\n tcorfile, ext = f.read().split()\n ext = int(ext)\n f.close()\n except:\n f.close()\n f = open(\"quzcif.out\")\n print (f.readlines())\n f.close()\n os.system(\"rm -f quzcif.out\")\n raise RuntimeError('Swift SC clock file not found')\n #return np.polyval(np.array([4.92294757e-08, -8.36992570]),met), result \n os.system(\"rm -f quzcif.out\")\n xx = fits.open(tcorfile)\n x = xx[ext].data\n k = (met >= x['tstart']) & (met < x['tstop'])\n if np.sum(k) != 1:\n if met > x['tstart'][-1]:\n k = (met >= x['tstart'])\n k = np.max(np.where(k))\n if (met - x['tstart'][k]) > met_tolerance*86400.0:\n print (met, x['tstart'][k], met_tolerance)\n print (\"WARNING: update the Swift SC CALDB - it is out of date\")\n 
result=False\n else:\n result=True \n else: \n raise IOError('input MET not found in Swift SC clock file')\n #return np.polyval(np.array([4.92294757e-08, -8.36992570]),met), result\n t1 = old_div((met - x['tstart'][k]),86400.0)\n tcorr = x['toffset'][k] + ( x['C0'][k] + \n x['C1'][k]*t1 + x['C2'][k]*t1*t1)*1.0e-6\n tcorr = -tcorr # add tcorr to MET to get time in UT\n xx.close()\n return tcorr, result", "def ftcs(_x,_y,_cs,_dx,_dt):\n #todo: in a loop ... think about a more pythonic way to do this\n\n # s = _cs * _dt / (2. * _dx)\n # next_y = np.zeros(np.shape(_y)) #next time step\n # nLen = len(_y)\n # for n in range(nLen):\n # n_next = (n + 1) if n < (nLen-1) else 0\n # n_prev = (n - 1) if n > 0 else nLen-1\n #\n # next_y[n] = _y[n] - s*(_y[n_next] - _y[n_prev])\n #\n # print(n, s, next_y[n], _y[n], _y[n_next], _y[n_prev])\n #\n # next_y = _y[:] - s * (np.append(_y[1:], _y[0]) - np.append(_y[-1], _y[:-1]))\n #\n #\n # return next_y\n\n #this can get out of hand fast (overflow), so will limit the max value\n if np.max(_y) > 1e30:\n _y /= 1e30 #rescale, but keep shape (it is a mess anyway, so there is no real harm)\n\n s = _cs * _dt / (2. * _dx)\n next_y = _y[:] - s * (np.append(_y[1:], _y[0]) - np.append(_y[-1], _y[:-1]))\n\n\n return next_y", "def TST_LCE(S,N1,N_per,alpha,model_C2ST, w_C2ST, b_C2ST, device,dtype):\r\n np.random.seed(seed=1102)\r\n torch.manual_seed(1102)\r\n torch.cuda.manual_seed(1102)\r\n N = S.shape[0]\r\n f = torch.nn.Softmax()\r\n output = f(model_C2ST(S).mm(w_C2ST) + b_C2ST)\r\n # pred_C2ST = output.max(1, keepdim=True)[1]\r\n STAT = abs(output[:N1,0].type(torch.FloatTensor).mean() - output[N1:,0].type(torch.FloatTensor).mean())\r\n STAT_vector = np.zeros(N_per)\r\n for r in range(N_per):\r\n ind = np.random.choice(N, N, replace=False)\r\n # divide into new X, Y\r\n ind_X = ind[:N1]\r\n ind_Y = ind[N1:]\r\n # print(indx)\r\n STAT_vector[r] = abs(output[ind_X,0].type(torch.FloatTensor).mean() - output[ind_Y,0].type(torch.FloatTensor).mean())\r\n S_vector = np.sort(STAT_vector)\r\n threshold = S_vector[np.int(np.ceil(N_per * (1 - alpha)))]\r\n threshold_lower = S_vector[np.int(np.ceil(N_per * alpha))]\r\n h = 0\r\n if STAT.item() > threshold:\r\n h = 1\r\n return h, threshold, STAT", "def TXS(self, *_):\n self.reg.S = self.reg.X\n self.reg.N = self.reg.S << 7\n self.reg.Z = self.reg.S == 0", "def stft():\n trainer = {\n 'model': {\n 'encoder': {\n 'factory': 'padertorch.contrib.examples.source_separation.tasnet.tas_coders.StftEncoder'\n },\n 'decoder': {\n 'factory': 'padertorch.contrib.examples.source_separation.tasnet.tas_coders.IstftDecoder'\n },\n }\n }", "def state_transition(CS, CP, state, events, soft_disable_timer, v_cruise_kph, AM):\n enabled = isEnabled(state)\n\n v_cruise_kph_last = v_cruise_kph\n\n # if stock cruise is completely disabled, then we can use our own set speed logic\n if not CP.enableCruise:\n v_cruise_kph = update_v_cruise(v_cruise_kph, CS.buttonEvents, enabled)\n elif CP.enableCruise and CS.cruiseState.enabled:\n v_cruise_kph = CS.cruiseState.speed * CV.MS_TO_KPH\n\n # decrease the soft disable timer at every step, as it's reset on\n # entrance in SOFT_DISABLING state\n soft_disable_timer = max(0, soft_disable_timer - 1)\n\n # DISABLED\n if state == State.disabled:\n if get_events(events, [ET.ENABLE]):\n if get_events(events, [ET.NO_ENTRY]):\n for e in get_events(events, [ET.NO_ENTRY]):\n AM.add(str(e) + \"NoEntry\", enabled)\n\n else:\n if get_events(events, [ET.PRE_ENABLE]):\n state = State.preEnabled\n else:\n state = State.enabled\n 
AM.add(\"enable\", enabled)\n v_cruise_kph = initialize_v_cruise(CS.vEgo, CS.buttonEvents, v_cruise_kph_last)\n\n # ENABLED\n elif state == State.enabled:\n if get_events(events, [ET.USER_DISABLE]):\n state = State.disabled\n AM.add(\"disable\", enabled)\n\n elif get_events(events, [ET.IMMEDIATE_DISABLE]):\n state = State.disabled\n for e in get_events(events, [ET.IMMEDIATE_DISABLE]):\n AM.add(e, enabled)\n\n elif get_events(events, [ET.SOFT_DISABLE]):\n state = State.softDisabling\n soft_disable_timer = 300 # 3s\n for e in get_events(events, [ET.SOFT_DISABLE]):\n AM.add(e, enabled)\n\n # SOFT DISABLING\n elif state == State.softDisabling:\n if get_events(events, [ET.USER_DISABLE]):\n state = State.disabled\n AM.add(\"disable\", enabled)\n\n elif get_events(events, [ET.IMMEDIATE_DISABLE]):\n state = State.disabled\n for e in get_events(events, [ET.IMMEDIATE_DISABLE]):\n AM.add(e, enabled)\n\n elif not get_events(events, [ET.SOFT_DISABLE]):\n # no more soft disabling condition, so go back to ENABLED\n state = State.enabled\n\n elif get_events(events, [ET.SOFT_DISABLE]) and soft_disable_timer > 0:\n for e in get_events(events, [ET.SOFT_DISABLE]):\n AM.add(e, enabled)\n\n elif soft_disable_timer <= 0:\n state = State.disabled\n\n # PRE ENABLING\n elif state == State.preEnabled:\n if get_events(events, [ET.USER_DISABLE]):\n state = State.disabled\n AM.add(\"disable\", enabled)\n\n elif get_events(events, [ET.IMMEDIATE_DISABLE, ET.SOFT_DISABLE]):\n state = State.disabled\n for e in get_events(events, [ET.IMMEDIATE_DISABLE, ET.SOFT_DISABLE]):\n AM.add(e, enabled)\n\n elif not get_events(events, [ET.PRE_ENABLE]):\n state = State.enabled\n\n return state, soft_disable_timer, v_cruise_kph, v_cruise_kph_last", "def stochasticModelAnal(x, H, N, stocf):\n\n\thN = N/2+1 # positive size of fft\n\tNo2 = N/2 # half of N\n\tif (hN*stocf < 3): # raise exception if decimation factor too small\n\t\traise ValueError(\"Stochastic decimation factor too small\")\n\t\t\n\tif (stocf > 1): # raise exception if decimation factor too big\n\t\traise ValueError(\"Stochastic decimation factor above 1\")\n\t\t\n\tif (H <= 0): # raise error if hop size 0 or negative\n\t\traise ValueError(\"Hop size (H) smaller or equal to 0\")\n\n\tif not(isPower2(N)): # raise error if N not a power of two\n\t\traise ValueError(\"FFT size (N) is not a power of 2\")\n\t\t\n\tw = hanning(N) # analysis window\n\tx = np.append(np.zeros(No2),x) # add zeros at beginning to center first window at sample 0\n\tx = np.append(x,np.zeros(No2)) # add zeros at the end to analyze last sample\n\tpin = No2 # initialize sound pointer in middle of analysis window \n\tpend = x.size-No2 # last sample to start a frame\n\twhile pin<=pend: \n\t\txw = x[pin-No2:pin+No2] * w # window the input sound\n\t\tX = fft(xw) # compute FFT\n\t\tmX = 20 * np.log10(abs(X[:hN])) # magnitude spectrum of positive frequencies\n\t\tmY = resample(np.maximum(-200, mX), stocf*hN) # decimate the mag spectrum \n\t\tif pin == No2: # first frame\n\t\t\tstocEnv = np.array([mY])\n\t\telse: # rest of frames\n\t\t\tstocEnv = np.vstack((stocEnv, np.array([mY])))\n\t\tpin += H # advance sound pointer\n\treturn stocEnv", "def __reverse_we_c(cls, sens_mv, cnc): # TODO: don't use concentration - factor out baseline?\n we_c = (cnc * sens_mv) / 1000.0\n\n # print(\"__reverse_we_c: sens_mv:%s cnc:%f we_c:%s\" % (sens_mv, cnc, we_c), file=sys.stderr)\n\n return we_c", "def _excitonic_coft_all(self,SS,AG):\n \n # SystemBathInteraction\n sbi = AG.get_SystemBathInteraction()\n # 
CorrelationFunctionMatrix\n cfm = sbi.CC\n \n c0 = AG.monomers[0].get_egcf((0,1))\n Nt = len(c0)\n \n Nst = AG.HamOp.dim\n ct = numpy.zeros((Nst,Nt),dtype=numpy.complex128)\n\n # electronic states corresponding to single excited states\n import time\n timecount = 0\n elst = numpy.where(AG.which_band == 1)[0]\n start = time.time()\n for el1 in elst:\n for el2 in elst:\n coft = cfm.get_coft(el1-1,el2-1)\n start2 = time.time()\n for kk in AG.vibindices[el1]:\n for ll in AG.vibindices[el2]:\n ct[:,:] += numpy.dot(\n numpy.expand_dims((SS[kk,:]**2)*(SS[ll,:]**2),axis=1),\n numpy.expand_dims(coft,axis=0))\n stop2 = time.time()\n timecount += stop2 - start2\n stop = time.time()\n print(stop-start,stop-start - timecount)\n return ct", "def ssc(signal,samplerate=16000,winlen=0.025,winstep=0.01,\n nfilt=26,nfft=512,lowfreq=0,highfreq=None,preemph=0.97): \n highfreq= highfreq or samplerate/2\n signal = sigproc.preemphasis(signal,preemph)\n frames = sigproc.framesig(signal, winlen*samplerate, winstep*samplerate)\n pspec = sigproc.powspec(frames,nfft)\n pspec = pylab.where(pspec == 0,pylab.finfo(float).eps,pspec) # if things are all zeros we get problems\n \n fb = get_filterbanks(nfilt,nfft,samplerate,lowfreq,highfreq)\n feat = pylab.dot(pspec,fb.T) # compute the filterbank energies\n R = pylab.tile(pylab.linspace(1,samplerate/2,pylab.size(pspec,1)),(pylab.size(pspec,0),1))\n \n return pylab.dot(pspec*R,fb.T) / feat", "def TST_C2ST_D(S,N1,N_per,alpha,discriminator,device,dtype):\r\n np.random.seed(seed=1102)\r\n torch.manual_seed(1102)\r\n torch.cuda.manual_seed(1102)\r\n N = S.shape[0]\r\n f = torch.nn.Softmax()\r\n output = discriminator(S)\r\n pred_C2ST = output.max(1, keepdim=True)[1]\r\n STAT = abs(pred_C2ST[:N1].type(torch.FloatTensor).mean() - pred_C2ST[N1:].type(torch.FloatTensor).mean())\r\n STAT_vector = np.zeros(N_per)\r\n for r in range(N_per):\r\n ind = np.random.choice(N, N, replace=False)\r\n # divide into new X, Y\r\n ind_X = ind[:N1]\r\n ind_Y = ind[N1:]\r\n STAT_vector[r] = abs(pred_C2ST[ind_X].type(torch.FloatTensor).mean() - pred_C2ST[ind_Y].type(torch.FloatTensor).mean())\r\n S_vector = np.sort(STAT_vector)\r\n threshold = S_vector[np.int(np.ceil(N_per * (1 - alpha)))]\r\n threshold_lower = S_vector[np.int(np.ceil(N_per * alpha))]\r\n h = 0\r\n if STAT.item() > threshold:\r\n h = 1\r\n return h, threshold, STAT", "def one_transition_spectrum_fluor(self,tr):\n \n\n ta = tr[\"ta\"] # TimeAxis\n dd = tr[\"dd\"] # transition dipole strength\n om = tr[\"om\"] # frequency - rwa\n gg = tr[\"gg\"] # natural broadening (constant or time dependent)\n fwhm = tr[\"fwhm\"] # Additional gaussian broadening of the spectra\n sgm = fwhm/(2*numpy.sqrt(2*numpy.log(2)))\n \n # CD and fluorescence can be calculated in this step\n # TODO if rotatory strength defined calculate also circular dichroism spectra\n # TOOD calculate fluorescence spectra (for fluorescence there should be a switch because it should be calculated only for the first transition) \n \n \n if self.system._has_system_bath_coupling:\n# ct = tr[\"ct\"] # correlation function\n re = tr[\"re\"] # reorganisation energy\n \n # convert correlation function to lineshape function\n #gt = self._c2g(ta,ct.data)\n gt = tr[\"gt\"]\n # calculate time dependent response\n at = numpy.exp(-numpy.conjugate(gt) -1j*om*ta.data + 2j*re*ta.data)\n else:\n # calculate time dependent response\n at = numpy.exp(-1j*om*ta.data) \n# plt.figure()\n# plt.title(\"Absorption\")\n# plt.plot(ta.data,numpy.real(at))\n# plt.plot(ta.data,numpy.imag(at))\n \n \n if 
len(gg) == 1:\n gam = gg[0]\n rt = numpy.exp(gam*ta.data)\n at *= rt\n #print(\"Constant: \", rt[20], len(at))\n else:\n rt = numpy.exp((gg)*ta.data) \n at *= rt\n #print(\"Time dependent: len = \", rt[20], len(rt))\n \n if fwhm!=0.0:\n gauss = numpy.exp(-2*(numpy.pi**2)*(sgm**2)*(ta.data**2))\n at *= gauss\n \n # Fourier transform the result\n ft = dd*numpy.fft.hfft(at)*ta.step\n ft = numpy.fft.fftshift(ft)\n # invert the order because hfft is a transform with -i\n ft = numpy.flipud(ft) \n # cut the center of the spectrum\n Nt = ta.length #len(ta.data) \n return ft[Nt//2:Nt+Nt//2]", "def _excitonic_coft_old(self,SS,AG,n):\n \n # FIXME: works only for 2 level molecules\n \n c0 = AG.monomers[0].get_egcf((0,1))\n Nt = len(c0)\n \n # SystemBathInteraction\n sbi = AG.get_SystemBathInteraction()\n # CorrelationFunctionMatrix\n cfm = sbi.CC\n \n # get number of monomeric basis states\n Na = 0\n for monomer in AG.monomers:\n Na += monomer.nel-1\n \n ct = numpy.zeros((Nt),dtype=numpy.complex128)\n #Na = AG.nmono\n for kk in range(Na):\n \n #nkk = AG.monomers[kk].egcf_mapping[0]\n \n for ll in range(Na):\n \n #nll = AG.monomers[ll].egcf_mapping[0]\n \n ct += ((SS[kk+1,n+1]**2)*(SS[ll+1,n+1]**2)*cfm.get_coft(kk,ll))\n #*AG.egcf_matrix.get_coft(nkk,nll))\n \n return ct", "def ata_sct_temperature_history(self, ata_sct_temperature_history: SmartSsdAtaSctTemperatureHistory):\n\n self._ata_sct_temperature_history = ata_sct_temperature_history", "def detect_via_cusum_lg(ts, istart=30, threshold_times=5):\n S_h = 0\n S_l = 0\n S_list = np.zeros(istart) # 前面填充的30个空数据\n meanArray = talib.SMA(ts,timeperiod = istart)\n stdArray = talib.STDDEV(np.log(ts/meanArray),timeperiod = istart)\n for i in range(istart, len(ts)): # 这里是否应该掐头去尾?\n tslog = np.log(ts[i] / meanArray[i - 1])\n S_h_ = max(0, S_h + tslog - stdArray[i-1])\n S_l_ = min(0, S_l + tslog + stdArray[i-1])\n if S_h_> threshold_times * stdArray[i-1]:\n S_list = np.append(S_list,1) # 该点为上变点\n S_h_ = 0\n elif abs(S_l_)> threshold_times * stdArray[i-1]:\n S_list = np.append(S_list, -1) # 该点为下变点\n S_l_ = 0\n else:\n S_list = np.append(S_list, 0) # 该点无特殊情况\n S_h = S_h_\n S_l = S_l_\n\n return S_list", "def do_tc(self, arg):\n a = arg.split()\n\n if len(a) >= 1:\n ise.useTemperatureCompensation(int(a[0]))\n\n print(\"\\ttemp. 
compensation: \" + str(ise.usingTemperatureCompensation()))", "def SST(Y):\n\tsummation = 0\n\taverage = mean(Y)\n\tfor i in range(len(Y)):\n\t\tsummation += (Y[i]-average)**2\n\treturn summation", "def example(self, s, d, s_len, d_len, snr):\n\t\ts, d, x, n_frames = self.mix(s, d, s_len, d_len, snr)\n\t\ts_STDCT = self.stdct_analysis(s)\n\t\td_STDCT = self.stdct_analysis(d)\n\t\tx_STDCT = self.stdct_analysis(x)\n\t\txi = self.xi(s_STDCT, d_STDCT)\n\t\txi_bar = self.xi_map.map(xi)\n\t\tcd = self.cd(s_STDCT, d_STDCT)\n\t\tcd_bar = self.cd_map.map(cd)\n\t\txi_cd_map = tf.concat([xi_bar, cd_bar], axis=-1)\n\t\treturn x_STDCT, xi_cd_map, n_frames", "def _read_stc(stc_file):\r\n hdr = _read_hdr_file(stc_file) # read header the normal way\r\n\r\n with open(stc_file, 'rb') as f:\r\n f.seek(0, SEEK_END)\r\n endfile = f.tell()\r\n f.seek(352) # end of header\r\n hdr['next_segment'] = unpack('<i', f.read(4))[0]\r\n hdr['final'] = unpack('<i', f.read(4))[0]\r\n hdr['padding'] = unpack('<' + 'i' * 12, f.read(48))\r\n\r\n all_stamp = []\r\n\r\n while True:\r\n if f.tell() == endfile:\r\n break\r\n stamp = {}\r\n stamp['segment_name'] = _make_str(unpack('c' * 256, f.read(256)))\r\n stamp['start_stamp'] = unpack('<i', f.read(4))[0]\r\n stamp['end_stamp'] = unpack('<i', f.read(4))[0]\r\n stamp['sample_num'] = unpack('<i', f.read(4))[0]\r\n stamp['sample_span'] = unpack('<i', f.read(4))[0]\r\n\r\n all_stamp.append(stamp)\r\n\r\n return hdr, all_stamp" ]
[ "0.55765706", "0.5497741", "0.5477658", "0.5466189", "0.54562336", "0.54297197", "0.53879595", "0.5304885", "0.5280067", "0.52764845", "0.5274857", "0.52428555", "0.5241364", "0.51811355", "0.5174693", "0.51709056", "0.5167392", "0.5165704", "0.5157772", "0.51531", "0.5118141", "0.51161176", "0.51076746", "0.50866187", "0.507307", "0.507083", "0.5069277", "0.50611186", "0.50519997", "0.50493616", "0.50460774", "0.5030787", "0.502742", "0.502112", "0.50167674", "0.5014304", "0.5010993", "0.50100195", "0.50059617", "0.49985152", "0.49969578", "0.49851176", "0.49733946", "0.4970077", "0.49420908", "0.49291366", "0.49242374", "0.49135268", "0.49098074", "0.4896481", "0.48960444", "0.4893727", "0.4870698", "0.4870698", "0.4867381", "0.48626646", "0.48623317", "0.48606947", "0.4858983", "0.48479792", "0.48478958", "0.48478055", "0.48447397", "0.4842238", "0.48410827", "0.48410827", "0.48361126", "0.48337284", "0.48300478", "0.48278618", "0.48188585", "0.48171997", "0.48078102", "0.48076406", "0.48072186", "0.47975028", "0.47972608", "0.47883976", "0.47847348", "0.47825077", "0.4781561", "0.47775266", "0.47746125", "0.47732887", "0.47703856", "0.47663623", "0.47659078", "0.476158", "0.47608903", "0.47599033", "0.47592425", "0.47575668", "0.47542617", "0.4752687", "0.47518522", "0.47494334", "0.47493446", "0.47418144", "0.4741719", "0.47375283" ]
0.4891067
52
Calculates the variational expectations used by an SVGP model.
def variational_expectations(self, Fmu, Fvar, Y): # Y must be in (-1, +1), not (0, 1) assert_01 = tf.Assert(tf.reduce_all((Y == 0.0) | (Y == 1.0)), [Y]) with tf.control_dependencies([assert_01]): Y = Y * 2.0 - 1.0 c2 = Fmu ** 2 + Fvar c = tf.sqrt(c2) theta = tf.tanh(c / 2) / (2 * c) varexp = 0.5 * (Y * Fmu - theta * c2) return varexp - PolyaGammaBernoulli.kl_term(c) - np.log(2.0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_var_exp(self):\n with self.test_context() as session:\n test_setups, F, feed = self.prepare()\n for test_setup in test_setups:\n l = test_setup.likelihood\n y = test_setup.Y\n l.compile()\n r1 = session.run(l.logp(F, y), feed_dict=feed)\n zero = F * 0.\n r2 = session.run(\n l.variational_expectations(F, zero, test_setup.Y), feed_dict=feed)\n assert_allclose(r1, r2, atol=test_setup.tolerance, rtol=test_setup.tolerance)", "def variational_expectations(self, Y, m, v, gh_points=None, Y_metadata=None):\n\n if gh_points is None:\n gh_x, gh_w = self._gh_points()\n else:\n gh_x, gh_w = gh_points\n\n shape = m.shape\n m,v,Y = m.flatten(), v.flatten(), Y.flatten()\n\n #make a grid of points\n X = gh_x[None,:]*np.sqrt(2.*v[:,None]) + m[:,None]\n\n #evaluate the likelhood for the grid. First ax indexes the data (and mu, var) and the second indexes the grid.\n # broadcast needs to be handled carefully.\n logp = self.logpdf(X,Y[:,None], Y_metadata=Y_metadata)\n dlogp_dx = self.dlogpdf_df(X, Y[:,None], Y_metadata=Y_metadata)\n d2logp_dx2 = self.d2logpdf_df2(X, Y[:,None], Y_metadata=Y_metadata)\n\n #clipping for numerical stability\n #logp = np.clip(logp,-1e9,1e9)\n #dlogp_dx = np.clip(dlogp_dx,-1e9,1e9)\n #d2logp_dx2 = np.clip(d2logp_dx2,-1e9,1e9)\n\n #average over the gird to get derivatives of the Gaussian's parameters\n #division by pi comes from fact that for each quadrature we need to scale by 1/sqrt(pi)\n F = np.dot(logp, gh_w)/np.sqrt(np.pi)\n dF_dm = np.dot(dlogp_dx, gh_w)/np.sqrt(np.pi)\n dF_dv = np.dot(d2logp_dx2, gh_w)/np.sqrt(np.pi)\n dF_dv /= 2.\n\n if np.any(np.isnan(dF_dv)) or np.any(np.isinf(dF_dv)):\n stop\n if np.any(np.isnan(dF_dm)) or np.any(np.isinf(dF_dm)):\n stop\n\n if self.size:\n dF_dtheta = self.dlogpdf_dtheta(X, Y[:,None], Y_metadata=Y_metadata) # Ntheta x (orig size) x N_{quad_points}\n dF_dtheta = np.dot(dF_dtheta, gh_w)/np.sqrt(np.pi)\n dF_dtheta = dF_dtheta.reshape(self.size, shape[0], shape[1])\n else:\n dF_dtheta = None # Not yet implemented\n return F.reshape(*shape), dF_dm.reshape(*shape), dF_dv.reshape(*shape), dF_dtheta", "def variational_expectation(self, y, m, v, cubature=None):\n\n # align shapes and compute mask\n y = y.reshape(-1, 1, 1)\n m = m.reshape(-1, 1, 1)\n v = np.diag(v).reshape(-1, 1, 1)\n mask = np.isnan(y)\n y = np.where(mask, m, y)\n\n # compute variational expectations and their derivatives\n var_exp, dE_dm, d2E_dm2 = vmap(self.variational_expectation_, (0, 0, 0, None))(y, m, v, cubature)\n\n # apply mask\n var_exp = np.where(np.squeeze(mask), 0., np.squeeze(var_exp))\n dE_dm = np.where(mask, np.nan, dE_dm)\n d2E_dm2 = np.where(mask, np.nan, d2E_dm2)\n\n return var_exp, np.squeeze(dE_dm, axis=2), np.diag(np.squeeze(d2E_dm2, axis=(1, 2)))", "def run_test(d):\n\n ######### Problem Specification\n\n # Data generation parameters\n prior_mu_z = np.zeros(d, dtype=np.float32) # Prior mean\n prior_sigma_z = np.eye(d, dtype=np.float32) # Prior covariance matrix\n\n # True model parameters\n num_range = np.arange(-(d-1)/2, (d+1)/2, dtype=np.float32)\n\n t_delta = num_range / 5 \n\n if d == 1:\n t_sigma = np.ones(1)\n else: \n # Allow sigma to range from 0.1 to 1\n t_sigma = 36/(10*(d-1)**2) * num_range**2 + 0.1 \n\n ######### Variable Initialization\n\n # Initial model parameters - same across all methods\n init_delta = prior_mu_z.copy()\n init_log_sigma = 3 * np.ones(d)\n\n # Initial HVAE variational parameters\n init_T = 5.\n init_eps = 0.005 * np.ones(d)\n max_eps = params['max_eps'] * np.ones(d)\n init_logit_eps = np.log(init_eps/(max_eps - 
init_eps))\n init_log_T_0 = np.log(init_T - 1)\n\n # Initial NF variational parameters\n init_u_pre_reparam = scipy.stats.truncnorm.rvs(-2, 2, scale=0.1, size=d)\n init_w = scipy.stats.truncnorm.rvs(-2, 2, scale=0.1, size=d)\n init_b = 0.1\n\n # Initial VAE parameters\n init_mu_z = prior_mu_z.copy()\n init_log_sigma_z = np.ones(d)\n\n ######### Set up models\n\n HVAE_model_1 = HVAE(\n ['delta', 'log_sigma', 'logit_eps', 'log_T_0'],\n [init_delta, init_log_sigma, init_logit_eps, init_log_T_0], \n 'HVAE_1', d, params['HVAE_K_1'])\n HVAE_model_2 = HVAE(\n ['delta', 'log_sigma', 'logit_eps', 'log_T_0'],\n [init_delta, init_log_sigma, init_logit_eps, init_log_T_0], \n 'HVAE_2', d, params['HVAE_K_2'])\n\n HVAE_model_notemp_1 = HVAE(\n ['delta', 'log_sigma', 'logit_eps'],\n [init_delta, init_log_sigma, init_logit_eps], \n 'HVAE_notemp_1', d, params['HVAE_K_1'])\n HVAE_model_notemp_2 = HVAE(\n ['delta', 'log_sigma', 'logit_eps'], \n [init_delta, init_log_sigma, init_logit_eps],\n 'HVAE_notemp_2', d, params['HVAE_K_2'])\n\n NF_model_1 = NF(\n ['delta', 'log_sigma', 'u_pre_reparam', 'w', 'b'],\n [init_delta, init_log_sigma, init_u_pre_reparam, init_w, init_b],\n 'NF_1', d, params['NF_K_1'])\n NF_model_2 = NF(\n ['delta', 'log_sigma', 'u_pre_reparam', 'w', 'b'],\n [init_delta, init_log_sigma, init_u_pre_reparam, init_w, init_b],\n 'NF_2', d, params['NF_K_2'])\n\n VB_model = VB(['delta', 'log_sigma', 'mu_z', 'log_sigma_z'], \n [init_delta, init_log_sigma, init_mu_z, init_log_sigma_z], 'VB', d)\n\n model_list = [HVAE_model_1, HVAE_model_2, HVAE_model_notemp_1, \n HVAE_model_notemp_2, NF_model_1, NF_model_2, VB_model]\n \n ######### Generate Training Data & Save - One for each test\n\n train_data_list = []\n\n for i in range(params['n_tests']):\n z = np.random.multivariate_normal(prior_mu_z, prior_sigma_z)\n x = np.random.multivariate_normal(z + t_delta, np.diag(t_sigma**2), \n size=params['n_data'])\n train_data_list.append(x)\n\n # Folder should have already been created in the initializations\n data_path = os.path.join('save', str(d), 'train_data.p')\n pickle.dump(train_data_list, open(data_path, 'wb')) \n\n ######### Train models\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n\n # Store the final parameter values for all test runs in this dictionary\n final_params = {}\n\n for m in model_list:\n\n final_values = []\n\n for i in range(params['n_tests']):\n (delta, sigma) = m.train(sess, train_data_list[i], i)\n final_values.append((delta, sigma))\n\n final_params[m.model_name] = final_values.copy()\n\n ######### Test models using difference between parameters\n\n param_diffs = {}\n\n for m in model_list:\n\n diffs = []\n\n for i in range(params['n_tests']):\n delta = final_params[m.model_name][i][0]\n sigma = final_params[m.model_name][i][1]\n\n delta_diff = np.sum((delta - t_delta)**2)\n sigma_diff = np.sum((sigma - t_sigma)**2)\n\n diffs.append((delta_diff, sigma_diff))\n\n param_diffs[m.model_name] = diffs.copy()\n\n # Save parameter differences in a pickle file\n diff_path = os.path.join('save', str(d), 'all_diffs.p')\n pickle.dump(param_diffs, open(diff_path, 'wb'))", "def variational_expectations(self, Fmu, Fvar, Y):\n\n gh_x, gh_w = hermgauss(self.num_gauss_hermite_points)\n gh_x = gh_x.reshape(1, -1)\n gh_w = gh_w.reshape(-1, 1) / np.sqrt(np.pi)\n shape = tf.shape(Fmu)\n Fmu, Fvar, Y = [tf.reshape(e, (-1, 1)) for e in (Fmu, Fvar, Y)]\n X = gh_x * tf.sqrt(2.0 * Fvar) + Fmu\n Y = tf.tile(Y, [1, self.num_gauss_hermite_points]) # broadcast Y to match X\n\n logp = 
self.logp(X, Y)\n return tf.reshape(tf.matmul(logp, gh_w), shape)", "def variational_expectation_(self, y, m, v, cubature=None):\n return variational_expectation_cubature(self, y, m, v, cubature)", "def em_var(self) -> float:\n if self.__total_pulls == 0:\n raise Exception('Number of pulls is 0. No empirical variance.')\n return (self.__sum_of_square_reward -\n self.__total_rewards**2 / self.__total_pulls) / self.__total_pulls", "def variational_update(self):\n with self.elbo_check('update_p_allele_swap'):\n self.model.update_p_allele_swap()\n\n with self.elbo_check('p_cn'):\n self.model.update_p_cn()\n\n with self.elbo_check('p_breakpoint'):\n self.model.update_p_breakpoint()\n\n with self.elbo_check('p_outlier_total'):\n self.model.update_p_outlier_total()\n\n with self.elbo_check('p_outlier_allele'):\n self.model.update_p_outlier_allele()", "def expect(self, var):\n e = 0.0\n for prob, val in self.rv(var):\n e += prob * float(val)\n return e", "def simulation_satisficing(df_exp, df_model, n_subj):\n\n # Inform user\n sleep(0.1)\n print('\\nModel simulation:')\n sleep(0.1)\n\n # Initialize progress bar\n pbar = tqdm(total=n_subj)\n\n # Agent variables object\n agent_vars = AgentVars()\n\n # Initialize data frame for data that will be recovered\n df_sim = pd.DataFrame()\n\n # Initialize group vector\n group = np.full(n_subj, np.nan)\n\n # Initialize data frames from estimation errors and perseveration\n sim_est_err = pd.DataFrame(columns=['noPush', 'push', 'age_group'], index=np.arange(n_subj), dtype=float)\n sim_pers_prob = pd.DataFrame(columns=['noPush', 'push', 'age_group'], index=np.arange(n_subj), dtype=float)\n\n # Cycle over participants\n # -----------------------\n for i in range(0, n_subj):\n\n # Extract subject-specific data frame\n df_subj = get_df_subj(df_exp, i)\n\n # Extract model parameters from model data frame\n sel_coeffs = df_model[df_model['subj_num'] == i + 1].copy()\n\n # Extract age group of current participant\n group[i] = sel_coeffs[['age_group']].values\n\n # Save parameters for parameter recovery analysis\n if i == 0:\n true_params = sel_coeffs\n elif i > 0:\n true_params = true_params.append(sel_coeffs, ignore_index=True, sort=True)\n\n if group[i] == 3:\n sel_coeffs = sel_coeffs[['omikron_0', 'omikron_1', 'b_0', 'b_1', 'h', 's',\n 'u', 'q', 'sigma_H', 'd', 'low_satisficing']].values.tolist()[0]\n else:\n sel_coeffs = sel_coeffs[['omikron_0', 'omikron_1', 'b_0', 'b_1', 'h', 's',\n 'u', 'q', 'sigma_H', 'd', 'high_satisficing']].values.tolist()[0]\n\n # Set agent variables of current participant\n agent_vars.h = sel_coeffs[4]\n agent_vars.s = sel_coeffs[5]\n agent_vars.u = np.exp(sel_coeffs[6])\n agent_vars.q = sel_coeffs[7]\n agent_vars.sigma_H = sel_coeffs[8]\n\n # Agent object\n agent = AlAgent(agent_vars)\n\n # Run task-agent interaction\n df_data = task_agent_int_satisficing(df_subj, agent, agent_vars, sel_coeffs)\n\n # Add subject number to data frame\n df_data['subj_num'] = i+1\n\n # Add data to data frame\n df_sim = df_sim.append(df_data, ignore_index=True)\n\n # Extract no-changepoint trials\n no_cp = df_subj['c_t'] == 0\n\n # Extract true helicopter location for estimation error computation\n real_mu = df_subj['mu_t'][0:(len(df_subj) - 2)]\n\n # Extract model prediction for estimation error computation\n sim_pred = df_data['sim_b_t'][:-1]\n sim_pred = sim_pred.reset_index(drop=True) # adjust index\n\n # Compute estimation error\n sim_est_err_all = real_mu - sim_pred\n sim_est_err_nocp = sim_est_err_all[no_cp] # estimation error without changepoints\n\n 
# Compute perseveration\n df_data['pers'] = df_data['sim_a_t'] == 0\n\n # Extract shifting- and stable-bucket conditions\n cond_1 = df_subj['cond'] == \"main_noPush\"\n cond_1 = cond_1[no_cp]\n cond_2 = df_subj['cond'] == \"main_push\"\n cond_2 = cond_2[no_cp]\n\n # Save estimation errors for both conditions and add age\n sim_est_err['noPush'][i] = np.mean(abs(sim_est_err_nocp[cond_1]))\n sim_est_err['push'][i] = np.mean(abs(sim_est_err_nocp[cond_2]))\n sim_est_err['age_group'][i] = group[i]\n\n # Save perseveration for both conditions and add age\n sim_pers_prob['noPush'][i] = np.mean(df_data[(df_data[\"cond\"] == \"main_noPush\")]['pers'])\n sim_pers_prob['push'][i] = np.mean(df_data[(df_data[\"cond\"] == \"main_push\")]['pers'])\n sim_pers_prob['age_group'][i] = group[i]\n\n # Update progress bar\n pbar.update(1)\n\n # Close progress bar\n if i == n_subj - 1:\n pbar.close()\n\n return sim_est_err, sim_pers_prob, df_sim, true_params", "def invariant_mass_distributions_v_v_v(\n model: SingleRhNeutrinoModel,\n genv: Generation,\n nbins: int,\n):\n\n def msqrd(s, t):\n return msqrd_v_v_v(s, t, model, genv)\n\n tb = ThreeBody(model.mx, (0, 0, 0), msqrd=msqrd)\n return tb.invariant_mass_distributions(nbins=nbins)", "def test_var_expectation_values(self, approx_order, strategy, tol):\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n x = 0.543\r\n y = -0.654\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(x, wires=[0])\r\n qml.RY(y, wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(qml.PauliZ(0))\r\n qml.var(qml.PauliX(1))\r\n\r\n tapes, fn = finite_diff(tape, approx_order=approx_order, strategy=strategy)\r\n res = fn(dev.batch_execute(tapes))\r\n assert res.shape == (2, 2)\r\n\r\n expected = np.array([[-np.sin(x), 0], [0, -2 * np.cos(y) * np.sin(y)]])\r\n assert np.allclose(res, expected, atol=tol, rtol=0)", "def test_inference_step(var_f, len_f, var_y, N):\n\n x, y = build_data(N)\n\n gp_model = initialise_gp_model(var_f, len_f, var_y, x, y)\n markovgp_model = initialise_markovgp_model(var_f, len_f, var_y, x, y)\n\n lr_newton = 1.\n\n gp_model.inference(lr=lr_newton) # update variational params\n\n markovgp_model.inference(lr=lr_newton) # update variational params\n\n np.testing.assert_allclose(gp_model.posterior_mean.value, markovgp_model.posterior_mean.value, rtol=1e-4)\n np.testing.assert_allclose(gp_model.posterior_variance.value, markovgp_model.posterior_variance.value, rtol=1e-4)", "def energy_distributions_v_v_v(\n model: SingleRhNeutrinoModel,\n genv: Generation,\n nbins: int,\n):\n\n def msqrd(s, t):\n return msqrd_v_v_v(s, t, model, genv)\n\n tb = ThreeBody(model.mx, (0, 0, 0), msqrd=msqrd)\n return tb.energy_distributions(nbins=nbins)", "def variational_expectations(self, Fmu, Fvar, Y, epsilon=None):\n return self._mc_quadrature(self.log_prob,\n Fmu,\n Fvar,\n Y=Y,\n epsilon=epsilon)", "def _calc_msve(self):\n v = []\n for state in self._env.state_iterator():\n feature_vector = self._features.vector(state)\n v.append(utils.state_value(feature_vector, self.theta))\n\n self.msve.append(utils.rmse(v, self._true_values))", "def test_velocity_vs_current(self):\n t, x_n, x_p = self.t, self.x_n, self.x_p\n\n beta_n = self.model.param.beta_n\n beta_n = self.param.evaluate(beta_n)\n beta_p = self.model.param.beta_p\n beta_p = self.param.evaluate(beta_p)\n\n np.testing.assert_array_almost_equal(\n self.v_box(t, x_n), beta_n * self.i_e(t, x_n)\n )\n np.testing.assert_array_almost_equal(\n self.v_box(t, x_p), beta_p * self.i_e(t, x_p)\n )", "def _em_variance(self, result, endog, 
exog, betas, tmp=None):\n k_exog = 0 if exog is None else exog.shape[1]\n\n if self.switching_variance:\n variance = np.zeros(self.k_regimes)\n for i in range(self.k_regimes):\n if k_exog > 0:\n resid = endog - np.dot(exog, betas[i])\n else:\n resid = endog\n variance[i] = (\n np.sum(resid ** 2 *\n result.smoothed_marginal_probabilities[i]) /\n np.sum(result.smoothed_marginal_probabilities[i]))\n else:\n variance = 0\n if tmp is None:\n tmp = np.sqrt(result.smoothed_marginal_probabilities)\n for i in range(self.k_regimes):\n tmp_endog = tmp[i] * endog\n if k_exog > 0:\n tmp_exog = tmp[i][:, np.newaxis] * exog\n resid = tmp_endog - np.dot(tmp_exog, betas[i])\n else:\n resid = tmp_endog\n variance += np.sum(resid ** 2)\n variance /= self.nobs\n return variance", "def doParametersOfInterest(self):\n ''' ref : physicsmodel -> rvf\n self.modelBuilder.out.var(\"MH\").setRange(float(self.mHRange[0]),float(self.mHRange[1]))\n self.modelBuilder.out.var(\"MH\").setConstant(False)\n '''\n\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doVar(\"Fvbf[0,0,1]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu,Fvbf\")\n self.modelBuilder.doVar(\"\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"(@0-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_b_func(\"(1-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)*(1.-@1)\", mu,Fvbf)')\n\n self.modelBuilder.factory_('expr::vbfH_s_func(\"(@0-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"(1-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)*(@1)\", mu,Fvbf)')", "def calc_variables ( ):\n\n # In this example we simulate using the shifted-force potential only\n # The values of < p_sf >, < e_sf > and density should be consistent (for this potential)\n # There are no long-range or delta corrections\n\n from averages_module import VariableType\n \n # Preliminary calculations\n vol = box**3 # Volume\n rho = n / vol # Density\n\n # Variables of interest, of class VariableType, containing three attributes:\n # .val: the instantaneous value\n # .nam: used for headings\n # .method: indicating averaging method\n # If not set below, .method adopts its default value of avg\n # The .nam and some other attributes need only be defined once, at the start of the program,\n # but for clarity and readability we assign all the values together below\n\n # Move acceptance ratio\n m_r = VariableType ( nam = 'Move ratio', val = m_ratio, instant = False )\n\n # Internal energy per molecule (shifted-force potential)\n # Ideal gas contribution (assuming nonlinear molecules) plus total PE divided by N\n e_sf = VariableType ( nam = 'E/N shifted force', val = 3.0*temperature + total.pot/n )\n\n # Pressure (shifted-force potential)\n # Ideal gas contribution plus total virial divided by V\n p_sf = VariableType ( nam = 'P shifted force', val = rho*temperature + total.vir/vol )\n\n # Collect together into a list for averaging\n return [ m_r, e_sf, p_sf ]", "def var(self):\n return self.sumsquares / (self.sum_weights - self.ddof)", "def variational_expectations(self, Fmu, Fvar, Y):\n integrand = self.log_prob\n nghp = self.num_gauss_hermite_points\n return ndiagquad(integrand, nghp, Fmu, Fvar, Y=Y)", "def eval(self):\n\n T = 0. 
# the test statistic\n N = float(len(self.x))\n M = float(len(self.y))\n\n if N == 0 or M == 0:\n raise ValueError('cvm: empty vector')\n\n s1 = 0.\n for ex in self.x:\n s1 += (self.eval_ecdf(self.x, self.ecdf_x, ex) -\n self.eval_ecdf(self.y, self.ecdf_y, ex))**2\n \n s2 = 0.\n for ey in self.y:\n s2 += (self.eval_ecdf(self.x, self.ecdf_x, ey) -\n self.eval_ecdf(self.y, self.ecdf_y, ey))**2\n\n # the CVM test statistic\n T = N*M/(N + M)**2*(s1 + s2)\n\n # the expected value of T (under the null hypothesis)\n expT = 1./6. + 1./(6.*(M + N))\n\n # the variance of T\n varT = 1./45.*(M + N + 1.)/(M + N)**2*\\\n (4.*M*N*(M + N) - 3.*(M**2 + N**2) - 2.*M*N)/(4.*M*N)\n\n # adjust T so that its significance can be computed using the limiting\n # distribution\n limitT = (T - expT)/np.sqrt(45.*varT) + 1./6.\n\n\n # p-value for this test statistic\n if limitT > self._z[-1]:\n p = 0.\n else:\n p = 1. - self._interp_f(limitT)\n\n return T, limitT, p", "def expectation(self,result,shots):\n E = 0\n for state,num_measure in result.items():\n state = state[::-1]\n eigval = 1\n for qubit in self.measure_qubits:\n if state[qubit] == '1':\n eigval *= -1\n if self.rev_eig:\n eigval *= -1\n E += eigval*num_measure\n E /= shots\n return E*self.factor", "def each_evidence(y_, f, fh, v, s, vh, N, D):\n epsilon = 1e-5\n alpha = 1.0\n beta = 1.0\n lam = alpha / beta\n tmp = (vh @ (f @ np.ascontiguousarray(y_)))\n for _ in range(11):\n # should converge after at most 10 steps\n # typically converge after two or three steps\n gamma = (s / (s + lam)).sum()\n # A = v @ np.diag(alpha + beta * s) @ v.transpose() # no need to compute A\n # A_inv = v @ np.diag(1.0 / (alpha + beta * s)) @ v.transpose() # no need to compute A_inv\n m = v @ (tmp * beta / (alpha + beta * s))\n alpha_de = (m * m).sum()\n alpha = gamma / (alpha_de + epsilon)\n beta_de = ((y_ - fh @ m) ** 2).sum()\n beta = (N - gamma) / (beta_de + epsilon)\n new_lam = alpha / beta\n if np.abs(new_lam - lam) / lam < 0.01:\n break\n lam = new_lam\n evidence = D / 2.0 * np.log(alpha) \\\n + N / 2.0 * np.log(beta) \\\n - 0.5 * np.sum(np.log(alpha + beta * s)) \\\n - beta / 2.0 * (beta_de + epsilon) \\\n - alpha / 2.0 * (alpha_de + epsilon) \\\n - N / 2.0 * np.log(2 * np.pi)\n return evidence / N, alpha, beta, m", "def var_ratio(M, N, y, cov_fun, gamma, lw, loo, K, ylim, figsize, seed):\n np.random.seed(seed)\n T = y * N\n\n names = ['sample', 'lw_oracle', 'lw_iso_oracle', 'lw_kfold', 'lw_isokfold',\n 'mv_isonlsq_oracle', 'mv_isonlsq_kfold'] # , 'isomv_oracle']\n\n if loo:\n names.insert(1, 'lw_loo')\n names.insert(2, 'lw_isoloo')\n if lw:\n names.insert(1, 'lw')\n\n empty_df = pd.DataFrame(np.zeros((M, len(names))), columns=names)\n\n dfs = {\n 'oos_var': empty_df,\n 'is_var': empty_df.copy(),\n 'forecast_var_ratio': empty_df.copy(),\n 'true_var_ratio': empty_df.copy(),\n 'te': empty_df.copy(),\n }\n # forecast_var_ratio_df = pd.DataFrame(\n # np.zeros((M, len(names))), columns=names)\n # oos_var_df = pd.DataFrame(np.zeros((M, len(names))), columns=names)\n # is_var_df = pd.DataFrame(np.zeros((M, len(names))), columns=names)\n # true_var_ratio_df = pd.DataFrame(np.zeros((M, len(names))), columns=names)\n # te_df = pd.DataFrame(np.zeros((M, len(names))), columns=names)\n\n pbar = tqdm(total=M)\n\n results = []\n for j in range(M):\n # Build Model\n if cov_fun in ['slr', 'factor']:\n fm_seed = np.random.randint(1, 2**32 - 1)\n Sigma, tau = cov_functions[cov_fun](N, seed=fm_seed)\n else:\n Sigma, tau = cov_functions[cov_fun](N)\n pi_true = min_var_portfolio(Sigma, 
gamma=gamma)\n\n # Generate Data\n X = sample(Sigma, T)\n X = X - X.mean()\n S = cov(X)\n lam, U = eig(S)\n\n # Sample covariance\n name = 'sample'\n result = portfolio_analysis(S, Sigma, gamma, pi_true)\n results.append({name: result})\n for key in result:\n dfs[key].loc[j, name] = result[key]\n\n # Oracle LW NLS shrinkage\n name = 'lw_oracle'\n _, d_lw_oracle = nls_oracle(X, S, U, Sigma)\n S_lw_oracle = eig_multiply(U, d_lw_oracle)\n result = portfolio_analysis(S_lw_oracle, Sigma, gamma, pi_true)\n results.append({name: result})\n for key in result:\n dfs[key].loc[j, name] = result[key]\n\n name = 'lw_iso_oracle'\n d_lw_iso_oracle = isotonic_regression(d_lw_oracle)\n S_lw_iso_oracle = eig_multiply(U, d_lw_iso_oracle)\n result = portfolio_analysis(S_lw_iso_oracle, Sigma, gamma, pi_true)\n results.append({name: result})\n for key in result:\n dfs[key].loc[j, name] = result[key]\n\n # LW NLS shrinkage\n if lw:\n name = 'lw'\n S_lw = nlshrink_covariance(X, centered=True)\n result = portfolio_analysis(S_lw, Sigma, gamma, pi_true)\n results.append({name: result})\n for key in result:\n dfs[key].loc[j, name] = result[key]\n\n # LOO LW NLS shrinkage\n if loo:\n name = 'lw_loo'\n _, d_lw_loo = nls_loo_cv(X, S, U)\n S_lw_loo = eig_multiply(U, d_lw_loo)\n result = portfolio_analysis(S_lw_loo, Sigma, gamma, pi_true)\n results.append({name: result})\n for key in result:\n dfs[key].loc[j, name] = result[key]\n\n name = 'lw_isoloo'\n d_lw_isoloo = isotonic_regression(d_lw_loo)\n S_lw_isoloo = eig_multiply(U, d_lw_isoloo)\n result = portfolio_analysis(S_lw_isoloo, Sigma, gamma, pi_true)\n results.append({name: result})\n for key in result:\n dfs[key].loc[j, name] = result[key]\n\n # K-fold LW NLS shrinkage\n name = 'lw_kfold'\n _, d_lw_kfold = nls_kfold_cv(X, S, U, K)\n S_lw_kfold = eig_multiply(U, d_lw_kfold)\n result = portfolio_analysis(S_lw_kfold, Sigma, gamma, pi_true)\n results.append({name: result})\n for key in result:\n dfs[key].loc[j, name] = result[key]\n\n name = 'lw_isokfold'\n d_lw_isokfold = isotonic_regression(d_lw_kfold)\n S_lw_isokfold = eig_multiply(U, d_lw_isokfold)\n result = portfolio_analysis(S_lw_isokfold, Sigma, gamma, pi_true)\n results.append({name: result})\n for key in result:\n dfs[key].loc[j, name] = result[key]\n\n # MinVar NLS shrinkage\n _, d_mv_oracle = minvar_nls_oracle(X, S, lam, U, Sigma)\n # Note: the raw oracle values for MinVar shrinkage are likely to\n # produce negative eigenvalues, which means the minimum variance\n # portfolio cannot be reasonably computed. 
Computing variance\n # ratios for the MinVar shrinkage only works with some kind of\n # modification to the raw values.\n\n # Note: Applying isotonic regression after solving for the oracle values\n # is consistently way worse than solving the constrained LS problem so\n # it is omitted.\n # d_mv_iso_oracle = isotonic_regression(d_mv_oracle)\n # S_mv_iso_oracle = eig_multiply(U, d_mv_iso_oracle)\n\n name = 'mv_isonlsq_oracle'\n _, d_mv_isonlsq_oracle = minvar_nls_oracle(\n X, S, lam, U, Sigma, isotonic=True)\n S_mv_isonlsq_oracle = eig_multiply(U, d_mv_isonlsq_oracle)\n result = portfolio_analysis(S_mv_isonlsq_oracle, Sigma, gamma, pi_true)\n results.append({name: result})\n for key in result:\n dfs[key].loc[j, name] = result[key]\n\n name = 'mv_isonlsq_kfold'\n _, d_mv_isonlsq_kfold = minvar_nls_kfold(X, S, lam, U, K)\n S_d_mv_isonlsq_kfold = eig_multiply(U, d_mv_isonlsq_kfold)\n result = portfolio_analysis(S_d_mv_isonlsq_kfold, Sigma, gamma, pi_true)\n results.append({name: result})\n for key in result:\n dfs[key].loc[j, name] = result[key]\n\n pbar.update()\n\n fig, ax = plt.subplots(figsize=figsize, ncols=5)\n fig.suptitle(\"Shrinkage Performance: N={}\".format(N))\n dfs['forecast_var_ratio'].boxplot(ax=ax[0])\n dfs['true_var_ratio'].boxplot(ax=ax[1])\n dfs['oos_var'].boxplot(ax=ax[2])\n dfs['is_var'].boxplot(ax=ax[3])\n dfs['te'].boxplot(ax=ax[4])\n\n ax[0].set_title('Forecast Variance Ratios')\n ax[1].set_title('True Variance Ratios')\n ax[2].set_title('Out-of-Sample Variance')\n ax[3].set_title('In-Sample Variance')\n ax[4].set_title('Tracking Error to True MinVar')\n\n ax[0].set_ylim((0, 2.))\n ax[1].set_ylim((0, 3.))\n\n ylim = (.5 * min(dfs['is_var'].values.min(), dfs['oos_var'].values.min()),\n 2 * max(dfs['is_var'].values.max(), dfs['oos_var'].values.max()))\n ax[2].set_ylim(ylim)\n ax[3].set_ylim(ylim)\n fig.autofmt_xdate(rotation=90)\n fig.subplots_adjust(left=0.05, right=0.95, bottom=.22,\n top=0.9, wspace=.36, hspace=.2)\n plt.show()", "def test_variability(self):\n # some reproducible arbitrariness\n np.random.seed(343143)\n\n n = 10\n t_max = 20.0\n dt = 0.1\n G = RandomLayer(n)\n\n M1 = simulation.EventMonitor(G)\n\n sim1 = simulation.Simulation(G, M1, dt=dt)\n sim1.run(t_max)\n \n M2 = simulation.EventMonitor(G)\n sim2 = simulation.Simulation(G, M2, dt=dt)\n sim2.run(t_max)\n\n self.assertNotEqual(len(M1.t), 0)\n self.assertNotEqual(len(M2.t), 0)\n self.assertNotEqual(M1.t, M2.t)", "def _var(self):\n return self.sumsquares / self.sum_weights", "def test_pmodel_energies(self, make_pmodel_energies):\n pmodel, data, heps, dheps, true_energies = make_pmodel_energies\n\n # first compute the total Gaussian energies between GROMACS and pmodel\n total_energy = np.zeros(np.shape(true_energies))\n for i in pmodel.use_params:\n total_energy += pmodel.model.Hamiltonian._pairs[i].V(data[:,i])\n assert np.max(total_energy - true_energies) < 0.2\n\n # now confirm that hepsilon calculates the correct difference in energies\n diff = heps(pmodel.epsilons + 0.1) - heps(pmodel.epsilons - 0.1)\n\n total_diff = np.zeros(np.shape(true_energies))\n for i in pmodel.use_params:\n pmodel.model.Hamiltonian._pairs[i].set_epsilon(pmodel.epsilons[i] + 0.1)\n total_diff += pmodel.model.Hamiltonian._pairs[i].V(data[:,i])\n\n for i in pmodel.use_params:\n pmodel.model.Hamiltonian._pairs[i].set_epsilon(pmodel.epsilons[i] - 0.1)\n total_diff += pmodel.model.Hamiltonian._pairs[i].V(data[:,i])\n\n # confirms potential energies are linear in epsilons\n assert np.max(total_diff - diff) < 0.001", "def 
var_ratio(M, N, y, cov_fun, gamma, lw, loo, K, ylim, figsize, seed):\n raise NotImplementedError(\n \"Not up-to-date with new versions of shrinkage functions\")\n np.random.seed(seed)\n T = y * N\n\n names = ['sample', 'lw_oracle', 'lw_iso_oracle', 'lw_kfold', 'lw_isokfold',\n 'mv_isonlsq_oracle', 'mv_isonlsq_kfold'] # , 'isomv_oracle']\n\n if loo:\n names.insert(1, 'lw_loo')\n names.insert(2, 'lw_isoloo')\n if lw:\n names.insert(1, 'lw')\n\n empty_df = pd.DataFrame(np.zeros((M, len(names))), columns=names)\n\n dfs = {\n 'oos_var': empty_df,\n 'is_var': empty_df.copy(),\n 'forecast_var_ratio': empty_df.copy(),\n 'true_var_ratio': empty_df.copy(),\n 'te': empty_df.copy(),\n }\n\n pbar = tqdm(total=M)\n\n results = []\n for j in range(M):\n # Build Model\n if cov_fun in ['slr', 'factor']:\n fm_seed = np.random.randint(1, 2**32 - 1)\n Sigma, tau = cov_functions[cov_fun](N, seed=fm_seed)\n else:\n Sigma, tau = cov_functions[cov_fun](N)\n pi_true = min_var_portfolio(Sigma, gamma=gamma)\n\n # Generate Data\n X = sample(Sigma, T)\n X = X - X.mean()\n S = cov(X)\n lam, U = eig(S)\n\n # Sample covariance\n name = 'sample'\n result = portfolio_analysis(S, Sigma, gamma, pi_true)\n results.append({name: result})\n for key in result:\n dfs[key].loc[j, name] = result[key]\n\n # Oracle LW NLS shrinkage\n name = 'lw_oracle'\n d_lw_oracle = nls_oracle(X, S, U, Sigma)\n S_lw_oracle = eig_multiply(U, d_lw_oracle)\n result = portfolio_analysis(S_lw_oracle, Sigma, gamma, pi_true)\n results.append({name: result})\n for key in result:\n dfs[key].loc[j, name] = result[key]\n\n name = 'lw_iso_oracle'\n d_lw_iso_oracle = isotonic_regression(d_lw_oracle)\n S_lw_iso_oracle = eig_multiply(U, d_lw_iso_oracle)\n result = portfolio_analysis(S_lw_iso_oracle, Sigma, gamma, pi_true)\n results.append({name: result})\n for key in result:\n dfs[key].loc[j, name] = result[key]\n\n # LW NLS shrinkage\n if lw:\n name = 'lw'\n S_lw = nlshrink_covariance(X, centered=True)\n result = portfolio_analysis(S_lw, Sigma, gamma, pi_true)\n results.append({name: result})\n for key in result:\n dfs[key].loc[j, name] = result[key]\n\n # LOO LW NLS shrinkage\n if loo:\n name = 'lw_loo'\n d_lw_loo = nls_loo(X, S, U)\n S_lw_loo = eig_multiply(U, d_lw_loo)\n result = portfolio_analysis(S_lw_loo, Sigma, gamma, pi_true)\n results.append({name: result})\n for key in result:\n dfs[key].loc[j, name] = result[key]\n\n name = 'lw_isoloo'\n d_lw_isoloo = isotonic_regression(d_lw_loo)\n S_lw_isoloo = eig_multiply(U, d_lw_isoloo)\n result = portfolio_analysis(S_lw_isoloo, Sigma, gamma, pi_true)\n results.append({name: result})\n for key in result:\n dfs[key].loc[j, name] = result[key]\n\n # K-fold LW NLS shrinkage\n name = 'lw_kfold'\n d_lw_kfold = nls_kfold(X, S, U, K)\n S_lw_kfold = eig_multiply(U, d_lw_kfold)\n result = portfolio_analysis(S_lw_kfold, Sigma, gamma, pi_true)\n results.append({name: result})\n for key in result:\n dfs[key].loc[j, name] = result[key]\n\n name = 'lw_isokfold'\n d_lw_isokfold = isotonic_regression(d_lw_kfold)\n S_lw_isokfold = eig_multiply(U, d_lw_isokfold)\n result = portfolio_analysis(S_lw_isokfold, Sigma, gamma, pi_true)\n results.append({name: result})\n for key in result:\n dfs[key].loc[j, name] = result[key]\n\n # MinVar NLS shrinkage\n d_mv_oracle = minvar_nls_oracle(X, S, lam, U, Sigma)\n # Note: the raw oracle values for MinVar shrinkage are likely to\n # produce negative eigenvalues, which means the minimum variance\n # portfolio cannot be reasonably computed. 
Computing variance\n # ratios for the MinVar shrinkage only works with some kind of\n # modification to the raw values.\n\n # Note: Applying isotonic regression after solving for the oracle values\n # is consistently way worse than solving the constrained LS problem so\n # it is omitted.\n # d_mv_iso_oracle = isotonic_regression(d_mv_oracle)\n # S_mv_iso_oracle = eig_multiply(U, d_mv_iso_oracle)\n\n name = 'mv_isonlsq_oracle'\n d_mv_isonlsq_oracle = minvar_nls_oracle(\n X, S, lam, U, Sigma, isotonic=True)\n S_mv_isonlsq_oracle = eig_multiply(U, d_mv_isonlsq_oracle)\n result = portfolio_analysis(S_mv_isonlsq_oracle, Sigma, gamma, pi_true)\n results.append({name: result})\n for key in result:\n dfs[key].loc[j, name] = result[key]\n\n name = 'mv_isonlsq_kfold'\n d_mv_isonlsq_kfold = minvar_nls_kfold(X, S, lam, U, K)\n S_d_mv_isonlsq_kfold = eig_multiply(U, d_mv_isonlsq_kfold)\n result = portfolio_analysis(S_d_mv_isonlsq_kfold, Sigma, gamma, pi_true)\n results.append({name: result})\n for key in result:\n dfs[key].loc[j, name] = result[key]\n\n pbar.update()\n\n fig, ax = plt.subplots(figsize=figsize, ncols=5)\n fig.suptitle(\"Shrinkage Performance: N={}\".format(N))\n dfs['forecast_var_ratio'].boxplot(ax=ax[0])\n dfs['true_var_ratio'].boxplot(ax=ax[1])\n dfs['oos_var'].boxplot(ax=ax[2])\n dfs['is_var'].boxplot(ax=ax[3])\n dfs['te'].boxplot(ax=ax[4])\n\n ax[0].set_title('Forecast Variance Ratios')\n ax[1].set_title('True Variance Ratios')\n ax[2].set_title('Out-of-Sample Variance')\n ax[3].set_title('In-Sample Variance')\n ax[4].set_title('Tracking Error to True MinVar')\n\n ax[0].set_ylim((0, 2.))\n ax[1].set_ylim((0, 3.))\n\n ylim = (.5 * min(dfs['is_var'].values.min(), dfs['oos_var'].values.min()),\n 2 * max(dfs['is_var'].values.max(), dfs['oos_var'].values.max()))\n ax[2].set_ylim(ylim)\n ax[3].set_ylim(ylim)\n fig.autofmt_xdate(rotation=90)\n fig.subplots_adjust(left=0.05, right=0.95, bottom=.22,\n top=0.9, wspace=.36, hspace=.2)\n plt.show()", "def variational_distribution(self):\n activation = tf.nn.relu\n if self.linear:\n activation = None\n\n #q(z | x, s)\n if self.log_variational:\n x = tf.log(1 + self.expression)\n else:\n x = self.expression\n\n h = dense(x, self.n_hidden, activation=activation, \\\n bn=True, keep_prob=self.dropout_rate, phase=self.training_phase)\n for layer in range(2, self.n_layers + 1):\n h = dense(h, self.n_hidden, activation=activation, \\\n bn=True, keep_prob=self.dropout_rate, phase=self.training_phase)\n\n \n self.qz_m = dense(h, self.n_latent, activation=None, \\\n bn=False, keep_prob=None, phase=self.training_phase)\n self.qz_v = dense(h, self.n_latent, activation=tf.exp, \\\n bn=False, keep_prob=None, phase=self.training_phase)\n \n if self.scalings:\n # q(l | x, s)\n h = dense(x, self.n_hidden, activation=activation, \\\n bn=True, keep_prob=self.dropout_rate, phase=self.training_phase)\n self.ql_m = dense(h, 1, activation=None, \\\n bn=False, keep_prob=None, phase=self.training_phase)\n self.ql_v = dense(h, 1, activation=tf.exp, \\\n bn=False, keep_prob=None, phase=self.training_phase)", "def example1(N):\n\tX = np.random.rand(N)\n\tI_estm = np.mean(X**3)\n\ts_square = np.var(X**3) *N / (N-1)\n\tstd_error = np.sqrt(s_square/N) \n\tprint(\"simulation estimate:\", I_estm)\n\tprint(\"std error of estimate:\", std_error)", "def test_panel_model_var_Within_tilde(setup_fixed_model, expected_panel_model):\n calc_beta_Within_tilde, calc_var_beta_tilde, calc_sigma_v = fixed_effects_model(**setup_fixed_model)\n assert np.allclose(calc_var_beta_tilde, 
expected_panel_model['var_beta_tilde'])", "def evaluate_valfunc(self, opponent: Player):\n for policy in self.policy_list:\n summation = 0\n if self.turns == 0:\n policy.previous_Vs = dict(policy.Vs)\n break\n for state in policy.Vs:\n if state not in policy.previous_Vs:\n policy.previous_Vs[state] = 0\n if len(policy.previous_Vs) != len(policy.Vs):\n print(\"These dictionaries do not match!\") \n for key1 in policy.Vs:\n for key2 in policy.previous_Vs:\n if key1 == key2:\n delta = policy.Vs[key1] - policy.previous_Vs[key2]\n delta_squared = delta**2\n summation += delta_squared\n n = len(policy.Vs)\n MSE = summation/n\n RMSE = sqrt(MSE)\n self.Vval_list[policy.name_id][opponent.name].append(RMSE)\n #if MSE < self.convergence_threshold:\n #print(f\"Converged at step {self.total_steps()} for memory length {policy.memory_length} against {opponent.name}.\")\n policy.previous_Vs = dict(policy.Vs)", "def test_demand_variability(self):\n demand_variability = self._uncertain_demand.demand_variability\n avg_order = sum([int(item) for item in self._data_set.values()]) //len(self._data_set)\n variance = [(item - avg_order) for item in self._data_set.values()]\n stdev = pow(sum([pow(j, 2) for j in variance]) / len(self._data_set), 0.5)\n cal_variability = lambda x, y: x / y\n test_variability = cal_variability(stdev, avg_order)\n self.assertEqual(demand_variability, test_variability)", "def test_average_potential_differences(self):\n t = self.t\n\n np.testing.assert_array_almost_equal(\n self.phi_s_n_av(t) - self.phi_e_n_av(t), self.delta_phi_n_av(t)\n )\n np.testing.assert_array_almost_equal(\n self.phi_s_p_av(t) - self.phi_e_p_av(t), self.delta_phi_p_av(t)\n )", "def fitness(pop):\n fit_val = []\n for s in range(len(pop)):\n #Grab the pop PID values\n Kp = pop[s][1]\n Ki = pop[s][2]\n Kd = pop[s][0]\n \n #Simulates the current system [s].\n def sys2PID(t,x):\n global force_constraint\n\n r=set_interp(t)\n\n #State Variables\n y = x[0] # x1 POSITION\n dydt = x[1] # x2 VELOCITY\n yi = x[2] # x3\n\n u = Kp * (r - y) + Ki * yi - Kd * dydt #PID output\n\n dxdt = [0,0,0]\n\n dxdt[0] = dydt\n dxdt[1] = (- c * dydt - k * y + u)/m\n dxdt[2] = r - y\n\n return [dxdt[0],dxdt[1],dxdt[2]]\n \n temp = round(0.00, 2)\n tev = []\n for times in range(int(20/dt)):\n tev.append(temp)\n temp = round(temp + dt, 2)\n x_ini = [0,0,0] # initial conditions\n solga = solve_ivp(sys2PID, [0, 20], x_ini, t_eval=tev)\n y_out = solga.y[0, :]\n t_out = solga.t\n\n err_val = 0.0\n for y in range(len(t_out)) :\n err_val = err_val + abs(set_point[y] - y_out[y]) \n\n fit_val.insert(s, err_val)\n return fit_val", "def simulationTwoDrugsVirusPopulations():\n #TODO", "def test_variational():\n # iris\n #pres = \"Test pour le data set Iris (facile, classique)\"\n #test_from_func_variational(pres, 15, 10, 3, True, Iris)\n\n # breast cancer\n pres = \"Test pour le data set Breast Cancer (facile, classique)\"\n test_from_func_variational(pres, 15, 10, 3, True, Breast_cancer)\n\n # digits\n # pres = \"Test pour le data set Digits (difficile, classique)\"\n # test_from_func(pres, 10, 10, 10, True, Digits, quantum_instance)\n\n # wine\n # pres = \"Test pour le data set Wine (moyen, classique)\"\n # test_from_func(pres, 15, 10, 5, True, Wine, quantum_instance)\n\n # gaussian\n pres = \"Test pour des données gaussiennes (moyen, classique)\"\n for _ in range(1):\n print(\"\\n\")\n print(\"New iteration\")\n test_from_func_variational(pres, 25, 10, 2, True, Gaussian)\n print(\"\\n\")\n\n # small adn strings\n pres = \"Test pour des séquences ADN 
courtes (difficile, classique)\"\n test_from_func_variational(pres, 10, 15, 14, True, Sequence)\n\n #Quantum data\n pres = \"Test pour des données générées par ordinateur quantique (facile, quantique)\"\n print(pres)\n _, samp_train, samp_test, labels = ad_hoc_data(15, 10, 2, 0.3, True)\n sample_m, sample_p = stock_get(20, 0.3)\n\n labels_me = [-1, 1]\n samp_train_me = {-1: np.array(sample_m[:15]), 1: np.array(sample_p[:15])}\n samp_test_me = {-1: np.array(sample_m[15:]), 1: np.array(sample_p[15:])}\n print(samp_train)\n print(samp_train_me)\n print(samp_test)\n print(samp_test_me)\n\n my_impl_variational(samp_train, samp_test, labels)\n print(\"Pour autres données quantiques\")\n my_impl_variational(samp_train_me, samp_test_me, labels_me)", "def test_predict_uncertain_inputs(self):\n X = np.linspace(-5,5, 10)[:, None]\n Y = 2*X + np.random.randn(*X.shape)*1e-3\n m = GPy.models.BayesianGPLVM(Y, 1, X=X, kernel=GPy.kern.Linear(1), num_inducing=1)\n m.Gaussian_noise[:] = 1e-4\n m.X.mean[:] = X[:]\n m.X.variance[:] = 1e-5\n m.X.fix()\n m.optimize()\n X_pred_mu = np.random.randn(5, 1)\n X_pred_var = np.random.rand(5, 1) + 1e-5\n from GPy.core.parameterization.variational import NormalPosterior\n X_pred = NormalPosterior(X_pred_mu, X_pred_var)\n # mu = \\int f(x)q(x|mu,S) dx = \\int 2x.q(x|mu,S) dx = 2.mu\n # S = \\int (f(x) - m)^2q(x|mu,S) dx = \\int f(x)^2 q(x) dx - mu**2 = 4(mu^2 + S) - (2.mu)^2 = 4S\n Y_mu_true = 2*X_pred_mu\n Y_var_true = 4*X_pred_var\n Y_mu_pred, Y_var_pred = m.predict_noiseless(X_pred)\n np.testing.assert_allclose(Y_mu_true, Y_mu_pred, rtol=1e-3)\n np.testing.assert_allclose(Y_var_true, Y_var_pred, rtol=1e-3)", "def expected_improvement(model: ProbabilisticModel, eta: TensorType, at: TensorType) -> TensorType:\n mean, variance = model.predict(at)\n normal = tfp.distributions.Normal(mean, tf.sqrt(variance))\n return (eta - mean) * normal.cdf(eta) + variance * normal.prob(eta)", "def variational_objective(params, t, num_samples, beta=1.):\n\n # 1. draw samples from the variational posterior, eps ~ N(0,I)\n zs, ldet_sums = draw_variational_samples(params, num_samples)\n\n # 1.5 negative entropy of z0 --- likely we need this for KL though\n # not needed for optimization\n\n # 2. compute expected value of the sum of jacobian terms\n E_ldet_sum = np.mean(ldet_sums)\n\n # 3. compute data term\n lls = logprob(zs, t)\n E_logprob = np.mean(lls)\n\n if debug_print:\n print \"entropy term: \", E_ldet_sum\n print \"data term : \", E_logprob, \" (+/- \", np.std(lls), \")\", \" min = \", np.min(lls)\n\n # return lower bound\n beta = 1. 
if t >= len(beta_schedule) else beta_schedule[t]\n lower_bound = beta * E_logprob + E_ldet_sum\n return -lower_bound", "def calculate_vars(self):\n pass", "def V_var(self) -> Optional[np.ndarray]:\n\n def _retrieve(fm: VariationalFM) -> np.ndarray:\n return fm.V_var\n\n return runtime_error_to_optional(self, _retrieve)", "def rkm_MS_pathvar(models, s_span, X):\n W_dst_var=np.ndarray(models.shape[0],np.float64)\n for i in range(models.shape[0]):\n W = models[i,:,:]\n res = W[1:,:]-W[0:-1,:]\n W_dst=np.linalg.norm(res, axis=1)\n W_dst_var[i] = np.var(W_dst)\n\n return W_dst_var", "def policy_eval_v(policy, env, discount_factor=1.0, theta=0.00001):\n # Start with an all 0 value function\n V = np.zeros(env.nS)\n \n # loop door alle states heen \n # sla de oude state value op \n # Bereken de nieuwe state value door de SOM (kans omhoog * loop over waar je terrecht kunt komen * reward) kans omlaag..\n # kijk of je nog door moet gaan of stoppen\n delta = 1000 \n while delta > theta:\n # for x in range(2):\n delta = 0\n \n# loop throw possible states\n for state in range(env.nS):\n old_state_value = V[state]\n new_state_value = 0\n\n # loop shrow possible actions in state\n for action in range(env.nA):\n\n # print(\"kans omhoog\", policy[state][action])\n # print(\"kans omhoog uitkomen\", env.P[state][action][0][0])\n # print(\"direct reward\",env.P[state][action][0][2] )\n # print(\"value of that new state\", discount_factor * V[env.P[state][action][0][1]] )\n\n current_state_value = policy[state][action] * env.P[state][action][0][0] * ( env.P[state][action][0][2] + ( discount_factor * V[env.P[state][action][0][1]] ) ) \n# print(\"current state value\", current_state_value)\n new_state_value += current_state_value\n \n delta = max(delta, abs(old_state_value - new_state_value))\n V[state] = new_state_value\n# print(V[state])\n# print(\"delta\", delta)\n return np.array(V)", "def eval_test(self, rng_key, svi_state):\n def body_fn(i, loss_sum):\n rng_key_i = random.fold_in(rng_key, i) \n rng_key_i, rng_key_ls, rng_key_var, rng_key_sigma = random.split(rng_key_i, 4)\n \n length_i = numpyro.sample(\"length\", dist.InverseGamma(1,.1), rng_key=rng_key_ls)\n var_i = numpyro.sample(\"var\", dist.LogNormal(0,0.1), rng_key=rng_key_var)\n sigma_i = numpyro.sample(\"noise\", dist.HalfNormal(0.1), rng_key=rng_key_sigma)\n \n batch = self.gp_predictive(rng_key_i, self.x\n , ls=length_i, var=var_i, sigma=sigma_i\n )\n\n loss = self.svi.evaluate(svi_state, batch['y']) / self.batch_size\n loss_sum += loss\n return loss_sum\n\n loss = lax.fori_loop(0, self.num_test, body_fn, 0.0)\n loss = loss / self.num_test\n\n return loss", "def specsim(gr, gsm):\n\n if gsm.ndim == 2:\n yy, xx = np.meshgrid(np.arange(-gr.ny*0.5*gr.dy, gr.ny*0.5*gr.dy, gr.dy),\n np.arange(-gr.nx*0.5*gr.dx, gr.nx*0.5*gr.dx, gr.dx))\n h = ((xx / gsm.lx) ** 2 + (yy / gsm.ly) ** 2) ** 0.5 # Compute distance from origin\n\n elif gsm.ndim == 3:\n yy, xx, zz = np.meshgrid(np.arange(-gr.ny*0.5*gr.dy, gr.ny*0.5*gr.dy, gr.dy),\n np.arange(-gr.nx*0.5*gr.dx, gr.nx*0.5*gr.dx, gr.dx),\n np.arange(-gr.nz*0.5*gr.dz, gr.nz*0.5*gr.dz, gr.dz))\n\n # Compute distance from origin\n h = ((xx / gsm.lx) ** 2 + (yy / gsm.ly) ** 2 + (zz / gsm.lz) ** 2) ** 0.5\n\n ntot = np.size(xx)\n\n # Covariance matrix of variables\n if gsm.cmodel == 'Gau':\n # Gaussian covariance model\n ryy = np.exp(-h**2) * gsm.sig2\n elif gsm.cmodel == 'Exp':\n # Exponential covariance model\n ryy = np.exp(-np.abs(h)) * gsm.sig2\n else:\n ValueError('Invalid covariance model')\n\n # Power spectrum 
of variable\n syy = np.fft.fftn(np.fft.fftshift(ryy)) / ntot\n syy = np.abs(syy) # Remove imaginary artifacts\n if gsm.ndim == 2:\n syy[0, 0] = 0\n else:\n syy[0, 0, 0] = 0\n\n # st.norm.rvs calls cost a bit more than np.radom.randn\n # real = st.norm.rvs(size=syy.shape)\n # imag = st.norm.rvs(size=syy.shape)\n real = np.random.randn(*syy.shape)\n imag = np.random.randn(*syy.shape)\n epsilon = real + 1j*imag\n rand = epsilon * np.sqrt(syy)\n bigy = np.real(np.fft.ifftn(rand * ntot))\n\n return bigy", "def modality(v):\n \n s = st.skew(vel, bias=False)\n k = st.kurtosis(vel, bias=False)\n m = (1+s**2)/(3+k**2)\n return s, k, m", "def test_height_and_fwhm_expression_evalution_in_builtin_models():\n mod = models.GaussianModel()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9)\n params.update_constraints()\n\n mod = models.LorentzianModel()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9)\n params.update_constraints()\n\n mod = models.SplitLorentzianModel()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, sigma_r=1.0)\n params.update_constraints()\n\n mod = models.VoigtModel()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, gamma=1.0)\n params.update_constraints()\n\n mod = models.PseudoVoigtModel()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, fraction=0.5)\n params.update_constraints()\n\n mod = models.MoffatModel()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, beta=0.0)\n params.update_constraints()\n\n mod = models.Pearson7Model()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, expon=1.0)\n params.update_constraints()\n\n mod = models.StudentsTModel()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9)\n params.update_constraints()\n\n mod = models.BreitWignerModel()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, q=0.0)\n params.update_constraints()\n\n mod = models.LognormalModel()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9)\n params.update_constraints()\n\n mod = models.DampedOscillatorModel()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9)\n params.update_constraints()\n\n mod = models.DampedHarmonicOscillatorModel()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, gamma=0.0)\n params.update_constraints()\n\n mod = models.ExponentialGaussianModel()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, gamma=0.0)\n params.update_constraints()\n\n mod = models.SkewedGaussianModel()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, gamma=0.0)\n params.update_constraints()\n\n mod = models.SkewedVoigtModel()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, gamma=0.0,\n skew=0.0)\n params.update_constraints()\n\n mod = models.DonaichModel()\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, gamma=0.0)\n params.update_constraints()\n\n mod = models.StepModel()\n for f in ('linear', 'arctan', 'erf', 'logistic'):\n params = mod.make_params(amplitude=1.0, center=0.0, sigma=0.9, form=f)\n params.update_constraints()\n\n mod = models.RectangleModel()\n for f in ('linear', 'arctan', 'erf', 'logistic'):\n params = mod.make_params(amplitude=1.0, center1=0.0, sigma1=0.0,\n center2=0.0, sigma2=0.0, form=f)\n params.update_constraints()", "def GMMClassfierVal(GMMs,Xtest):\n prob = np.zeros((Xtest.shape[0], len(GMMs)))\n \n #pista explora los metodos de la libreria, que metodo retorna probabilidades?\n for k,v in GMMs.items():\n \n \n # la etiqueta 
la asignas seleccionando le maximo de probabilidad\n Yest= \n \n return Yest, prob", "def simulationTestGaussian2(params):\r\n x = gaussian(params[0], params[1], mu-3.5*sigma, mu+3.5*sigma)\r\n error = np.sum(np.power(optimal - x, 2))/optimal.shape[0]\r\n return 1/error", "def pm_variance_test(gev, no_states, n_channels):\n return gev * np.power(\n (1.0 / (n_channels - 1)) * (n_channels - 1 - no_states), -2\n )", "def testFactorDGP(self):\n N1, N0 = 2, 100\n treated_units = [0, 1]\n T0, T1 = 20, 10\n K, R, F = 5, 5, 5\n (\n Cov_control,\n Cov_treated,\n Out_pre_control,\n Out_pre_treated,\n Out_post_control,\n Out_post_treated,\n ) = factor_dgp(N0, N1, T0, T1, K, R, F)\n\n Cov = np.vstack((Cov_treated, Cov_control))\n Out_pre = np.vstack((Out_pre_treated, Out_pre_control))\n Out_post = np.vstack((Out_post_treated, Out_post_control))\n\n SC.estimate_effects(\n Out_pre,\n Out_post,\n treated_units,\n Cov,\n # constrain=\"simplex\", -- handled by argparse now..\n **command_line_options,\n )\n\n # print(fit_res)\n # est_res = SC.estimate_effects(\n # Cov, Out_pre, Out_post, treated_units, V_penalty=0, W_penalty=0.001\n # )\n # print(est_res)", "def _variance(self,gp):\r\n return self.variance", "def test_potential_differences(self):\n t, x_n, x_p = self.t, self.x_n, self.x_p\n\n np.testing.assert_array_almost_equal(\n self.phi_s_n(t, x_n) - self.phi_e_n(t, x_n), self.delta_phi_n(t, x_n)\n )\n np.testing.assert_array_almost_equal(\n self.phi_s_p(t, x_p) - self.phi_e_p(t, x_p),\n self.delta_phi_p(t, x_p),\n decimal=5,\n )", "def variational_expectations(self, Fmu, Fvar, Y):\n return ndiagquad(self.logp, self.num_gauss_hermite_points, Fmu, Fvar, Y=Y)", "def define_variables(m):\r\n\r\n # Non-negative candidate capacity\r\n m.mu_1 = Var(m.G_C, m.Y, within=NonNegativeReals, initialize=0)\r\n\r\n # Solar build limits\r\n m.mu_2 = Var(m.Z, m.Y, within=NonNegativeReals, initialize=0)\r\n\r\n # Wind build limits\r\n m.mu_3 = Var(m.Z, m.Y, within=NonNegativeReals, initialize=0)\r\n\r\n # Storage build limits\r\n m.mu_4 = Var(m.Z, m.Y, within=NonNegativeReals, initialize=0)\r\n\r\n # Min power output (all generators excluding storage units)\r\n m.sigma_1 = Var(m.G.difference(m.G_STORAGE), m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max power output - existing thermal\r\n m.sigma_2 = Var(m.G_E_THERM, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max power output - candidate thermal\r\n m.sigma_3 = Var(m.G_C_THERM, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max power output - existing wind\r\n m.sigma_4 = Var(m.G_E_WIND, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max power output - candidate wind\r\n m.sigma_5 = Var(m.G_C_WIND, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max power output - existing solar\r\n m.sigma_6 = Var(m.G_E_SOLAR, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max power output - candidate solar\r\n m.sigma_7 = Var(m.G_C_SOLAR, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max power output - hydro\r\n m.sigma_8 = Var(m.G_E_HYDRO, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Min charging power - storage units\r\n m.sigma_9 = Var(m.G_STORAGE, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Min discharging power - storage_units\r\n m.sigma_10 = Var(m.G_STORAGE, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max charging power - existing storage\r\n m.sigma_11 = Var(m.G_E_STORAGE, m.Y, m.S, m.T, 
within=NonNegativeReals, initialize=0)\r\n\r\n # Max charging power - candidate storage\r\n m.sigma_12 = Var(m.G_C_STORAGE, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max discharging power - existing storage\r\n m.sigma_13 = Var(m.G_E_STORAGE, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max discharging power - candidate storage\r\n m.sigma_14 = Var(m.G_C_STORAGE, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Min energy - storage units\r\n m.sigma_15 = Var(m.G_STORAGE, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max energy - existing storage units\r\n m.sigma_16 = Var(m.G_E_STORAGE, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max energy - candidate storage\r\n m.sigma_17 = Var(m.G_C_STORAGE, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Min energy - interval end\r\n m.sigma_18 = Var(m.G_STORAGE, m.Y, m.S, within=NonNegativeReals, initialize=0)\r\n\r\n # Max energy - interval end\r\n m.sigma_19 = Var(m.G_STORAGE, m.Y, m.S, within=NonNegativeReals, initialize=0)\r\n\r\n # Ramp-rate up (thermal and hydro generators)\r\n m.sigma_20 = Var(m.G_THERM.union(m.G_E_HYDRO), m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Ramp-rate down (thermal and hydro generators)\r\n m.sigma_23 = Var(m.G_THERM.union(m.G_E_HYDRO), m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Non-negative lost load power\r\n m.sigma_26 = Var(m.Z, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Min powerflow\r\n m.sigma_27 = Var(m.L, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max powerflow\r\n m.sigma_28 = Var(m.L, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Storage energy transition\r\n m.zeta_1 = Var(m.G_STORAGE, m.Y, m.S, m.T, initialize=0)\r\n\r\n # Power balance (locational marginal price)\r\n m.lamb = Var(m.Z, m.Y, m.S, m.T, initialize=0)\r\n\r\n return m", "def evp(self, tmin=None, tmax=None):\n res = self.ml.residuals(tmin=tmin, tmax=tmax)\n obs = self.ml.observations(tmin=tmin, tmax=tmax)\n if obs.var() == 0.0:\n return 100.\n else:\n evp = max(0.0, 100 * (1 - (res.var() / obs.var())))\n return evp", "def test_vector_class():\n points = 10\n riskfree = .03\n maturity = 30/365\n moneyness = np.linspace(-.04, .04, points)\n premium = np.ones_like(moneyness) * .05\n call = True\n data = {'riskfree': riskfree, 'maturity': maturity,\n 'moneyness': moneyness, 'call': call, 'premium': premium}\n\n sigma = np.ones(points) * .13\n bsm = BSmodel(sigma, data)\n\n print(bsm.premium())\n\n weights = np.ones(points) * .63\n means = np.vstack([np.ones(points) * -.01, np.ones(points) * .09])\n stds = np.vstack([np.ones(points) * .16, np.ones(points) * .05])\n param = np.vstack([weights, means, stds])\n mbs = MBSmodel(param, data)\n\n print(mbs.premium())\n\n param_a, param_p = np.ones(points) * 4.5, np.ones(points) * 2\n param_c = -.05 * np.ones(points)\n gb2 = GB2model([param_a, param_p, param_c], data)\n\n print(gb2.premium())", "def variance(operator, state):\n return (expectation(operator**2, state) - expectation(operator, state)**2)", "def test_set_vs(self):\n s = State(substance=\"water\")\n s.vs = Q_(0.4772010021515822, \"m**3/kg\"), Q_(3028.9867985920914, \"J/(kg*K)\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(373.1242958476843, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.vs[0], Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert 
np.isclose(s.vs[1], Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.u, Q_(1013250, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.28475636946248034, \"dimensionless\")) # type: ignore", "def _tstat_all(self):\n return np.squeeze(self.solution) / self._se_all", "def compute_policy_v(env, policy, gamma=1.0):\n v = np.zeros(env.nS)\n eps = 1e-10\n num_iter = 0\n while True:\n prev_v = np.copy(v)\n for s in range(env.nS):\n policy_a = policy[s]\n v[s] = sum([p * (r + gamma * prev_v[s_]) for p, s_, r, _ in env.P[s][policy_a]])\n if (np.sum((np.fabs(prev_v - v))) <= eps):\n # value converged\n break\n num_iter += 1\n return v, num_iter", "def simulate(nSamples, randVar):\n expectation, variance = [], []\n\n for nSample in nSamples:\n draws = randVar.rvs(nSample)\n expectation.append(np.mean(draws))\n variance.append(np.var(draws))\n\n return expectation, variance", "def _compute_GP_variables(self):\r\n Wi = 1.0/self.W\r\n self.Sigma_tilde = np.diagflat(Wi)\r\n\r\n Y_tilde = Wi*self.Ki_f + self.f_hat\r\n\r\n self.Wi_K_i = self.W12BiW12\r\n ln_det_Wi_K = pddet(self.Sigma_tilde + self.K)\r\n lik = self.noise_model.logpdf(self.f_hat, self.data, extra_data=self.extra_data)\r\n y_Wi_K_i_y = mdot(Y_tilde.T, self.Wi_K_i, Y_tilde)\r\n\r\n Z_tilde = (+ lik\r\n - 0.5*self.ln_B_det\r\n + 0.5*ln_det_Wi_K\r\n - 0.5*self.f_Ki_f\r\n + 0.5*y_Wi_K_i_y\r\n + self.NORMAL_CONST\r\n )\r\n\r\n #Convert to float as its (1, 1) and Z must be a scalar\r\n self.Z = np.float64(Z_tilde)\r\n self.Y = Y_tilde\r\n self.YYT = np.dot(self.Y, self.Y.T)\r\n self.covariance_matrix = self.Sigma_tilde\r\n self.precision = 1.0 / np.diag(self.covariance_matrix)[:, None]\r\n\r\n #Compute dZ_dK which is how the approximated distributions gradients differ from the dL_dK computed for other likelihoods\r\n self.dZ_dK = self._Kgradients()\r\n #+ 0.5*self.Wi_K_i - 0.5*np.dot(self.Ki_f, self.Ki_f.T) #since we are not adding the K gradients explicit part theres no need to compute this again\r", "def fit_model_sum_vgm(list_model: list[str], emp_vgm_df: pd.DataFrame) -> tuple[Callable, list[float]]:\n # TODO: expand to other models than spherical, exponential, gaussian (more than 2 arguments)\n def vgm_sum(h, *args):\n fn = 0\n i = 0\n for model in list_model:\n fn += skg.models.spherical(h, args[i], args[i+1])\n # fn += vgm(h, model=model,crange=args[i],psill=args[i+1])\n i += 2\n\n return fn\n\n # use shape of empirical variogram to assess rough boundaries/first estimates\n n_average = np.ceil(len(emp_vgm_df.exp.values) / 10)\n exp_movaverage = np.convolve(emp_vgm_df.exp.values, np.ones(int(n_average))/n_average, mode='valid')\n grad = np.gradient(exp_movaverage, 2)\n # maximum variance\n max_var = np.max(exp_movaverage)\n\n # to simplify things for scipy, let's provide boundaries and first guesses\n p0 = []\n bounds = []\n for i in range(len(list_model)):\n\n # use largest boundaries possible for our problem\n psill_bound = [0, max_var]\n range_bound = [0, emp_vgm_df.bins.values[-1]]\n\n # use psill evenly distributed\n psill_p0 = ((i+1)/len(list_model))*max_var\n # use corresponding ranges\n\n # this fails when no empirical value crosses this (too wide binning/nugget)\n # ind = np.array(np.abs(exp_movaverage-psill_p0)).argmin()\n # range_p0 = emp_vgm_df.bins.values[ind]\n range_p0 = 
((i+1)/len(list_model))*emp_vgm_df.bins.values[-1]\n\n # TODO: if adding other variogram models, add condition here\n\n # add bounds and guesses with same order as function arguments\n bounds.append(range_bound)\n bounds.append(psill_bound)\n\n p0.append(range_p0)\n p0.append(psill_p0)\n\n bounds = np.transpose(np.array(bounds))\n\n if np.all(np.isnan(emp_vgm_df.exp_sigma.values)):\n valid = ~np.isnan(emp_vgm_df.exp.values)\n cof, cov = curve_fit(vgm_sum, emp_vgm_df.bins.values[valid],\n emp_vgm_df.exp.values[valid], method='trf', p0=p0, bounds=bounds)\n else:\n valid = np.logical_and(~np.isnan(emp_vgm_df.exp.values), ~np.isnan(emp_vgm_df.exp_sigma.values))\n cof, cov = curve_fit(vgm_sum, emp_vgm_df.bins.values[valid], emp_vgm_df.exp.values[valid],\n method='trf', p0=p0, bounds=bounds, sigma=emp_vgm_df.exp_sigma.values[valid])\n\n # rewriting the output function: couldn't find a way to pass this with functool.partial because arguments are unordered\n def vgm_sum_fit(h):\n fn = 0\n i = 0\n for model in list_model:\n fn += skg.models.spherical(h, cof[i], cof[i+1])\n # fn += vgm(h, model=model,crange=args[i],psill=args[i+1])\n i += 2\n\n return fn\n\n return vgm_sum_fit, cof", "def test_set_pv(self):\n s = State(substance=\"water\")\n s.pv = Q_(101325.0, \"Pa\"), Q_(0.4772010021515822, \"m**3/kg\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(373.1242958476843, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.pv[0], Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.pv[1], Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.u, Q_(1013250.0, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.28475636946248034, \"dimensionless\")) # type: ignore\n s.pv = Q_(101325.0, \"Pa\"), Q_(3.189303132125469, \"m**3/kg\")\n assert np.isclose(s.T, Q_(700.9882316847855, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.pv[0], Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.pv[1], Q_(3.189303132125469, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.u, Q_(3013250.0, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(8623.283568815832, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(3.189303132125469, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(3336406.139862406, \"J/kg\")) # type: ignore\n assert s.x is None", "def test_coeffvar(self):\n self.assertEqual(coeffvar(list1, sample=False), np.std(list1) /\n np.mean(list1))\n self.assertEqual(coeffvar(list1), np.std(list1, ddof=1) /\n np.mean(list1))", "def predictionAndVariance(self,a):\n b = np.dot(a,self.x)\n vc = self.standardError2()\n #x is distributed according to a gaussian with mean self.x and\n #variance solutionCovariance. 
Dot product has variance\n #Var(a^T x) = a^T Var(x) a\n #add on the \n return (b, vc * (1.0 + np.dot(a,np.dot(self.AtAinv,a))))", "def variance(self):\n sum_sqdif = 0 # initialize sum of squared differences\n # Calculate sum of squared differences\n for site in self.sites:\n sqdif = (site.siteZmArea - self.meanZmArea()) ** 2\n sum_sqdif = sqdif + sum_sqdif \n # Standard Deviation\n stddev = ((1 / ( float(self.ni) - 1 )) * sum_sqdif ) ** 0.5\n # Variance\n var = stddev ** 2\n return var", "def estimate(self):\n mu = self.mean()\n var = np.average((self.particles - mu) ** 2, weights=self.weights, axis=0)\n\n return mu, var", "def test_conservation(self):\n self.c_s_tot = (\n self.c_s_n_tot(self.solution.t)\n + self.c_s_p_tot(self.solution.t)\n + self.c_SEI_n_tot(self.solution.t)\n + self.c_SEI_p_tot(self.solution.t)\n + self.c_Li_n_tot(self.solution.t)\n + self.c_Li_p_tot(self.solution.t)\n )\n diff = (self.c_s_tot[1:] - self.c_s_tot[:-1]) / self.c_s_tot[:-1]\n if \"profile\" in self.model.options[\"particle\"]:\n np.testing.assert_array_almost_equal(diff, 0, decimal=10)\n elif self.model.options[\"surface form\"] == \"differential\":\n np.testing.assert_array_almost_equal(diff, 0, decimal=10)\n elif self.model.options[\"SEI\"] == \"ec reaction limited\":\n np.testing.assert_array_almost_equal(diff, 0, decimal=12)\n else:\n np.testing.assert_array_almost_equal(diff, 0, decimal=15)", "def test_Gaussian_NB_estimators():", "def compute_policy_v(env, policy, gamma=1.0):\n number_of_states = env.unwrapped.nS\n v = np.zeros(number_of_states)\n env_policy = env.unwrapped.P\n\n eps = 1e-10\n i = 0\n while True:\n\n prev_v = np.copy(v)\n for s in range(number_of_states):\n policy_a = policy[s]\n v[s] = sum([p * (r + gamma * prev_v[s_]) for p, s_, r, _ in env_policy[s][policy_a]])\n if (np.sum((np.fabs(prev_v - v))) <= eps):\n # value converged\n break\n else:\n i += 1\n\n return v, i", "def test_variance(self):\n self.assertEqual(variance(list1, sample=False), np.var(list1))\n self.assertEqual(variance(list1), np.var(list1, ddof=1))", "def update_variables_RMSProp(alpha, beta2, epsilon, var, grad, s):\n s = (s * beta2) + ((1 - beta2) * (grad ** 2))\n var = var - ((alpha * grad) / (s ** (1/2) + epsilon))\n return var, s", "def specwv(fx,tstep=2**5,nfbins=2**10,nhs=2**8,nhwv=2**9-1,ngwv=2**3-1,df=1.0):\r\n \r\n #calculate stft\r\n pst,tlst,flst=stft(fx,nh=nhs,tstep=tstep,nfbins=nfbins,df=df)\r\n \r\n #calculate new time step so WVD and STFT will align\r\n ntstep=len(fx)/(len(tlst)*2.)\r\n \r\n #calculate spwvd\r\n pwv,twv,fwv=spwvd(fx,tstep=ntstep,nfbins=nfbins,df=df,nh=nhwv,ng=ngwv)\r\n \r\n #multiply the two together normalize\r\n tfarray=pst/pst.max()*pwv/pwv.max()\r\n \r\n return tfarray,tlst,flst", "def test_rr_se(results):\n truese = np.asarray([2.09826858, 30.60745128, 108.51947421, 0.95693751,\n 0.6564318])\n test_se = results.params_se()\n assert test_se == pytest.approx(truese)", "def test_compute_model(self):\n # Set test tolerances (for infinity norm of transfer function\n # difference)\n tf_abs_tol = 1e-6\n tf_rel_tol = 1e-4\n\n # Set time parameters for discrete-time simulation\n dt = 0.1\n num_time_steps = 1000\n\n # Set size of plant and model. For test, don't reduce the system, just\n # check that it comes back close to the original plant. 
Also, note that\n # using more than 8 states causes poorly conditioned TF coeffs\n # (https://github.com/scipy/scipy/issues/2980)\n num_states_plant = 8\n num_states_model = num_states_plant\n\n # Loop through different numbers of inputs, numbers of outputs, and\n # sampling intervals\n for num_inputs in [1, 3]:\n for num_outputs in [1, 2]:\n for sample_interval in [1, 2, 4]:\n # Define time steps at which to save data. These will be of\n # the form [0, 1, p, p + 1, 2p, 2p + 1, ...] where p is the\n # sample interval.\n time_steps = make_time_steps(\n num_time_steps, sample_interval)\n # # Create a state space system\n # A_plant, B_plant, C_plant = util.drss(\n # num_states_plant, num_inputs, num_outputs)\n A_plant = util.load_array_text(\n join(self.data_dir, 'A_in%d_out%d.txt') % (\n num_inputs, num_outputs))\n B_plant = util.load_array_text(\n join(self.data_dir, 'B_in%d_out%d.txt') % (\n num_inputs, num_outputs))\n C_plant = util.load_array_text(\n join(self.data_dir, 'C_in%d_out%d.txt') % (\n num_inputs, num_outputs))\n\n # Simulate an impulse response using the state space system.\n # This will generate Markov parameters at all timesteps [0,\n # 1, 2, 3, ...]. Only keep data at the desired time steps,\n # which are separated by a sampling interval (see above\n # comment).\n Markovs = util.impulse(\n A_plant, B_plant, C_plant,\n time_steps[-1] + 1)[time_steps]\n\n # Compute a model using ERA\n my_ERA = era.ERA(verbosity=0)\n A_model, B_model, C_model = my_ERA.compute_model(\n Markovs, num_states_model)\n\n # Save ERA model to disk\n A_path_computed = join(self.out_dir, 'A_computed.txt')\n B_path_computed = join(self.out_dir, 'B_computed.txt')\n C_path_computed = join(self.out_dir, 'C_computed.txt')\n my_ERA.put_model(\n A_path_computed, B_path_computed, C_path_computed)\n\n # Check normalized Markovs\n rtol = 1e-5 # 1e-6\n atol = 1e-5 # 1e-10\n Markovs_model = util.impulse(\n A_model, B_model, C_model,\n time_steps[-1] + 1)[time_steps]\n max_Markov = np.amax(Markovs)\n eigs_plant = np.linalg.eig(A_plant)[0]\n eigs_model = np.linalg.eig(A_model)[0]\n # print 'markovs shape', Markovs.shape\n # print 'max plant eig', np.abs(eigs_plant).max()\n # print 'max model eig', np.abs(eigs_model).max()\n # print 'max plant markov', max_Markov\n # print 'max model markov', np.amax(Markovs_model)\n # print 'markov diffs', (\n # Markovs - Markovs_model).squeeze().max()\n\n '''\n import matplotlib.pyplot as plt\n plt.figure()\n plt.semilogy(np.abs(Markovs).squeeze(), 'b')\n plt.semilogy(np.abs(Markovs_model).squeeze(), 'r--')\n plt.axis(\n [0, time_steps[-1], Markovs.min(), Markovs.max()])\n '''\n\n np.testing.assert_allclose(\n Markovs_model.squeeze(),\n Markovs.squeeze(),\n rtol=rtol, atol=atol)\n\n\n # plt.show()\n '''\n # Use Scipy to check that transfer function of ERA model is\n # close to transfer function of full model. Do so by\n # computing the infinity norm (H_inf) of the difference\n # between the transfer functions. 
Since Scipy can't handle\n # MIMO transfer functions, loop through each input-output\n # pair individually.\n for input_idx in range(num_inputs):\n for output_idx in range(num_outputs):\n\n # Compute transfer functions\n tf_plant = scipy.signal.StateSpace(\n A_plant, B_plant[:, input_idx:input_idx + 1],\n C_plant[output_idx:output_idx + 1, :],\n 0, dt=dt).to_tf()\n tf_model = scipy.signal.StateSpace(\n A_model,\n B_model[:, input_idx:input_idx + 1],\n C_model[output_idx:output_idx + 1, :],\n 0, dt=dt).to_tf()\n tf_diff = util.sub_transfer_functions(\n tf_plant, tf_model, dt=dt)\n\n # Compute transfer function norms\n tf_plant_inf_norm = util.compute_inf_norm_discrete(\n tf_plant, dt)\n tf_diff_inf_norm = util.compute_inf_norm_discrete(\n tf_diff, dt)\n\n # Test values\n print 'err_frac', (\n tf_diff_inf_norm / tf_plant_inf_norm)\n self.assertTrue(\n tf_diff_inf_norm / tf_plant_inf_norm <\n tf_rel_tol)\n '''\n\n # Also test that saved reduced model mats are equal to those\n # returned in memory\n np.testing.assert_equal(\n util.load_array_text(A_path_computed), A_model)\n np.testing.assert_equal(\n util.load_array_text(B_path_computed), B_model)\n np.testing.assert_equal(\n util.load_array_text(C_path_computed), C_model)", "def calculate():\n global v, vNew, n, stepsToToleranceArray\n stepsToToleranceArray = []\n for n in nArray:\n print('Currently working with n = ', n)\n initiateVMatrixes()\n step = 0\n toleranceAcqurired = False\n while not toleranceAcqurired:\n step+=1\n relax_checker()\n # Controll accuracy\n toleranceAcqurired = True # run through v and set false if not acquired\n for i in range(1,n):\n for j in range(1,n):\n if np.abs( (v[i,j]-vExact[i,j])/vExact[i,j] ) > tolerance:\n toleranceAcqurired = False\n if toleranceAcqurired:\n stepsToToleranceArray.append(step)\n if n in [5,10]: print('n =', n, 'steps =', step)", "def test_panel_model_beta_Within(setup_fixed_model, expected_panel_model):\n calc_beta_Within_tilde, calc_var_beta_tilde, calc_sigma_v = fixed_effects_model(**setup_fixed_model)\n assert np.allclose(calc_beta_Within_tilde.round(3), expected_panel_model['beta_Within_tilde'])", "def model_test(nu, fsigma_T, fsigma_P, models_in, amps_in, params_in, models_fit, label):\n # Generate fake data with some \"true\" parameters\n (D_vec, Ninv) = gen_data(nu, fsigma_T, fsigma_P, models_in, amps_in, params_in)\n Ninv_sqrt = np.matrix(linalg.sqrtm(Ninv))\n (dust_params, sync_params, cmb_params) = params_in\n (dust_amp, sync_amp, cmb_amp) = amps_in\n \n # Beam model\n beam_mat = np.identity(3*len(nu))\n\n # Set-up MCMC\n dust_guess = np.array([1.6, 20.])\n sync_guess = np.array([-3.])\n cmb_guess = np.array([])\n guess = np.concatenate((dust_guess, sync_guess, cmb_guess))\n #ndim = len(dust_guess) + len(sync_guess) + len(cmb_guess)\n \n # Run MCMC sampler on this model\n t0 = time.time()\n dust_params_out, sync_params_out, cmb_params_out, samples \\\n = mcmc(guess, nu, D_vec, Ninv, beam_mat, models_fit, label)\n print \"MCMC run in %d sec.\" % (time.time() - t0)\n \n # Estimate error on recovered CMB amplitudes\n (F_fg, F_cmb, F) = F_matrix(nu, dust_params_out, sync_params_out, cmb_params_out, models_fit)\n H = F_fg.T*Ninv*F_fg\n x_mat = np.linalg.inv(F.T*beam_mat.T*Ninv*beam_mat*F)*F.T*beam_mat.T*Ninv*D_vec # Equation A3\n \n U, Lambda, VT = np.linalg.svd(Ninv_sqrt*F_fg, full_matrices=False) # Equation A14\n \n print \"-\"*30\n print \"F_cmb.T\", F_cmb.T.shape\n print \"Ninv_sqrt\", Ninv_sqrt.shape\n print \"F_cmb\", F_cmb.shape\n print \"I\", np.identity(U.shape[0]).shape\n 
print \"U\", U.shape\n print \"U.T\", U.T.shape\n print \"-\"*30\n \n \n \n N_eff_inv_cmb = F_cmb.T*Ninv_sqrt*(np.matrix(np.identity(U.shape[0])) - U*U.T)*Ninv_sqrt*F_cmb # Equation A16\n N_eff_cmb = np.linalg.inv(N_eff_inv_cmb)\n cmb_noise = np.array([N_eff_cmb[0,0], N_eff_cmb[1,1], N_eff_cmb[2,2]])\n\n gls_cmb = x_mat[0:3,0]\n cmb_chisq = (np.matrix(cmb_amp).T - gls_cmb).T*N_eff_inv_cmb*(np.matrix(cmb_amp).T - gls_cmb)\n \n # Output triangle plots for dust\n if label != None:\n if (models_fit[0] == 'mbb' and models_fit[1] == 'pow'):\n if (models_in[0] == 'mbb'):\n fig = corner.corner(samples, truths=[dust_params[0], dust_params[1], sync_params[0]],\n labels=[r\"$\\beta_d$\", r\"$T_d$\",r\"$\\alpha_s$\"])\n else :\n fig = corner.corner(samples, labels=[r\"$\\beta_d$\", r\"$T_d$\",r\"$\\alpha_s$\"])\n else :\n print 'Error! Not configured for this plot!'\n exit()\n fig.savefig('triangle_' + label + '.png')\n plt.close('all')\n \n # Run multinest sampler\n #multinest(nu, D_vec, Ninv, beam_mat, ndim, models_fit, label)\n \n return gls_cmb, cmb_chisq, cmb_noise", "def _variance(self,gp):\r\n return self.gp_link.transf(gp)/self.beta", "def test_dynamics_no_tau_ref(self):\n n = 50\n t_max = 100.0\n dt = 0.1\n\n G = StudentLayer(n)\n G.tau_ref = 0.0\n\n i_values = np.linspace(0.01, 0.4, 50)\n\n for i_ext in i_values:\n # start with different initial voltages to take advantage of averaging\n # effects\n G.v_init = np.linspace(G.vR, G.v_th, n, endpoint=False)\n G.i_ext_init = i_ext\n\n M = simulation.EventMonitor(G)\n\n sim = simulation.Simulation(G, M, dt=dt)\n sim.run(t_max)\n \n rate = float(len(M.t))/n/t_max*1000.0\n # first source of uncertainty: a spike might not fit before the end of a\n # simulation\n uncertainty1 = 1.0/np.sqrt(n)/t_max*1000.0\n \n expected = 0.0\n uncertainty = uncertainty1\n if G.R*i_ext > G.v_th - G.vR:\n expected = 1000.0/(G.tau_m*np.log(G.R*i_ext/(G.vR-G.v_th+G.R*i_ext)))\n # second source of uncertainty: spikes might move due to the granularity\n # of the simulation\n uncertainty2 = dt*expected*rate/1000.0\n uncertainty = uncertainty1 + uncertainty2\n uncertainty *= 1.5\n self.assertLess(np.abs(rate - expected), uncertainty)\n else:\n self.assertAlmostEqual(rate, 0.0)", "def _em_autoregressive(self, result, betas, tmp=None):\n if tmp is None:\n tmp = np.sqrt(result.smoothed_marginal_probabilities)\n\n resid = np.zeros((self.k_regimes, self.nobs + self.order))\n resid[:] = self.orig_endog\n if self._k_exog > 0:\n for i in range(self.k_regimes):\n resid[i] -= np.dot(self.orig_exog, betas[i])\n\n # The difference between this and `_em_exog` is that here we have a\n # different endog and exog for each regime\n coeffs = np.zeros((self.k_regimes,) + (self.order,))\n variance = np.zeros((self.k_regimes,))\n exog = np.zeros((self.nobs, self.order))\n for i in range(self.k_regimes):\n endog = resid[i, self.order:]\n exog = lagmat(resid[i], self.order)[self.order:]\n tmp_endog = tmp[i] * endog\n tmp_exog = tmp[i][:, None] * exog\n\n coeffs[i] = np.dot(np.linalg.pinv(tmp_exog), tmp_endog)\n\n if self.switching_variance:\n tmp_resid = endog - np.dot(exog, coeffs[i])\n variance[i] = (np.sum(\n tmp_resid ** 2 * result.smoothed_marginal_probabilities[i]) /\n np.sum(result.smoothed_marginal_probabilities[i]))\n else:\n tmp_resid = tmp_endog - np.dot(tmp_exog, coeffs[i])\n variance[i] = np.sum(tmp_resid ** 2)\n # Variances\n if not self.switching_variance:\n variance = variance.sum() / self.nobs\n return coeffs, variance", "def evaluate(self):\n weight, bias, emb = 
self.sess.run([self.sm_w_t, self.sm_b, self.emb])\n return utils.pp(weight, bias, emb, self.test_data)", "def VarianceOfResponse(self):\n H = []\n for i in range(len(self.omega_range)):\n \"\"\"Calculation of the Transmission matrix H\"\"\"\n H.append(np.linalg.inv((-self.omega_range[i] ** 2 * self.M\n - 1j * self.omega_range[i] * self.C\n + self.K)))\n \"\"\"squared absolute of the transmission matrix H multiplied with the diagonal of the mass matrix M (M*I)\"\"\"\n Habs2 = [(np.abs(matrix) ** 2) for matrix in H]\n PSDexc = [np.transpose(np.diagonal(self.M)) * spec_val for spec_val in self.spectrum]\n \"\"\"Response of all DOFs as PSD\"\"\"\n RespPSD = [Habs2[wincr].dot(PSDexc[wincr]) for wincr in range(len(self.spectrum))]\n \"\"\"The variance of the response can be obtained with the integral of the response PSD. \n integral(PSD_response)\"\"\"\n variance = (np.trapz(RespPSD, self.omega_range, axis=0))\n return variance", "def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. 
+0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. + 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n 
self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n 
self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)", "def variational_mfvi(X, mfvi_mixture=False, n_mixture=1, name=\"\", **kwargs):\n X = tf.convert_to_tensor(X, dtype=tf.float32)\n\n N, D = X.shape.as_list()\n\n # define variational parameters\n qf_mean = tf.get_variable(shape=[N], name='{}_mean'.format(name))\n qf_sdev = tf.exp(tf.get_variable(shape=[N], name='{}_sdev'.format(name)))\n\n # define variational family\n mixture_par_list = []\n if mfvi_mixture:\n gp_dist = tfd.MultivariateNormalDiag(loc=qf_mean, scale_diag=qf_sdev,\n name=name)\n q_f, mixture_par_list = inference_util.make_mfvi_sgp_mixture_family(\n n_mixture=n_mixture, N=N, gp_dist=gp_dist, name=name)\n else:\n q_f = ed.MultivariateNormalDiag(loc=qf_mean, scale_diag=qf_sdev,\n name=name)\n\n return q_f, qf_mean, qf_sdev, mixture_par_list", "def update_variables_Adam(alpha, beta1, beta2, epsilon, var, grad, v, s, t):\n Vd = (beta1 * v) + ((1 - beta1) * grad)\n Sd = (beta2 * s) + ((1 - beta2) * grad * grad)\n\n new_prom_corr = Vd / (1 - beta1 ** t)\n new_s_corr = Sd / (1 - beta2 ** t)\n\n w = var - alpha * (new_prom_corr / ((new_s_corr ** (0.5)) + epsilon))\n return (w, Vd, Sd)\n \"\"\"\n new_prom = (beta1 * v) + ((1 - beta1) * grad)\n new_s = (beta2 * s) + ((1 - beta2) * grad * grad)\n new_prom_corr = new_prom / (1 - beta1 ** t)\n new_s_corr = new_s / (1 - beta2 ** t)\n new_var = var - alpha * 
(new_prom_corr / ((new_s_corr ** (0.5)) + epsilon))\n return (new_var, new_s, new_prom)\n \"\"\"", "def test_dynamics_with_tau_ref(self):\n n = 10\n t_max = 100.0\n dt = 0.1\n\n G = StudentLayer(n)\n\n i_values = np.linspace(0.02, 0.4, 28)\n\n different = 0\n for i_ext in i_values:\n # start with different initial voltages to take advantage of averaging\n # effects\n G.v_init = np.linspace(G.vR, G.v_th, n, endpoint=False)\n G.i_ext_init = i_ext\n\n M = simulation.EventMonitor(G)\n\n sim = simulation.Simulation(G, M, dt=dt)\n sim.run(t_max)\n\n rate = float(len(M.t))/n/t_max*1000.0\n # first source of uncertainty: a spike might not fit before the end of a\n # simulation\n uncertainty1 = 1.0/np.sqrt(n)/t_max*1000.0\n \n expected0 = 0.0\n expected = 0.0\n if G.R*i_ext > G.v_th - G.vR:\n expected0 = 1000.0/(G.tau_m*np.log(G.R*i_ext/(G.vR-G.v_th+G.R*i_ext)))\n expected = expected0/(1 + expected0*G.tau_ref/1000.0)\n\n # second source of uncertainty: spikes might move due to the granularity\n # of the simulation\n uncertainty2 = dt*expected*rate/1000.0\n uncertainty = uncertainty1 + uncertainty2\n\n self.assertLess(np.abs(rate - expected), uncertainty)\n\n if np.abs(expected - expected0) >= uncertainty:\n different += 1\n else:\n self.assertAlmostEqual(rate, 0.0)\n \n # make sure that in most cases the firing rate using the refractory period\n # was significantly different from the case without refractory period\n self.assertGreater(different, len(i_values)*2/3)", "def gamp_gmm_test(nz=200,ny=100,ns=10, snr=30, verbose=False, mse_tol=-17, plt_results=False): \n\n # Compute the dimensions\n if (ns==1):\n zshape = (nz,)\n yshape = (ny,)\n else:\n zshape = (nz,ns)\n yshape = (ny,ns)\n Ashape = (ny,nz)\n\n # GMM parameters\n zmeanc = [0, 0] # mean of each component\n zvarc = [1,0.001] # variance in each component\n pc = [0.1,0.9] # probability of each component\n ncomp= len(zmeanc)\n \n # Generate GMM data\n nztot = np.prod(zshape) \n u = np.random.choice(range(ncomp),p=pc,size=nztot)\n z = np.random.randn(nztot)\n for i in range(nztot):\n j = u[i]\n z[i] = zmeanc[j] + np.sqrt(zvarc[j])*z[i]\n z = np.reshape(z,zshape) \n\n # Create a random transform\n A = np.random.normal(0,np.sqrt(1/nz), Ashape)\n \n # Create output\n y0 = A.dot(z) \n wvar = np.power(10,-0.1*snr)*np.mean(np.abs(y0)**2)\n y = y0 + np.random.normal(0,np.sqrt(wvar),yshape)\n\n # Create a set of estimators, one for each component of the GMM\n est_list = []\n for i in range(ncomp):\n est = vp.estim.GaussEst(zmeanc[i], zvarc[i], zshape)\n est_list.append(est)\n \n # Create the GMM estimator\n est_in = vp.estim.MixEst(est_list, w=pc,name='input')\n \n # Create linear transform\n Aop = vp.trans.MatrixLT(A,zshape)\n\n # Create the output estimator\n est_out = vp.estim.GaussEst(y,wvar,yshape,name='output')\n\n # Create the solver\n solver = vp.solver.Gamp(est_in,est_out,Aop,hist_list=['z0','zvar0'],step=0.95,\\\n nit=50)\n \n # Run the solver\n solver.solve()\n \n # Compute the MSE as a function of the iteration\n z0_hist = solver.hist_dict['z0']\n zvar0_hist = solver.hist_dict['zvar0']\n nit = len(z0_hist)\n zpow = np.mean(np.abs(z)**2)\n mse = np.zeros(nit)\n mse_pred = np.zeros(nit)\n for it in range(nit):\n zerr = np.mean(np.abs(z0_hist[it]-z)**2)\n mse[it] = 10*np.log10(zerr/zpow)\n mse_pred[it] = 10*np.log10(np.mean(zvar0_hist[it])/zpow)\n\n if (plt_results):\n import matplotlib.pyplot as plt\n t = np.arange(nit)\n plt.plot(t,mse,'-o')\n plt.plot(t,mse_pred,'-s')\n plt.legend(['Actual', 'Pred'])\n plt.grid()\n \n if verbose:\n 
print(\"Final MSE = %f\" % mse[-1]) \n \n # Check final error if test passed\n if mse[-1] > mse_tol:\n raise vp.common.TestException(\"MSE exceeded expected value\")", "def test_basic_calculation(self):\n expected_result = np.array(\n [\n [[0.5, 0.5, 0.0], [0.5, 0.5, 0.4], [0.9, 0.5, 0.4]],\n [[0.5, 0.5, 0.0], [0.5, 0.5, 0.4], [0.9, 0.5, 0.4]],\n ],\n dtype=np.float32,\n )\n result = calculate_sleet_probability(self.rain_prob_cube, self.snow_prob_cube)\n self.assertArrayAlmostEqual(result.data, expected_result)\n self.assertTrue(result.dtype == np.float32)", "def test_predict_mean_variance(self):\n lik = self._standard_likelihood()\n input_mean = Variable(TensorType([0.0]))\n input_variance = Variable(TensorType([1.0]))\n expected_output_mean = input_mean\n expected_output_variance = input_variance + self._expected_likelihood_variance\n\n # API\n output_mean, output_variance = lik.predict_mean_variance(\n input_mean, input_variance\n )\n assert isinstance(output_mean, Variable)\n assert isinstance(output_variance, Variable)\n\n # Value\n assert output_mean.data.numpy() == expected_output_mean.data.numpy()\n assert output_variance.data.numpy() == pytest.approx(\n expected_output_variance.data.numpy()\n )", "def ivrmse(self):\n return (self.model_error_iv()**2).mean()**.5", "def estimates(self):\n return self._est", "def homogenise_survey_measurements(cname, wg, parameter, ensemble_model_samples,\n database):\n\n # Get the data for this object.\n estimates = database.retrieve_table(\n \"\"\" SELECT DISTINCT ON (filename, node_id)\n cname, node_id, snr, {parameter}\n FROM results, nodes\n WHERE nodes.wg = {wg}\n AND nodes.id = results.node_id\n AND cname = '{cname}'\n AND {parameter} <> 'NaN'\n \"\"\".format(wg=wg, cname=cname, parameter=parameter))\n\n assert estimates is not None\n\n # Extract N samples for all the parameters.\n\n # For each sample, calculate:\n # 1. The total variance (systematic**2 + (alpha/SNR)**2)\n # 2. The weighted mean from all observations by that nodes.\n # --> check that this follows 1/sqrt(N)\n # 3. Construct a covariance matrix using the weighted means, uncertainties\n # and the correlation coefficients\n # 4. Draw from a Gaussian using the weighted means and your new Cov matrix\n # 5. Record the draw.\n\n pars = [\n \"var_intrinsic\",\n \"var_sys_estimator\",\n \"alpha_sq\",\n \"rho_estimators\",\n \"c0_estimators\"\n ]\n\n \n samples = ensemble_model_samples.extract(pars=pars)\n\n unique_node_ids = ensemble_model_samples.data[\"node_ids\"]\n K = len(samples[\"var_intrinsic\"])\n\n estimates = estimates.group_by(\"node_id\")\n \n # 1. Calculate the total variance in each measurement.\n var_total = np.zeros((len(estimates), K))\n for j in range(len(estimates)):\n\n # Get the node index.\n k = np.where(estimates[\"node_id\"][j] == unique_node_ids)[0][0]\n\n var_total[j, :] \\\n = samples[\"var_sys_estimator\"][:, k] \\\n + samples[\"alpha_sq\"][:, k]/estimates[\"snr\"][j]\n\n \n # 2. 
Calculate the weighted mean from each node.\n M = len(set(estimates[\"node_id\"]))\n weighted_node_mu = np.zeros((M, K))\n weighted_node_variance = np.zeros((M, K))\n node_ids = np.zeros(M)\n for i, si in enumerate(estimates.groups.indices[:-1]):\n ei = estimates.groups.indices[i + 1]\n\n mu = (estimates[parameter][si:ei]).reshape(-1, 1) # Biases\n variance = var_total[si:ei]\n\n weights = 1.0/variance\n normalized_weights = weights/np.sum(weights, axis=0)\n\n\n weighted_mu = np.sum(normalized_weights * mu, axis=0)\n weighted_variance = 1.0/np.sum(weights, axis=0)\n\n weighted_node_mu[i, :] = weighted_mu + samples[\"c0_estimators\"][:, i]\n weighted_node_variance[i, :] = weighted_variance\n node_ids[i] = estimates[\"node_id\"][si]\n\n posterior = np.nan * np.ones(K)\n for i in range(K):\n\n Sigma = np.eye(M) * weighted_node_variance[:, i]\n \n a = 0\n for j in range(M):\n for k in range(j + 1, M):\n term = samples[\"rho_estimators\"][i, a] * Sigma[j, j]**0.5 * Sigma[k, k]**0.5\n Sigma[j, k] = term\n Sigma[k, j] = term\n a += 1\n\n W = np.ones((M, 1))\n Cinv = np.linalg.inv(Sigma)\n var_min = 1.0/np.dot(np.dot(W.T, Cinv), W)\n posterior[i] = var_min * np.dot(np.dot(W.T, Cinv), weighted_node_mu[:, i])\n \n return posterior", "def vif_cal(data, y):\n \n x_vars=data.drop([y], axis=1)\n xvar_names=x_vars.columns.tolist()\n x_var_col, vif_list = [], []\n str_gap = max([len(c) for c in xvar_names])+2\n\n # print(\"{:*^20s}\".format(\"VIF Summary\"))\n str_len = str_gap + 2 + 7 + 3 + 6 - len(' VIF Summary ')\n star_str = '*'*int(str_len/2)\n str_to_print = ''.join((star_str,' VIF Summary ',star_str))\n print(str_to_print)\n\n for xvar in xvar_names:\n y=xvar \n x=xvar_names.copy()\n x.remove(xvar)\n\n formula = \"{} ~ {} + 1\".format(y, ' + '.join(x))\n rsq=smf.ols(formula, data=x_vars).fit().rsquared \n if rsq==1: vif=np.inf\n else: vif=round(1/(1-rsq),10)\n x_var_col.append(xvar)\n vif_list.append(vif)\n print('vif of {:<{width}} = {:.6}'.format(xvar, vif, width=str_gap))\n\n str_len = str_gap + 2 + 7 + 3 + 6 - len(' VIF Summary END ')\n star_str = '*'*int(str_len/2)\n str_to_print = ''.join((star_str,' VIF Summary END ',star_str))\n print(str_to_print)\n\n vif_df = pd.DataFrame({'x_variable': x_var_col, 'vif': vif_list})\n vif_df = vif_df[['x_variable', 'vif']]\n return vif_df", "def test_predict_func(self):\n ve = VariogramEstimator(n_lags=15, normalize=False).fit(self.c, self.v)\n v = ve.variogram\n\n x = np.linspace(0, ve.range_, 100)\n\n assert_array_almost_equal(ve.predict(x), v.transform(x), decimal=6)" ]
[ "0.72795737", "0.66067874", "0.62614894", "0.6069039", "0.60592204", "0.6042628", "0.6035716", "0.60188943", "0.5992968", "0.5962066", "0.591116", "0.58926576", "0.58375585", "0.58221173", "0.58154273", "0.57872385", "0.5785738", "0.5771455", "0.57044655", "0.56976736", "0.566744", "0.5663293", "0.5655009", "0.5639529", "0.5629549", "0.5621699", "0.5614126", "0.55693775", "0.5561022", "0.553009", "0.55255365", "0.5518357", "0.5515679", "0.5488291", "0.5481375", "0.54670894", "0.54552317", "0.5449185", "0.54420066", "0.54385734", "0.5437121", "0.5434361", "0.5431141", "0.54287094", "0.542558", "0.5408825", "0.5402907", "0.5402253", "0.53891283", "0.538885", "0.5388534", "0.5372287", "0.5364768", "0.536379", "0.5361498", "0.5359933", "0.5359684", "0.5359222", "0.53470474", "0.5345626", "0.53436536", "0.53251684", "0.53109807", "0.53047425", "0.5301667", "0.53006953", "0.52955025", "0.52777755", "0.5270142", "0.52625185", "0.5262371", "0.5261673", "0.5259237", "0.5256597", "0.5254395", "0.52505726", "0.5250223", "0.52497184", "0.52490926", "0.5247538", "0.52379346", "0.5231223", "0.523034", "0.5229847", "0.52284616", "0.5228438", "0.5221331", "0.5220596", "0.5210764", "0.5210668", "0.52091753", "0.5207666", "0.51992387", "0.5198708", "0.51974463", "0.5193547", "0.51922", "0.5191246", "0.5190015", "0.51855123" ]
0.6047066
5
Creates a logger of given level and saves logs to a file
def create_logger( project_name: str, level: str = "INFO", log_dir: str = "/tmp/logs", file_name: Optional[str] = None, do_print: bool = True, simple_logging: bool = False, log_to_file: bool = False, rich_logging: bool = False, time_zone: Optional[str] = None, ): import __main__ if file_name is None: try: file_name = ntpath.basename(__main__.__file__).split(".")[0] except: file_name = "logs" logger = logging.getLogger(file_name) logger.handlers.clear() logger.setLevel(getattr(logging, level)) if time_zone: from pytz import timezone, utc def time_formatter(*args): # TODO: Doesnt work with rich formatter utc_dt = utc.localize(datetime.datetime.utcnow()) my_tz = timezone(time_zone) converted = utc_dt.astimezone(my_tz) return converted.timetuple() logging.Formatter.converter = time_formatter if rich_logging: from rich.logging import RichHandler stream_format = f"{project_name}:%(module)s:%(funcName)s: %(message)s" stream_handler = RichHandler(omit_repeated_times=False) else: stream_format = f"%(asctime)s:%(levelname)s:{project_name}:%(module)s:%(funcName)s: %(message)s" stream_handler = logging.StreamHandler() file_formatter = stream_formatter = logging.Formatter( stream_format, "%Y-%m-%d %H:%M:%S" ) if simple_logging: file_formatter = logging.Formatter("%(message)s") stream_formatter = logging.Formatter("%(message)s") if log_to_file: date = datetime.date.today() date = "%s-%s-%s" % (date.day, date.month, date.year) log_file_path = os.path.join(log_dir, "%s-%s.log" % (file_name, date)) create_folder(log_dir) file_handler = logging.FileHandler(log_file_path) file_handler.setFormatter(file_formatter) logger.addHandler(file_handler) if do_print: stream_handler.setFormatter(stream_formatter) logger.addHandler(stream_handler) logger.propagate = False return logger
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logger(level, log_info):\n log_path = getconfig(\"log\", \"LOG_PATH\")\n log_level = getconfig(\"log\", \"LOG_LEVEL\")\n log_enable = getconfig(\"log\", \"LOG_ENABLE\")\n log_fname = getconfig(\"log\", \"LOG_FNAME\")\n if not os.path.exists(log_path):\n os.makedirs(log_path)\n log_file = os.path.join(log_path, log_fname)\n # base on input string \"DEBUG\",\"ERROR\"... get level number\n lvl = l_type_lst.index(level)\n\n # now, begin to write into log file\n log_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))\n log_pid = os.getpid()\n log_script = sys._getframe().f_back.f_code.co_filename.split('/')[-1]\n log_method = sys._getframe().f_back.f_code.co_name\n log_line = sys._getframe().f_back.f_lineno\n with open(log_file, \"a\") as log:\n if lvl <= int(log_level) and bool(log_enable):\n log.write(\"%s %s %s %s:%s:%s %s\\\n\\n\" % (log_time, log_pid, level, log_script, log_method, log_line, log_info))", "def log_file(level):\n # 设置日志的记录等级\n logging.basicConfig(level=level) # 调试debug级\n\n # 创建日志记录器,指明日志保存的路径、每个日志文件的最大大小、保存的日志文件个数上限\n file_log_handler = RotatingFileHandler(\"logs/log\", maxBytes=1024 * 1024 * 100, backupCount=10)\n\n # 创建日志记录的格式 日志等级 输入日志信息的文件名 行数 日志信息\n formatter = logging.Formatter('%(levelname)s %(filename)s:%(lineno)d %(message)s')\n\n # 为刚创建的日志记录器设置日志记录格式\n file_log_handler.setFormatter(formatter)\n\n # 为全局的日志工具对象(flask app使用的)添加日志记录器\n logging.getLogger().addHandler(file_log_handler)", "def _setup_logger(self, level, log_file):\n level = getattr(logging, level.upper())\n logger.setLevel(level)\n formatter = logging.Formatter(\n '[%(levelname)s] %(asctime)s - %(module)s.%(funcName)s() - %(message)s')\n handler = logging.StreamHandler()\n logger.addHandler(handler)\n handler.setFormatter(formatter)\n if not log_file:\n return\n try:\n handler = TimedRotatingFileHandler(log_file)\n except IOError:\n logger.error(\"Could not write to %s, falling back to stdout\",\n log_file)\n else:\n logger.addHandler(handler)\n handler.setFormatter(formatter)", "def _configure_logging(self, path, level):\n logging_format = (\n \"%(asctime)s : %(levelname)s : %(module)s.%(lineno)s : %(message)s\"\n )\n date_format = \"%Y/%m/%d %I:%M:%S %p\"\n\n log_formatter = logging.Formatter(logging_format, date_format)\n file_handler = logging.FileHandler(path, mode=\"w\", encoding=\"UTF-8\")\n file_handler.setFormatter(log_formatter)\n self.logger.addHandler(file_handler)\n self.logger.setLevel(self._logging_levels(level))", "def logToFile(path, level=logging.INFO):\n logger = logging.getLogger()\n if logger.handlers:\n logging.getLogger('ib_insync').setLevel(level)\n else:\n logger.setLevel(level)\n formatter = logging.Formatter(\n '%(asctime)s %(name)s %(levelname)s %(message)s')\n handler = logging.FileHandler(path)\n handler.setFormatter(formatter)\n logger.addHandler(handler)", "def set_logger(path_cfg, logging_level=None):\n\n config_dic = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"simple\": {\n \"format\": \"[%(levelname)s] %(name)s: %(message)s\"\n }\n },\n\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"level\": \"INFO\",\n \"formatter\": \"simple\",\n \"stream\": \"ext://sys.stdout\"\n },\n\n },\n\n \"loggers\": {\n },\n\n \"root\": {\n \"level\": \"INFO\",\n \"handlers\": [\"console\"]\n }\n }\n\n if logging_level:\n\n try:\n level = getattr(logging, logging_level.upper())\n except (AttributeError, TypeError):\n logging_level = 'DEBUG'\n level = 'DEBUG'\n finally:\n file_log = 
os.path.join(path_cfg, 'output', f'{logging_level}.log')\n added_file_handler = {\"added_file_handler\": {\n \"class\": \"logging.handlers.RotatingFileHandler\",\n \"level\": level,\n \"formatter\": \"simple\",\n \"filename\": file_log,\n \"encoding\": \"utf8\",\n \"mode\": \"w\"}\n }\n config_dic['handlers'].update(added_file_handler)\n config_dic['root']['handlers'].append('added_file_handler')\n config_dic['root']['level'] = \"DEBUG\"\n\n logging.config.dictConfig(config_dic)", "def set_logs(level=None, file=None, format=None):\r\n\r\n global HANDLER, LOG_LEVEL, LOG_FILE, LOG_FORMAT\r\n LOG_LEVEL = (level or LOG_LEVEL).upper()\r\n LOG_FILE = file or LOG_FILE\r\n LOG_FORMAT = format or LOG_FORMAT\r\n\r\n logging.basicConfig(\r\n level=getattr(logging, LOG_LEVEL),\r\n format=LOG_FORMAT)\r\n\r\n if LOG_FILE:\r\n parent_makedirs(LOG_FILE)\r\n HANDLER = logging.FileHandler(LOG_FILE)\r\n else:\r\n HANDLER = None", "def setup_logger(level):\n\n logger = logging.getLogger('splunk.appserver.insteon.controllers.WoeidLookup')\n logger.propagate = False # Prevent the log messages from being duplicated in the python.log file\n logger.setLevel(level)\n\n file_handler = logging.handlers.RotatingFileHandler(make_splunkhome_path(['var', 'log', 'splunk', 'insteon_woeid_controller.log']), maxBytes=25000000, backupCount=5)\n\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n return logger", "def create_log_file(path):\n with open(path, 'w'):\n pass", "def _init_file(*, name: str, level: int) -> None:\n\n # Try to make a Logs directory if one does not exist\n try:\n os.mkdir('Logs')\n except OSError:\n pass\n\n logging_instance: logging.Logger = logging.getLogger(name)\n logging_instance.setLevel(level)\n\n file_name: str = name.lower() + '-log'\n\n # Handler\n handler = logging.FileHandler(f'Logs/{file_name}.log')\n handler.setLevel(level)\n\n # Formatter\n formatter: logging.Formatter = logging.Formatter(\n fmt=logging_format,\n datefmt=datefmt\n )\n\n handler.setFormatter(formatter)\n logging_instance.addHandler(handler)\n\n return logging_instance", "def _generate_log(path):\n # Create a logger and set the level.\n logger = logging.getLogger(\"Log_info\")\n # Check handler exists\n if len(logger.handlers) > 0:\n return logger # Logger already exists\n # set logger level\n logger.setLevel(logging.DEBUG)\n # Create file handler, log format and add the format to file handler\n stream_handler = logging.StreamHandler()\n file_handler = logging.FileHandler(path)\n\n # See https://docs.python.org/3/library/logging.html#logrecord-attributes\n # for log format attributes.\n log_format = \"%(levelname)s %(asctime)s %(message)s\"\n formatter = logging.Formatter(log_format)\n stream_handler.setFormatter(formatter)\n file_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n logger.addHandler(file_handler)\n\n return logger", "def config_logger(log_level):\n try:\n logfile = os.path.expanduser(os.path.join(\"~\", \".parallelcluster\", \"awsbatch-cli.log\"))\n logdir = os.path.dirname(logfile)\n os.makedirs(logdir)\n except OSError as e:\n if e.errno == errno.EEXIST and os.path.isdir(logdir):\n pass\n else:\n fail(\"Cannot create log file (%s). 
Failed with exception: %s\" % (logfile, e))\n\n formatter = logging.Formatter(\"%(asctime)s %(levelname)s [%(module)s:%(funcName)s] %(message)s\")\n\n logfile_handler = RotatingFileHandler(logfile, maxBytes=5 * 1024 * 1024, backupCount=1)\n logfile_handler.setFormatter(formatter)\n\n logger = logging.getLogger(\"awsbatch-cli\")\n logger.addHandler(logfile_handler)\n try:\n logger.setLevel(log_level.upper())\n except (TypeError, ValueError) as e:\n fail(\"Error setting log level. Failed with exception: %s\" % e)\n\n return logger", "def log_to_file(file, level=logging.WARNING,\n handler_class=logging.StreamHandler):\n handler = handler_class(file)\n handler.setLevel(level)\n handler.setFormatter(\n utils.AgentFormatter(\n \"%(asctime)s %(composite_name)s %(levelname)s: %(message)s\"\n )\n )\n root = logging.getLogger()\n root.setLevel(level)\n root.addHandler(handler)", "def setup_logger(logger_name, level=\"INFO\", log_file: str = None):\n assert level in LOG_LEVELS\n\n formatter = logging.Formatter('%(message)s')\n if log_file:\n handler = logging.FileHandler(log_file, mode=\"w\")\n else:\n handler = logging.StreamHandler(stdout)\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(logger_name)\n logger.setLevel(getattr(logging, level))\n logger.addHandler(handler)\n return logger", "def setup_logger(save_dir, distributed_rank=0, filename=\"log.txt\", mode=\"a\"):\n save_file = os.path.join(save_dir, filename)\n if mode == \"o\" and os.path.exists(save_file):\n os.remove(save_file)\n if distributed_rank > 0:\n logger.remove()\n logger.add(\n save_file, format=\"{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}\", filter=\"\", level=\"INFO\", enqueue=True\n )\n\n return logger", "def init(filename, level, logname=None):\n\n global logfilename\n\n def openFileHandler(fname):\n mkdir.parents(os.path.dirname(fname), stat.S_IRWXU)\n return XendRotatingFileHandler(fname, mode = 'a',\n maxBytes = MAX_BYTES,\n backupCount = BACKUP_COUNT)\n\n # Rather unintuitively, getLevelName will get the number corresponding to\n # a level name, as well as getting the name corresponding to a level\n # number. 
setLevel seems to take the number only though, so convert if we\n # are given a string.\n if isinstance(level, types.StringType):\n level = logging.getLevelName(level)\n\n if logname:\n logname.setLevel(level)\n else:\n log.setLevel(level)\n\n try:\n fileHandler = openFileHandler(filename)\n logfilename = filename\n except IOError:\n try:\n logfilename = tempfile.mkstemp(\"-libvirt.log\")[1]\n except IOError:\n print >>sys.stderr, ('libvirt/OnceLogging.py: Unable to open standard or temporary log file for libvirt')\n os._exit(1)\n fileHandler = openFileHandler(logfilename)\n\n fileHandler.setFormatter(logging.Formatter(LOGFILE_FORMAT, DATE_FORMAT))\n if logname:\n logname.addHandler(fileHandler)\n else:\n log.addHandler(fileHandler)\n\n stderrHandler = logging.StreamHandler()\n stderrHandler.setFormatter(logging.Formatter(STDERR_FORMAT,\n DATE_FORMAT))\n if logname:\n logname.addHandler(fileHandler)\n else:\n log.addHandler(fileHandler)", "def custom_logger(log_level, log_name=None):\n\n # Gets the name of the class / method from where this method is called from\n logger_name = inspect.stack()[1][3]\n\n if log_name is None:\n logger = logging.getLogger(logger_name)\n else:\n logger = logging.getLogger(log_name)\n\n logger.setLevel(logging.DEBUG)\n\n if log_name is None:\n file_handler = logging.FileHandler('Automation.log', mode='a')\n else:\n file_handler = logging.FileHandler('{0}.log'.format(log_name))\n\n file_handler.setLevel(log_level)\n\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S.%p')\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n\n return logger", "def my_custom_logger(logger_name, level=logging.INFO):\n logger = logging.getLogger(logger_name)\n logger.setLevel(level)\n format_string = ('%(asctime)s, %(levelname)s, %(filename)s, %(message)s')\n log_format = logging.Formatter(format_string)\n # Creating and adding the console handler\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setFormatter(log_format)\n logger.addHandler(console_handler)\n # Creating and adding the file handler\n file_handler = logging.FileHandler(logger_name, mode='a')\n file_handler.setFormatter(log_format)\n logger.addHandler(file_handler)\n return logger", "def setup_logger(logger, level):\n\n logger.setLevel({\n \"debug\": logging.DEBUG,\n \"info\": logging.INFO,\n \"warn\": logging.WARNING,\n \"error\": logging.ERROR,\n \"critical\": logging.CRITICAL,\n }.get(level.lower(), logging.DEBUG))\n logger_handler = logging.StreamHandler(sys.stdout)\n\n if logger.level <= logging.DEBUG:\n fmt = \"%(asctime)s.%(msecs).03d||%(levelname).3s||%(filename)s:%(lineno)d||%(message)s\"\n else:\n fmt = \"[%(asctime)s.%(msecs).03d] %(levelname).4s %(message)s\"\n logger_handler.setFormatter(logging.Formatter(\n fmt=fmt,\n datefmt=\"%Z %Y-%m-%dT%H:%M:%S\")\n )\n logger.addHandler(logger_handler)\n\n # remove previous handlers if present\n for h in list(logger.handlers):\n logger.removeHandler(h)\n logger.addHandler(logger_handler)", "def get_logger(level=None, name=None, filename=None, log_dir=None):\n if isinstance(log_dir, str):\n log_dir = Path(log_dir)\n if level is None:\n level = settings.log_level\n if name is None:\n name = settings.log_name\n if filename is None:\n filename = settings.log_filename\n\n logger = lg.getLogger(name)\n\n # if a logger with this name is not already set up\n if len(logger.handlers) == 0:\n\n # get today's date and construct a log filename\n todays_date = 
dt.datetime.today().strftime(\"%Y_%m_%d\")\n\n if not log_dir:\n log_dir = settings.logs_folder\n\n log_filename = log_dir / \"{}_{}.log\".format(filename, todays_date)\n\n # if the logs folder does not already exist, create it\n if not log_dir.exists():\n log_dir.makedirs_p()\n # create file handler and log formatter and set them up\n formatter = lg.Formatter(\n \"%(asctime)s [%(process)d] %(levelname)s - %(name)s - %(\" \"message)s\"\n )\n if settings.log_file:\n handler = lg.FileHandler(log_filename, encoding=\"utf-8\")\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n if settings.log_console:\n handler = lg.StreamHandler(sys.stdout)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(level)\n\n return logger", "def create_logger(logger_type=1, filename=\"./DMRender.log\",\n console_loglevel=\"INFO\", file_loglevel=\"DEBUG\"):\n if logger_type == 0:\n logger = logging.getLogger('DMlog')\n NullHandler = logging.NullHandler()\n logger.addHandler(NullHandler)\n\n else:\n try:\n numeric_file_loglevel = getattr(logging, file_loglevel.upper())\n numeric_console_loglevel = getattr(\n logging, console_loglevel.upper())\n except AttributeError as e:\n print(\"LoggingError: Invalid logLevel -> {}\".format(e))\n sys.exit(1)\n\n logger = logging.getLogger('DMlog')\n logger.setLevel(logging.DEBUG)\n\n # create console handler which logs to stdout\n if logger_type in [1, 3]:\n consoleLogger = logging.StreamHandler(stream=sys.stdout)\n consoleLogger.setLevel(numeric_console_loglevel)\n if sys.version_info[0] >= 3:\n consoleFormatter = logging.Formatter(\"{name:<5} - {levelname} \\\n - {message}\", style='{')\n else:\n consoleFormatter = logging.Formatter(\"%(name)-5s - \\\n %(levelname)s - %(message)s\")\n consoleLogger.setFormatter(consoleFormatter)\n logger.addHandler(consoleLogger)\n\n # create file handler which logs to a file\n if logger_type in [2, 3]:\n fileLogger = logging.FileHandler(filename, mode='w')\n fileLogger.setLevel(numeric_file_loglevel)\n if sys.version_info[0] >= 3:\n fileFormatter = logging.Formatter(\"{asctime}|{name:<5}|\\\n {levelname:^9} - {message}\", datefmt='%H:%M:%S', style='{')\n else:\n fileFormatter = logging.Formatter(\"%(asctime)s|%(name)-5s|\\\n %(levelname)-9s - %(message)s\", datefmt='%H:%M:%S')\n fileLogger.setFormatter(fileFormatter)\n logger.addHandler(fileLogger)\n\n # Silence the matplotlib logger\n mpl_logger = logging.getLogger(\"matplotlib\")\n mpl_logger.setLevel(logging.WARNING)\n\n return logger", "def run_logging(level, filepath):\n logger = logging.getLogger()\n\n if filepath:\n logfile_path = os.path.join(os.getcwd(), filepath)\n\n if logfile_path.endswith(('log', 'txt')):\n ensure_dir(os.path.dirname(logfile_path))\n else:\n ensure_dir(logfile_path)\n logfile_path = os.path.join(logfile_path, 'debug.log')\n\n handler_logfile = logging.FileHandler(logfile_path, mode='w')\n logfile_level = LEVEL_CONFIG[DEFAULT_LOGFILE_LEVEL]\n handler_logfile.setLevel(logfile_level)\n format_for_logfile = logging.Formatter(\n '%(asctime)s | %(filename)-18.18s | %(levelname)-5.5s | '\n '%(message)s')\n handler_logfile.setFormatter(format_for_logfile)\n logger.addHandler(handler_logfile)\n\n handler_stdout = logging.StreamHandler()\n stdout_level = LEVEL_CONFIG[level.lower()]\n handler_stdout.setLevel(stdout_level)\n format_for_stdout = logging.Formatter('%(message)s')\n handler_stdout.setFormatter(format_for_stdout)\n logger.addHandler(handler_stdout)\n\n logging.getLogger().setLevel(logging.DEBUG)\n logging.debug('Logging 
works fine')", "def setup_logger(name, log_file, level=logging.DEBUG):\n\n handler = logging.FileHandler(log_file)\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def log(\n message,\n level=None,\n name=None,\n filename=None,\n log_dir=None,\n):\n if level is None:\n level = settings.log_level\n if name is None:\n name = settings.log_name\n if filename is None and settings.log_file:\n filename = settings.log_filename\n # get the current logger (or create a new one, if none), then log\n # message at requested level\n if settings.log_file or settings.log_console:\n logger = get_logger(name=name, filename=filename, log_dir=log_dir)\n else:\n logger = logging.getLogger(name)\n if level == lg.DEBUG:\n logger.debug(message)\n elif level == lg.INFO:\n logger.info(message)\n elif level == lg.WARNING:\n logger.warning(message)\n elif level == lg.ERROR:\n logger.error(message)\n\n return logger", "def setup_logger(level):\n logger = loguru.logger\n logger.remove()\n\n # Hearth logger\n logger.add(\n sys.stdout,\n level=level,\n filter=lambda record: record[\"extra\"].get(\"service\") == LoggerServices.Hearth,\n format=LoggerFormats.Hearth\n )\n\n # Stethoscope logger\n logger.add(\n sys.stdout,\n level=level,\n filter=lambda record: record[\"extra\"].get(\"service\") == LoggerServices.Stethoscope,\n format=LoggerFormats.Stethoscope\n )\n\n return logger", "def set_file_logger(path, log_level=logging.DEBUG, format_string=None, logger_name=\"smc\"):\n if format_string is None:\n format_string = LOG_FORMAT\n\n log = logging.getLogger(logger_name)\n log.setLevel(log_level)\n\n # create file handler and set level\n ch = logging.FileHandler(path)\n ch.setLevel(log_level)\n # create formatter\n formatter = logging.Formatter(format_string)\n # add formatter to ch\n ch.setFormatter(formatter)\n # add ch to logger\n log.addHandler(ch)", "def setup_logger(name, log_file, level=logging.INFO):\n\n handler = logging.FileHandler(log_file)\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def setup_logger(name, log_file, level=logging.INFO):\n\n handler = logging.FileHandler(log_file) \n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def setup_logger(name, log_file, level=logging.INFO):\n\n handler = logging.FileHandler(log_file) \n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def setup_logger(name, log_file, level=logging.INFO):\n\n handler = logging.FileHandler(log_file) \n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def setup_logger(name, log_file, level=logging.INFO):\n\n handler = logging.FileHandler(log_file) \n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def log_msg(level, msg):\n now = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime())\n level = (level+' '*5)[:5]\n msg = msg.replace('\\r', '').replace('\\n', '|')\n\n line = '[{}][{}]: {}\\n'.format(now, level.upper(), msg)\n with open(CONFIG['reportFile'], 'a') as logfp:\n logfp.write(line)", "def setup_logger(name, log_file, level=logging.INFO):\n\n handler = 
logging.FileHandler(log_file) \n handler.setFormatter(logging_formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def init_logger():\n logpath = Path(f\"logs/{time.strftime('%Y.%m.%d %H:%M')}.txt\")\n logpath.parent.mkdir(exist_ok=True)\n logging.basicConfig(filename=logpath, level=logging.DEBUG)", "def setup_logger(level, name, use_rotating_handler=True):\r\n \r\n logger = logging.getLogger(name)\r\n logger.propagate = False # Prevent the log messages from being duplicated in the python.log file\r\n logger.setLevel(level)\r\n \r\n log_file_path = os.path.join( os.environ['SPLUNK_HOME'], 'var', 'log', 'splunk', 'radius_auth_rest_handler.log' )\r\n \r\n if use_rotating_handler:\r\n file_handler = logging.handlers.RotatingFileHandler(log_file_path, maxBytes=25000000, backupCount=5)\r\n else:\r\n file_handler = logging.FileHandler(log_file_path)\r\n \r\n formatter = logging.Formatter('%(asctime)s %(levelname)s ' + name + ' - %(message)s')\r\n file_handler.setFormatter(formatter)\r\n \r\n logger.addHandler(file_handler)\r\n \r\n return logger", "def start_log_to_file(path, name=\"rt1_filehandler\", level=logging.INFO):\n try:\n # check if file-handler already exists, and if yes stop and remove it\n stop_log_to_file(name=name)\n\n log = setup_logger()\n # get formatting from consolehandler (always present)\n hc = [val for val in log.handlers if val.name == \"rt1_consolehandler\"][0]\n\n # setup a new filehandler\n fh = logging.FileHandler(path, \"a\")\n fh.setFormatter(hc.formatter)\n fh.set_name(name)\n # initialize the file-handler with level 1 to get all infos\n fh.setLevel(level)\n\n log.addHandler(fh)\n\n log.debug(\n f\"log-file for handler {name} added at location\" + f' \"{fh.baseFilename}\"!'\n )\n\n except IndexError as err:\n log.exception(err)", "def setup_logging(\n level,\n console_level,\n file_level,\n):\n global _LOGGING_INITIALIZED\n if _LOGGING_INITIALIZED:\n logging.debug('SetupLogging: logging system already initialized')\n return\n\n program_name = get_program_name()\n logging.addLevelName(LogLevel.DEBUG_VERBOSE, 'DEBUG_VERBOSE')\n logging.addLevelName(LogLevel.ALL, 'ALL')\n\n # Initialize the logging system:\n\n log_formatter = logging.Formatter(\n fmt='%(asctime)s %(levelname)s %(filename)s:%(lineno)s : %(message)s',\n )\n\n log_formatter.formatTime = _format_time\n\n logging.root.handlers.clear()\n logging.root.setLevel(level)\n\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(log_formatter)\n console_handler.setLevel(console_level)\n logging.root.addHandler(console_handler)\n\n # Initialize log dir:\n tstamp = timestamp()\n pid = os.getpid()\n\n if FLAGS.log_dir is None:\n tmp_dir = os.path.join('/tmp', getpass.getuser(), program_name)\n make_dir(tmp_dir)\n FLAGS.log_dir = tempfile.mkdtemp(\n prefix='%s.%d.' 
% (tstamp, pid),\n dir=tmp_dir)\n\n # Link current log dir to latest:\n latest_path = os.path.join(tmp_dir, \"latest\")\n remove(latest_path)\n os.symlink(src=os.path.basename(FLAGS.log_dir), dst=latest_path)\n\n logging.info('Using log dir: %s', FLAGS.log_dir)\n make_dir(FLAGS.log_dir)\n\n log_file = os.path.join(FLAGS.log_dir, '%s.%s.%d.log' % (program_name, tstamp, pid))\n\n # Link current log file to latest.log:\n latest_path = os.path.join(FLAGS.log_dir, \"latest.log\")\n remove(latest_path)\n os.symlink(src=log_file, dst=latest_path)\n\n file_handler = logging.FileHandler(filename=log_file)\n file_handler.setFormatter(log_formatter)\n file_handler.setLevel(file_level)\n logging.root.addHandler(file_handler)\n\n from base import log\n log.set_logger(log.Logger(level=log.Level.ALL))\n\n _LOGGING_INITIALIZED = True", "def write_log(self, level, message): \n \n level = level.lower()\n #print(level, message,str(self.logger))\n if level == 'debug':\n self.logger.debug('%s', message)\n elif level == 'error':\n self.logger.error('%s', message)\n elif level == 'critical':\n self.logger.critical('%s', message)\n elif level == 'warning':\n self.logger.warning('%s', message)\n else:\n self.logger.info('%s', message)", "def SetupLogging(level=logging.WARNING, log_file_name=None):\n logging.basicConfig(\n format='%(levelname)-8s %(asctime)-8s %(message)s',\n datefmt='%H:%M:%S',\n level=level,\n **({'filename': log_file_name} if log_file_name else {}))\n logging.Formatter.converter = time.gmtime\n logging.info(time.strftime('%Y.%m.%d %Z', time.gmtime()))", "def create_logger():\n global logger\n\n formatter = logging.Formatter('%(asctime)s|%(levelname)s|%(message)s')\n handler = TimedRotatingFileHandler(log_file, when=\"midnight\", interval=1)\n handler.setFormatter(formatter)\n handler.setLevel(log_level)\n handler.suffix = \"%Y-%m-%d\"\n logger = logging.getLogger(\"sacplus\")\n logger.setLevel(log_level)\n logger.addHandler(handler)", "def create_logger(logging, tool_name, level):\n logger = logging.getLogger(tool_name)\n\n # Create handlers\n handler = logging.StreamHandler()\n handler.setLevel(level)\n\n # Create formatters and add it to handlers\n logformat = logging.Formatter(\n '[%(name)s - %(asctime)s] %(levelname)s: %(message)s')\n handler.setFormatter(logformat)\n\n # Add handlers to the logger\n logger.addHandler(handler)\n logger.setLevel(level)\n\n return logger", "def create_logger(log_level):\n log_formatter = logging.Formatter(fmt=LOG_FORMAT, datefmt=LOG_TIMESTAMP_FORMAT)\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(log_formatter)\n logger = logging.getLogger('blockip')\n logger.setLevel(log_level)\n logger.addHandler(console_handler)\n return logger", "def set(cls, log_level, log_filename, append):\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n # Log to sys.stderr using log level passed through command line\n if log_level != logging.NOTSET:\n log_handler = logging.StreamHandler()\n formatter = logging.Formatter(\"%(levelname)-8s %(message)s\")\n log_handler.setFormatter(formatter)\n log_handler.setLevel(log_level)\n logger.addHandler(log_handler)\n\n # Log to rotating file using DEBUG log level\n log_handler = logging.handlers.RotatingFileHandler(\n log_filename, mode=\"a+\", backupCount=3\n )\n formatter = logging.Formatter(\"%(asctime)s %(levelname)-8s %(message)s\")\n log_handler.setFormatter(formatter)\n log_handler.setLevel(logging.DEBUG)\n logger.addHandler(log_handler)\n\n if not append:\n # Create a new log file on every 
new\n # (i.e. not scheduled) invocation\n log_handler.doRollover()", "def setup_logger(logger_filename, loglevel=\"INFO\", file_loglevel=\"INFO\", name='log'):\n \n # Setup the logger\n \n \n # instanciate the logger\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n # Filehandlier\n form_File = logging.Formatter('%(asctime)s - %(module)s - %(funcName)s - %(lineno)s - %(levelname)s - '\n '%(message)s')\n fh = logging.FileHandler(logger_filename)\n #fh.setLevel(logging.DEBUG)\n\n # If SAME, use the same loglevel as VERBOSE for file_loglevel\n if file_loglevel == \"SAME\":\n file_loglevel = loglevel\n\n if not file_loglevel in [\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"SAME\"]:\n logger.error(\"Error : wrong log level : \", loglevel)\n sys.exit(1)\n if file_loglevel == \"DEBUG\":\n fh.setLevel(logging.DEBUG)\n elif file_loglevel == \"INFO\":\n fh.setLevel(logging.INFO)\n elif file_loglevel == \"WARNING\":\n fh.setLevel(logging.WARNING)\n elif file_loglevel == \"ERROR\":\n fh.setLevel(logging.ERROR)\n else:\n logger.error(\"Error : wrong log level\")\n sys.exit(1)\n fh.setFormatter(form_File)\n\n # ConsoleHandler\n ch = logging.StreamHandler()\n form_Console = logging.Formatter('%(module)s - %(message)s')\n ch.setFormatter(form_Console)\n\n # Get the log level\n if not loglevel in [\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\"]:\n logger.error(\"Error : wrong log level : \", loglevel)\n sys.exit(1)\n if loglevel == \"DEBUG\":\n ch.setLevel(logging.DEBUG)\n ch.setFormatter(form_File)\n elif loglevel == \"INFO\":\n ch.setLevel(logging.INFO)\n elif loglevel == \"WARNING\":\n ch.setLevel(logging.WARNING)\n elif loglevel == \"ERROR\":\n ch.setLevel(logging.ERROR)\n else:\n logger.error(\"Error : wrong log level\")\n sys.exit(1)\n\n # Add Handlers\n logger.addHandler(fh)\n logger.addHandler(ch)\n\n return logger", "def create_logger(level=logging.DEBUG, record_format=None):\n if record_format is None:\n record_format = \"[%(asctime)s][%(thread)d][%(filename)s][line: %(lineno)d][%(levelname)s] ## %(message)s\"\n\n logger = logging.getLogger(\"mylogger\")\n logger.setLevel(level)\n # 修改\n fh.setLevel(level)\n ch.setLevel(level)\n formatter = logging.Formatter(record_format)\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n logger.addHandler(fh)\n logger.addHandler(ch)\n return logger", "def setup_logbook(name, extension='.txt', level=logging.INFO, soloDir = True):\n formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d (%(name)s) - %(message)s', datefmt='%d-%m-%y %H:%M:%S')\n date = datetime.today().strftime('%Y-%m-%d')\n if soloDir:\n log_path = str(settings.DATA_DIR + name + '/' + name.replace('_', '') +'_' + date + extension)\n else:\n log_path = str(settings.DATA_DIR + name +'_' + date + extension)\n handler = RotatingFileHandler(log_path, maxBytes=settings.MAX_FILE_SIZE, backupCount=1)\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def set(cls, log_level, log_filename, append):\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n # Log to sys.stderr using log level passed through command line\n if log_level != logging.NOTSET:\n log_handler = logging.StreamHandler(sys.stdout)\n if sys.platform.find('linux') >= 0:\n formatter = ColoredFormatter(cls.COLOR_FORMAT)\n else:\n formatter = ColoredFormatter(cls.NO_COLOR_FORMAT, False)\n log_handler.setFormatter(formatter)\n log_handler.setLevel(log_level)\n logger.addHandler(log_handler)\n\n # Log to 
rotating file using DEBUG log level\n log_handler = logging.handlers.RotatingFileHandler(log_filename,\n mode='a+',\n backupCount=3)\n formatter = logging.Formatter('%(asctime)s %(levelname)-8s '\n '%(message)s')\n log_handler.setFormatter(formatter)\n log_handler.setLevel(logging.DEBUG)\n logger.addHandler(log_handler)\n\n if not append:\n # Create a new log file on every new\n # (i.e. not scheduled) invocation\n log_handler.doRollover()", "def init_log(log_level=logging.DEBUG):\n now = time.time()\n ts = datetime.datetime.fromtimestamp(now).strftime('%Y%m%d')\n file_name = os.path.abspath(os.path.join(os.getcwd(), '..', 'traffic_logs', f'{ts}_traffic.log'))\n folder, _ = os.path.split(file_name)\n Path(folder).mkdir(parents=True, exist_ok=True)\n\n # create formatter and add it to the handlers\n log_format = '[%(asctime)s][%(name)s][%(levelname)s] %(message)s'\n\n logging.basicConfig(filemode='a',\n format=log_format,\n datefmt='%H:%M:%S',\n level=logging.ERROR,\n stream=sys.stdout,\n # filename=file_handler\n )\n\n formatter = logging.Formatter(log_format)\n\n # create file handler which logs even debug messages\n file_handler = logging.FileHandler(file_name)\n file_handler.setFormatter(formatter)\n file_handler.setLevel(log_level)\n\n std_out = logging.StreamHandler(sys.stdout)\n std_out.setFormatter(formatter)\n std_out.setLevel(log_level)\n\n # This for avoiding streams to log to root's stderr, which prints in red in jupyter\n root_logger = logging.getLogger()\n for handler in root_logger.handlers:\n # continue\n root_logger.removeHandler(handler)\n\n # add the handlers to the logger\n root_logger.addHandler(file_handler)\n\n # By default the install() function installs a file_handler on the root root_logger,\n # this means that log messages from your code and log messages from the\n # libraries that you use will all show up on the terminal.\n coloredlogs.install(level=log_level, fmt=log_format, stream=sys.stdout)", "def set_logger(self, level):\n if level > 4:\n level = 4\n\n levels = ['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG']\n logger = logging.Logger('json_tool')\n handler = logging.StreamHandler()\n handler.setLevel(levels[level])\n logger.addHandler(handler)\n logger.info('Logger initilized')\n return logger", "def define_logger(logs_path, log_level='INFO'):\n try:\n if not os.path.exists(logs_path):\n os.mkdir(logs_path)\n except OSError:\n # Need this to log into stderr for tracking problems.\n # On Apache, this will be redirect to the ErrorLog.\n print >>sys.stderr, 'Cannot create {0} folder'.format(logs_path)\n print >>sys.stderr, 'Exiting...'\n sys.exit(-1)\n\n logging_dict = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'standard': {\n 'format': '%(levelname)s %(asctime)s %(name)s.%(module)s.'\n '%(funcName)s:L%(lineno)d ProcessNo:%(process)d/'\n 'ThreadNo:%(thread)d \"%(message)s\"',\n 'datefmt': '%Y-%m-%d %H:%M:%S',\n },\n 'normative': {\n 'format': '%(levelname)s %(asctime)s %(module)s.'\n '%(funcName)s:L%(lineno)d \"%(message)s\"',\n 'datefmt': '%Y-%m-%d %H:%M:%S',\n },\n },\n 'handlers': {\n 'null': {\n 'level': 'DEBUG',\n 'class': 'logging.NullHandler',\n },\n 'default': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'standard',\n 'stream': sys.stdout\n },\n 'errors': {\n 'level': 'WARNING',\n 'class': 'prometheus.utils.string_helpers.SplitStreamHandler',\n 'formatter': 'normative'\n },\n 'default_file': {\n 'level': 'DEBUG',\n 
'class': 'logging.handlers.RotatingFileHandler',\n 'filename': join(logs_path, 'prometheus.log'),\n 'maxBytes': 1024 * 1024 * 5, # 5 MB\n 'backupCount': 5,\n 'formatter': 'standard',\n },\n },\n 'loggers': {\n '': {\n 'handlers': ['default'],\n 'level': 'INFO',\n 'propagate': True\n },\n 'default': {\n 'handlers': ['default_file', 'errors', 'console'],\n 'level': log_level,\n 'propagate': False,\n },\n }\n }\n\n if log_level == 'DEBUG':\n # make all loggers use the console.\n for logger in logging_dict['loggers']:\n logging_dict['loggers'][logger]['handlers'] = ['console']\n\n return logging_dict", "def _log_to_file(path: str) -> None:\n if path:\n fh = logging.FileHandler(path)\n LOGGER.addHandler(fh)\n LOGGER.setLevel(logging.DEBUG)", "def initialize_logger(filename=None, level=logging.DEBUG, filemode='w'):\n log_format = '%(asctime)s %(levelname)s\\n' + \\\n ' %(filename)s:%(lineno)s: %(name)s %(message)s'\n\n if filename is None:\n handler = logging.StreamHandler()\n else:\n handler = logging.handlers.RotatingFileHandler(\n filename=filename, mode=filemode)\n\n handler.setFormatter(logging.Formatter(log_format))\n logger = logging.getLogger('LOG')\n logger.addHandler(handler)\n logger.setLevel(level)\n\n return logger, handler", "def create_logger(**kwargs):\n\n log = logging.getLogger()\n log.setLevel(logging.INFO)\n\n # Create Log Format(s)\n f_format = logging.Formatter('%(asctime)s:%(processName)s:%(name)s:%(levelname)s:%(message)s')\n\n # Create Handlers\n c_handler = logging.StreamHandler()\n c_handler.setLevel(logging.INFO)\n c_handler.setFormatter(f_format)\n log.addHandler(c_handler)\n\n for filename, level in kwargs.items():\n handler = logging.FileHandler(filename=filename)\n handler.setLevel(level)\n handler.setFormatter(f_format)\n log.addHandler(handler)\n\n return log", "def __CreateLog(self, log_name, log_level=NOTSET, log_handler=FILE,\n stream=sys.stderr):\n logger = logging.getLogger(log_name)\n\n # Update log level to reflect changes. 
If a higher log level is given\n # the logger should raise it's boundary.\n if log_level < logger.level or logger.level == logging.NOTSET:\n logger.setLevel(log_level)\n\n if (log_name in self.__log_table and\n self.__log_table[log_name] == Logger.FILE_AND_CONSOLE):\n # Don't add any more handlers.\n return\n\n # Create an entry for log name.\n if log_name not in self.__log_table:\n self.__log_table[log_name] = Logger.NONE\n\n if log_handler != Logger.NONE:\n fmt = ('[%(asctime)s::%(levelname)s::' + self.__lib_sig +\n '] %(message)s')\n # Add FILE handler if needed.\n if (log_handler == Logger.FILE or\n log_handler == Logger.FILE_AND_CONSOLE and\n self.__log_table[log_name] != Logger.FILE):\n if not os.path.exists(self.__log_path):\n os.makedirs(self.__log_path)\n fh = logging.FileHandler(os.path.join(self.__log_path,\n '%s.log' % log_name))\n fh.setLevel(log_level)\n fh.setFormatter(logging.Formatter(fmt))\n logger.addHandler(fh)\n # Binary arithmetic to yield updated handler.\n self.__log_table[log_name] = self.__log_table[log_name] + Logger.FILE\n\n # Add CONSOLE handler if needed.\n if (log_handler == Logger.CONSOLE or\n log_handler == Logger.FILE_AND_CONSOLE and\n self.__log_table[log_name] != Logger.CONSOLE):\n ch = logging.StreamHandler(stream)\n ch.setLevel(log_level)\n ch.setFormatter(logging.Formatter(fmt))\n logger.addHandler(ch)\n # Binary arithmetic to yield updated handler.\n self.__log_table[log_name] = self.__log_table[log_name] + Logger.CONSOLE", "def init_logging(file_name, log_path=None, level=logging.INFO):\n global logger\n\n try:\n if not log_path:\n log_path = '../logs'\n except KeyError as e:\n raise LoggerException(message=str(e))\n\n log_path = os.path.join(log_path, file_name + \"-\" + datetime.utcnow().strftime(\"%Y%m%d-%H%M%S\") + '.log')\n\n logging.basicConfig(\n filename=log_path,\n format='%(asctime)s - %(levelname)s: %(message)s',\n level=level,\n )", "def __create_logger(who, level):\n global loggers\n global toconsole\n global LEVELS\n global console\n global logfile\n loggers[who] = logging.getLogger(who)\n loggers[who].setLevel(level)\n format = logging.Formatter(\"%(asctime)s - %(name)s - \"\\\n \"%(levelname)s - %(message)s\")\n if (toconsole):\n if (console == None):\n console = logging.StreamHandler()\n console.setFormatter(format)\n loggers[who].addHandler(console)\n else:\n if (logfile == None):\n logfile = logging.handlers.RotatingFileHandler('/var/log/yapc.log',\n maxBytes=10485760,\n backupCount=10)\n logfile.setFormatter(format)\n loggers[who].addHandler(logfile)\n loggers[GENERIC_LOG_NAME].log(LEVELS[\"VDBG\"],\n \"Add logger for \"+who+\" at level \"+str(level))", "def create_logger(log_file=None, file_=True, console=True,\n with_time=False, file_level=2, console_level=2,\n propagate=False, clear_exist_handlers=False, name=None):\n if file_:\n prefix = strftime('%Y%m%d%H%M%S', localtime(time()))\n if log_file is None:\n log_file = os.path.join(os.path.dirname(__file__), prefix)\n elif with_time:\n log_file = os.path.join(os.path.dirname(log_file), prefix + \"_\" + os.path.basename(log_file))\n\n logger = logging.getLogger(name)\n\n if clear_exist_handlers:\n logger.handlers.clear()\n\n logger.setLevel(levels[1])\n logger.propagate = propagate\n\n formatter = MyFormatter(\"(User) %(asctime)s: %(levelname).1s %(message)s\")\n\n if file_:\n # Create file handler\n file_handler = logging.FileHandler(log_file)\n file_handler.setLevel(levels[file_level])\n file_handler.setFormatter(formatter)\n # Register handler\n 
logger.addHandler(file_handler)\n\n if console:\n # Create console handler\n console_handler = logging.StreamHandler()\n console_handler.setLevel(levels[console_level])\n console_handler.setFormatter(formatter)\n logger.addHandler(console_handler)\n\n return logger", "def __logtofile(self, log_name):\n logger = logging.getLogger(log_name)\n\n file_path = os.path.join(self.log_file_path, log_name + '.txt')\n\n formatter = logging.Formatter('<%(asctime)s> %(levelname)-8s %(message)s',\n datefmt='%y-%m-%d %H:%M:%S')\n self.file_handlers[logger] = logging.FileHandler(file_path, mode='w')\n self.file_handlers[logger].setFormatter(formatter)\n self.file_handlers[logger].setLevel(logging.DEBUG)\n logger.addHandler(self.file_handlers[logger])\n\n logger.info('SAVING LOGS IN: %s' % file_path)", "def setup_logger(name, log_file, level=logging.INFO):\n if name in ( \"\", None ):\n raise \"No name\"\n return\n\n if log_file in ( \"\", None ):\n raise \"No log_file\"\n return\n\n formatter = logging.Formatter(\n fmt = '%(asctime)s.%(msecs)03d %(levelname)s File: \"%(pathname)s\", line %(lineno)d, in %(module)s - %(funcName)s: %(message)s',\n datefmt= '%Y-%m-%d %H:%M:%S'\n )\n\n handler = logging.FileHandler(log_file)\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def initLogger(filename, dir_name='data/logs/'):\n \n filename = os.path.join(dir_name, filename)\n \n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n \n if os.path.exists(filename):\n os.remove(filename)\n \n formatter = logging.Formatter('%(asctime)s | %(levelname)s | %(message)s', \n '%m-%d-%Y %H:%M:%S')\n fh = logging.FileHandler(filename)\n fh.setFormatter(formatter)\n \n logger.addHandler(fh)", "def create_logger():\n logging.basicConfig(level = logging.INFO, filename='logging', filemode='w')\n logger = logging.getLogger(\" \")\n admin_handler = logging.FileHandler('logging')\n admin_handler.setLevel(logging.INFO)\n logger.addHandler(admin_handler)\n logger.warning(f'{admin_handler} created a new logger')\n return logger", "def get_logger(logger_name, logging_format, file_name, level=logging.INFO):\n path, prepared = '', True\n for cat in file_name.split('/')[1:-1]:\n path += '/%s' % cat\n if not os.path.exists(path):\n try:\n os.mkdir(path)\n except PermissionError:\n prepared = False\n break\n if not prepared:\n file_name = '/tmp/%s' % file_name.split('/')[-1]\n logging.basicConfig(level=level, format=logging_format)\n log = logging.getLogger(logger_name)\n handler = logging.FileHandler(file_name, encoding='utf8')\n handler.setFormatter(logging.Formatter(logging_format))\n log.addHandler(handler)\n log.setLevel(level=level)\n return log", "def create_logger(level=logging.NOTSET):\n _test = os.path.join(os.path.join(os.getcwd(), 'gnupg'), 'test')\n _now = datetime.now().strftime(\"%Y-%m-%d_%H%M%S\")\n _fn = os.path.join(_test, \"%s_test_gnupg.log\" % _now)\n _fmt = \"%(relativeCreated)-4d L%(lineno)-4d:%(funcName)-18.18s %(levelname)-7.7s %(message)s\"\n\n ## Add the GNUPG_STATUS_LEVEL LogRecord to all Loggers in the module:\n logging.addLevelName(GNUPG_STATUS_LEVEL, \"GNUPG\")\n logging.Logger.status = status\n\n if level > logging.NOTSET:\n logging.basicConfig(level=level, filename=_fn,\n filemode=\"a\", format=_fmt)\n logging.logThreads = True\n if hasattr(logging,'captureWarnings'):\n logging.captureWarnings(True)\n colouriser = _ansistrm.ColorizingStreamHandler\n colouriser.level_map[9] = (None, 'blue', False)\n 
colouriser.level_map[10] = (None, 'cyan', False)\n handler = colouriser(sys.stderr)\n handler.setLevel(level)\n\n formatr = logging.Formatter(_fmt)\n handler.setFormatter(formatr)\n else:\n handler = NullHandler()\n\n log = logging.getLogger('gnupg')\n log.addHandler(handler)\n log.setLevel(level)\n log.info(\"Log opened: %s UTC\" % datetime.ctime(datetime.utcnow()))\n return log", "def set_logger(**kwargs):\n # create logger\n if not os.path.exists(kwargs.get('log_dir_path')):\n os.makedirs(kwargs.get('log_dir_path'))\n logger = logging.getLogger(kwargs.get('logger_name'))\n if kwargs.get('log_level').lower() == 'info':\n log_level = 20\n elif kwargs.get('log_level').lower() == 'warning':\n log_level = 30\n elif kwargs.get('log_level').lower() == 'error':\n log_level = 40\n elif kwargs.get('log_level').lower() == 'critical':\n log_level = 50\n else:\n log_level = 10\n logger.setLevel(log_level)\n # Create a file handler\n log_file_path = os.path.join(kwargs.get('log_dir_path'), kwargs.get('log_file_name'))\n handler = logging.FileHandler(log_file_path)\n handler.setLevel(log_level)\n # Create a logging format\n formatter = logging.Formatter(\n fmt='%(asctime)s - %(levelname)s[%(lineno)d] - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')\n handler.setFormatter(formatter)\n # Add the handlers to the logger\n logger.addHandler(handler)\n return logger", "def log_to_file(self, filename=None):\n if not filename:\n filename = '%s/../../output/sentimentpy.log' % os.path.dirname(os.path.realpath(__file__))\n file_handler = RotatingFileHandler(filename, 'a', 1000000, 1)\n file_handler.setLevel(logging.DEBUG)\n file_handler.setFormatter(self.formatter)\n self.log.addHandler(file_handler)\n return self", "def setup_logger(name=None, level=None):\r\n from .config import Config\r\n\r\n logger = logging.getLogger(name)\r\n logger.handlers = []\r\n level = level or Config[\"logging.level\"].upper() or logging.ERROR\r\n if Config[\"logging.std\"]:\r\n handler = logging.StreamHandler()\r\n handler.setLevel(level)\r\n fmt = logging.Formatter(Config[\"logging.std_format\"])\r\n handler.setFormatter(fmt)\r\n logger.addHandler(handler)\r\n if Config[\"logging.file\"]:\r\n handler = logging.FileHandler(Config[\"logging.file\"])\r\n handler.setLevel(level)\r\n fmt = logging.Formatter(Config[\"logging.file_format\"])\r\n handler.setFormatter(fmt)\r\n logger.addHandler(handler)\r\n return logger", "def __init__(self, logging_level, root_dir, logs_path):\n logging.basicConfig(level=logging_level,\n filename=os.path.join(root_dir, logs_path))", "def set_logger(level='info'):\n levels = {\n 'notset': logging.NOTSET,\n 'debug': logging.DEBUG,\n 'info': logging.INFO,\n 'warn': logging.WARN,\n 'error': logging.ERROR,\n 'critical': logging.CRITICAL\n }\n if level not in levels:\n level = 'info'\n return {'format': '[%(levelname)-s] %(asctime)s %(message)s', 'level': levels.get(level.lower()), 'datefmt': '%Y-%m-%d %H:%M:%S'}", "def initLogger(level=logging.DEBUG, logdir='logfiles'):\n\n #Check if logdir exists. 
Make if not.\n ################################################################\n if not isdir(logdir): makedirs(logdir)\n\n #Init Logger\n ################################################################\n logging.basicConfig(level=level)\n logfileHandler = TimedRotatingFileHandler(join(logdir, 'NESSILog'),\n when='d')\n logfileFormatter = logging.Formatter(\n '[%(asctime)s] %(filename)s:%(funcName)s - %(message)s')\n logfileHandler.setLevel(level) \n logfileHandler.setFormatter(logfileFormatter)\n\n logging.getLogger('').addHandler(logfileHandler)", "def create_logger(logger_name,\n log_format=None,\n log_level=logging.INFO,\n log_path=None):\n logger = logging.getLogger(logger_name)\n assert (len(logger.handlers) == 0)\n logger.setLevel(log_level)\n if log_path is None:\n handler = logging.StreamHandler()\n else:\n os.stat(os.path.dirname(os.path.abspath(log_path)))\n handler = logging.FileHandler(log_path)\n handler.setLevel(log_level)\n if log_format is not None:\n formatter = logging.Formatter(log_format)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger", "def get_logger(name, level='INFO', terminal_log=True, file_log=False,\n file_name=None, file_max_bytes=1048576, file_backup_count=3,\n email_on_warnings=True, email_on_errors=True):\n # Get the root logger and set the level\n log_level = getattr(logging, level.upper())\n root_logger = logging.getLogger('')\n root_logger.setLevel(log_level)\n\n handlers = []\n # Form the handler(s) and set the level\n if terminal_log:\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(log_level)\n handlers.append(stream_handler)\n\n # Create email warning handler\n if email_on_warnings:\n # Note, the placeholder in the subject will be replaced by the hostname\n warning_email_handler = CustomSMTPWarningHandler(\n mailhost=MAIL_HOST, fromaddr=WARNING_EMAIL,\n toaddrs=[WARNING_EMAIL], subject='Warning from: {}')\n warning_email_handler.setLevel(logging.WARNING)\n handlers.append(warning_email_handler)\n\n # Create email error handler\n if email_on_errors:\n # Note, the placeholder in the subject will be replaced by the hostname\n error_email_handler = CustomSMTPHandler(\n mailhost=MAIL_HOST, fromaddr=ERROR_EMAIL,\n toaddrs=[ERROR_EMAIL], subject='Error from: {}')\n error_email_handler.setLevel(logging.ERROR)\n handlers.append(error_email_handler)\n\n # Create rotating file handler\n if file_log:\n if file_name is None:\n file_name = name + '.log'\n file_handler = RotatingFileHandler(file_name, maxBytes=file_max_bytes,\n backupCount=file_backup_count)\n file_handler.setLevel(log_level)\n handlers.append(file_handler)\n\n # Add formatters to the handlers and add the handlers to the root_logger\n formatter = logging.Formatter(\n '%(asctime)s:%(name)s: %(levelname)s: %(message)s')\n for handler in handlers:\n handler.setFormatter(formatter)\n root_logger.addHandler(handler)\n\n # Create a named logger and return it\n logger = logging.getLogger(name)\n return logger", "def log(level=EVENT_LEVELS.Info, usr=None, msg=''):\n level = level if level in EVENT_LEVELS else EVENT_LEVELS.Info\n usr = None if usr.is_anonymous else usr\n\n if level in log_levels:\n print(f\"{level} Log: {usr} - {msg}\")\n EventLog.objects.create(\n user=usr,\n level=level,\n message=msg\n )", "def createMovieLogger(self, level, message):\n logger = logging.getLogger(__name__ + \".MovieGenerator\")\n method = getattr(logger, level, None)\n if method is not None:\n method(message)", "def create_logger(args, save_dir, fname=None):\n if not 
os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n root = logging.getLogger()\n root.setLevel(logging.INFO)\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(logging.INFO)\n if fname is None:\n fname = 'stdout.log'\n hdlr = logging.FileHandler(os.path.join(save_dir, fname))\n hdlr.setLevel(logging.INFO)\n msg_format = '%(asctime)s [%(levelname)s] %(message)s'\n formatter = logging.Formatter(msg_format)\n ch.setFormatter(formatter)\n hdlr.setFormatter(formatter)\n root.addHandler(ch)\n root.addHandler(hdlr)\n logging.info(sys.version_info)\n logging.info(args)\n\n return logging", "def getLogger(log_file, level=logging.INFO):\n name = \"new_logger\"\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n stream = logging.StreamHandler()\n stream.setFormatter(formatter)\n handler = logging.FileHandler(log_file)\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n logger.addHandler(stream)\n return logger", "def set_log(self, level=logging.INFO, file=\"\"):\n\n self._logging_level = level\n\n if file:\n self._log_file = file", "def init(config_file: str, level: str):\n logger = logging.getLogger()\n formatter = logging.Formatter(\"%(pathname)s : %(asctime)s : %(levelname)s : Function(%(funcName)s) \\t %(message)s\")\n file_handler = logging.FileHandler(config_file)\n file_handler.setFormatter(formatter)\n file_handler.setLevel(level)\n logger.handlers = [file_handler]", "def logger(level, format='%(levelname)s %(message)s'):\n\n # Remove previous handlers\n root = logging.getLogger()\n if root.handlers:\n for handler in root.handlers:\n root.removeHandler(handler)\n\n # Create logger\n logger = logging.getLogger()\n logger.setLevel(getattr(logging, level.upper()))\n\n # Create console handler and set level to debug\n ch = logging.StreamHandler()\n ch.setLevel(getattr(logging, level.upper()))\n\n # Create formatter\n formatter = Formatter(format)\n\n # Add formatter to ch\n ch.setFormatter(formatter)\n\n # Add console handler to logger\n logger.addHandler(ch)\n\n return logger", "def get_logger(log_file, lvl=DEBUG_LEVEL_NUM):\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n\n handler = logging.FileHandler(log_file)\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(log_file[:-4])\n logger.setLevel(lvl)\n logger.addHandler(handler)\n\n return logger", "def setup_logger(log_file: str = DEFAULT_LOG_FILE, level: t.Optional[int] = None):\n # create logger with 'spam_application'\n logger = logging.getLogger()\n root_log_level = level if (level is not None) else logging.DEBUG\n logger.setLevel(root_log_level)\n # create file handler which logs even debug messages\n fh = logging.FileHandler(log_file)\n fh.setLevel(logging.DEBUG)\n # create console handler with a higher log level\n ch = logging.StreamHandler()\n ch.setLevel(root_log_level)\n # create formatter and add it to the handlers\n formatter = logging.Formatter(\n \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n )\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n # add the handlers to the logger\n logger.addHandler(fh)\n logger.addHandler(ch)", "def get_logger(level):\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\"[%(name)s|%(asctime)s] %(message)s\")\n ch.setFormatter(formatter)\n\n logger = logging.getLogger(__name__)\n if not logger.handlers:\n logger.setLevel(level)\n logger.addHandler(ch)\n return logger", "def 
create_logger(log_dir=None):\n if log_dir and not os.path.exists(log_dir):\n os.makedirs(log_dir)\n log_format = '%(asctime)s %(process)d [%(levelname)s] %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_format)\n logger = logging.getLogger('es_on_gke')\n if log_dir:\n log_file = os.path.join(log_dir, 'log.txt')\n file_hdl = logging.FileHandler(log_file)\n formatter = logging.Formatter(fmt=log_format)\n file_hdl.setFormatter(formatter)\n logger.addHandler(file_hdl)\n return logger", "def configLogger(file, size, num, syslog, level):\n\n print \"Setting logging level to \" + level\n levels = {\n \"CRITICAL\": logging.CRITICAL,\n \"ERROR\" : logging.ERROR,\n \"WARNING\" : logging.WARNING,\n \"INFO\" : logging.INFO,\n \"DEBUG\" : logging.DEBUG,\n \"NOTSET\" : logging.NOTSET\n }\n logger.setLevel(levels.get(level, logging.NOTSET))\n\n if syslog != \"YES\":\n print \"Configuring logger: file = \" + file + \" size = \" + str(size) + \" num = \" + str(num)\n fh = logging.handlers.RotatingFileHandler(file, mode='a', maxBytes=size, backupCount=num)\n fh.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(asctime)s %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n elif syslog == \"YES\":\n print \"Configuring syslogging\"\n sh = logging.handlers.SysLogHandler('/dev/log', facility=logging.handlers.SysLogHandler.LOG_SYSLOG)\n sh.encodePriority(sh.LOG_SYSLOG, sh.LOG_INFO)\n slFormatter = logging.Formatter('[sensorReporter] %(levelname)s - %(message)s')\n sh.setFormatter(slFormatter)\n logger.addHandler(sh)\n logger.info(\"---------------Started\")", "def init_log(path):\n file = open(path, 'w+')\n file.close()", "def set_logger(\n filename: str,\n mode: str = \"a\",\n level: int = logging.DEBUG,\n maxbytes: int = 1024 * 1024 * 10, # default: 10Mbyte\n backupcnt: int = 100,\n) -> None:\n logger = logging.getLogger(\"model_compression\")\n logger.setLevel(level)\n\n chdlr = logging.StreamHandler(sys.stdout)\n chdlr.setLevel(logging.DEBUG)\n cfmts = \"%(asctime)s - %(filename)s:%(lineno)d - %(levelname)s - %(message)s\"\n chdlr.setFormatter(logging.Formatter(cfmts))\n logger.addHandler(chdlr)\n\n fhdlr = logging.handlers.RotatingFileHandler(\n filename, mode=mode, maxBytes=maxbytes, backupCount=backupcnt\n )\n fhdlr.setLevel(logging.DEBUG)\n ffmts = \"%(asctime)s - \"\n ffmts += \"%(processName)s - %(threadName)s - \"\n ffmts += \"%(filename)s:%(lineno)d - %(levelname)s - %(message)s\"\n fhdlr.setFormatter(logging.Formatter(ffmts))\n logger.addHandler(fhdlr)", "def init_log_file(folder_path, suffix=None, log_level=logging.INFO):\n\n timestamp = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n log_format = '[%(levelname)s]: %(asctime)s %(message)s'\n\n if suffix is not None:\n file_name = timestamp + '_' + suffix\n else:\n file_name = timestamp\n\n file_path = os.path.join(folder_path, file_name)\n logging.basicConfig(filename=file_path, level=log_level, format=log_format)\n logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))\n\n return file_path", "def set_logger_config(level):\n # Get the root logger\n logger_obj = logging.getLogger(__name__)\n # Have to set the root logger level, it defaults to logging.WARNING\n logger_obj.setLevel(level)\n # route INFO and DEBUG logging to stdout from stderr\n logging_handler_out = logging.StreamHandler(sys.stdout)\n logging_handler_out.setLevel(logging.DEBUG)\n # create a logging format\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(process)d - %(threadName)s - %(levelname)s - 
%(message)s')\n logging_handler_out.setFormatter(formatter)\n logger_obj.addHandler(logging_handler_out)\n\n logging_handler_err = logging.StreamHandler(sys.stderr)\n logging_handler_err.setLevel(logging.WARNING)\n logger_obj.addHandler(logging_handler_err)\n return logger_obj", "def write_level(self,level):\n self.last_level = level\n self.last_level_date = datetime.now(tzlocal()).strftime(\"%Y-%m-%d %H:%M:%S\")\n self.write_config()", "def setup_logger(logger: logging.Logger, file_name: str):\n log_fmt = logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s')\n # Console Handler\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n ch.setFormatter(log_fmt)\n # File Handler\n fh = RotatingFileHandler(\n filename=f'log/{file_name}.log',\n maxBytes=int(1e6), backupCount=3,\n encoding='utf-8', mode='a'\n )\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(log_fmt)\n logger.addHandler(fh)\n logger.addHandler(ch)", "def get_logger(log_path=None, stream_lvl=0, file_lvl=logging.INFO):\n # pylint: disable=line-too-long\n assert stream_lvl in (logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL)\n # pylint: disable=line-too-long\n assert file_lvl in (logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL)\n assert isinstance(log_path, str) or isinstance(log_path, unicode)\n assert os.path.isabs(log_path)\n assert os.path.isdir(os.path.dirname(log_path))\n\n # Determine the most verbose log level so both file and stream messages\n # can be accepted by the logging object.\n # Cannot simply take min() of the params because a value of zero means \"don't log\"\n if stream_lvl:\n base_lvl = min(stream_lvl, file_lvl)\n else:\n base_lvl = file_lvl\n\n log = logging.getLogger(name=log_path)\n if log.handlers:\n # Calling for the same log multiple times would set multiple handlers\n # If you have 2 duplicate file handers, you write twice to the log file\n return log\n log.setLevel(base_lvl)\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n\n file_handler = logging.FileHandler(log_path)\n file_handler.setFormatter(formatter)\n file_handler.setLevel(file_lvl)\n\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n stream_handler.setLevel(stream_lvl)\n\n log.addHandler(file_handler)\n log.addHandler(stream_handler)\n return log", "def set_logging(log_file, log_level=logging.DEBUG):\n log_format = '[%(asctime)s] - [%(name)s] - [%(levelname)s] - %(message)s'\n logging.basicConfig(level=log_level, format=log_format, filename=log_file)\n return logging.getLogger()", "def setup_logger(filename, write_file=True):\n\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n\n _log = logging.getLogger()\n _log.setLevel(logging.DEBUG)\n\n formatter = logging.Formatter('%(message)s')\n formatter_console = logging.Formatter('%(levelname)s - %(message)s')\n\n if write_file:\n fh = logging.FileHandler(filename, mode='a')\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n _log.addHandler(fh)\n\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n ch.setFormatter(formatter_console)\n _log.addHandler(ch)\n\n return _log", "def _create_logger(title, log_msg_id=\"\", log_file_suffix=\".log\"):\n\n logging.setLoggerClass(SkidlLogger)\n logger = logging.getLogger(title)\n\n # Errors & warnings always appear on the terminal.\n handler = logging.StreamHandler(sys.stderr)\n handler.setLevel(logging.WARNING)\n handler.setFormatter(logging.Formatter(log_msg_id + 
\"%(levelname)s: %(message)s\"))\n logger.addHandler(handler)\n\n # Errors and warnings are stored in a log file with the top-level script's name.\n handler = SkidlLogFileHandler(get_script_name() + log_file_suffix, mode=\"w\")\n handler.setLevel(logging.WARNING)\n handler.setFormatter(logging.Formatter(log_msg_id + \"%(levelname)s: %(message)s\"))\n logger.addHandler(handler)\n\n # Set logger to trigger on info, warning, and error messages.\n logger.setLevel(logging.INFO)\n\n # Augment the logger's functions to count the number of errors and warnings.\n logger.error = CountCalls(logger.error)\n logger.warning = CountCalls(logger.warning)\n\n return logger", "def add_logger_filehandler(logger=set_default_logger(), logger_level=logging.INFO, filename='default.log', format=None):\n if format is None:\n _format = logging.Formatter(u\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n else:\n _format = logging.Formatter(format)\n try:\n fh = logging.FileHandler(filename)\n fh.setLevel(logger_level)\n fh.setFormatter(_format)\n logger.addHandler(fh)\n except Exception as e:\n logger.error(\"Failed to set %s as log file handler. Error: %s\" % (filename, e))\n finally:\n return logger", "def _set_logger(self, level):\n log.basicConfig(\n format='[%(asctime)s %(levelname)s]: %(message)s',\n level=log._nameToLevel.get(level.upper(), log.DEBUG))", "def log_to_file(text, status='INFO'):\n outfile = open(LogName, 'a')\n outfile.write(timestamp()+' - '+status+' - '+str(text)+'\\n')\n outfile.close()", "def __setup_logger(name, log_file, level=logging.WARNING, stream=True):\n log_format = logging.Formatter(\"%(asctime)s%(filename)s:%(lineno)-3d %(levelname)s %(message)s\")\n handler = logging.FileHandler(log_file)\n handler.setFormatter(log_format)\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n if stream is True:\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(log_format)\n logger.addHandler(stream_handler)\n return logger", "def launch_log(level, message, attachment=None):\n logger.write(message, level, attachment=attachment, launch_log=True)", "def file_logger(path: Union[str, Path], level: str = \"DEBUG\", overwrite: bool = True):\n path = Path(path)\n if overwrite and path.is_file():\n os.remove(path)\n logger_id = logger.add(path, level=level)\n try:\n yield None\n finally:\n logger.remove(logger_id)", "def init_log(log_path,\r\n name=None,\r\n level=logging.INFO,\r\n when=\"D\",\r\n backup=7,\r\n format=\"%(name)s:%(levelname)s:%(asctime)s:%(filename)s:%(lineno)d * %(thread)d %(message)s\",\r\n datefmt=\"%m-%d %H:%M:%S\"):\r\n formatter = logging.Formatter(format, datefmt)\r\n logger = logging.getLogger(name)\r\n logger.setLevel(level)\r\n\r\n dir = os.path.dirname(log_path)\r\n if not os.path.isdir(dir):\r\n os.makedirs(dir)\r\n\r\n # 输出info以上的信息\r\n handler = logging.handlers.TimedRotatingFileHandler(filename=log_path + \".log\",\r\n when=when,\r\n backupCount=backup)\r\n handler.setLevel(level)\r\n handler.setFormatter(formatter)\r\n logger.addHandler(handler)\r\n\r\n # 只输出warning的信息\r\n handler = logging.handlers.TimedRotatingFileHandler(filename=log_path + \".log.wf\",\r\n when=when,\r\n backupCount=backup)\r\n handler.setLevel(logging.WARNING)\r\n handler.setFormatter(formatter)\r\n logger.addHandler(handler)\r\n\r\n # 标准输出流\r\n stdout_handler = logging.StreamHandler(stream=sys.stdout)\r\n stdout_handler.setLevel(level)\r\n stdout_handler.setFormatter(formatter)\r\n logger.addHandler(stdout_handler)\r\n\r\n return 
logger" ]
[ "0.8126159", "0.76459956", "0.684709", "0.68146396", "0.6813477", "0.6811436", "0.6630617", "0.6629015", "0.6616148", "0.65982366", "0.65957934", "0.65776724", "0.6575906", "0.657348", "0.6556677", "0.6537464", "0.6526564", "0.65043306", "0.64900917", "0.6485572", "0.64745295", "0.6462916", "0.64584225", "0.6456113", "0.6452447", "0.6442021", "0.64370537", "0.6435097", "0.6435097", "0.6435097", "0.6435097", "0.6431839", "0.64296365", "0.64113086", "0.6404221", "0.64039797", "0.63946545", "0.6388158", "0.6380182", "0.63677263", "0.636211", "0.63555914", "0.63285387", "0.63278747", "0.6321267", "0.6311886", "0.6305629", "0.62915254", "0.6277281", "0.62769294", "0.6271242", "0.62252456", "0.6221997", "0.62201357", "0.62092483", "0.62059295", "0.6205589", "0.6183431", "0.61719346", "0.61528397", "0.61378044", "0.61376536", "0.6127768", "0.612496", "0.6123913", "0.61047894", "0.6092755", "0.6071876", "0.6040878", "0.60199195", "0.6018479", "0.60153604", "0.60127664", "0.60023916", "0.59975344", "0.5994267", "0.59865683", "0.5971682", "0.5971317", "0.5968354", "0.59652853", "0.5963712", "0.5952983", "0.59525716", "0.59435654", "0.5935027", "0.5929497", "0.59281194", "0.59268504", "0.59226936", "0.59186745", "0.5904058", "0.58846414", "0.5880206", "0.5878531", "0.5874211", "0.5869388", "0.58644474", "0.58640873", "0.5852453" ]
0.63525015
42
Initialize the cell to only point to itself; header is the column header for the cell
def __init__(self, header, name):
        self.up = self
        self.down = self
        self.left = self
        self.right = self
        self.header = header
        self.name = name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n\n self.reset_row()", "def __init__(cell, value):\n\t\tcell.value = value\n\t\tcell.parent = None\n\t\tcell.visited = False", "def __init__(self, numcols):\n # Start by making a root cell\n # This isn't part of the matrix, but it gives an entry point to the matrix\n # root.right is the first column header, root.left is the last\n # root.up and root.down just wrap around to itself\n root = Column(\"root\")\n self.root = root\n self.numcols = numcols\n self.numrows = 0\n # Now make all of the column headers\n for col in range(numcols):\n c = Column(\"header-\" + str(col))\n # Insert this column to the right side of the matrix\n root.left.right = c\n c.left = root.left\n c.right = root\n root.left = c", "def __init__(self, cells):\n self.cells = cells\n # Used by MakeTableData for layout.\n self.idx = None\n self.group = None\n self.rows_in_group = None\n self.starred = None", "def __init__(self, caption, tag, top_header, left_header):\n super(LatexChart, self).__init__(caption, tag)\n self._top_header = top_header\n self._num_cols = len(self._top_header)\n self._left_header = left_header\n self._num_rows = len(self._left_header)\n self._cells = {}\n for top_elt in self._top_header:\n self._cells[top_elt] = {}\n for left_elt in self._left_header:\n self._cells[top_elt][left_elt] = \" \"", "def set_cell_by_index(self, column_index, cell):\n while len(self) <= column_index:\n self.append(None)\n self[column_index] = cell", "def set_cell(frame, data):\n\twith data.cell_:\n\t\tdata.cell_[:,0] = [L, 0., 0.]\n\t\tdata.cell_[:,1] = [0., L, 0.]\n\t\tdata.cell_[:,2] = [0., 0., L]\n\t\t#cell origin\n\t\tdata.cell_[:,3] = [0, 0 , 0]\n\t\t#set periodic boundary conditions\n\t\tdata.cell_.pbc = (True, True, True)", "def __init__(self, caption, tag, header, flip=False):\n super(LatexTable, self).__init__(caption, tag)\n self._header = header\n self._content = []\n self._flip = flip", "def __init__(self, x, y, type, id):\n Cell.__init__(self, x, y, type, id, \"S\")", "def __init__(\n self, rows, columns, header_row=False, header_column=False, style=''\n ):\n self.rows = rows\n self.columns = columns\n self.header_row = header_row\n self.header_column = header_column\n self._id = _util.get_locally_unique_id()\n self._style = style\n super(Grid, self).__init__()", "def initialize_cells(self):\n for loc in np.ndindex(*self.shape): # TODO: see if nested for loop is faster than this\n c = Cell(loc, self)\n self.cells.append(c)", "def __init__(self):\n self.title = None\n self.table = pd.DataFrame()\n self.column_widths = None;", "def __init__(self, grid, column_id, column_span):\n self._column_id = column_id\n self._row_index = 0\n self._column_span = column_span\n\n self._grid = grid\n self._items = []\n\n self._create_title_label()\n\n self._connected_items = []\n self._last_item = None", "def __init__(self, width = 7, height = 7):\n self.cell = [ [EMPTY for r in range(height)] for c in range(width) ]", "def __init__(self):\n self._grid = [[None]]", "def reset(self):\n width = len(self.cell)\n height = len(self.cell[0])\n self.cell = [ [EMPTY for r in range(height)] for c in range(width) ]", "def __init__(self, value = None):\n self.column_number = -1\n self.value = value\n self.next_entry = None", "def __init__(self, cell_contents: Tuple[CellContent, ...] 
= cn.DEFAULT_CELL_CONTENTS, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.cell_contents = cell_contents\n for cell in self.cell_contents:\n cell.signal_ids = tuple(self.add_new_signal(cell.row, cell.col) for _ in range(cell.count))\n\n for i in range(cell.count):\n self.set_signal_color(cell.signal_ids[i], cn.COLORS.DEFAULT_SIGNAL_COLORS[i])", "def reset(self):\r\n # replace with your code\r\n self._cells = [[0 for dummy_col in range(self._grid_width)] for dummy_row in range(self._grid_height)]\r\n self.new_tile()\r\n self.new_tile()", "def __init__(self, grid, column_id, column_span=1):\n super(GraphVisualizerConnectedcolumn, self).__init__(\n grid, column_id, column_span\n )\n\n for i in range(column_span):\n self._grid.setColumnStretch(self._column_id + i, 1)", "def reset(self):\n # replace with your code\n dummy_row = self._grid_height\n dummy_col = self._grid_width\n self._cells = [ [0 for dummy_col in range(self._grid_width)] \n for dummy_row in range(self._grid_height)]\n \n self.new_tile()\n self.new_tile()", "def reset(self):\r\n self._cells = [ [0 for dummy_col in range(self._grid_width)] \r\n for dummy_row in range(self._grid_height) ]\r\n \r\n \r\n self.new_tile()\r\n self.new_tile()", "def __init__(self, cell_type, explicit_values,\n derived_values=None, non_column_labels=None, align='',\n sort_values=True):\n self.type = cell_type\n self.align = align\n self.col_index = 0 # Is set afterward\n self.values = []\n if non_column_labels:\n self.non_column_labels = [\n template_helpers.EZTItem(value=v, is_derived=ezt.boolean(d))\n for v, d in non_column_labels]\n else:\n self.non_column_labels = []\n\n for v in (sorted(explicit_values) if sort_values else explicit_values):\n self.values.append(CellItem(v))\n\n if derived_values:\n for v in (sorted(derived_values) if sort_values else derived_values):\n self.values.append(CellItem(v, is_derived=True))", "def __init__(self, row = 0, col = 0):\n self.row = row\n self.col = col\n self.now = {}\n self.next = {}\n self.init_status([])", "def _reset_header(self):\n new_header = []\n for col_name in self.header:\n is_left = self.left_cols.get(col_name)\n if is_left:\n new_header.append(col_name)\n self.header = new_header", "def __init__(self, title=\"\", scaling=\"\", column_list=None):\n if column_list is None:\n column_list = []\n\n self.__title = title\n self.__column_list = column_list\n self.__scaling = scaling\n if self.__title:\n self.__nonzero = True\n else:\n self.__nonzero = False\n # Store a reference to the parent table\n self.__parent_table = None", "def __init__(self, row = 0, col = 0):\n self.row = row\n self.col = col", "def __init__(self):\n self.row_number = -1\n self.next_row = None\n self.row_sentinel = SparseMatrix.MatrixEntry()", "def cell_setLeaf(self, curr):\r\n curr.n_count = 0\r\n return", "def initialize(self, col):\n\t\treturn []", "def __init__(self, header):\n self.database = pd.DataFrame(columns=header)\n self.database[\"date\"] = self.database[\"date\"].astype('datetime64[ns]')", "def __init__(self, master, x, y, size):\n self.master = master\n self.abs = x\n self.ord = y\n self.size= size\n self.fill= Cell.EMPTY_COLOR_BG\n self.active=False", "def constructCell():\n\t\tself.weightGenerate()", "def initialize_position(self):\n self.x = self.cell_xl + self.cell_dx * np.random.rand(1)[0]", "def cell(self):\n return self._cell", "def addemptycolumn(self, colname, coltype):\n setattr(self,colname,N.zeros((len(self),),coltype))\n self._modflag=True\n self._type[colname]=coltype\n\n #Looks strange 
here because we count columns from 1 but\n #Python counts them from 0\n self._ncolumns+=1\n self._d[colname]=self._ncolumns\n self._colnames.append(colname)\n self._header+='# %d %s\\n'%(self._ncolumns,colname)", "def __init__(self):\n self.index = 0", "def init_blank(self, T):\n self.headings = []\n self.table = {}\n self.rowcount = 0\n for e in T.entries:\n self.headings.append(e.name)\n self.table[e.name] = []", "def set_empty(self, row, col):\n self._cells[row][col] = EMPTY", "def initialize(self):\r\n for cell in self.free_cell_list:\r\n cell.unlock()\r\n self.add_cell(cell)\r\n self.free_cell_list.clear()", "def reset(self):\n self._cells = [[0 for dummy_col in range(self._grid_width)] for dummy_row in range(self._grid_height)]\n self.new_tile()\n self.new_tile()\n #return self._cells", "def add_cell(self,**kwargs):\n i=None\n if '_index' in kwargs:\n i=kwargs.pop('_index')\n if i==len(self.cells): # had been self.edges, seems wrong\n # this is the index we'd get anyway.\n i=None\n else:\n assert len(self.cells)>i\n assert self.cells[i]['deleted']\n\n if i is None:\n c=np.zeros( (),dtype=self.cell_dtype)\n self.cells=array_append(self.cells,c)\n i=len(self.cells)-1\n else:\n pass\n\n # default values for native fields\n self.cells['_center'][i]=np.nan\n self.cells['_area'][i]=np.nan\n self.cells['edges'][i]=self.UNKNOWN \n\n for k,v in six.iteritems(kwargs):\n if k in ['edges','nodes']: # may have to make this more generic..\n self.cells[k][i][:len(v)] = v\n self.cells[k][i][len(v):] = self.UNDEFINED # -1\n else:\n self.cells[k][i]=v\n\n # Avoids issue with bogus value of 'deleted' coming in with kwargs\n self.cells['deleted'][i]=False\n\n if self._node_to_cells is not None:\n for n in self.cell_to_nodes(i):\n self._node_to_cells[n].append(i)\n\n if self._cell_center_index is not None:\n if self.cell_center_index_point=='circumcenter':\n cc=self.cells_center()[i]\n else: # centroid\n cc=self.cells_centroid([i])[0]\n self._cell_center_index.insert(i,cc[self.xxyy])\n\n # updated 2016-08-25 - not positive here.\n # This whole chunk needs testing.\n # maybe some confusion over when edges has to be set\n edges=self.cell_to_edges(i)\n \n if 'edges' not in kwargs:\n # wait - is this circular??\n self.cells['edges'][i,:len(edges)]=edges\n self.cells['edges'][i,len(edges):]=self.UNDEFINED\n\n nodes=self.cell_to_nodes(i)\n \n for side in range(len(edges)):\n j=edges[side]\n n1=nodes[side]\n n2=nodes[ (side+1)%len(nodes) ]\n \n if ( (n1==self.edges['nodes'][j,0]) and\n (n2==self.edges['nodes'][j,1]) ):\n # this cell is on the 'left' side of the edge\n assert self.edges['cells'][j,0]<0\n # TODO: probably this ought to be using modify_edge\n self.edges['cells'][j,0]=i\n elif ( (n1==self.edges['nodes'][j,1]) and\n (n2==self.edges['nodes'][j,0]) ):\n # this cell is on the 'right' side of the edge\n assert self.edges['cells'][j,1]<0\n # TODO: probably this ought to be using modify_edge\n self.edges['cells'][j,1]=i\n else:\n assert False\n \n self.push_op(self.unadd_cell,i)\n\n return i", "def __init__(self, grid, column_id, column_span=1):\n super(GraphVisualizerTimeline, self).__init__(\n grid, column_id, column_span)\n for i in range(column_span):\n self._grid.setColumnStretch(self._column_id + i, 0)", "def __init__(self_, definition: NotebookMarkdownCellDefinition, **kwargs):\n super().__init__(kwargs)\n\n self_.definition = definition", "def add_blank_data_column(self):\n\n header_title, ok_pressed = QInputDialog.getText(self, \"Add Column\", \"Enter heading for the column:\",\n 
QLineEdit.Normal, \"\")\n if ok_pressed and header_title != '':\n # print(header_title)\n\n default_value, set_default_pressed = QInputDialog.getText(self, \"Set Default Value\",\n \"Enter default value to set for column if any:\",\n QLineEdit.Normal, \"\")\n\n row_count = self.csv_data_table.rowCount()\n last_column_count = self.csv_data_table.columnCount()\n self.csv_data_table.insertColumn(last_column_count)\n for empty_row in range(0, row_count):\n item = QTableWidgetItem(default_value)\n self.csv_data_table.setItem(empty_row, last_column_count, item)\n\n # TODO: fix untraced bug present in show/hide columns\n self.column_headers.append(header_title)\n self.column_headers_all.append(header_title)\n # print(self.column_headers)\n # print(self.column_headers_all)\n self.csv_data_table.setHorizontalHeaderLabels(self.column_headers)", "def init_matrix_cell(value):\n return {\n 'value': value,\n 'accumulated_value': None,\n 'parent': None\n }", "def __init__(self, columns):\n self.columns = columns\n self.rows = []", "def setUp(self):\r\n self.matrix = array(\r\n [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])\r\n self.cells = [(0, 1), (1, 3)]\r\n self.cells2 = [(0, 2), (2, 3)]", "def __init__(self):\n\n self._dict = OrderedDict(zip(const.BFHCOLS, [0] * 111))", "def __init__(self,data,chead=None,rhead=None,label=None,edit=True,parent=None):\n QtGui.QTableView.__init__(self,parent)\n if isinstance(data,ndarray):\n self.tm = ArrayModel(data,chead,rhead,edit=edit)\n else:\n self.tm = TableModel(data,chead,rhead,edit=edit)\n self.setModel(self.tm)\n self.horizontalHeader().setVisible(chead is not None)\n self.verticalHeader().setVisible(rhead is not None)\n self.resizeColumnsToContents()\n self.setCornerButtonEnabled", "def __init__(self, init_grid=None):\n\n self.height = len(init_grid)\n self.width = len(init_grid[0])\n\n self.grid = [[Cell(self, c) for c in row]\n for row in init_grid]\n\n self.g = nx.Graph()\n self.tangle()", "def __init__(self, data):\n\n self.__data = np.array(data, dtype=object)\n\n # Get number of rows / columns\n self.__nrows, self.__ncols = self.__data.shape\n\n # Construct the cells\n grid = []\n for i in range(self.__nrows):\n row = []\n for j in range(self.__ncols):\n dcol = self.__data[i, j]\n if dcol is None:\n row.append(cell(i, j, black=True))\n elif dcol == 0:\n row.append(cell(i, j))\n else:\n bot, rig = dcol\n if bot is not None:\n cs = []\n for ii in range(i + 1, self.__nrows):\n if self.__data[ii, j] != 0:\n break\n cs.append((ii, j))\n bot = (bot, tuple(cs))\n if rig is not None:\n cs = []\n for jj in range(j + 1, self.__ncols):\n if self.__data[i, jj] != 0:\n break\n cs.append((i, jj))\n rig = (rig, tuple(cs))\n row.append(cell(i, j, bottom=bot, right=rig))\n grid.append(row)\n self.__tuple = tuple(tuple(row) for row in grid)", "def setUp(self):\n self.cell = Cell()\n self.livingCell = Cell(True)\n self.deadCell = Cell(False)", "def __make_header__(self):\n header = lashead.Header(point_format=0)\n return header", "def __init__(self,cell1,site1,cell2,site2,color=None,label=None,linewidth=None,linetype=None):\n self.cell1 = np.array(cell1)\n self.site1 = site1\n self.cell2 = np.array(cell2)\n self.site2 = site2\n coordinate1 = None\n coordinate2 = None\n self.color = color\n self.label = label\n self.linewidth = linewidth\n self.linetype = linetype", "def __init__(self, title=None, comment=None, ase_atoms=None,\n origin=np.array([0.0, 0.0, 0.0]), cell=None, cell_n=None, data=None):\n self.title = title\n self.comment = comment\n 
self.ase_atoms = ase_atoms\n self.origin = origin\n self.cell = cell\n self.data = data\n if data is not None:\n self.cell_n = data.shape\n else:\n self.cell_n = None", "def emptyCell (self, row, column, gameGrid=None, emptyValue=0):\n if not gameGrid:\n gameGrid = self.gameGrid\n self.addObject(emptyValue, row, column, gameGrid=gameGrid)", "def cell_type(self, row, col):\n if col == 0: return 'heading'\n else: return 'data'", "def __init__(self, row, column):\n self.__row = row\n self.__column = column\n self.living = False\n self.__neighbors = []\n self.__style = 'default'", "def cells(self, cells):\n\n self.container['cells'] = cells", "def cell(self, cell_id):\r\n return Cell(self, cell_id)", "def visit_table(self, node: docutils.nodes.reference) -> None:\n self.entry = {}\n self.header = {}", "def _init_table(self, table: \"Table\"):\n if not self.columns:\n self.columns = table.columns\n self._data = table.data", "def __pos__(self):\n ret = copy.deepcopy(self)\n for row in ret:\n if __debug__:\n assert hasattr(row, '__iter__'), repr(row) + \" | \" + repr(ret)\n assert len(row) <= len(ret.header), 'header needs to be larger or equal to all! ({},{})'.\\\n format(row, ret.header)\n for i in range(len(ret.header) - len(row)):\n row.append(None)\n return ret", "def init_table(row_num):\n # Initialize the number of rows in table\n table = []\n for i in range(row_num):\n row = []\n table.append(row)\n\n # Append the default first cell to the table\n table[0].append(\"Curreny Type\")\n\n return table", "def __init__(self, name):\n super(Column, self).__init__(self, name)\n self.sum = 0", "def __init__(self, name=None, compile_paths=combined_path):\n Hay2011Cell.__init__(self, name=name, compile_paths=compile_paths)", "def __init__(self, append_first_body_row=False):\n self.append_body = append_first_body_row", "def __init__(self, dim, connections_per_row=3):\n _table.STable_swiginit(self, _table.new_STable(dim, connections_per_row))", "def __init__(self, header=None):\r\n\r\n self.data = []", "def set_cell(self, x, y, val):\n pass", "def set_cell(self, x, y, val):\n pass", "def __init__(self, *args, **kwargs):\n\n super(AxialPinCell, self).__init__(*args, **kwargs)\n self.axials = []\n self.pincells = []\n self.finalized = False\n self.outermost = None", "def init_grid(self):\n self.headlabel.collection = self.books\n self.headlabel.set_label_text()\n self.warnlabel.set_label_text('Welcome to the Reading Tracker 2.0!')\n self.building_grid(None, 'Author')", "def __init__(self, columns, rows, floor_char, wall_char):\n\t\tsuper().__init__(columns, rows, None)\n\t\tself.floor_char = floor_char\n\t\tself.wall_char = wall_char\n\t\tself.create()", "def reset_row(self):\n\n self.line = [0] * LINE_LENGTH\n self.current_index = 0", "def parseColHeader(self, i, j) :\n cell_content = self.processString(self.source_cell.value)\n if self.isEmpty(i,j):\n if self.insideMergeBox(i,j):\n k, l = self.getMergeBoxCoord(i,j)\n \n # If we are in a vertical merge box, skip adding the dimension\n if l == j:\n return\n\n # Update cell content \n cell_content = self.processString(self.r_sheet.cell(k,l).value)\n else:\n return\n\n # Add the value qname to the column_dimensions list for that column\n self.column_dimensions.setdefault(j,[self.sheet_qname]).append(cell_content)\n \n # Add the data to the graph\n resource = self.getColHeaderValueURI(self.column_dimensions[j])\n self.graph.add((resource, RDF.type, self.namespaces['tablink']['ColumnHeader']))\n self.graph.add((resource, 
self.namespaces['skos']['prefLabel'], Literal(cell_content)))\n self.graph.add((resource, self.namespaces['tablink']['cell'], Literal(self.source_cell_name)))\n return", "def reset(self):\n self._tkdct = {}\n self._sortdct = {}\n self.adjust_row_number(0)\n if not self.has_header_row():\n self.add_header_row()\n kattrdct = {'class': \"w3-tag w3-blue\"}\n for colnum, txt in [(AddScanList._RFID_COL, \"RFID label\"),\n (AddScanList._ACTIV_COL, \"Selected?\")]:\n kcell = self.getheader(colnum)\n if kcell is not None:\n html.label(kcell, \"\", kattrdct, txt, None)", "def __init__(self, grid):\n self.grid = grid\n (self.H, self.W) = self.grid.shape\n\n # Store the empty cells to simplify `random_state`\n self.empty_cells = set()\n for y, row in enumerate(grid):\n for x, is_wall in enumerate(row):\n if not is_wall:\n self.empty_cells.add((x, y))\n # Getting random empty cells uses a list.\n self.empty_cell_list = list(self.empty_cells)", "def upgrade_cell(cell):\n cell.setdefault(\"metadata\", NotebookNode())\n cell.id = random_cell_id()\n if cell.cell_type == \"code\":\n cell.pop(\"language\", \"\")\n if \"collapsed\" in cell:\n cell.metadata[\"collapsed\"] = cell.pop(\"collapsed\")\n cell.source = cell.pop(\"input\", \"\")\n cell.execution_count = cell.pop(\"prompt_number\", None)\n cell.outputs = upgrade_outputs(cell.outputs)\n elif cell.cell_type == \"heading\":\n cell.cell_type = \"markdown\"\n level = cell.pop(\"level\", 1)\n cell.source = \"{hashes} {single_line}\".format(\n hashes=\"#\" * level,\n single_line=\" \".join(cell.get(\"source\", \"\").splitlines()),\n )\n elif cell.cell_type == \"html\":\n # Technically, this exists. It will never happen in practice.\n cell.cell_type = \"markdown\"\n return cell", "def first_cell(self) -> Cell:\n if self._first_cell is not None:\n return self._first_cell\n if not self._pre_check():\n raise RuntimeError(\n \"Pre-check failed. 
No component fusion possible and no first cell\"\n )\n assert self._first_cell is not None\n return self._first_cell", "def __init__(self, data):\n self.data = data\n self.columns = Columns(data)\n self.rows = Rows(data)", "def __init__(self, row:int, col:int):\n\t\tself.__row = row\n\t\tself.__col = col", "def __init__(self, cell_size, nrows, ncols, **kwds):\n #\n # Python 3 update\n #\n super().__init__(**kwds)\n self.cell_size = cell_size\n w, h = cell_size\n d = 2 * self.margin\n self.size = (w * ncols + d, h * nrows + d)\n self.cell_size = cell_size", "def __init__(self, rows=[]):\n self._rows, self._columns = Rows(self), Columns(self)\n self._m = 0 # Number of columns per row, see Table.insert().\n self.extend(rows)", "def _create_cell(self,row_number,cell_number):\n cell = Cell(self)\n cell.x = cell_number * self.cell_width\n cell.y = row_number * self.cell_width\n cell.rect.x = cell.x\n cell.rect.y = cell.y\n return cell", "def create_foothold(self):\n sel = self.selected()\n cell = sel[0]\n if cell.contents == Contents.bomb:\n cell.contents = Contents.empty\n for adj in cell.get_adjacent():\n if adj.contents == Contents.bomb:\n adj.contents = Contents.empty\n self.set_bomb_contacts()", "def __init__(self, num_rows):\r\n self.num_rows = num_rows\r\n\r\n # Make the linear array where we store items.\r\n num_items = self.num_cells_for_rows(self.num_rows)\r\n self.values = [None for i in range(num_items)]", "def header(self):\n print 'dimensions',self.data.shape\n print 'llcorner', self.xllcorner, self.yllcorner\n print 'cell size', self.cellsize", "def header(self):\n self.set_font(self.police, 'B', 15)\n self.cell(w=0, h=10, txt=f\"CV de {self.name}\", border=1, ln=1, align='C')", "def __init__(self, obj=None):\n if obj:\n self.width = obj.width\n self.columns = obj.columns\n self.colwid = obj.colwid[:]\n else:\n self.width = 0\n self.columns = 0\n self.colwid = [ 0 ] * 100", "def header(self):\n ...", "def __init__(self, columns, values=[], row_number=None, source_row_number=None):\n self.columns = columns\n self.values = copy.copy(values)\n self.row_number = row_number\n self.source_row_number = source_row_number", "def __init__(self):\n _snap.TTableRow_swiginit(self, _snap.new_TTableRow())", "def __init__(self, grid_height, grid_width):\n self._grid_height = grid_height\n self._grid_width = grid_width\n self._cells = [[EMPTY for dummy_col in range(self._grid_width)]\n for dummy_row in range(self._grid_height)]", "def __init__(self):\n self.empty_seats = [row * 8 + col for row in self.rows for col in self.cols]", "def header(self) -> NoReturn:\n self.set_x(self.t_margin + self.b_margin)\n self.ln(self.line_height)", "def header(self, header):\n\n self._header = header", "def header(self, header):\n\n self._header = header", "def empty_cell(cls):\n return SPACE" ]
[ "0.6575508", "0.6537204", "0.6273338", "0.62425", "0.6164589", "0.61148554", "0.6096474", "0.59903026", "0.59864986", "0.5979671", "0.597449", "0.58925885", "0.58822554", "0.5868053", "0.58567417", "0.58472", "0.5832728", "0.5818981", "0.58085716", "0.5777822", "0.57610726", "0.574778", "0.57433647", "0.5738441", "0.5735225", "0.57340425", "0.57205456", "0.5705765", "0.56922114", "0.5677369", "0.56737536", "0.5659283", "0.56475186", "0.5644393", "0.563137", "0.5623489", "0.5622038", "0.5616788", "0.5615981", "0.5610917", "0.5609513", "0.5579259", "0.5575517", "0.55706924", "0.557041", "0.55522877", "0.5548133", "0.55318975", "0.5521273", "0.5517896", "0.5510427", "0.547561", "0.54650533", "0.5462812", "0.5459161", "0.5457913", "0.54502666", "0.5439348", "0.5432606", "0.5428402", "0.5424228", "0.5416462", "0.54161495", "0.5414132", "0.54033095", "0.5403143", "0.5402035", "0.5397409", "0.5383606", "0.53798246", "0.5379206", "0.5379206", "0.53734136", "0.53713584", "0.5366356", "0.53597164", "0.53557867", "0.53469545", "0.53466743", "0.5346654", "0.5346555", "0.5339379", "0.5334938", "0.53267205", "0.53162557", "0.53152525", "0.5314668", "0.5310103", "0.5308475", "0.5306017", "0.5303518", "0.5302099", "0.52968985", "0.52965134", "0.52954715", "0.5287298", "0.5279199", "0.527786", "0.527786", "0.5273788" ]
0.5778897
19
Initialize the sum to zero
def __init__(self, name):
        super(Column, self).__init__(self, name)
        self.sum = 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset_sum(self):\n self.sum_e = 0", "def initial(self):\n return zero", "def zero(self):\n raise NotImplementedError(\"Not implemented yet.\")", "def reset_sum(self):\n self._error_sum = 0", "def zero_sum(list):\n if not list:\n return 0\n else:\n return sum(list)", "def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0", "def reset(self):\n self._total_value = 0.0\n self._count = 0", "def zero_val(self):\r\n self.piDD = {\"[0]\": None}\r\n self.top_node = \"[0]\"\r\n self.dim = 0", "def reset(self, complete=False):\n self.sum = 0\n self.n = 0\n if complete:\n self.running_avg = []", "def reset(self):\n self.avg = 0\n self.sum = 0\n self.cnt = 0", "def zero(self):\n return np.zeros([self.nx])", "def reset(self):\n self.num_inst = 0\n self.sum_metric = 0.0", "def zero(self, value):\n raise NotImplementedError", "def faca_zero(self):\n n = self.n\n\n for i in range(n):\n if comozero(self[i]):\n self[i] = 0\n\n return self", "def nullValueToZero(self) -> None:\n self.cpp.nullValueToZero()", "def initializeWeightsToZero(self):\n\t\t## YOUR CODE BELOW\n\t\t\n\t\tutil.raiseNotDefined()\n\t\treturn", "def reset(self):\n self._lastRoll = None\n self._initialSum = 0\n self._rollCount = 0", "def zero(*_, **__) -> None:\n return", "def zero_vector(cls):\r\n \"\"\" EXECUTE THIS FUNCTION IN THE FARM CLASS! \"\"\"\r\n cls.wzero = np.zeros((Turbine.N, ), dtype=float)", "def set_zero_vector(self):\n self.vector = np.zeros(self.dimension, dtype = float)", "def fill_zero(self, weights):\n if self.value is None:\n # if we have a list of weights, allocate a list of zeros for the accumulator\n if isinstance(weights, list):\n self.value = list()\n # for each weight in the list, allocate zeros with the same shape as the weight\n for i in range(0, len(weights)):\n self.value.append(np.zeros_like(weights[i]))\n else:\n self.value = np.zeros_like(weights)", "def reset(self):\n self.sum = [0.] * len(self.topk)\n self.data_num = 0\n self.pfm = [0.] 
* len(self.topk)", "def zero(self):\n return self.create()", "def setZeroTheory(self):\n\t\tself.theo = np.zeros((self.totalBins), dtype = complex)", "def zero(self):\n return self.State.zero()", "def zero_grad(self):\n self.grad.zero_()", "def zero_qnumbers(self):\n self.qd.fill(0)\n for i in range(len(self.qD)):\n self.qD[i].fill(0)\n # enable chaining\n return self", "def reset(self) -> None:\n self.true_positives = 0\n self.actual_positives = 0", "def reset(self) -> None:\n self.true_positives = 0\n self.all_positives = 0", "def sum_sum(t, init):\n return sum(t, init)", "def grad_zero(self):\r\n pass", "def grad_zero(self):\r\n pass", "def reset(self):\n self.tot = 0\n self.cnt = [0.0 for _ in range( self.alpha.getLen() )]", "def zero_grad(self):\n for p, dp in self.params:\n dp.zero_()", "def zero(klass):\n return RatTerm(RatNum(0, 1), 0)", "def grad_zero(self):\r\n self.dw = torch.zeros_like(self.w)\r\n self.db = torch.zeros_like(self.b)", "def zero_negative_weights(self):\n for k in range(len(self)):\n self[k] *= 0 if self[k] < 0 else 1\n self.finalized = True\n return self", "def sum_normed (self):\n return self / self.sum", "def zero_grad(self):\n for p, grad, v, square_grad_avg, delta_x_acc in self.params:\n grad.zero_()", "def zero_grad(self):\n for p, grad, v, square_grad_avg, delta_x_acc in self.params:\n grad.zero_()", "def zero(self):\n q = pinocchio.neutral(self.model)\n v = np.zeros(self.model.nv)\n return np.concatenate([q.flat, v])", "def dim_zero_sum(x: Tensor) ->Tensor:\n return torch.sum(x, dim=0)", "def reset(self):\n self.z[:] = 0", "def reset(self):\n self.c_count = 0\n self.a_count = -1\n self.epsilon = self.init_epsilon", "def zero(self):\n keys = \"Mech,Account,MatchesPlayed,Kills,Deaths,Wins,Losses,TimePlayed,Time,DamageDone,XPEarned,Ratio\".split(\n \",\")\n values = [\"0\" for x in keys]\n stats = dict(zip(keys, values))\n self._init_from_data(stats)\n return self", "def zeroise_results(self):\n \n self.result_total_peak.set(0.0)\n self.result_total_avge.set(0.0)\n self.result_total_sd.set(0.0)\n return", "def reset(self,):\n \n self.i = 0\n self.pi = 1.0\n self.si = 0.0\n self.pi_min = float(\"inf\")\n self.si_min = float(\"inf\")", "def reset(self):\n\t\tself._initial = None\n\t\tself._start = None\n\t\tself._time = 0\n\t\tself._total = 0\n\t\treturn self", "def __init__(self, nums):\n self.sums,tmp =[],0\n for n in nums:\n tmp +=n\n self.sums.append(tmp)", "def zero_weight():\n return Weight(kg=0)", "def __reset(self):\n\t\tself.__highest = -float('inf')\n\t\tself.__lowest = float('inf')\n\t\tself.__total = 0\n\t\tself.__steps = 0\n\t\tself.__cold_days = 0", "def fsum(iterable):\n return 0.0", "def zero(x):\n # TODO: get dtype from Expr and Matrix:\n return x * 0", "def __init__(self):\n super().__init__()\n self._value = 0", "def zsum(s, *args, **kwargs):\n return 0 if s.empty else s.sum(*args, **kwargs)", "def reset(self):\n self.visited = False\n self.calculated = False\n self.past_value = self.value\n self.value = 0", "def set_zero(self, loc=None):\n self.Y[loc] -= self.Y[loc]", "def __initialize__(self):\n self.initialized = True\n if self.ids_to_consider is None:\n self.V_tot = np.sum(self.V[self.p_ids-1])\n else:\n self.V_tot = np.sum(self.V[self.ids_to_consider-1])\n return", "def reset(self):\n for i in range(0, len(self.__counts)):\n self.__counts[i] = 0\n self.__overflow = 0\n self.__total_count = 0\n self.__total_values = 0\n self.__min = None\n self.__max = None", "def set_step_zero(self):\n self.gcam_landmatrix, self.ixr_ixm_ixg = 
self.cst.apply_constraints_zero()", "def zero_init(self, shape):\n return np.zeros((shape[0],shape[1]))", "def zero_grad(self):\n self.grad = Tensor(data=np.zeros_like(self.data, dtype=np.float))", "def sum_u(self):\r\n try:\r\n # add the velocity to the sum\r\n self.usum.vector()[:] += self.u.vector()[:]\r\n except AttributeError:\r\n # initialize the sum\r\n self.usum = self.u.copy(deepcopy=True)", "def test_result_zero(self, init_wealth, n_bars):\n series_wealth = init_wealth + np.zeros(n_bars, dtype=float)\n result = self.MetricClass()._result_from_wealth(series_wealth)\n expected = init_wealth\n assert result == expected", "def reset(self):\n self.integral = 0.0\n self.previous_error = 0.0", "def zeros(num):\n if num < 1:\n raise IndexError('num must be >= 1.')\n return Vector.fromSequence([0] * num)", "def reset(self):\n self.total_pulls = 0\n self.total_score = 0\n self.npulls = np.zeros(self.k)\n self.score = np.zeros(self.k)", "def set_dacs_zero(self):\n # First set all \"parameters\" to zero.\n # this ensures that the safe slow rampdown is used and that the\n # correct values are known to the instrument.\n for ch in self.channel_map:\n self.set(ch, 0)\n\n # \"brute-set\" all sources in known modules to zero, this is because\n # this is also a safety method that should ensure we are in an all\n # zero state.\n for s in self.current_sources.values():\n for dac in range(4):\n s.set_current(dac, 0.0)", "def sum(self):\n return self.vsum", "def test_op_zero_int(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=int)\n offl_a = stream.bind(a)\n offl_a.zero()\n offl_a.update_host()\n stream.sync()\n self.assertEqual(sum(a), 0,\n \"Array should be all zeros.\")", "def reset(self):\n self.accumulation = None", "def reset(self):\n self.accumulation = None", "def reset(self):\n self._proportional = 0\n self._integral = 0\n self._derivative = 0\n\n self._last_time = self._current_time()\n self._last_output = None\n self._last_input = None", "def test_result_zero(self, rate, n, init_wealth, n_bars):\n series_wealth = init_wealth + np.zeros(n_bars)\n result = self.MetricClass(rate=rate, n=n)._result_from_wealth(series_wealth)\n expected = 0\n assert np.allclose(result, expected)", "def clear(self):\n self.xi[:] = 0\n self.meanlogr[:] = 0\n self.weight[:] = 0\n self.npairs[:] = 0", "def zeros_like(self):\n raise NotImplementedError", "def zero():\n return Vec2d(0, 0)", "def normalize_initial(self):\n self._i /= self._i.sum()", "def init_weights(self) -> None:\n self.fc.bias.data.fill_(0)\n self.fc.weight.data.uniform_(-0.1, 0.1)", "def reset(self):\n self.sum_metric = 0.\n self.num_inst = 0.\n self.metrics.reset_stats()", "def zero_grad(self):\n for p in self.parameters():\n if p.grad is not None:\n p.grad.data.zero_()", "def reset(self):\n self.correct_count = 0\n self.total_count = 0", "def calculate_amount_payable_rule_zero(self, total):\n return self.amount_raised * Decimal(1)", "def zero_timings(self):\r\n self.step = 0\r\n self.current_T = 0.0", "def _sum(self):\n s = 0\n for element, value in self.items():\n s += value\n return s", "def sum(self) -> int:\n return self.root.sum", "def zero_grad(self, *args, **kwargs) -> None:\n\n if self.accumulate_step == 0:\n self.optim.zero_grad(*args, **kwargs)", "def zero(self) -> 'PFElement':\n return self(0)", "def zero_grad(self):\n self.optimizer.zero_grad()", "def zeros(cls, n):\n return cls._createVetor([0] * n)", "def reset(self):\n self.m = normalize(self.m0)\n self.t = 0.0", 
"def value_zeros(self, shape):\r\n return numpy.zeros(shape, dtype=self.dtype)", "def __init__(self, weights):\r\n self.totals = []\r\n running_total = 0\r\n for w in weights:\r\n running_total += w\r\n self.totals.append(running_total)", "def summation(self):\n return sum(self.read_ints())", "def zeros(self):\n super(TimeCube, self).zeros()\n self.data = np.zeros([self.time_range[1]-self.time_range[0]]+self.cubesize, np.uint8)", "def __init__(self, nums):\n d = [0] if nums else []\n for i in xrange(len(nums)):\n d.append(d[i] + nums[i])\n self.d = d", "def test_op_zero_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=float)\n offl_a = stream.bind(a)\n offl_a.zero()\n offl_a.update_host()\n stream.sync()\n self.assertEqual(sum(a), 0.0,\n \"Array should be all zeros.\")", "def _negative_weights_set_sum(self, value):\n weights = self # simpler to change to data attribute and nicer to read\n value = abs(value) # simplify code, prevent erroneous assertion error\n assert weights[self.mu] <= 0\n if not weights[-1] < 0:\n # breaks if mu == lambda\n # we could also just return here\n # return\n istart = max((self.mu, int(self.lambda_ / 2)))\n for i in range(istart, self.lambda_):\n weights[i] = -value / (self.lambda_ - istart)\n factor = abs(value / sum(weights[self.mu:]))\n for i in range(self.mu, self.lambda_):\n weights[i] *= factor\n assert 1 - value - 1e-5 < sum(weights) < 1 - value + 1e-5\n if self.debug:\n print(\"sum w = %.2f, sum w^- = %.2f\" %\n (sum(weights), -sum(weights[self.mu:])))", "def zero(self, deg):\n return coch.Form(\n deg,\n self,\n np.zeros(self.N[deg], dtype=np.float)\n )", "def clr(self):\n self.a = 0.0", "def zero(self):\n if self._chart.manifold().base_field_type() in ['real', 'complex']:\n elt = SR.zero()\n else:\n elt = self._chart.manifold().base_field().zero()\n return self.element_class(self, elt)" ]
[ "0.7703882", "0.71630156", "0.69994444", "0.6945991", "0.6897512", "0.67463124", "0.6605019", "0.65537924", "0.653141", "0.65299124", "0.6520557", "0.65204495", "0.6479404", "0.6479189", "0.6431233", "0.64236397", "0.6403008", "0.63670397", "0.63195443", "0.62968844", "0.62649363", "0.6201463", "0.6196857", "0.6188233", "0.6185457", "0.6163054", "0.6150844", "0.6148095", "0.6147996", "0.6141154", "0.611593", "0.611593", "0.6073287", "0.6069795", "0.6064687", "0.6061461", "0.60402465", "0.6039896", "0.60086155", "0.60086155", "0.6008084", "0.5985611", "0.59742457", "0.59726447", "0.59677345", "0.596425", "0.5962744", "0.59614646", "0.5947022", "0.59368455", "0.5934717", "0.5922409", "0.5918801", "0.59066343", "0.5878771", "0.5873459", "0.58677214", "0.58674675", "0.5867197", "0.5866621", "0.5858128", "0.58467585", "0.5846341", "0.58435386", "0.58417237", "0.5832045", "0.58106047", "0.57955366", "0.5788631", "0.5784524", "0.57796687", "0.57796687", "0.5775304", "0.5772167", "0.5765621", "0.57631785", "0.57479334", "0.5746049", "0.5737576", "0.5736356", "0.573211", "0.5713744", "0.57130784", "0.57113886", "0.5698117", "0.5692378", "0.5687514", "0.5666799", "0.56497115", "0.5644991", "0.5640642", "0.5632593", "0.5624772", "0.5615237", "0.56120926", "0.5601116", "0.5601063", "0.5593881", "0.5588909", "0.5588041", "0.5586615" ]
0.0
-1
Initialize the dancing links matrix with a given number of columns
def __init__(self, numcols):
    # Start by making a root cell
    # This isn't part of the matrix, but it gives an entry point to the matrix
    # root.right is the first column header, root.left is the last
    # root.up and root.down just wrap around to itself
    root = Column("root")
    self.root = root
    self.numcols = numcols
    self.numrows = 0

    # Now make all of the column headers
    for col in range(numcols):
        c = Column("header-" + str(col))

        # Insert this column to the right side of the matrix
        root.left.right = c
        c.left = root.left
        c.right = root
        root.left = c
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, n):\n self.row = [0]*n\n self.col =[0]*n\n self.diagonal = 0\n self.antidiag = 0\n self.n = n", "def setup(self, length):\n self.matrix = [None] * length\n for x in range(0,length):\n self.matrix[x] = [None] * length\n self.i = self.k = self.j = 0", "def init_Dist_Matrix(length):\r\n dist_matrix = []\r\n \r\n while len(dist_matrix) < length:\r\n dist_matrix.append([])\r\n while len(dist_matrix[-1]) < length:\r\n dist_matrix[-1].append(float(0))\r\n \r\n # print_matrix(dist_matrix) #just for the visuals can be removed later\r\n return(dist_matrix)", "def fill_matrix(self):\n\n print(\"Creating Needleman-Wunsch matrix..\")\n\n for i in range(self.matrix.shape[0]):\n for j in range(self.matrix.shape[1]):\n\n if i < len(self.seq_2) and j < len(self.seq_1):\n self.matrix[0, i + 2] = self.seq_2[i]\n self.matrix[j + 2, 0] = self.seq_1[j]\n\n if i > 1 and j > 1:\n self.matrix[1, j] = self.matrix[1, j - 1] + self.GAP\n self.matrix[i, 1] = self.matrix[i - 1, 1] + self.GAP\n\n diag = (self.matrix[i - 1, j - 1] + self.compare(self.matrix[0, j], self.matrix[i, 0]))\n up = (self.matrix[i, j - 1] + self.GAP)\n left = (self.matrix[i - 1, j] + self.GAP)\n\n selected = max(diag, up, left)\n\n self.add_arrow(i, j, diag, up, left, selected)\n\n self.matrix[i, j] = selected", "def _links_as_columns(self):\n return Link(*map(np.array, zip(*self.links)))", "def create_adjacency_matrix(n_people):\n adjacency_array = []\n for i in range(n_people):\n try:\n row = np.loadtxt('./data/{}.csv'.format(i), usecols=[1], delimiter=',')\n except IOError:\n row = np.array(n_people*[0])\n adjacency_array.append(row)\n return np.matrix(adjacency_array)", "def __init__(self, link_matrix, users, index_id_map, is_sparse=False):\n self.__is_sparse = is_sparse\n self.__link_matrix = link_matrix\n self.__link_matrix_tr = link_matrix.transpose()\n self.__n = self.__link_matrix.shape[0]\n self.__hubs = np.ones(self.__n)\n self.__auths = np.ones(self.__n)\n self.__size = 30\n self.__names = [users[index_id_map[i]]['screen_name'] for i in range(0, self.__size)]\n self.__index_id_map = index_id_map\n self.__users = users\n self.all_hubs = []\n self.all_auths = []", "def laplace_matrix(self):\n n = self.number_of_vertices\n laplace_matrix = np.zeros((n, n))\n for i in range(n):\n laplace_matrix[i][i] = 1\n vertice = self.list_of_vertices[i]\n for edge in vertice.edges_list:\n laplace_matrix[i][edge.linked[1].index] = 1\n return laplace_matrix", "def __init__(self, n):\n self.rows = [0] * n\n self.cols = [0] * n\n self.diagonal1 = 0\n self.diagonal2 = 0\n self.n = n", "def __init__(self, n):\n self.row = [0] * n\n self.col = [0] * n\n self.diagonal = 0\n self.antidiagonal = 0\n self.winning = False", "def formAdjacencyMatrix(self):\n self.adjacencyMatrix = dict()\n for i in self.node:\n self.adjacencyMatrix[i] = dict()\n for j in self.node:\n self.adjacencyMatrix[i][j] = 0\n \n for ij in self.link:\n self.adjacencyMatrix[self.link[ij].tail][self.link[ij].head] = 1", "def __init__(self, n):\n\t\tself._matr = []\n\t\tfor i in range(n):\n\t\t\tself._matr.append([])\n\t\t\tfor j in range(n):\n\t\t\t\tself._matr[i].append(False)", "def __init__(self, n):\n self.row, self.col, self.diag, self.anti_diag, self.n = [0] * n, [0] * n, 0, 0, n", "def create_fabric_matrix(rows, columns):\n return [['.'] * columns for i in range(rows)]", "def __init__(self, num_nodes):\n self.graph = sps.lil_matrix((num_nodes, num_nodes))\n self.size = num_nodes", "def __init__(self, n):\n self.n = n\n self.rows = [0 for _ in range(n)]\n 
self.colums = [0 for _ in range(n)]\n self.diag = [0,0]", "def initialize_action_matrix(self):\n for start_i, cols in enumerate(self.A):\n for next_j, value in enumerate(cols):\n start_state = self.get_state_from_row(start_i)\n # (row_number, red loc, blue loc, green loc)\n next_state = self.get_state_from_row(next_j)\n\n # Valid states include moving exactly 1 dumbbell to a block that's\n # not already occupied, or to origin.\n num_moves, moves = count_moves_from_states(\n start_state, next_state)\n \n # key: location (where 0 = origin, 1/2/3 = block #), value: count\n next_state_loc_counts = Counter(next_state[1:])\n del next_state_loc_counts[0] # ignore origin location\n \n # There must not be more than 1 dumbbell at each block\n is_valid_next_state = all(\n map(lambda x: x <= 1, next_state_loc_counts.values()))\n \n if (num_moves == 1 and is_valid_next_state):\n move = moves[0] # Only 1 move\n # doing nothing or moving to origin is not valid\n if (start_state == next_state or move[1] == 0):\n continue\n self.A[start_i][next_j] = self.get_col_from_action(move)", "def __init__(self, n):\r\n self.matr = []\r\n self.n = n\r\n for i in range(n):\r\n self.matr.append([])\r\n for j in range(n):\r\n self.matr[i].append(False)", "def __init__(self, n):\r\n self.size = n\r\n self.mat = []\r\n for i in range(n):\r\n self.mat.append([0] * n)", "def setUp(self):\r\n self.matrix = array(\r\n [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])\r\n self.cells = [(0, 1), (1, 3)]\r\n self.cells2 = [(0, 2), (2, 3)]", "def compile_links(cls, board):\n\n # compute 1 row width\n width = len(board[0])\n\n # flatten board to a 1d list\n flat_board = list(itertools.chain(*board))\n\n # compute total board length\n board_width = len(flat_board)\n\n # allocate a frame of 0s with proper columns and index\n df = pd.DataFrame(0, columns=flat_board, index=flat_board)\n\n # form links, one full loop of the board\n for y in range(board_width - 1):\n\n # 2 main skipping chains\n df.ix[y][y + 1] = df.ix[y + 1][y] = (y + 1) % width\n\n try:\n # 2 solid side chains\n df.ix[y][y + width] = df.ix[y + width][y] = y + width < board_width\n except IndexError:\n pass\n\n # make sure we cast any ints to bool on exit\n return df.astype(bool)", "def init_needleman_wunsch_matrix(self):\r\n empty_matrix = self.empty_matrix() # Building on the previous definition, this will give you an empty matrix\r\n for i in range(len(self.s2)+1):\r\n for j in range(len(self.s1)+1):\r\n empty_matrix[0][i] = -i\r\n empty_matrix[j][0] = -j\r\n return empty_matrix", "def __init__(self, colinds):\n self._colinds = colinds", "def make_branch_matrix(self):\n self.bm = []\n for pod in self.pods:\n for lb in pod.limbs:\n row = []\n for br in lb.branches:\n row.append(br)\n self.bm.append(row)\n #print \"appended %d-element row %d\" % (len(row),len(self.bm))", "def pfd_initialize (r, a) :\n\ts = r.readline()\n\tif s == \"\" :\n\t\treturn False\n\tl = s.split()\n\tif len(l) > 2:\n\t\treturn False\n\ta[0] = int(l[0])\n\ta[1] = int(l[1])\n\tassert a[0] > 0\n\tassert a[1] > 0\n\tsize[0] = a[0]\n\tglobal matrix\n\tglobal dependencies_list\n\tdependencies_list = [0]*size[0]\n\tmatrix = []*size[0]\n\n\tassert len(dependencies_list) > 0\n\n\tfor i in xrange(a[0]):\n\t\tmatrix.append([0]*a[0])\n\n\tassert len(matrix) > 0\n\n\tfor i in xrange(a[1]):\n\t\ts = r.readline()\n\t\tl = s.split()\n\n\t\trow_index = int(l[0])\n\t\tnum_dependencies = int(l[1])\n\t\tdependencies_list[row_index-1]= num_dependencies\n\n\t\tfor n in 
xrange(2,len(l)):\n\t\t\tmatrix[row_index-1][int(l[n])-1] = 1\n\treturn True", "def fullyConnected(numnodes):\n a=np.ones((numnodes,numnodes))\n for i in range(numnodes):\n a[i,i]=0.0\n return a.astype(int)", "def generateRandomStandardizedLinkMatrix(size, empty, autoRefLink):\n\n #We start by generating our matrix\n res = np.zeros((size,size),dtype=float);\n\n #If we want to work with a sparse matrix\n #We Generate the index vector (witch vector to populate?)\n emptyVecIndexes = np.random.choice(2,size, p=[EMPTINESS_RATIO,1-EMPTINESS_RATIO])\n\n\n for i in range(size):\n\n ## SPARSE MATRIX ##\n if(empty):\n\n #We generate random vectors for only few columns\n if(emptyVecIndexes[i]==1):\n res[i] = generateProbabilisticVector(size,True)\n\n #We postprocess the non empty columns to ensure certain properties (diag = 0 | sum = (strict) 1 )\n if(res[i].sum()!=0):\n index = np.random.choice(size,1)\n\n while(index==i):\n index = np.random.choice(size,1)\n\n\n if(autoRefLink==False):\n res[i][index]+=res[i][i]\n res[i][i]=0\n\n #float precision sum problem => we ensure normalization of columns\n if(isProbabilisticVector(res[i]) == False):\n diff = 1-res[i].sum()\n res[i][index]+=diff\n\n #for vectors with no link => Same chances to go anywhere\n else:\n #fullfill empty vectors with the same prob\n res[i]= np.full(size,1/size)\n\n ## NORMAL MATRIX ##\n else:\n res[i] = generateProbabilisticVector(size,False)\n\n #Very unlikely but we do it just to be sure\n if res[i].sum()==0:\n\n #fullfill empty vectors with the same prob\n res[i]= np.full(size,1/size)\n\n\n #We postprocess the non empty columns to ensure certain properties (diag = 0 | sum = (strict) 1 )\n else:\n index = np.random.choice(size,1)\n\n while(index==i):\n index = np.random.choice(size,1)\n\n if(autoRefLink==False):\n res[i][index]+=res[i][i]\n res[i][i]=0\n\n #float precision sum problem => we ensure normalization of columns\n if(isProbabilisticVector(res[i]) == False):\n diff = 1-res[i].sum()\n res[i][index]+=diff\n\n #to remove\n #print(np.transpose(res));\n return np.transpose(res)", "def board_init():\n board = [[[i for i in range(1,n+1)] for j in range(n)] for k in range(n)]\n return board", "def create_matrix(list_of_edges, n):\n matrix = [[0 for i in range(n)] for j in range(n)]\n ind = 0\n for i in range(n):\n for j in range(i):\n matrix[i][j] = list_of_edges[ind]\n matrix[j][i] = list_of_edges[ind]\n ind += 1\n return matrix", "def _make_random_matrix(self, n_components, n_features):", "def populate_link_cells(lc, xyz, Lx, Nx):\n N = len(xyz)\n for i in range(N):\n num = xyz[i] // Lx % Nx\n lc[id_from_coord(num, Nx)].append(i)", "def matrix_init(sizex, sizey):\n return [[0]*sizey for i in range(sizex)]", "def __init__(self, n_rows: int = 2, n_columns: int = 2):\n self.set_uniform(n_rows, n_columns)", "def get_feedforward_adj_mat(num_layers):\n ret = dok_matrix((num_layers, num_layers))\n for i in range(num_layers - 1):\n ret[i, i + 1] = 1\n return ret", "def get5x5matrix(self): #modified from nxvasc get3x3matrix()\n try:\n i = na.identity(3)\n \n self.d124 = i.copy()\n self.ds124 = na.zeros(124,na.float64)\n \n for k in range(1,124):\n self.d124 = na.concatenate((self.d124,i))\n# print len(self.d124)\n count = 0\n a = []\n for k in range(-2,3):\n for j in range(-2,3):\n for i in range(-2,3):\n if( i != 0 or j != 0 or k != 0 ):\n self.ds124[count] = math.sqrt(i**2+j**2+k**2)\n count += 1\n a.append(i)\n a.append(j)\n a.append(k)\n# print len(a)\n a = na.reshape(na.array(a),(372,1))\n# print len(self.d124)\n self.d124 = 
na.concatenate((self.d124,a),axis=1)\n except Exception as error:\n print(\"failed in get5x5matrix(): \", error)", "def __init__(self, n: int):\n self.n = n\n self.rows_1 = [0 for _ in range(n + 1)]\n self.rows_2 = [0 for _ in range(n + 1)]\n self.cols_1 = [0 for _ in range(n + 1)]\n self.cols_2 = [0 for _ in range(n + 1)]\n self.diag1 = [0 for _ in range(n + 1)]\n self.diag2 = [0 for _ in range(n + 1)]", "def set_identity(self, dimension: int = 2):\n self.c_matrix = []\n self.prior = BaseDistribution(dist=[1.0/dimension for d in range(dimension)])\n self.n_rows = dimension\n self.n_columns = dimension\n\n for d in range(dimension):\n c_dist = [0.0]*dimension\n c_dist[d] = 1.0\n self.c_matrix.append(BaseDistribution(dist=c_dist))\n return self", "def __init__(self, sample_size, neighbours, lengths, offsets, seed=0):\n self.sample_size = sample_size\n self.seed, self.seed2 = random_seed.get_seed(seed)\n self.neighbours = neighbours\n self.lengths = lengths\n self.offsets = offsets\n super(UniformEdgeDataset, self).__init__()", "def __init__(self,nrows,ncols,fill=None):\n self.nrows = nrows \n self.ncols = ncols\n self.swapcount = 1\n self.initargs = str((self.nrows,self.ncols,fill))\n\n if type(nrows) != int or type(ncols) != int:\n raise TypeError('matrix number of rows and columns must be ints')\n if nrows <= 0 or ncols <= 0:\n raise ValueError('matrix number of rows and columns must be positive')\n \n self.matrix = [[0 for i in range(self.ncols)] for j in range(self.nrows)]\n if fill != None:\n self.fill(fill)", "def init_board(self, size):\n # One entry for every node, if diamond all will be filled with pieces, if triange half of matrix including \n # diagonal from top left to bottom right will be filled\n self.board = [[False for i in range(size)] for j in range(size)] \n\n # One entry for every node pair (i, j), where cM(i, j) = direction enum if there is a connection from i to j. \n # (i, i) does not have a connection\n self.connection_matrix = [[False for i in range(size*size)] for j in range(size*size)]\n if self.shape == ShapeType.DIAMOND:\n for node_i in range(size*size):\n top_boundry = node_i < size # Check if node is on top of board\n left_boundry = node_i % size == 0 # Check if node is in leftmost column in board\n right_boundry = (node_i + 1) % size == 0 # Check if node is in rightmost column in board\n bottom_boundry = node_i > size*size-1-size # Check if node is in bottommost coulmn in board\n \n # See docs/Diamond_Connection_Matrix.png for visualization\n if not top_boundry:\n self.connection_matrix[node_i][node_i-size] = DirectionType.UP_RIGHT\n if not top_boundry and not right_boundry:\n self.connection_matrix[node_i][node_i-size+1] = DirectionType.RIGHT\n if not right_boundry:\n self.connection_matrix[node_i][node_i+1] = DirectionType.DOWN_RIGHT\n if not bottom_boundry:\n self.connection_matrix[node_i][node_i+size] = DirectionType.DOWN_LEFT\n if not bottom_boundry and not left_boundry:\n self.connection_matrix[node_i][node_i+size-1] = DirectionType.LEFT\n if not left_boundry:\n self.connection_matrix[node_i][node_i-1] = DirectionType.UP_LEFT\n \n elif self.shape == ShapeType.TRIANGLE:\n for node_i in range(size*size):\n # check if node_i is in the empty triangle. 
\n # No proof for this but some sketching suggested the formula, and the formula worked with empirical testing\n # for many different sizes\n # == gives on diagonal to the right of main diagonal through matrix, greater gives the numbers on the rest of the row\n # basic intuition: size-node_i//size-1 gives how many of the nodes on a row in the board matrix are empty, \n # and the rest checks if the node_i is in such an area\n triangle_check = node_i%size >= size - (size - node_i//size - 1) \n if triangle_check: # If it is in the empty side there should be no connections so skip ahead\n continue\n\n top_boundry = node_i < size # Checks if node is on top of board\n left_boundry = node_i % size == 0 # Check if node is in leftmost column in board\n right_boundry = (node_i + 1) % size == 0 # Check if node is in rightmost column in board\n bottom_boundry = node_i > size*size-1-size # Check if node is in bottommost coulmn in board\n diagonal_boundry = node_i%(size+1) == 0 # Check if node is on diagonal in board\n\n # See docs/Triangle_Connection_Matrix.png for visualization\n if not top_boundry and not diagonal_boundry:\n self.connection_matrix[node_i][node_i-size] = DirectionType.UP_RIGHT\n if not right_boundry and not diagonal_boundry:\n self.connection_matrix[node_i][node_i+1] = DirectionType.RIGHT\n if not right_boundry and not bottom_boundry:\n self.connection_matrix[node_i][node_i+size+1] = DirectionType.DOWN_RIGHT\n if not bottom_boundry:\n self.connection_matrix[node_i][node_i+size] = DirectionType.DOWN_LEFT\n if not left_boundry:\n self.connection_matrix[node_i][node_i-1] = DirectionType.LEFT\n if not left_boundry and not top_boundry:\n self.connection_matrix[node_i][node_i-size-1] = DirectionType.UP_LEFT", "def rel_matrix(df_long: pd.DataFrame) -> None:\n pass", "def expansion_matrix_dl(self):\n\n row = self._base_nlp._lower_d_map\n nnz = len(self._base_nlp._lower_d_map)\n col = np.arange(nnz, dtype=np.int)\n data = np.ones(nnz)\n return csr_matrix((data, (row, col)), shape=(self.nd, nnz))", "def link(self):\n print(\" ** Performing linkage\")\n df = pd.DataFrame(self.vectors)\n Z = H.linkage(df.iloc[:, 2:], 'ward')\n Z = pd.DataFrame(Z, columns=('node1', 'node2', 'distance', 'count'))\n\n return Z", "def initialize(self):\n N=self.N\n M=[]\n a=random.rand(self.d,1,self.D)\n M.append(a)\n for i in range(1,N-1):\n a=random.rand(self.d,self.D,self.D)\n M.append(a)\n a=random.rand(self.d,self.D,1)\n M.append(a)\n return M", "def fill_matrix(matrix, label, num):\n m, n = matrix.shape\n mat = np.zeros((num, n))\n label=list(label)\n for i in range(num):\n if i < m:\n mat[i, :] = matrix[i, :]\n\n # get a random vector\n else:\n vector_id = random.randint(0, i-1)\n vector1 = mat[vector_id, :]\n l1 = label[vector_id]\n\n # get a neighbors\n nid = get_nearest_indices(l1, mat[0:i, :])\n v2 = mat[nid, :]\n l2 = label[nid]\n\n n_vector, n_label = generate_data(vector1, v2, l1, l2)\n\n mat[i,:]=n_vector\n label.append(n_label)\n\n return mat,np.array(label)", "def __init__(self, n, prey_cnt=0, predator_cnt=0):\n # print n, prey_cnt, predator_cnt\n self.grid_size = n\n self.grid = []\n for i in range(n):\n row = [0]*n # row is a list of n zeros\n self.grid.append(row)\n self.init_animals(prey_cnt, predator_cnt)", "def connectivity_matrix(self):\n # TODO: make this more memory efficient by ordering i,j in code when needed.\n temp = []\n for i in range(self.n_atom):\n for j in range(i+1, self.n_atom):\n if self.bond(i, j):\n temp.append([i+1, j+1])\n self.connect = np.asarray(temp)", "def 
set_up_matrix():\n matrix= []\n row= \"1 9 3 4 5\"\n row= to_int(row)\n matrix.append(row)\n row= \"2 30 4 5 6\"\n row= to_int(row)\n matrix.append(row)\n row= \"3 8 5 6 7\"\n row= to_int(row)\n matrix.append(row)\n row= \"4 5 6 7 8\"\n row= to_int(row)\n matrix.append(row)\n row= \"5 6 7 8 9\"\n row= to_int(row)\n matrix.append(row)\n return matrix", "def create_sites_matrix(all_pages):\n answer = input(\"Do you want to overwrite 'all_links.json' ? -- \")\n if answer == \"yes\":\n create_links_dict(all_pages)\n\n with open(os.path.join(\"files\", \"all_links.json\"), \"r\", encoding=\"utf-8\") as f:\n links_dict = json.load(f)\n\n pages_matrix = np.array([[]])\n for j in range(N_PROCESSES):\n for i, page in enumerate(all_pages[j]):\n link = reduce_to_domain(page[\"link\"])\n if len(link) < MIN_LINK_LEN:\n continue\n\n n_page = links_dict[link]\n\n for j, child_link in enumerate(page[\"hyperlinks\"]):\n child_link = reduce_to_domain(child_link)\n if len(child_link) < MIN_LINK_LEN:\n continue\n\n if pages_matrix.size == 0:\n pages_matrix = np.array([[n_page, links_dict[child_link]]])\n else:\n if n_page != links_dict[child_link]:\n pages_matrix = np.append(pages_matrix, [[n_page, links_dict[child_link]]], axis=0)\n\n return pages_matrix, len(links_dict)", "def initialize(self, col):\n\t\treturn []", "def __init__(self, n):\n self.matrix = [[0 for i in range(n)] for j in range(n)]\n self.winning = False", "def adjacencyMatrixplot(nodes):\n adMat = np.zeros((len(nodes), len(nodes)), int)\n for node in nodes:\n if (node.id == 0):continue\n parent, child = node.parent, node.id # -1 -> tally with list indices\n adMat[parent, child] = 1\n return adMat", "def initialize_cells(self):\n for loc in np.ndindex(*self.shape): # TODO: see if nested for loop is faster than this\n c = Cell(loc, self)\n self.cells.append(c)", "def __init__(self, columns):\n self.columns = columns\n self.rows = []", "def make_matrix(rows, columns):\n\tmatrix = []\n\tfor row in range(rows):\n\t\tmatrix += [[0] * columns]\n\t\t\n\treturn matrix", "def __init__(self, num_nodes):\n self._num_nodes = num_nodes\n self._node_numbers = [node for node in range(num_nodes) for dummy_idx in range(num_nodes)]", "def __init__(self, num_nodes):\n self._num_nodes = num_nodes\n self._node_numbers = [node for node in range(num_nodes) for dummy_idx in range(num_nodes)]", "def __init__(self, num_nodes):\n self._num_nodes = num_nodes\n self._node_numbers = [node for node in range(num_nodes) for dummy_idx in range(num_nodes)]", "def __init__(self, num_nodes):\n self._num_nodes = num_nodes\n self._node_numbers = [node for node in range(num_nodes) for dummy_idx in range(num_nodes)]", "def __init__(self, num_nodes):\n self._num_nodes = num_nodes\n self._node_numbers = [node for node in range(num_nodes) for dummy_idx in range(num_nodes)]", "def __init__(self, num_nodes):\n self._num_nodes = num_nodes\n self._node_numbers = [node for node in range(num_nodes) for dummy_idx in range(num_nodes)]", "def __init__(self, n_neighbors=2):\n self.n_neighbors = n_neighbors", "def create_matrix(n, m):\n matrix = [[None]*m for i in range(n)]\n return matrix", "def __init__(self, dim, connections_per_row=3):\n _table.STable_swiginit(self, _table.new_STable(dim, connections_per_row))", "def __init__(self, lines, cols):\n self.lines = lines\n self.cols = cols\n self.generation = 0\n initial_list = [[self.cell_state['dead']] * cols for x in range(lines)]\n super().__init__(initial_list)", "def __init__(self, size):\n self.size = size\n self.matrix = [[False] * size 
for _ in range(size)]\n self.clear_flags()", "def generate_full_adj(self):\n edges = np.zeros(shape=(self.n_balls, self.n_balls))\n row_idx = 0 # start filling adjacency mat from root node\n col_idx = 1 # skip the root node and start from 2nd node\n for l in range(self.nl):\n for n in range(self.nn[l]):\n edges[row_idx, col_idx:col_idx + self.nc[l]] = 1\n # Increase counters after filling connections for a parent node\n col_idx += self.nc[l]\n row_idx += 1\n return edges", "def create_adjacency_matrix(self, edges):\n matrix = np.zeros([self.max_words, self.max_words * self.edge_types * 2])\n for edge in edges:\n src = edge[0]\n e_type = edge[1]\n dest = edge[2]\n self.set_matrix(matrix, src, dest, e_type, 1)\n return matrix", "def __init__(self, rows, columns, live_probability=0.3, seed=0):\n self.live_probability = live_probability\n self.seed = seed\n self.rows = rows\n self.columns = columns\n self.grid = [\n [Cell() for column_cells in range(self.columns)]\n for row_cells in range(self.rows)\n ]\n\n self.generate_board()", "def _build_adjacency_matrix_1(self):\n\n from scipy import sparse as sparse\n \n down_neighbour = np.empty(self.tri.npoints)\n\n for node in range (0,self.tri.npoints):\n down_neighbour[node] = self.neighbour_array_lo_hi[node][0]\n\n # Build a matrix of downhill-ness - one entry per node ! \n \n size = self.tri.npoints\n row_array = np.empty(size)\n col_array = np.empty(size)\n down_array = np.ones(size)\n\n # Catch cases where node is local low point (i.e. it is its own low neighbour)\n\n for row in range(0, self.tri.npoints): \n row_array[row] = row\n col_array[row] = down_neighbour[row]\n if row == down_neighbour[row]:\n down_array[row] = 0.0\n \n\n downMCOO = sparse.coo_matrix( (down_array, (row_array, col_array)), shape=(size,size) ).T \n\n self.adjacency1 = downMCOO.tocsr() \n\n # Catch pathological cases - sometimes if there is a flat spot on the boundary, then \n # the filling method above will produce a non-square matrix. This is caused by\n # repetition of values in the COO list which are summed on conversion.\n\n if downMCOO.shape[0] != downMCOO.shape[1]:\n # This approach works but is a lot slower\n\n print \"\"\"\n Warning: the downhill matrices require a slow build method. This is probably\n Because there are degeneracies in the slope - particularly at the boundaries\n A small random perturbation is usually enough to fix this problem\n \"\"\"\n downMat = sparse.lil_matrix((size, size))\n\n for row in range(0, self.tri.npoints): \n downMat[down_neighbour[row],row] = 1.0\n\n for row in range(0, self.tri.npoints): \n if down_neighbour[row] == row:\n downMat[row,row] = 0.0\n \n self.adjacency1 = downMat.T.tocsr() \n \n return", "def config_connection_matrix(self):\n for leg in self.legs.values():\n for m in leg[\"muscles\"]:\n if \"brain_sig\" and \"name\" in m:\n self.connection_matrix[m[\"name\"]] = [0] * self.brain[\"n_osc\"]\n self.connection_matrix[m[\"name\"]][m[\"brain_sig\"] - 1] = 1.", "def __create_connections(self):\n \"\"\"\n When adding diagonals, each node adds only diagonals to nodes below it.\n This prevents a case where two nodes add diagonals with each other, s.t. 
both diagonals are added.\n \"\"\"\n # top left corner:\n self.add_connection(self.get_junc((0, 0)).right, self.get_junc((0, 1)).left)\n self.add_connection(self.get_junc((0, 0)).down, self.get_junc((1, 0)).up)\n # diagonal to down right\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((0, 0)).right, self.get_junc((1, 1)).up)\n else:\n self.add_connection(self.get_junc((0, 0)).down, self.get_junc((1, 1)).left)\n # top row:\n for wi in range(1, self.width - 1):\n self.add_connection(self.get_junc((0, wi)).right, self.get_junc((0, wi + 1)).left)\n self.add_connection(self.get_junc((0, wi)).left, self.get_junc((0, wi - 1)).right)\n self.add_connection(self.get_junc((0, wi)).down, self.get_junc((1, wi)).up)\n # diagonal to down left\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((0, wi)).left, self.get_junc((1, wi - 1)).up)\n else:\n self.add_connection(self.get_junc((0, wi)).down, self.get_junc((1, wi - 1)).right)\n # diagonal to down right\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((0, wi)).right, self.get_junc((1, wi + 1)).up)\n else:\n self.add_connection(self.get_junc((0, wi)).down, self.get_junc((1, wi + 1)).left)\n # top right corner:\n self.add_connection(self.get_junc((0, -1)).left, self.get_junc((0, -2)).right)\n self.add_connection(self.get_junc((0, -1)).down, self.get_junc((1, -1)).up)\n # diagonal to down left\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((0, -1)).left, self.get_junc((1, -2)).up)\n else:\n self.add_connection(self.get_junc((0, -1)).down, self.get_junc((1, -2)).right)\n # middle rows:\n for hi in range(1, self.height - 1):\n # left node\n self.add_connection(self.get_junc((hi, 0)).right, self.get_junc((hi, 1)).left)\n self.add_connection(self.get_junc((hi, 0)).down, self.get_junc((hi + 1, 0)).up)\n self.add_connection(self.get_junc((hi, 0)).up, self.get_junc((hi - 1, 0)).down)\n # diagonal to down right\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((hi, 0)).right, self.get_junc((hi + 1, 1)).up)\n else:\n self.add_connection(self.get_junc((hi, 0)).down, self.get_junc((hi + 1, 1)).left)\n # middle nodes\n for wi in range(1, self.width - 1):\n self.add_connection(self.get_junc((hi, wi)).right, self.get_junc((hi, wi + 1)).left)\n self.add_connection(self.get_junc((hi, wi)).left, self.get_junc((hi, wi - 1)).right)\n self.add_connection(self.get_junc((hi, wi)).down, self.get_junc((hi + 1, wi)).up)\n self.add_connection(self.get_junc((hi, wi)).up, self.get_junc((hi - 1, wi)).down)\n # diagonal to down left\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((hi, wi)).left, self.get_junc((hi + 1, wi - 1)).up)\n else:\n self.add_connection(self.get_junc((hi, wi)).down, self.get_junc((hi + 1, wi - 1)).right)\n # diagonal to down right\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((hi, wi)).right, self.get_junc((hi + 1, wi + 1)).up)\n else:\n self.add_connection(self.get_junc((hi, wi)).down, self.get_junc((hi + 1, wi + 1)).left)\n # right node:\n self.add_connection(self.get_junc((hi, -1)).left, self.get_junc((hi, -2)).right)\n self.add_connection(self.get_junc((hi, -1)).down, self.get_junc((hi + 1, -1)).up)\n self.add_connection(self.get_junc((hi, -1)).up, self.get_junc((hi - 1, -1)).down)\n # diagonal to down left\n if randint(0, 1) == 0:\n self.add_connection(self.get_junc((hi, -1)).left, self.get_junc((hi + 1, -2)).up)\n else:\n self.add_connection(self.get_junc((hi, -1)).down, self.get_junc((hi + 1, -2)).right)\n # bottom left corner:\n self.add_connection(self.get_junc((-1, 
0)).right, self.get_junc((-1, 1)).left)\n self.add_connection(self.get_junc((-1, 0)).up, self.get_junc((-2, 0)).down)\n # bottom row\n for wi in range(1, self.width - 1):\n self.add_connection(self.get_junc((-1, wi)).right, self.get_junc((-1, wi + 1)).left)\n self.add_connection(self.get_junc((-1, wi)).left, self.get_junc((-1, wi - 1)).right)\n self.add_connection(self.get_junc((-1, wi)).up, self.get_junc((-2, wi)).down)\n # bottom right corner:\n self.add_connection(self.get_junc((-1, -1)).left, self.get_junc((-1, -2)).right)\n self.add_connection(self.get_junc((-1, -1)).up, self.get_junc((-2, -1)).down)", "def Init_Adaptive_Grid(self,):\n self.Indicator[0] = 0\n self.Old[0] = 0\n \n for i in range(self.num_dim):\n self.Active[i] = i+1\n self.Indicator[i+1] = 1\n self.N_Forward[i,0] = i+2\n self.N_Backward[i,i+1] = 1\n self.Idx[i,i+1] = 1", "def _init_lattice(self, link_type, rand):\n if link_type == 'SU2':\n self.link_shape = (2, 2)\n link_dtype = np.complex64\n\n if link_type == 'SU3':\n self.link_shape = (3, 3)\n link_dtype = np.complex64\n\n if link_type == 'U1':\n self.link_shape = ()\n link_dtype = np.float32\n\n sites_shape = tuple(\n [self.time_size]\n + [self.space_size for _ in range(self.dim-1)]\n + list(self.link_shape)\n )\n\n links_shape = tuple(\n [self.time_size]\n + [self.space_size for _ in range(self.dim-1)]\n + [self.dim]\n + list(self.link_shape)\n )\n\n self.sites = np.zeros(sites_shape, dtype=link_dtype)\n self.links = np.zeros(links_shape, dtype=link_dtype)\n\n self.num_sites = np.cumprod(self.sites.shape)[-1]\n self.num_links = self.num_sites * self.dim\n if self.link_type != 'U1':\n # Indices for individual sites and links\n self.site_idxs = self.sites.shape[:-2]\n self.link_idxs = self.links.shape[:-2]\n else:\n self.site_idxs = self.sites.shape\n self.link_idxs = self.links.shape\n\n if rand:\n self.links = np.array(np.random.uniform(0, 2*np.pi,\n links_shape),\n dtype=np.float32)\n\n # Construct list containing the indices of the link variables for each\n # plaquette in the lattice to use as a lookup table instead of having\n # to perform nested loops\n t = np.arange(self.time_size)\n x = np.arange(self.space_size)\n u = np.arange(self.dim)\n v = np.arange(self.dim)\n s_tups = [(i, j) for i in t for j in x] # site tuples\n self.plaquette_idxs = [\n list(s) + [i] + [j] for s in s_tups for i in u for j in v if j > i\n ]", "def __init__(self,columns_to_fix=[],rows_to_scan='all',keep_dummies=False):\n self.columns_to_fix = columns_to_fix\n self.rows_to_scan = rows_to_scan\n self.keep_dummies = keep_dummies", "def __init__(self, center, leads, connections):\n\n if not center.is_square():\n raise ValueError(\"Center is not a square TightBinding\")\n\n self.center = center.copy()\n self.dims = center.dims\n self.leads = []\n self.connections = []\n\n for l,c in zip(leads,connections):\n self.append_lead(l,c)", "def __init__(self, num_elements):\n\n # Create array of linked lists\n self.main_array = [LinkedList() for i in range(num_elements)]\n self.num_elements = num_elements", "def __init__(self,m,n):\n self.columns = m\n self.rows = n\n self.board = makeBoard(m,n)", "def __init__(self, dimensions=2):\n assert dimensions > 0\n for d in range(0,dimensions+1):\n self.weight.append(0)", "def __init__(self, rows, cols, depth):\n\n self.rows = rows\n self.cols = cols\n self.depth = depth\n self.table = np.zeros((depth, rows, cols))", "def __init__(self, n: int):\n self.n = n\n self.newList = [[0 for _ in range(n)] for _ in range(n)]\n self.colSum = [0 for _ in 
range(n)]\n self.rowSum = [0 for _ in range(n)]\n self.diag = 0\n self.revDiag = 0", "def generate_ref_matrix(self, n_holes, n_particles):\n\n gs_config = np.append(np.ones(n_holes),\n np.zeros(n_particles))\n gs_config_str = ''.join([str(int(state)) for state in gs_config])\n\n refs = list(map(\"\".join, itertools.permutations(gs_config_str)))\n refs = list(dict.fromkeys(refs)) # remove duplicates\n refs = [list(map(int, list(ref))) for ref in refs]\n\n # # COLLECT TOTAL SPIN = 0 REFS\n refss0 = []\n for ref in refs:\n S = 0\n for i in range(len(ref)):\n if ref[i] == 1:\n if i%2 == 0:\n S += 1\n else:\n S += -1\n else: continue\n if S == 0:\n refss0.append(ref)\n\n \n refs = refss0\n\n refs.sort(key=self.number_of_pairs, reverse=True) \n #refs = refs[0:num_states_in_ref]\n \n #indices = np.r_[0,1,5,27,28]\n\n #refs = refs[0,1,5,27,28]\n #refs = np.asarray(refs)\n #refs = refs[np.r_[0,1,5,27,28], :]\n\n ref_matrix = np.asarray(refs)\n\n return ref_matrix", "def make_table(m, n):\n return [[0] * n for _ in range(m)]", "def initialize_grid(self):\r\n for i in range(self.height):\r\n for j in range(self.width):\r\n self.grid[i][j] = 0\r\n \r\n # fill up unvisited cells\r\n for r in range(self.height):\r\n for c in range(self.width):\r\n if r % 2 == 0 and c % 2 == 0:\r\n self.unvisited.append((r,c))\r\n\r\n self.visited = []\r\n self.path = dict()\r\n self.generated = False", "def __createNetwork__(self, amount_nodes, amount_links):\n random.seed()\n numOfNodes = 0\n linksPerIteration = (amount_links-3)/(amount_nodes-3) if amount_nodes > 3 else 1\n #generate n nodes\n while numOfNodes < amount_nodes:\n node = Node(numOfNodes)\n self.appendNode(node)\n numOfNodes += 1\n #make first three nodes fully connected\n if numOfNodes == 2:\n self.__connectNode__(numOfNodes, 1)\n if numOfNodes == 3:\n self.__connectNode__(numOfNodes, 2)\n #link following nodes\n if numOfNodes > 3:\n self.__connectNode__(numOfNodes, linksPerIteration)", "def __init__(self, initial, lables):\n\t\tself.lables = lables\n\t\t# default setup\n\t\tself.matrix = [0] * len(lables)\n\t\tfor i in range(0, len(self.matrix)):\n\t\t\tself.matrix[i] = [initial] * len(lables)\n\t\t\n\t\t# Similar to the state class, data access is only possible\n\t\t# with row and column lables, dicts enable an acces in linear\n\t\t# time\n\t\tself.access = dict()\n\t\tcounter = 0\n\t\tfor i in self.lables:\n\t\t\tself.access[i] = counter\n\t\t\tcounter += 1", "def __init__(self, rows, cols):\n if rows <= 0:\n raise ValueError('Number of matrix rows must be greater than zero.')\n if cols <= 0:\n raise ValueError('Number of matrix cols must be greater than zero.')\n\n self.__rows = rows\n self.__cols = cols\n\n # Create the matrix and initialize all elements to zero\n self.__m = []\n for i in range(1, self.__rows + 1):\n row = []\n for j in range(1, self.__cols + 1):\n row.append(0)\n self.__m.append(row)", "def initialize_cost_matrix(self):\n self.cost_matrix = np.zeros(\n (len(self.anchor_points), len(self.patch_centers)), dtype=np.float64\n )\n target_point = self.anchor_points[0]\n for i in range(len(self.patch_centers)):\n source_point = self.patch_centers[i]\n Es_point = self.get_Es_point(source_point, target_point)\n Ei_point = self.get_Ei_point(source_point, target_point)\n self.cost_matrix[0][i] = self.get_E1_point(Es_point, Ei_point)", "def __init__(self, n: int):\n self.size = n\n self.board = [[CellValues.EMPTY.value] * n for _ in range(n)]\n self.num_empty_cells = n * n", "def Distmatrix(self):\n self.Dismatrix = np.zeros((self.nodenum, 
self.nodenum))\n for i in range(len(self.Dismatrix)):\n for j in range(len(self.Dismatrix)):\n self.Dismatrix[i, j] = sf.dist(self.y[i], self.x[i], self.y[j], self.x[j])\n self.Dismatrix[j, i] = self.Dismatrix[i, j]", "def __init__(self, size):\n _PysparseMatrixFromShape.__init__(self, rows=size, cols=size, bandwidth = 1)\n ids = numerix.arange(size)\n self.put(numerix.ones(size, 'd'), ids, ids)", "def build_links(self):\n xygrid = self.xymap.xygrid\n\n # we must use the xygrid coordinates\n x, y = self.x, self.y\n\n # scan in all directions for links\n for direction, (dx, dy) in MAPSCAN.items():\n\n lx, ly = x + dx, y + dy\n\n if lx in xygrid and ly in xygrid[lx]:\n link = xygrid[lx][ly]\n\n # just because there is a link here, doesn't mean it has a\n # connection in this direction. If so, the `end_node` will be None.\n end_node, weight, steps = link.traverse(REVERSE_DIRECTIONS[direction])\n\n if end_node:\n # the link could be followed to an end node!\n\n self.first_links[direction] = link\n\n # check the actual direction-alias to use, since this may be\n # different than the xygrid cardinal directions. There must be\n # no duplicates out of this node or there will be a\n # multi-match error later!\n first_step_name = steps[0].direction_aliases.get(direction, direction)\n if first_step_name in self.closest_neighbor_names:\n raise MapParserError(\n f\"has more than one outgoing direction '{first_step_name}'. \"\n \"All directions out of a node must be unique.\",\n self,\n )\n self.closest_neighbor_names[first_step_name] = direction\n\n node_index = end_node.node_index\n self.weights[node_index] = weight\n self.links[direction] = end_node\n # this is useful for map building later - there could be multiple\n # links tied together until getting to the node\n self.xy_steps_to_node[direction] = steps\n\n # used for building the shortest path. Note that we store the\n # aliased link directions here, for quick display by the\n # shortest-route solver\n shortest_route = self.shortest_route_to_node.get(node_index, (\"\", [], BIGVAL))[\n 2\n ]\n if weight < shortest_route:\n self.shortest_route_to_node[node_index] = (first_step_name, steps, weight)", "def generator_matrix(self):\n self.generator_mat = np.zeros((self.k, self.n), dtype=int)\n A_matrix = np.ones((self.k, self.n-self.k), dtype=int)\n\n identity_i = np.identity(self.k, dtype=int)\n self.generator_mat[:, :self.k] = identity_i\n\n # This loop edits the A_matrix to make the column vectors linearly ind.\n for x in range(self.n-self.k):\n A_matrix[x, x] = 0\n\n self.generator_mat[:, self.k:] = A_matrix\n\n# for i in range(self.k):\n# print(self.generator_mat[i,:])\n\n return self.generator_mat", "def _build_adjacency_matrix_2(self):\n\n from scipy import sparse as sparse\n \n down_neighbour = np.empty(self.tri.npoints)\n down_neighbour1 = np.empty(self.tri.npoints)\n\n for node in range (0,self.tri.npoints):\n down_neighbour[node] = self.neighbour_array_lo_hi[node][0]\n down_neighbour1[node] = self.neighbour_array_lo_hi[node][1]\n\n # Build a matrix of downhill-ness - one entry per node ! \n \n size = self.tri.npoints\n row_array = np.empty(size)\n col_array = np.empty(size)\n down_array = np.ones(size)\n\n # Catch cases where node is local low point (i.e. 
it is its own low neighbour)\n for row in range(0, self.tri.npoints): \n row_array[row] = row\n col_array[row] = down_neighbour1[row]\n if row == down_neighbour[row]:\n down_array[row] = 0.0 \n if row == down_neighbour1[row]:\n col_array[row] = down_neighbour[row]\n\n\n downMCOO = sparse.coo_matrix( (down_array, (row_array, col_array)), shape=(size,size) ).T \n self.adjacency2 = downMCOO.tocsr() \n\n # Catch pathological cases - sometimes if there is a flat spot on the boundary, then \n # the filling method above will produce a non-square matrix. This is caused by\n # repetition of values in the COO list which are summed on conversion.\n\n if downMCOO.shape[0] != downMCOO.shape[1]:\n # This approach works but is a lot slower\n\n print \"\"\"\n Warning: the downhill matrices require a slow build method. This is probably\n Because there are degeneracies in the slope - particularly at the boundaries\n A small random perturbation is usually enough to fix this problem\n \"\"\"\n downMat = sparse.lil_matrix((size, size))\n\n for row in range(0, self.tri.npoints): \n downMat[down_neighbour[row],row] = 1.0\n\n for row in range(0, self.tri.npoints): \n if row == down_neighbour[row] or row == down_neighbour1[row]:\n downMat[row,row] = 0.0\n \n self.adjacency2 = downMat.T.tocsr() \n\n return", "def initializeMatrix(self, seqs):\n currentSequence = seqs[0]\n if len(seqs) == 1:\n # Base case in the recursion, only 1 sequence left\n return [None] * (len(currentSequence) + 1)\n\n else:\n return [self.initializeMatrix(seqs[1:]) for x in range(len(currentSequence) + 1)]", "def init(self):\n self.padre = self.id\n self.sinVisitar = [] \n self.visited = False\n print (\"inicializo algoritmo\")\n for i in range (len(self.neighbors)): #De esta forma se pueden manipular las listas por aparte\n self.sinVisitar.append(self.neighbors[i])", "def __init__(self):\n self.row_number = -1\n self.next_row = None\n self.row_sentinel = SparseMatrix.MatrixEntry()", "def __init__(self, num_rows):\r\n self.num_rows = num_rows\r\n\r\n # Make the linear array where we store items.\r\n num_items = self.num_cells_for_rows(self.num_rows)\r\n self.values = [None for i in range(num_items)]", "def next_life_generation(a):\n w = len(a[0])\n h = len(a)\n new_a = create_board(w, h)\n\n for n in range(h):\n for m in range(w):\n if 0 < n < h - 1 and 0 < m < w - 1:\n count = count_neighbours(n, m, a)\n if count < 2 or count > 3:\n new_a [n][m] = 0\n elif count == 3:\n new_a[n][m] =1\n else:\n new_a[n][m] = a[n][m]\n else:\n new_a[n][m] = 0\n \n return new_a", "def __init__(self, init_grid=None):\n\n self.height = len(init_grid)\n self.width = len(init_grid[0])\n\n self.grid = [[Cell(self, c) for c in row]\n for row in init_grid]\n\n self.g = nx.Graph()\n self.tangle()" ]
[ "0.6169552", "0.61102796", "0.6080898", "0.5925205", "0.5843429", "0.5816062", "0.5752628", "0.5748185", "0.57156444", "0.57052106", "0.56688374", "0.5662722", "0.5662606", "0.5644519", "0.564434", "0.5636628", "0.56212884", "0.5586612", "0.5582982", "0.5580839", "0.554456", "0.55443394", "0.553317", "0.5530493", "0.55257773", "0.54865324", "0.5480157", "0.54767764", "0.545282", "0.54520917", "0.5449615", "0.5441681", "0.54296666", "0.5417854", "0.54001015", "0.53856736", "0.5357469", "0.5351238", "0.53474873", "0.5341821", "0.5334882", "0.53257024", "0.5307553", "0.52976376", "0.5278434", "0.5277567", "0.5272268", "0.5255823", "0.5254843", "0.5251378", "0.5243471", "0.5243312", "0.5242094", "0.5233101", "0.52329564", "0.523135", "0.523135", "0.523135", "0.523135", "0.523135", "0.523135", "0.52109665", "0.52095973", "0.5195247", "0.51921386", "0.51903766", "0.5183904", "0.5180364", "0.5176823", "0.51738304", "0.516276", "0.51608425", "0.5160789", "0.5155112", "0.51494884", "0.51366955", "0.5134353", "0.5134225", "0.5133696", "0.5132594", "0.51176083", "0.5110707", "0.510611", "0.5101479", "0.50989354", "0.50957817", "0.5095608", "0.5086246", "0.50828224", "0.5080316", "0.5072369", "0.50686264", "0.50631136", "0.5058301", "0.50558907", "0.5052779", "0.5046936", "0.50461245", "0.5044396", "0.50423753" ]
0.5835481
5
Add a row to the matrix. cols is a sorted list of the column numbers that have a 1, indexed from 0.
def add_row(self, cols, name=None):
    def get_header(col_current, col_shift):
        """ Starting at the current column header, shift to the right col_shift times """
        header = col_current
        for i in range(col_shift):
            header = header.right
        return header

    # Update the number of rows
    self.numrows += 1
    if name is None:
        name = self.numrows

    # Get the first column header
    head = self.root.right
    head = get_header(head, cols[0])

    # Place the first cell
    cell = Cell(head, name)
    cell.up = head.up
    cell.down = head
    head.up.down = cell
    head.up = cell
    head.sum += 1

    oldcell = cell
    oldcol = cols[0]

    # Loop over all of the other entries
    for col in cols[1:]:
        # Shift to get the header
        head = get_header(head, col - oldcol)

        # Add in the cell
        cell = Cell(head, name)
        cell.up = head.up
        cell.down = head
        head.up.down = cell
        head.up = cell

        # Now add the left/right links
        cell.left = oldcell
        cell.right = oldcell.right
        cell.right.left = cell
        cell.left.right = cell

        # Add to the header sum
        head.sum += 1

        # Keep the old cell for reference
        oldcell = cell
        oldcol = col
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_row(matrix):\n\tl = len(matrix[0])\n\ttemp = matrix[:]\n\ttemp += [[0]*l]\n\treturn temp", "def add_row(matrix):\n import numpy as np\n shape = np.shape(matrix)\n if matrix is np.zeros(shape):\n return matrix.append(np.zeros(shape[0]))", "def addRow(self, row):\n nc = len(row)\n if nc != self.data.shape[0]:\n msg=\"Row of wrong length : %s instead of %s\"%(nc,self.data.shape[0])\n raise msg\n new_row = numpy.reshape(numpy.array(row),(-1,1))\n self.data = numpy.concatenate((self.data, new_row),1)\n return", "def add_row(self, row):\r\n self.rows.append(row)", "def add(self, row):\n for t in [self.x, self.y]:\n for col in t:\n col.add(row.cells[col.at])", "def append_row(self, row=None):\n self.set_row(self.size, row)", "def append(self, *row):\n\n if len(row) != len(self.columns):\n raise ValueError, \"row has %d values for %d columns\" \\\n % (len(row), len(self.columns))\n # Distribute the values in the row to the columns.\n for column, value in zip(self.columns, row):\n column.append(value)", "def _add_from_list(self, row) :\n\n data = [0]\n data.extend(row[:len(self.cols)-1])\n cols = self.cols[:len(data)]\n self._insert_internal(cols, data)", "def rows(self, row):\n self.row += row", "def matrix_add():", "def add_row(self, row):\n ...", "def add_dummy_location_to_matrix(matrix):\n matrix = [row + [0] for row in matrix]\n last_row = [0 for _ in range(len(matrix) + 1)]\n matrix.append(last_row)\n return matrix", "def addRow(self, *row):\n self.insertRow(self._height, *row)", "def addrow(self, rowlabel=None, cvalues=None):\n \n if cvalues is None:\n cvalues = []\n self.rowcount += 1\n if rowlabel is None:\n self.rowlabels.append(str(self.rowcount))\n else:\n self.rowlabels.append(rowlabel)\n if not spssaux._isseq(cvalues):\n cvalues = [cvalues]\n self.columnvalues.extend(cvalues)", "def add_row(df, row):\n df.loc[df.shape[0]] = row", "def add_column(matrix):\n import numpy as np\n shape = np.shape(matrix)\n if matrix is np.zeros(shape):\n pass", "def add_row(self, values):\n if len(values) != len(self.column_names):\n raise ValueError(\n 'Number of values does not match number of columns'\n )\n\n self.rows.append(values)", "def Append(self, row):\n self._rows.append(row)", "def add_row(M, a, i1, i2):\n P = zeros(*M.shape)\n P[i2, i1] = 1\n return M + a * P * M", "def add_col(M, a, j1, j2):\n Q = zeros(*M.shape)\n Q[j1, j2] = 1\n return M + a * M * Q", "def add_row(self):\n if len(self._grid) == 0 or len(self._grid[0]) == 1:\n self._grid.append([None])\n elif len(self._grid[0]) > 1:\n row = [None for _ in range(len(self._grid[0]))]\n self._grid.append(row)\n return True", "def extend_rows(self, matrix):\n row_count, column_count = matrix.size[:2]\n if column_count != self.column_count:\n raise ValueError\n self.row_count += row_count\n self.size = (self.row_count, self.column_count)\n base_row_count = self.row_count\n for key, value in matrix.data.items():\n row, column = key[:2]\n self.set((base_row_count + row, column), value)\n return self", "def insertRow(self, index, *row):\n if ((len(row) == 1) and (type(row[0]) in MATRIX_VALID_COLLECTIONS)):\n row = row[0]\n if self._width:\n if not (len(row) == self._width):\n raise ValueError('Improper length for new row: %d, should be %d' % (len(row), self._width))\n else:\n self._width = len(row)\n self._height += 1\n # make a deep copy\n newrow = list()\n for item in row:\n if not (type(item) in MATRIX_VALID_TYPES):\n message = \"Values must be of type \"\n for t in range(len(MATRIX_VALID_TYPENAMES)):\n if t:\n message += ' or '\n 
message += \"'%s'\" % MATRIX_VALID_TYPENAMES[t]\n raise TypeError(message)\n newrow.append(item)\n self._value.insert(index, newrow)", "def cols(self, col):\n self.col += col", "def add_row(self, *column_data):\n raise NotImplementedError", "def add_to_row(M, i, j):\n N = copy.deepcopy(M)\n N[i] = 1 * np.logical_xor(N[i], N[j])\n return N", "def addMine(self, row, col):\n if not self.isMine(row, col):\n self.board[row, col] = 1\n # Update neighbors array\n for neighborRow, neighborCol in self.getNeighbors(row, col):\n self.mines[neighborRow, neighborCol] += 1", "def addRow(self, index: int) -> None:\n ...", "def append_row(self, values):\n self.range(self._op.max_row + 1, 1, len(values)).values = values", "def add_row(self, row_id):", "def newrow(self, rowlist):\n if len(rowlist) > 0:\n if islist(rowlist[0]):\n for row in rowlist:\n self.newrow(row)\n elif len(rowlist) == self.x:\n for x in xrange(0, len(rowlist)):\n rowlist[x] = self.prepare(rowlist[x])\n self.a.append(rowlist)\n self.y += 1\n else:\n raise IndexError(\"Unequal matrix row lengths for newrow of \"+str(self.x)+\" and \"+str(len(rowlist)))", "def fill_row(row, x):\n row.append(x)\n return row", "def add_to_column(M, i, j):\n N = M.transpose()\n return add_to_row(N, i, j).transpose()", "def set_matrix_row(matrix, new_vector, row):\n set_matrix_cell(matrix, new_vector.x, row, 0)\n set_matrix_cell(matrix, new_vector.y, row, 1)\n set_matrix_cell(matrix, new_vector.z, row, 2)", "def new_row( self, delta_row = 1, ):\n self.ix_row += delta_row\n self.ix_col = 0", "def concatMatrix(self, a, l, cols):\n l_i = l * np.identity(cols)\n concat = np.concatenate((a, l_i))\n\n return concat", "def add_row(self, row_id):\n TODO('https://github.com/posterior/treecat/issues/27')", "def addrow(self, y, addlist):\n for x in xrange(0, self.x):\n self.store(y,x, self.retrieve(y,x)+addlist[x])", "def insert(self, row_values):\n if len(row_values) != len(self.columns):\n raise TypeError(\"wrong number of elements\")\n\n self.rows += [dict(zip(self.columns, row_values))]", "def madd(self, matrix):\n try:\n result_matrix = [[0 for col in range(len(matrix[0]))] for row in range(len(matrix))]\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n result_matrix[i][j] = self.matrix[i][j] + matrix[i][j]\n self.matrix = result_matrix\n except IndexError:\n pass\n pass", "def addRowSIntoRowD(A,rs,rd):\n for col in range(len(A[rd])):\n A[rd][col] += A[rs][col]", "def add_zero(matrix):\n if len(matrix.shape) == 2:\n matrix = numpy.insert(matrix, 0, 0, axis=1)\n matrix = numpy.insert(matrix, matrix.shape[1], 0, axis=1)\n row01 = numpy.zeros((matrix.shape[1]), numpy.uint8)\n matrix = numpy.insert(matrix, 0, row01, axis=0)\n matrix = numpy.insert(matrix, matrix.shape[0], row01, axis=0)\n\n if len(matrix.shape) == 3:\n pixel = numpy.zeros((matrix.shape[2]), numpy.uint8)\n matrix = numpy.insert(matrix, 0, pixel, axis=1)\n matrix = numpy.insert(matrix, matrix.shape[1], pixel, axis=1)\n row = numpy.zeros((1, matrix.shape[1], 3), numpy.uint8)\n matrix = numpy.insert(matrix, 0, row, axis=0)\n matrix = numpy.insert(matrix, matrix.shape[0], row, axis=0)\n\n return matrix", "def add(self, *args):\n if (len(args) == 1) and type(args[0]) in [list, tuple]:\n row = args[0]\n else:\n row = args\n if self.headings:\n assert len(row) == len(self.headings), 'Expected {} columns but got {}'.format(len(self.headings), len(row))\n self.root.append({name: str(row[pos]) for (pos, name) in enumerate(self.headings)})\n else:\n self.root.append([col for col in row])", "def 
append_rows(self, rows):\n for row in rows:\n self.append_row(row)", "def addMofRowSIntoRowD(A,m,rs,rd):\n for col in range(len(A[rd])):\n A[rd][col] += (A[rs][col])*m", "def push_row(row, left=True):\n r = 0\n row = row[:] if left else row[::-1]\n new_row = [item for item in row if item]\n for i in range(len(new_row)-1):\n if new_row[i] and new_row[i] == new_row[i+1]:\n r += new_row[i] * 2\n new_row[i], new_row[i+1:] = new_row[i]*2, new_row[i+2:]+[0]\n new_row += [0]*(len(row)-len(new_row))\n return (new_row if left else new_row[::-1]), r", "def write(self, row):\n self.append_rows.append(tuple(row))", "def addRow(self, row):\n self.__data.append(row.copy())\n # We may need to resize the table, to fit the new data\n for key in row.keys():\n if len(row[key]) > self.__widths[key]:\n self.__widths[key] = len(row[key])\n self.__makeFormatString()\n self.__refreshContent()\n if self.__selectedRow == -1:\n self.__selectedRow = 0\n lines = len(self.__data)\n if self.__firstShownLine <= lines - self.height + 2 and \\\n self.__autoScroll:\n # We need to scroll everything upwards\n self.scrollDown()\n if self.__selectedRow < self.__firstShownLine:\n self.__selectedRow = self.__firstShownLine\n if self._focused:\n self._window.attron(curses.A_BOLD)\n self.__printRow(self.__firstShownLine)\n self._window.attroff(curses.A_BOLD)\n else:\n if self._focused and self.__selectedRow == lines - 1:\n self._window.attron(curses.A_BOLD)\n self.__printRow(lines - 1)\n self._window.attroff(curses.A_BOLD)", "def _plus_minus_matrix(num_rows):\n n = num_rows - 1\n mask = np.array(\n [\n [x == '1' for x in np.binary_repr(k, width=(n + 1))]\n for k in range(2**n)\n ]\n ).T\n m = np.ones(shape=mask.shape)\n m[mask] = -1\n return m", "def _add_row(self, index):\n if index is None:\n index = self.size\n\n if index < self.size:\n raise ValueError(f\"Duplicate row index: {index}\")\n\n for empty in range(self.size, index):\n self._add_row(empty)\n\n self._data.append([None] * len(self._columns))\n\n return self.size - 1", "def add_rows(array, rows=1):\n # TODO: error handling\n cols = array.shape[1]\n new_rows = np.empty((rows, cols), dtype=np.object)\n new_array = np.concatenate((array, new_rows),\n axis=0)\n return new_array", "def _fcn_add_score_row(self):\n # Increase length :\n self._scoreTable.setRowCount(self._scoreTable.rowCount() + 1)", "def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False", "def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False", "def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False", "def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False", "def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False", "def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False", "def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False", "def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False", "def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False", "def add_square(self, row, col):\n square = []\n r, c = row, col\n while r < row + self.r_size:\n while c < col + self.c_size:\n square.append((r, c))\n c += 1\n r += 1\n c = col\n return square", "def _add_column(self, column):\n if column is None:\n column = len(self._columns)\n\n if column in 
self._columns:\n raise ValueError(f\"Duplicate column name: {column}\")\n\n if isinstance(column, int):\n assert column >= len(self._columns)\n for empty in range(len(self._columns), column):\n self._add_column(empty)\n\n self._columns.append(column)\n for idx in self.index:\n row = self._data[idx]\n row.append(None)\n\n return len(self._columns) - 1", "def append_row(self, **kws):\n\t\tcolumns = []\n\t\t#NOTE: we have to initialize all columns, otherwise gtk returns None as cell value\n\t\tfor i, (name, _, _) in enumerate(self.COLUMNS):\n\t\t\tvalue = kws.pop(name, '')\n\t\t\tcolumns.append(i)\n\t\t\tcolumns.append(value)\n\t\tself.set(self.append(), *columns)\n\t\tif kws:\n\t\t\traise ValueError('no such column. %s' % kws.keys()[0])", "def sum_row(self, row):\n # Start by removing the zeros as they do not have any effect\n row_trimmed = [el for el in row if el != 0]\n # if the row has zero or one element, it stays identical\n if len(row_trimmed) == 0 or len(row_trimmed) == 1:\n new_row = row_trimmed\n # else if the row has more than two elements\n else:\n new_row = []\n # the points will be added if the element is equal to the next one.\n # We thus need to know if the current position was already added at\n # the previous iteration or if it needs to be added now\n already_added = False\n for i in range(len(row_trimmed[:-1])):\n if already_added:\n already_added = False\n else:\n if row_trimmed[i] == row_trimmed[i + 1]:\n # here we alse add the next element\n new_row.append(2 * row_trimmed[i])\n self.score += 2 * row_trimmed[i]\n already_added = True\n else:\n new_row.append(row_trimmed[i])\n # As we loop until the second to last element, one needs to check\n # whether the last element was added or not\n if not already_added:\n new_row.append(row_trimmed[-1])\n\n # we might need to add zeros for the new_row to be of the right size\n return new_row + [0] * (self.grid_size - len(new_row))", "def add_row(self, row):\n \n new_row = pd.DataFrame(data=[row], columns = self.table.columns) \n self.table = self.table.append(new_row, ignore_index=True)", "def addedrow(self, y, addlist):\n out = self[y]\n for x in xrange(0, len(out)):\n out[x] = out[x]+addlist[x]\n return out", "def insert(self, row, col, value):\n if self.valid_square(row, col, value) or value == 0:\n self.puzzle[row][col] = value\n return True\n return False", "def append(self, row_or_table):\n row, table, inc = row_or_table, row_or_table, 1\n if not row:\n return\n if isinstance(table, Table):\n row, inc = table.get_columns(*self.column_labels), table.num_rows\n for i, column in enumerate(self._columns):\n self._columns[column] = np.append(self[column], row[i])\n self._num_rows = self.num_rows + inc\n return self", "def add_rows(self, rows) :\n\n converted = [self._convert_row(row) for row in rows]\n self._bulk_add_rows(converted)\n self.version += 1", "def InsertRow(rowIndex,name=\"\",label=\"\",Matrix=None):\n if(Matrix == None):\n from globals import Matrix\n rowToInsertBefore = Matrix[rowIndex].Member\n\n newRow = Matrix.InsertBlankRowAfter(rowToInsertBefore,name,label)\n Matrix.SwitchRows(rowToInsertBefore.DataIndex,newRow.DataIndex)\n return Matrix[rowIndex]", "def _add_from_dict(self, row) :\n\n data = [row.get(col, None) for col in self.cols]\n self._insert_internal(self.cols, data)", "def add_row(self, values, vers_inc=1) :\n\n if type(values) == type({}) :\n self._add_from_dict(values)\n elif type(values) == type([]) :\n self._add_from_list(values)\n elif type(values) == type(()) :\n self._add_from_list(list(values))\n 
elif type(values) == RowReference :\n self._add_from_dict(values.as_dict())\n else :\n raise Exception('Don''t know how to add %s ' % str(values))\n\n self.version += vers_inc", "def appendRow(self, contents = None):\n\n\t\t\t\t#Find the last row\n\t\t\t\trow = len(tuple(self.thing.iter_rows())) + 1\n\n\t\t\t\t#Write to cells\n\t\t\t\tif ((contents != None) and (len(contents) != 0)):\n\t\t\t\t\tfor column, item in enumerate(contents):\n\t\t\t\t\t\tself.setCell(row, column + 1, item)\n\t\t\t\telse:\n\t\t\t\t\tself.setCell(row, 1, \" \")", "def add_human(self, row, col):\r\n self._human_list.append((row, col))", "def fill_col(col, x):\n col.append(x)\n return col", "def makeRow(self, row):\n for col in range(self.width):\n count = self.fractal.calculateIterations(row, col)\n color = self.grad.getColor(count)\n self.img.put(color, (col, row))", "def add_human(self, row, col):\n self._human_list.append((row,col))", "def AppendRows(self, numRows=1): # real signature unknown; restored from __doc__\n return (self.GetNumberRows() + numRows)", "def AppendRows(self, numRows=1): # real signature unknown; restored from __doc__\n return (self.GetNumberRows() + numRows)", "def AppendRows(self, numRows=1): # real signature unknown; restored from __doc__\n return (self.GetNumberRows() + numRows)", "def AppendRows(self, numRows=1): # real signature unknown; restored from __doc__\n return (self.GetNumberRows() + numRows)", "def AppendRows(self, numRows=1): # real signature unknown; restored from __doc__\n return (self.GetNumberRows() + numRows)", "def AppendRows(self, numRows=1): # real signature unknown; restored from __doc__\n return (self.GetNumberRows() + numRows)", "def AppendRows(self, numRows=1): # real signature unknown; restored from __doc__\n return (self.GetNumberRows() + numRows)", "def upload_matrix(row_names, col_names, matrix):", "def AppendRows(self, numRows = 1):\n for i in range(numRows):\n self.data = numpy.vstack((self.data,\n numpy.array([''] * self.data.shape[1], dtype = numpy.object),\n ))\n self.rowmask = numpy.append(self.rowmask, numpy.zeros((numRows,), dtype = numpy.bool))\n\n msg = wx.grid.GridTableMessage(self,\n wx.grid.GRIDTABLE_NOTIFY_ROWS_APPENDED,\n numRows)\n #if not self._batchcount:\n # self.GetView().ProcessTableMessage(msg)\n self.GetView().ProcessTableMessage(msg)\n return True", "def add_human(self, row, col):\n self._human_list.append((row, col))", "def make_matrix(rows, columns):\n\tmatrix = []\n\tfor row in range(rows):\n\t\tmatrix += [[0] * columns]\n\t\t\n\treturn matrix", "def addColumnValues(self, column):\n nr1 = self.data.shape[1]\n nr = len(column)\n if nr1 == 0:\n # case 1: empty table\n if nr == 0:\n # case 1a: we're just adding a name\n self.data = numpy.reshape(self.data, (1, 0))\n pass\n else:\n # case 1b: we're adding a column of values\n self.data = numpy.reshape(numpy.array(column), (1, nr))\n pass\n pass\n else:\n # case 2: non-empty table\n if nr1 > 0 and nr != nr1:\n raise Exception(\"New column must have the same length as existing ones %s %s\"%(nr1,nr))\n new_column = numpy.reshape(numpy.array(column), (1, nr))\n self.data = numpy.concatenate((self.data, new_column))\n pass\n return", "def addOnes(x,m):\n n = x.size/m\n one = np.ones((m,1))\n x = x.reshape((m,n))\n judge = np.sum(x[:,0] == one.flatten())\n if judge != m:\n x = np.hstack((one,x))\n return x", "def add_columns(array, cols=1):\n # TODO: error handling\n rows = array.shape[0]\n new_cols = np.empty((rows, cols), dtype=np.object)\n new_array = np.concatenate((array, new_cols),\n 
axis=1)\n return new_array", "def _add_row(self, w2):\n done = []\n row = {}\n while len(done) != len(self.columns):\n for col in self.columns:\n if col in done:\n continue\n\n ok = self._add_item(w2, col, row)\n if ok:\n done.append(col)\n\n self.rows.append(row)", "def AddColumnsInRow(self, r, ncol):\n return _table.Table_AddColumnsInRow(self, r, ncol)", "def AddAColumnInRow(self, r):\n return _table.Table_AddAColumnInRow(self, r)", "def add_rows(self):\n for row in self.rows:\n self.table.add_row(row)", "def add_data(self, rowdata):\n if not rowdata.keys():\n # No columns were specified\n return\n for colnam in rowdata.keys():\n # Check the the column is actually defined in\n # in the table\n try:\n self.list_columns().index(colnam)\n except ValueError:\n # The column name wasn't found\n raise ValueError(\n \"Column \" + str(colnam) + \" is not defined in the table\"\n )\n for icol in range(0, self.ncolumns()):\n # Look up whether the column has an\n # explicit value assigned\n colnam = self.table_column(icol).title()\n if colnam in rowdata:\n self.table_column(icol).append(rowdata[colnam])\n else:\n # Assign a null value\n self.table_column(icol).append(\"*\")", "def AppendRows(self, numRows=1): # real signature unknown; restored from __doc__\n return (self.GetNumberRows() + numRows)", "def AppendRows(self, numRows=1): # real signature unknown; restored from __doc__\n return (self.GetNumberRows() + numRows)", "def insert(self, row):\n if not self.loaded:\n print(\"Database is not loaded\")\n return False\n\n self.rows.append(row)\n return True" ]
[ "0.7198061", "0.716274", "0.6843265", "0.6479029", "0.6377876", "0.6279669", "0.62465566", "0.6215555", "0.62106025", "0.62064993", "0.6145421", "0.6095104", "0.60865563", "0.6051224", "0.6049674", "0.6012048", "0.59923834", "0.5945139", "0.5912783", "0.58509195", "0.58039695", "0.5795468", "0.57950115", "0.57943153", "0.5775309", "0.57458436", "0.57408446", "0.57085854", "0.5702599", "0.56984246", "0.56856054", "0.5669246", "0.56562895", "0.56349593", "0.5623968", "0.55755544", "0.5543129", "0.553926", "0.5529327", "0.5518935", "0.55178404", "0.5508943", "0.54644644", "0.54547065", "0.5444359", "0.54301584", "0.54110736", "0.54104835", "0.5399807", "0.5396438", "0.5362191", "0.5360749", "0.535985", "0.535985", "0.535985", "0.535985", "0.535985", "0.535985", "0.535985", "0.535985", "0.535985", "0.5358392", "0.5334478", "0.5321349", "0.5314868", "0.52892977", "0.52789235", "0.526818", "0.52597857", "0.5259465", "0.52560437", "0.5251636", "0.5238305", "0.52322614", "0.5208832", "0.5204863", "0.5189858", "0.51862013", "0.5178162", "0.5178162", "0.5178162", "0.5178162", "0.5178162", "0.5178162", "0.5178162", "0.5170611", "0.51665103", "0.5165336", "0.5152392", "0.51202565", "0.51191425", "0.51126933", "0.5108861", "0.509967", "0.5096612", "0.5090528", "0.5083401", "0.5078757", "0.5078757", "0.50722665" ]
0.6547947
3
Starting at the current column header, shift to the right col_shift times
def get_header(col_current, col_shift):
    header = col_current
    for i in range(col_shift):
        header = header.right
    return header
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shift_column(self, coords, direction):\n self.shift_cells(self.get_column(coords, direction), direction)", "def rollback(self) -> None:\n for k in self._moved_cols:\n self._cols[k].move_back()", "def shift_right(self):\n self.pointer = (self.pointer + 1) % len(self.data)", "def new_column( self, delta = 1, ):\n self.ix_row = 0\n self.ix_col += delta", "def draw_next_column(self):\n self.xPos += self.XCOLUMNSKIP + self.XCOLUMNSEP\n self.yPos = self.YORIGIN + Blender.Window.GetAreaSize()[1]", "def cols(self, col):\n self.col += col", "def col_data_mover_at(row, col):\n if col == 0:\n return NAME_SCHEME[\"memory move\"].format(prefix=f\"l{row}\")\n else:\n return NAME_SCHEME[\"register move right\"].format(pe=f\"pe_{row}_{col - 1}\")", "def _reset_header(self):\n new_header = []\n for col_name in self.header:\n is_left = self.left_cols.get(col_name)\n if is_left:\n new_header.append(col_name)\n self.header = new_header", "def shift(row):\r\n new_lst = []\r\n for i in range(4):\r\n if row[i] != 0:\r\n new_lst.append(row[i])\r\n if len(new_lst) < len(row):\r\n new_lst.extend([0] * (len(row) - len(new_lst)))\r\n row = new_lst\r\n\r\n return row", "def _modify_columns(self, cols, X, y=None):", "def _drag_col(self, event):\n x = self._dx + event.x # get dragged column new left x coordinate\n self._visual_drag.place_configure(x=x) # update column preview position\n # if one border of the dragged column is beyon the middle of the\n # neighboring column, swap them\n if (self._dragged_col_neighbor_widths[0] is not None and\n x < self._dragged_col_x - self._dragged_col_neighbor_widths[0] / 2):\n self._swap_columns('left')\n elif (self._dragged_col_neighbor_widths[1] is not None and\n x > self._dragged_col_x + self._dragged_col_neighbor_widths[1] / 2):\n self._swap_columns('right')\n # horizontal scrolling if the cursor reaches the side of the table\n if x < 0 and self.xview()[0] > 0:\n # scroll left and update dragged column x coordinate\n self.xview_scroll(-10, 'units')\n self._dragged_col_x += 10\n elif x + self._dragged_col_width / 2 > self.winfo_width() and self.xview()[1] < 1:\n # scroll right and update dragged column x coordinate\n self.xview_scroll(10, 'units')\n self._dragged_col_x -= 10", "def _shift(self, s):\n start_pos = self._relative_head_pos()\n l = 1 + 2 * self.shift_length\n shift = int(s * l - 0.000000001) - int(l / 2)\n for s in range(abs(shift)):\n if shift > 0:\n if self.head_pos == len(self.memory) - 1 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((self.memory, np.zeros((1, self.memory_unit_size))), 0)\n self.head_pos += 1\n else:\n self.head_pos = (self.head_pos + 1) % self.max_memory\n else:\n if self.head_pos == 0 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((np.zeros((1, self.memory_unit_size)), self.memory), 0)\n self.left_expands += 1\n else:\n self.head_pos = (self.head_pos - 1) % self.max_memory\n if self.history is not None:\n self.history[\"loc\"][-1].append((start_pos, 0.1))\n return np.sign(shift)", "def _swapcolumns(self):\n return self.reindex_axis([self.columns[1], self.columns[0]], axis=1)", "def rel_shift(x, klen=-1):\n x_size = x.shape\n\n x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3])\n x = x[1:, ...]\n x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3])\n # x = x[:, 0:klen, :, :]\n x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long))\n\n return x", "def __rshift__(self, other: Any) -> ColumnOperators:\n return self.operate(rshift, other)", "def 
_shift(BD):\n bsz, n_head, max_len, _ = BD.size()\n zero_pad = layers.zeros(shape=(bsz, n_head, max_len, 1))\n BD = layers.reshape(x=layers.concat([BD, zero_pad], axis=-1),\n shape=(bsz, n_head, -1, max_len))\n BD = layers.reshape(x=BD[:, :, :-1], shape=(bsz, n_head, max_len, -1))\n BD = BD[:, :, :, max_len:]\n return BD", "def shift_column(code, n, s):\n def shift(s, n):\n if n == 0 or len(s) == 1:\n return s\n else:\n return shift(s[-1] + s[:-1], n-1)\n\n if type(code) is not list:\n return code\n else:\n n = int(n)\n s = int(s) % len(code)\n if s > 0 and n < len(code[0]):\n column = select_column(code, n)\n column = shift(column, s)\n for i in range(0, len(column)):\n new = list(code[i])\n new[n] = column[i]\n code[i] = ''.join(new)\n return code\n else:\n return code", "def rshift(self):\n self.lcd_byte(0x1C, LCD_CMD)", "def _rel_shift_legacy(self, xs):\n bs, qlen, klen, n_heads = xs.size()\n xs = xs.permute(1, 2, 0, 3).contiguous().view(qlen, klen, bs * n_heads)\n zero_pad = xs.new_zeros((qlen, 1, bs * n_heads))\n xs_shifted = torch.cat([zero_pad, xs], dim=1).view(klen + 1, qlen, bs * n_heads)[1:].view_as(xs)\n return xs_shifted.view(qlen, klen, bs, n_heads).permute(2, 0, 1, 3)", "def appforth(df, line):\n df.loc[-1]=line\n df.index = df.index + 1 # shifting index\n df = df.sort_index() # sorting by index\n return df", "def shift(self, direction):\n direct, pos = tuple(direction)\n\n board = {'L': self.rows, 'R': self.rows, 'D': self.cols, 'U': self.cols}[direct]\n board[int(pos)].shift(direction=self.direct[direct])", "def resize_last_column(self):\n\n total_width = 0\n last_width = 0\n last_column = -1\n for i in range(0, self.column_count):\n w = self.GetColumnWidth(i)\n total_width += w\n last_width = w\n if w > 0:\n last_column = i\n\n if total_width < (self.GetSize()[0] - 20) and last_column > -1:\n self.SetColumnWidth(last_column, last_width + self.GetSize()[0] - total_width)", "def shift(self, days):\n # Since to predict close price of day n we need the indicators\n # of day n-1 we move the above columns days_forward to the bottom\n self.df['MA'] = self.df['MA'].shift(days)\n self.df['WMA'] = self.df['WMA'].shift(days)\n self.df['MOM'] = self.df['MOM'].shift(days)\n self.df['STOCH'] = self.df['STOCH'].shift(days)\n self.df['STOCHD'] = self.df['STOCHD'].shift(days)\n self.df['MACD'] = self.df['MACD'].shift(days)\n self.df['WILLIAMS'] = self.df['WILLIAMS'].shift(days)\n self.df['ADL'] = self.df['ADL'].shift(days)\n self.df['CCI'] = self.df['CCI'].shift(days)\n if 'sent_trends' in self.df.columns:\n self.df['sent_trends'] = self.df['sent_trends'].shift(days)\n\n # Drop rows with nan\n self.df.dropna(inplace=True)", "def add_end_caps(self):\n\n # Far top dummy row (first row above array is NOT flipped if even number of rows)\n flip_dummy = (self.row_size + self.rbl[1]) % 2\n dummy_row_offset = self.bitcell_offset.scale(0, self.rbl[1] + flip_dummy) + self.bitcell_array_inst.ul()\n self.dummy_row_insts[1].place(offset=dummy_row_offset,\n mirror=\"MX\" if flip_dummy else \"R0\")\n\n # Far bottom dummy row (first row below array IS flipped)\n flip_dummy = (self.rbl[0] + 1) % 2\n dummy_row_offset = self.bitcell_offset.scale(0, -self.rbl[0] - 1 + flip_dummy) + self.unused_offset\n self.dummy_row_insts[0].place(offset=dummy_row_offset,\n mirror=\"MX\" if flip_dummy else \"R0\")\n # Far left dummy col\n # Shifted down by the number of left RBLs even if we aren't adding replica column to this bitcell array\n dummy_col_offset = self.bitcell_offset.scale(-len(self.left_rbl) - 1, -self.rbl[0] 
- 1) + self.unused_offset\n self.dummy_col_insts[0].place(offset=dummy_col_offset)\n\n # Far right dummy col\n # Shifted down by the number of left RBLs even if we aren't adding replica column to this bitcell array\n dummy_col_offset = self.bitcell_offset.scale(len(self.right_rbl), -self.rbl[0] - 1) + self.bitcell_array_inst.lr()\n self.dummy_col_insts[1].place(offset=dummy_col_offset)", "def adjustFrame(frame, shifts):\n if min(shifts)<0:\n botShifts = [colShift-min(shifts) for colShift in shifts]\n else:\n botShifts = [colShift for colShift in shifts]\n topShifts = [max(botShifts)-shift for shift in botShifts]\n newFrame=np.empty([frame.shape[1],frame.shape[0]+max(botShifts)])\n for i, col in enumerate(frame.T):\n newCol = np.concatenate((np.zeros(topShifts[i]),col,np.zeros(botShifts[i])))\n newFrame[i]=newCol\n newFrame=newFrame.T\n \n return newFrame", "def adjustFrame(frame, shifts):\n if min(shifts)<0:\n botShifts = [colShift-min(shifts) for colShift in shifts]\n else:\n botShifts = [colShift for colShift in shifts]\n topShifts = [max(botShifts)-shift for shift in botShifts]\n newFrame=np.empty([frame.shape[1],frame.shape[0]+max(botShifts)])\n for i, col in enumerate(frame.T):\n newCol = np.concatenate((np.zeros(topShifts[i]),col,np.zeros(botShifts[i])))\n newFrame[i]=newCol\n newFrame=newFrame.T\n \n return newFrame", "def shift(self, col_shift=0, row_shift=0):\n\n if (self.min_col + col_shift <= 0\n or self.min_row + row_shift <= 0):\n raise ValueError(\"Invalid shift value: col_shift={0}, row_shift={1}\".format(col_shift, row_shift))\n self.min_col += col_shift\n self.min_row += row_shift\n self.max_col += col_shift\n self.max_row += row_shift", "def width(self, width):\n self.col += width", "def __lshift__(self, other):\n for c in self.__table__.columns:\n self.__setattr__(c.name, other.__getattribute__(c.name))", "def __lshift__(self,fpath):\n raise NotImplemented", "def shift_df(df, shift, shift_names):\n\n other_names = [name for name in df.columns if name not in shift_names]\n\n df1 = df.loc[:, shift_names].drop(df.head(shift).index)\n df2 = df.loc[:, other_names].drop(df.tail(shift).index)\n df2.index += shift # need to match index, otherwise concat will ignore offset\n new_df = pd.concat((df1, df2), axis=1, ignore_index=True, join='inner')\n new_df.columns = shift_names + other_names\n del df1, df2\n df_shifted = new_df\n del new_df\n\n # Reset index\n df_shifted.reset_index(inplace=True)\n df_shifted = df_shifted.drop(['index'], axis=1)\n\n return df_shifted", "def shift_frame(self, x):\n return np.concatenate([x[:, self.hop_length:], np.zeros((1, self.hop_length))], axis=1)", "def __lshift__(self, other: Any) -> ColumnOperators:\n return self.operate(lshift, other)", "def alignshift(self, hits):\n return hits.shift(self.horizon, axis=0) \\\n .align(self.truth, axis=0, join='right')[0]", "def shift(x, row_ind, col_ind, row_axis=0, col_axis=1, channel_axis=2,\n fill_mode='constant', cval=0.):\n h, w = x.shape[row_axis], x.shape[col_axis]\n tx = row_ind - (h / 2)\n ty = col_ind - (w / 2) \n translation_matrix = np.array([[1, 0, tx],\n [0, 1, ty],\n [0, 0, 1]])\n\n transform_matrix = translation_matrix # no need to do offset\n x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)\n \n return x", "def _offset_subtract(col):\n offset = col.values[:-1]\n offset = np.insert(offset, 0, 0)\n return col - offset", "def _pos2col(self, start, cpos, **opts):\n tw = opts.get('tab_width', self.TAB_WIDTH)\n tt = opts.get('tab_type', 'stop')\n if tt == 'fixed':\n\n def 
advance(p):\n return p + tw\n else:\n\n def advance(p):\n return tw * ((p + tw) // tw)\n\n colnum = 0\n while cpos > 0:\n if self.input[start] == '\\t':\n colnum = advance(colnum)\n else:\n colnum += 1\n start += 1\n cpos -= 1\n return colnum", "def shift_left(self):\n self.pointer = (self.pointer - 1) % len(self.data)", "def _justify(self):\n minLengths = [max([max(map(len, row[i].split() + [''])) for row in self._rows if len(row) > 0])\n for i in range(self._colsNum)]\n shifts = [w - mw for mw, w in zip(minLengths, self._widthes)]\n # length = len(shifts)\n borrow = zip(self._colsRange, shifts)\n borrow.sort(lambda a, b: cmp(a[1], b[1]))\n delta = [0] * self._colsNum\n\n donorIdx = self._colsNum - 1\n recIdx = 0\n while True:\n\n curDonation = borrow[donorIdx][1]\n curRec = borrow[recIdx][1]\n\n if curRec >= 0 or curDonation <= 0:\n break\n\n curDelta = min(curDonation, -curRec)\n curDonation -= curDelta\n curRec += curDelta\n delta[borrow[donorIdx][0]] -= curDelta\n delta[borrow[recIdx][0]] += curDelta\n\n if curDonation == 0:\n donorIdx -= 1\n\n if curRec == 0:\n recIdx += 1\n\n for i in self._colsRange:\n self._widthes[i] += delta[i]", "def _col2pos(self, start, colnum, **opts):\n tw = opts.get('tab_width', self.TAB_WIDTH)\n tt = opts.get('tab_type', 'stop')\n if tt == 'fixed':\n\n def advance(p):\n return p + tw\n else:\n\n def advance(p):\n return tw * ((p + tw) // tw)\n\n epos = cpos = 0\n while epos < colnum:\n if self.input[start] == '\\t':\n epos = advance(epos)\n else:\n epos += 1\n start += 1\n cpos += 1\n return cpos - (epos > colnum)", "def _rel_shift(self, xs):\n bs, qlen, klen, n_heads = xs.size()\n xs = xs.permute(0, 3, 2, 1)\n idx = torch.arange(klen, device=xs.device)\n k_idx, q_idx = idx.unsqueeze(0), idx.unsqueeze(1)\n rel_pos_idx = torch.abs(k_idx - q_idx)\n if klen != qlen:\n rel_pos_idx = rel_pos_idx[:, :qlen]\n mask = xs.new_ones(qlen, klen, dtype=torch.bool if torch_12_plus else torch.uint8)\n mask = torch.tril(mask, diagonal=0).transpose(1, 0)\n rel_pos_idx[mask] *= -1\n rel_pos_idx = klen - qlen - rel_pos_idx\n rel_pos_idx[rel_pos_idx < 0] *= -1\n if self.clamp_len > 0:\n rel_pos_idx.clamp_(max=self.clamp_len)\n rel_pos_idx = rel_pos_idx.expand_as(xs)\n x_shift = torch.gather(xs, dim=2, index=rel_pos_idx)\n x_shift = x_shift.permute(0, 3, 2, 1)\n return x_shift", "def right_shift(key,shift):\n if shift > len(key):\n shift = shift % len(key)\n return key[-shift:] + key[:-shift]", "def shift_rows(state):\n state[1][0], state[1][1], state[1][2], state[1][3] = state[1][1], state[1][2], state[1][3], state[1][0]\n state[2][0], state[2][1], state[2][2], state[2][3] = state[2][2], state[2][3], state[2][0], state[2][1]\n state[3][0], state[3][1], state[3][2], state[3][3] = state[3][3], state[3][0], state[3][1], state[3][2]", "def scan(self, item, col):\n if item.nxt == col.token.typ:\n col.add(item.shifted())", "def shift(image,shift_x,shift_y):\n return np.roll(np.roll(image,shift_y,axis=0),shift_x,axis=1)", "def perfect_shift(y):\n return np.append([y[-1]],y[0:-1])", "def row_data_mover_at(row, col):\n if row == 0:\n return NAME_SCHEME[\"memory move\"].format(prefix=f\"t{col}\")\n else:\n return NAME_SCHEME[\"register move down\"].format(pe=f\"pe_{row - 1}_{col}\")", "def _transpose_shift(E):\n bsz, n_head, max_len, _ = E.size()\n zero_pad = layers.zeros(shape=(bsz, n_head, max_len, 1))\n E = layers.reshape(x=layers.concat([E, zero_pad], axis=-1),\n shape=(bsz, n_head, -1, max_len))\n indice = layers.arange(start=0, end=max_len, dtype=int)\n E = 
layers.index_select(input=E, index=indice, dim=-2)\n E = layers.transpose(E, perm=[0, 1, 3, 2])\n return E", "def shiftRight(self):\n if self.features==[]:\n raise StopIteration\n nextF = self.features[0]\n diff = nextF.start - self.winEnd + 1\n self.winEnd+=diff\n self.winStart+=diff\n # remove on the left side and ajust counts\n for i in reversed(range(0, len(self.winFeatures))):\n f = self.winFeatures[i]\n if f.start<self.winStart and (self.names==None or f.name in self.names):\n self.counts[f.name]-=1\n del self.winFeatures[i]\n\n # add on the right side and ajust counts\n self._fillWindow()", "def _output_padding_line(self):\n for i in range(self.num_new_columns):\n self._write_column(self.new_columns[i], '|')\n self.buf += ' '\n\n self._pad_horizontally(self.num_new_columns * 2)", "def shift(self):\n return self._shift", "def adjust_columns(self):\r\n for col in range(3):\r\n self.resizeColumnToContents(col)", "def right_shift_quirk(self):\n register = self.return_middle_registers(self.opcode)\n bits = self.registers[register[1]]\n self.registers[0xF] = bits & 0b1\n self.registers[register[0]] = self.registers[register[1]] >> 1\n logger.info(\"Shifted register V{} to the right into V{}({})\".format(\n register[1],\n register[0],\n hex(self.registers[register[0]])))", "def shift_zero(line):\n for index_i in range(0, len(line)):\n if line[index_i] != 0:\n key = line[index_i]\n index_j = index_i-1\n while index_j >= 0 and line[index_j] == 0:\n line[index_j+1] = line[index_j]\n index_j = index_j-1\n line[index_j+1] = key", "def ScrollRight(self, offset=100):\n if (self.translated == True):\n offset = offset * 3\n self.Show(self.displayedColumn + offset)", "def shift(self): # shift\n span,stack_top=self\n pos=span[1]\n nex=self.lattice.begins.get(pos,None) # nex is the list of next items\n if not nex : return []\n # nex[0], nex[1] ...\n\n s0,s1,s2=stack_top\n\n print('shift==========', self)\n rtns=[]\n for ne in nex :\n item=self.lattice[ne]\n ns=((item[0],item[1]),((ne,None,None),s0,s1[0]if s1 else None))\n print(ne,ns)\n rtns.append((256+ne,pickle.dumps(ns)))\n return rtns", "def _rotate_cw(self, table):\n return [ [ table[1][0], table[0][0] ],\n [table[1][1], table[0][1] ] ]", "def shift_to_opinion(i):\n if not i:\n return\n nonlocal header_els\n opinions[0][0:0] = header_els[-i:]\n header_els = header_els[:-i]", "def shiftcontroll(self, messagelength):\n if self._shiftmode == 1:\n self.lshift()\n elif self._shiftmode == 2:\n self.rshift()\n elif self._shiftmode == 3:\n\n excesslen = messagelength - self._width\n if excesslen > 0:\n if ((excesslen - self._shiftlen) > 0) and self._shift:\n self.lshift()\n self._shiftlen += 1\n if self._shiftlen == excesslen:\n self._shift = False\n self._shiftlen = 0\n else:\n self.rshift()\n self._shiftlen += 1\n if self._shiftlen == excesslen:\n self._shift = True\n self._shiftlen = 0", "def rotate_right(self, times: int):\n for i in range(0, times):\n new_rows = [''] * self.side\n self.tile_rows.reverse()\n\n for row in self.tile_rows:\n for i, ch in enumerate(row):\n new_rows[i] += ch\n\n self.tile_rows = new_rows", "def reorder_columns(df,first_cols=['']):\n\n last_cols = [col for col in df.columns if col not in first_cols]\n df = df[first_cols+last_cols]\n return(df)", "def timeshift(self, shift='random'):\n\n if shift == 'random':\n one_month = pd.Timedelta('30 days').value\n two_years = pd.Timedelta('730 days').value\n random_timedelta = - pd.Timedelta(random.uniform(one_month, two_years)).round('min')\n self.timeshift(random_timedelta)\n\n if 
not self.data.index.empty:\n if isinstance(shift, pd.Timestamp):\n timedeltas = self.data.index - self.data.index[0]\n self.data.index = shift.round('min') + timedeltas\n if isinstance(shift, pd.Timedelta):\n self.data.index += shift.round('min')\n self.data['date'] = self.data.index.map(lambda timestamp: timestamp.date())\n self.data['time'] = self.data.index.map(lambda timestamp: timestamp.time())\n else:\n if isinstance(shift, pd.Timestamp):\n timedeltas = self.data['timestamp'] - self.data['timestamp'].min()\n self.data['timestamp'] = shift.round('min') + timedeltas\n if isinstance(shift, pd.Timedelta):\n self.data['timestamp'] += shift.round('min')\n self.data['date'] = self.data['timestamp'].map(lambda timestamp: timestamp.date())\n self.data['time'] = self.data['timestamp'].map(lambda timestamp: timestamp.time())", "def move_columns_to_front ( infilename, outfilename, cols_to_move ):\n xcols = cols_to_move\n \n reader = csv.reader( open( infilename, 'rt' ), quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n writer = csv.writer( open( outfilename, 'wb' ), quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\n for row in reader:\n vals = row\n i = 0\n for x in xcols :\n vals[i], vals[x] = vals[x], vals[i]\n i += 1\n writer.writerow( vals )", "def lshift(self):\n self.lcd_byte(0x18, LCD_CMD)", "def ROW(x):\n return (x >> 3)", "def ROW(x):\n return (x >> 3)", "def apply_shift(text, shift):\n ### TODO.", "def remove_col(self, col_header):\n # Remove the column header from the header chain\n col_header.right.left = col_header.left\n col_header.left.right = col_header.right\n # Loop down through the column and remove the rows\n cell = col_header.down\n while cell != col_header:\n row_cell = cell.right\n # Move through all cells in this row and update their up/down links\n while row_cell != cell:\n row_cell.down.up = row_cell.up\n row_cell.up.down = row_cell.down\n row_cell.header.sum -= 1\n # Move on to the next cell in the row\n row_cell = row_cell.right\n # Move on to the next row\n cell = cell.down", "def shift_right(input, pad=2):\n return tf.concat((tf.ones_like(input[:, :1]) * pad, input[:, :-1]), 1)", "def _shift(self, c, mark):\n import pdb\n #pdb.set_trace()\n debug(\"sequence shift with mark %s\", mark)\n old_marked_left = self.left.marked\n marked_left = self.left.shift(c, mark) # based on current letter\n marked_right = self.right.shift(\n c, old_marked_left or (mark and self.left.empty))\n \n return (marked_left and self.right.empty) or marked_right\n \"\"\"\n if marked_right:\n return marked_right + 1\n else:\n # TODO, empties advance the counter\n return max(marked_left, self.right.empty)\n \"\"\"", "def _ProcessColumn(self, index, row, separator_width, skip_empty):\n record = row[index]\n last_index = len(row) - 1\n if isinstance(record, _Marker):\n if index == last_index:\n # TODO(b/148901171) Compute column widths of nested tables.\n return\n else:\n raise TypeError('Markers can only be used in the last column.')\n\n if _IsLastColumnInRow(row, index, last_index, skip_empty):\n self._SetWidth(index, 0)\n else:\n self._SetWidth(index, len(record) + separator_width)", "def shift(self, num):\n if not num:\n self._times, self._values = self._times, self._values\n elif num > 0:\n self._times, self._values = self._times[:-num], self._values[num:]\n else:\n self._times, self._values = self._times[-num:], self._values[:num]", "def rshift(self, count):\n self._c = (bitarray('0') * count) + self._c[:-count]", "def add_frame_shift(self, basis, axis_settings):\n angle, axis = 
angle_and_axis(basis)\n\n if angle == 0:\n axis = (0,0,1)\n\n if basis.include_translation:\n translation = basis.translation\n else:\n translation = (0,0,0)\n\n self.add_row([basis.axis_name,\"rotation\",\"detector\",basis.depends_on,\n str(axis[0]),str(axis[1]),str(axis[2]),\n str(translation[0]),\n str(translation[1]),\n str(translation[2]),\n basis.equipment_component])\n\n axis_settings.append([basis.axis_name, \"FRAME1\", str(angle), \"0\"])", "def shift_down(line, result):\n \n for index in range(len(line)):\n current = index\n next_greater_zero = -1\n if line[index] == 0:\n #while the next value is still zero move right\n while current + 1 < len(line) and line[current] == 0:\n current +=1\n #if value is not equal to zero save index\n #of the next >0 value to assign current index that value\n if line[current] != 0:\n next_greater_zero = current\n break\n #assign result[next_greater_zero] to line[next_greater_zero]\n #change line[next_greater_zero] to zero\n next_value = line[next_greater_zero]\n line[next_greater_zero] = 0\n result[index] = next_value\n else:\n result[index] = line[index]\n return result", "def shifted(self, shift_by):\n return self - shift_by", "def repair_column():\n turn_left()\n while front_is_clear():\n if no_beepers_present():\n put_beeper()\n move()\n if no_beepers_present():\n put_beeper()\n turn_around()\n while front_is_clear():\n move()\n turn_left()", "def pad_shift(x, shift, padv=0.0):\n if shift > 0:\n padding = torch.ones(x.size(0), shift, x.size(2)).to(x.device) * padv\n return torch.cat((padding, x[:, :-shift, :]), dim=1)\n elif shift < 0:\n padding = torch.ones(x.size(0), -shift, x.size(2)).to(x.device) * padv\n return torch.cat((x[:, -shift:, :], padding), dim=1)\n else:\n return x", "def shift(self, da, dim, shift):\n # TODO: generalize rolling function, allow custom shifts, handle\n # boundary conditions, etc.\n return da.roll(**{dim: shift})", "def _swap_columns(self, side):\n displayed_cols = self._displayed_cols\n i1 = self._dragged_col_index\n i2 = i1 + 1 if side == 'right' else i1 - 1\n if 0 <= i2 < len(displayed_cols):\n # there is a neighbor, swap columns:\n displayed_cols[i1] = displayed_cols[i2]\n displayed_cols[i2] = self._dragged_col\n self[\"displaycolumns\"] = displayed_cols\n if side == 'left':\n right = self._dragged_col_neighbor_widths[0]\n self._dragged_col_x -= right # update dragged column x coordinate\n # set new left neighbor width\n if i2 > 0:\n left = ttk.Treeview.column(self, displayed_cols[i2 - 1], 'width')\n else:\n left = None\n else:\n left = self._dragged_col_neighbor_widths[1]\n self._dragged_col_x += left # update x coordinate of dragged column\n # set new right neighbor width\n if i2 < len(displayed_cols) - 1:\n right = ttk.Treeview.column(self, displayed_cols[i2 + 1], 'width')\n else:\n right = None\n self._dragged_col_index = i2 # update dragged column index\n self._dragged_col_neighbor_widths = (left, right)", "def inv_shift_rows(state):\n state[1][0], state[1][1], state[1][2], state[1][3] = state[1][3], state[1][0], state[1][1], state[1][2]\n state[2][0], state[2][1], state[2][2], state[2][3] = state[2][2], state[2][3], state[2][0], state[2][1]\n state[3][0], state[3][1], state[3][2], state[3][3] = state[3][1], state[3][2], state[3][3], state[3][0]", "def move_right(self) -> None:\n if not self.buffer:\n return\n\n if self.index == self.buffer.end:\n return\n\n if self.buffer[self.index] != '\\n':\n self.index += 1", "def scrollDisplayRight(self):\n self.displayshift = self.LCD_DISPLAYMOVE | 
self.LCD_MOVERIGHT\n self.write_lcd(self.LCD_DATA_E1, self.LCD_CURSORSHIFT | self.displayshift)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_CURSORSHIFT | self.displayshift)", "def push_right (grid): \r\n \r\n for row in range (4):\r\n section = []\r\n for col in range (4):\r\n section.append(grid[row][3-col])\r\n add(section) \r\n for i in range (4):\r\n grid[row][i] = section[3-i]", "def rotate_left_90(board):\n new_board = board[:]\n for (col, item) in enumerate(board):\n new_board[item] = (len(board)-1) - col\n\n return new_board", "def _width_shift_(self, x: np.array, m: np.array) -> (np.array, np.array):\n # get a random sign for the shifting direction\n sign = np.random.randint(0, 2)\n shift_pix = np.random.randint(0, self.shift)\n x = shift(x, [0, sign*shift_pix])\n m = shift(m, [0, sign*shift_pix, 0], mode='nearest')\n return x,m", "def shift_down_right(gridcopy, row_num, column_num):\n result = []\n if location[0] != 0:\n current_row = gridcopy[location[0]]\n upper_row = gridcopy[location[0] - 1]\n current_row_lst = tuple_to_list(current_row)\n upper_row_lst = tuple_to_list(upper_row)\n current_row_lst[column_num] = upper_row_lst[column_num]\n upper_row_lst[column_num] = \"*\"\n current_row, upper_row = tuple(current_row_lst), \\\n tuple(upper_row_lst)\n board_lst = tuple_to_list(gridcopy)\n board_lst[row_num] = current_row\n board_lst[row_num - 1] = upper_row\n upper_altered = tuple(board_lst)\n result.append(upper_altered)\n if location[0] != self.n - 1:\n upper_row = gridcopy[location[0] + 1]\n lower_row = gridcopy[location[0]]\n upper_lst = tuple_to_list(upper_row)\n lower_lst = tuple_to_list(lower_row)\n lower_lst[location[1]] = upper_lst[location[1]]\n upper_lst[location[1]] = \"*\"\n upper_row, lower_row = tuple(upper_lst), tuple(lower_lst)\n big_lst = tuple_to_list(gridcopy)\n big_lst[location[0]] = lower_row\n big_lst[location[0] + 1] = upper_row\n changed = tuple(big_lst)\n result.append(changed)\n return result", "def shift_right_left(gridcopy, row_num, column_num):\n result = []\n # Extract the specific row to change.\n current_row = gridcopy[row_num]\n # Change the current_row to list in order to mutate.\n current_row_lst = tuple_to_list(current_row)\n if location[1] != 0:\n # Going left!\n # (\"5\", \"*\", \"6\") to (\"*\", \"5\", \"6\")\n current_row_lst[column_num] = current_row_lst[column_num - 1]\n current_row_lst[column_num - 1] = \"*\"\n # Switch back to tuple\n left_altered = tuple(current_row_lst)\n board_lst = tuple_to_list(gridcopy)\n board_lst[row_num] = left_altered\n result.append(tuple(board_lst))\n if location[1] != self.m - 1:\n # Going right!\n # (\"5\", \"*\", \"6\") to (\"5\", \"6\", \"*\")\n # Reset the values to swap right.\n current_row = gridcopy[row_num]\n current_row_lst = tuple_to_list(current_row)\n current_row_lst[column_num] = current_row_lst[column_num + 1]\n current_row_lst[column_num + 1] = \"*\"\n # Switch back to tuple\n right_altered = tuple(current_row_lst)\n board_lst = tuple_to_list(gridcopy)\n board_lst[row_num] = right_altered\n result.append(tuple(board_lst))\n return result", "def main():\n while front_is_clear():\n repair_column()\n for i in range(4):\n move()\n repair_column()", "def shift_img(img, shft_int = 1):\n no_cols = img[0].shape[1]\n lst_col = no_cols - 1\n col_sty = no_cols - abs(shft_int)\n\n # shift object to the left\n if shft_int < 0:\n shft_int = abs(shft_int)\n col_idx = torch.cat([torch.ones(shft_int, dtype = torch.bool),\n torch.zeros(col_sty, dtype = torch.bool)])\n cols = 
torch.reshape(img[0][0,:,col_idx], (no_cols,shft_int))\n cols_sum = torch.sum(cols)\n inval_shft = torch.is_nonzero(cols_sum)\n if inval_shft:\n raise ValueError('Consider shifting to the right for this image.')\n mod_img = torch.cat([img[0][0,:,~col_idx],cols], dim = 1)\n mod_img = torch.reshape(mod_img, (1,mod_img.shape[0], mod_img.shape[1]))\n mod_img = (mod_img,img[1])\n return mod_img\n \n # shift object to right\n col_idx = torch.cat([torch.zeros(col_sty, dtype = torch.bool),\n torch.ones(shft_int, dtype = torch.bool)])\n cols = torch.reshape(img[0][0,:,col_idx], (no_cols,shft_int))\n cols_sum = torch.sum(cols)\n inval_shft = torch.is_nonzero(cols_sum)\n if inval_shft:\n raise ValueError('Consider shifting to the left for this image.')\n \n mod_img = torch.cat([cols,img[0][0,:,~col_idx]], dim = 1)\n mod_img = torch.reshape(mod_img, (1,mod_img.shape[0], mod_img.shape[1]))\n mod_img = (mod_img,img[1])\n \n return mod_img", "def unremove_col(self, col_header):\n # Add the column head back into the chain\n col_header.right.left = col_header\n col_header.left.right = col_header\n # Loop up through the column and add the rows back in\n # Doing this in exactly the reverse order of the removing ensures that we return\n # to the state we were in before the removal\n cell = col_header.up\n while cell != col_header:\n row_cell = cell.left\n # Move through all cells in this row and update their up/down links\n while row_cell != cell:\n row_cell.down.up = row_cell\n row_cell.up.down = row_cell\n row_cell.header.sum += 1\n # Move on to the next cell in the row\n row_cell = row_cell.left\n # Move on to the next row\n cell = cell.up", "def shiftRight(board):\n\t# remove 0's in between numbers\n\tfor i in range(4):\n\t\tnums, count = [], 0\n\t\tfor j in range(4):\n\t\t\tif board[i][j] != 0:\n\t\t\t\tnums.append(board[i][j])\n\t\t\t\tcount += 1\n\t\tboard[i] = [0] * (4 - count)\n\t\tfor x in range(4 - count):\n\t\t\tboard[i] = [0] + board[i]", "def rshift(self, value):\n return self.clone().rshift_(value)", "def __set_column_width(self):\n for i in range(0, len(self.header_width)):\n self.view.setColumnWidth(i, self.header_width[i])", "def _start_drag_col(self, event):\n # identify dragged column\n col = self.identify_column(event.x)\n self._dragged_col = ttk.Treeview.column(self, col, 'id')\n # get column width\n self._dragged_col_width = w = ttk.Treeview.column(self, col, 'width')\n # get x coordinate of the left side of the column\n x = event.x\n while self.identify_region(x, event.y) == 'heading':\n # decrease x until reaching the separator\n x -= 1\n x_sep = x\n w_sep = 0\n # determine separator width\n while self.identify_region(x_sep, event.y) == 'separator':\n w_sep += 1\n x_sep -= 1\n if event.x - x <= self._im_drag.width():\n # start dragging if mouse click was on dragging icon\n x = x - w_sep // 2 - 1\n self._dragged_col_x = x\n # get neighboring column widths\n displayed_cols = self._displayed_cols\n self._dragged_col_index = i1 = displayed_cols.index(self._dragged_col)\n if i1 > 0:\n left = ttk.Treeview.column(self, displayed_cols[i1 - 1], 'width')\n else:\n left = None\n if i1 < len(displayed_cols) - 1:\n right = ttk.Treeview.column(self, displayed_cols[i1 + 1], 'width')\n else:\n right = None\n self._dragged_col_neighbor_widths = (left, right)\n self._dx = x - event.x # distance between cursor and column left border\n # configure dragged column preview\n self._visual_drag.column(self._dragged_col, width=w)\n self._visual_drag.configure(displaycolumns=[self._dragged_col])\n if 'headings' 
in tuple(str(p) for p in self['show']):\n self._visual_drag.configure(show='headings')\n else:\n self._visual_drag.configure(show='')\n self._visual_drag.place(in_=self, x=x, y=0, anchor='nw',\n width=w + 2, relheight=1)\n self._visual_drag.state(('active',))\n self._visual_drag.update_idletasks()\n self._visual_drag.yview_moveto(self.yview()[0])\n else:\n self._dragged_col = None", "def column_move(plateau,num_col,sens):\n if check_room(plateau, 3, num_col) == False or (sens != 1 and sens != 0):\n return \"Erreur !\"\n if sens==1:\n for i in range(0,3):\n if is_room_empty(plateau,i,num_col):\n column_pack(plateau,num_col,i,sens)\n break\n if get_value(plateau,i,num_col)==get_value(plateau,i+1,num_col)and get_value(plateau,i,num_col)%3==0:\n set_value(plateau,i,num_col,get_value(plateau,i,num_col)*2)\n column_pack(plateau,num_col,i+1,sens)\n break\n if get_value(plateau,i,num_col)==1 and get_value(plateau,i+1,num_col)==2:\n set_value(plateau,i,num_col,3)\n column_pack(plateau,num_col,i+1,sens)\n break\n if get_value(plateau,i,num_col)==2 and get_value(plateau,i+1,num_col)==1:\n set_value(plateau,i,num_col,3)\n column_pack(plateau,num_col,i+1,sens)\n break\n\n else:\n for i in range(3,0,-1):\n if is_room_empty(plateau,i,num_col):\n column_pack(plateau,num_col,i,sens)\n break\n if get_value(plateau,i,num_col)==get_value(plateau,i-1,num_col) and get_value(plateau,i,num_col)%3==0:\n set_value(plateau,i,num_col,get_value(plateau,i,num_col)*2)\n column_pack(plateau,num_col,i-1,sens)\n break\n if get_value(plateau,i,num_col)==1 and get_value(plateau,i-1,num_col)==2:\n set_value(plateau,i,num_col,3)\n column_pack(plateau,num_col,i-1,sens)\n break\n if get_value(plateau,i,num_col)==2 and get_value(plateau,i-1,num_col)==1:\n set_value(plateau,i,num_col,3)\n column_pack(plateau,num_col,i-1,sens)\n break", "def shift(self, shift_vec: np.ndarray) -> None:\n if len(shift_vec) != 3:\n raise ValueError(\"`shift_vec` must be a three dimensional vector\")\n shift = np.argmax(shift_vec) - 1\n self._head += shift\n if self._head < 0:\n new_buffer = np.zeros_like(self._buffer)\n self._buffer = np.concatenate([new_buffer, self._buffer], axis=0)\n self._head += len(new_buffer)\n elif self._head > len(self._buffer) - 1:\n new_buffer = np.zeros_like(self._buffer)\n self._buffer = np.concatenate([self._buffer, new_buffer], axis=0)", "def __rlshift__(self, other):\r\n return NotImplemented", "def __rlshift__(self, other):\r\n return NotImplemented", "def ShiftFrame(Frame, PixShift):\n \n import numpy as np\n \n F, R, C = Frame.shape\n \n if F > 1:\n msg = f\"'Frame' must be a 2D frame with shape (1, R, C) but has shape\"\\\n + f\" ({F}, {R}, {C}).\"\n \n raise Exception(msg)\n \n # Initialise ShiftedFrame:\n ShiftedFrame = np.zeros((1, R, C), dtype='uint')\n #ShiftedFrame = np.empty_like(Frame, dtype='uint') # this creates 42,932\n # unique values for some reason!\n \n #unique = UniqueItems(Nda=Frame, NonZero=False)\n #print(f'\\n---> There are {len(unique)} unique items in Frame')\n #unique = UniqueItems(Nda=ShiftedFrame, NonZero=False)\n #print(f'\\n---> There are {len(unique)} unique items in the initialised',\n # f'ShiftedFrame: {unique[:11]}...')\n \n di, dj, dk = PixShift\n \n ##ShiftedFrame[0, dj:, di:] = Frame[0, :-(1+dj), :-(1+di)]\n ##ShiftedFrame[0, :-(1+dj), :-(1+di)] = Frame[0, dj:, di:]\n #ShiftedFrame[0, :R-dj, :C-di] = Frame[0, dj:, di:]\n \n if di > 0 and dj > 0:\n ShiftedFrame[0, dj:, di:] = Frame[0, :-dj, :-di]\n \n elif di < 0 and dj < 0:\n ShiftedFrame[0, :dj, :di] = Frame[0, -dj:, -di:]\n \n 
elif di > 0 and dj < 0:\n ShiftedFrame[0, :dj, di:] = Frame[0, -dj:, :-di]\n \n elif di < 0 and dj > 0:\n ShiftedFrame[0, dj:, :di] = Frame[0, :-dj, -di:]\n \n elif di == 0 and dj > 0:\n ShiftedFrame[0, dj:, :] = Frame[0, :-dj, :]\n \n elif di == 0 and dj < 0:\n ShiftedFrame[0, :dj, :] = Frame[0, -dj:, :]\n \n elif di > 0 and dj == 0:\n ShiftedFrame[0, :, di:] = Frame[0, :, :-di]\n \n elif di < 0 and dj == 0:\n ShiftedFrame[0, :, :di] = Frame[0, :, -di:]\n \n elif di == 0 and dj == 0:\n ShiftedFrame[0] = Frame[0]\n \n #unique = UniqueItems(Nda=ShiftedFrame, NonZero=False)\n #print(f'\\n---> There are {len(unique)} unique items in the ShiftedFrame',\n # 'after shifting.')\n \n return ShiftedFrame" ]
[ "0.6121716", "0.59776706", "0.5851196", "0.5847812", "0.57968843", "0.5760453", "0.566838", "0.5658573", "0.558538", "0.5567467", "0.5556038", "0.5546835", "0.55436087", "0.5541556", "0.5532854", "0.5511011", "0.5500459", "0.5493521", "0.5485904", "0.54635084", "0.5462295", "0.54582185", "0.54580015", "0.5441772", "0.5436838", "0.5436838", "0.5415506", "0.54096705", "0.54043746", "0.5399213", "0.5394421", "0.53774256", "0.53522825", "0.53398776", "0.5332844", "0.5306662", "0.53046286", "0.5295616", "0.5295308", "0.5290023", "0.5270761", "0.5261475", "0.5226525", "0.52261907", "0.52150565", "0.5213431", "0.52123606", "0.52103657", "0.52102375", "0.5206506", "0.5200778", "0.5193889", "0.5178221", "0.5167494", "0.5150361", "0.51497525", "0.5130978", "0.5129041", "0.5127321", "0.51268613", "0.5114823", "0.5111558", "0.508624", "0.5070129", "0.5065221", "0.5065221", "0.5061918", "0.50591797", "0.5052556", "0.50490373", "0.50457746", "0.5044556", "0.50348675", "0.503446", "0.50294137", "0.5028536", "0.50191116", "0.50122446", "0.49988198", "0.4997453", "0.4994374", "0.49873805", "0.4986239", "0.49834833", "0.49638164", "0.4963364", "0.4957456", "0.49539375", "0.49533227", "0.4949098", "0.49486965", "0.4947202", "0.49417773", "0.4939626", "0.4921668", "0.49195996", "0.4913862", "0.490984", "0.490984", "0.49051666" ]
0.69813544
0
Remove the specified column header from the header chain. All rows that appear in this column are also removed.
def remove_col(self, col_header):
    # Remove the column header from the header chain
    col_header.right.left = col_header.left
    col_header.left.right = col_header.right
    # Loop down through the column and remove the rows
    cell = col_header.down
    while cell != col_header:
        row_cell = cell.right
        # Move through all cells in this row and update their up/down links
        while row_cell != cell:
            row_cell.down.up = row_cell.up
            row_cell.up.down = row_cell.down
            row_cell.header.sum -= 1
            # Move on to the next cell in the row
            row_cell = row_cell.right
        # Move on to the next row
        cell = cell.down
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unremove_col(self, col_header):\n # Add the column head back into the chain\n col_header.right.left = col_header\n col_header.left.right = col_header\n # Loop up through the column and add the rows back in\n # Doing this in exactly the reverse order of the removing ensures that we return\n # to the state we were in before the removal\n cell = col_header.up\n while cell != col_header:\n row_cell = cell.left\n # Move through all cells in this row and update their up/down links\n while row_cell != cell:\n row_cell.down.up = row_cell\n row_cell.up.down = row_cell\n row_cell.header.sum += 1\n # Move on to the next cell in the row\n row_cell = row_cell.left\n # Move on to the next row\n cell = cell.up", "def RemoveColumn(self, column):\r\n\r\n self._header_win.RemoveColumn(column)\r\n self._header_win.Refresh()", "def remove_columns(tx, header, columns_to_remove):\n print(\"\\nRemove columns...\")\n num_removed = 0\n for col in columns_to_remove:\n tx = np.delete(tx, col - num_removed, 1)\n header = np.delete(header, col - num_removed + 2)\n num_removed += 1\n print(\"\\n... finished.\")\n return tx, header", "def header_clean_row(row_of_data):\n header = row_of_data.get('header')[1]\n z = list(set(remove_filler_words([header])))\n return z", "def _remove_column(self, column: str) -> None:\n dtype, loc, order = self._column_info.pop(column).values\n self._data[dtype] = np.delete(self._data[dtype], loc, axis=1)\n if self._data[dtype].shape[1] == 0:\n del self._data[dtype]\n\n for col, col_obj in self._column_info.items():\n if col_obj.dtype == dtype and col_obj.loc > loc:\n col_obj.loc -= 1", "def delColumn(self,column):\n data = self.data\n for rowData in data.values():\n if column in rowData:\n del rowData[column]\n self.hasChanged = True", "def remove_header( self, *names ):\n for name in names:\n del self[ name.strip() ]", "def _reset_header(self):\n new_header = []\n for col_name in self.header:\n is_left = self.left_cols.get(col_name)\n if is_left:\n new_header.append(col_name)\n self.header = new_header", "def RemoveColumn(self, column):\r\n\r\n if column < 0 or column >= self.GetColumnCount():\r\n raise Exception(\"Invalid column\")\r\n \r\n self._total_col_width -= self._columns[column].GetWidth()\r\n self._columns.pop(column)\r\n self._owner.AdjustMyScrollbars()\r\n self._owner._dirty = True", "def _delcolumns(self, columnname, columndata=\"\"):\n\n del self[columnname]", "def removeMeta(self, row, column):\n filePath = self.filesList.selectedItems()[0].text(2)\n metaHeader = (self.metadataList.item(row, 0)).text()\n logging.debug(\"Removing metadata \" + metaHeader + \" from \" + str(filePath))\n self.filesList.removeMeta(filePath, metaHeader, row)", "def removekwd(header, kwd):\n if kwd in header.keys():\n header.remove(kwd)\n return", "def clear_header(self):\n\n if self.terminate:\n return\n\n self.windows['HEADER'].erase()\n # if not self.active_portfolio:\n self.windows['HEADER'].addstr(0, 0, 'Portfolio: None')", "def remove_column(self, name):\n if name not in self.column_names():\n raise KeyError('Cannot find column %s' % name)\n self.__is_dirty__ = True\n try:\n with cython_context():\n if self._is_vertex_frame():\n assert name != '__id', 'Cannot remove \\\"__id\\\" column'\n graph_proxy = self.__graph__.__proxy__.delete_vertex_field(name)\n self.__graph__.__proxy__ = graph_proxy\n elif self._is_edge_frame():\n assert name != '__src_id', 'Cannot remove \\\"__src_id\\\" column'\n assert name != '__dst_id', 'Cannot remove \\\"__dst_id\\\" column'\n graph_proxy = 
self.__graph__.__proxy__.delete_edge_field(name)\n self.__graph__.__proxy__ = graph_proxy\n except:\n self.__is_dirty__ = False\n raise", "def deleteColumn(self, column):\n if (column >= self._width or column <= -self._width):\n raise IndexError('Invalid index, row %d does not exist' % column)\n returnvalue = list()\n self._width -= 1\n for row in self._value:\n returnvalue.append(row.pop(column))\n return returnvalue", "def remove_head_line(self, gtfs_file, path):\n out_list = []\n header = GtfsHeader.return_header(self, gtfs_file).strip()\n in_file = os.path.join(os.path.expanduser(path), '{}.tmp'.format(gtfs_file))\n\n lines = open(in_file).readlines()\n cnt = 0\n for line in lines:\n if header in line:\n cnt += 1\n print('>>> Found header {} in {}.'.format(cnt, gtfs_file))\n lines.remove(line)\n # out_list.append(header.strip())\n\n for line in lines:\n out_list.append(line.strip())\n out_file = in_file\n\n f = open(out_file, 'w')\n for line in out_list:\n f.write('{}\\n'.format(line.strip()))\n f.close()", "def remove_attr(self, key):\n del self.header[key]", "def remove_header(self, name, value=None):\r\n\r\n found_it = 0\r\n\r\n # Remove things from the old dict as well\r\n if (name in self.reply_headers and\r\n (value is None or\r\n self.reply_headers[name] == value)):\r\n del self.reply_headers[name]\r\n found_it = 1\r\n\r\n\r\n removed_headers = []\r\n if not value is None:\r\n if (name, value) in self.__reply_header_list:\r\n removed_headers = [(name, value)]\r\n found_it = 1\r\n else:\r\n for h in self.__reply_header_list:\r\n if h[0] == name:\r\n removed_headers.append(h)\r\n found_it = 1\r\n\r\n if not found_it:\r\n if value is None:\r\n search_value = \"%s\" % name\r\n else:\r\n search_value = \"%s: %s\" % (name, value)\r\n\r\n raise LookupError(\"Header '%s' not found\" % search_value)\r\n\r\n for h in removed_headers:\r\n self.__reply_header_list.remove(h)", "def delcolumn(self, column, accept_small_names=True):\n if column in self.keys():\n self[column] = \"\"\n return\n elif accept_small_names:\n if self[\"__psvcolumnstracker__\"].get(column):\n self.__delattr__(column)\n return\n if not accept_small_names:\n raise ValueError(\"'{}'\".format(column))\n else:\n raise ValueError(\"'{}'. 
Make sure the shorterned columns name have no collisions\".format(column))", "def delete_headers(self, ):\n if self.AttributeNames.HEADERS in self.attrs:\n del self.attrs[self.AttributeNames.HEADERS]\n return self", "def remove_column(df,col_name):\n return df.drop(col_name)", "def remove_columns ( infilename, outfilename, cols_to_remove ):\n xcols = cols_to_remove\n xcols.sort()\n xcols.reverse()\n \n reader = csv.reader( open( infilename, 'rt' ), quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n writer = csv.writer( open( outfilename, 'wb' ), quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\n for row in reader:\n vals = row\n for x in xcols :\n vals.pop( x )\n writer.writerow( vals )", "def _clean(header):\n # TODO: find a way to identify cubes containing time\n header['ctype1'] = 'HPLN-TAN' # Helioprojective longitude, TAN projection\n header['ctype2'] = 'HPLT-TAN' # Helioprojective latitude, TAN projection\n header['ctype3'] = 'WAVE ' # Wavelength axis, default (TAB) projection\n header['naxis'] = 3\n return header", "def depart_thead(self, node):\n self.Table.add_header_line(\"|\")", "def del_header(self, name):\n key = name.upper()\n if key not in _RESPONSE_HEADER_DICT:\n key = name\n if key in self._headers:\n del self._headers[key]", "def __delitem__(self, key):\n\n del self._headers[key.lower()]", "def removeRow(self, index: int) -> None:\n ...", "def test_remove_columns(self):\n table = Table('table1', key=['col1', 'col2'])[\n Column('col1'),\n Column('col2'),\n Column('col3'),\n Column('col4'),\n ]\n\n table.remove_columns(('col2', 'col3'))\n\n self.assertEqual(2, len(table.columns))\n self.assertEqual('col1', table.columns[0].name)\n self.assertEqual('col4', table.columns[1].name)\n self.assertEqual([], table.key)", "def remove_row(self, row_id):", "def del_header_value(old_rmap, new_rmap, key):\n mapping = rmap.load_mapping(old_rmap)\n del mapping.header[key]\n mapping.write(new_rmap)", "def __delitem__(self, name):\n name = name.lower()\n newheaders = []\n for k, v in self._headers:\n if k.lower() <> name:\n newheaders.append((k, v))\n self._headers = newheaders", "def clear_columns(self):\n self._columns = []\n return self", "def del_column(self, fieldname):\n ...", "def remove_row(self, row_id):\n TODO('https://github.com/posterior/treecat/issues/27')", "def strip_header(book):\n\theader_regex = header_string_regex()\n\theader_match = re.search(header_regex, book)\n\n\theader_end = 0\n\tif header_match:\n\t\theader_end = header_match.end()\n\n\treturn book[header_end:]", "def remove_columns(lst):\n cols_rem = ['yearID','Team','lgID','Name','X','playerID','pops']\n\n for item in cols_rem:\n if item in lst:\n lst.remove(item)\n\n return(lst)", "def DeleteResponseHeader(self, name):\n assert name.islower()\n self._wpr_response.original_headers = \\\n [x for x in self._wpr_response.original_headers if x[0].lower() != name]", "def delete_columns(self, columns):\n columns = to_list(columns)\n\n unknown = set(columns) - set(self._columns)\n if unknown:\n names = \", \".join(str(name) for name in unknown)\n raise ValueError(f\"Unable to remove unknown columns: {names}\")\n\n for column in columns:\n col = self.column_location(column)\n for idx in self.index:\n del self._data[idx][col]\n del self._columns[col]", "def remove_column(self, pos, labels=\"REMOVE\"):\n MutableAlignment.remove_column(self, pos)\n if labels == \"RESET\":\n self._reset_col_names()\n elif labels == \"REMOVE\":\n self._col_labels = self._col_labels[:pos] + \\\n self._col_labels[pos + 1:]", "def delete_column(self, 
col_id):\n columns = managers.request_manager.get_request().session().value(\"columns\")\n headers = managers.request_manager.get_request().session().value(\"headers\")\n if not columns:\n return False\n column = None\n for col in columns:\n if columns[col].id == col_id:\n column = columns[col]\n break\n if not column:\n return False\n newtable = \"%s_new(\" % self.name\n oldtable = \"%s(\" % self.name\n for col in headers:\n if oldtable[-1] != \"(\":\n oldtable += \", \"\n oldtable += columns[col].to_declaration()\n\n if columns[col].id == col_id:\n continue\n if newtable[-1] != \"(\":\n newtable += \", \"\n newtable += columns[col].to_declaration()\n newtable += \")\"\n if newtable[-2] == \"(\":\n return False\n newcols = []\n newcols.extend(headers)\n newcols.remove(column.name)\n newcols_decl = \"\"\n for ctr in newcols:\n newcols_decl += \", `%s`\" % ctr\n\n sql = \"\"\"BEGIN TRANSACTION;\nCREATE TABLE %(newtable)s;\nINSERT INTO `%(newtablename)s` SELECT %(newcols)s FROM '%(oldtablename)s';\nDROP TABLE `%(oldtablename)s`;\nALTER TABLE `%(newtablename)s` RENAME TO `%(oldtablename)s`;\nEND TRANSACTION;\"\"\" % {\"newtable\": newtable, \"newtablename\": self.name + \"_new\", \"oldtablename\": self.name, \"newcols\": newcols_decl[2:]}\n query = VDOM_sql_query(self.owner_id, self.database_id, sql, None, True)\n query.commit()\n columns.pop(column.name)\n managers.request_manager.get_request().session().value(\"columns\", columns)\n return True", "def remove_columns(path, columns=None, anonymize = True, make_backups = True):\n\n if columns is None: columns = []\n if anonymize: columns.extend(PERSONAL_INFO_COLUMN_NAMES_LIST)\n files = []\n if os.path.isfile(path):\n files.extend(path)\n elif os.path.isdir(path):\n files.extend([f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))])\n else:\n raise TypeError(\"remove_columns() requires a file name or directory name\")\n for file_name in files:\n if make_backups:\n shutil.copyfile(file_name, \"_original_file_\" + file_name)\n table_df = pd.read_csv(file_name)\n if isinstance(columns, basestring):\n columns = [columns]\n table_df.drop(labels=columns, axis=1, inplace=True)\n table_df.to_csv(file_name, index=False)\n return files", "def remove_col(df):\n df[\"bag_of_words\"] = \"\"\n columns = df.columns\n for index, row in df.iterrows():\n words = \"\"\n for col in columns:\n if col != \"Director\":\n words = words + \" \".join(row[col]) + \" \"\n else:\n words = words + row[col] + \" \"\n row[\"bag_of_words\"] = words\n df.drop(columns=[col for col in df.columns if col != \"bag_of_words\"], inplace=True)\n return df", "def remove_header(filename, packet_num):\n bin_data = np.fromfile(filename, dtype=np.int16)\n index = []\n for i in range(packet_num):\n j = i * 735\n index.append([j, j + 1, j + 2, j + 3, j + 4, j + 5, j + 6])\n output = np.delete(bin_data, index)\n return output", "def drop_columns(self, col):\n try:\n self.cleaned_data.drop(col, axis=1, inplace=True)\n except Exception as e:\n raise e", "def header_data_columns(head_line, data_cols, header):\n\n colnames = head_line.split(\",\")\n\n # Remove triling blancks and end of lines\n colnames = [x.strip() for x in colnames]\n\n # Difference between columns in the header and in the data\n diff = len(data_cols) - len(colnames)\n\n if diff > 0:\n # Add dum headers\n dums = \"\"\n for idiff in range(diff):\n dums = dums + \",dum\" + str(idiff)\n\n new_head = str(head_line.rstrip()) + dums + \" \\n\"\n header.append(new_head)\n\n elif diff < 0:\n sys.exit(\n \"STOP 
novonix_clean.header_data_columns \\n\"\n + \"REASON less data columns than header names \\n\"\n )\n else:\n header.append(head_line)\n\n return", "def __delitem__(self, index):\n # delete the column\n del self._data[index]\n\n # adjust the number of columns\n self._nrows -= 1", "def _clean_up_spectrometer_df_header(unformatted_spectrometer_df):\n\n timestamp_column, epoch_column = unformatted_spectrometer_df.columns[[0, 1]]\n spectrometer_df_with_clean_header = unformatted_spectrometer_df.rename(\n # Timestamp column is in local time, epoch_time column is in UTC\n columns={timestamp_column: \"timestamp\", epoch_column: \"epoch_time\"}\n )\n # 'epoch_time' column is redundant to 'timestamp' column. Remove it\n spectrometer_df_with_clean_header.drop([\"epoch_time\"], axis=1, inplace=True)\n\n return spectrometer_df_with_clean_header", "def remove_index(self):\n if \"@Index\" not in self.col_lines[0]:\n return\n\n while not self.col_lines[0].startswith('@Entity'):\n self.col_lines.pop(0)", "def drop(self,df, column_list):\n df.drop(columns = column_list, inplace = True)\n return df", "def remove_columns(data, col_ids):\n return np.delete(data, col_ids, axis=1)", "def remove(self, from_line, from_col, to_line, to_col):\n assert from_line == to_line\n from_col = self.canonicalize_column_index(from_line, from_col)\n to_col = self.canonicalize_column_index(to_line, to_col)\n\n col_off = self.col_offs[from_line]\n adj_from_col = col_off.get_rewritten_pos(from_col)\n adj_to_col = col_off.get_rewritten_pos(to_col)\n theline = self.lines[from_line]\n self.lines[from_line] = theline[:adj_from_col] + theline[adj_to_col:]\n col_off.remove(from_col, to_col-from_col)", "def removeStudyActualColumn(self, study_id, column_name):\n try:\n con = self.getMetadataDatabaseConnection()\n con.cursor().callproc('qiime_assets.remove_study_actual_column', [study_id, column_name])\n except Exception, e: \n raise Exception('Exception caught in removeStudyActualColumns(): %s.\\nThe error is: %s' % (type(e), e))", "def remove(table, id_):\n return common.remove_line(table, id_)", "def hide_invisible_headers(self):\n # Hide all the non selected columns\n col_index = 0\n for header in self.column_headers_all:\n if header in self.column_headers:\n self.csv_data_table.setColumnHidden(col_index, False)\n self.file_changed = True\n self.set_save_enabled(True)\n else:\n self.csv_data_table.setColumnHidden(col_index, True)\n col_index = col_index + 1", "def _remove_redundant_columns(self):\n self.dataframe.drop(['letter', 'sentiment'], axis=1, inplace=True)", "def delcol(self, name, colname):\n if not self._open:\n raise ValueError(\"Operation on closed file\")\n if colname in self.grp[name].keys():\n del self.grp[name][colname]", "def delete_variable(self, columns):\n if not isinstance(columns, (list, tuple)):\n columns = [columns]\n for col in columns:\n if isinstance(col, str):\n col = [i for i, v in enumerate(self.list) if v.name == col][0]\n self.list.pop(col)", "def delete_col(A, delcol):\r\n m = A.shape[0]\r\n n = A.shape[1]\r\n keeprows = arange(0, m)\r\n keepcols = delete(arange(0, n), delcol)\r\n return A[keeprows][:, keepcols]", "def remove_feature(self,colName):\n if isinstance(self.time_series_data,Time_Series_Data_Collection):\n for i in self.time_series_data:\n self.time_series_data[i].remove(colName)\n return self\n self.time_series_data.remove(colName)\n return self", "def delete_column(self, pos):\n for i in range(len(self._grid)):\n del self._grid[i][pos]", "def clean_headers(df):\n filtered_headers = 
[header.replace(\"'\",'').replace(' ', '').replace('(', '').replace(')', '').replace('.', '').replace('[', '').replace(']', '') for header in df.columns]\n map_to_new_headers = {}\n for i in range(len(df.columns)):\n map_to_new_headers[df.columns[i]] = filtered_headers[i]\n\n return df.rename(columns = map_to_new_headers)", "def remove_row(self):\n if len(self.columns[\"rows\"].children) > 0:\n self.selects.pop()\n self.button_groups.pop()\n self.buttons[\"edit\"].pop()\n self.columns[\"rows\"].children.pop()", "def scrub_headers(self, header_dict):\n return self.__headers_scrubber(header_dict)", "def remove(table, id_):\n\n # 3\n for index in range(len(table)):\n if table[index][0] == id_:\n table.pop(index)\n data_manager.write_table_to_file('hr/persons.csv', table)\n return table", "def deselect (a_data,a_column) :\n loc_data = a_data.drop(a_column,axis = 1) \n return loc_data", "def __delitem__(self,key):\n self.table.delItem(key,self.column)", "def transform(self, X, y=None):\n return X.drop(columns=self.columns_to_remove)", "def remove(table, id_):\n # 3\n for index in range(len(table)):\n if table[index][0] == id_:\n table.pop(index)\n data_manager.write_table_to_file('accouting/items.csv', table)\n\n return table", "def remove_rows(self, rows, regroup=False):\n self.table.remove_rows(np.atleast_1d(rows))\n if regroup:\n for col in ['setup', 'calib', 'calibbit', 'comb_id', 'bkg_id']:\n if col in self.keys():\n del self.table[col]\n self.set_configurations()\n self.set_calibration_groups()\n self.set_combination_groups()", "def delItem(self,row,column):\n data = self.data\n if row in data and column in data[row]:\n del data[row][column]\n self.hasChanged = True", "def remove_urequired_columns(self, unrequired_columns):\n self.df = self.df.drop(columns=unrequired_columns)", "def drop(self, columns: List[str]):\n self._check_columns(columns)\n return self._fromdata(\n {\n self.dtype.fields[i].name: ColumnFromVelox.from_velox(\n self.device,\n self.dtype.fields[i].dtype,\n self._data.child_at(i),\n True,\n )\n for i in range(self._data.children_size())\n if self.dtype.fields[i].name not in columns\n },\n self._mask,\n )", "def resetFilter(self, column):\n if self.hasFilter(column):\n column_name = self._dataframe.columns[column]\n del self._filters[column_name]\n self._applyFilters()", "def remove_tail(col_lines):\n while len(col_lines[-1]) < 2:\n col_lines.pop()", "def pareHeader(headerFile,Ldontcares=['GData','BiasCoeff','headerFile','y_m_d','TimeZero']):\n reload(chd) # KEN SCOPE ISSUE?\n dHeader = chd.main(['headerFile=' + headerFile])\n #Ldontcares = ['GData','BiasCoeff','headerFile','y_m_d','TimeZero']\n #Ldontcares = ['GData','BiasCoeff','headerFile','y_m_d']\n for k in Ldontcares:\n del dHeader[k]\n dataFile = split(headerFile,'.header')[0] # toss extension\n return dHeader,dataFile", "def drop_columns(self, columns):\n dframe = self.dframe(keep_parent_ids=True)\n self.replace_observations(dframe.drop(columns, axis=1))", "def pop_header_name(\n row: list[Hashable], index_col: int | Sequence[int]\n) -> tuple[Hashable | None, list[Hashable]]:\n # Pop out header name and fill w/blank.\n if is_list_like(index_col):\n assert isinstance(index_col, Iterable)\n i = max(index_col)\n else:\n assert not isinstance(index_col, Iterable)\n i = index_col\n\n header_name = row[i]\n header_name = None if header_name == \"\" else header_name\n\n return header_name, row[:i] + [\"\"] + row[i + 1 :]", "def remove_colspan(self, ):\n if self.AttributeNames.COLSPAN in self.attrs:\n del 
self.attrs[self.AttributeNames.COLSPAN]\n return self", "def remove_file(self):\n selected_column = self.files_treeview.selection()\n\n if not selected_column:\n return\n self.files_treeview.delete(selected_column)\n treeview_items = self.files_treeview.get_children()\n if treeview_items:\n self.files_treeview.selection_set(treeview_items[-1])", "def pop_header(self):\n header = None\n if self._headers:\n header = self._headers.pop()\n self._set_message_length()\n\n return header", "def delete_move(self, column):\n row = 0\n # Find the first non-empty cell in the specified column\n while (row < self.height and self.data[row][column] == \" \"):\n row = row + 1\n\n # If the column is not empty, remove the top peg\n if (row != self.height):\n self.data[row][column] = \" \"", "def del_row(self, row_index):\n ...", "def delete_column(dataset):\r\n\t# transpose columns in rows\r\n\tdataset = zip(*dataset)\r\n\t# delete empty rows if any\r\n\tdataset = [row for row in dataset if any(row)]\r\n\t# transpose rows in columns to return a list of lists.\r\n\treturn [list(row) for row in zip(*dataset)]", "def deleteColumn(self, table: Table, column: Column,\n noLine, noColumn):\n database = SymbolTable().useDatabase\n if not database:\n desc = f\": Database not selected\"\n ErrorController().add(4, 'Execution', desc,\n noLine, noColumn)\n return\n\n dbStatement = data_mode.mode(database.mode).alterDropColumn(database.name.lower(),\n table.name.lower(), column.number)\n\n if dbStatement == 0:\n if column:\n table.remove(column)\n self.updateColumnIndex(table)\n self.writeFile()\n DataWindow().consoleText('Query returned successfully: Column deleted')\n return\n\n desc = f\": Column {column.name} does not exist\"\n ErrorController().add(26, 'Execution', desc, noLine, noColumn)\n\n elif dbStatement == 1:\n desc = f\": Can't update Table {table.name}\"\n ErrorController().add(34, 'Execution', desc, noLine, noColumn)\n\n elif dbStatement == 2:\n desc = f\": Database {database.name} does not exist\"\n ErrorController().add(35, 'Execution', desc, noLine, noColumn)\n\n elif dbStatement == 3:\n desc = f\": Table {table.name} does not exist\"\n ErrorController().add(27, 'Execution', desc, noLine, noColumn)\n\n elif dbStatement == 4:\n desc = f\": Column of relation {column.name} does not exist\"\n ErrorController().add(26, 'Execution', desc, noLine, noColumn)", "def clear_columns(prefixlist,datas):\n\n ori_columns=datas.columns.tolist()\n ccc=rem_str(prefixlist,ori_columns)\n ccc=rem_str('_',ccc)\n ccc=[c.lower() for c in ccc]\n \n d = {key: value for (key, value) in zip(ori_columns,ccc)}\n datas.rename(columns=d,inplace=True)\n\n u, i = np.unique(datas.columns, return_index=True)\n y=u[np.argsort(i)] \n \n r=[datas.columns.tolist().index(rr)for rr in y]\n\n return datas.iloc[:, r]", "def delTcline(self, line):\n self._checkfigure()\n ld = self._get_linedict(line)\n for vline in ld['vlines']:\n vline.remove()\n ld['vlines'] = []", "def drop_column(self, *columns):\n for column in columns:\n self.table.drop_column(column)\n\n return self", "def writeheader(writer):\n writer.writerow(dict((fn, fn) for fn in writer.fieldnames))", "def remove(table, id_):\n\n # your code\n\n key = common.check_for_key(id_,table)\n\n if key == None:\n ui.print_error_message('Key does not exist')\n else:\n table.pop(key)\n data_manager.write_table_to_file('hr/persons.csv', table) \n\n #print(table)\n return table", "def processHeader(self, header=None, pdata=None):\n\t\tif self.invariantPData.writer and not 
self.invariantPData.headerOutputted:\n\t\t\tnewHeader = [\"outputID\", 'noOfOutliers', 'noOfNonMissing', 'outlierFraction', 'chiSqStat', 'chiSqMinusLogPvalue',\\\n\t\t\t\t\t\t'xMedianValue', 'yMedianValue', 'corr']\n\t\t\tself.invariantPData.writer.writerow(newHeader)\n\t\t\tself.invariantPData.headerOutputted = True", "def __delattr__(self, attr):\n s = cleanup_name(attr)\n try:\n self[self[\"__psvcolumnstracker__\"][attr]] = \"\"\n except KeyError:\n if attr in self.__delwhitelist__:\n super(Row, self).__delattr__(attr)\n else:\n keys = self[\"__psvcolumnstracker__\"].keys()\n if s in keys:\n raise AttributeError((\n \"{}{}\"\n .format(\n '\\'{}\\' has no attribute \\'{}\\''.format(\n type(self), attr),\n \". However, '{s}' is an existing condensed \".format(s=s) +\n \"column name. Only the condensed version is supported.\"\n .format(s=s)\n )))\n else:\n\n if attr in dir(self):\n raise AttributeError(\n msg.attribute_readonly.format(classname=self.__class__, attr=attr))\n else:\n raise AttributeError(msg.attribute_missing.format(\n type(self), attr))", "def deleteDistortionKeywords(hdr):\n # We need to use '.pop' to guard against the possibility, however remote,\n # that the keyword has already been removed before calling this function.\n for kw in DIST_KWS:\n hdr.pop(kw, None)\n\n # This can use 'del' since it will work even if the keywords\n # are missing altogether since the multi_kw uses wild-cards\n for multi_kw in DIST_MULTI_KWS:\n del hdr[multi_kw]", "def delete_columns(houses:pd.DataFrame)-> pd.DataFrame:\n drop_columns= ['NEXT OPEN HOUSE START TIME', 'NEXT OPEN HOUSE END TIME', \n 'URL (SEE http://www.redfin.com/buy-a-home/comparative-market-analysis FOR INFO ON PRICING)',\n 'MLS#', 'FAVORITE', 'INTERESTED', 'LATITUDE', 'LONGITUDE',\n SOURCE, SALE_TYPE, CITY, STATE]\n houses= houses[houses[STATUS].isin(['Sold'])]\n houses= houses[houses[CITY].isin(['Irvine'])]\n return houses.drop(drop_columns, axis= 1)", "def _drop_cols(self, duplicate_cols):\n self._hybrid_meta.drop(\n duplicate_cols + DROPPED_COLUMNS,\n axis=1, inplace=True, errors='ignore'\n )", "def remove(table, id_):\n\n common.toremoveid(\"inventory/inventory.csv\",data_manager.get_table_from_file(\"inventory/inventory.csv\"),id_)", "def EliminateCols(self, cols):\n return _hypre.HypreParMatrix_EliminateCols(self, cols)", "def remove_data(self, remove_without_confirmation = False):\n if self.verbose > 0:\n print(\"SpectraTools.Hitran.remove_data()\") \n \n if not remove_without_confirmation:\n answer = input(\"Do you want to delete {:}? 
yes/no [no] \".format(self.tablename))\n print(answer)\n if answer != \"yes\":\n print(\"Removal of data was canceled by the user\")\n return 0\n\n hapi.dropTable(self.tablename)\n \n filepath = self.db_path.joinpath(pathlib.Path(\"{:s}.data\".format(self.tablename)))\n if filepath.is_file():\n os.remove(filepath)\n\n filepath = self.db_path.joinpath(pathlib.Path(\"{:s}.header\".format(self.tablename)))\n if filepath.is_file():\n os.remove(filepath)", "def data_deletion(data, columnX=\"time\", columnY=\"forceX\"):\n\n subset = data_selection(data, columnX, columnY)\n\n data = data.drop(subset.index)\n return data", "def del_field_pattern(self):\n self.ui.tableFields.removeRow(self.ui.tableFields.currentRow())", "def remove_intermediate_columns(dataframe):\n\n combined_dataframe_dropped_cols = dataframe.drop(columns = ['measureland_qualifier_flag_speed',\n 'measureland_qualifier_flag_distance',\n 'measureland_qualifier_flag_acceleration',\n 'measureland_qualifier_flag_visual'])\n\n print(\"Dimensions of combined dataframe after dropping columns:\", combined_dataframe_dropped_cols.shape)\n print(\"Combined dataframe after dropping columns: \", combined_dataframe_dropped_cols.sample(10))\n\n return combined_dataframe_dropped_cols" ]
[ "0.7807759", "0.7441563", "0.71072763", "0.67735565", "0.6634205", "0.6594297", "0.6533182", "0.6480123", "0.6172724", "0.6143548", "0.6119587", "0.6100099", "0.60919523", "0.6055784", "0.60248214", "0.60123044", "0.60102904", "0.5948839", "0.5927248", "0.5872746", "0.5860796", "0.5850218", "0.58420604", "0.5832519", "0.5811461", "0.57704747", "0.5750552", "0.5731311", "0.5705728", "0.5679308", "0.56621796", "0.5650993", "0.5646481", "0.5632239", "0.5618907", "0.55947953", "0.5530277", "0.5529258", "0.55183035", "0.55010885", "0.5489142", "0.54780215", "0.5476371", "0.5465447", "0.5458805", "0.54525554", "0.54359156", "0.5432739", "0.5432102", "0.5430745", "0.5414758", "0.5400155", "0.5362935", "0.53424734", "0.533141", "0.53217584", "0.53159744", "0.53106374", "0.53060246", "0.5304711", "0.5304273", "0.5287351", "0.5286118", "0.5275413", "0.5264525", "0.5263583", "0.52616763", "0.5260898", "0.5241339", "0.5233695", "0.5232079", "0.52301496", "0.52290255", "0.5224378", "0.52210796", "0.52129817", "0.5208322", "0.52020216", "0.51934546", "0.51930875", "0.51853645", "0.51769274", "0.5171832", "0.5166001", "0.51552296", "0.5151939", "0.51332915", "0.5128326", "0.5127503", "0.51192296", "0.511656", "0.5097252", "0.50958735", "0.5091694", "0.5091569", "0.5088428", "0.50880384", "0.5086917", "0.5084518", "0.50812566" ]
0.80928975
0
Adds the specified column header back into the header chain. Also adds all rows that this column removed back in.
def unremove_col(self, col_header): # Add the column head back into the chain col_header.right.left = col_header col_header.left.right = col_header # Loop up through the column and add the rows back in # Doing this in exactly the reverse order of the removing ensures that we return # to the state we were in before the removal cell = col_header.up while cell != col_header: row_cell = cell.left # Move through all cells in this row and update their up/down links while row_cell != cell: row_cell.down.up = row_cell row_cell.up.down = row_cell row_cell.header.sum += 1 # Move on to the next cell in the row row_cell = row_cell.left # Move on to the next row cell = cell.up
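For context, the uncover routine above assumes a Dancing Links (DLX) style layout in which every cell is linked up/down/left/right and each column header keeps a sum of live cells. The sketch below is a minimal, hedged illustration of that layout and of the complementary cover step: the Node, ColumnHeader, link_row, and remove_col names are assumptions introduced here (they are not taken from the record), chosen only to match the left/right/up/down/header/sum attributes the routine manipulates.

class Node:
    # One cell of the sparse exact-cover matrix, linked in four directions.
    def __init__(self, header=None):
        self.left = self.right = self.up = self.down = self
        self.header = header            # column header this cell belongs to


class ColumnHeader(Node):
    # Column header node; `sum` counts the cells currently linked into the column.
    def __init__(self):
        super().__init__()
        self.header = self
        self.sum = 0


def link_row(cells):
    # Circularly link the cells of one row and splice each into its column.
    n = len(cells)
    for i, cell in enumerate(cells):
        cell.right = cells[(i + 1) % n]
        cell.left = cells[(i - 1) % n]
        col = cell.header
        cell.up, cell.down = col.up, col
        col.up.down = cell
        col.up = cell
        col.sum += 1


def remove_col(col_header):
    # Cover a column: unlink its header and drop every row that uses it.
    # This mirrors the uncover above, traversing right/down instead of left/up.
    col_header.right.left = col_header.left
    col_header.left.right = col_header.right
    cell = col_header.down
    while cell is not col_header:
        row_cell = cell.right
        while row_cell is not cell:
            row_cell.down.up = row_cell.up
            row_cell.up.down = row_cell.down
            row_cell.header.sum -= 1
            row_cell = row_cell.right
        cell = cell.down


# Minimal round trip: cover one column, then the uncover shown above restores it.
root, a, b = ColumnHeader(), ColumnHeader(), ColumnHeader()
headers = [root, a, b]
for i, h in enumerate(headers):
    h.right = headers[(i + 1) % 3]
    h.left = headers[(i - 1) % 3]
link_row([Node(a), Node(b)])
assert a.sum == 1 and b.sum == 1
remove_col(a)                 # covering `a` also unlinks the shared row from `b`
assert b.sum == 0
# Applying the uncover routine to `a` (e.g. via a solver instance holding these
# headers) relinks the header and the row, returning b.sum to 1.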
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_col(self, col_header):\n # Remove the column header from the header chain\n col_header.right.left = col_header.left\n col_header.left.right = col_header.right\n # Loop down through the column and remove the rows\n cell = col_header.down\n while cell != col_header:\n row_cell = cell.right\n # Move through all cells in this row and update their up/down links\n while row_cell != cell:\n row_cell.down.up = row_cell.up\n row_cell.up.down = row_cell.down\n row_cell.header.sum -= 1\n # Move on to the next cell in the row\n row_cell = row_cell.right\n # Move on to the next row\n cell = cell.down", "def _reset_header(self):\n new_header = []\n for col_name in self.header:\n is_left = self.left_cols.get(col_name)\n if is_left:\n new_header.append(col_name)\n self.header = new_header", "def RemoveColumn(self, column):\r\n\r\n self._header_win.RemoveColumn(column)\r\n self._header_win.Refresh()", "def add_header(self, *column_headers):\n raise NotImplementedError", "def add_header(self, *column_headers):\n header = \"| \"\n header += \" | \".join(column_headers)\n header += \" |\\n\"\n header += '|'\n header += \"|\".join(\"-\" * (len(header) + 2) for header in column_headers)\n header += \"|\\n\"\n self.col_widths = [len(header) for header in column_headers]\n self.result += header", "def add_header(self, *column_headers):\n header = \"<tr>\"\n header += \" \".join(f\"<th>{header}</th> \" for header in column_headers)\n header += \"</tr>\\n\"\n self.result += header", "def remove_columns(tx, header, columns_to_remove):\n print(\"\\nRemove columns...\")\n num_removed = 0\n for col in columns_to_remove:\n tx = np.delete(tx, col - num_removed, 1)\n header = np.delete(header, col - num_removed + 2)\n num_removed += 1\n print(\"\\n... finished.\")\n return tx, header", "def depart_thead(self, node):\n self.Table.add_header_line(\"|\")", "def headers_processor(headers):\n def apply_headers(row_set, row):\n _row = []\n pairs = izip_longest(row, headers)\n for i, (cell, header) in enumerate(pairs):\n if cell is None:\n cell = Cell(None)\n cell.column = header\n if not cell.column:\n cell.column = \"column_%d\" % i\n cell.column_autogenerated = True\n _row.append(cell)\n return _row\n return apply_headers", "def set_column_headers(self, headers):\n if isinstance(self.columns.idx[0], int):\n self.data = [sorted(headers)] + self.data\n\n increment = [i + 1 for i in self.rows.idx]\n self.rows.idx = [0] + increment\n\n elif isinstance(self.columns.idx[0], str):\n datum = {}\n for i, key in enumerate(self.columns.idx):\n datum.update({key: headers[i]})\n self.data = [datum] + self.data\n\n increment = [i + 1 for i in self.rows.idx]\n self.rows.idx = [0] + increment", "def addcolumn(self, column):\n if column not in self.headersindex:\n database = managers.database_manager.get_database(self.owner_id, self.database_id)\n cur = database.get_connection().cursor()\n cur.execute(\"ALTER TABLE \\'%s\\' ADD COLUMN %s\" % (self.name, column.to_declaration()))", "def header_data_columns(head_line, data_cols, header):\n\n colnames = head_line.split(\",\")\n\n # Remove triling blancks and end of lines\n colnames = [x.strip() for x in colnames]\n\n # Difference between columns in the header and in the data\n diff = len(data_cols) - len(colnames)\n\n if diff > 0:\n # Add dum headers\n dums = \"\"\n for idiff in range(diff):\n dums = dums + \",dum\" + str(idiff)\n\n new_head = str(head_line.rstrip()) + dums + \" \\n\"\n header.append(new_head)\n\n elif diff < 0:\n sys.exit(\n \"STOP 
novonix_clean.header_data_columns \\n\"\n + \"REASON less data columns than header names \\n\"\n )\n else:\n header.append(head_line)\n\n return", "def header_clean_row(row_of_data):\n header = row_of_data.get('header')[1]\n z = list(set(remove_filler_words([header])))\n return z", "def _remove_column(self, column: str) -> None:\n dtype, loc, order = self._column_info.pop(column).values\n self._data[dtype] = np.delete(self._data[dtype], loc, axis=1)\n if self._data[dtype].shape[1] == 0:\n del self._data[dtype]\n\n for col, col_obj in self._column_info.items():\n if col_obj.dtype == dtype and col_obj.loc > loc:\n col_obj.loc -= 1", "def _augment_filter(self, header):\n return header", "def add_headers(dataset, headers):\n dataset.columns = headers\n return dataset", "def add_headers(dataset, headers):\n dataset.columns = headers\n return dataset", "def _configure_bintable_header(new_header, table_headers):\n\n # Using a single header to get the column descriptions\n column_info = {}\n for kwd in table_headers[0]:\n if \"TTYPE\" not in kwd:\n continue\n \n colname = table_headers[0][kwd]\n num = kwd.replace(\"TTYPE\", \"\")\n \n cards = []\n for att in ['TTYPE', 'TFORM', 'TUNIT', 'TDISP', 'TDIM']:\n try:\n cards.append(table_headers[0].cards[att+num])\n except KeyError:\n pass # if we don't have info for this keyword, just skip it\n \n column_info[colname] = (num, cards)\n\n # Adding column descriptions and additional info\n for kwd in new_header:\n if \"TTYPE\" not in kwd:\n continue\n \n colname = new_header[kwd]\n num = kwd.replace(\"TTYPE\", \"\")\n \n info_row = column_info.get(colname)\n if not info_row:\n new_header.comments[kwd] = 'column name'\n new_header.comments[kwd.replace(\"TTYPE\", \"TFORM\")] = 'column format'\n continue\n \n info_num = info_row[0]\n cards = info_row[1]\n \n for key, val, desc in cards:\n key_new = key.replace(info_num, num)\n try:\n ext_card = new_header.cards[key_new]\n \n if ext_card[1]:\n val = ext_card[1]\n if ext_card[2]:\n desc = ext_card[2]\n \n new_header[key_new] = (val, desc)\n except KeyError: # card does not already exist, just add new one\n new_header.set(key_new, val, desc, after=kwd)\n\n # Adding any additional keywords from the original cutout headers\n shared_keywords = _combine_headers(table_headers, constant_only=True)\n for kwd in shared_keywords:\n if kwd in new_header: # Don't overwrite anything already there\n continue\n\n if any(x in kwd for x in [\"WCA\", \"WCS\", \"CTY\", \"CRP\", \"CRV\", \"CUN\",\n \"CDL\", \"11PC\", \"12PC\", \"21PC\", \"22PC\"]): # Skipping column WCS keywords\n continue\n\n new_header.append(shared_keywords.cards[kwd])", "def _modify_columns(self, cols, X, y=None):", "def writeheader(writer):\n writer.writerow(dict((fn, fn) for fn in writer.fieldnames))", "def _change_header(self, add=False):\n if self.data['history_file'] is None:\n return\n good_heading = self.data['history_header'] % self.data\n # ^^^ history_header is a string with %(abc)s replacements.\n headings = self.data['headings']\n history_lines = self.data['history_lines']\n previous = ''\n underline_char = '-'\n empty = False\n if not history_lines:\n # Remember that we were empty to start with.\n empty = True\n # prepare header line\n history_lines.append('')\n if len(history_lines) <= 1:\n # prepare underline\n history_lines.append(underline_char)\n if not headings:\n # Mock a heading\n headings = [{'line': 0}]\n inject_location = 0\n first = headings[0]\n inject_location = first['line']\n underline_line = first['line'] + 1\n try:\n 
underline_char = history_lines[underline_line][0]\n except IndexError:\n logger.debug(\"No character on line below header.\")\n underline_char = '-'\n previous = history_lines[inject_location]\n if add:\n inject = [\n good_heading,\n underline_char * len(good_heading),\n '',\n self.data['nothing_changed_yet'],\n '',\n '',\n ]\n if empty:\n history_lines = []\n history_lines[inject_location:inject_location] = inject\n else:\n # edit current line\n history_lines[inject_location] = good_heading\n logger.debug(\"Set heading from %r to %r.\", previous, good_heading)\n history_lines[underline_line] = utils.fix_rst_heading(\n heading=good_heading,\n below=history_lines[underline_line])\n logger.debug(\"Set line below heading to %r\",\n history_lines[underline_line])\n # Setting history_lines is not needed, except when we have replaced the\n # original instead of changing it. So just set it.\n self.data['history_lines'] = history_lines", "def add_headers(headers, out):\r\n out.write(common.to_csv_line(headers, \"efficient\"))", "def delColumn(self,column):\n data = self.data\n for rowData in data.values():\n if column in rowData:\n del rowData[column]\n self.hasChanged = True", "def pop_header_name(\n row: list[Hashable], index_col: int | Sequence[int]\n) -> tuple[Hashable | None, list[Hashable]]:\n # Pop out header name and fill w/blank.\n if is_list_like(index_col):\n assert isinstance(index_col, Iterable)\n i = max(index_col)\n else:\n assert not isinstance(index_col, Iterable)\n i = index_col\n\n header_name = row[i]\n header_name = None if header_name == \"\" else header_name\n\n return header_name, row[:i] + [\"\"] + row[i + 1 :]", "def fix_header(infile, outfile, colnum):\n\n with open(infile, mode='r') as fid:\n colnum -= 1 # adj. colnum to account for zero-based indexing\n cread = csv.reader(fid)\n ctr = 0\n\n with open(outfile, mode='w') as new_file:\n cwrite = csv.writer(new_file)\n\n for row in cread:\n if ctr==0:\n # we're in the header\n outrow = row[:colnum] + [stamp2iso(elem) for elem in row[colnum:]]\n ctr += 1\n else:\n outrow = row\n cwrite.writerow(outrow)", "def _postprocess_name_columns(\n table: pyarrow.Table, has_header: bool, settings: Settings\n) -> Tuple[pyarrow.Table, List[I18nMessage]]:\n if has_header and table.num_rows > 0:\n names, warnings = gen_unique_clean_colnames_and_warn(\n list((c[0].as_py() if c[0].is_valid else \"\") for c in table.columns),\n settings=settings,\n )\n\n # Remove header (zero-copy: builds new pa.Table with same backing data)\n table = table.slice(1)\n else:\n names = [f\"Column {i + 1}\" for i in range(len(table.columns))]\n warnings = []\n\n return (\n pyarrow.table(dict(zip(names, table.columns))),\n warnings,\n )", "def rollback(self) -> None:\n for k in self._moved_cols:\n self._cols[k].move_back()", "def parseColHeader(self, i, j) :\n cell_content = self.processString(self.source_cell.value)\n if self.isEmpty(i,j):\n if self.insideMergeBox(i,j):\n k, l = self.getMergeBoxCoord(i,j)\n \n # If we are in a vertical merge box, skip adding the dimension\n if l == j:\n return\n\n # Update cell content \n cell_content = self.processString(self.r_sheet.cell(k,l).value)\n else:\n return\n\n # Add the value qname to the column_dimensions list for that column\n self.column_dimensions.setdefault(j,[self.sheet_qname]).append(cell_content)\n \n # Add the data to the graph\n resource = self.getColHeaderValueURI(self.column_dimensions[j])\n self.graph.add((resource, RDF.type, self.namespaces['tablink']['ColumnHeader']))\n self.graph.add((resource, 
self.namespaces['skos']['prefLabel'], Literal(cell_content)))\n self.graph.add((resource, self.namespaces['tablink']['cell'], Literal(self.source_cell_name)))\n return", "def addheader(datasets):\n header = get_header()\n for i in range(0, len(datasets)):\n datasets[i].columns = header\n return datasets", "def customize_headers(self,executer, tree, cursor, table,custom_headers):\n headers = executer.get_columns(table, cursor)\n tree[\"columns\"] = custom_headers\n\n\n set_width = int(self.column_length_configurator / len(headers))\n\n # Setting columns width and headers\n for column in custom_headers:\n tree.column(column, width=set_width, minwidth=self.min_width)\n tree.heading(column, text=column)", "def RemoveColumn(self, column):\r\n\r\n if column < 0 or column >= self.GetColumnCount():\r\n raise Exception(\"Invalid column\")\r\n \r\n self._total_col_width -= self._columns[column].GetWidth()\r\n self._columns.pop(column)\r\n self._owner.AdjustMyScrollbars()\r\n self._owner._dirty = True", "def clear_columns(self):\n self._columns = []\n return self", "def set_column_headers(self,param_headers):\n self.cur_quotes_parm_headers = param_headers", "def add_col(self):\r\n reader = csv.reader(open(self.in_csvfile, newline=''))\r\n rows = list(reader)\r\n rows[0].append(self.col_name)\r\n for i in range(1, len(rows)):\r\n rows[i].append(self.cell_filler(rows[i]))\r\n writer = csv.writer(open(self.out_csvfile, 'w', newline=''))\r\n writer.writerows(rows)", "def add_new_column(dataframe, column_name):\r\n dataframe[column_name] = \"\"\r\n return dataframe", "def set_headers(ws):\r\n for column in range(1, 1 + len(headers)): # parse through each column in the first row\r\n ws.cell(row=1, column=column).value = headers[column - 1] # add corresponding header value to the Excel file\r", "def appforth(df, line):\n df.loc[-1]=line\n df.index = df.index + 1 # shifting index\n df = df.sort_index() # sorting by index\n return df", "def processHeader(self, header=None, pdata=None):\n\t\tif self.invariantPData.writer and not self.invariantPData.headerOutputted:\n\t\t\tnewHeader = [\"outputID\", 'noOfOutliers', 'noOfNonMissing', 'outlierFraction', 'chiSqStat', 'chiSqMinusLogPvalue',\\\n\t\t\t\t\t\t'xMedianValue', 'yMedianValue', 'corr']\n\t\t\tself.invariantPData.writer.writerow(newHeader)\n\t\t\tself.invariantPData.headerOutputted = True", "def remove_column(self, name):\n if name not in self.column_names():\n raise KeyError('Cannot find column %s' % name)\n self.__is_dirty__ = True\n try:\n with cython_context():\n if self._is_vertex_frame():\n assert name != '__id', 'Cannot remove \\\"__id\\\" column'\n graph_proxy = self.__graph__.__proxy__.delete_vertex_field(name)\n self.__graph__.__proxy__ = graph_proxy\n elif self._is_edge_frame():\n assert name != '__src_id', 'Cannot remove \\\"__src_id\\\" column'\n assert name != '__dst_id', 'Cannot remove \\\"__dst_id\\\" column'\n graph_proxy = self.__graph__.__proxy__.delete_edge_field(name)\n self.__graph__.__proxy__ = graph_proxy\n except:\n self.__is_dirty__ = False\n raise", "def _delcolumns(self, columnname, columndata=\"\"):\n\n del self[columnname]", "def clear_header(self):\n\n if self.terminate:\n return\n\n self.windows['HEADER'].erase()\n # if not self.active_portfolio:\n self.windows['HEADER'].addstr(0, 0, 'Portfolio: None')", "def delete_column(self, col_id):\n columns = managers.request_manager.get_request().session().value(\"columns\")\n headers = managers.request_manager.get_request().session().value(\"headers\")\n if not columns:\n return 
False\n column = None\n for col in columns:\n if columns[col].id == col_id:\n column = columns[col]\n break\n if not column:\n return False\n newtable = \"%s_new(\" % self.name\n oldtable = \"%s(\" % self.name\n for col in headers:\n if oldtable[-1] != \"(\":\n oldtable += \", \"\n oldtable += columns[col].to_declaration()\n\n if columns[col].id == col_id:\n continue\n if newtable[-1] != \"(\":\n newtable += \", \"\n newtable += columns[col].to_declaration()\n newtable += \")\"\n if newtable[-2] == \"(\":\n return False\n newcols = []\n newcols.extend(headers)\n newcols.remove(column.name)\n newcols_decl = \"\"\n for ctr in newcols:\n newcols_decl += \", `%s`\" % ctr\n\n sql = \"\"\"BEGIN TRANSACTION;\nCREATE TABLE %(newtable)s;\nINSERT INTO `%(newtablename)s` SELECT %(newcols)s FROM '%(oldtablename)s';\nDROP TABLE `%(oldtablename)s`;\nALTER TABLE `%(newtablename)s` RENAME TO `%(oldtablename)s`;\nEND TRANSACTION;\"\"\" % {\"newtable\": newtable, \"newtablename\": self.name + \"_new\", \"oldtablename\": self.name, \"newcols\": newcols_decl[2:]}\n query = VDOM_sql_query(self.owner_id, self.database_id, sql, None, True)\n query.commit()\n columns.pop(column.name)\n managers.request_manager.get_request().session().value(\"columns\", columns)\n return True", "def extend_headers(self, fragment):\r\n\r\n self.header_b.append(fragment)", "def __append_columns(self, new_dataframe):\n self.dataframe = pd.merge(self.dataframe, new_dataframe)", "def add_custom_headers(self, headers):\n headers_to_remove = [x for x in headers if x.lower() in [y.lower() for y in self.headers]]\n for header in headers_to_remove:\n headers.pop(header, None)\n headers.update(self.headers)", "def prepend_header(rendered_header):\n debug(\"adding header\")\n _range = CURRENT_BUFFER.range(0, 0)\n _range.append(rendered_header.split(\"\\n\"))", "def update_header():\n print_debug_info()\n if not should_do_write():\n debug(\"should not write this buffer.\")\n return\n\n if not (has_header() or suffix_is_supported()):\n # This file do not have a header, or it's format is unknown, quit.\n debug(\"cannot add header to a script of unknown format.\")\n return\n\n # if current buffer is not modified, do not bother to update it's date.\n if not modified():\n debug(\"Buffer not modified, just quit\")\n return\n\n row, column = vim.current.window.cursor\n header_template = globals().get(\"%s_header\" % SUFFIX).rstrip()\n\n # if line has the keyword, find the current for the keyword, get the line, re-render it and fill it in.\n head = CURRENT_BUFFER[:10]\n\n more_updates = vim.eval(\"g:BHUpdates\")\n\n update = {\n 'Maintained by': AUTHOR,\n 'Modified by': AUTHOR,\n 'Last modified': datetime.now().strftime(\"%Y-%m-%d %H:%M\"),\n 'Filename': FILENAME,\n }\n update.update(more_updates)\n for index, line in enumerate(head):\n for keyword in update:\n if line.find(keyword) != -1:\n original_line = [_line for _line in header_template.splitlines() if _line.find(keyword) != -1]\n if original_line:\n original_line = original_line[0]\n else:\n continue\n debug(\"original line: %s\" % original_line)\n debug(\"line to be replaced: %s\" % line)\n rendered_line = original_line % {KEYWORDS[keyword]: update[keyword]}\n debug(\"rendered line: %s\" % rendered_line)\n CURRENT_BUFFER[index] = rendered_line\n\n vim.current.window.cursor = (row, column)", "def _add_column(self, column):\n if column is None:\n column = len(self._columns)\n\n if column in self._columns:\n raise ValueError(f\"Duplicate column name: {column}\")\n\n if isinstance(column, int):\n 
assert column >= len(self._columns)\n for empty in range(len(self._columns), column):\n self._add_column(empty)\n\n self._columns.append(column)\n for idx in self.index:\n row = self._data[idx]\n row.append(None)\n\n return len(self._columns) - 1", "def _swapcolumns(self):\n return self.reindex_axis([self.columns[1], self.columns[0]], axis=1)", "def _clean(header):\n # TODO: find a way to identify cubes containing time\n header['ctype1'] = 'HPLN-TAN' # Helioprojective longitude, TAN projection\n header['ctype2'] = 'HPLT-TAN' # Helioprojective latitude, TAN projection\n header['ctype3'] = 'WAVE ' # Wavelength axis, default (TAB) projection\n header['naxis'] = 3\n return header", "def get_header(col_current, col_shift):\n header = col_current\n for i in range(col_shift):\n header = header.right\n return header", "def _generateColumnHeaderIfToggleAndNoText(self, obj, **args):\n # If we're reading just a single cell in speech, the new\n # header portion is going to give us this information.\n #\n if args['mode'] == 'speech' and not args.get('readingRow', False):\n return []\n\n result = []\n descendant = self._script.utilities.realActiveDescendant(obj)\n label = self._script.utilities.displayedText(descendant)\n if not label and self._script.utilities.hasMeaningfulToggleAction(obj):\n accHeader = self._script.utilities.columnHeaderForCell(obj)\n result.append(accHeader.name)\n return result", "def _append_row_header_to_subsequent_rows(row_headers, numerical_subtable):\n empty_flag = (numerical_subtable == '').mean(1) == 1\n empty_rows = list(np.where(empty_flag)[0])\n non_empty_rows = np.where(~empty_flag)[0]\n if len(empty_rows) > 0:\n if empty_rows[-1] != len(row_headers):\n empty_rows.append(len(row_headers))\n all_append_rows = [list(range(empty_rows[i] + 1, empty_rows[i + 1])) for i in range(len(empty_rows) - 1)]\n for i, append_rows in zip(empty_rows, all_append_rows):\n for append_row in append_rows:\n row_headers[append_row] = row_headers[i] + ' - ' + row_headers[append_row]\n row_headers = [row_headers[i] for i in non_empty_rows]\n numerical_subtable = numerical_subtable[non_empty_rows]\n return row_headers, numerical_subtable", "def remove_header( self, *names ):\n for name in names:\n del self[ name.strip() ]", "def cols(self, col):\n self.col += col", "def pareHeader(headerFile,Ldontcares=['GData','BiasCoeff','headerFile','y_m_d','TimeZero']):\n reload(chd) # KEN SCOPE ISSUE?\n dHeader = chd.main(['headerFile=' + headerFile])\n #Ldontcares = ['GData','BiasCoeff','headerFile','y_m_d','TimeZero']\n #Ldontcares = ['GData','BiasCoeff','headerFile','y_m_d']\n for k in Ldontcares:\n del dHeader[k]\n dataFile = split(headerFile,'.header')[0] # toss extension\n return dHeader,dataFile", "def update_header(self) -> None:\n self.header.partial_reset()\n self.header.point_format_id = self.points.point_format.id\n self.header.point_data_record_length = self.points.point_size\n\n if len(self.points) > 0:\n self.header.update(self.points)\n\n if self.header.version.minor >= 4:\n if self.evlrs is not None:\n self.header.number_of_evlrs = len(self.evlrs)\n self.header.start_of_waveform_data_packet_record = 0\n # TODO\n # if len(self.vlrs.get(\"WktCoordinateSystemVlr\")) == 1:\n # self.header.global_encoding.wkt = 1\n else:\n self.header.number_of_evlrs = 0", "def new_run_header(self, changed):\n self.header = changed['value']", "def add_header(self, delta, header):\n\n if not delta or not header:\n return\n\n header_line = f\"{header}\\n\"\n\n delta.insert(0, header_line)", "def 
del_header_value(old_rmap, new_rmap, key):\n mapping = rmap.load_mapping(old_rmap)\n del mapping.header[key]\n mapping.write(new_rmap)", "def removeMeta(self, row, column):\n filePath = self.filesList.selectedItems()[0].text(2)\n metaHeader = (self.metadataList.item(row, 0)).text()\n logging.debug(\"Removing metadata \" + metaHeader + \" from \" + str(filePath))\n self.filesList.removeMeta(filePath, metaHeader, row)", "def deleteColumn(self, column):\n if (column >= self._width or column <= -self._width):\n raise IndexError('Invalid index, row %d does not exist' % column)\n returnvalue = list()\n self._width -= 1\n for row in self._value:\n returnvalue.append(row.pop(column))\n return returnvalue", "def removeRow(self, index: int) -> None:\n ...", "def close_thead(self) -> str:\n self.html_table = self.html_table + \"\"\"</thead>\\n\"\"\"\n return self.html_table", "def remove_insertion_columns(self):\n cols = self.get_insertion_columns()\n s = []\n a = 0\n for b in cols:\n if b > a:\n s.append((a, b))\n a = b + 1\n s.append((a, len(self.col_labels)))\n for name, seq in list(self.items()):\n news = []\n for c in s:\n news.append(seq[c[0]:c[1]])\n self[name] = \"\".join(news)", "def close_cell_head(self) -> str:\n self.html_table = self.html_table + \"\"\"</th>\\n\"\"\"\n return self.html_table", "def del_column(self, fieldname):\n ...", "def delcolumn(self, column, accept_small_names=True):\n if column in self.keys():\n self[column] = \"\"\n return\n elif accept_small_names:\n if self[\"__psvcolumnstracker__\"].get(column):\n self.__delattr__(column)\n return\n if not accept_small_names:\n raise ValueError(\"'{}'\".format(column))\n else:\n raise ValueError(\"'{}'. Make sure the shorterned columns name have no collisions\".format(column))", "def modify_bam_header(self, in_bam, out_bam):\n #bam_header = pysam.Samfile(in_bam,'rb',check_header=False, check_sq=False).header\n bam_header_raw = pysam.Samfile(in_bam,'rb',check_header=False, check_sq=False).text.replace(\"\\t\\n\",\"\\n\")\n temp_header = in_bam + \".tempheader\"\n with open(temp_header ,\"w\") as f:\n f.write(bam_header_raw)\n\n bam_header = pysam.Samfile(temp_header,'r', check_header=False, check_sq=False).header\n sample_id = os.path.basename(in_bam).replace(\".pre.bam\", \"\")\n try:\n original_SM = list(set([x[\"SM\"] for x in bam_header[\"RG\"]]))[0]\n except:\n raise PipelineException(\"@RG header line not found in %s!\" % bam_in)\n\n # make sure SM tags in RG line are consistent with sample_id\n rgs = copy.copy(bam_header[\"RG\"])\n bam_header[\"RG\"] = []\n for rg in rgs:\n rg[\"SM\"] = sample_id\n bam_header[\"RG\"].append(rg)\n\n # save original SM tage\n if \"CO\" not in bam_header:\n bam_header[\"CO\"] = [\"Original RG/SM tag: %s\" % original_SM]\n else:\n bam_header[\"CO\"].append(\"Original RG/SM tag: %s\" % original_SM)\n\n # write out header\n header_filename = self.as_temp(\"%s.header\" % in_bam)\n header_file = pysam.Samfile(header_filename, 'wh', header=bam_header)\n header_file.close()\n\n self.cmd(\"{samtools} reheader \\\n {header_file} \\\n {in_bam} > {out_bam}\"\n .format(\n samtools = self.cmds[\"samtools\"],\n in_bam=in_bam,\n out_bam=out_bam,\n header_file=header_filename,\n ),\n shell=True)\n\n self.rm(in_bam)", "def insert_column_headers_for_outlier_correction(\n self, data_df: pd.DataFrame, new_headers: List[str], filepath: str\n ) -> pd.DataFrame:\n\n if len(new_headers) != len(data_df.columns):\n difference = int(len(data_df.columns) - len(new_headers))\n bp_missing = int(abs(difference) / 3)\n if 
difference < 0:\n raise DataHeaderError(\n msg=f\"SIMBA ERROR: SimBA expects {len(new_headers)} columns of data inside the files within project_folder/csv/input_csv directory. However, within file {filepath} file, SimBA found {len(data_df.columns)} columns. Thus, there is {abs(difference)} missing data columns in the imported data, which may represent {int(bp_missing)} bodyparts if each body-part has an x, y and p value. Either revise the SimBA project pose-configuration with {bp_missing} less body-part, or include {bp_missing} more body-part in the imported data\"\n )\n else:\n raise DataHeaderError(\n msg=f\"SIMBA ERROR: SimBA expects {len(new_headers)} columns of data inside the files within project_folder/csv/input_csv directory. However, within file {filepath} file, SimBA found {len(data_df.columns)} columns. Thus, there is {abs(difference)} more data columns in the imported data than anticipated, which may represent {int(bp_missing)} bodyparts if each body-part has an x, y and p value. Either revise the SimBA project pose-configuration with {bp_missing} more body-part, or include {bp_missing} less body-part in the imported data\"\n )\n else:\n data_df.columns = new_headers\n return data_df", "def add_blank_data_column(self):\n\n header_title, ok_pressed = QInputDialog.getText(self, \"Add Column\", \"Enter heading for the column:\",\n QLineEdit.Normal, \"\")\n if ok_pressed and header_title != '':\n # print(header_title)\n\n default_value, set_default_pressed = QInputDialog.getText(self, \"Set Default Value\",\n \"Enter default value to set for column if any:\",\n QLineEdit.Normal, \"\")\n\n row_count = self.csv_data_table.rowCount()\n last_column_count = self.csv_data_table.columnCount()\n self.csv_data_table.insertColumn(last_column_count)\n for empty_row in range(0, row_count):\n item = QTableWidgetItem(default_value)\n self.csv_data_table.setItem(empty_row, last_column_count, item)\n\n # TODO: fix untraced bug present in show/hide columns\n self.column_headers.append(header_title)\n self.column_headers_all.append(header_title)\n # print(self.column_headers)\n # print(self.column_headers_all)\n self.csv_data_table.setHorizontalHeaderLabels(self.column_headers)", "def prepend_header(filename, header=None, drop=0):\n for no, line in enumerate(fileinput.input(filename, inplace=True)):\n # it's meaningless to set drop to -1, -2, ...\n if no == 0 and drop == 0:\n if header:\n print(header)\n print(line, end='')\n # replace\n elif no + 1 == drop:\n if header:\n print(header)\n elif no >= drop:\n print(line, end='')\n else:\n # no + 1 < drop\n continue", "def modify_header():\n\n print_debug_info()\n if not bool(int(vim.eval(\"g:BHModify\"))):\n return\n\n if not should_do_write():\n debug(\"should not write this buffer.\")\n return\n\n if not has_header():\n debug(\"This file has no header.\")\n return add_header()\n\n # only if the suffix is supported and we have a method to strip the comment.\n if not ((\"extract_comment_%s\" % SUFFIX) in globals() and suffix_is_supported()):\n return\n\n comment = globals()[\"extract_comment_%s\" % SUFFIX]()\n debug(\"comment: %s\" % str(comment))\n if not comment:\n debug(\"comment is empty\")\n return\n\n comment_dict = {}\n\n if len(comment) < 3:\n # Less than 3 lines of original comment, put them in Description part.\n comment_dict['Description'] = '\\n'.join(comment)\n else:\n comment_dict = read_comment(comment)\n if \"\" in comment_dict:\n del comment_dict[\"\"]\n new_header_dict = read_comment(globals().get(\"%s_header\" % 
SUFFIX).rstrip().splitlines())\n debug(\"new\")\n debug(set(new_header_dict.keys()))\n debug(set(comment_dict.keys()))\n debug(\"end\")\n if not set(new_header_dict.keys()) == set(comment_dict.keys()):\n return prepend_header(render_header(comment_dict))\n else:\n debug(\"do not modify header since we already have the same header.\")", "def reformat_csv_header(self, path, train_file, test_file):\n\n \"\"\"\n \"id\",\"comment_text\",\"toxic\",\"severe_toxic\",\"obscene\",\"threat\",\"insult\",\"identity_hate\"\n \"\"\"\n\n train = pd.read_csv(os.path.join(path, train_file))\n test = pd.read_csv(os.path.join(path, test_file))\n train = train.drop('id', axis=1)\n test = test.drop('id', axis=1)\n for label in [\"jobflag\"]:\n test[label] = pd.Series(0, index=test.index)\n temp_path = os.path.join(path, \"temp\")\n if not os.path.isdir(temp_path):\n os.mkdir(temp_path)\n train.to_csv(os.path.join(temp_path, train_file),\n index=False, header=False)\n test.to_csv(os.path.join(temp_path, test_file),\n index=False, header=False)\n return temp_path", "def SetColumn(self, column, colInfo):\r\n\r\n self._header_win.SetColumn(column, colInfo)\r\n self._header_win.Refresh()", "def remove_head_line(self, gtfs_file, path):\n out_list = []\n header = GtfsHeader.return_header(self, gtfs_file).strip()\n in_file = os.path.join(os.path.expanduser(path), '{}.tmp'.format(gtfs_file))\n\n lines = open(in_file).readlines()\n cnt = 0\n for line in lines:\n if header in line:\n cnt += 1\n print('>>> Found header {} in {}.'.format(cnt, gtfs_file))\n lines.remove(line)\n # out_list.append(header.strip())\n\n for line in lines:\n out_list.append(line.strip())\n out_file = in_file\n\n f = open(out_file, 'w')\n for line in out_list:\n f.write('{}\\n'.format(line.strip()))\n f.close()", "def header(self):\n ...", "def header(self, cols, parent_row):\n out = []\n for col in cols:\n if col == 'gau_id':\n out.append(self.name_for('Geographies', parent_row['geography_id']))\n elif col == 'oth_1_id':\n out.append(self.name_for('OtherIndexes', parent_row['other_index_1_id']))\n elif col == 'oth_2_id':\n out.append(self.name_for('OtherIndexes', parent_row['other_index_2_id']))\n else:\n out.append(col)\n return out", "def _add_directive_header_no_object_base(self, *args, **kwargs):\n self.add_line = _add_line_no_object_base.__get__(self)\n\n result = add_directive_header(self, *args, **kwargs)\n\n del self.add_line\n\n return result", "def WriteHeader(self):\n return", "def _addStatsHeadersToMatrix(self, m):\n\n atoz = \"JKLMNOPQRSTUVWXYZABCDEFGHI\"\n\n counter = 0\n\n for col in m.TopAxis.DataMembers:\n if counter < 26:\n logicalletter = str(atoz[counter])\n col.MemberSigTestHeading = logicalletter\n counter += 1\n else:\n counter = 0", "def test_remove_columns(self):\n table = Table('table1', key=['col1', 'col2'])[\n Column('col1'),\n Column('col2'),\n Column('col3'),\n Column('col4'),\n ]\n\n table.remove_columns(('col2', 'col3'))\n\n self.assertEqual(2, len(table.columns))\n self.assertEqual('col1', table.columns[0].name)\n self.assertEqual('col4', table.columns[1].name)\n self.assertEqual([], table.key)", "def header(self, hdata):\n self = self\n file = open(\"imdb_output.csv\", \"w\")\n file.write(str(\",\".join(hdata)) + \"\\n\")", "def remove_column(self, pos, labels=\"REMOVE\"):\n MutableAlignment.remove_column(self, pos)\n if labels == \"RESET\":\n self._reset_col_names()\n elif labels == \"REMOVE\":\n self._col_labels = self._col_labels[:pos] + \\\n self._col_labels[pos + 1:]", "def add_header(self, header_name):\n 
self.check_header(header_name)\n if header_name not in self.header:\n self.header.append(header_name)\n return", "def reorder_columns(df,first_cols=['']):\n\n last_cols = [col for col in df.columns if col not in first_cols]\n df = df[first_cols+last_cols]\n return(df)", "def writeHeader(self,header):\n pass", "def finalize(self, col):\n\t\traise NotImplementedError()", "def test_negative_index(self):\n # TODO: change Exception\n with self.assertRaises(Exception):\n self.test_table.change_header(Path=-1)", "def insert_header_reference(self, header, reffile):\n if self._rmap_update_headers:\n # Generate variations on header as needed to emulate header \"pre-conditioning\" and fall back scenarios.\n for hdr in self._rmap_update_headers(self, header):\n new = self.insert(hdr, reffile)\n else:\n # almost all instruments/types do this.\n new = self.insert(header, reffile)\n return new", "def add_post_join_computed_column(self, computed_column):\n self.obj_payload[\"computedColumns\"].append(computed_column)", "def remove_header(self, name, value=None):\r\n\r\n found_it = 0\r\n\r\n # Remove things from the old dict as well\r\n if (name in self.reply_headers and\r\n (value is None or\r\n self.reply_headers[name] == value)):\r\n del self.reply_headers[name]\r\n found_it = 1\r\n\r\n\r\n removed_headers = []\r\n if not value is None:\r\n if (name, value) in self.__reply_header_list:\r\n removed_headers = [(name, value)]\r\n found_it = 1\r\n else:\r\n for h in self.__reply_header_list:\r\n if h[0] == name:\r\n removed_headers.append(h)\r\n found_it = 1\r\n\r\n if not found_it:\r\n if value is None:\r\n search_value = \"%s\" % name\r\n else:\r\n search_value = \"%s: %s\" % (name, value)\r\n\r\n raise LookupError(\"Header '%s' not found\" % search_value)\r\n\r\n for h in removed_headers:\r\n self.__reply_header_list.remove(h)", "def set_headers(self,executer, tree, cursor, table, columns_size):\n\n # Getting headers\n headers = executer.get_columns(table, cursor)\n tree[\"columns\"] = headers\n\n # Setting width to all column headers basing on columns amount.\n set_width = int(self.column_length_configurator/len(headers))\n\n\n # Setting columns width and headers\n for column in headers:\n tree.column(column, width=set_width,minwidth=self.min_width)\n tree.heading(column, text=column)", "def removekwd(header, kwd):\n if kwd in header.keys():\n header.remove(kwd)\n return", "def add_column(self, tap_column):\r\n self.__columns.append(tap_column)", "def format_report_header(self):", "def remove_columns ( infilename, outfilename, cols_to_remove ):\n xcols = cols_to_remove\n xcols.sort()\n xcols.reverse()\n \n reader = csv.reader( open( infilename, 'rt' ), quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n writer = csv.writer( open( outfilename, 'wb' ), quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\n for row in reader:\n vals = row\n for x in xcols :\n vals.pop( x )\n writer.writerow( vals )", "def write_header_line(bag, output_file, topic_name):\n header_column_names = []\n\n \"\"\" Use the first message from a topic to build the header line. 
Note that this\n assumes the first message has all of the fields fully defined\n \"\"\"\n for _, msg, _ in bag.read_messages(topics=topic_name):\n get_field_names('', msg, header_column_names)\n break\n\n \"\"\" Alphabetize and write the column names to the output file, minus the leading underscore \"\"\"\n header_column_names.sort()\n trimmed_names = [col[1:] for col in header_column_names]\n header_line = ','.join(trimmed_names) + '\\n'\n output_file.write(header_line)\n\n return header_column_names", "def _rearrange_columns(self, df):\n if self.all_columns is None:\n content_columns = [c for c in df.columns if not c.startswith(\"_\")]\n indicator_columns = [\"__in_{}\".format(t) for t in self.table_names\n ] if self.add_full_join_indicators else []\n fanout_columns = _get_fanout_columns(\n self.table_info) if self.add_full_join_fanouts else []\n self.all_columns = content_columns + indicator_columns + fanout_columns\n df = df[self.all_columns]\n if not self.disambiguate_column_names:\n df.columns = [\n c if c.startswith(\"_\") else c.split(\":\")[1] for c in df.columns\n ]\n return df", "def OnPaint(self, event):\r\n \r\n if self._buffered:\r\n dc = wx.BufferedPaintDC(self)\r\n else:\r\n dc = wx.PaintDC(self)\r\n \r\n self.PrepareDC(dc)\r\n self.AdjustDC(dc)\r\n\r\n x = 0\r\n\r\n # width and height of the entire header window\r\n w, h = self.GetClientSize()\r\n w, dummy = self._owner.CalcUnscrolledPosition(w, 0)\r\n dc.SetBackgroundMode(wx.TRANSPARENT)\r\n\r\n numColumns = self.GetColumnCount()\r\n \r\n for i in xrange(numColumns):\r\n\r\n if x >= w:\r\n break\r\n \r\n if not self.IsColumnShown(i):\r\n continue # do next column if not shown\r\n\r\n params = wx.HeaderButtonParams()\r\n\r\n column = self.GetColumn(i)\r\n params.m_labelColour = column.GetColour()\r\n params.m_labelFont = column.GetFont()\r\n\r\n wCol = column.GetWidth()\r\n flags = 0\r\n rect = wx.Rect(x, 0, wCol, h)\r\n x += wCol\r\n\r\n if i == self._hotTrackCol:\r\n flags |= wx.CONTROL_CURRENT\r\n \r\n params.m_labelText = column.GetText()\r\n params.m_labelAlignment = column.GetAlignment()\r\n\r\n image = column.GetImage()\r\n imageList = self._owner.GetImageList()\r\n\r\n if image != -1 and imageList:\r\n params.m_labelBitmap = imageList.GetBitmap(image)\r\n\r\n if self._headerCustomRenderer != None:\r\n self._headerCustomRenderer.DrawHeaderButton(dc, rect, flags, params)\r\n else:\r\n wx.RendererNative.Get().DrawHeaderButton(self, dc, rect, flags,\r\n wx.HDR_SORT_ICON_NONE, params)\r\n \r\n # Fill up any unused space to the right of the columns\r\n if x < w:\r\n rect = wx.Rect(x, 0, w-x, h)\r\n if self._headerCustomRenderer != None:\r\n self._headerCustomRenderer.DrawHeaderButton(dc, rect)\r\n else:\r\n wx.RendererNative.Get().DrawHeaderButton(self, dc, rect)" ]
[ "0.71142775", "0.6889686", "0.65644413", "0.6336859", "0.608312", "0.60671866", "0.6046899", "0.6015363", "0.5942229", "0.58050436", "0.5728618", "0.57080615", "0.5686853", "0.5644749", "0.55979675", "0.5594624", "0.5594624", "0.5585846", "0.5558042", "0.5540971", "0.55384934", "0.54837596", "0.5465062", "0.5460001", "0.5458509", "0.5451826", "0.5445517", "0.541178", "0.5411285", "0.5401584", "0.539385", "0.53870094", "0.53860945", "0.53831947", "0.533575", "0.53356755", "0.5293002", "0.5277476", "0.5266694", "0.52584785", "0.525601", "0.52540994", "0.52400815", "0.52322084", "0.5221584", "0.5218455", "0.5210153", "0.5204357", "0.52043283", "0.51938534", "0.51601946", "0.51554567", "0.5152932", "0.51486534", "0.5118421", "0.5110593", "0.5104592", "0.5085248", "0.50788796", "0.5065402", "0.50617856", "0.50597596", "0.50568384", "0.5054209", "0.5049796", "0.5041277", "0.5036726", "0.5033394", "0.5033166", "0.50247437", "0.50234723", "0.5016476", "0.49999768", "0.4983714", "0.4976832", "0.49753463", "0.497459", "0.49618348", "0.49542505", "0.49536222", "0.49530077", "0.49488437", "0.4947173", "0.49277034", "0.49268717", "0.48915595", "0.4878379", "0.4875588", "0.486942", "0.48664823", "0.4865367", "0.48638704", "0.48630366", "0.48574907", "0.48572588", "0.48555133", "0.4850113", "0.48452514", "0.48433608", "0.48388752" ]
0.73550874
0
Find the column that has the minimum number of cells in it to minimize branching. Returning a column with 0 cells in it is OK; this gets dealt with in the solving loop
def get_minimum_column(self): min_col = self.root.right current_col = min_col.right while current_col != self.root: if current_col.sum < min_col.sum: min_col = current_col # Move on to the next column current_col = current_col.right return min_col
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def min_reduce_nb(col, a, *args):\n return np.nanmin(a)", "def find_smallest(self):\n # add max value to covered rows and columns to ignore the covered cells\n maxval = self.C.max()\n C = self.C + self.row_cover[:, np.newaxis]*maxval\n C += self.col_cover*maxval\n # return the smallest value\n return C.min()", "def minimize(self, grid):\n self.deep += 1\n cells = grid.getAvailableCells()\n if cells == [] or self.deep > self.maxDeep:\n self.deep -= 1\n return self.evaluate(grid)\n\n ab_value = MiniMaxAlgorithm.infinity\n for cell in cells:\n for cell_value in self.possibleNewTiles:\n next_grid = grid.clone()\n next_grid.setCellValue(cell, cell_value)\n next_value = self.maximize(next_grid)\n ab_value = min(ab_value, next_value)\n if ab_value <= next_value:\n self.deep -= 1\n return ab_value\n\n self.deep -= 1\n return ab_value", "def cell_cost(row_count, col_count):\r\n while row_count < n_rows:\r\n if col_count >= n_cols:\r\n row_count += 1\r\n col_count = 0\r\n else:\r\n cost = grid[row_count][col_count]\r\n if row_count != 0:\r\n values = []\r\n for i in range(-1, 2):\r\n if col_count + i > -1 and col_count + i < n_cols:\r\n values.append(grid[row_count - 1][col_count + i])\r\n cost += min(values)\r\n grid[row_count][col_count] = cost\r\n col_count += 1", "def find_min(self):\n\n\n min_x = 1000\n min_y = 1000\n k = len(self.__col_lista)\n for i in range(k):\n x, y = self.__col_lista[i]\n if x < min_x:\n min_x = x\n if y < min_y:\n min_y = y\n return min_x, min_y", "def find_first_free_cell(board, picked_column):\n for row in reversed(range(len(board))):\n if board[row][picked_column] == 0:\n return row", "def get_smallest_h_cost_unvisited_node(self):\n node_list = []\n for column in self.grid:\n for node in column:\n if node.pos in self.unvisited_pos:\n node_list.append(node)\n return min(node_list, key=lambda x: x.h_cost)", "def minimum_f_cell(self):\n return sorted(self.open_cells,key = lambda cell: cell.f)[0]", "def min_number(self, rows: List[Row], column: NumberColumn) -> Number:\n cell_values = [row.values[column.name] for row in rows if row.values[column.name] is not None]\n if not cell_values:\n return 0.0 # type: ignore\n if not all([isinstance(value, Number) for value in cell_values]):\n raise ExecutionError(f\"Invalid values for number selection function: {cell_values}\")\n return min(cell_values) # type: ignore", "def fit(self, col):\n self.find_max_min(col)", "def find_smallest(num_vars):\n for x in range(10):\n if num_vars <= 2**x:\n return x", "def choose_cell_to_assign(self):\r\n min_domain = 10\r\n max_degree = -1\r\n chosen_row = None\r\n chosen_col = None\r\n for row in range(9):\r\n for col in range(9):\r\n if self.puzzle[row][col] == 0:\r\n domain_size = len(self.grid[row][col].domain)\r\n if domain_size < min_domain:\r\n min_domain = domain_size\r\n chosen_row = row\r\n chosen_col = col\r\n elif domain_size == min_domain:\r\n degree = len(self.grid[row][col].neighbors)\r\n if degree > max_degree:\r\n max_degree = degree\r\n chosen_row = row\r\n chosen_col = col\r\n return self.grid[chosen_row][chosen_col]", "def get_min(self):\n min_value= self.df[self.col_name].min()\n return min_value", "def get_nearest_col(self):\n return (self.rect.left - (self.screen.get_width() // 5)) // self.maze.block_size", "def cell_cost(row, col):\r\n if row < 0 or row >= n_rows or col < 0 or col >= n_cols:\r\n return INFINITY # Off grid cells are treated as infinities\r\n elif cashe[row][col] is None:\r\n cost = grid[row][col]\r\n if row != 0:\r\n doom = [cell_cost(row - 1, col + 
delta_col) for delta_col in range(-1, 2)]\r\n cost += min(doom)\r\n cashe[row][col] = cost\r\n return cashe[row][col]\r\n else:\r\n return cashe[row][col]", "def get_column_with_min_value(data):\n if not isinstance(data, pd.DataFrame):\n raise TypeError('Invalid input type: type(data) = {}'.format(type(data)))\n min_col_name = pd.Series(index=data.index)\n for idx, row in data.iterrows():\n min_col_name[idx] = row.argmin()\n return min_col_name", "def customMin(x,mergedSegments, minValidData = 0.8):\n if mergedSegments.loc[x].nonNullProp >= minValidData : \n return np.inf\n\n idx = min(criteriaMatrix.get(x),\n key=lambda y : np.inf if y not in inversedIndex.values\n else criteriaMatrix.get(x).get(y)\n )\n return np.inf if idx not in inversedIndex.values else criteriaMatrix.get(x).get(idx)", "def get_smallest_f_cost_unvisited_node(self):\n node_list = []\n for column in self.grid:\n for node in column:\n if node.pos in self.unvisited_pos:\n node_list.append(node)\n min_f_cost_node = min(node_list, key=lambda x: x.g_cost)\n min_f_cost_list = []\n for column in self.grid:\n for node in column:\n if (\n node.f_cost == min_f_cost_node.f_cost\n and node.pos in self.unvisited_pos\n ):\n min_f_cost_list.append(node)\n return min_f_cost_node, len(min_f_cost_list)", "def find_job_smallest_colset():\r\n smallest_colset_value = None\r\n smallest_colset_key = \"\"\r\n smallest_colset_length = 99999\r\n\r\n # iterate over all tasks and find smallest\r\n for key in r.scan_iter():\r\n value = r.get(key).decode(\"utf-8\")\r\n task = json.loads(value)\r\n colset_length = len(task[\"columns\"])\r\n\r\n if colset_length < smallest_colset_length:\r\n smallest_colset_value = task\r\n smallest_colset_key = key\r\n smallest_colset_length = colset_length\r\n\r\n return smallest_colset_value", "def argmin_reduce_nb(col, a, *args):\n a = np.copy(a)\n mask = np.isnan(a)\n if np.all(mask):\n raise ValueError(\"All-NaN slice encountered\")\n a[mask] = np.inf\n return np.argmin(a)", "def known_mines(self):\n \n if len(self.cells) == self.count:\n return self.cells", "def minimum_spanning_arborescence(sol):", "def minim(self) -> int:\n\t\treturn 2", "def excel_min_col(self, sheet_name):\n return self.wb[sheet_name].min_column", "def expanding_min_nb(a, minp=1):\n out = np.empty_like(a, dtype=np.float_)\n for col in range(a.shape[1]):\n out[:, col] = expanding_min_1d_nb(a[:, col], minp=minp)\n return out", "def misplaced_heuristic(state):\n msp_h = 0\n size = len(state)\n for i in range (size):\n for j in range (size):\n if state[i][j] == 0:\n continue\n elif state[i][j] != i*size + j:\n msp_h += 1\n return msp_h", "def find_min_distance():\n return np.argmin(d)", "def localmin(x):\r\n return (np.diff(np.sign(np.diff(x))) > 0).nonzero()[0] + 1", "def get_min_filled_threshold(df):\n percentage = 0.1\n return df.shape[0] * percentage", "def localmin(x):\n return (np.diff(np.sign(np.diff(x))) > 0).nonzero()[0] + 1", "def nanmin_nb(a):\n out = np.empty(a.shape[1], dtype=np.float_)\n for col in range(a.shape[1]):\n out[col] = np.nanmin(a[:, col])\n return out", "def min_column(in_f, column=0):\n minimum = None\n for line in in_f:\n spl = line.split()\n if len(spl) > column:\n value = float(line.split()[column])\n if minimum is None or value < minimum:\n minimum = value\n yield \"%f\\n\" % minimum", "def smallest_inf_norm_mpmath(V):\n minc = mpmath.mpf(100)\n mi = 0\n for j in range(V.cols):\n maxr = mpmath.mpf(0)\n for k in range(V.rows):\n t = abs(V[k, j])\n if(t > maxr):\n maxr = t\n if(maxr < minc):\n minc = maxr\n mi = j\n 
return minc", "def nearest_min(dist_matrix):\n # much faster than np.where\n i, j = np.unravel_index(\n np.argmin(dist_matrix), \n dims=dist_matrix.shape\n )\n return i, j", "def _find_min_pair(pandas_matrix):\n numpy_matrix = pandas_matrix.values\n mins = np.where(numpy_matrix == np.nanmin(numpy_matrix))\n min_col_idx = mins[0][0]\n min_row_idx = mins[1][0]\n (min_col, min_row) = (pandas_matrix.index[min_col_idx], \n pandas_matrix.columns[min_row_idx])\n\n return (min_col, min_row)", "def minimumCostPathOnArray(arr):\n arr_mask = np.ones(np.array(arr).shape)\n\n rows = len(arr)\n cols = len(arr[0])\n\n for i in range(1,rows):\n arr[i][0] = arr[i][0] + min(arr[i-1][0], arr[i-1][1])\n for j in range(1, cols-1):\n arr[i][j] = arr[i][j] + min(arr[i-1][j-1], arr[i-1][j], arr[i-1][j+1])\n arr[i][cols-1] = arr[i][cols-1] + min(arr[i-1][cols-2], arr[i-1][cols-1])\n\n min_index = [0]*rows\n min_cost = min(arr[-1])\n for k in range(1,cols-1):\n if arr[-1][k] == min_cost:\n min_index[-1] = k\n\n for i in range(rows-2, -1, -1):\n j = min_index[i+1]\n lower_bound = 0\n upper_bound = 1 # Bounds for the case j=1\n \n if j==cols-1:\n lower_bound = cols-2\n upper_bound = cols-1\n elif j>0:\n lower_bound = j-1\n upper_bound = j+1\n \n min_cost = min(arr[i][lower_bound:upper_bound+1])\n for k in range(lower_bound, upper_bound+1):\n if arr[i][k] == min_cost:\n min_index[i] = k\n\n\n path = []\n for i in range(0, rows):\n arr_mask[i,0:min_index[i]] = np.zeros(min_index[i])\n path.append((i+1, min_index[i]+1))\n # print(\"Minimum cost path is: \")\n # print(path)\n return arr_mask", "def least_constraining_values(self, cell):\r\n vals = {}\r\n for val in cell.domain:\r\n vals[val] = 0\r\n for i, j in cell.neighbors:\r\n if val in self.grid[i][j].domain:\r\n vals[val] += 1\r\n x = sorted(vals.items(), key=lambda i: i[1])\r\n res = []\r\n for i in x:\r\n res.append(i[0])\r\n return res", "def grid_cost(grid):\r\n n_rows = len(grid)\r\n n_cols = len(grid[0])\r\n row_count = 1\r\n col_count = 0\r\n \r\n def cell_cost(row_count, col_count):\r\n \"\"\"The cost of getting to a given cell in the current grid.\"\"\"\r\n while row_count < n_rows:\r\n if col_count >= n_cols:\r\n row_count += 1\r\n col_count = 0\r\n else:\r\n cost = grid[row_count][col_count]\r\n if row_count != 0:\r\n values = []\r\n for i in range(-1, 2):\r\n if col_count + i > -1 and col_count + i < n_cols:\r\n values.append(grid[row_count - 1][col_count + i])\r\n cost += min(values)\r\n grid[row_count][col_count] = cost\r\n col_count += 1\r\n \r\n cell_cost(row_count, col_count)\r\n new_list = [grid[n_rows-1][x] for x in range(n_cols)]\r\n return min(new_list)", "def argmin(self, rows: List[Row], column: ComparableColumn) -> List[Row]:\n if not rows:\n return []\n value_row_pairs = [(row.values[column.name], row) for row in rows if row.values[column.name] is not None]\n if not value_row_pairs:\n return []\n # Returns a list containing the row with the max cell value.\n return [sorted(value_row_pairs, key=lambda x: x[0])[0][1]]", "def shortestDistance(self, grid):\n # return self.house_oriented_TLE(grid)\n # One axis\n row_count = [sum(row) for row in grid]\n col_count = [0]* len(grid[0])\n row_dist = [0]* len(grid)\n col_dist = [0]* len(grid[0])\n output = sys.maxsize\n for i in range(len(grid)): \n for j in range(len(grid[0])):\n col_count[j] += grid[i][j]\n \n for index_p in range(len(row_count)):\n for index_h in range(len(row_count)):\n row_dist[index_p] += abs(index_h - index_p) * row_count[index_h]\n \n for index_p in range(len(col_count)):\n 
for index_h in range(len(col_count)):\n col_dist[index_p] += abs(index_h - index_p) * col_count[index_h]\n \n # print(row_count)\n # print(col_count)\n # print(row_dist)\n # print(col_dist)\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == 1:\n continue\n output = min(output, row_dist[i] + col_dist[j])\n return output", "def find_basin(self, s):\n \n assert s.size==self.n\n atMin = False\n thisState = s.astype(np.int8)\n\n while not atMin: \n dE = self.neighbor_dE(thisState)\n if np.any( dE<0 ):\n ix = dE.argmin()\n thisState[ix] *= -1\n else:\n atMin = True\n return thisState", "def min_index(self):\n return self.__pq[1]", "def enforce_cell_constraints(row_candidates, col_candidates, column_size, row_size, max_iter=1):\n\n rows = copy.deepcopy(row_candidates)\n cols = copy.deepcopy(col_candidates)\n\n step = 0\n while True:\n step += 1\n\n _rows = copy.deepcopy(rows)\n _cols = copy.deepcopy(cols)\n\n for indices, row_idx, cell_value in find_critical_cells(rows):\n for col_idx, each in zip(indices, cols[indices]):\n X = np.array(each)\n filtered = X[X[:, column_size - 1 - row_idx] == cell_value]\n if len(filtered) == 0:\n raise InfeasibleStateException\n\n cols[col_idx] = filtered\n\n for indices, col_idx, cell_value in find_critical_cells(cols):\n row_indices = column_size - 1 - indices\n for row_idx, each in zip(row_indices, rows[row_indices]):\n row_candidates = np.array(each)\n filtered = row_candidates[row_candidates[:, col_idx] == cell_value]\n if len(filtered) == 0:\n raise InfeasibleStateException\n\n rows[row_idx] = filtered\n\n row_unchanged = np.all([np.equal(len(x), len(y)) for x, y in zip(_rows, rows)])\n col_unchanged = np.all([np.equal(len(x), len(y)) for x, y in zip(_cols, cols)])\n\n if (row_unchanged and col_unchanged) or (step == max_iter):\n return rows, cols", "def lower_row_invariant(self, target_row, target_col):\r\n # Tile zero is positioned at (i,j).\r\n # All tiles in rows i+1 or below are positioned at their solved location.\r\n # All tiles in row i to the right of position (i,j) are positioned at their solved location.\r\n solved_lower = False\r\n solved_grid = [[col + self.get_width() * row\r\n for col in range(self.get_width())]\r\n for row in range(self._height)]\r\n if self._grid[target_row][target_col] == 0:\r\n solved_lower = True\r\n \r\n for row in range(target_row + 1, self._height):\r\n for col in range(self._width):\r\n if self._grid[row][col] != solved_grid[row][col]:\r\n solved_lower = False\r\n \r\n for col in range(target_col + 1, self._width):\r\n if self._grid[target_row][col] != solved_grid[target_row][col]:\r\n solved_lower = False\r\n \r\n return solved_lower", "def fn(i, j):\n if i == 0 and j == 0: return grid[i][j]\n if i < 0 or j < 0: return float(\"inf\")\n return grid[i][j] + min(fn(i-1, j), fn(i, j-1))", "def fn(i, j):\n if i == 0 and j == 0: return grid[i][j]\n if i < 0 or j < 0: return float(\"inf\")\n return grid[i][j] + min(fn(i-1, j), fn(i, j-1))", "def OptimalWarpingPath( self, colStart=None ):\n rows = len(self.D)\n cols = len(self.D[0])\n n = rows-1\n m = cols-1\n if colStart:\n m=colStart\n path = [(n,m)]\n while n > 0 or m > 0:\n if n == 0 :\n path.insert(0,(0,m-1))\n m -= 1\n elif m == 0 :\n path.insert(0,(n-1,0))\n n -= 1\n else:\n minStep = min( self.D[n-1][m-1], self.D[n-1][m], self.D[n][m-1] )\n if self.D[n-1][m-1] == minStep:\n path.insert(0,(n-1,m-1))\n n -= 1\n m -= 1\n elif self.D[n-1][m] == minStep:\n path.insert(0,(n-1,m))\n n -= 1\n else: # self.D[n][m-1] == min:\n 
path.insert(0,(n,m-1))\n m -= 1\n return path, self.CostOfPath( path, self.D )", "def feasible(leaf, x):\n feasibleDim =[]\n try:\n atom = (leaf.root.ub - leaf.root.lb) / leaf.problem.discreteLevel\n except:\n atom = 0\n for i in range(len(leaf.lb)):\n if leaf.ub[i] - leaf.lb[i] >= x * atom[i]:\n feasibleDim.append(i)\n return feasibleDim", "def min(self):\n if 0 in type(self).flatten_shape(self.shape):\n raise ValueError(\"zero-size array has no minimum\")\n if self.isscalar():\n return self.defval\n # If not all blocks are set, then the tensor has an element of defval\n # somewhere.\n m = np.inf if self.is_full() else self.defval\n for v in self.sects.values():\n try:\n m = min(m, np.min(v))\n except ValueError:\n # This block was zero-size, and has no elements.\n pass\n return m", "def ensure_sparse_cols(self,max_density,remove_lowest=True):\n if max_density >= 1:\n max_nnz = int(max_density)\n else:\n max_nnz = int(max_density*self.shape[0])\n for j in range(self.shape[1]):\n col = self.fast_get_col(j)\n excess = col.nnz - max_nnz\n if excess > 0:\n if remove_lowest:\n zero_entries = np.argsort(col.data)[:excess]\n else:\n zero_entries = random.sample(range(col.nnz),excess)\n col.data[zero_entries] = 0\n self.fast_update_col(j,col.data)", "def minMoves(maze, x, y):\n\n def maze_guard():\n \"\"\"Guard function to block oversized dimensions\"\"\"\n cell_guard = all([1 <= len(row) <= 100 for row in maze])\n row_guard = 1 <= len(maze) <= 100\n return cell_guard and row_guard\n\n def walk_maze(finish):\n \"\"\"Walks the maze, finding the shortest path including all coins.\n Finishes when reach the coordenate finish, a tuple with row and\n column numbers\n \"\"\"\n i, j = (0, 0)\n result = -1\n weight = -1\n while nodes:\n i, j, path, coins = nodes.popleft()\n cell = maze[i][j]\n if (i, j) == finish:\n weight, result = check_result(coins, path, weight, result)\n elif cell != 1:\n adjacent_nodes(i, j, path, coins)\n\n return result\n\n def adjacent_nodes(i, j, path, coins):\n \"\"\"Adds the node in positions i, j, with its path added to\n accumulated path. 
The path is transformed into a binary\n number, i.e, 2 ** (i * n + j), being n the number of rows\n in the maze matrix.\n \"\"\"\n def neighbour(x, y):\n this_path = 2 ** (i * n + j)\n if not this_path & path:\n coin = coins + 1 if maze[i][j] == 2 else coins\n nodes.append((x, y, path + this_path, coin))\n\n coord = [(i + 1, j, i + 1 < n), (i - 1, j, i - 1 >= 0),\n (i, j + 1, j + 1 < m), (i, j - 1, j - 1 >= 0)]\n _ = [neighbour(x, y) for x, y, test in coord if test]\n\n if not maze_guard():\n return -1\n\n n = len(maze)\n m = len(maze[0])\n nodes = deque([(0, 0, 0, 0)])\n return walk_maze((x, y))", "def matrix_min(data):\n if is_SparseDataFrame(data):\n data = [np.min(data[col]) for col in data.columns]\n elif is_sparse_dataframe(data):\n data = [sparse_series_min(data[col]) for col in data.columns]\n elif isinstance(data, pd.DataFrame):\n data = np.min(data)\n elif isinstance(data, sparse.lil_matrix):\n data = [np.min(d) for d in data.data] + [0]\n elif isinstance(data, sparse.dok_matrix):\n data = list(data.values()) + [0]\n elif isinstance(data, sparse.dia_matrix):\n data = [np.min(data.data), 0]\n return np.min(data)", "def row1_invariant(self, target_col):\r\n solved_lower_right = False\r\n solved_grid = [[col + self.get_width() * row\r\n for col in range(self.get_width())]\r\n for row in range(self._height)]\r\n if self._grid[1][target_col] == 0:\r\n solved_lower_right = True\r\n \r\n for row in range(1 + 1, self._height):\r\n for col in range(self._width):\r\n if self._grid[row][col] != solved_grid[row][col]:\r\n solved_lower_right = False\r\n \r\n for row in range(0, 1):\r\n for col in range(target_col + 1, self._width):\r\n if self._grid[row][col] != solved_grid[row][col]:\r\n solved_lower_right = False\r\n \r\n return solved_lower_right", "def auxmin_cc_piece(x,k_ind,m_ind):\n \n # Adding new linear function as a last function:\n # The first line. 
If jk = 1 and k_ind = nomax, this is a new line, otherwise an old one.\n line_start=cfg.nfea*sum(cfg.jk[i] for i in range(k_ind))\n #print line_start,cfg.jk,k_ind,cfg.nomax-1,cfg.jk[k_ind], cfg.xprev,x\n if cfg.jk[k_ind]==1 and k_ind==cfg.nomax-1:\n #print \"hihu0\"\n f_cc=np.dot(x[0:cfg.nfea-1],cfg.a[m_ind,:cfg.nfea-1])+x[cfg.nfea-1]\n cfg.min_line[k_ind,m_ind] = 0 # a global variable to save the smallest value.\n return f_cc\n else:\n #print \"hihu1\",line_start,k_ind\n f_cc=np.dot(cfg.xprev[line_start:line_start+(cfg.nfea-1)],cfg.a[m_ind,:cfg.nfea-1])+cfg.xprev[line_start+cfg.nfea-1]\n cfg.min_line[k_ind,m_ind] = 0 # a global variable to save the smallest value.\n if cfg.jk[k_ind]==1:\n return f_cc\n \n # Next lines\n line_start += cfg.nfea\n for j in range(1,cfg.jk[k_ind]-1): # Everything but the first and last.\n f_tmp = np.dot(cfg.xprev[line_start:line_start+(cfg.nfea-1)],cfg.a[m_ind,:cfg.nfea-1])+cfg.xprev[line_start+cfg.nfea-1]\n \n # Minimum of lines\n if f_tmp <= f_cc:\n f_cc = f_tmp\n cfg.min_line[k_ind,m_ind] = j\n line_start += cfg.nfea\n \n # The last line.\n if k_ind==cfg.nomax-1:\n #print \"hihu3\"\n f_tmp = np.dot(x[0:cfg.nfea-1],cfg.a[m_ind,:cfg.nfea-1])+x[cfg.nfea-1]\n else: \n \n f_tmp = np.dot(cfg.xprev[line_start:line_start+(cfg.nfea-1)],cfg.a[m_ind,:cfg.nfea-1])+cfg.xprev[line_start+cfg.nfea-1]\n \n # Minimum of lines\n if f_tmp <= f_cc:\n f_cc = f_tmp\n cfg.min_line[k_ind,m_ind] = cfg.jk[k_ind]-1 \n\n return f_cc", "def minMax(df, column):\n df_result = df.copy()\n not_nulls = df[column].isnull() == False\n\n v = np.matrix(df_result.loc[not_nulls, column]) #.as_matrix()\n scaler = skpreprocess.MinMaxScaler()\n df_result.loc[not_nulls, column] = scaler.fit_transform(v.T)\n return df_result", "def findmin(h5file, pcoord_dim, fi, li):\n min_values = []\n for i in range(fi,li+1):\n i = str(i)\n iteration = \"iter_\" + str(numpy.char.zfill(i,8))\n pc = h5file['iterations'][iteration]['pcoord']\n minv = numpy.min(pc[:,-1,pcoord_dim-1])\n min_values.append(minv)\n minmin = numpy.min(min_values)\n print(min_values)\n print(minmin)\n nw = numpy.where(min_values<(minmin+minmin*0.0001))\n print(nw)\n iter_num = str((nw[0]+1)[0])\n \n wheretolook = \"iter_\" + str(numpy.char.zfill(iter_num,8))\n min_iter = h5file['iterations'][wheretolook]['pcoord'][:,-1,pcoord_dim-1]\n segmin = numpy.min(min_iter)\n nw2 = numpy.where(min_iter<(segmin+segmin*0.0001))\n seg_num = (nw2[0]+1)[0]\n print (\"Minimum pcoord value for dimension\",pcoord_dim,\"is:\",segmin) \n print (\"It is segment:\",seg_num,\"of iteration:\",iter_num)", "def solve(given: np.array) -> np.array:\n possible = np.full((9, 9, 9), True)\n mask = given > 0\n possible[mask, :] = False\n possible[mask, given[mask] - 1] = True\n\n # number of possibilities at each site, masking those already propagated\n # to avoid repetitive work. 
All masked == problem solved\n count = ma.array(possible.sum(axis=2), fill_value=1)\n\n # allocate upfront to as out parameter to np.equal\n # (ma.array because count is ma.array)\n where = ma.array(np.empty((9, 9), dtype=bool), fill_value=False)\n\n stack = [(possible, count)]\n while stack:\n node, count = stack.pop()\n unsolved = propagate(node, count, where)\n if unsolved == -1:\n continue\n if unsolved == 0:\n break\n # try all possibilities from cell with fewest > 1\n i, j = np.unravel_index(count.argmin(), count.shape)\n for k in np.flatnonzero(node[i, j, :]):\n node_copy, count_copy = node.copy(), count.copy()\n node_copy[i, j, :] = False\n node_copy[i, j, k] = True\n count_copy[i, j] = 1\n stack.append((node_copy, count_copy))\n\n i, j, k = node.nonzero()\n count[i, j] = k + 1\n return np.array(count)", "def _u_naught(self):\n adjusted_cost = self.c/self.a_csc.dot(np.ones(self.mrows))\n cost_matrix = adjusted_cost*self.a + np.amax(adjusted_cost)*(~self.a)\n return adjusted_cost[np.argmin(cost_matrix, axis=1)]", "def count_mines(row, col):\r\n total = 0\r\n for r,c in ((-1,-1),(-1,0),(-1,1),(0,-1),(0,1),(1,-1),(1,0),(1,1)):\r\n try:\r\n if mines[row+r][col+c] == 1:\r\n total += 1\r\n except KeyError:\r\n pass\r\n return total", "def minimize(self):\n pass", "def grid_cost(grid):\r\n cashe = [[None for x in range(len(grid[0]))] for y in range(len(grid))]\r\n n_rows = len(grid)\r\n n_cols = len(grid[0])\r\n \r\n def cell_cost(row, col):\r\n \"\"\"The cost of getting to a given cell in the current grid.\"\"\"\r\n if row < 0 or row >= n_rows or col < 0 or col >= n_cols:\r\n return INFINITY # Off grid cells are treated as infinities\r\n elif cashe[row][col] is None:\r\n cost = grid[row][col]\r\n if row != 0:\r\n doom = [cell_cost(row - 1, col + delta_col) for delta_col in range(-1, 2)]\r\n cost += min(doom)\r\n cashe[row][col] = cost\r\n return cashe[row][col]\r\n else:\r\n return cashe[row][col]\r\n \r\n best = min(cell_cost(n_rows - 1, col) for col in range(n_cols))\r\n return best", "def replace_zeros(self):\n\n min_c = np.array(self.map[self.column])\n self.map.loc[self.map[self.column]==0, self.column] = np.min(min_c[np.nonzero(min_c)])", "def find_optimal_dimensions_wrong(self):\n\n min_product_index = 0\n min_product = self.ans[0][0]*self.ans[0][1]\n\n for i in range(0,len(self.ans),1):\n if self.ans[i][0]*self.ans[i][1] < min_product or min_product == 0:\n min_product = self.ans[i][0]*self.ans[i][1]\n min_product_index = i\n\n print(i, \":\", self.ans[min_product_index])\n\n return self.ans[min_product_index]", "def neirest_neighbour(business, cells):\n array = cells.get_neighbours(business, num=1)\n neighbours = pd.DataFrame(array).set_index('index')\n index = neighbours['distance'].idxmin()\n return neighbours.loc[index]", "def constraint_col_coef(n_col, n_row):\n\n all_rows = []\n for i in range(n_col):\n matrix_values = np.zeros((n_row, n_col), dtype=int)\n col_offer = np.ones(n_row, dtype=int)\n matrix_values[:, i] = col_offer\n all_rows.append(matrix_values.flatten())\n\n cols_constraints = np.stack(all_rows)\n\n return cols_constraints", "def firstEmptyCell(board):\r\n for i in range(9):\r\n for j in range(9):\r\n if board[i][j] == 0:\r\n return (i, j) # row, col\r\n return None", "def lower_row_invariant(self, target_row, target_col):\n # replace with your code\n if self.get_number(target_row, target_col) != 0:\n print 'Error 1: Current number is not 0'\n return False\n current = 0\n for row in range(target_row + 1, self.get_height()):\n if target_col == self.get_width() 
- 1:\n current = self._grid[row][0]\n else:\n current = self._grid[row - 1][-1] + 1\n column = self._grid[row]\n for grid in column:\n if grid != current:\n print 'Error 2'\n return False\n current += 1\n if target_col != self.get_width() - 1:\n current = self._grid[target_row][target_col + 1]\n for grid in self._grid[target_row][target_col + 1:]:\n if grid != current:\n print 'Error 3'\n return False\n current += 1\n return True", "def expanding_min_1d_nb(a, minp=1):\n out = np.empty_like(a, dtype=np.float_)\n minv = a[0]\n cnt = 0\n for i in range(a.shape[0]):\n if np.isnan(minv) or a[i] < minv:\n minv = a[i]\n if ~np.isnan(a[i]):\n cnt += 1\n if cnt < minp:\n out[i] = np.nan\n else:\n out[i] = minv\n return out", "def fn(i, j):\n if i < 0 or j < 0 or matrix[i][j] == \"0\": return 0\n return 1 + min(fn(i-1, j-1), fn(i-1, j), fn(i, j-1))", "def optimal_min(board):\n if terminal(board):\n return [None, utility(board)]\n\n available_actions = list(actions(board))\n\n # Naive baseline comparison is positive infinity\n global_optimum = [None, math.inf]\n\n for action in available_actions:\n # Anticipates optimal adversarial moves.\n local_optimum = optimal_max(result(board, action))\n\n if global_optimum[1] >= local_optimum[1]:\n global_optimum = [action, local_optimum[1]]\n\n return global_optimum", "def get_best_clique(self):\n\t\treturn [i+1 for i in range(self._size) if self._globalMinimumState[i] == 1]", "def find_min_path(s, t, dist):\n\n rows = len(dist) - 1\n cols = len(dist[0]) - 1\n col = cols\n row = rows\n pos_str = \"Position: (row={} col={}) -> (row={} col={})\"\n cst_str = \"Cost: {}\"\n prev_row = row\n prev_col = col\n\n # init sparse path matrix\n sparse_path = [[\" \" for x in range(cols + 1)] for x in range(rows + 1)]\n sparse_path[0][0] = \"0\"\n\n # start with operation at (rows, cols) and work backwards\n sparse_path[rows][cols] = dist[rows][cols]\n\n if verbose == 2:\n print()\n print(\"Initial Minimum Path Matrix:\")\n print_matrix(s, t, sparse_path)\n\n while True:\n\n # bail out if we are in the corner\n if row == 0 and col == 0:\n break\n\n # if we are not at a matrix boundary\n if row != 0 and col != 0: # if at left edge or top row, cannot move diagonally\n\n # diagonal\n if (dist[row - 1][col - 1] == min(dist[row - 1][col],\n dist[row][col - 1],\n dist[row - 1][col - 1])) and (dist[row - 1][col - 1] == dist[row][col] or dist[row - 1][col - 1] == dist[row][col] - 1):\n sparse_path[row - 1][col - 1] = dist[row - 1][col - 1]\n temp_cost = dist[row - 1][col - 1]\n\n # move current cell\n prev_row = row\n prev_col = col\n if col > 0:\n col -= 1\n if row > 0:\n row -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # left\n elif dist[row][col - 1] <= dist[row][col]:\n sparse_path[row][col - 1] = dist[row][col - 1]\n temp_cost = dist[row][col - 1]\n\n # move current cell\n prev_row = row\n prev_col = col\n if col > 0:\n col -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # above\n else:\n sparse_path[row - 1][col] = dist[row - 1][col]\n temp_cost = dist[row - 1][col]\n\n # move current cell\n prev_row = row\n prev_col = col\n if row > 0:\n row -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # if at matrix edge, can only move up\n elif col == 0:\n # above\n sparse_path[row - 
1][col] = dist[row - 1][col]\n temp_cost = dist[row - 1][col]\n\n # move current cell\n prev_row = row\n prev_col = col\n if row > 0:\n row -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # must be at row boundary, can only move left\n else:\n # left\n if dist[row][col - 1] <= dist[row][col]:\n sparse_path[row][col - 1] = dist[row][col - 1]\n temp_cost = dist[row][col - 1]\n\n # move current cell\n prev_row = row\n prev_col = col\n if col > 0:\n col -= 1\n\n if verbose == 2:\n print(pos_str.format(str(prev_row), str(prev_col), str(row), str(col)))\n print(cst_str.format(temp_cost))\n print()\n\n # print matrix\n if verbose == 2:\n print_matrix(s, t, sparse_path)\n\n return sparse_path", "def row0_invariant(self, target_col):\r\n \r\n solved_lower_right = False\r\n solved_grid = [[col + self.get_width() * row\r\n for col in range(self.get_width())]\r\n for row in range(self._height)]\r\n if self._grid[0][target_col] == 0:\r\n solved_lower_right = True\r\n \r\n for row in range(1 + 1, self._height):\r\n for col in range(self._width):\r\n if self._grid[row][col] != solved_grid[row][col]:\r\n solved_lower_right = False\r\n \r\n for row in range(0, 1):\r\n for col in range(target_col + 1, self._width):\r\n if self._grid[row][col] != solved_grid[row][col]:\r\n solved_lower_right = False\r\n \r\n if self._grid[1][target_col] != solved_grid[1][target_col]:\r\n solved_lower_right = False\r\n \r\n return solved_lower_right", "def minimum_value(self):\n return self._fitness[self._minidx]", "def potential_min(self):\n\n return self._args.min", "def solve(self):\r\n while not self.done():\r\n self.no_open_cells()\r\n self.all_cells_are_mines()\r\n self.no_mines()\r\n if not self.done():\r\n self.obvious_cells()\r\n if not self.done():\r\n made_progress = self.safe_neighbour_difference()\r\n if made_progress:\r\n continue\r\n if not self.done():\r\n made_progress = self.adjacent_combinations()\r\n if made_progress:\r\n continue\r\n return", "def first_zombie_col(self, row_num):\n row = self.board[row_num]\n for col_num, square in enumerate(row):\n if any(self.is_zombie([row_num, col_num])):\n return col_num", "def find_local_minima(black_pixels_per_column_in_line):\n\n #array che contiene i minimi locali nella forma di coppie (coordinata x, numero di pixel neri)\n local_minima = []\n\n # un punto e' minimo locale se e' minore dei suoi punti adiacenti;\n # nello specifico, diciamo che deve essere minore strettamente almeno di uno dei due\n # (mentre puo' essere minore o uguale all'altro)\n\n local_minima.append(0)\n\n for i in range(1, len(black_pixels_per_column_in_line)-1):\n\n is_local_minimum = ((black_pixels_per_column_in_line[i] <= black_pixels_per_column_in_line[i-1] and\n black_pixels_per_column_in_line[i] < black_pixels_per_column_in_line[i+1]) or\n (black_pixels_per_column_in_line[i] < black_pixels_per_column_in_line[i-1] and\n black_pixels_per_column_in_line[i] <= black_pixels_per_column_in_line[i+1]))\n\n if is_local_minimum:\n local_minima.append(i)\n\n local_minima.append(len(black_pixels_per_column_in_line)-1)\n\n return np.array(local_minima)", "def nodes_min_energy_index(self, node):\n idx = -1\n curr_energy = np.inf\n for i in range(self.cost_matrix.shape[1]):\n new_energy = self.cost_matrix[node][i]\n if new_energy < curr_energy:\n curr_energy = new_energy\n idx = i\n return idx", "def getNextNodeUsingCellDiff(kGoalState):\n \n global fringe\n global solutions\n\n \n\n\n\n minNode = 
None\n minCost = 99999999999\n minNodeIndex = -1\n\n \n pnode = None\n pcost = None\n\n if len(solutions)>0 and solutions[0] != None:\n pnode = solutions[0];\n pcost = getHValueForNode(pnode,kGoalState)\n #print pnode, pcost\n # raw_input()\n \n\n\n\n for idx,node in enumerate(fringe):\n #get the heu. function values\n g_value = getHValueForNode(node,kGoalState)\n \n\n if g_value < minCost:\n minNode = node\n minNodeIndex = idx\n minCost = g_value\n\n\n fringe.pop(minNodeIndex)\n c = getHValueForNode(minNode,kGoalState)\n if pnode != None:\n if c > pcost:\n minNode = None\n \n return minNode", "def best_unexplored_lower_bound(self):\n if self._unexplored_nodes:\n return min(node.lower_bound for node in self._unexplored_nodes)\n else:\n return 0.0", "def minimize(self):\n raise NotImplementedError", "def dim_zero_min(x: Tensor) ->Tensor:\n return torch.min(x, dim=0).values", "def get_min_cell_voltage(self):\n summary = \" \".join(self.get_summary().split())\n pattern = '\\$.... .. .*? .*? .*? (.*?) .*? . .*? .*? . . . .*?'\n minv = float(re.findall(pattern,summary).pop())\n return minv", "def min_number_of(g, elt, n_fuel):\n if elt == 'FUEL':\n return n_fuel\n t = sum([g.edges[elt, s]['cost'] * min_number_of(g, s, n_fuel) for s in\n g.successors(elt)])\n return divup(t, g.nodes[elt]['batch_size'])", "def reduce_columns(m):\n s = np.sum(m, axis=0)\n c = np.array(np.nonzero(s))\n c = c[0,:]\n m_prime = m[:,c]\n \n return m_prime", "def minimax(state, depth, player):\n if depth == 9:\n row = choice([0, 1, 2])\n col = choice([0, 1, 2])\n return row, col, ''\n\n if player == COMP:\n best = [-1, -1, float(\"-inf\")]\n else:\n best = [-1, -1, float(\"inf\")]\n\n if depth == 0 or state.has_tic_tac_toe(COMP) or state.has_tic_tac_toe(HUMAN):\n score = heuristic(state, depth)\n return [-1, -1, score]\n \"\"\"\n Checks if any of the player is one away from winning in any board and make the appropriate move.\n \"\"\"\n if player==COMP:\n empty_cells=get_empty_cells(state)\n dangerous_cells=state.is_one_away_from_tic_tac_toe((player%2)+1)\n if dangerous_cells:\n found_dangerous_cells=True\n else:\n found_dangerous_cells=False\n print \"no dangerous local boards\"\n favoring_cells=state.is_one_away_from_tic_tac_toe(player)\n if favoring_cells:\n found_favoring_cells=True\n else:\n found_favoring_cells=False\n print \"no favoring local boards\"\n if found_dangerous_cells==False and found_favoring_cells==False:\n pass\n if found_dangerous_cells==False and found_favoring_cells==True:\n empty_cells[:]=[]\n for cell in favoring_cells:\n empty_cells.append(cell)\n if found_dangerous_cells==True and found_favoring_cells==False:\n empty_cells[:]=[]\n for cell in dangerous_cells:\n empty_cells.append(cell)\n if found_dangerous_cells==True and found_favoring_cells==True:\n empty_cells[:]=[]\n for cell in dangerous_cells:\n empty_cells.append(cell)\n else:\n empty_cells=get_empty_cells(state)\n for cell in empty_cells:\n row, col = cell[0], cell[1]\n state.board[row][col] = player\n score = minimax(state, depth - 1, (player % 2) + 1)\n state.board[row][col] = 0\n score[0], score[1] = row, col\n if player == COMP:\n if score[2] >= best[2]:\n if score[2]==best[2]:\n \"\"\"\n Favors middle positions over sides or corners\n MIDDLE > CORNERS > SIDES\n \"\"\"\n if (best[0]==0 and best[1]==0) or (best[0]==0 and best[1]==2) or (best[0]==2 and best[1]==0) or (best[0]==2 and best[1]==2):\n if score[0]==0 and score[1]==0: #favoring centre position over diagonal position\n best=score\n print(\"centre position chosen over 
diagonal positions\")\n else:\n if ((score[0]==0 and score[1]==1) or (score[0]==1 and score[1]==0) or (score[0]==1 and score[1]==2) or (score[0]==2 and score[1]==1))==0:\n best=score #favoring any position over side position as long as the new position is not a side position too\n print(\"diagonal and centre positions chosen over side positions\")\n else:\n best = score\n else:\n bestMoves=[]\n if score[2] < best[2]:\n best=score\n return best", "def minimax(board):\n if player(board) == X:\n return optimal_max(board)[0]\n else:\n return optimal_min(board)[0]", "def minimax(board):\n play = player(board)\n\n bestMove = (-1, -1)\n if play == X:\n bestVal = -1000\n # Traverse all cells, evaluate minimax function for\n # all empty cells. And return the cell with optimal\n # value.\n for i in range(3) : \n for j in range(3) :\n \n # Check if cell is empty\n if (board[i][j] == EMPTY) :\n \n # Make the move\n board[i][j] = play\n \n # compute evaluation function for this\n # move.\n moveVal = minimaxScore(board)\n \n # Undo the move\n board[i][j] = EMPTY\n \n # If the value of the current move is\n # more than the best value, then update\n # best/\n if (moveVal > bestVal) : \n bestMove = (i, j)\n bestVal = moveVal\n else:\n bestVal = 1000\n # Traverse all cells, evaluate minimax function for\n # all empty cells. And return the cell with optimal\n # value.\n for i in range(3) : \n for j in range(3) :\n \n # Check if cell is empty\n if (board[i][j] == EMPTY) :\n \n # Make the move\n board[i][j] = play\n \n # compute evaluation function for this\n # move.\n moveVal = minimaxScore(board)\n \n # Undo the move\n board[i][j] = EMPTY\n \n # If the value of the current move is\n # more than the best value, then update\n # best/\n if (moveVal < bestVal) : \n bestMove = (i, j)\n bestVal = moveVal\n \n return bestMove", "def get_min_shannon_entropy(grid):\r\n curr_min = math.inf\r\n curr_best = []\r\n for i in range(len(grid[0])):\r\n for j in range(len(grid)):\r\n if not grid[j][i].collapsed:\r\n w = grid[j][i].block_weights\r\n shannon_entropy = sum([-math.log(el) for el in w] )\r\n if shannon_entropy < curr_min:\r\n curr_min = shannon_entropy\r\n curr_best = [(i,j)]\r\n elif shannon_entropy == curr_min:\r\n curr_best.append((i,j))\r\n idx = np.random.choice(range(len(curr_best))) #choose randomly if theres a tie\r\n return curr_best[idx] #x,y\r", "def column_count_modal(rows):\n counts = defaultdict(int)\n for row in rows:\n length = len([c for c in row if not c.empty])\n if length > 1:\n counts[length] += 1\n if not len(counts):\n return 0\n return max(counts.items(), key=lambda (k,v): v)[0]", "def minimax(state, depth, player):\r\n if player == COMP:\r\n best = [-1, -1, -infinity]\r\n else:\r\n best = [-1, -1, +infinity]\r\n\r\n if depth == 0 or game_over(state):\r\n score = evaluate(state)\r\n return [-1, -1, score]\r\n\r\n for cell in empty_cells(state):\r\n x, y = cell[0], cell[1]\r\n state[x][y] = player\r\n score = minimax(state, depth - 1, -player)\r\n state[x][y] = 0\r\n score[0], score[1] = x, y\r\n\r\n if player == COMP:\r\n if score[2] > best[2]:\r\n best = score # max value\r\n else:\r\n if score[2] < best[2]:\r\n best = score # min value\r\n\r\n return best", "def _get_minimal_lanes(self):\n return np.argwhere(self.end_of_lanes == np.min(self.end_of_lanes)).flatten()", "def choose_column(row_index, prev_col_name, changed_variables, max_concealable_variables):\n if(prev_col_name == None):\n return changed_variables[row_index][0]\n return 
changed_variables[row_index][(changed_variables[row_index].index(prev_col_name)+1) % max_concealable_variables]", "def optimal_solution_single_pickup(memo):\n # Calculates what the maximum value is and saves which row\n maxvalue = None\n for i in range(len(memo)+1):\n if maxvalue is None:\n if memo[len(memo)-1][i] is not None:\n maxvalue = (memo[len(memo)-1][i], i)\n else:\n if memo[len(memo)-1][i] is not None:\n if memo[len(memo)-1][i] > maxvalue[0]:\n maxvalue = (memo[len(memo)-1][i], i)\n\n # Goes back and calculates how the solution was formed\n optimal_solution = [0] * len(memo)\n current_row = maxvalue[1]\n # Goes backwards through the array starting at the best value\n for j in range(len(memo)-1, 0, -1):\n if current_row > 0:\n # Checks if it did pick up. If current cell does not have the same value as the previous column with\n # 1 less energy then it must have picked up\n if memo[j][current_row] != memo[j-1][current_row-1]:\n optimal_solution[j] = 1\n current_row += 1\n else:\n current_row -= 1\n # If at 0 energy then it must have picked up\n else:\n optimal_solution[j] = 1\n current_row += 1\n return maxvalue[0], optimal_solution", "def lower_row_invariant(self, target_row, target_col):\n \n # Returns False if target cell is NOT the zero tile.\n if self.get_number(target_row, target_col) != 0:\n return False\n \n # Returns False if cells to the right of target_col \n # are NOT positioned in their solved locations.\n if target_col < self.get_width():\n for col in range(target_col + 1, self.get_width()):\n if self.get_number(target_row, col) != col + (target_row * self.get_width()):\n return False\n\n # Returns False if cells in rows target_row + 1 and below \n # are NOT positioned at their solved locations.\n if target_row < self.get_height():\n for row in range(target_row + 1, self.get_height()):\n for col in range(self.get_width()):\n if self.get_number(row, col) != col + (row * self.get_width()):\n return False\n\n return True", "def row0_invariant(self, target_col):\n # replace with your code\n if self.get_number(0, target_col) != 0:\n return False\n current = 0\n for row in range(2, self.get_height()):\n if target_col == self.get_width() - 1:\n current = self._grid[row][0]\n else:\n current = self._grid[row - 1][-1] + 1\n column = self._grid[row]\n for grid in column:\n if grid != current:\n print 'Error 4'\n return False\n current += 1\n current = self._grid[1][target_col]\n for grid in self._grid[1][target_col:]:\n if grid != current:\n print 'Error 5'\n return False\n current += 1\n return True", "def minimax(state, depth, player):\n\n if player == COMP:\n best = [-1, -1, inf]\n else:\n best = [-1, -1, -inf]\n\n if depth == 0 or game_over(state):\n score = evaluate(state)\n return [-1, -1, score]\n\n for cell in empty_cells(state):\n x, y = cell[0], cell[1]\n state[x][y] = player\n score = minimax(state, depth - 1, -player)\n state[x][y] = 0\n score[0], score[1] = x, y\n\n if player == COMP:\n if score[2] < best[2]:\n best = score\n else:\n if score[2] > best[2]:\n best = score\n\n return best", "def check_cells_fit(cell_no, min_cell_distance, space_range=[[0,10],[0,10],None]):\n\n dim1, dim2, dim3 = space_range\n full_dim = 1.\n for dim in [dim1, dim2, dim3]:\n if dim != None:\n dim = dim[1]-dim[0]\n full_dim = full_dim*dim\n\n return full_dim / min_cell_distance >= cell_no", "def _non_zero_columns_search(array):\n col_num = array.shape[1]\n non_zero_col = CArray([], dtype=int)\n for c in range(col_num):\n col = array[:, c]\n if col.any() == True:\n non_zero_col = 
non_zero_col.append(c)\n\n return non_zero_col" ]
[ "0.6645709", "0.6558186", "0.64990723", "0.6368767", "0.63344336", "0.6301566", "0.6237791", "0.6110719", "0.60172206", "0.60031587", "0.59943664", "0.59906703", "0.5983194", "0.59770536", "0.5949309", "0.5948226", "0.59324056", "0.592747", "0.5884803", "0.58845806", "0.58685064", "0.58682936", "0.58529323", "0.5841027", "0.5836812", "0.5836258", "0.5804371", "0.5783265", "0.5777008", "0.5755377", "0.5727216", "0.57262075", "0.57231665", "0.5722811", "0.5694477", "0.5691728", "0.5673158", "0.56704336", "0.56640255", "0.5639344", "0.5635431", "0.5635128", "0.5606216", "0.56034034", "0.5601208", "0.5601208", "0.5594815", "0.5584962", "0.557389", "0.55695826", "0.5562527", "0.55616975", "0.55485463", "0.55434436", "0.55427885", "0.55419075", "0.55392545", "0.55382276", "0.55344534", "0.5533035", "0.5528297", "0.5516086", "0.5508779", "0.5505824", "0.5498958", "0.5496447", "0.5487537", "0.54858524", "0.5484413", "0.5482077", "0.54766005", "0.5469713", "0.54662794", "0.5464544", "0.54574937", "0.54574186", "0.5449821", "0.5445619", "0.5443692", "0.54433906", "0.54366904", "0.5430906", "0.5416373", "0.5411376", "0.5410598", "0.5400924", "0.539384", "0.5387952", "0.53857857", "0.53854454", "0.53848755", "0.53824455", "0.53817654", "0.53814995", "0.5379688", "0.53789765", "0.5376926", "0.53707653", "0.53707075", "0.53699857" ]
0.69948006
0
Solve the exact cover problem recursively
def solve(self, solution_rows): # Are we out of columns? # Can only occur if each column has been removed through row selection if self.root.right == self.root: # Construct a tuple of the rows in this solution soln = [] for row in solution_rows: soln.append(row.name) # Add it to the list of solutions self.solutions.append(tuple(sorted(soln))) return # Choose the column with the minimum sum col = self.get_minimum_column() # Remove the column self.remove_col(col) # print("Chosen to remove column " + str(col.name)) # Try adding each row in this column to the solution, one at a time row = col.down while row != col: # If there are no rows in this column, there is nothing to loop over here # Add to the solution solution_rows.append(row) # print("Trying row " + str(row.name)) # Every column on this row needs to be removed cell = row.right while cell != row: self.remove_col(cell.header) cell = cell.right # Now try to solve self.solve(solution_rows) # Now add that row back in cell = row.left while cell != row: self.unremove_col(cell.header) cell = cell.left # Remove this row from the solution solution_rows.pop() # print("Removing row " + str(row.name)) # Move on to the next row row = row.down # Add the column back in self.unremove_col(col)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_cover():\n # D corresponds to the items and the transactions in which they appear, it is the standard code table\n D = {\n \"B\": Bitmap([0, 1, 2]),\n \"A\": Bitmap([0, 1]),\n \"C\": Bitmap([0, 2]),\n \"D\": Bitmap([2])\n }\n # ct corresponds to the itemsets on which we want to calculate the cover\n ct = [\n frozenset(\"ABC\"),\n frozenset(\"AB\"),\n frozenset(\"BC\"),\n frozenset(\"A\"),\n frozenset(\"B\"),\n frozenset(\"C\"),\n frozenset(\"D\")\n ]\n CTc = cover(D, ct)\n\n assert CTc[frozenset(\"ABC\")] == Bitmap([0])\n assert CTc[frozenset(\"AB\")] == Bitmap([1]) # AB appears only in tid_1 for usage because ABC is placed before in ct\n # so the AB of the first transaction has been covered by ABC\n assert CTc[frozenset(\"BC\")] == Bitmap([2])\n assert CTc[frozenset(\"A\")] == Bitmap()\n assert CTc[frozenset(\"B\")] == Bitmap()\n assert CTc[frozenset(\"C\")] == Bitmap()\n assert CTc[frozenset(\"D\")] == Bitmap([2])", "def ExactCover(grid, x, y):\n x_axis = get_x_axis_elements(grid[x])\n \n y_axis = get_y_axis_elements(grid, y)\n \n BlockOfCoordinates = ReturnBlockOfCoordinates(x, y) \n\n block = set()\n steps = int(len(grid)**0.5)\n for i in range(0, len(grid), steps):\n for j in range(0, len(grid), steps):\n CurrentBlock = ReturnBlockOfCoordinates(i, j)\n if CurrentBlock == BlockOfCoordinates:\n # not happy here\n block.update(element for element in grid[i][j:j+block_size] if element != 0)\n block.update(element for element in grid[i+1][j:j+block_size] if element != 0)\n block.update(element for element in grid[i+2][j:j+block_size] if element != 0)\n\n numbers_used_for_coordinates = set()\n numbers_used_for_coordinates.update(x_axis)\n numbers_used_for_coordinates.update(y_axis)\n numbers_used_for_coordinates.update(block)\n\n possible_answers = set()\n for possible_answer in grid_numbers:\n if not possible_answer in numbers_used_for_coordinates:\n possible_answers.add(possible_answer)\n\n return possible_answers", "def solve(grid):\n\n if is_grid_solved(grid):\n return grid\n\n new_grid = copy.deepcopy(grid)\n\n for x_element in range(len(new_grid)):\n for y_element in range(len(new_grid[x_element])):\n if new_grid[x_element][y_element] == 0:\n answers = ExactCover(new_grid, x_element, y_element)\n for answer in answers:\n new_grid[x_element][y_element] = answer\n new_grid = solve(new_grid)\n if not is_grid_solved(new_grid):\n new_grid[x_element][y_element] = 0\n else:\n break\n return new_grid\n\n return new_grid", "def upper_covers(self, x):", "def test_vertex_cover_basic(self):\n G = dnx.chimera_graph(1, 2, 2)\n cover = dnx.min_vertex_cover(G, ExactSolver())\n self.vertex_cover_check(G, cover)\n\n G = nx.path_graph(5)\n cover = dnx.min_vertex_cover(G, ExactSolver())\n self.vertex_cover_check(G, cover)\n\n for __ in range(10):\n G = nx.gnp_random_graph(5, .5)\n cover = dnx.min_vertex_cover(G, ExactSolver())\n self.vertex_cover_check(G, cover)", "def compute_cover(tree: CoverMultiWaySearchTree,\n k: int, c1: str, c2: str) -> Optional[Set[CoverMultiWaySearchTree.Position.Node]]:\n # Step 1: Find nodes useful for the (k, c1, c2)-cover\n nodes = tree.find_nodes_in_range(c1, c2)\n\n # Step 2: Count number of items in range [c1, c2]\n n = get_number_of_useful_items(nodes, c1, c2)\n\n # Step 3: Compare with k\n if not n >= k:\n return None\n\n # Step 4: Sort nodes by number of useful items\n pq = HeapPriorityQueue(contents=[(get_number_of_useful_items([node], c1, c2), node) for node in nodes])\n\n # Step 5: Greedy approach - Use the node with the maximum number of useful items\n 
cover = set()\n while k > 0:\n useful_items, node = pq.remove_max()\n k -= useful_items\n cover.add(node)\n return cover", "def _clump(input, valid, output, search_list, clumpId=1):\n (ysize, xsize) = input.shape\n\n # lists slow from Numba - use an array since\n # we know the maximum size\n searchIdx = 0\n\n # run through the image\n for y in range(ysize):\n for x in range(xsize):\n # check if we have visited this one before\n if valid[y, x] and output[y, x] == 0:\n val = input[y, x]\n searchIdx = 0\n search_list[searchIdx, 0] = y\n search_list[searchIdx, 1] = x\n searchIdx += 1\n output[y, x] = clumpId # marked as visited\n\n while searchIdx > 0:\n # search the last one\n searchIdx -= 1\n sy = search_list[searchIdx, 0]\n sx = search_list[searchIdx, 1]\n\n # work out the 3x3 window to vist\n tlx = sx - 1\n if tlx < 0:\n tlx = 0\n tly = sy - 1\n if tly < 0:\n tly = 0\n brx = sx + 1\n if brx > xsize - 1:\n brx = xsize - 1\n bry = sy + 1\n if bry > ysize - 1:\n bry = ysize - 1\n\n for cx in range(tlx, brx+1):\n for cy in range(tly, bry+1):\n # do a '4 neighbour search'\n # don't have to check we are the middle\n # cell since output will be != 0\n # since we do that before we add it to search_list\n if (cy == sy or cx == sx) and (valid[cy, cx] and \n output[cy, cx] == 0 and \n input[cy, cx] == val):\n output[cy, cx] = clumpId # mark as visited\n # add this one to the ones to search the neighbours\n search_list[searchIdx, 0] = cy\n search_list[searchIdx, 1] = cx\n searchIdx += 1\n clumpId += 1\n\n return clumpId", "def find_cover(self):\n\n base = self('~dirname')\n fns = []\n # We can't pass 'base' alone to glob.glob because it probably\n # contains confusing metacharacters, and Python's glob doesn't\n # support any kind of escaping, so chdir and use relative\n # paths with known safe strings.\n try:\n olddir = os.getcwd()\n os.chdir(base)\n except EnvironmentError:\n pass\n else:\n for subdir in [\"\", \"scan\", \"scans\", \"images\", \"covers\"]:\n for ext in [\"jpg\", \"jpeg\", \"png\", \"gif\"]:\n subdir = util.make_case_insensitive(subdir)\n ext = util.make_case_insensitive(ext)\n fns.extend(glob.glob(os.path.join(subdir, \"*.\" + ext)))\n fns.extend(glob.glob(os.path.join(subdir, \".*.\" + ext)))\n os.chdir(olddir)\n images = []\n for fn in sorted(fns):\n lfn = fn.lower()\n score = 0\n # check for the album label number\n if self.get(\"labelid\", fn + \".\").lower() in lfn: score += 100\n score += sum(map(lfn.__contains__,\n [\"front\", \"cover\", \"jacket\", \"folder\", \"albumart\"]))\n if score:\n images.append((score, os.path.join(base, fn)))\n # Highest score wins.\n if images:\n try:\n return file(max(images)[1], \"rb\")\n except IOError:\n return None\n elif \"~picture\" in self:\n # Otherwise, we might have a picture stored in the metadata...\n return self.get_format_cover()\n else: return None", "def dfs_recursion(self, tour, sque_v, gain):\n i = len(sque_v) // 2 # step i done already\n if i == self.max_depth:\n return\n dahuitou = (i + 1) % self.submove_size == 0\n v_2i_2, v_2i_1 = sque_v[-2], sque_v[-1]\n # step i+1: search for (v_2i, v_2ip1)\n for v_2i in self.candidates[v_2i_1]:\n if v_2i in sque_v: # disjunctivity criterion\n continue\n new_gain = gain + self.cost_d[v_2i_2, v_2i_1] - self.cost_d[v_2i_1, v_2i]\n if new_gain <= 0:\n continue\n for v_2ip1 in tour.neighbours(v_2i):\n if v_2ip1 in sque_v: # disjunctivity criterion\n continue\n if dahuitou:\n if tour.check_feasible(sque_v + [v_2i, v_2ip1]):\n if new_gain + self.cost_d[v_2i, v_2ip1] - self.cost_d[v_2ip1, 
sque_v[0]] > 0:\n return tour.k_exchange(sque_v + [v_2i, v_2ip1])\n else:\n result = self.dfs_recursion(tour, sque_v + [v_2i, v_2ip1], new_gain)\n if result is not None:\n return result\n else: # optional, can be deleted\n continue\n else:\n if new_gain + self.cost_d[v_2i, v_2ip1] - self.cost_d[v_2ip1, sque_v[0]] > 0 and \\\n tour.check_feasible(sque_v + [v_2i, v_2ip1]):\n return tour.k_exchange(sque_v + [v_2i, v_2ip1])\n else:\n result = self.dfs_recursion(tour, sque_v + [v_2i, v_2ip1], new_gain)\n if result is not None:\n return result\n return", "def find_optimal_components_subset(contours, edges):\n c_info = props_for_contours(contours, edges)\n c_info.sort(key=lambda x: -x['sum'])\n total = np.sum(edges) / 255\n area = edges.shape[0] * edges.shape[1]\n\n c = c_info[0]\n del c_info[0]\n this_crop = c['x1'], c['y1'], c['x2'], c['y2']\n crop = this_crop\n covered_sum = c['sum']\n\n while covered_sum < total:\n changed = False\n recall = 1.0 * covered_sum / total\n prec = 1 - 1.0 * crop_area(crop) / area\n f1 = 2 * (prec * recall / (prec + recall))\n # print '----'\n for i, c in enumerate(c_info):\n this_crop = c['x1'], c['y1'], c['x2'], c['y2']\n new_crop = union_crops(crop, this_crop)\n new_sum = covered_sum + c['sum']\n new_recall = 1.0 * new_sum / total\n new_prec = 1 - 1.0 * crop_area(new_crop) / area\n new_f1 = 2 * new_prec * new_recall / (new_prec + new_recall)\n\n # Add this crop if it improves f1 score,\n # _or_ it adds 25% of the remaining pixels for <15% crop expansion.\n # ^^^ very ad-hoc! make this smoother\n remaining_frac = c['sum'] / (total - covered_sum)\n new_area_frac = 1.0 * crop_area(new_crop) / crop_area(crop) - 1\n if new_f1 > f1 or (\n remaining_frac > 0.25 and new_area_frac < 0.15):\n print('%d %s -> %s / %s (%s), %s -> %s / %s (%s), %s -> %s' % (\n i, covered_sum, new_sum, total, remaining_frac,\n crop_area(crop), crop_area(new_crop), area, new_area_frac,\n f1, new_f1))\n crop = new_crop\n covered_sum = new_sum\n del c_info[i]\n changed = True\n break\n\n if not changed:\n break\n\n return crop", "def cover_star(self):\n # clear covers\n self.row_cover.fill(0)\n self.col_cover.fill(0)\n # clear primed zeros\n self.M[self.M == self.Zeros.PRIME.value] = 0\n\n # find the starred zeros\n star = self.M == self.Zeros.STAR.value\n # cover each column that containing a starred zero\n self.col_cover = (star.sum(axis=0) > 0).astype(int)\n\n # calculated the number of covered cols\n colcount = self.col_cover.sum()\n if(colcount >= self.ncol or colcount >= self.nrow):\n # done\n return self.Steps.DONE\n\n return self.Steps.STEP1", "def fn(i, j, empty):\n nonlocal ans \n if grid[i][j] == 2: \n if empty == -1: ans += 1\n return \n grid[i][j] = -1 # mark as visited \n for ii, jj in (i-1, j), (i, j-1), (i, j+1), (i+1, j): \n if 0 <= ii < m and 0 <= jj < n and grid[ii][jj] != -1: \n fn(ii, jj, empty-1)\n grid[i][j] = 0 # backtracking", "def search(values):\n # TODO: Copy your code from the classroom to complete this function\n # First, reduce the puzzle using the previous function\n #print (\"before\")\n #display(values)\n reduce_puzzle(values)\n #print(\"after\")\n #display(values)\n \n for box in boxes:\n if len(values[box]) < 1:\n return False ## Failed earlier\n if all(len(values[s]) == 1 for s in boxes): \n return values ## Solved!\n \n # Choose one of the unfilled squares with the fewest possibilities\n n,s = min((len(values[s]), s) for s in boxes if len(values[s]) > 1)\n #print (n,s,values[s])\n \n # Now use recursion to solve each one of the resulting sudokus, and if one 
returns a value (not False), return that answer!\n for value in values[s]:\n values_copy = values.copy()\n values_copy[s] = value\n #print (s, \"values:\", values[s],\"=>\",value)\n #display(values_copy)\n attempt = search(values_copy)\n if attempt:\n return attempt", "def dfs_recursion(self, tour, sque_v, gain):\n i = int(len(sque_v)/2) # step i done already\n dahuitou = (i + 1) % self.max_exchange == 0\n v_2i_2, v_2i_1 = sque_v[-2], sque_v[-1]\n # step i+1: search for (v_2i, v_2ip1)\n for v_2i in self.candidates[v_2i_1]:\n if v_2i in sque_v: # disjunctivity criterion\n continue\n new_gain = gain + self.cost[v_2i_2, v_2i_1] - self.cost[v_2i_1, v_2i]\n if new_gain <= 0:\n continue\n for v_2ip1 in tour.neighbours(v_2i):\n if v_2ip1 in sque_v: # disjunctivity criterion\n continue\n if dahuitou:\n if tour.check_feasible(sque_v+[v_2i, v_2ip1]):\n if new_gain + self.cost[v_2i, v_2ip1] - self.cost[v_2ip1, sque_v[0]] > 0:\n tour.k_exchange(sque_v + [v_2i, v_2ip1])\n return tour\n else:\n return self.dfs_recursion(tour, sque_v + [v_2i, v_2ip1], new_gain)\n else: # optional, can be deleted\n continue\n else:\n if new_gain + self.cost[v_2i, v_2ip1] - self.cost[v_2ip1, sque_v[0]] > 0 and \\\n tour.check_feasible(sque_v+[v_2i, v_2ip1]):\n tour.k_exchange(sque_v + [v_2i, v_2ip1])\n return tour\n else:\n return self.dfs_recursion(tour, sque_v + [v_2i, v_2ip1], new_gain)\n # if (i + 1) % self.max_exchange == 0:\n # continue\n # return self.dfs_recursion(tour, sque_v + [v_2i, v_2ip1], new_gain)\n # gain += - self.cost[v_2i_1, v_2i] + self.cost[v_2i, v_2ip1]\n # if gain - self.cost[v_2ip1, sque_v[0]] > 0:\n # # check feasibility immediately\n # if tour.check_feasible(sque_v+[v_2i, v_2ip1]):\n # tour.k_exchange(sque_v + [v_2i, v_2ip1])\n # return tour\n # # if not feasible, check whether stop or search for next two nodes\n # if (i+1) % self.max_exchange == 0:\n # continue\n # delta_gain = self.cost[sque_v[2 * i - 2], sque_v[2 * i - 1]] - self.cost[sque_v[2 * i - 1], v_2i]\n # return self.dfs_recursion(tour, sque_v+[v_2i, v_2ip1], gain+delta_gain)\n return", "def solve(problem):\n\n # *** YOUR CODE HERE ***\n\n # The core of Iterative Deepening Search are iterations of Depth Limited\n # Search with given increasing depth.\n\n # A recursive version of Depth Limited Search\n def depth_limited_search(problem, limit):\n \"\"\"\n Return a list of nodes we traversed (or None).\n :param problem: the starting set up.\n :param limit: a given numeric depth limit.\n :return: a list of nodes.\n \"\"\"\n\n # in this case, we simply use a list to keep track of nodes we\n # traversed, instead of the data structure, Stack.\n path = list()\n visited = set() # as before, to prevent duplicated nodes\n root = problem.get_initial_state()\n\n def rec_dls(state, action, depth):\n\n visited.add(state)\n\n # if it is a goal\n if problem.goal_test(state):\n path.append((state, action))\n return path\n\n # or if it reaches a certain depth, but not a goal\n elif depth == 0:\n visited.remove(state)\n return None\n\n else:\n path.append([state, action])\n for successor, action, cost in problem.get_successors(state):\n if successor not in visited:\n # recursively expands the deepest node\n res = rec_dls(successor, action, depth-1)\n if res is not None:\n return res\n path.pop()\n visited.remove(state)\n\n # \"Stared From the Bottom\" (root)\n result = rec_dls(root, 'None', limit)\n # return the path if the we DID have achieved something\n if result is not None:\n return path\n\n import sys\n for depth in range(sys.maxsize): # depth from 0 
to infinity\n print(\"Lower-bound of the optimal cost is {}\".format(depth))\n res2 = depth_limited_search(problem, depth)\n if res2 is not None:\n action_list = list()\n for move in res2:\n action_list.append(move[1]) # recall index 0 is the parent\n # do not forget a None returned in iteration 0 (with depth 0)\n action_list.remove('None')\n return action_list", "def make_set_cover_nr(gRNA_hits, num_sets = 1, target_ids = [], low_coverage_penalty = 0,\n num_lengths_to_track = None, prioritise_3prime = False, optimal_depth = 5,\n suppress_warning = False):\n collapsed_grnas = gRNA_hits.collapse()\n if not target_ids:\n target_ids = set().union(*[set(cg) for cg in collapsed_grnas])\n else:\n target_ids = set(target_ids)\n ## function to regenerate set cover solutions from collapsed_grna object\n collapsed_grnas_original = collapsed_grnas.copy()\n def generate_sc_solutions():\n ## sort in order of smallest set cover size, smallest redundancy, and size of largest set in set cover\n minweight_sc = limited_minweight_SC(collapsed_grnas, num_sets, targets = target_ids,\n low_coverage_penalty = low_coverage_penalty,\n num_lengths_to_track = num_lengths_to_track)\n ## optimal solutions\n max_depth = min(optimal_depth, max(map(len, minweight_sc)))\n max_redundancy = max(map(lambda C:C.redundancy, minweight_sc))/len(target_ids)\n print(max_depth, max_redundancy)\n optimal_sc = limited_optimal_SC(target_ids, collapsed_grnas_original,\n size = max_depth, redundancy = max_redundancy)\n print(\"num unfiltered optimal sc:\", len(optimal_sc))\n ## remove duplicates\n optimal_sc = [C for C in optimal_sc\n if all(map(lambda minweight_C:(len(C) != minweight_C\n and C != minweight_C),\n minweight_sc))]\n print(\"num filtered optimal sc:\", len(optimal_sc))\n return sorted(minweight_sc + optimal_sc,\n key = lambda C:(len(C), C.redundancy, -C.max_coverage))\n sc_solutions = []\n sc_solutions.extend(generate_sc_solutions())\n eliminated_grna = []\n ## function to generate set covers\n def make_set_cover(restore = []):\n ## restore only works if gRNA belonged in the current set cover\n curr_sc = sc_solutions[0]\n for grna in restore:\n curr_sc.add_grna(grna)\n eliminated_grna.remove(grna)\n ## if current set cover solution has at least one CollapsedgRNA with no gRNA left\n while not curr_sc.all_not_empty():\n sink = sc_solutions.pop(0) ## remove set cover solution\n ## generate more possible gRNA sets if no pre-generated set covers are left\n if not sc_solutions:\n collapsed_grnas.remove_grna(*eliminated_grna)\n collapsed_grnas.remove_empty()\n sc_solutions.extend(generate_sc_solutions())\n if not sc_solutions:\n if not suppress_warning:\n print((\"\\nError: The provided gRNA sequences cannot cover all\"\n \" target sequences at least once.\\n\"))\n return []\n ## select next solution\n curr_sc = sc_solutions[0]\n ## consume=True -> remove selected gRNA from CollapsedgRNA\n output = curr_sc.generate_grna_set(prioritise_3prime = prioritise_3prime, consume = True)\n eliminated_grna.extend(output)\n return output\n return make_set_cover", "def solution(limit=28123):\n sum_divs = [1] * (limit + 1)\n\n for i in range(2, int(limit**0.5) + 1):\n sum_divs[i * i] += i\n for k in range(i + 1, limit // i + 1):\n sum_divs[k * i] += k + i\n\n abundants = set()\n res = 0\n\n for n in range(1, limit + 1):\n if sum_divs[n] > n:\n abundants.add(n)\n\n if not any((n - a in abundants) for a in abundants):\n res += n\n\n return res", "def conceptcover(bin_arr, limit=1, uncovered=0.1):\n arr = np.copy(bin_arr)\n arr_sum = 
np.sum(arr)\n result = []\n while True:\n k = kernel(arr)\n i = intent(bin_arr, k)\n e = extent(bin_arr, i)\n if len(e)*len(i) < limit or (e, i) in result: break\n result.append((e, i))\n arr = removed(arr, e, i)\n if np.sum(arr)/arr_sum < uncovered: break\n return result", "def trivial_cover(regions_count, clinics_count, clinics):\n clinics_built = [0]*range(0, clinics_count)\n coverted = set()\n\n for clinic in clinics:\n clinics_built[clinic.index] = 1\n coverted |= set(clinic.regions)\n if len(coverted) >= regions_count:\n break # We are done, we cover all the regions\n\n # Calculamos el costo total de construcción\n total_costs = sum([clinic.cost*clinics_built[clinic.index] for clinic in clinics])\n \n # Convertimos la solución en el formato esperado\n output_data = str(total_cost) + '\\n'\n output_data += ' '.join(map(str, clinics_built))\n\n return output_data", "def bipartite_vertex_cover(bigraph, algo=\"Hopcroft-Karp\"):\n if algo == \"Hopcroft-Karp\":\n coord = [(irow,icol) for irow,cols in enumerate(bigraph) for icol in cols]\n coord = np.array(coord)\n graph = csr_matrix((np.ones(coord.shape[0]),(coord[:,0],coord[:,1])))\n matchV = maximum_bipartite_matching(graph, perm_type='row')\n matchV = [None if x==-1 else x for x in matchV]\n nU, nV = graph.shape\n assert len(matchV) == nV\n elif algo == \"Hungarian\":\n matchV = max_bipartite_matching2(bigraph)\n nU, nV = len(bigraph), len(matchV)\n else:\n assert False\n\n matchU = [None] * nU\n \n for v in range(nV): # -- build the mapping from U to V\n if matchV[v] is not None:\n matchU[matchV[v]] = v\n \n def old_konig():\n visitU = [False] * nU # -- build max alternating forest\n visitV = [False] * nV\n for u in range(nU):\n if matchU[u] is None: # -- starting with free vertices in U\n _alternate(u, bigraph, visitU, visitV, matchV)\n inverse = [not b for b in visitU]\n return (inverse, visitV)\n \n def new_konig():\n # solve the limitation of huge number of recursive calls\n visitU = [False] * nU # -- build max alternating forest\n visitV = [False] * nV\n wait_u = set(range(nU)) - set(matchV) \n while len(wait_u) > 0:\n u = wait_u.pop()\n visitU[u] = True\n for v in bigraph[u]:\n if not visitV[v]:\n visitV[v] = True\n assert matchV[v] is not None # otherwise match is not maximum\n assert matchV[v] not in wait_u\n wait_u.add(matchV[v])\n inverse = [not b for b in visitU]\n return (inverse, visitV)\n \n #res_old = old_konig()\n res_new = new_konig()\n #assert res_old == res_new\n return res_new", "def fn(i, j):\n if grid[i][j] <= 0: return 0\n grid[i][j] *= -1 # mark as visited \n ans = 0\n for ii, jj in (i-1, j), (i, j-1), (i, j+1), (i+1, j): \n if 0 <= ii < m and 0 <= jj < n: \n ans = max(ans, fn(ii, jj) - grid[i][j])\n grid[i][j] *= -1 # backtracking \n return ans", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n from util import Stack\n Pilha_Caminho = Stack()\n Pilha_Estados = Stack()\n Caminho = []\n Visitados = []\n\n Pilha_Caminho.push(Caminho) # empilha caminho (vazio, no começo)\n Pilha_Estados.push(problem.getStartState()) # empilha estado inicial\n\n while (Pilha_Caminho.isEmpty() == False and Pilha_Estados.isEmpty() == False):\n Caminho_Andado = Pilha_Caminho.pop() # atualiza caminho\n Estado_Atual = Pilha_Estados.pop() # atualiza estado\n if problem.isGoalState(Estado_Atual): # caso estado atual seja o desejado,\n return Caminho_Andado # retorna o caminho total\n if Estado_Atual not in Visitados: # caso estado atual não tenha sido visitado\n Visitados.append(Estado_Atual) # marca estado como visitado\n 
for Sucessor in problem.getSuccessors(Estado_Atual): # busca sucessores\n if Sucessor[0] not in Visitados: # caso sucessor não tenha sido visitado\n Pilha_Caminho.push(Caminho_Andado + [Sucessor[1]]) # atualiza caminho total na pilha\n Pilha_Estados.push(Sucessor[0]) # atualiza estado\n return", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n path_to_point = {}\n cost_to_point = {}\n\n # Get the start node\n start_node = problem.getStartState()\n fringe_node = [start_node]\n path_to_point[start_node] = []\n cost_to_point[start_node] = problem.getCostOfActions(path_to_point[start_node])\n\n goal_found = False\n\n while(not goal_found):\n #for i in range(100): \n nodes_to_expand = set()\n # get max value node in the fringe node\n min_val = float(\"inf\")\n for one_node in fringe_node:\n # Compute the cost to reach a node\n if cost_to_point[one_node] < min_val:\n min_val = cost_to_point[one_node]\n \n for one_node in fringe_node:\n # Compute the cost to reach a node\n if cost_to_point[one_node] == min_val:\n nodes_to_expand.add(one_node)\n fringe_node.remove(one_node)\n\n # Expand the fringe node \n for one_node in nodes_to_expand:\n path_to_parent = path_to_point[one_node]\n for nxt_node in problem.getSuccessors(one_node):\n pos = nxt_node[0]\n mv = nxt_node[1]\n # check if point already present in path to point\n prev_cost = float(\"inf\")\n if pos in cost_to_point:\n prev_cost = cost_to_point[pos]\n new_path = path_to_parent + [mv]\n if prev_cost > problem.getCostOfActions(new_path):\n path_to_point[pos] = new_path\n cost_to_point[pos] = problem.getCostOfActions(new_path)\n fringe_node.append(pos)\n\n # Check if destination is reached in the fringe node\n for one_node in fringe_node:\n if problem.isGoalState(one_node):\n final_node = one_node\n goal_found = True\n break\n \n #print(len(fringe_node))\n print(final_node)\n print(path_to_point[final_node])\n return path_to_point[final_node] \n\n util.raiseNotDefined()", "def checkinfectb(k,N,T,start=1,p=0.5,q=0.08,startcenter = False,startcorner=False):\n recover = [0]\n infect = [start]\n suspect = [N-start]\n pop = [Person() for i in range(N)]\n np.random.seed(10)\n for i in range(start):\n pop[i].get_infected();\n if(startcenter):\n resetcenter(start,pop)\n if(startcorner):\n resetcorner(start,pop)\n np.random.seed(10)\n for i in range(T):\n for j in range(N):\n pop[j].movepos(p)\n X = calculatedistance(pop)\n tree = cKDTree(X)\n for j in range(N):\n if pop[j].is_infected():\n addvalue = np.array([X[j]])\n inds = tree.query_ball_point(addvalue, q)\n inds = inds[0]\n for l in inds:\n if pop[l].is_willinfected():\n pop[l].get_infected()\n for j in range(N):\n if pop[j].is_infected():\n if np.random.rand()<k:\n pop[j].get_recovered()\n return np.array([(count_infect(pop)+count_recover(pop))/N,count_infect(pop)/N])", "def limited_optimal_SC(U, S, size = 1, redundancy = 1):\n S_length = len(S)\n S_class = S.__class__\n S = sorted(S, key = len, reverse = True)\n max_redundancy = redundancy * len(U)\n print(S_length, max_redundancy)\n def recur(C, iS, d):\n \"\"\"\n C (SetOfSets): (partial) set cover solution\n iS (int): index of set in S from which to start adding (to avoid repeating combinations)\n d (int): current depth of recursion; if exceeds 'size', terminates.\n I'm hoping using a var is quicker than using len(C).\n \"\"\"\n C_elements = C.elements\n ## if max depth reached or set cover not possible, exit\n if ((d >= size)\n or (len(U - (C_elements.union(*S[iS:]))) != 0)):\n return []\n else:\n output = []\n ## set minimum 
set size to be <uncovered>/<remaining set cover size allowance>\n min_set_size = int((len(U - C_elements) / (size - d)) + 1)\n for i in range(iS, S_length):\n s = S[i]\n ## if set size is too small, stop searching\n ## (S is sorted by largest to shortest so lengths of all sets after\n ## will be <= len(s))\n if len(s) < min_set_size:\n break\n ## if s is not a subset of current partial cover solution, add it\n if not s < C_elements:\n C_branch = C.copy()\n C_branch.add(s)\n ## if exceeds redundancy threshold, skip\n if C_branch.redundancy > max_redundancy:\n continue\n else:\n ## if set cover, add to solutions\n if C_branch.elements == U:\n output.append(C_branch)\n else:\n output.extend(recur(C_branch, i+1, d+1))\n return output\n return recur(S_class(), 0, 0)", "def func(self):\n char = self.character\n lhs, rhs = self.lhs, self.rhs\n if not rhs:\n char.msg(\"Usage: cover <worn clothing> [=|with] <clothing object>\")\n return\n to_cover = char.search(lhs, candidates=char.contents)\n cover_with = char.search(rhs, candidates=char.contents)\n if not to_cover or not cover_with:\n return\n if not to_cover.is_typeclass(\"world.clothing.Item\"):\n char.msg(\"{item} isn't clothes!\".format(item=to_cover.get_display_name(char)))\n return\n if not cover_with.is_typeclass(\"world.clothing.Item\"):\n char.msg(\"{item} isn't wearable!\".format(item=cover_with.get_display_name(char)))\n return\n if cover_with.db.clothing_type:\n if cover_with.db.clothing_type in CLOTHING_TYPE_CANT_COVER_WITH:\n char.msg(\"You can't cover anything with that!\")\n return\n if not to_cover.db.worn:\n char.msg(\"You're not wearing {item}!\".format(item=to_cover.get_display_name(char)))\n return\n if to_cover == cover_with:\n char.msg(\"You can't cover an item with itself!\")\n return\n if cover_with.db.covered_by:\n char.msg(\"{} is covered by something else!\".format(cover_with.get_display_name(char)))\n return\n if to_cover.db.covered_by:\n char.msg(\"{item} is already covered by {cover}.\".format(\n item=cover_with.get_display_name(char),\n cover=to_cover.db.covered_by.get_display_name(char)))\n return\n if not cover_with.db.worn:\n cover_with.wear(char, True) # Put on the item to cover with if it's not on already\n char.location.msg_contents(\"{wearer} covers {item} with {cover}.\",\n mapping=dict(wearer=char,\n item=to_cover.get_display_name(char),\n cover=cover_with.get_display_name(char)))\n to_cover.db.covered_by = cover_with", "def lower_covers(self, x):", "def challenge2(self):\n # Let's try an octree-type approach\n # For each grid cube we should be able to find whether a nanobot:\n # 1) is not in range (is outside grid cube and not in range of nearest face)\n # 2) is in range of whole cube (all 8 corners are in range)\n # 3) is in range of part of the cube (i.e. 
not 1 or 2)\n # Root node: figure out extent of whole space\n mins = []\n maxs = []\n for axis in range(3):\n mins.append(min(self.nanobots, key=lambda n: n.coord[axis]).coord[axis])\n maxs.append(max(self.nanobots, key=lambda n: n.coord[axis]).coord[axis])\n\n for count in range(len(self.nanobots), 0, -1):\n results = self.search_coord_with_max_nanobots(mins, maxs, [], self.nanobots, count)\n if results and results[0].count >= count:\n break\n\n print(f\"Found {len(results)} octree search results with {results[0].count} nanobots in range.\")\n\n # Find result coord closest to origin\n closest_dist = np.iinfo(np.int32).max\n best_coord = None\n for result in results:\n for corner in itertools.product(*zip(result.mins, result.maxs)):\n d = manhattan_dist(corner, (0, 0, 0))\n if d < closest_dist:\n closest_dist = d\n best_coord = corner\n\n print(f\"Best coord: {best_coord} (dist={manhattan_dist(best_coord, (0, 0, 0))})\")", "def _dd(self, c, n):\n\n assert self.test([]) == self.PASS\n\n\trun = 1\n cbar_offset = 0\n\n\t# We replace the tail recursion from the paper by a loop\n\twhile 1:\n tc = self.test(c)\n #print self.pretty(c) + ' ' +tc\n assert tc == self.FAIL or tc == self.UNRESOLVED\n\n if n > len(c):\n # No further minimizing\n if self.verbose:\n print \"dd: done\"\n return c\n\n\t self.report_progress(c, \"dd\")\n\n\t cs = self.split(c, n)\n\n if self.verbose:\n ###print\n print \"dd (run #\" + `run` + \"): trying\",\n for i in range(n):\n if i > 0:\n print \"+\",\n print len(cs[i]),\n print\n\n c_failed = 0\n cbar_failed = 0\n\n next_c = c[:]\n next_n = n\n\n\t # Check subsets\n##\t for i in range(n):\n## if self.debug_dd:\n## print \"dd: trying\", self.pretty(cs[i])\n\n## (t, cs[i]) = self.test_mix(cs[i], c, self.REMOVE)\n\n## if t == self.FAIL:\n## # Found\n## if self.debug_dd:\n## print \"dd: found\", len(cs[i]), \"deltas:\",\n## print self.pretty(cs[i])\n\n## c_failed = 1\n## next_c = cs[i]\n## next_n = 2\n## cbar_offset = 0\n## self.report_progress(next_c, \"dd\")\n## break\n\n if not c_failed:\n # Check complements\n cbars = n * [self.UNRESOLVED]\n\n # print \"cbar_offset =\", cbar_offset\n\n for j in range(n):\n i = (j + cbar_offset) % n\n cbars[i] = self.__listminus(c, cs[i]) \n t, cbars[i] = self.test_mix(cbars[i], c, self.ADD) \n doubled = self.__listintersect(cbars[i], cs[i])\n if doubled != []:\n cs[i] = self.__listminus(cs[i], doubled)\n\n if t == self.FAIL:\n if self.debug_dd:\n print \"dd: reduced to\", len(cbars[i]),\n print \"deltas:\",\n print self.pretty(cbars[i])\n\n cbar_failed = 1\n next_c = self.__listintersect(next_c, cbars[i])\n next_n = next_n - 1\n self.report_progress(next_c, \"dd\")\n\n # In next run, start removing the following subset\n cbar_offset = i\n break\n\n if not c_failed and not cbar_failed:\n if n >= len(c):\n # No further minimizing\n print \"dd: done\"\n return c\n\n next_n = min(len(c), n * 2)\n if self.verbose:\n print \"dd: increase granularity to\", next_n\n cbar_offset = (cbar_offset * next_n) / n\n\n c = next_c\n n = next_n\n\t run = run + 1", "def solve_iteratively(self, conv_crit=1e-10, maxiter=50,\n check_every=4, check_after=1, precision=None, verbose=False):\n sol = self.sol0\n terms = [(get_name(gi), get_name(gj), get_name(uij))\n for term in self.all_terms for (gi, gj, uij) in term]\n gain_map = {}\n ubl_map = {}\n for gi,gj,uij in terms:\n if not gi in gain_map:\n gain_map[gi] = len(gain_map)\n if not gj in gain_map:\n gain_map[gj] = len(gain_map)\n if not uij in ubl_map:\n ubl_map[uij] = len(ubl_map)\n ggu_indices = 
np.array([(gain_map[gi], gain_map[gj], ubl_map[uij]) \n for (gi, gj, uij) in terms], dtype=np.uint)\n v = sol[gi]\n shape, dtype, ndata = v.shape, v.dtype, v.size\n ngains = len(gain_map)\n nubls = len(ubl_map)\n nbls = len(self.keys)\n assert dtype in (np.complex64, np.complex128)\n if precision is None:\n if dtype == np.complex64:\n precision = 1\n else:\n precision = 2\n if precision == 1:\n real_dtype = np.float32\n else:\n real_dtype = np.float64\n gains = np.empty((ndata, ngains), dtype=dtype)\n for k,v in gain_map.items():\n gains[:,v] = sol[k].flatten()\n ubls = np.empty((ndata, nubls), dtype=dtype)\n for k,v in ubl_map.items():\n ubls[:,v] = sol[k].flatten()\n data = np.empty((ndata, nbls), dtype=dtype)\n wgts = np.empty((ndata, nbls), dtype=real_dtype)\n for i,k in enumerate(self.keys):\n data[:,i] = self.data[k].flatten()\n wgts[:,i] = self.wgts[k].flatten()\n #data = np.array([self.data[k].flatten() for k in self.keys])\n #wgts = np.array([self.wgts[k].flatten() for k in self.keys])\n if wgts.shape != data.shape:\n wgts = np.resize(wgts, data.shape)\n result = omnical(ggu_indices, gains, ubls, data, wgts, \n conv_crit, maxiter, check_every, check_after,\n nthreads=NTHREADS, precision=precision, gain=self.gain, \n verbose=verbose)\n for k,v in gain_map.items():\n sol[k] = np.reshape(result['gains'][:,v], shape)\n for k,v in ubl_map.items():\n sol[k] = np.reshape(result['ubls'][:,v], shape)\n meta = {\n 'iter': np.reshape(result['iters'], shape),\n 'chisq': np.reshape(result['chisq'], shape),\n 'conv_crit': np.reshape(result['conv'], shape),\n }\n return meta, sol", "def solve_computerphile(s):\n for i in range(9):\n for j in range(9):\n if s.arr[j][i] == 0:\n for n in range(1, 10):\n if s.possible(n, i, j):\n s.arr[j][i] = n\n solve_computerphile(s=s)\n s.arr[j][i] = 0\n return\n print(s)\n return", "def solve(self, board: List[List[str]]) -> None:\n if not board:\n return\n x=len(board)\n y=len(board[0])\n visit=[[False if board[i][j]=='X' else True for j in range(y)] for i in range(x)]\n for i in range(x):\n for j in range(y):\n if visit[i][j] and board[i][j]=='O':\n queue=[[i,j]]\n visit[i][j]=False\n k=0\n surround=True\n while len(queue)>k:\n if queue[k][0]==0 or queue[k][0]==x-1 or queue[k][1]==y-1 or queue[k][1]==0:\n surround=False\n if queue[k][1]!=y-1 and visit[queue[k][0]][queue[k][1]+1]:\n queue.append([queue[k][0],queue[k][1]+1])\n visit[queue[k][0]][queue[k][1]+1]=False\n if queue[k][1]!=0 and visit[queue[k][0]][queue[k][1]-1]:\n queue.append([queue[k][0],queue[k][1]-1])\n visit[queue[k][0]][queue[k][1]-1]=False\n if queue[k][0]!=x-1 and visit[queue[k][0]+1][queue[k][1]]:\n queue.append([queue[k][0]+1,queue[k][1]])\n visit[queue[k][0]+1][queue[k][1]]=False\n if queue[k][0]!=0 and visit[queue[k][0]-1][queue[k][1]]:\n queue.append([queue[k][0]-1,queue[k][1]])\n visit[queue[k][0]-1][queue[k][1]]=False\n k+=1\n if surround:\n for i1,j1 in queue:\n board[i1][j1]='X'\n return", "def infect(r, c):\n subset = grid[r-1:r+2, c-1:c+2]\n print(f\"Looking at ({r-1}, {c-1}) through ({r+1}, {c+1})\")\n # np.where(subset == 0)\n # subset[subset == 0] = np.fromfunction(calc_infect, shape=())\n #v_calc_infect(subset[subset == 0])\n #for i in np.nditer(subset):\n # if subset[i] == 0:\n # subset[i] = np.random.binomial(1, infect_rate)\n for x in np.nditer(subset, op_flags=['readwrite']):\n if x == 0:\n x[...] 
= calc_infect()\n\n # for nr in np.arange(-1, 2):\n # for nc in np.arange(-1, 2):\n # try:\n # if grid[r+nr, c+nc] == 0:\n # grid[r+nr, c+nc] = np.random.binomial(1, infect_rate)\n # except IndexError: # Out of bounds, ignore\n # pass", "def solve(self):", "def solve(self):\r\n while not self.done():\r\n self.no_open_cells()\r\n self.all_cells_are_mines()\r\n self.no_mines()\r\n if not self.done():\r\n self.obvious_cells()\r\n if not self.done():\r\n made_progress = self.safe_neighbour_difference()\r\n if made_progress:\r\n continue\r\n if not self.done():\r\n made_progress = self.adjacent_combinations()\r\n if made_progress:\r\n continue\r\n return", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n \n util.raiseNotDefined()", "def find_positive(self):\n if self.round == 2:\n pass\n \n elif self.subtested == 1:\n try:\n dim = self.D-1\n sample = range(1, int(self.poolSize)+1)\n self.SLICES = self.partRes\n dim_positive_slices = itemgetter(*self.results.keys())(self.results)\n dim_positive_slices_count = list(map(len,dim_positive_slices))\n one_pos_slice_count = dim_positive_slices_count.count(1)\n two_pos_slice_count = dim_positive_slices_count.count(2)\n three_pos_slice_count = dim_positive_slices_count.count(3)\n if one_pos_slice_count == dim:\n positive_slice_samples = [self.SLICES[keys][value] for keys in self.results.keys() for value in self.results[keys]]\n self.positiveSamples.setText('; '.join(str(s) for s in set.intersection(*positive_slice_samples)))\n return set.intersection(*positive_slice_samples)\n \n elif (one_pos_slice_count == dim-1) and (two_pos_slice_count == 1 or three_pos_slice_count ==1):\n positive_slice_samples = [itemgetter(*self.results[key])(self.SLICES[key]) \n if len(self.results[key])==1 else set.union(*itemgetter(*self.results[key])(self.SLICES[key])) \n for key in self.results.keys()]\n self.positiveSamples.setText('; '.join(str(s) for s in set.intersection(*positive_slice_samples)))\n\n else:\n self.positiveSamples.setText('Indeterministic')\n except:\n pass\n else:\n try:\n dim = self.D\n sample = range(1, int(self.poolSize)+1)\n self.SLICES = self.slicedCube\n dim_positive_slices = itemgetter(*self.results.keys())(self.results)\n dim_positive_slices_count = list(map(len,dim_positive_slices))\n one_pos_slice_count = dim_positive_slices_count.count(1)\n two_pos_slice_count = dim_positive_slices_count.count(2)\n three_pos_slice_count = dim_positive_slices_count.count(3)\n if one_pos_slice_count == dim:\n positive_slice_samples = [self.SLICES[keys][value] for keys in self.results.keys() for value in self.results[keys]]\n self.positiveSamples.setText('; '.join(str(s) for s in set.intersection(*positive_slice_samples)))\n return set.intersection(*positive_slice_samples)\n \n elif (one_pos_slice_count == dim-1) and (two_pos_slice_count == 1 or three_pos_slice_count ==1):\n positive_slice_samples = [itemgetter(*self.results[key])(self.SLICES[key]) \n if len(self.results[key])==1 else set.union(*itemgetter(*self.results[key])(self.SLICES[key])) \n for key in self.results.keys()]\n self.positiveSamples.setText('; '.join(str(s) for s in set.intersection(*positive_slice_samples)))\n\n else:\n self.positiveSamples.setText('Indeterministic: \\n Proceed to sub- \\n directional testing')\n self.labelsCube = self.labelledCube()\n self.subTest()\n self.sliceSelect.clear()\n self.sliceSelect.addItems(self.res)\n if self.round == 1:\n self.round = 2\n else:\n self.round = 3\n except:\n pass", "def search(values): \n # First, reduce the puzzle\n values = 
reduce_puzzle(values)\n if values is False:\n return False\n if all(len(values[s]) == 1 for s in boxes):\n return values\n # Choose one of the unfilled squares with the fewest possibilities\n min_possibility = sys.maxsize\n min_box = \"\"\n for box in boxes:\n if len(values[box]) > 1 and len(values[box]) < min_possibility:\n min_possibility = len(values[box])\n min_box = box\n # Use recursion to solve each one of the resulting sudokus, and \n # if one returns a value (not False), return that answer\n for digit in values[min_box]:\n new_values = values.copy()\n new_values[min_box] = digit\n attempt = search(new_values)\n if attempt:\n return attempt", "def _undiscovered_blob_size(self, pos: Tuple[int, int],\r\n board: List[List[Tuple[int, int, int]]],\r\n visited: List[List[int]]) -> int:\r\n board_size = len(board)\r\n if pos[0] < 0 or pos[0] >= board_size \\\r\n or pos[1] < 0 or pos[1] >= board_size:\r\n return 0\r\n column = pos[0]\r\n row = pos[1]\r\n if not board[column][row] == self.colour:\r\n visited[column][row] = 0\r\n return 0\r\n score = 1\r\n visited[column][row] = 1\r\n # upper cell\r\n if row - 1 >= 0:\r\n if visited[column][row - 1] == -1:\r\n score += self._undiscovered_blob_size((column, row - 1),\r\n board, visited)\r\n # lower cell\r\n if row + 1 <= board_size - 1:\r\n if visited[column][row + 1] == -1:\r\n score += self._undiscovered_blob_size((column, row + 1),\r\n board, visited)\r\n # left cell\r\n if column - 1 >= 0:\r\n if visited[column - 1][row] == -1:\r\n score += self._undiscovered_blob_size((column - 1, row),\r\n board, visited)\r\n if column + 1 <= board_size - 1:\r\n if visited[column + 1][row] == -1:\r\n score += self._undiscovered_blob_size((column + 1, row),\r\n board, visited)\r\n return score", "def search(values):\n \"Using depth-first search and propagation, try all possible values.\"\n ## Used the provided solutions to be sure that my implementation of diagonals and\n ## Twins is ok\n\n # First, reduce the puzzle using the previous function\n values = reduce_puzzle(values)\n if values is False:\n return False ## Failed earlier\n if all(len(values[s]) == 1 for s in boxes):\n return values ## Solved!\n # Choose one of the unfilled squares with the fewest possibilities\n n,s = min((len(values[s]), s) for s in boxes if len(values[s]) > 1)\n # Now use recurrence to solve each one of the resulting sudokus, and\n for value in values[s]:\n new_sudoku = values.copy()\n new_sudoku[s] = value\n attempt = search(new_sudoku)\n if attempt:\n return attempt", "def occam_razor() -> None:\r\n print(\"WARNING! Mode three activated. 
Time to complete may be several minutes\")\r\n temp = [] # x-y-conflicts\r\n global example\r\n backup = example.copy() # Backup so it can backtrack through solutions\r\n for x in range(shape):\r\n for y in range(shape):\r\n conflict_counter = 0\r\n for z in range(shape):\r\n if conflict_space[x, y] != 0:\r\n if conflict_space[x, y] == conflict_space[x, z] and z != y:\r\n conflict_counter += 1\r\n if conflict_space[x, y] == conflict_space[z, y] and z != x:\r\n conflict_counter += 1\r\n if conflict_counter > 0 and no_neighbour(x, y):\r\n temp.append([x, y, conflict_counter])\r\n threshold = [0, 0, 0]\r\n \"\"\"Takes an educated guess on the node in most conflict in case it's one move away from being solved\"\"\"\r\n for x in range(len(temp)):\r\n if temp[x][2] > threshold[2]:\r\n threshold = temp[x]\r\n if threshold[2] > 0:\r\n example[threshold[0], threshold[1]] = 0\r\n shade_neighbours(threshold[0], threshold[1])\r\n if not victory_checker():\r\n \"\"\"code now begins guessing\"\"\"\r\n for x in range(len(temp)):\r\n example = backup.copy()\r\n if no_neighbour(temp[x][0], temp[x][1]):\r\n example[temp[x][0], temp[x][1]] = 0\r\n else:\r\n continue\r\n progress_handler(False, True)\r\n while progress_handler(True, False):\r\n print_debug(\"itteration\")\r\n progress_handler(False, False)\r\n mark_check()\r\n if victory_checker():\r\n completion(True)\r\n if not progress_handler(True, False):\r\n special_corner()\r\n if not progress_handler(True, False):\r\n separation_crawler(True)\r\n if not progress_handler(True, False):\r\n occam_razor() # Recursive\r\n if not progress_handler(True, False):\r\n if victory_checker():\r\n completion(True)\r\n else:\r\n print(\"Searching...\")\r\n continue\r\n conflict_check()", "def solvable(grid):\n y = x = 1\n stack = deque([(0, y, x,)])\n goal = len(grid) - 2\n found = np.ones_like(grid, dtype=bool)\n \n while stack:\n i, y, x = stack.popleft()\n i += 1\n for y2, x2 in solve_perfect.neighbors(y, x, grid):\n if found[y2, x2]:\n if y2 == goal and x2 == goal:\n return i\n else:\n found[y2, x2] = False\n stack.append((i, y2, x2,))\n \n return 0", "def step_one(self):\n C_zeros = (self.C == 0).astype(int)\n C_zeros_uncovered = C_zeros * (1-self.row_cover[:, np.newaxis])\n C_zeros_uncovered *= (1-self.col_cover)\n\n while True:\n # find a uncovered zero\n # looks like np.argmax is fast than np.nozero, np.where\n row, col = np.unravel_index(np.argmax(C_zeros_uncovered), C_zeros_uncovered.shape)\n if C_zeros_uncovered[row, col] == 0:\n # no uncovered zeros\n return self.Steps.STEP3\n\n # prime it\n self.M[row, col] = self.Zeros.PRIME.value\n if self.star_in_row(row):\n # star in this row,\n col = self.find_star_in_row(row)\n # cover row\n self.row_cover[row] = 1\n # uncover the column\n self.col_cover[col] = 0\n C_zeros_uncovered[:, col] = C_zeros[:, col]*(1-self.row_cover)\n C_zeros_uncovered[row] = 0\n else:\n self.uncovered_zero_prime = (row, col)\n return self.Steps.STEP2", "def rectCover(self, number):\n # write code here\n dp = [1, 2, 3]\n if not number:\n return 0\n if number < 4:\n return dp[number-1]\n\n for i in range(3, number):\n dp.append(dp[i-1] + dp[i-2])\n return dp[-1]", "def part2():\n grid[(0, 0)] = 1\n coordinates_value = 0\n layer = 1\n x = 0; y = 0\n done = False\n while not done:\n # print(\"Layer: \", layer)\n # go right one step\n layer += 1; x += 1\n grid[(x,y)] = check_neighbours((x,y))\n\n # go up to the boundary of layer\n for y_up in range(y+1, layer):\n coord = (x, y_up)\n coordinates_value = check_neighbours(coord)\n if 
coordinates_value > puzzle_input:\n return coordinates_value\n y = y_up\n\n # go left till the boundary of layer\n for x_left in range(x-1, -layer, -1):\n coord = (x_left, y)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n x = x_left\n\n # go down till the boundary of layer\n for y_down in range(y-1, -layer, -1):\n coord = (x, y_down)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n y = y_down\n\n # go right till the boundary of layer\n for x_right in range(x+1, layer):\n coord = (x_right, y)\n coordinates_value = check_neighbours(coord)\n if coordinates_value > puzzle_input:\n return coordinates_value\n x = x_right", "def run(brickheight,bricklength,walllength,wallheight,occupied=[],answer=[],globall=[]):\n if bricklength == brickheight:\n for t in range(walllength-bricklength+1):\n for s in range(wallheight-brickheight +1):\n column = t\n row = s\n if test(brickheight,bricklength,row,column,walllength,wallheight,occupied,answer):\n put(brickheight,bricklength,row,column,walllength,wallheight,occupied,answer)\n if end(brickheight,bricklength,walllength,wallheight,occupied,answer):\n return answer\n else:\n return False\n if bricklength != brickheight:\n for t in range(walllength):\n for s in range(wallheight):\n column = t\n row = s\n\n if test(brickheight,bricklength,row,column,walllength,wallheight,occupied,answer) and \\\n test(bricklength,brickheight,row,column,walllength,wallheight,occupied,answer):\n occupied2 = occupied[:]\n answer2 = answer[:]\n\n put(bricklength,brickheight,row,column,walllength,wallheight,occupied,answer)\n if not end(brickheight,bricklength,walllength,wallheight,occupied,answer):\n run(brickheight,bricklength,walllength,wallheight,occupied,answer,globall)\n else:\n globall.append(answer)\n\n put(brickheight,bricklength,row,column,walllength,wallheight,occupied2,answer2)\n if not end(brickheight,bricklength,walllength,wallheight,occupied2,answer2):\n run(brickheight,bricklength,walllength,wallheight,occupied2,answer2,globall)\n else:\n globall.append(answer)\n \n elif test(brickheight,bricklength,row,column,walllength,wallheight,occupied,answer):\n put(brickheight,bricklength,row,column,walllength,wallheight,occupied,answer)\n if not end(brickheight,bricklength,walllength,wallheight,occupied,answer):\n run(brickheight,bricklength,walllength,wallheight,occupied,answer,globall)\n else:\n globall.append(answer)\n \n elif test(bricklength,brickheight,row,column,walllength,wallheight,occupied,answer):\n put(bricklength,brickheight,row,column,walllength,wallheight,occupied,answer)\n if not end(brickheight,bricklength,walllength,wallheight,occupied,answer):\n run(brickheight,bricklength,walllength,wallheight,occupied,answer,globall)\n else:\n globall.append(answer)\n return globall", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n\n templist=[]\n explored = set()\n fringe = util.PriorityQueue()\n # state, list of directions till now and the cost is pushed in the stack\n # so that algorithm can explore the node with lowest cost first\n fringe.push((problem.getStartState(),templist),1)\n\n while (not fringe.isEmpty()):\n (currentNode,currDir) = fringe.pop()\n\n if problem.isGoalState(currentNode):\n pathToGoal = currDir\n break\n if not (currentNode in explored):\n explored.add(currentNode)\n for childNode in 
problem.getSuccessors(currentNode):\n # total cost is cost till now plus cost to the child node\n totalCost = childNode[2]+problem.getCostOfActions(currDir)\n fringe.push((childNode[0],currDir+[childNode[1]]),totalCost)\n\n\n\n\n return pathToGoal;", "def solve(self):\n smallest_f = self.get_smallest_f_cost_unvisited_node()\n smallest_f_node = smallest_f[0]\n\n if smallest_f[1] > 1:\n current_node = self.get_smallest_h_cost_unvisited_node()\n else:\n current_node = smallest_f_node\n if current_node.f_cost == self.inf:\n return\n\n self.set_h_cost(current_node)\n self.unvisited_pos.remove(current_node.pos)\n self.visited_pos.append(current_node.pos)\n neighbours = algo_utils.get_neighbours(current_node, self.grid, self.wall_pos)\n\n for neigh in neighbours:\n neighbour_dist = neigh.g_cost\n current_dist = current_node.g_cost\n new_dist = current_dist + 1\n if neighbour_dist < new_dist:\n continue\n neigh.g_cost = new_dist\n self.set_h_cost(neigh)\n mix_neigh = {neigh.pos: neigh.g_cost}\n self.mix.update(mix_neigh)\n mix_current = {current_node.pos: current_node.g_cost}\n self.mix.update(mix_current)\n\n smallest_f = self.get_smallest_f_cost_unvisited_node()\n smallest_f_node = smallest_f[0]\n smallest_h_node = self.get_smallest_h_cost_unvisited_node()\n\n if (\n self.end_pos not in self.unvisited_pos\n or algo_utils.get_smallest_g_cost_unvisited_node(\n self.grid, self.unvisited_pos\n ).g_cost\n == self.inf\n ):\n for key, value in self.mix.items():\n self.mix[key] = round((value * 1.0) / self.end_node.g_cost, 3)\n self.backtrack_path(self.end_node)\n else:\n if smallest_f[1] > 1:\n current_node = smallest_h_node\n else:\n current_node = smallest_f_node\n self.solve()", "def problem_086(limit,verbose):\n\n # Three routes:\n # *------F Sides labeled A, B, C, routes clockwise from S\n # | /| R1^2 = (A + C)^2 + B^2\n # | / n R2^2 = (B + C)^2 + A^2\n # +-----+------+-----F R3^2 = (A + B)^2 + C^2\n # | | / | . `|\n # | A / .|` / |\n # | |/. ` a-n / |\n # +-C---S-b-B--+-----+\n # | ` . 
|\n # | `|\n # *------+\n # | |\n # | |\n # | |\n # +------F\n \n # Genreate all triples up to perimeter 3M + sqrt((M + M)^2 + M^2)\n # Which is is 3M + sqrt(5M^2)\n\n total_found = 0\n cuboids = defaultdict(set)\n triples = set()\n under_length = []\n \n for batch in count():\n size = (batch + 1) * 500\n max_triple_perimeter = int(3 * size + sqrt(5 * size**2)) + 1\n all_triples = set(generate_triples(max_triple_perimeter))\n this_loop = all_triples - triples\n triples = all_triples\n \n with click.progressbar(this_loop, label=\"{}\".format(total_found)) as bar:\n new_cuboids = (c for t in bar for c in generate_cuboids(t))\n new_cuboids = (c for c in new_cuboids if c.a > 0)\n new_cuboids = (c for c in new_cuboids if is_shortest_route_integral(c))\n for cuboid in new_cuboids:\n cuboids[cuboid.c].add(cuboid)\n \n for i in range(batch * 500, batch * 500 + 500):\n \n total_found += len(cuboids[i])\n if total_found >= limit:\n click.echo(total_found)\n click.echo(i)\n return", "def dfs_util(item_id, item_dict, itemUsageDict, visited, candidates, selection, unique_tags, current_tags):\n\n visited.add(item_id)\n #A CANDIDATE WAS PICKED FROM CRITERIA AND ITS THE ONE WE SHOULD END WITH\n if len(selection) == 1:\n return \n\n #IF IT IS STILL A SECONDARY ITEM\n if \"into\" in item_dict[item_id]:\n for item in item_dict[item_id][\"into\"]:\n if item not in visited:\n #THE ITEM HAS NOT BEEN VISITED AND ALL POSSIBLE ROUTES SHOULD BE EXAMINED\n dfs_util(item, item_dict,itemUsageDict, visited, candidates, selection, unique_tags, current_tags)\n else:\n #THE ITEM HAS BEEN EXAMINED IN THE PAST. GOOD TO BREAK FROM HERE BECAUSE THAT ROUTE WILL PROB LEAD TO THE SAME\n return\n\n #REACHED THE END. NEEDS SOME CRITERIA.\n else:\n #ORNN IS A SPECIAL CASE IN LEAGUE. TAKE OUT HIS ITEMS BECAUSE THEY DONT FIT THE GENERAL PLAY\n if \"requiredAlly\" in item_dict[item_id] and \"from\" in item_dict[item_id]:\n item_id = item_dict[item_id][\"from\"][0]\n\n if item_id in itemUsageDict:\n #Meets the basic criteria of being over 55% win Rate(good chance of winning with this item on)\n if itemUsageDict[item_id][\"totalCount\"] > 200 and itemUsageDict[item_id][\"winRatio\"] >= 0.55:\n for tag in unique_tags:\n if tag in item_dict[item_id][\"tags\"] and tag in current_tags:\n return\n elif tag in item_dict[item_id][\"tags\"] and tag not in current_tags:\n\n current_tags.add(tag)\n\n selection.add(item_id) \n return\n\n elif itemUsageDict[item_id][\"totalCount\"] > 10:\n candidates.append(item_id)\n return", "def gcd(*args: int, safe: bool=True):\n \n # Calculating the gcd of more than two integers can be done iteratively:\n # i.e gcd(a, b, c) = gcd(gcd(a, b), c) ...\n \n # The Euclidean Algorithm can only compute the gcd of two integers a, b\n # which is what we will implement first. 
We can just utilize a stack to proceed\n # in the case of len(args) > 2\n \n def binary_gcd(a: int, b: int):\n \"\"\"\n Calculates the Greatest Common Divisor of `a` and `b`, using the Euclidean algorithm\n \"\"\"\n\n # There exists a very elegant method to compute the gcd:\n # we first need to assure that a >= b..\n \n # if b is greater than a, swap them.\n \n if(a < b):\n a, b = b, a\n \n def _recurse_gcd(a: int, b: int):\n \"\"\"Small stub so the unnecessary compare/swap does not occur for recursion.\"\"\"\n \n # No remainder left\n if a == 0:\n # gcd has been found, return the remainder\n return b\n \n return _recurse_gcd(b % a, a)\n \n if safe:\n old = getrecursionlimit()\n setrecursionlimit(999999999)\n result = _recurse_gcd(a, b)\n setrecursionlimit(old)\n return result\n else:\n return _recurse_gcd(a, b)\n\n if len(args) == 1:\n return args[0] # gcd(a) = a\n \n result = None\n args = list(args) \n \n while True:\n \n a, b = args.pop(), args.pop() \n result = binary_gcd(a, b)\n \n # The list is empty.. we're done!\n # if the result is 1 we can return prematurely, \n # because gcd(a, 1) == 1 for any positive integer a\n if len(args) == 0 or result == 1:\n \n # Return the last result.\n return result\n\n args.append(result)", "def Solve(bases):\r\n n = 1\r\n while 1:\r\n n += 1\r\n done = True\r\n for b in bases:\r\n if not Happy(n, b):\r\n done = False\r\n break\r\n if done:\r\n return n", "def dealing_covers(high_series,low_series):\n #dealing k\n #initialize\n pre_calculated=0\n rates_total=len(high_series)\n valid_high=high_series.copy()\n valid_low=low_series.copy()\n valid_k_line_mark=np.zeros(rates_total)\n \"\"\"\n start point use up contains dealing\n \"\"\"\n start_index=0\n pre_high=valid_high[start_index]\n pre_low=valid_low[start_index]\n pre_idx=start_index\n cur_idx=pre_idx\n is_found=False\n while(not is_found):\n cur_idx=cur_idx+1\n cur_high=valid_high[cur_idx]\n cur_low=valid_low[cur_idx]\n if (cur_high>pre_high and cur_low>pre_low)or (cur_high<pre_high and cur_low<pre_low):\n is_found=True\n valid_high[pre_idx]=pre_high\n valid_low[pre_idx]=pre_low\n valid_k_line_mark[pre_idx]=1\n else:\n if pre_high>cur_high:\n # first k cover second k\n pre_low=cur_low\n elif pre_high<cur_high:\n\n # first k be convered by second k\n pre_high=cur_high\n pre_idx=cur_idx\n else:\n # high value is equal\n pre_low=cur_low\n pre_idx=cur_idx\n\n # no start point dealing\n begin_idx=cur_idx+1\n for i in range(begin_idx,rates_total):\n post_high=valid_high[i]\n post_low=valid_low[i]\n post_idx=i\n #first classification: no contains\n if (cur_high>post_high and cur_low>post_low)or (cur_high<post_high and cur_low<post_low):\n valid_high[cur_idx]=cur_high\n valid_low[cur_idx]=cur_low\n valid_k_line_mark[cur_idx]=1\n pre_high=cur_high\n pre_low=cur_low\n pre_idx=cur_idx\n cur_high=post_high\n cur_low=post_low\n cur_idx=post_idx\n else:\n if pre_high<cur_high:#up contains\n if cur_high>post_high:\n #post be coverd by cur\n cur_low=post_low\n elif cur_high<post_high:\n #post cover cur\n cur_high=post_high\n cur_idx=post_idx\n else:\n #high be equal\n if cur_low>post_low:\n #cur be covered by post\n cur_idx=post_idx\n else:\n #cur covers post\n cur_low=post_low\n else:#down contains\n if cur_low>post_low:\n #cur be covered by post\n cur_low=post_low\n cur_idx=post_idx\n elif cur_low<post_low:\n #cur covers post\n cur_high=post_high\n else:\n # two low is equal\n if cur_high>post_high:\n cur_high=post_high\n else:\n cur_idx=post_idx\n cur_high=post_high#I think the words can be deleted\n return 
valid_k_line_mark,valid_high,valid_low", "def naive_recursive(k, n):\n # If no floors remaining, no more attempts need to be made.\n if n == 0:\n return 0\n # It will take n attempts to find the correct floor if there is only one egg remaining.\n if k == 1:\n return n\n # Solve the problem recursively.\n return min((max(naive_recursive(k, n - x), naive_recursive(k - 1, x - 1)) + 1 for x in range(1, n + 1)))", "def reachable(maze: list, start: tuple, goal: tuple):\n n = len(maze) # Get the dimension of the maze\n\n #========================================#\n # Some data checking statements\n\n if (not is_valid(start, n)):\n print(\"reachable: Start indices outside maze dimensions\")\n return False\n elif (not is_valid(goal, n)):\n print(\"reachable: Goal indices outside maze dimensions\")\n return False\n\n # End data checking statements\n #========================================#\n\n # We can use a copy of the maze to keep track of visited squares (Considered using a set here, thought that time efficiency was important)\n visited = copy.deepcopy(maze)\n # visited = list(map(list, maze)) # Alternative to using copy.deepcopy\n stack = [] # Define our stack of \"fringe\" squares\n stack.append(start) # Push the start square onto our stack\n visited[start[0]][start[1]] = 1 # Set our start to visited\n\n while (len(stack)): # While there exists items in the stack\n current = stack.pop() # Pop the last element\n\n if (current == goal):\n return True # If current is the goal, we found it!\n\n current_i, current_j = current # Unpack the current pair\n\n # Now we want to add all unvisited squares that are possible to get to from the current square\n for i in range(len(nearby_offsets)):\n offset_i, offset_j = nearby_offsets[i]\n possible = (current_i + offset_i, current_j + offset_j)\n # print(f\"Current possible: {possible_i} {possible_j}\") # DEBUG\n if (is_valid(possible, n)): # If the calculated square is within the maze matrix\n if (not visited[possible[0]][possible[1]]):\n stack.append(possible)\n visited[possible[0]][possible[1]] = 1\n return False # If the while loop goes out, and the stack is empty, then there is no possible path", "def SH_FindOverlap(xcenter, ycenter, xlength, ylength, xp_corner, yp_corner):\n\n areaClipped = 0.0\n top = ycenter + 0.5 * ylength\n bottom = ycenter - 0.5 * ylength\n\n left = xcenter - 0.5 * xlength\n right = xcenter + 0.5 * xlength\n\n nVertices = 4 # input detector pixel vertices\n MaxVertices = 9\n # initialize xPixel, yPixel to the detector pixel corners.\n # xPixel,yPixel will become the clipped polygon vertices inside the cube pixel\n # xnew,ynew xpixel and ypixel of size MaxVertices\n\n xPixel = []\n yPixel = []\n\n xnew = []\n ynew = []\n\n for j in range(0, 9):\n xnew.append(0.0)\n ynew.append(0.0)\n xPixel.append(0.0)\n yPixel.append(0.0)\n\n\n # Xpixel, YPixel closed (5 corners)\n for i in range(0, 4):\n xPixel[i] = xp_corner[i]\n yPixel[i] = yp_corner[i]\n xPixel[4] = xp_corner[0]\n yPixel[4] = yp_corner[0]\n\n\n for i in range(0, 4): # 0:left, 1: right, 2: bottom, 3: top\n nVertices2 = 0\n for j in range(0, nVertices):\n x1 = xPixel[j]\n y1 = yPixel[j]\n x2 = xPixel[j + 1]\n y2 = yPixel[j + 1]\n condition = calcCondition(i, x1, y1, x2, y2, left, right, top, bottom)\n x = 0\n y = 0\n\n if condition == 1:\n x, y = solveIntersection(i, x1, y1, x2, y2,\n left, right, top, bottom)\n nVertices2 = addpoint(x, y, xnew, ynew, nVertices2);\n nVertices2 = addpoint(x2, y2, xnew, ynew, nVertices2)\n\n elif condition == 2:\n nVertices2 = addpoint(x2, y2, 
xnew, ynew, nVertices2)\n elif condition == 3:\n x, y = solveIntersection(i, x1, y1, x2, y2,\n left, right, top, bottom)\n nVertices2 = addpoint(x, y, xnew, ynew, nVertices2)\n\n#\tcondition == 4: points outside\n# Done looping over J corners\n nVertices2 = addpoint(xnew[0], ynew[0], xnew, ynew, nVertices2) # close polygon\n\n if nVertices2 > MaxVertices:\n raise Error2DPolygon(\" Failure in finding the clipped polygon, nVertices2 > 9 \")\n\n\n nVertices = nVertices2 - 1;\n\n for k in range(0, nVertices2):\n xPixel[k] = xnew[k]\n yPixel[k] = ynew[k]\n\n# done loop over top,bottom,left,right\n nVertices = nVertices + 1\n\n\n if nVertices > 0:\n areaClipped = FindAreaPoly(nVertices, xPixel, yPixel);\n\n\n return areaClipped;", "def fn(mask, k):\n if mask == 0: return 0 # no more numbers \n ans = 0\n for i in range(n): \n if mask & 1 << i:\n for j in range(i+1, n): \n if mask & 1 << j: \n mask0 = mask & ~(1<<i) & ~(1<<j) # unset ith & jth bit\n ans = max(ans, k*gcd(nums[i], nums[j]) + fn(mask0, k+1))\n return ans", "def fn(k, i):\n ii = -1 \n for x in path:\n if gcd(nums[k], x) == 1: # coprime \n if path[x] and path[x][-1][1] > ii: \n ans[k] = path[x][-1][0]\n ii = path[x][-1][1]\n \n path.setdefault(nums[k], []).append((k, i))\n for kk in tree.get(k, []): \n if kk not in seen: \n seen.add(kk)\n fn(kk, i+1)\n path[nums[k]].pop()", "def recursive(eperm, guess, dpl, conc, tol=1e-7):\n\n # Calculate effective electric permittivity for this guess\n R = np.sum(1/(dpl*eperm[:, None] + (1 - dpl)*guess), axis=1)\n effe = np.sum(conc*eperm*R)/np.sum(conc*R)\n\n # If error above tolerance, call it again with new guess\n if np.abs(guess - effe) > tol:\n effe = recursive(eperm, effe, dpl, conc, tol)\n\n return effe", "def check_box(volume,point,is_queued_map,is_visited_map):\n list_not_visited=[]\n list_not_queued = []\n list_are_near = []\n\n if point[0]==1227 and point[1]==735 and point[2]==27:\n pass\n\n\n for x in xrange(-1, 2):\n\n # Edgecase for x\n if point[0] + x < 0 or point[0] + x > volume.shape[0] - 1:\n continue\n\n for y in xrange(-1, 2):\n\n # Edgecase for y\n if point[1] + y < 0 or point[1] + y > volume.shape[1] - 1:\n continue\n\n for z in xrange(-1, 2):\n\n # Edgecase for z\n if point[2] + z < 0 or point[2] + z > volume.shape[2] - 1:\n continue\n\n # Dont look at the middle point\n if x == 0 and y == 0 and z == 0:\n continue\n\n # TODO case if loop, all are queued but not visited\n if volume[point[0] + x, point[1] + y, point[2] + z] == 1:\n\n\n list_are_near.extend([[point[0] + x, point[1] + y, point[2] + z]])\n\n if is_queued_map[point[0] + x, point[1] + y, point[2] + z]==0:\n list_not_queued.extend([[point[0] + x, point[1] + y, point[2] + z]])\n if is_visited_map[point[0] + x, point[1] + y, point[2] + z]==0:\n list_not_visited.extend([[point[0] + x, point[1] + y, point[2] + z]])\n\n is_visited_map[point[0],point[1],point[2]]=1\n return list_not_queued,list_not_visited,is_visited_map,list_are_near", "def solveOneStep(self):\n ### Student code goes here\n if (self.currentState.state == self.victoryCondition) or (self.currentState not in self.visited):\n self.visited[self.currentState] = True\n win_or_not = self.currentState.state == self.victoryCondition\n return win_or_not\n\n if not self.currentState.nextChildToVisit: \n its = 0\n for movable in self.gm.getMovables():\n its += 1\n # time test\n # too long \n if its == \"too long\":\n return \"too long\"\n #make every move in movable\n self.gm.makeMove(movable)\n new = self.gm.getGameState()\n new_gs = GameState(new, 
self.currentState.depth+1, movable)\n \n if new_gs not in self.visited:\n new_gs.parent = self.currentState\n self.currentState.children.append(new_gs)\n self.gm.reverseMove(movable) \n \n num_children = len(self.currentState.children)\n if self.currentState.nextChildToVisit < num_children:\n new = self.currentState.children[self.currentState.nextChildToVisit]\n self.currentState.nextChildToVisit = self.currentState.nextChildToVisit + 1\n self.gm.makeMove(new.requiredMovable)\n self.currentState = new\n #recurse\n return self.solveOneStep()\n else:\n self.currentState.nextChildToVisit = self.currentState.nextChildToVisit + 1\n self.gm.reverseMove(self.currentState.requiredMovable)\n self.currentState = self.currentState.parent\n #recurse\n return self.solveOneStep()", "def evaluate(self, board):\r\n\r\n self_moves = self.find_possible_moves(board, self.my_color)\r\n opponent_moves = self.find_possible_moves(board, self.opponent_color)\r\n\r\n mobility = 0 # Mobility captures Self's profit in amount of available moves\r\n disk_parity = 0 # Disk parity captures Self's profit in raw disk amount\r\n corners = 0 # Corners captures Self's profit in occupied corners\r\n corner_proximity = 0 # Corner proximity captures the risk of giving away a free corner\r\n stability = 0 # Stability captures Self's profit in unflippable disks\r\n\r\n # Calculating mobility heuristic\r\n self_immediate_mobility = len(self_moves)\r\n opponent_immediate_mobility = len(opponent_moves)\r\n\r\n if self_immediate_mobility + opponent_immediate_mobility != 0:\r\n mobility = 100 * (self_immediate_mobility - opponent_immediate_mobility) / (self_immediate_mobility + opponent_immediate_mobility)\r\n\r\n # Calculate disk parity heuristic\r\n self_disks = self.get_disk_count(self.my_color, board)\r\n opponent_disks = self.get_disk_count(self.opponent_color, board)\r\n\r\n disk_parity = 100 * (self_disks - opponent_disks) / (self_disks + opponent_disks)\r\n\r\n # Calculating corner heuristic\r\n corners_list = [(0,0), (0,7), (7,0), (7,7)]\r\n self_corners = 0\r\n opponent_corners = 0\r\n\r\n for corner in corners_list:\r\n if board[corner[0]][corner[1]] == self.my_color:\r\n self_corners += 1\r\n if board[corner[0]][corner[1]] == self.opponent_color:\r\n opponent_corners += 1\r\n\r\n if self_corners + opponent_corners != 0:\r\n corners = 100 * (self_corners - opponent_corners) / (self_corners + opponent_corners)\r\n\r\n # Calculating corner proximity heuristic\r\n corners_proximity_list = [(0, 1), (1, 0), (1, 1), (0, 6), (1, 6), (1, 7), (6, 0), (6, 1), (7, 1), (6, 6), (7, 6), (6, 7)]\r\n self_corner_proximity = 0\r\n opponent_corner_proximity = 0\r\n\r\n for cell in corners_proximity_list:\r\n if board[cell[0]][cell[1]] == self.my_color:\r\n self_corner_proximity += 1\r\n if board[cell[0]][cell[1]] == self.opponent_color:\r\n opponent_corner_proximity += 1\r\n\r\n if self_corner_proximity + opponent_corner_proximity != 0:\r\n corner_proximity = 100 * (self_corner_proximity - opponent_corner_proximity) / (self_corner_proximity + opponent_corner_proximity)\r\n\r\n # Calculating stability heuristic\r\n self_stability = self.get_stable_disks(board, self.my_color, (0, 0)) + \\\r\n self.get_stable_disks(board, self.my_color, (0, 7)) + \\\r\n self.get_stable_disks(board, self.my_color, (7, 0)) + \\\r\n self.get_stable_disks(board, self.my_color, (7, 7))\r\n\r\n opponent_stability = self.get_stable_disks(board, self.opponent_color, (0, 0)) + \\\r\n self.get_stable_disks(board, self.opponent_color, (0, 7)) + \\\r\n 
self.get_stable_disks(board, self.opponent_color, (7, 0)) + \\\r\n self.get_stable_disks(board, self.opponent_color, (7, 7))\r\n\r\n if self_stability + opponent_stability != 0:\r\n stability = 100 * (self_stability - opponent_stability) / (self_stability + opponent_stability)\r\n\r\n # Calculating the final value\r\n disk_total = self.get_disk_count(self.my_color, board) + self.get_disk_count(self.opponent_color, board)\r\n\r\n # In early-game, focus on maximal mobility and stability. Avoid amassing too many disks.\r\n if disk_total < 15:\r\n heuristic_value = 30 * corners - \\\r\n 15 * corner_proximity + \\\r\n 30 * mobility + \\\r\n 30 * stability\r\n\r\n # In mid-game, focus on capturing corners and further building stability\r\n elif disk_total < 45:\r\n heuristic_value = 30 * corners - \\\r\n 15 * corner_proximity + \\\r\n 20 * mobility + \\\r\n 35 * stability\r\n\r\n # In late-game, focus on getting as many discs as possible\r\n else:\r\n heuristic_value = 30 * corners + \\\r\n 15 * mobility + \\\r\n 30 * stability + \\\r\n 35 * disk_parity\r\n\r\n return heuristic_value", "def most_discriminating( features_df, labels_df, top=5):\n \n columns = features_df.shape[1]\n labels_df = labels_df[['file', 'candy_id']].set_index('file')\n qualities = np.zeros(columns)\n \n _left = 0\n _right = 1\n\n _c = 0\n _h = 1\n\n # globals\n cases = float(labels_df['candy_id'].count()) # total cases\n\n p_c_A = (labels_df['candy_id'] == 0).sum() / cases\n p_h_A = 1.0 - p_c_A\n\n\n for feature in range(columns):\n\n branch_cases = np.zeros(2) # total on each branch\n pi = np.zeros(2) # proportion on each branch\n\n split = np.array([\n #c, h\n [0, 0], #left\n [0, 0] #right\n ])\n\n for index, value in features_df[feature].iteritems():\n split[value][labels_df.loc[index][0]] += 1\n\n branch_cases[_left] = split[_left].sum()\n branch_cases[_right] = split[_right].sum()\n \n if branch_cases[_left] == 0.0 or branch_cases[_right] == 0.0:\n qualities[feature] = 0\n continue\n \n pi[_left] = branch_cases[_left] / cases\n pi[_right] = branch_cases[_right] / cases\n\n p_c_B = split[_left][_c] / branch_cases[_left]\n p_h_B = split[_left][_h] / branch_cases[_left]\n\n p_c_C = split[_right][_c] / branch_cases[_right]\n p_h_C = split[_right][_h] / branch_cases[_right]\n\n gini_tree = 1.0 - (math.pow(p_c_A, 2) + math.pow(p_h_A, 2))\n\n gini_left = 1.0 - (math.pow(p_c_B, 2) + math.pow(p_h_B, 2))\n gini_right = 1.0 - (math.pow(p_c_C, 2) + math.pow(p_h_C, 2))\n\n quality = gini_tree - pi[_left] * gini_left - pi[_right] * gini_right\n\n qualities[feature] = quality\n return list(reversed(qualities.argsort()))[:top]", "def test_scan_recursive(self):\n self.run_scan(self.tempdir, self.root_fcount + self.nest_fcount + 1)", "def solve(board):\r\n \r\n #An O(2mn) time solution; the first O(mn) traversal is to preform a bfs on all tiles attached to edge 'O' tiles (can't convert to 'X's); the second is to convert all remaining 'O's into 'X's\r\n \r\n def bfs(curr,r,c):\r\n if not curr: return\r\n prev = len(curr)\r\n for n in range(0,prev):\r\n i,j = curr[n][0],curr[n][1]\r\n board[i][j] = 'A'\r\n for x,y in [(-1, 0), (0, -1), (1, 0), (0, 1)]:\r\n x_n = i+x\r\n y_n = j+y\r\n if x_n >= 0 and x_n < r and y_n >= 0 and y_n < c and board[x_n][y_n] == \"O\":\r\n curr += [(x_n,y_n)]\r\n bfs(curr[prev:],r,c)\r\n\r\n \r\n q,r,c = [],len(board),len(board[0])\r\n if not r or q: return\r\n\r\n for i in range(r):\r\n for j in range(c):\r\n if (i==0 or j==0 or i==r-1 or j==c-1) and board[i][j] == \"O\":\r\n q += [(i,j)]\r\n \r\n 
bfs(q,r,c)\r\n\r\n for i in range(r):\r\n for j in range(c): \r\n if board[i][j] == \"O\": \r\n board[i][j] = \"X\"\r\n elif board[i][j] == \"A\":\r\n board[i][j] = \"O\"\r\n \r\n return", "def recursion_step(value_n, r_grid, discount):\n\n n = value_n.shape[0]\n r_len = r_grid.shape[0]\n value_n_minus_1 = np.zeros([n - 1, r_len]) # Value function length reduced by 1\n gittins_n_minus_1 = np.zeros(n - 1) # Value function length reduced by 1\n for k in range(0, n - 1):\n a = k + 1 # a in range [1,n-1]\n b = n - k - 1 # b in range [1,n-1]\n value_n_minus_1[k, :] = np.maximum((r_grid / float(1 - discount)),\n (a / float(n)) * (1 + discount * value_n[k + 1, :]) +\n (b / float(n)) * discount * value_n[k, :]\n )\n try:\n # Find first index where Value = (Value of Safe Arm)\n idx_git = np.argwhere((r_grid / float(1 - discount)) == value_n_minus_1[k, :]).flatten()\n gittins_n_minus_1[k] = 0.5 * (r_grid[idx_git[0]] + r_grid[idx_git[0] - 1]) # Take average\n except:\n print(\"Error in finding Gittins index\")\n\n return gittins_n_minus_1, value_n_minus_1", "def quickbb(graph, fast=True):\n\n \"\"\"Given a permutation of the nodes (called an elimination ordering),\n for each node, remove the node and make its neighbors into a clique.\n The maximum degree of the nodes at the time of their elimination is\n the width of the tree decomposition corresponding to that ordering.\n The treewidth of the graph is the minimum over all possible\n permutations.\n \"\"\"\n\n best = Solution() # this gets around the lack of nonlocal in Python 2\n best.count = 0\n\n def bb(graph, order, f, g):\n best.count += 1\n if len(graph) < 2:\n if f < best.ub:\n assert f == g\n best.ub = f\n best.order = list(order) + list(graph)\n\n else:\n vs = []\n for v in graph:\n # very important pruning rule\n if simplicial(graph, v) or almost_simplicial(graph, v) and len(graph[v]) <= lb:\n vs = [v]\n break\n else:\n vs.append(v)\n\n for v in vs:\n graph1 = copy_graph(graph)\n eliminate_node(graph1, v)\n order1 = order + [v]\n # treewidth for current order so far\n g1 = max(g, len(graph[v]))\n # lower bound given where we are\n f1 = max(g, lower_bound(graph1))\n if f1 < best.ub:\n bb(graph1, order1, f1, g1)\n return\n\n graph = {u: set(graph[u]) for u in graph}\n\n order = []\n best.ub, best.order = upper_bound(graph)\n lb = lower_bound(graph)\n\n # This turns on the branch and bound algorithm that\n # gets better treewidth results, but takes a lot\n # longer to process\n if not fast:\n if lb < best.ub:\n bb(graph, order, lb, 0)\n\n # Build the tree decomposition\n tree = defaultdict(set)\n\n def build(order):\n if len(order) < 2:\n bag = frozenset(order)\n tree[bag] = set()\n return\n v = order[0]\n clique = graph[v]\n eliminate_node(graph, v)\n build(order[1:])\n for tv in tree:\n if clique.issubset(tv):\n break\n bag = frozenset(clique | {v})\n tree[bag].add(tv)\n tree[tv].add(bag)\n\n build(best.order)\n return tree", "def evaluate(self):\n #fac o lista cu toate perechile si vad daca se repeta vreuna (pana acum)\n nr=0\n \n pairs = []\n for i in range(0,self.__size):\n for j in range(0, self.__size):\n if self.__solution[i] != [] and self.__solution[i+self.__size] != [] : #sa am de unde face perechea\n p=[]\n p.append(self.__solution[i][j])\n p.append(self.__solution[i+self.__size][j])\n pairs.append(p)\n for p in pairs:\n if pairs.count(p) == 1:\n nr += 1\n\n return self.__size*self.__size - nr + 1 # pun acel +1 ca sa nu fie 0 niciodata -> ca sa nu am probleme la impartire\n # la 0 mai incolo\n #return nr", "def 
search2(values):\n\t# First, reduce the puzzle using the previous function\n\n\tvalues = reduce_puzzle(values)\n\tif values is False:\n\t\treturn False ## Failed earlier\n\tif all(len(values[s]) == 1 for s in boxes):\n\t\treturn values ## Solved!\n\t# Choose one of the unfilled squares with the fewest possibilities\n\tn, s = min((len(values[s]), s) for s in boxes if len(values[s]) > 1)\n\n\t# Now use recurrence to solve each one of the resulting sudokus, and\n\toldValue = values[s]\n\tfor value in values[s]:\n\t\tvalues[s] = value\n\t\tattempt = search(values)\n\t\tif attempt:\n\t\t\treturn attempt\n\tvalues[s] = oldValue\n\treturn False", "def cherryPickup(self, grid: List[List[int]]) -> int:\n # Solution 1 - 752 ms\n # Solution 2 - 296 ms\n n, m = len(grid), len(grid[0])\n dp = [[0] * (m + 2) for _ in range(m + 2)]\n dp[0][m - 1] = grid[0][0] + grid[0][m - 1]\n\n for i in range(1, n): # each level\n tmp = [[0] * (m + 2) for _ in range(m + 2)]\n # each time, you can move one more to left or right, so the most one is i+1 or m-1-i\n for j in range(min(i + 1, m)): # robot 1'col,\n for k in range(max(m - i - 1, 0), m): # robot 2'col\n if j != k:\n tmp[j][k] = max(dp[j - 1][k], dp[j][k], dp[j + 1][k],\n dp[j - 1][k - 1], dp[j][k - 1], dp[j + 1][k - 1],\n dp[j - 1][k + 1], dp[j][k + 1], dp[j + 1][k + 1])\n tmp[j][k] += grid[i][j] + grid[i][k]\n\n dp = tmp[:][:]\n\n return max(max(i) for i in dp)", "def solve(self, board: List[List[str]]) -> None:\n self.boarder_area = set([0])\n\n def dfs(board: List[List[int]], mark: List[List[int]], cur_count: int, row: int, col: int):\n if board[row][col] != 'O' or mark[row][col] != 0:\n return -1\n\n mark[row][col] = cur_count\n if row == 0 or row == len(board) - 1 or col == 0 or col == len(board[0])-1:\n self.boarder_area.add(cur_count)\n if row + 1 < len(board):\n dfs(board, mark, cur_count, row + 1, col)\n if row - 1 >= 0:\n dfs(board, mark, cur_count, row - 1, col)\n if col + 1 < len(board[0]):\n dfs(board, mark, cur_count, row, col + 1)\n if col - 1 >= 0:\n dfs(board, mark, cur_count, row, col - 1)\n return cur_count + 1\n\n m, n = len(board), len(board[0])\n mark = [[0 for _ in range(n)] for _ in range(m)]\n count = 1\n for i in range(m):\n for j in range(n):\n if board[i][j] != 'X':\n cur_count = dfs(board, mark, count, i, j)\n count = cur_count if cur_count != -1 else count\n\n for i in range(m):\n for j in range(n):\n if mark[i][j] not in self.boarder_area:\n board[i][j] = 'X'", "def find_all_zeros(min_re, max_re, min_im, max_im, fn,\r\n grid_points, iterations, reduction_factor,\r\n plot_full_region, show_progress):\r\n # Check arguments\r\n assert reduction_factor > 1 and max_re > min_re and max_im > min_im\r\n assert (max_re.imag == 0 and min_re.imag == 0\r\n and max_im.imag == 0 and min_im.imag == 0)\r\n # Edge-point rejection (see below) relies on the following assumption:\r\n assert grid_points > 2 * reduction_factor\r\n \r\n \r\n if plot_full_region:\r\n \r\n def inverse_fn(z):\r\n \"\"\" 1 / fn(z) \"\"\"\r\n f = fn(z)\r\n return inf if f == 0 else 1/f\r\n \r\n def contour_int(z, d_re, d_im):\r\n \"\"\"\r\n Approximate the contour integral of inverse_fn around a point z,\r\n using a rectangle of half-width d_re (in real direction) and\r\n half-height d_im. 
Just a nice plot that makes zeros stand out.\r\n \"\"\"\r\n assert d_re.imag == 0 and d_im.imag == 0 and d_re > 0 and d_im > 0\r\n below = inverse_fn(z - 1j * d_im)\r\n above = inverse_fn(z + 1j * d_im)\r\n left = inverse_fn(z - d_re)\r\n right = inverse_fn(z + d_re)\r\n return (below * (2 * d_re) + right * (2j * d_im)\r\n + above * (-2 * d_re) + left * (-2j * d_im))\r\n \r\n res, re_step = np.linspace(min_re, max_re, num=100, retstep=True)\r\n ims, im_step = np.linspace(min_im, max_im, num=100, retstep=True)\r\n \r\n fig = plt.figure()\r\n direct_plot = fig.add_subplot(111)\r\n data = [[math.log10(abs(fn(re + 1j * im))) for re in res] for im in ims]\r\n direct_plot.imshow(data, extent=(min_re * nu.um, max_re * nu.um,\r\n min_im * nu.um, max_im * nu.um),\r\n origin='lower')\r\n direct_plot.set_xlabel('Re(kx) [rad/um]')\r\n direct_plot.set_ylabel('Im(kx) [rad/um]')\r\n direct_plot.set_title('log(|fn(z)|) -- Looking for minima (blue)')\r\n\r\n fig = plt.figure()\r\n contour_plot = fig.add_subplot(111)\r\n data = [[-math.log10(abs(contour_int(re + 1j * im, re_step, im_step)))\r\n for re in res] for im in ims]\r\n contour_plot.imshow(data, extent=(min_re * nu.um, max_re * nu.um,\r\n min_im * nu.um, max_im * nu.um),\r\n origin='lower')\r\n contour_plot.set_xlabel('Re(kx) [rad/um]')\r\n contour_plot.set_ylabel('Im(kx) [rad/um]')\r\n contour_plot.set_title(\r\n '-log(|contour integral of 1/fn(z) around a little rectangle|)\\n'\r\n + ' -- This plot highlights zeros in fn(z), but also lines of\\n'\r\n + 'discontinuity (where top or bottom kz is pure-imaginary)')\r\n \r\n # \"regions\" is a list where each entry has the form\r\n # [min_re, max_re, min_im, max_im]. Each entry describes a region in which we\r\n # are seeking local minima.\r\n regions = [[min_re, max_re, min_im, max_im]]\r\n \r\n region_width_re = max_re - min_re\r\n region_width_im = max_im - min_im\r\n \r\n for iteration_number in range(iterations):\r\n # all_local_mins will be a list of (x, y) for every local minimum in\r\n # every region. 
This is used to generate the next iteration.\r\n all_local_mins = []\r\n for region_index in range(len(regions)):\r\n min_re_now, max_re_now, min_im_now, max_im_now = regions[region_index]\r\n results_grid = []\r\n re_list, re_step = np.linspace(min_re_now, max_re_now, num=grid_points, retstep=True)\r\n im_list, im_step = np.linspace(min_im_now, max_im_now, num=grid_points, retstep=True)\r\n fn_to_minimize = lambda z : abs(fn(z))\r\n \r\n results_grid = [[fn_to_minimize(re + 1j * im) for im in im_list]\r\n for re in re_list]\r\n results_grid = np.array(results_grid)\r\n # local_mins will be a list of (i,j) where (re_list[i], im_list[j])\r\n # is a local minimum on the results_grid\r\n local_mins = []\r\n for i in range(grid_points):\r\n for j in range(grid_points):\r\n is_min = all(results_grid[i2, j2] >= results_grid[i,j]\r\n for i2 in [i-1, i, i+1]\r\n for j2 in [j-1, j, j+1]\r\n if (0 <= i2 < grid_points\r\n and 0 <= j2 < grid_points))\r\n if is_min:\r\n local_mins.append((i,j))\r\n # local_mins_OK is the subset of local_mins that passes the\r\n # the edge-rejection test.\r\n # The edge-rejection test says that after the 0'th iteration, any\r\n # point at an edge is probably not a true minimum.\r\n \r\n local_mins_OK = []\r\n for (i,j) in local_mins:\r\n z_now = re_list[i] + 1j * im_list[j]\r\n if iteration_number >= 2 and (i == 0 or j == 0 or\r\n i == grid_points-1 or j == grid_points-1):\r\n # Rejecting an edge point...\r\n if show_progress:\r\n print('----')\r\n print('Deleting edge point: region #'\r\n + str(region_index+1) + ' (i,j)=', (i,j),\r\n ' kx in rad/um=',\r\n z_now / nu.um**-1,\r\n ' fn(z)=', fn(z_now))\r\n else:\r\n local_mins_OK.append((i,j))\r\n \r\n # Add local_mins_OK entries into all_local_mins\r\n for (i,j) in local_mins_OK:\r\n all_local_mins.append(re_list[i] + 1j * im_list[j])\r\n \r\n if show_progress:\r\n print('----')\r\n print('iter #' + str(iteration_number)\r\n + ' , region #' + str(region_index+1) + ' of ' + str(len(regions))\r\n + ' , ' + str(len(local_mins_OK)) + ' minima')\r\n if len(local_mins_OK) > 0:\r\n print('For each, here is ((i, j), kx in rad/um, fn(kx)):')\r\n print([((i, j), (re_list[i] + 1j * im_list[j]) / nu.um**-1,\r\n fn(re_list[i] + 1j * im_list[j]))\r\n for (i,j) in local_mins_OK])\r\n\r\n # Now we've gone through every region.\r\n # Delete redundant minima that showed up in overlapping regions.\r\n all_local_mins_norepeat = []\r\n def is_repeat(z1, z2):\r\n return ((abs((z1 - z2).real) <= 0.5 * re_step) and\r\n (abs((z1 - z2).imag) <= 0.5 * im_step))\r\n for z_now in all_local_mins:\r\n if not any(is_repeat(z_now, z) for z in all_local_mins_norepeat):\r\n all_local_mins_norepeat.append(z_now)\r\n if show_progress:\r\n num_deleted = len(all_local_mins) - len(all_local_mins_norepeat)\r\n if num_deleted > 0:\r\n print('----')\r\n print('After iter #' + str(iteration_number)\r\n + ', deleted ' + str(num_deleted) + ' redundant point(s)')\r\n\r\n all_local_mins = all_local_mins_norepeat\r\n \r\n if show_progress:\r\n print('----')\r\n print('** After iter #' + str(iteration_number) + ', we have '\r\n + str(len(all_local_mins)) + ' candidate minima')\r\n \r\n region_width_re /= reduction_factor\r\n region_width_im /= reduction_factor\r\n \r\n regions = [[z.real - region_width_re / 2, z.real + region_width_re / 2,\r\n z.imag - region_width_im / 2, z.imag + region_width_im / 2]\r\n for z in all_local_mins]\r\n \r\n # Done with main algorithm. Show the discovered minima on the plots as\r\n # white X's. 
Note: Zeros outside the plot region will not be seen here,\r\n # but the function still returns them.\r\n if plot_full_region:\r\n # Keep the image filling the plot area\r\n direct_plot.autoscale(False)\r\n contour_plot.autoscale(False)\r\n for z in all_local_mins:\r\n direct_plot.plot(z.real * nu.um, z.imag * nu.um, 'wx')\r\n contour_plot.plot(z.real * nu.um, z.imag * nu.um, 'wx')\r\n return all_local_mins", "def part2():\r\n my_input = 368078\r\n coords = [(1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1)]\r\n x = y = dx = 0\r\n dy = -1\r\n grid = {}\r\n\r\n while True:\r\n total = 0\r\n for offset in coords:\r\n ox, oy = offset\r\n if (x+ox, y+oy) in grid:\r\n total += grid[(x+ox, y+oy)]\r\n if total > int(my_input):\r\n return total\r\n if (x, y) == (0, 0):\r\n grid[(0, 0)] = 1\r\n else:\r\n grid[(x, y)] = total\r\n if (x == y) or (x < 0 and x == -y) or (x > 0 and x == 1-y):\r\n dx, dy = -dy, dx\r\n x, y = x+dx, y+dy", "def fn(lo, hi, k):\n if lo == hi: return 0 \n while lo+1 < hi and boxes[lo] == boxes[lo+1]: lo, k = lo+1, k+1\n ans = (k+1)*(k+1) + fn(lo+1, hi, 0)\n for mid in range(lo+2, hi): \n if boxes[lo] == boxes[mid]: \n ans = max(ans, fn(lo+1, mid, 0) + fn(mid, hi, k+1))\n return ans", "def find_coverage(self, zoom):\n # Find a group of adjacent available tiles at this zoom level\n return self.mbtiles_db_input.find_coverage(zoom)", "def test_defect_calculation():\n slope1, slope2 = 2., 3.\n step1, step2 = Fraction(5), Fraction(7)\n cosim = ramp_cosimulation(slope1, slope2, step1, step2)\n t_end = Fraction(20)\n defect = cs.evaluate(cosim, t_end)\n\n alpha = Fraction(int(lcm(step1.numerator, step2.numerator)),\n int(gcd(step1.denominator, step2.denominator)))\n num1, num2 = tuple(map(int, [alpha / step for step in (step1, step2)]))\n big = max(num1, num2) + 1\n small = min(num1, num2) - 1\n assert defect.connection['Ramp1', 'u'] > small * slope2 * step2\n assert defect.connection['Ramp1', 'u'] < big * slope2 * step2\n assert defect.connection['Ramp2', 'u'] > small * slope1 * step1\n assert defect.connection['Ramp2', 'u'] < big * slope1 * step1\n\n assert defect.output['Ramp1', 'y'] == pytest.approx(slope1 * step1)\n assert defect.output['Ramp2', 'y'] == pytest.approx(slope2 * step2)", "def find_sudoku(img, kernel_size=7, canny_threshold=100, printer=\"nothing\", squared=False):\n if img is None:\n sys.exit(\"Could not read the image.\")\n pass\n gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)\n blurred = cv.GaussianBlur(gray, (kernel_size, kernel_size), cv.BORDER_DEFAULT)\n thresh = cv.adaptiveThreshold(blurred, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 11, 2)\n thresh = cv.bitwise_not(thresh)\n edges = cv.Canny(img,canny_threshold,canny_threshold * 2)\n\n cnts, hierarchy = cv.findContours(thresh.copy(), cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n cnts = sorted(cnts, key=cv.contourArea, reverse=True)\n\n puzzle = None\n for c in cnts:\n # approximate the contour\n peri = cv.arcLength(c, True)\n approx = cv.approxPolyDP(c, 0.02 * peri, True)\n # if our approximated contour has four points, then we can\n # assume we have found the outline of the puzzle\n \n if len(approx) == 4:\n puzzle = approx\n break\n \n if type(puzzle) == None:\n print(\"that didn't work\")\n \n outline = img.copy()\n cv.drawContours(outline, [puzzle], -1, (0,255,0), 3)\n #### if we put [puzzle] we get the whole grid. 
Without it we only get the corners\n \n corners = puzzle.sum(1)\n warped = transform(img, corners, squared)\n\n if printer != \"nothing\":\n if printer in [\"gray\", \"blurred\", \"thresh\", \"edges\"]:\n plt.imshow(eval(printer), cmap=\"gray\")\n plt.title(printer, size=20)\n else:\n plt.imshow(eval(printer))\n plt.title(printer, size=20)\n \n return warped", "def SolveSCP(self):\n\n t0 = time()\n\n # Some predicates\n Lu_min = 0.\n niters_max = self._maxiters\n maxfracchange = self._maxfracchange\n\n # initialization, resetting ...\n self.reset_all() # including _u_naught(), first application\n scp_min = self.greedy()\n\n # re-initialization iteration; col fixing ignored for the moment\n niters = 0\n f_change = _largenumber\n while (f_change>maxfracchange) and (niters<niters_max):\n # re-initialize u\n if (np.mod(niters, 2)==0): \n self.reset_u(random=True)\n else:\n self.reset_u()\n u_tmp, Lu_tmp = self.subgradient() # find a near-optimal solution \n u, Lu = self.subgradient() # rerun subgradient to get a set of Lagrangian multipliers\n\n scp_all = np.zeros(self._subg_nsteps)\n for i in np.arange(self._subg_nsteps):\n #self.reset_s()\n self.s = np.copy(self.f)\n scp_all[i] = self.greedy(u=u[:,i])\n\n # check if the solution is gettting better\n imin_tmp = (np.where(scp_all==np.amin(scp_all)))[0]\n imin = imin_tmp[np.argmax(Lu[imin_tmp])]\n imax = np.argmax(Lu)\n if (np.mod(niters, 5)==0):\n print(\"This Best solution: UB={0}, LB={1}, UB1={2}, LB1={3}\".format(scp_all[imin], Lu[imin], scp_all[imax], Lu[imax]))\n if (niters==0) or ((scp_all[imin]<=scp_min) and ((Lu[imin]-Lu_min)>-(np.fabs(Lu_min)*self._LB_maxfracchange))):\n scp_min = scp_all[imin]\n u_min = np.copy(u[:,imin])\n Lu_min = Lu[imin]\n self.stepsize = _stepsize\n\n LB = Lu_min\n\n # final step, needs to get u_min back\n self.u = np.copy(u_min)\n self.s = np.copy(self.f)\n UB = self.greedy()\n\n # Which is better? absolute change or fractional change? 
\n # Both are fine, but cost should be normalized over the mean/median.\n GAP = (UB-LB)/np.fabs(UB)\n f_change = GAP\n if (np.mod(niters, 5)==0):\n print(\"Current Best Solution: UB={0}, LB={1}, change={2}% @ niters={3}\".format(UB,LB,f_change*100.,niters))\n niters = niters + 1\n if (niters == niters_max): \n #warnings.warn(\"Iteration reaches maximum = {0}\".format(niters))\n print(\"Iteration in re-initialization reaches maximum number = {0}\".format(niters))\n\n # Need to remove redundant columns\n # self.remove_redundant() # this itself is NP-hard ...\n\n print(\"Current Best Solution: UB={0}, LB={1}, change={2}% @ niters={3}\".format(UB,LB,f_change*100.,niters))\n print(\"Final Best solution: {0}\".format(UB))\n time_used = (time()-t0)/60.\n print(\"Took {0:.3f} minutes to reach current solution.\".format(time_used))\n\n return (UB,time_used)", "def helper(self,nums,start_idx) :\n if start_idx == None :\n return None, 0\n \n if self.solutions.get(start_idx) :\n return self.solutions[start_idx]\n \n \n if len(nums) - start_idx == 0 :\n return None, 0\n\n return_idx = None\n heist_total = None\n\n if len(nums) - start_idx == 1 :\n self.solutions[start_idx] = (start_idx,nums[start_idx])\n return_idx,heist_total = start_idx, nums[start_idx]\n elif len(nums) - start_idx == 2 :\n if nums[start_idx] > nums[start_idx + 1] :\n return_idx,heist_total = start_idx,nums[start_idx]\n else :\n return_idx,heist_total = start_idx+1,nums[start_idx+1] \n elif len(nums) - start_idx == 3 :\n if (nums[start_idx] + nums[start_idx+2]) > nums[start_idx + 1] :\n return_idx,heist_total = start_idx,(nums[start_idx]+nums[start_idx+2])\n else :\n return_idx,heist_total = (start_idx+1),nums[start_idx+1]\n else : # array is greater than size 3 \n r1 = self.helper(nums, start_idx +1)\n r2 = self.helper(nums, start_idx +2)\n r3 = self.helper(nums, start_idx +3)\n \n valid_cases = []\n if (r1[0] != None) and (r1[0] == start_idx +1) :\n valid_cases.append(r1)\n \n if (r2[0] != None) and (r2[0] == start_idx +2) :\n valid_cases.append((start_idx, nums[start_idx] + r2[1]))\n\n if (r3[0] != None) and (r3[0] == start_idx +3) :\n valid_cases.append((start_idx, nums[start_idx] + r3[1]))\n \n valid_cases.sort(key = lambda x : x[1],reverse = True)\n return_idx, heist_total = valid_cases[0][0], valid_cases[0][1]\n\n \n self.solutions[start_idx] = (return_idx,heist_total)\n return (return_idx, heist_total)", "def compute_complexity(folder: str,\n neat: bool = False,\n neat_gru: bool = False,\n neat_lstm: bool = False,\n neat_sru: bool = False,\n neat_sru_s: bool = False,\n gen: int = 500,\n max_v: int = 50,\n ):\n # Collect all the populations\n populations = []\n if neat: populations.append(D_NEAT)\n if neat_gru: populations.append(D_NEAT_GRU)\n if neat_lstm: populations.append(D_NEAT_LSTM)\n if neat_sru: populations.append(D_NEAT_SRU)\n if neat_sru_s: populations.append(D_NEAT_SRU_S)\n if len(populations) == 0: return\n \n # Go over all possibilities\n print(f\"\\n===> COMPUTING POPULATION'S ELITE COMPLEXITY <===\")\n path = f\"population_backup/storage/{folder}/\"\n genes_dict = dict()\n for pop in populations:\n path_eval = get_subfolder(f\"{path}{pop}/\", 'evaluation')\n complexity = Counter()\n genes = Counter()\n genes_detailed = dict()\n for v in range(1, max_v + 1):\n population = Population(\n name=f'{pop}/v{v}',\n folder_name=folder,\n use_backup=True,\n )\n if population.generation == 0: raise Exception(f\"Population {pop}/v{v} loaded incorrectly\")\n if population.generation != gen: population.load(gen=gen)\n s = 
population.best_genome.size()\n complexity[str(s)] += 1\n c = str(s[0] + s[1])\n genes[c] += 1\n if c in genes_detailed:\n genes_detailed[c].append(v)\n else:\n genes_detailed[c] = [v]\n \n # Store results at populations themselves\n update_dict(f'{path_eval}complexity_topology', complexity, overwrite=True)\n update_dict(f'{path_eval}complexity_genes', genes, overwrite=True)\n update_dict(f'{path_eval}complexity_genes_detailed', genes_detailed, overwrite=True)\n \n # Update global dictionary\n keys = list(genes.keys())\n for k in keys:\n genes[int(k)] = genes[k]\n del genes[k]\n genes_dict[pop] = list(sorted(genes.items()))\n \n plt.figure(figsize=(10, 2.5))\n max_x = max([max([a for a, _ in genes_dict[pop]]) for pop in populations])\n min_x = min([min([a for a, _ in genes_dict[pop]]) for pop in populations])\n for idx, pop in enumerate(populations):\n keys = [a for a, _ in genes_dict[pop]]\n for x in range(max_x):\n if x not in keys: genes_dict[pop].append((x, 0))\n x, y = zip(*genes_dict[pop])\n width = 0.8 / len(populations)\n plt.bar(x=np.asarray(x) - 0.4 + width / 2 + idx * width,\n height=y,\n width=width,\n linewidth=2,\n label=pop,\n color=COLORS[pop])\n \n # Beautify the plot\n plt.xlim(min_x - .5, max_x + .5)\n plt.xticks([i for i in range(min_x, max_x + 1)])\n leg = plt.legend(loc='upper center',\n bbox_to_anchor=(0.5, 1.18),\n fancybox=True,\n fontsize=10,\n ncol=len(populations))\n for line in leg.get_lines():\n line.set_linewidth(4.0)\n plt.grid(axis='y')\n plt.tight_layout()\n plt.xlabel(\"complexity expressed in #genes\")\n plt.ylabel(\"#elites\")\n plt.savefig(f\"population_backup/storage/{folder}/images/complexity.png\", bbox_inches='tight', pad_inches=0.02)\n plt.savefig(f\"population_backup/storage/{folder}/images/complexity.eps\",\n format='eps',\n bbox_inches='tight',\n pad_inches=0.02)\n # plt.show()\n plt.close()\n \n # Also create a violin plot of the distribution if only two populations\n if len(populations) == 2:\n max_x = 0\n min_x = float('inf')\n df = pd.DataFrame()\n palette = []\n for idx, pop in enumerate(populations):\n values = []\n for a, b in genes_dict[pop]:\n for _ in range(b):\n values.append(a)\n \n # Remove outliers\n values = sorted(values)\n q1 = min(values[int(round(1 / 4 * len(values)))], values[int(round(3 / 4 * len(values)))])\n q3 = max(values[int(round(1 / 4 * len(values)))], values[int(round(3 / 4 * len(values)))])\n iqr = q3 - q1\n \n for i in range(len(values) - 1, -1, -1):\n if (values[i] < (q1 - 1.5 * iqr)) or (values[i] > (q3 + 1.5 * iqr)): del values[i]\n if min(values) < min_x: min_x = min(values)\n if max(values) > max_x: max_x = max(values)\n df = df.append(pd.DataFrame({'complexity': values, 'y': 'ignore', 'pop': pop}))\n palette.append(COLORS[pop])\n \n # Create the plot\n plt.figure(figsize=(10, 2.5))\n sns.violinplot(data=df,\n x=\"complexity\", y=\"y\", hue=\"pop\",\n palette=palette, split=True,\n inner=\"quartile\")\n plt.xlim(min_x - .5, max_x + .5)\n plt.xticks([i for i in range(min_x, max_x + 1)])\n plt.xlabel(\"complexity expressed in #genes\")\n plt.yticks([])\n plt.ylabel('elite genome density')\n leg = plt.legend(loc='upper center',\n bbox_to_anchor=(0.5, 1.25),\n fancybox=True,\n fontsize=10,\n ncol=len(populations))\n for line in leg.get_lines():\n line.set_linewidth(4.0)\n plt.tight_layout()\n plt.savefig(f\"population_backup/storage/{folder}/images/complexity_violin.png\",\n bbox_inches='tight',\n pad_inches=0.02)\n plt.savefig(f\"population_backup/storage/{folder}/images/complexity_violin.eps\",\n 
format='eps',\n bbox_inches='tight',\n pad_inches=0.02)\n plt.show()\n plt.close()", "def perc_greedy(population, percentage=80):\n \n\n #initialization\n res_arr = [2] * 10\n total_knights = 80\n\n medians = get_medians(population, percentage);\n\n while(total_knights > 0):\n \n # find \"easiest\" to acheive\n ind = medians.index(min(medians))\n\n # calculate the number of knights to assign to that castle\n assign = min(total_knights, medians[ind]-res_arr[ind] + 1)\n\n # make assignment\n res_arr[ind] += assign\n total_knights -= assign\n\n # mark that castle as \"done\"\n medians[ind] = 100\n \n # get the score of result inst against input population\n res_inst = CBInstance(res_arr)\n res_score = grade_inst(res_inst, population)\n \n return res_inst", "def search(start):\n\n '''\n Create a class named nodeClass which contains 4 elements: \n state: The puzzle object containing the puzzle board at the node \n misplaced: num of misplaced tiles\n depth: depth of the node in the tree \n prev: parent node\n '''\n nodeClass = namedtuple('nodeClass', 'state, misplaced, depth, prev')\n\n #instantiate object from class creating the root node\n node = nodeClass(start, 0, 0, None)\n\n #stores the nodes that are going to be explored. \n #the node with lower f-score is explored first\n frontier = q.PriorityQueue()\n frontier.put((0,node))\n\n # frontier_set keep track of the nodes in the frontier queue\n frontier_set = {node}\n #contains the board states already explored\n explored_states = set()\n for ite in range(1,max_iterations+2):#while True:\n #Retrieve the node in the frontier with lowest value\n node = frontier.get()[1]\n\n #get the puzzle board obj from the node object\n state = node.state\n\n #Check if the game has ben solved\n if state.solved or ite==max_iterations:\n Result = namedtuple('Result', 'board, depth, nodesExpanded, max_depth, isSolved')\n return Result(state, node.depth, ite, max(no.depth for no in frontier_set), state.solved)\n\n # expanded nodes are added to explored set\n explored_states.add(state)\n\n #EXPANDING\n for mov in state.possible_moves:\n new_state=state.move(mov)\n new_node = nodeClass(new_state, new_state.score,\n node.depth + 1, node)\n\n #compute f-score of the node\n f_score=new_state.score + new_node.depth\n\n if new_state not in explored_states and new_node not in frontier_set:\n frontier.put((f_score,new_node))\n frontier_set.add(new_node)", "def solve(args):\n\n global a\n global b\n global c\n\n a, b, c = args\n mem = {}\n\n # a tree of 30 levels should be enough (all values are < 2^30)\n sol = count_pairs(30, 1, 1, 1, mem)\n\n return sol", "def hill_climbing(\n search_prob,\n find_max: bool = True,\n max_x: float = math.inf,\n min_x: float = -math.inf,\n max_y: float = math.inf,\n min_y: float = -math.inf,\n visualization: bool = False,\n max_iter: int = 10000,\n) -> SearchProblem:\n current_state = search_prob\n scores = [] # list to store the current score at each iteration\n iterations = 0\n solution_found = False\n visited = set()\n while not solution_found and iterations < max_iter:\n visited.add(current_state)\n iterations += 1\n current_score = current_state.score()\n scores.append(current_score)\n neighbors = current_state.get_neighbors()\n max_change = -math.inf\n min_change = math.inf\n next_state = None # to hold the next best neighbor\n for neighbor in neighbors:\n if neighbor in visited:\n continue # do not want to visit the same state again\n if (\n neighbor.x > max_x\n or neighbor.x < min_x\n or neighbor.y > max_y\n or neighbor.y < min_y\n 
):\n continue # neighbor outside our bounds\n change = neighbor.score() - current_score\n if find_max: # finding max\n # going to direction with greatest ascent\n if change > max_change and change > 0:\n max_change = change\n next_state = neighbor\n else: # finding min\n # to direction with greatest descent\n if change < min_change and change < 0:\n min_change = change\n next_state = neighbor\n if next_state is not None:\n # we found at least one neighbor which improved the current state\n current_state = next_state\n else:\n # since we have no neighbor that improves the solution we stop the search\n solution_found = True\n\n if visualization:\n from matplotlib import pyplot as plt\n\n plt.plot(range(iterations), scores)\n plt.xlabel(\"Iterations\")\n plt.ylabel(\"Function values\")\n plt.show()\n\n return current_state", "def score(tmp_dir, locusList, ranges):\r\n\t\r\n\tloc = ''\r\n\tpos = 1\r\n\tcount_indel = 0\r\n\tholes = 0\r\n\tsnps = 0\r\n\tcovMax=combined_covMax=covSum=covSum2= 0 \r\n\tcovMin = combined_covMin =99999\r\n\tpercentage_coverages =[]\r\n\tsnpList = []\r\n\tindelList = []\r\n\tresults = {} \r\n\t\r\n\tpileup_file = os.path.join(tmp_dir, 'all.pileup')\r\n\tfor l in open(pileup_file):\r\n\t\tt = l.split()\r\n\t\tif loc == '':\r\n\t\t\tloc = t[0] \r\n\t\t\tpos = ranges[loc][0] + 1 \r\n\t\tif t[0] != loc:\r\n\t\t\tresults =GenerateResult(ranges,\r\n\t\t\t\t\t\t\t\t\tholes, locusList,\r\n\t\t\t\t\t\t\t\t\tloc,snps,count_indel,\r\n\t\t\t\t\t\t\t\t\tsnpList, indelList,\r\n\t\t\t\t\t\t\t\t\tpercentage_coverages,combined_covMin,\r\n\t\t\t\t\t\t\t\t\tcombined_covMax, covMin, covMax,covSum, results)\r\n\t\t\t# reset locus vars\r\n\t\t\tloc = t[0] \r\n\t\t\tpos = ranges[loc][0] + 1 \r\n\t\t\tcount_indel = 0\r\n\t\t\tholes =snps=covMax=combined_covMax=covSum=covSum2= 0 \r\n\t\t\tcovMin =combined_covMin= 99999\r\n\t snpList = []\r\n\t\t\tindelList = []\r\n\t\t\tpercentage_coverages =[]\r\n\t\there = int(t[1])\r\n\t\tif here - 1 < ranges[loc][0]: \r\n\t\t\tcontinue\r\n\t\telif here - 1 >= ranges[loc][1]: \r\n\t\t\tcontinue\r\n\t\twhile pos < here: \r\n\t\t\tholes += 1 \r\n\t\t\tpos += 1\r\n\r\n\t\tv, indel, array_of_all_indels,most_common_indel = pile(t[2], t[4])\r\n\t\tx = v.items()\r\n\t\tx.sort(lambda a,b: compGreater(t[2], a, b))\r\n\t\t\r\n\t\tif x[0][0] != t[2].lower():\r\n\t\t\tsnps += 1\r\n\t\t\tsnpList.append((pos,t[2],v));\r\n\t\tc = x[0][1] \r\n\t\tcov= int(most_common_indel)/float(t[3]) \r\n\t\tif cov > 0.5: \r\n count_indel += 1\r\n indel_type = Counter(array_of_all_indels) \r\n indel_type = indel_type.items()\r\n indelList.append((int(pos),t[2], indel_type))\r\n\t\tcovSum += c \r\n\t\tcovSum2 += c * c\r\n\t\tif c > covMax:\r\n\t\t\tcovMax = c\r\n\t\tif c < covMin:\r\n\t\t\tcovMin = c\r\n\t\tcombined_c = x[0][1] + x[1][1] + x[2][1] + x[3][1] \r\n\t\tif combined_c > combined_covMax:\r\n\t\t\tcombined_covMax = c \r\n\t\tif combined_c < combined_covMin:\r\n\t\t\tcombined_covMin = c \r\n\t\t\r\n\t\tn = int(t[3]) \r\n\t\tjs = []\r\n\t\tfor (_,j) in x[1:]: \r\n\t\t\tjs.append(j) \r\n\t\tpercentage_coverage = sum(js)/float(n)*100 \r\n\t\tpercentage_coverages.append(round(float(percentage_coverage),2))\r\n\t\tpos = here + 1\r\n\tresults =GenerateResult(ranges,\r\n\t\t\t\t\t\t\tholes,\r\n\t\t\t\t\t\t\tlocusList,loc,\r\n\t\t\t\t\t\t\tsnps,count_indel,\r\n\t\t\t\t\t\t\tsnpList,indelList,\r\n\t\t\t\t\t\t\tpercentage_coverages,combined_covMin,\r\n\t\t\t\t\t\t\tcombined_covMax, covMin, covMax,\r\n\t\t\t\t\t\t\tcovSum, results)\r\n\t\r\n\treturn results", "def uninformed_search(start, 
end, graph):\n\n class SearchNode():\n def __init__(self, step_cost, name, predecessor):\n self.path_cost = predecessor.path_cost + step_cost if predecessor is not None else 0\n self.step_cost = step_cost\n self.name = name\n self.predecessor = predecessor\n def __repr__(self):\n return self.predecessor.name + \"->\" + self.name + \"=\" + self.path_cost\n\n class Problem():\n def __init__(self, start, end, graph, goal_predicate):\n self.start = start\n self.end = end\n self.graph = graph\n self.is_goal = goal_predicate\n self.visited_nodes = []\n\n nodes_expanded = 0\n nodes_generated = 0\n max_nodes_in_memory = 0\n\n def tree_search(problem, fringe):\n nonlocal nodes_generated\n nonlocal nodes_expanded\n nonlocal max_nodes_in_memory\n\n # create the initial node\n nodes_generated = 1\n fringe = [SearchNode(0, problem.start, None)]\n\n while len(fringe) > 0:\n # keep track of some metrics\n max_nodes_in_memory = max(max_nodes_in_memory, len(fringe))\n nodes_expanded += 1\n\n node = fringe.pop(0)\n while node.name in problem.visited_nodes:\n # ran out of nodes in the fringe\n if len(fringe) == 0:\n return None\n\n node = fringe.pop(0)\n\n if problem.is_goal(node):\n return node\n \n # make sure we never visit this node again, since we'll be expanding it\n problem.visited_nodes.append(node.name)\n\n # keep the fringe sorted by the path cost\n fringe.extend(expand(node, problem))\n fringe = sorted(\n fringe, \n key=lambda node: node.path_cost\n )\n\n return None\n\n def expand(node, problem):\n nonlocal nodes_generated\n nodes = []\n for edge in problem.graph.edges(node.name):\n nodes.append(SearchNode(edge.weight, edge.destination, node))\n \n nodes_generated += len(nodes)\n return nodes\n\n initial_problem = Problem(start, end, graph, lambda x: x.name == end)\n result = tree_search(initial_problem, [])\n\n # convert the resulting nested structure into an actual path of (start, end, cost)\n def walk(node):\n pred = node.predecessor\n if pred is None:\n return []\n \n path = walk(pred)\n path.append((pred.name, node.name, node.step_cost))\n return path\n\n path = walk(result) if result is not None else None\n return (path, nodes_expanded, nodes_generated, max_nodes_in_memory)", "def solve(self, board: List[List[str]]) -> None:\r\n\r\n graph = defaultdict(list)\r\n all_o = set()\r\n\r\n for i in range(len(board)):\r\n for j in range(len(board[0])):\r\n if board[i][j] == \"O\":\r\n if i != 0 and board[i-1][j] == \"O\":\r\n graph[(i,j)].append((i-1,j))\r\n if i != len(board)-1 and board[i+1][j] == \"O\":\r\n graph[(i,j)].append((i+1,j))\r\n if j != 0 and board[i][j-1] == \"O\":\r\n graph[(i,j)].append((i,j-1))\r\n if j != len(board[0])-1 and board[i][j+1] == \"O\":\r\n graph[(i,j)].append((i,j+1))\r\n all_o.add((i,j))\r\n\r\n explored_so_far = set()\r\n for i, j in all_o:\r\n if i != 0 \\\r\n and i != len(board)-1 \\\r\n and j != 0 \\\r\n and j != len(board[0])-1 \\\r\n and board[i-1][j] == \"X\" \\\r\n and board[i+1][j] == \"X\" \\\r\n and board[i][j-1] == \"X\" \\\r\n and board[i][j+1] == \"X\":\r\n board[i][j] = \"X\"\r\n else:\r\n if (i,j) not in explored_so_far:\r\n is_surrounded, explored = self.dfs(graph, (i,j), len(board)-1, len(board[0])-1)\r\n explored_so_far.update(explored)\r\n if is_surrounded:\r\n for (i,j) in explored:\r\n board[i][j] = \"X\"", "def crop_data(vol):\n\n thres = 250\n\n num_x = vol.shape[0]\n num_y = vol.shape[1]\n num_z = vol.shape[2]\n\n \n # set up starting positions\n starts = []\n\n # front and back\n for i in range(num_x):\n for j in range(num_z):\n 
starts.append( (i, 0, j) )\n starts.append( (i, num_y-1, j) )\n\n # left and right\n for i in range(num_y):\n for j in range(num_z):\n starts.append( (0, i, j) )\n starts.append( (num_x-1, i, j) )\n\n # DFS\n seenpositions = set()\n currentpositions = set(starts)\n\n while currentpositions:\n nextpositions = set()\n for p in currentpositions:\n seenpositions.add(p)\n succ = possiblesuccessors(vol, p, thres)\n for np in succ:\n if np in seenpositions: continue\n nextpositions.add(np)\n\n currentpositions = nextpositions\n\n print \"cropping %i (%i addional) voxels\" % (len(seenpositions), len(seenpositions) - len(starts))\n\n # crop visited voxels\n for pos in seenpositions:\n vol[pos[0], pos[1], pos[2]] = 0.0\n\n return vol", "def exact_min_vertex_cover(graph):\n for N in range(1,len(graph.nodes())+1):\n for graph_sub in it.combinations(sorted(graph.nodes(), reverse=True), N):\n graph_temp = graph.copy()\n graph_temp.remove_nodes_from(graph_sub)\n if len(graph_temp.edges()) == 0:\n return list(graph_sub)", "def reduce(self, threshold):\n def percentage_change(old, new):\n return (old - new) / old\n real_reduction_iterations = 0\n padic_reduction_iterations = 0\n cont_reduction_iterations = 0\n factor = len(self.constants.primes) + 1\n \n print('initial bound',max(self.coefficients['n1_bound'],max(self.coefficients['Z_bounds'])))\n\n # First, go through the real reduction loop.\n current_n1_bound = self.coefficients['n1_bound']\n current_diff_bound = None\n while True:\n real_reduction_iterations += 1\n logging.info(\"Real Reduction - Iteration %d\" % real_reduction_iterations)\n\n large_constant = self.calculate_large_constant(current_n1_bound, factor)\n logging.info(\"Large constant contains %d digits \" % large_constant.ndigits())\n\n # Find a new bound on n_1 - n_k\n new_diff_bound = self.real_reduce(current_n1_bound, large_constant)\n logging.info(\"Current bound on n1: \" + str(current_n1_bound))\n self.update_real_constants(new_diff_bound)\n logging.info(\"new diff bound: \" + str(new_diff_bound))\n logging.info(\"New bound on n1: \" + str(self.coefficients[\"n1_bound\"]))\n logging.info(\"New bound on zi: \" + str(self.coefficients['Z_bounds']))\n \n if percentage_change(current_n1_bound, self.coefficients[\"n1_bound\"]) < self.threshold:\n logging.info(\"New bound did not improve in the real step; real reduction process is done.\")\n factor = factor + 5\n break\n\n current_n1_bound = self.coefficients['n1_bound']\n current_diff_bound = new_diff_bound\n\n # Second, go through the p-adic reduction loop.\n current_Z_bounds = self.coefficients['Z_bounds']\n while True:\n padic_reduction_iterations += 1\n logging.info(\"p-adic Reduction - Iteration %d\" % padic_reduction_iterations)\n\n new_Z_bounds = self.padic_reduce(math.ceil(current_diff_bound))\n logging.info(\"New bound on zi: \" + str(new_Z_bounds))\n logging.info(\"Current bound on n1: \" + str(current_n1_bound))\n new_n1_bound = self.update_padic_constants(new_Z_bounds)\n logging.info(\"New bound on n1: \" + str(new_n1_bound))\n if percentage_change(current_n1_bound, new_n1_bound) < self.threshold:\n logging.info(\"New bound did not improve in the p-adic step; p-adic reduction process is done.\")\n break\n\n current_n1_bound = new_n1_bound\n\n print(current_n1_bound)\n\n return self.constants", "def readSudoku(self, path):\n #### 1. 
PREPARE THE IMAGE\n img = cv2.imread(path)\n img = cv2.resize(img, (widthImg, heightImg)) # RESIZE IMAGE TO MAKE IT A SQUARE IMAGE\n imgBlank = np.zeros((heightImg, widthImg, 3), np.uint8) # CREATE A BLANK IMAGE FOR TESTING DEBUGING IF REQUIRED\n imgThreshold = preProcess(img)\n\n # #### 2. FIND ALL COUNTOURS\n imgContours = img.copy() # COPY IMAGE FOR DISPLAY PURPOSES\n imgBigContour = img.copy() # COPY IMAGE FOR DISPLAY PURPOSES\n contours, hierarchy = cv2.findContours(imgThreshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # FIND ALL CONTOURS\n #cv2.drawContours(imgContours, contours, -1, (0, 255, 0), 3) # DRAW ALL DETECTED CONTOURS\n\n #### 3. FIND THE BIGGEST COUNTOUR AND USE IT AS SUDOKU\n biggest, maxArea = biggestContour(contours) # FIND THE BIGGEST CONTOUR\n print(biggest)\n if biggest.size != 0:\n\n model = MNIST_classifier() # create an instance of the model\n model.load_state_dict(torch.load('models/Digit_CNN.pt', map_location=torch.device('cpu')))\n model.eval()\n\n biggest = reorder(biggest)\n print(biggest)\n cv2.drawContours(imgBigContour, biggest, -1, (0, 0, 255), 25) # DRAW THE BIGGEST CONTOUR\n pts1 = np.float32(biggest) # PREPARE POINTS FOR WARP\n pts2 = np.float32([[0, 0],[widthImg, 0], [0, heightImg],[widthImg, heightImg]]) # PREPARE POINTS FOR WARP\n matrix = cv2.getPerspectiveTransform(pts1, pts2) # GER\n imgWarpColored = cv2.warpPerspective(img, matrix, (widthImg, heightImg))\n imgDetectedDigits = imgBlank.copy()\n imgWarpColored = cv2.cvtColor(imgWarpColored,cv2.COLOR_BGR2GRAY)\n boxes = splitBoxes(imgWarpColored)\n print(len(boxes))\n cv2.imshow('a',boxes[0])\n cv2.imshow('b',boxes[9])\n cv2.imshow('c',boxes[80])\n\n\n else:\n print(\"No Sudoku Found\")\n return 0\n\n cv2.waitKey(0)\n \n \n \n \n ##########\n grid = [\n [0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0]\n ]\n\n for cellno,c in enumerate(boxes):\n img = Image.fromarray()\n \n \n i = int(np.round(x/cwidth))\n j = int(np.round(y/cheight))\n grid[j][i] = str(i)+str(j)\n\n ink_percent = (np.sum(c == 255)/(np.sum(c == 0) + np.sum(c == 255)))*100\n\n if ink_percent>3.5:\n grid[j][i] = int(pytesseract.image_to_string(c, config=\"--psm 13\")[0])\n else:\n grid[j][i] = 0\n\n cv2.rectangle(image, (x, y), (x + w, y + h), (36,255,12), 2)\n #print(f\"{x}, {y}, {w}, {h}\")\n square_ct += 1\n print(f\"Analysing cell {square_ct}/81\")\n\n if square_ct!= 9*9:\n print('Did not find correct number of boxes')\n print('Number of boxes: '+str(square_ct))\n plt.imshow(image)\n return 0\n else:\n return grid", "def solve(sudoku):\n\n # Go through all numbers in the Sudoku.\n for row in range(9):\n for column in range(9):\n # Try all possible combinations of numbers recursively and look for\n # one that is a correct solution.\n if sudoku[row][column] is None:\n # Filter combinations that we see are not going to be possible\n # up front.\n seen = set([])\n box_row_base = (row / 3) * 3\n box_col_base = (column / 3) * 3\n for i in range(9):\n # Numbers seen in this row.\n seen.add(sudoku[row][i])\n # Numbers seen in this column.\n seen.add(sudoku[i][column])\n # Numbers seen in this box.\n seen.add(sudoku[box_row_base + i / 3][box_col_base + i % 3])\n\n # Try all solutions we consider possible at this point.\n for candidate in set(range(1, 10)) - seen:\n sudoku[row][column] = candidate\n if solve(sudoku):\n return True\n\n # If none of the numbers returned a valid 
solution, restore the\n # state of the Sudoku and return to the parent so it can try a\n # different solution.\n sudoku[row][column] = None\n return False\n\n return True", "def chessboardGame(x, y):\n xin = x\n yin = y\n\n # These squares have no possible move, therefore, are losing;\n # we chose these squares by sight; while loop below expands these sets\n # until we encompass whole board\n # it was not clear to me in the beginning that every square has a unique\n # determinant ending under optimal play\n losing_start = set([(1, 1), (2, 1), (1, 2), (2, 2)])\n\n # These squares can jump to losing_start in one move, so are winning\n winning_start = set([(1, 3), (1, 4), (2, 3), (2, 4),\n (3, 1), (3, 2), (3, 3), (3, 4),\n (4, 1), (4, 2), (4, 3)])\n\n def nextset(x, y):\n def isvalid(coord):\n return True if coord[0] >= 1 and coord[1] >= 1 \\\n and coord[0] <= 15 and coord[1] <= 15 else False\n\n nextsquares = [(x - 2, y + 1), (x - 2, y - 1), (x + 1, y - 2),\n (x - 1, y - 2)]\n nextsquares = set([*filter(isvalid, nextsquares)])\n # print(nextsquares)\n return nextsquares\n\n # run a few times through whole board;\n # it takes 5 times to find a definitive win path for all 225 squares\n # 161 squares are winning for first player\n # 64 squares are losing starting for first player\n test_set = [(i, j) for i in range(1, 16) for j in range(1, 16)]\n times = 1\n while (len(winning_start) + len(losing_start)) < 225:\n for coords in test_set:\n x_ = coords[0]\n y_ = coords[1]\n thenextset = nextset(x_, y_)\n # print('testing', x_, y_, thenextset)\n\n if (x_, y_) in losing_start:\n # print('No Path, Second wins')\n pass\n elif (x_, y_) in winning_start:\n # print('One jump to terminal square, First wins')\n pass\n elif (len(winning_start.intersection(thenextset))\n == len(thenextset)):\n # if next set ONLY includes winning_starts, First loses because\n # he has no choice but give win to opponent\n # need to add x,y to losing_start\n losing_start.add((x_, y_))\n # print('we lose, Second wins')\n elif len(losing_start.intersection(thenextset)) > 0:\n # if next set includes ANY losing_start, we win by choosing it\n # need to add x,y to winning_start\n winning_start.add((x_, y_))\n # print('First wins')\n else:\n # print('do not know')\n pass\n\n print('Run', times, len(winning_start) + len(losing_start))\n times += 1\n\n print(len(winning_start))\n print(len(losing_start))\n\n # prints schematic of Winor Loss of each of 15x15 squares\n\n print(' '.join(map(str, [i for i in range(1, 16)])))\n for i in range(15):\n row = ''\n for j in range(15):\n if test_set[i * 15 + j] in winning_start:\n row = row + 'W '\n else:\n row = row + 'L '\n print(row + str(i))\n\n if (xin, yin) in winning_start:\n print('First wins with', xin, yin)\n return 'First'\n else:\n print('Second wins with', xin, yin)\n return 'Second'", "async def solve(self):\n\n \"\"\"TODO:\n Handle guess and checking:\n 1) Make guess (Make logical guess based on what could be most impactful...i.e. 
if two spots can have either number in a row)\n 2) Fork based on guess\n 3) Check if one raises from impossible square (delete this fork)\n 4) Check if one completes (will simply return from .gather)\n 5) Each board can recurse through this guess and checking, just in case\n \"\"\"\n tasks = [square.check() for row in self for square in row]\n\n return await asyncio.gather(*tasks, return_exceptions=False)", "def solve_part1(puzzle_input):\n try:\n banks = [int(bank) for bank in puzzle_input[0].split('\\t')]\n except:\n banks = puzzle_input\n\n existing_patterns = []\n current_pattern = banks\n existing_patterns.append(make_pattern(current_pattern))\n\n cont = True\n\n print('start here')\n while cont:\n next_pattern = update_banks(current_pattern)\n cp = make_pattern(next_pattern)\n\n if cp in existing_patterns:\n cont = False\n else:\n existing_patterns.append(cp)\n\n current_pattern = next_pattern\n\n return len(existing_patterns)", "def brute_force_search_solution():\n return len(coin_search(TOTAL, COINS))", "def clumpfind(\n self,\n levels=None,\n corners=False,\n seeded=False,\n allow_new_peaks=True,\n timer=True\n ):\n\n # ...................................................\n # Check user options\n # ...................................................\n\n if self.linked_data == None:\n print \"Clumpfind assignment requires data.\"\n return\n \n if seeded == True:\n if self.linked_lmax == None:\n print \"Seeded clumpfind assignment requires local maxima.\"\n return\n \n if seeded == False and allow_new_peaks == False:\n print \"Cannot run an unseeded (classic) clumpfind without being able to add seeds.\"\n return\n\n # ...................................................\n # Get data to use\n # ................................................... \n\n # Get the data and set the values we will not use to a low\n # number that will be ignored by the algorithm.\n\n data = copy.deepcopy(self.linked_data.data)\n if self.linked_mask != None:\n use = self.linked_mask.data*self.linked_data.valid\n else:\n use = self.linked_data.valid\n min_use = np.min(self.linked_data.data[use])\n max_use = np.max(self.linked_data.data[use])\n low_value = min_use-1.\n data[(use==False)] = low_value\n\n # ...................................................\n # Calculate contour levels\n # ...................................................\n\n if levels == None:\n if self.linked_data.noise != None:\n print \"Defaulting to 2 sigma spacing.\"\n levels = contour_values(\n linspace = True,\n maxval = max_use,\n minval = min_use, \n spacing = 2.0*self.linked_data.noise.scale\n )\n else:\n print \"Need a noise estimate.\"\n return\n\n self.levels = levels\n\n # ...................................................\n # Build the structuring element\n # ...................................................\n\n structure = (Struct(\n \"simple\", \n ndim=self.linked_data.data.ndim, \n corners=corners)).struct\n\n # ...................................................\n # Initialize the output\n # ...................................................\n\n # ... data\n self.data = np.zeros_like(data, dtype=np.int)\n\n # ... 
local maxima\n if seeded == False:\n print \"Initializing a new set of local maxima\"\n self.linked_lmax = \\\n lmax.Lmax(self.linked_data, self.linked_mask)\n\n # ...................................................\n # Loop over levels (from high to low)\n # ...................................................\n\n nlev = len(levels)\n count = 0\n\n for level in levels: \n\n # ........................\n # Print a counter\n # ........................\n\n perc = count*1./nlev\n sys.stdout.write('\\r') \n sys.stdout.write(\"Clumpfind level %d out of %d\" % (count, nlev))\n sys.stdout.flush()\n count += 1\n\n # ............................\n # Label regions for this level\n # ............................\n\n thresh = (data >= level)\n labels, ncolors = ndimage.label(\n thresh,\n structure=structure)\n \n # ...........................\n # Vectorize the labeled data\n # ...........................\n\n # This gives a big speedup for sparse data.\n\n ind = np.where(thresh)\n val = self.linked_data.data[ind]\n ind_arr = cube.xyztup_to_array(ind, coordaxis=1)\n label_vec = labels[ind]\n\n # Get the assignments for the current seeds\n if self.linked_lmax.num > 0:\n seed_labels = labels[self.linked_lmax.as_tuple()]\n \n # ........................................\n # Loop over discrete regions at this level\n # ........................................\n\n for label in range(1,ncolors+1):\n \n # ........................................\n # Get the indices for this region\n # ........................................\n\n this_color = np.where(label_vec == label)\n this_val = val[this_color]\n this_ind_arr = ind_arr[this_color[0],:]\n this_ind = cube.xyzarr_to_tuple(this_ind_arr,coordaxis=1)\n\n # ........................................\n # Check if we should add a new peak\n # ........................................\n\n # If there are no peaks or if there are no peaks in\n # this region, we want to add a new one --- but only\n # if that's allowed! 
\n\n # A future extension is to add additional criteria\n # that must be met to add a peak (volume, area, etc.)\n\n if self.linked_lmax.num == 0:\n if allow_new_peaks:\n add_a_new_peak = True\n else:\n continue\n elif np.sum(seed_labels == label) == 0:\n if allow_new_peaks:\n add_a_new_peak = True\n else:\n continue\n else:\n add_a_new_peak = False\n \n # ........................................\n # Add a new peak\n # ........................................\n\n if add_a_new_peak:\n\n # Find the location of the maximum value\n maxind = np.argmax(this_val)\n\n # Get the corresponding coordinates\n peak_index = this_ind_arr[maxind,:]\n\n # Add a local maximum\n new_name = self.linked_lmax.add_local_max(peak_index)\n\n # Label these data in the assignment cube\n self.data[this_ind] = new_name\n\n continue\n\n # ........................................\n # Deal with the case of a signle seed\n # ........................................\n\n if np.sum(seed_labels == label) == 1:\n \n maxind = np.where((seed_labels == label))\n\n self.data[this_ind] = self.linked_lmax.name[maxind]\n\n continue\n\n # ........................................\n # Deal with the case of competing seeds\n # ........................................\n\n # Several matching labels\n if np.sum(seed_labels == label) > 1:\n\n # Initialize an assignment vector\n this_assign = np.zeros_like(this_val)\n best_dist = np.zeros_like(this_val)\n\n # Identify the competing seeds\n maxind = np.where((seed_labels == label))\n\n n_max = len(maxind[0])\n\n for i in range(n_max):\n \n this_max_name = self.linked_lmax.name[maxind[0][i]]\n\n this_max_coord = self.linked_lmax.indices[this_max_name-1]\n\n dist_to_this_max = \\\n np.sum((this_ind_arr - this_max_coord)**2,axis=1)\n \n if i == 0:\n # ... all true for the first test\n is_closest = (dist_to_this_max == dist_to_this_max)\n else:\n is_closest = (dist_to_this_max < best_dist)\n\n this_assign[is_closest] = this_max_name\n best_dist[is_closest] = dist_to_this_max[is_closest]\n\n\n self.data[this_ind] = this_assign", "def task4(x: List[List[int]]) -> int:\n ###\n ###\n row = len(x)\n max_circle = 0\n i = 0\n while i < row:\n for j in range(i, row):\n if x[i][j] != 1:\n if i == j - 1:\n max_circle += 1\n j += 1\n i = j - 1\n break\n if j == row - 1 and x[i-1][j] == 0:\n max_circle += 1\n if j == row - 1:\n i += 1\n\n return max_circle\n #a b c d", "def sudokuDepthFirstSearch(problem):\n\n def convertStateToHash(values):\n \"\"\" \n values as a dictionary is not hashable and hence cannot be used directly in the explored/visited set.\n This function changes values dict into a unique hashable string which can be used in the explored set.\n You may or may not use this\n \"\"\"\n l = list(sorted(values.items()))\n modl = [a+b for (a, b) in l]\n return ''.join(modl)\n\n ## YOUR CODE HERE\n root_node = Node(problem.getStartState(), [], 0, None, 0)\n frontier = util.Stack()\n frontier.push(root_node)\n explored = []\n\n while not(frontier.isEmpty()):\n node_to_explore = frontier.pop()\n\n if problem.isGoalState(node_to_explore.state):\n return node_to_explore.state\n else:\n copy_state = node_to_explore.state.copy()\n \n if convertStateToHash(copy_state) not in explored:\n\t explored.append(convertStateToHash(copy_state))\n\t successors_state = problem.getSuccessors(copy_state)\n\t if len(successors_state) > 0:\n\t\t for state_action_cost in successors_state:\n\t\t if convertStateToHash(state_action_cost[0]) in explored:\n\t\t continue\n\t\t else:\n\t\t frontier.push(Node(state_action_cost[0], 
state_action_cost[1], node_to_explore.path_cost + 1, node_to_explore, node_to_explore.depth + 1))\n\n return False\n # util.raiseNotDefined()", "def findHeuristic(self, _, __):\n popSize = 100\n retain = 0.25\n random_select = 0.1\n mutate = 0.1\n\n popList = self.populationList(popSize)\n\n solved = False\n count = 0\n while not solved:\n # evolves current\n popList = (self.evolve(popList, retain, random_select, mutate))\n# print(popList) # for troubleshooting\n for i in popList:\n if (self.fitness(i) == 0):\n print(\"solution: \", i)\n solved = True\n break\n # if plateus at a local minima, then end after 50 generations\n if count >= 50:\n if (self.fitness(i) <= 10):\n print(\"solution: \", i)\n solved = True\n break\n if solved is True:\n break\n print(\"-----------------\")\n\n # will modify mutation, random_select and retain values to help leave a\n # local minima. More randomness the longer it takes up to specific points\n if count % 3 == 0:\n if mutate < 0.2:\n mutate += 0.01\n if random_select < 0.3:\n random_select += 0.01\n count += 1\n\n return exit(0)" ]
[ "0.6313552", "0.6258296", "0.6128886", "0.57589465", "0.5733039", "0.57001925", "0.5643672", "0.55785424", "0.5561169", "0.5516535", "0.5466868", "0.5454839", "0.5434189", "0.5430339", "0.5400842", "0.5336104", "0.5285302", "0.5259781", "0.52392155", "0.5232197", "0.5226537", "0.5223419", "0.5212573", "0.5177914", "0.5162023", "0.51566017", "0.5144013", "0.5131692", "0.51234907", "0.5116657", "0.5056829", "0.5049999", "0.5048595", "0.50471354", "0.5039286", "0.5033754", "0.50321597", "0.50300866", "0.5027514", "0.5027291", "0.50245595", "0.50156456", "0.50090146", "0.5006627", "0.5005271", "0.5001826", "0.50007594", "0.49684128", "0.49655607", "0.49647853", "0.4961518", "0.49536118", "0.4952256", "0.49390143", "0.4933748", "0.49335214", "0.49276954", "0.49198484", "0.49187493", "0.4918207", "0.49173084", "0.49141872", "0.49113634", "0.49107566", "0.49099904", "0.4906632", "0.49045688", "0.4903097", "0.4897418", "0.48940963", "0.48874092", "0.48825854", "0.48821136", "0.4874988", "0.48682702", "0.48626882", "0.48595583", "0.48566633", "0.4852994", "0.4852859", "0.48499003", "0.4847803", "0.48470393", "0.4843965", "0.48415318", "0.4840381", "0.48399633", "0.48368338", "0.48293343", "0.48272002", "0.482664", "0.4825263", "0.48204708", "0.4815085", "0.48125142", "0.48112565", "0.48084962", "0.48061758", "0.48056784", "0.4803803", "0.48002416" ]
0.0
-1
Find the minimum-volume ellipsoid that holds all the points. Based on work by Nima Moshtagh.
def getMinVolEllipse(P, tolerance=0.01):
    (N, d) = np.shape(P)
    d = float(d)

    # Q will be our working array
    Q = np.vstack([np.copy(P.T), np.ones(N)])
    QT = Q.T

    # initializations
    err = 1.0 + tolerance
    u = (1.0 / N) * np.ones(N)

    # Khachiyan Algorithm
    while err > tolerance:
        V = np.dot(Q, np.dot(np.diag(u), QT))
        M = np.diag(np.dot(QT, np.dot(linalg.inv(V), Q)))  # M the diagonal vector of an NxN matrix
        j = np.argmax(M)
        maximum = M[j]
        step_size = (maximum - d - 1.0) / ((d + 1.0) * (maximum - 1.0))
        new_u = (1.0 - step_size) * u
        new_u[j] += step_size
        err = np.linalg.norm(new_u - u)
        u = new_u

    # center of the ellipse
    center = np.dot(P.T, u)

    # the A matrix for the ellipse
    A = linalg.inv(
        np.dot(P.T, np.dot(np.diag(u), P)) -
        np.array([[a * b for b in center] for a in center])
    ) / d

    # Get the values we'd like to return
    U, s, rotation = linalg.svd(A)
    radii = 1.0 / np.sqrt(s)

    rot_err = linalg.norm(np.identity(3) - abs(rotation))
    if rot_err > 0.05:
        radii = np.array([radii[1], radii[0], radii[2]])

    return radii
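A minimal usage sketch for the getMinVolEllipse snippet above, for illustration only: it assumes the imports the snippet relies on but does not show (numpy as np and scipy.linalg as linalg), and the helper name demo_min_vol_ellipse is hypothetical. Points are sampled on a known ellipsoid so the recovered radii can be sanity-checked against the true semi-axes.

import numpy as np
from scipy import linalg  # assumed import; getMinVolEllipse uses the bare name linalg

def demo_min_vol_ellipse():
    # Draw 200 directions on the unit sphere, then stretch them onto an
    # ellipsoid with semi-axes (3, 2, 1).
    rng = np.random.default_rng(0)
    directions = rng.normal(size=(200, 3))
    directions /= np.linalg.norm(directions, axis=1, keepdims=True)
    P = directions * np.array([3.0, 2.0, 1.0])

    # The fitted radii should come out close to (3, 2, 1); ordering follows
    # the singular values of the shape matrix A.
    radii = getMinVolEllipse(P, tolerance=0.001)
    print(np.sort(radii)[::-1])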
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_sphere_full():\n \n num_voxels = 31\n c = (15.0, 15.0, 15.0)\n\n data_x = []\n data_y = []\n data_z = []\n data_intensity = []\n\n volume = numpy.zeros((num_voxels, num_voxels, num_voxels))\n\n for x in range(num_voxels):\n for y in range(num_voxels):\n for z in range(num_voxels):\n\n if numpy.sqrt((x-c[0])**2 + (y-c[1])**2 + (z-c[2])**2) - 7.5 < 1.5:\n data_x.append(x)\n data_y.append(y)\n data_z.append(z)\n data_intensity.append(200.0)\n\n volume[x,y,z] = 200.0\n\n\n return data_x, data_y, data_z, data_intensity, volume", "def smallest_ellipse(points, tol = 0.001):\n points = np.asmatrix(points)\n N, d = points.shape\n Q = np.column_stack((points, np.ones(N))).T\n err = tol+1.0\n u = np.ones(N)/N\n \n while err > tol:\n # assert u.sum() == 1 # invariant\n X = Q * np.diag(u) * Q.T\n M = np.diag(Q.T * la.inv(X) * Q)\n jdx = np.argmax(M)\n step_size = (M[jdx]-d-1.0)/((d+1)*(M[jdx]-1.0))\n new_u = (1-step_size)*u\n new_u[jdx] += step_size\n err = la.norm(new_u-u)\n u = new_u\n c = u*points\n A = la.inv(points.T*np.diag(u)*points - c.T*c)/d\n return np.asarray(A), np.squeeze(np.asarray(c))", "def point2wgs84_9603(self, datum):\n \"\"\"\n h is the height above the ellipsoid. This is the height value that is \n delivered by GPS satellite observations but is not the gravity-related height \n value which is normally used for national mapping and levelling operations. The\n gravity-related height (H) is usually the height above mean sea level or an \n alternative level reference for the country. If one starts with a gravity-related \n height H, it will be necessary to convert it to an ellipsoid height (h) before \n using the above transformation formulas. See section 4.11.1. For the WGS 84 \n ellipsoid the difference between ellipsoid and mean sea level can vary between \n values of -100m in the Sri Lanka area to +80m in the North Atlantic.)\n \"\"\"\n h=0\n # a is the semi-major axis of the ellipsoid of the given datum.\n a = datum.axis\n\n # f is the flattening of the ellipsoid of the given datum \n # (get_flattening actually returns the inverse flattening).\n f = 1.0/datum.flattening\n \n # dx, dy, dz are the x, y, z offset parameters for the given datum transformation\n # to WGS84\n dx = datum.dx\n dy = datum.dy\n dz = datum.dz\n \n # latr, lngr are the latitude and longitude in radians\n latr = math.radians(self.lat)\n lngr = math.radians(self.lng)\n\n # e is the eccentricity of the ellipsoid\n e_squared = f*(2-f)\n\n # nu is the prime vertical radius of curvature at latr\n nu = a/math.pow((1-e_squared*sqr(math.sin(latr))),0.5)\n\n X = (nu+h)*math.cos(latr)*math.cos(vlambda)\n Y = (nu+h)*math.cos(latr)*math.sin(vlambda)\n Z = ((1 - math.pow(e,2))*nu + h)*math.sin(phi)\n\n Xwgs84 = X+dx\n Ywgs84 = Y+dy\n Zwgs84 = Z+dz\n\n epsilon = e_squared/(1-e_squared)\n b = a*(1-f)\n p = math.pow(sqr(Xwgs84)+sqr(Ywgs84),0.5)\n q = math.atan2((Zwgs84*a),(p*b))\n\n latrwgs84 = math.atan2( (Zwgs84 + epsilon*b*math.pow(math.sin(q)),3)), \\\n (p - e_squared*a*math.pow(math.cos(q),3) )\n lngrwgs84 = math.atan2(Ywgs84, Xwgs84)\n hwgs84 = (p/math.cos(latrwgs84))-nu\n newlng = lng180(math.degrees(lngrwgs84))\n newlat = math.degrees(latrwgs84)\n return Point(float(truncate(newlng,DEGREE_DIGITS)), float(truncate(newlat,DEGREE_DIGITS)))", "def ellipsoid_area(radius1: number, radius2: number, radius3: number) -> number:\n p = 1.6075\n volume = 4*pi((radius1**p*radius2**p+radius2**p*radius3**p+radius3**p*radius1**p)**1/p)/3\n return volume", "def test_inv_sphere_v3_root_find(self):\n import itertools\n\n 
for k in (0, -1, 1, 1.75, 0.325, 1/7, -1.75, -0.325, -1/7):\n s = space(fake_curvature=k) \n for m in itertools.chain(\n range(30),\n range(31,3000,100),\n map((1).__truediv__, range(3, 30, 2)),\n ):\n r = s.inv_sphere_v3(m)\n self.assertTrue(r >= 0)\n v = s.sphere_v3(r)\n self.assertTrue(isclose(\n m,\n v\n ))", "def getHoracekFromEQ(self, ep):\n #check if power is in MW or W (assumes Pinj > 500W)\n if self.Pinj < 500:\n Pinj = self.Pinj * 1e6\n else:\n Pinj = self.Pinj\n\n #find the plasma volume from the equilibrium\n Rlim = ep.g['lcfs'][:,0]\n Zlim = ep.g['lcfs'][:,1]\n\n #calculate cross sectional area using shoelace formula\n i=np.arange(len(Rlim))\n crossSecArea=np.abs(np.sum(Rlim[i-1]*Zlim[i]-Rlim[i]*Zlim[i-1])*0.5)\n\n #calculate (approximate) volume inside separatrix [m^3]\n #assumes RmAxis is close to R of center of mass of crossSecArea\n vol = crossSecArea * 2 * np.pi * ep.g['RmAxis']\n\n #assuming plasma is centered in machine here\n zMin = ep.g['ZmAxis'] - 0.25\n zMax = ep.g['ZmAxis'] + 0.25\n zLCFS = ep.g['lcfs'][:,1]\n rLCFS = ep.g['lcfs'][:,0]\n #this prevents us from getting locations not at midplane\n idx = np.where(np.logical_and(zLCFS>zMin,zLCFS<zMax))\n Rmax = ep.g['lcfs'][:,0][idx].max()\n Rmin = ep.g['lcfs'][:,0][idx].min()\n # geometric quantities\n Rgeo = (Rmax + Rmin) / 2.0\n a = (Rmax - Rmin) / 2.0\n aspect = a/Rgeo\n\n #maximum z point and elongation\n idx2 = np.where(np.logical_and(rLCFS>Rmin,rLCFS<Rmax))\n b = ep.g['lcfs'][:,1][idx].max() #assumes equatorial plane is z=0\n k = b / a\n\n #lambda q from Horacek engineering scaling figure 6a\n self.lqCF = 10 * (Pinj / vol)**(-0.38) * aspect**(1.3) * k**(-1.3) * 1e3 #in mm\n return", "def ellipsoid_volume(radius1: number, radius2: number, radius3: number) -> number:\n volume = 4/3*(pi*radius1*radius2*radius3)\n return volume", "def mvee(atoms, tol = 0.00001):\n points_asarray = np.array([atom.coordinates for atom in atoms])\n points = np.asmatrix(points_asarray)\n N, d = points.shape\n Q = np.column_stack((points, np.ones(N))).T\n err = tol+1.0\n u = np.ones(N)/N\n try:\n while err > tol:\n # assert u.sum() == 1 # invariant\n X = Q * np.diag(u) * Q.T\n M = np.diag(Q.T * la.inv(X) * Q)\n jdx = np.argmax(M)\n step_size = (M[jdx]-d-1.0)/((d+1)*(M[jdx]-1.0))\n new_u = (1-step_size)*u\n new_u[jdx] += step_size\n err = la.norm(new_u-u)\n u = new_u\n c = u*points\n A = la.inv(points.T*np.diag(u)*points - c.T*c)/d \n except: # For singular matrix errors i.e. 
motif is ellipse rather than ellipsoid\n centroid = np.average(points_asarray,axis=0)\n plane = Plane(atoms)\n normal = np.array([plane.a,plane.b,plane.c])\n norm_mag = np.sqrt(np.dot(normal,normal))\n for i, norm in enumerate(normal):\n normal[i] = norm * 1 / norm_mag\n centroid = np.average(points,axis=0).reshape(-1,3)\n p1 = centroid + normal*0.00001\n p2 = centroid - normal*0.00001\n points_asarray = np.concatenate([points_asarray,p1,p2],axis=0)\n points = np.asmatrix(points_asarray)\n N, d = points.shape\n Q = np.column_stack((points, np.ones(N))).T\n err = tol+1.0\n u = np.ones(N)/N\n while err > tol:\n # assert u.sum() == 1 # invariant\n X = Q * np.diag(u) * Q.T\n M = np.diag(Q.T * la.inv(X) * Q)\n jdx = np.argmax(M)\n step_size = (M[jdx]-d-1.0)/((d+1)*(M[jdx]-1.0))\n new_u = (1-step_size)*u\n new_u[jdx] += step_size\n err = la.norm(new_u-u)\n u = new_u\n c = u*points\n A = la.inv(points.T*np.diag(u)*points - c.T*c)/d \n \n return np.asarray(A), np.squeeze(np.asarray(c))", "def getSkeletonEqs(self):\n xs = np.array(self.XYProjections)[:,0]\n ys = np.array(self.XYProjections)[:,1]\n zs = np.array(self.XZProjections)[:,1]\n\n L = xs[-1] - xs[0]\n self.L = L\n xis = (xs - xs[0]) / L\n\n errorValue = lambda x,y,A: y - np.dot(A, x)\n a_init = np.array([1] * 4)\n\n # Calculate the derivation equation on x-y plane\n # Get the optimal parameters using least squre error method\n a1 = sp.optimize.leastsq(errorValue, a_init, args=(ys, self._H(xis, L)))[0]\n self.alpha_xyPlane = a1\n \n # Derivation\n xi = sy.symbols('xi')\n self.u_xyPlane = (self._H(xi, L, ifsymbol=True) * a1).sum()\n \n # Then calculate the derivation equation on x-z plane\n a2 = sp.optimize.leastsq(errorValue, a_init, args=(zs, self._H(xis, L)))[0]\n self.alpha_xzPlane = a2\n self.u_xzPlane = (self._H(xi, L, ifsymbol=True) * a2).sum()", "def Sphere_ExactSerendipityLagrangeQuad():\n\n mesh = Sphere_CubeToSerendipityLagrangeQuad(1)\n \n ################\n # Modifications for exact sphere\n ################\n # x=+1 side\n def posXvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.ones(xi1.shape);yb=np.array(-xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def posXnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.ones(xi1.shape);yb=np.array(-xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = -1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0 * np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0 * yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0 * zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0 * np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def posXJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.ones(xi1.shape);yb=np.array(-xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = -1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 
1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0 * np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0 * yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0 * zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0 * np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[0].vals = posXvals\n mesh.eList[0].normals = posXnormals\n mesh.eList[0].J = posXJ\n \n def posYvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def posYnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def posYJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[1].vals = posYvals\n mesh.eList[1].normals = posYnormals\n mesh.eList[1].J = posYJ\n \n # x=-1 side\n def negXvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=-np.ones(xi1.shape);yb=np.array(xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def negXnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=-np.ones(xi1.shape);yb=np.array(xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 
+ xx*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def negXJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=-np.ones(xi1.shape);yb=np.array(xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[2].vals = negXvals\n mesh.eList[2].normals = negXnormals\n mesh.eList[2].J = negXJ\n\n # y=-1 side\n def negYvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(-xi1);yb=-np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def negYnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(-xi1);yb=-np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2; \n dxdxi1 = -1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*0.5*xb*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5*(-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0*0.5*zb*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5*(-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0) \n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def negYJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(-xi1);yb=-np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2; \n dxdxi1 = -1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*0.5*xb*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5*(-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0*0.5*zb*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5*(-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0) \n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[3].vals = negYvals\n mesh.eList[3].normals = negYnormals\n mesh.eList[3].J = negYJ\n \n # z=+1 side\n def posZvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n 
xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1)\n yb=np.array(-xi2)\n zb=np.ones(xi1.shape)\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def posZnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(-xi2);zb=np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = -1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = -1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = -1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def posZJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(-xi2);zb=np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = -1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = -1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = -1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[4].vals = posZvals\n mesh.eList[4].normals = posZnormals\n mesh.eList[4].J = posZJ\n \n # z=-1 side\n def negZvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(xi2);zb=-np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def negZnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(xi2);zb=-np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def negZJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(xi2);zb=-np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n 
dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[5].vals = negZvals\n mesh.eList[5].normals = negZnormals\n mesh.eList[5].J = negZJ\n \n for e in mesh.eList:\n e.ExactElement = True\n \n return mesh", "def compute_convex_hull_volume(points):\n try:\n return ConvexHull(points).volume\n except:\n return 0", "def argminX( self ):\n min = 1e30\n minX = None\n for i in range( 0, self.GetN() ):\n p = ( ROOT.Double(), ROOT.Double() )\n self.GetPoint( i, p[0], p[1] )\n if p[1] < min:\n min = p[1]\n minX = p[0]\n return minX", "def fn_Calc_SearchVolume(az,el):\r\n return az*el/(57.296**2) # steradians\r", "def ellipse_ellipticity(S):\n return 1/2 * np.arcsin(S[..., 3]/S[..., 0])", "def insphere(network,\n geometry,\n **kwargs):\n import warnings\n try:\n import pulp as pu\n Np = geometry.num_pores()\n value = _sp.zeros(Np)\n pore_map = geometry.map_pores(geometry.pores(),geometry._net)\n for geom_pore,net_pore in pore_map:\n net_throats = geometry._net.find_neighbor_throats(net_pore)\n geom_throats = geometry._net.map_throats(net_throats,geometry)[:,1]\n verts = geometry['throat.offset_vertices'][geom_throats]\n if len(verts) > 1:\n try:\n pts = np.vstack((i for i in verts if len(i)>0))\n except ValueError:\n pts = []\n if len(pts) > 4:\n \"Work out central point to use as initial guess\"\n c0 = np.mean(pts,axis=0)\n \"Compute convex hull to find points lying on the hull in order\"\n hull = ConvexHull(pts, qhull_options='QJ Pp')\n \"For each simplex making up the hull collect the end points\"\n A = pts[hull.simplices[:,0]]\n B = pts[hull.simplices[:,1]]\n C = pts[hull.simplices[:,2]]\n #I = np.array([[0,1],[-1,0]])\n \"Normal of the simplices\"\n #N = np.dot((B-A),I)\n N = np.cross((B-A),(C-A),axis=1)\n #L = np.sqrt(np.sum(np.square(N),axis=1))\n \"Normalize the normal vector\"\n L = np.linalg.norm(N,axis=1)\n F = np.vstack((L,L,L)).T\n N /= F\n \"If normals point out of hull change sign to point in\"\n pointing_out = (np.sum((A-c0)*N,axis=1)>0)\n N[pointing_out]*= -1\n \"Define Linear Program Variables\"\n \"The centre of the incircle adjustment\"\n cx = pu.LpVariable(\"cx\",None,None,pu.LpContinuous)\n cy = pu.LpVariable(\"cy\",None,None,pu.LpContinuous)\n cz = pu.LpVariable(\"cz\",None,None,pu.LpContinuous)\n \"Radius of the incircle\"\n R = pu.LpVariable(\"R\",0,None,pu.LpContinuous)\n \"Slack variables for shortest distance between centre and simplices\" \n S = pu.LpVariable.dict(\"SlackVariable\",range(len(A)),0,None,pu.LpContinuous)\n \"Set up LP problem\"\n prob = pu.LpProblem(\"FindInRadius\",pu.LpMaximize)\n \"Objective Function\"\n prob += R\n for i in range(len(A)):\n \" Ni.(C-Ai)-Si = 0\"\n prob += N[i][0]*(c0[0]+cx) + N[i][1]*(c0[1]+cy) + N[i][2]*(c0[2]+cz)- N[i][0]*A[i][0] - N[i][1]*A[i][1] - N[i][2]*A[i][2]- S[i] == 0\n \"Si >= R\"\n prob += S[i] >= R\n \"Solve the LP\"\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n prob.solve()\n \"As the radius is the objective function we can get it from the objective or as R.value()\"\n rad = prob.objective.value()\n #cen = c0 
+ np.array([cx.value(),cy.value(),cz.value()])\n value[geom_pore]=rad*2\n \n \n return value\n except ImportError:\n print(\"Cannot use insphere method without installing pulp package\")", "def ellipsoid(center, radii, rotation, scales=None, shape=None, minarea=0):\n center = np.array(center)\n radii = np.array(radii)\n rotation = np.array(rotation)\n assert center.shape == (3,)\n assert radii.shape == (3,)\n assert 0 < radii.max(), \"radii should contain at least one positive value\"\n assert rotation.shape == (3, 3)\n if scales is None:\n scales = (1.,) * 3\n scales = np.array(scales)\n assert scales.shape == (3,)\n\n scaled_center = center / scales\n\n # The upper_left_bottom and lower_right_top corners of the smallest cuboid\n # containing the ellipsoid.\n factor = np.array([\n [i, j, k] for k in (-1, 1) for j in (-1, 1) for i in (-1, 1)]).T\n while True:\n radii_rot = np.abs(\n np.diag(1. / scales).dot(rotation.dot(np.diag(radii).dot(factor)))\n ).max(axis=1)\n # In the original scikit-image code, ceil and floor were replaced.\n # https://github.com/scikit-image/scikit-image/blob/master/skimage/draw/draw.py#L127\n upper_left_bottom = np.floor(scaled_center - radii_rot).astype(int)\n lower_right_top = np.ceil(scaled_center + radii_rot).astype(int)\n\n if shape is not None:\n # Constrain upper_left and lower_ight by shape boundary.\n upper_left_bottom = np.maximum(\n upper_left_bottom, np.array([0, 0, 0]))\n lower_right_top = np.minimum(\n lower_right_top, np.array(shape[:3]) - 1)\n\n bounding_shape = lower_right_top - upper_left_bottom + 1\n\n d_lim, r_lim, c_lim = np.ogrid[0:float(bounding_shape[0]),\n 0:float(bounding_shape[1]),\n 0:float(bounding_shape[2])]\n d_org, r_org, c_org = scaled_center - upper_left_bottom\n d_rad, r_rad, c_rad = radii\n rotation_inv = np.linalg.inv(rotation)\n conversion_matrix = rotation_inv.dot(np.diag(scales))\n d, r, c = (d_lim - d_org), (r_lim - r_org), (c_lim - c_org)\n distances = (\n ((d * conversion_matrix[0, 0] +\n r * conversion_matrix[0, 1] +\n c * conversion_matrix[0, 2]) / d_rad) ** 2 +\n ((d * conversion_matrix[1, 0] +\n r * conversion_matrix[1, 1] +\n c * conversion_matrix[1, 2]) / r_rad) ** 2 +\n ((d * conversion_matrix[2, 0] +\n r * conversion_matrix[2, 1] +\n c * conversion_matrix[2, 2]) / c_rad) ** 2\n )\n if distances.size < minarea:\n old_radii = radii.copy()\n radii *= 1.1\n print('Increase radii from ({}) to ({})'.format(old_radii, radii))\n else:\n break\n distance_thresh = 1\n while True:\n dd, rr, cc = np.nonzero(distances < distance_thresh)\n if len(dd) < minarea:\n distance_thresh *= 1.1\n else:\n break\n dd.flags.writeable = True\n rr.flags.writeable = True\n cc.flags.writeable = True\n dd += upper_left_bottom[0]\n rr += upper_left_bottom[1]\n cc += upper_left_bottom[2]\n return dd, rr, cc", "def extractOblateEllipse(kperp,kpar,aniso):\n\n if aniso > 1.:\n #print(\"Swapping axis for oblate ellipse\")\n aniso = 1. / aniso\n\n # Define the eccentricity of the ellipse\n e = np.sqrt( 1. - aniso**2. )\n\n\n # the oblate surface area\n surface = 2. * np.pi * kperp**2. * ( 1. + ( (1. - e**2.) 
/ e ) * np.arctanh(e) )\n\n return surface", "def get_interior_points(N=128):\n x1 = sobol.i4_sobol_generate(2, N) - np.array([1, 1])\n x2 = sobol.i4_sobol_generate(2, N) - np.array([1, 0])\n x3 = sobol.i4_sobol_generate(2, N) - np.array([0, 1])\n return torch.from_numpy(np.concatenate((x1, x2, x3), 0)).float()", "def fix_sphere_m (center_x, center_y, center_z, radius, centers, radii, len_points):\n \n g_x = []\n g_y = []\n g_z = []\n points = [hydrogen_coord_gen(center_x, center_y, center_z, radius) for i in range(0, len_points)] \n x = [points[i][0] for i in range(0, len(points))] \n y = [points[i][1] for i in range(0, len(points))]\n z = [points[i][2] for i in range(0, len(points))]\n\n for i in range(0, len(points)):\n check = 0\n j = 0\n while (j <= (len(centers) - 1) and (check == 0)): \n if (calculate_3D_distance_2_centers(x[i], y[i], z[i], centers[j][0], centers[j][1], centers[j][2]) < radii[j]):\n check += 1\n j += 1\n if (check == 0):\n g_x.append(x[i])\n g_y.append(y[i])\n g_z.append(z[i])\n\n return g_x, g_y, g_z", "def fix_sphere_h (center_x, center_y, center_z, radius, centers, radii, len_points, list_of_a):\n g_x = []\n g_y = []\n g_z = []\n points = [hydrogen_coord_gen(center_x, center_y, center_z, radius) for i in range(0, len_points)] \n x = [points[i][0] for i in range(0, len(points))] \n y = [points[i][1] for i in range(0, len(points))]\n z = [points[i][2] for i in range(0, len(points))]\n for i in range(0, len(points)):\n check = 0\n check_b = 0\n j = 0\n while (j <= (len(centers) - 1) and (check == 0)): \n if (calculate_3D_distance_2_centers(x[i], y[i], z[i], centers[j][0], centers[j][1], centers[j][2]) < radii[j]):\n check += 1\n j += 1\n h = 0\n while ((check_b == 0) and (h <= len(list_of_a) -1)):\n if (calculate_3D_distance_2_centers(x[i], y[i], z[i], list_of_a[h].x, list_of_a[h].y, list_of_a[h].z) <= 1.50): \n check_b += 1\n h += 1\n if ((check == 0) and (check_b == 0)):\n g_x.append(x[i])\n g_y.append(y[i])\n g_z.append(z[i])\n return g_x, g_y, g_z", "def major_extent(self) -> complex:\n return max((self.max() - self.null, self.null - self.min()))", "def expo_sphere(k0, kmax, pos):\n\n # Technical note: we use ellipsis, so that we can pass either a\n # single sample or multiple samples without having to add a\n # trivial extra dimension to input array\n im = numpy.complex(0.0, 1.0)\n # The integer grid must be the same as the one set in kgrid,\n # otherwise there is an offset the problem is that integer\n # negative indexing is impossible in python and rounding or\n # truncating kmax can slightly offset the grid\n\n # We pick up the smallest k0 to compute the integer grid\n # This leaves many unused vectors in the other directions, which\n # could be dropped using different nkmax for x, y, z\n nk_max = 1 + int(kmax / min(k0))\n expo = numpy.ndarray((len(pos), ) + pos[0].shape + (2*nk_max+1, ), numpy.complex)\n expo[..., nk_max] = numpy.complex(1.0, 0.0)\n # First fill positive k\n for j in range(pos[0].shape[-1]):\n expo[..., j, nk_max+1] = numpy.exp(im * k0[j] * pos[..., j])\n expo[..., j, nk_max-1] = expo[..., j, nk_max+1].conjugate()\n for i in range(2, nk_max):\n expo[..., j, nk_max+i] = expo[..., j, nk_max+i-1] * expo[..., j, nk_max+1]\n # Then take complex conj for negative ones\n for i in range(2, nk_max+1):\n expo[..., nk_max+i] = expo[..., nk_max+i-1] * expo[..., nk_max+1]\n expo[..., nk_max-i] = expo[..., nk_max+i].conjugate()\n\n return expo", "def test_el_small_surface_instability():\n levels = np.array([959., 931.3, 925., 899.3, 892., 867.9, 850., 
814.,\n 807.9, 790., 779.2, 751.3, 724.3, 700., 655., 647.5,\n 599.4, 554.7, 550., 500.]) * units.mbar\n temperatures = np.array([22.2, 20.2, 19.8, 18.4, 18., 17.4, 17., 15.4, 15.4,\n 15.6, 14.6, 12., 9.4, 7., 2.2, 1.4, -4.2, -9.7,\n -10.3, -14.9]) * units.degC\n dewpoints = np.array([20., 18.5, 18.1, 17.9, 17.8, 15.3, 13.5, 6.4, 2.2,\n -10.4, -10.2, -9.8, -9.4, -9., -15.8, -15.7, -14.8, -14.,\n -13.9, -17.9]) * units.degC\n el_pressure, el_temperature = el(levels, temperatures, dewpoints)\n assert_nan(el_pressure, levels.units)\n assert_nan(el_temperature, temperatures.units)", "def points_from_ellipsoid_surface(a, b, c=0, n=10):\n points = []\n points = np.zeros((n, 3))\n points[:, 0] = np.random.normal(0, a, n)\n points[:, 1] = np.random.normal(0, b, n)\n if c > 0:\n points[:, 2] = np.random.normal(0, c, n)\n ds = np.linalg.norm(points / [a, b, c if c else 1], axis=-1)\n points /= ds[:, np.newaxis]\n return points if c else points[:, :2]", "def epipoles_location(f_mat):\r\n u, s, vh = np.linalg.svd(f_mat)\r\n e_l = vh[-1, :]\r\n e_r = u[:, -1]\r\n # get x, y by dividing by w\r\n e_l = (e_l[0] / e_l[2], e_l[1] / e_l[2])\r\n e_r = (e_r[0] / e_r[2], e_r[1] / e_r[2])\r\n return e_l, e_r", "def llh(self):\n return Station._ellipsoid.geodetic(self.xyz())", "def extractProlateEllipse(kperp,kpar,aniso):\n\n if aniso > 1.:\n #print(\"Swapping axis for prolate ellipse\")\n aniso = 1. / aniso\n\n # Define the eccentricity of the ellipse\n e = np.sqrt( 1. - aniso**2 )\n\n\n # the prolate surface area\n surface = 2. * np.pi * kpar**2. * ( 1. + (1. / ( e * np.sqrt( 1. - e**2. ) ) ) * np.arcsin(e) )\n\n return surface", "def test_elliptic_known(self):\n\n s = space(curvature=1)\n for r, s1, v2, s2, v3 in (\n (\n 1.0,\n 5.28711812816291235777213197934,\n 2.88836579751364013754312174055,\n 8.89791299620185648000441978084,\n 3.42654319113592227685929952373\n ),\n (\n 0.1,\n 0.627271856640888586303151271167,\n 0.0313897553222061208579665325089,\n 0.125245385229718577742290413525,\n 0.00418042059859385652716262757844\n ),\n (\n 1.55,\n 6.28182665751126808523746937213,\n 6.15252755066186628750014389238,\n 12.5609366032633242045384074345,\n 9.60830772249653625946806331352\n )\n ):\n self.assertTrue(isclose(\n s.sphere_s1(r),\n s1\n ))\n self.assertTrue(isclose(\n s.inv_sphere_s1(s1),\n r\n ))\n self.assertTrue(isclose(\n s.sphere_v2(r),\n v2\n ))\n self.assertTrue(isclose(\n s.inv_sphere_v2(v2),\n r\n ))\n self.assertTrue(isclose(\n s.sphere_s2(r),\n s2\n ))\n self.assertTrue(isclose(\n s.inv_sphere_s2(s2),\n r\n ))\n self.assertTrue(isclose(\n s.sphere_v3(r),\n v3\n ))\n # inv_sphere_v3\n # is not tested\n # this is intentional", "def near_surface_samples(self):\n if self._near_surface_samples is None:\n if self.is_from_directory:\n nss_sample_path = f'{self._directory_root}/nss_points.sdf'\n nss = gaps_util.read_pts_file(nss_sample_path)\n # log.info(f'The points have shape {nss.shape}')\n else:\n nss = self._archive['axis_samples']\n self._near_surface_samples = np.reshape(nss,\n [100000, 4]).astype(np.float32)\n return self._near_surface_samples", "def get_min_mag_center(self):\r\n\t\treturn self.min_mag + self.bin_width / 2", "def equatorial_flattening(self):\n return (self.semimajor_axis - self.semimedium_axis) / self.semimajor_axis", "def createEllipsoid( position=(0,0,0), radius=(1,1,1), colour=(0.6,0.6,0.6), samplesY = 20, samplesXZ = 20, exponentBottom = 2, exponentTop = 2, exponentSide = 2 ):\r\n \r\n if exponentBottom < 2.0 or exponentTop < 2.0 or exponentSide < 2.0 :\r\n raise ValueError( 
'Exponents for ellipsoid must all be under 2.0!' )\r\n \r\n position = PyUtils.toPoint3d(position)\r\n vertices = []\r\n for i in range(1,samplesY):\r\n thetaI = i*math.pi/float(samplesY)\r\n if i < samplesY / 2 : \r\n n = exponentTop\r\n else:\r\n n = exponentBottom\r\n cos = math.cos(thetaI) \r\n y = cos * radius[1]\r\n scaleXZ = math.pow( 1-math.pow(math.fabs(cos),n), 1.0/float(n) )\r\n for j in range(0,samplesXZ):\r\n thetaJ = j*2.0*math.pi/float(samplesXZ)\r\n n = exponentSide\r\n cos = math.cos(thetaJ)\r\n x = cos * scaleXZ * radius[0]\r\n z = math.pow( 1-math.pow(math.fabs(cos),n), 1.0/float(n) ) * math.copysign(1, math.sin(thetaJ)) * scaleXZ * radius[2]\r\n vertices.append( position + Vector3d(x,y,z) )\r\n vertices.append( position + Vector3d(0,radius[1],0) )\r\n vertices.append( position + Vector3d(0,-radius[1],0) ) \r\n\r\n faces = []\r\n for i in range(0,(samplesY-2)*samplesXZ,samplesXZ) :\r\n for j in range(0,samplesXZ) :\r\n faces.append( (i+j, i+(j+1)%samplesXZ, i+samplesXZ+(j+1)%samplesXZ, i+samplesXZ+j) ) \r\n\r\n for i in range(0,samplesXZ) :\r\n base = (samplesY-2)*samplesXZ\r\n faces.append( ((i+1)%samplesXZ, i, (samplesY-1)*samplesXZ) ) \r\n faces.append( (base+i, base+(i+1)%samplesXZ, (samplesY-1)*samplesXZ+1) ) \r\n\r\n \r\n return create( vertices, faces, colour )", "def phantom_ellipses(n_points,E):\n \n #Rescaling according to image size \n E[:,0] = E[:,0]*n_points/2 #semiaxis a\n E[:,1] = E[:,1]*n_points/2 #semiaxis b\n E[:,2] = E[:,2]*n_points/2 #x\n E[:,3] = E[:,3]*n_points/2 #y\n E[:,4] = E[:,4]*math.pi/180\n \n x,y = np.meshgrid(np.arange(0,n_points)-n_points//2 ,np.arange(0,n_points)-n_points//2 )\n nrow,ncol = E.shape\n phantom1 = np.zeros((y.shape[0], y.shape[1], nrow))\n\n for k in range(nrow): #itero sulle ellissi\n x_new = x - E[k,2]\n y_new = y - E[k,3]\n\n #find(( (x.*cosp + y.*sinp).^2)./asq + ((y.*cosp - x.*sinp).^2)./bsq <= 1); \n cosp = math.cos(E[k,4])\n sinp = math.sin(E[k,4])\n cond = np.square( x_new * cosp + y_new * sinp )*1/(E[k,0]*E[k,0]) + \\\n np.square(y_new * cosp - x_new * sinp)*1/(E[k,1]*E[k,1]) - 1\n\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n if (cond[i,j] <= 0.0):\n phantom1[i,j,k] = E[k,5]; # gray scale\n else:\n phantom1[i,j,k] = 0.0;\n #endif\n #endfor\n #endfor\n #endfor\n phantom1 = phantom1.sum(axis=2)\n phantom = np.flipud(phantom1)\n return phantom", "def _minAlien(self):\n minA = 999999\n for r in self._aliens:\n for y in r:\n if(y != None):\n minA = min(minA,y.x)\n return minA", "def center_of_mass_polyhedron():\n raise NotImplementedError", "def trajectory_inside_ellipsoid(env, p_0, p_all, q_all, k_fb, k_ff):\n n, _ = np.shape(k_ff)\n n_u = env.n_u\n n_s = env.n_s\n # init system to p_0\n\n x_all = simulate_trajectory(env, p_0, k_fb, k_ff, p_all)[1:, :]\n\n inside_ellipsoid = np.zeros((n,), dtype=np.bool)\n for i in range(n):\n inside_ellipsoid[i] = sample_inside_ellipsoid(x_all[None, i, :],\n p_all[i, :, None],\n q_all[i, :].reshape((n_s, n_s)))\n\n return inside_ellipsoid", "def center(self):\n points = set()\n for face in self._points:\n points.update(face)\n x_points = [point[0] for point in points]\n y_points = [point[1] for point in points]\n z_points = [point[2] for point in points]\n return \\\n (np.average(x_points), np.average(y_points), np.average(z_points))", "def surfaceIntSphere(r: float) -> float:\n return 4.0 * np.pi * r * r", "def compute_voronoi_centroid_volume(vertices):\n from scipy.spatial import Delaunay, ConvexHull\n\n tess = Delaunay(vertices)\n dimension = 
np.shape(vertices)[1]\n\n w = np.zeros((tess.nsimplex, 1))\n cent = np.zeros((tess.nsimplex, dimension))\n for i in range(tess.nsimplex):\n # pylint: disable=E1136\n ch = ConvexHull(tess.points[tess.simplices[i]])\n w[i] = ch.volume\n cent[i, :] = np.mean(tess.points[tess.simplices[i]], axis=0)\n\n volume = np.sum(w)\n centroid = np.matmul(np.divide(w, volume).T, cent)\n\n return centroid, volume", "def leftmost_element(self):\n\n\n #Element limits\n L = 0\n R = 2*self.h\n\n psi0 = (self.x-self.h)*(self.x-2*self.h)/(2*self.h**2)\n psi1 = -self.x*(self.x-2*self.h)/(self.h**2)\n psi2 = self.x*(self.x-self.h)/(2*self.h**2)\n\n self.psi[0] = psi0\n self.psi[1] = psi1\n self.psi[2] = psi2\n\n d_psi0 = sym.diff(psi0,self.x)\n d_psi1 = sym.diff(psi1,self.x)\n d_psi2 = sym.diff(psi2,self.x)\n\n psi_00 = d_psi0*d_psi0\n psi_11 = d_psi1*d_psi1\n psi_22 = d_psi2*d_psi2\n psi_01 = d_psi0*d_psi1\n psi_02 = d_psi0*d_psi2\n psi_12 = d_psi1*d_psi2\n\n A_00 = sym.integrate(psi_00, (self.x, L, R))\n A_11 = sym.integrate(psi_11, (self.x, L, R))\n A_22 = sym.integrate(psi_22, (self.x, L, R))\n A_01 = sym.integrate(psi_01, (self.x, L, R))\n A_02 = sym.integrate(psi_02, (self.x, L, R))\n A_12 = sym.integrate(psi_12, (self.x, L, R))\n\n rhs_0 = sym.integrate(self.f(self.x)*psi0,(self.x,L,R)) - self.C\n rhs_1 = sym.integrate(self.f(self.x)*psi1,(self.x,L,R))\n rhs_2 = sym.integrate(self.f(self.x)*psi2,(self.x,L,R))\n\n a1 = [A_00,A_01,A_02]\n a2 = [A_01, A_11, A_12]\n a3 = [A_02, A_12, A_22]\n\n A = np.array([a1, a2, a3]).reshape(3,3)#Dette kan gjøres utenfor bro.\n b = np.array([rhs_0, rhs_1, rhs_2])\n\n\n for i in range(3):\n self.global_vector[i] = b[i]\n for j in range(3):\n self.global_matrix[i,j] = A[i,j]", "def search_alpha_min(self):\n alpha_min = 0\n\n for alpha in range(0, 180, 4):\n r = self.solver(alpha)[0]\n if r[-1] > 1.1*self.Rs:\n break\n\n if (alpha-4) > 0:\n alpha_min = alpha - 4\n # print(\"alpha_min :\",alpha_min,\"(-4)\")\n i = 1\n\n while alpha_min == 0 or round(alpha_min*self.img_res) != round((alpha_min+i*10)*self.img_res): #increase precision\n\n for alpha in range(int(alpha_min/i), int(180/i), 1):\n alpha = alpha*i\n r = self.solver(alpha)[0]\n\n if r[-1] > 1.1*self.Rs:\n break\n\n if (alpha-i) > 0:\n alpha_min = alpha - i\n # print(\"alpha_min : \",alpha_min,\" (-\",i,\")\",sep=\"\")\n\n i = i/10\n i = 10*i\n alpha_min += i\n print(\"alpha_min: %s [%s, %s]\" % (alpha_min, alpha_min-i, alpha_min))\n\n return alpha_min", "def nearest_sphere_surface(x_input, y_input, z_input):\n\n vm = math.sqrt(sum([x_input**2, y_input**2, z_input**2]))\n return (x_input/vm, y_input/vm, z_input/vm)", "def num_ellipsoids(self):\n return self._shape_count(_sff.ellipsoid)", "def project(self, latitude, longitude):\n longitude = longitude if longitude > 0 else 360 + longitude\n\n lon_array = self.ecmwf_data.longitude\n lat_array = self.ecmwf_data.latitude\n\n # Find in the lon_array / lat_array the index interval\n # Including lon_ul and lat_ul\n a_lon = np.where((lon_array < longitude))[0][-1]\n if longitude > lon_array.max():\n # lon is between 359.6 and 0 ...\n b_lon = 0\n else:\n b_lon = np.where((lon_array >= longitude))[0][0]\n\n a_lat = np.where((lat_array < latitude))[0][0]\n b_lat = np.where((lat_array >= latitude))[0][-1]\n\n # Compute geo extent around the point :\n # => extent definition : LR,LL,UL,UR\n extent = [lon_array[a_lon], lat_array[a_lat],\n lon_array[b_lon], lat_array[a_lat],\n lon_array[b_lon], lat_array[b_lat],\n lon_array[a_lon], lat_array[b_lat]]\n\n extent_index = [a_lon, a_lat,\n 
b_lon, a_lat,\n b_lon, b_lat,\n a_lon, b_lat]\n\n log.info(' - Selected vertex : ')\n log.info('LL (px,ln) / (lon,lat) : (%s, %s) / (%s dd , %s dd)' % (\n str(extent_index[0]), str(extent_index[1]), str(extent[0]), str(extent[1])))\n log.info('LR (px,ln) / (lon,lat) : (%s, %s) / (%s dd , %s dd)' % (\n str(extent_index[2]), str(extent_index[3]), str(extent[2]), str(extent[3])))\n log.info('UR (px,ln) / (lon,lat) : (%s, %s) / (%s dd , %s dd)' % (\n str(extent_index[4]), str(extent_index[5]), str(extent[4]), str(extent[5])))\n log.info('UL (px,ln) / (lon,lat) : (%s, %s) / (%s dd , %s dd)' % (\n str(extent_index[6]), str(extent_index[7]), str(extent[6]), str(extent[7])))\n\n # TIE Point grid defined - compute linear transformation\n # to estimate value at the lat/lon location\n # origin : extent_ul[0], extent_ul[1]\n delta_lon = 0.4 # extent[4] - extent[6] # UR - UL\n delta_lat = -0.4 # extent[1] - extent[7] # LL - UL\n\n lambda_lat = latitude - extent[7]\n lambda_lon = longitude - extent[6]\n\n beta_longitude = lambda_lon / delta_lon\n beta_latitude = lambda_lat / delta_lat\n\n # Processing of all keys\n for key in self.ecmwf_data.mandatory_attributes:\n M = getattr(self.ecmwf_data, key)\n v = self.linear_estimate(M,\n beta_latitude,\n beta_longitude,\n extent_index)\n setattr(self, key, v)", "def _EStep(x, centers):\n nbitem = x.shape[0]\n z = - np.ones(nbitem).astype(np.int_)\n mindist = np.inf * np.ones(nbitem)\n k = centers.shape[0]\n for q in range(k):\n dist = np.sum((x - centers[q]) ** 2, 1)\n z[dist < mindist] = q\n mindist = np.minimum(dist, mindist)\n J = mindist.sum()\n return z, J", "def _setup_grid_sphere(self, dk, kgrid, k0):\n kvec = defaultdict(list)\n kvec_centered = defaultdict(list)\n # With elongated box, we choose the smallest k0 component to setup the integer grid\n # This must be consistent with expo_grid() otherwise it wont find the vectors\n kmax = kgrid[-1] + dk[-1]\n kbin_max = 1 + int(kmax / min(k0))\n # TODO: it would be more elegant to define an iterator over ix, iy, iz for sphere, hemisphere, ... 
unless kmax is very high it might be more efficient to operate on a 3d grid to construct the vectors\n kmax_sq = kmax**2\n for ix in range(-kbin_max, kbin_max+1):\n for iy in range(-kbin_max, kbin_max+1):\n for iz in range(-kbin_max, kbin_max+1):\n # Slightly faster and more explicit than\n # ksq = sum([(x*y)**2 for x, y in zip(k0, [ix, iy, iz])])\n ksq = ((k0[0]*ix)**2 + (k0[1]*iy)**2 + (k0[2]*iz)**2)\n if ksq > kmax_sq:\n continue\n # beware: numpy.sqrt is x5 slower than math one!\n knorm = math.sqrt(ksq)\n # Look for a shell of vectors in which the vector could fit.\n # This expression is general and allows arbitrary k grids\n # However, searching for the shell like this is not fast\n # (it costs about as much as the above)\n for ki, dki in zip(kgrid, dk):\n if abs(knorm - ki) < dki:\n kvec[ki].append((ix+kbin_max, iy+kbin_max, iz+kbin_max))\n kvec_centered[ki].append((ix, iy, iz))\n break\n\n # if len(kvec.keys()) != len(kgrid):\n # _log.info('some k points could not be found')\n\n return kvec, kvec_centered", "def center(self):\n try: \n return self._center\n except AttributeError:\n self._center = vector(ZZ, [0]*self.ambient_dim())\n for v in self.vertex_generator(): self._center += v.vector()\n self._center /= self.n_vertices()\n return self._center", "def calc_main_axis(self):\n #Clarify why the above step has been done\n c0, c1, c2 = self.calc_geom_center ()\n M = numpy.zeros ((3, 3), dtype=float)\n M = [[0] * 3, [0] * 3, [0] * 3]\n for x in self.atomcoords:\n xi = x[0] - c0\n yi = x[1] - c1\n zi = x[2] - c2\n M[0][0] = M[0][0] + xi * xi\n M[0][1] = M[0][1] + xi * yi\n M[0][2] = M[0][2] + xi * zi\n M[1][1] = M[1][1] + yi * yi\n M[1][2] = M[1][2] + yi * zi\n M[2][2] = M[2][2] + zi * zi\n M[1][0] = M[0][1]\n M[2][0] = M[0][2]\n M[2][1] = M[1][2]\n M = numpy.array (M)\n d = sum (numpy.diag (M))\n M = -M\n M[0, 0] = M[0, 0] + d\n M[1, 1] = M[1, 1] + d\n M[2, 2] = M[2, 2] + d\n\n eigenVals, eigenVecs = numpy.linalg.eig (M)\n eigenVecs = eigenVecs.transpose ()\n return eigenVecs", "def ellipse_bounds(P, level, n=100):\n # Round up to multiple of 2\n n += n % 2\n\n # Principal axes of ellipsoid\n eigval, eigvec = np.linalg.eig(P)\n eigvec *= np.sqrt(level / eigval)\n\n # set zero angle at maximum x\n angle = np.linspace(0, 2 * np.pi, n)[:, None]\n angle += np.arctan(eigvec[0, 1] / eigvec[0, 0])\n\n # Compute positions\n pos = np.cos(angle) * eigvec[:, 0] + np.sin(angle) * eigvec[:, 1]\n n /= 2\n\n # Return x-position (symmetric) and upper/lower bounds\n return pos[:n, 0], pos[:n, 1], pos[:n - 1:-1, 1]", "def sectorsphere(self, x):\r\n return sum(x**2) + (1e6-1) * sum(x[x<0]**2)", "def calculate_box(vertices: [[float]]) -> [float]:\n x_coords = [x[0] for x in vertices]\n y_coords = [x[1] for x in vertices]\n z_coords = [x[2] for x in vertices]\n\n return [min(x_coords), min(y_coords), min(z_coords), max(x_coords), max(y_coords), max(z_coords)]", "def expo_sphere_safe(k0, kmax, pos):\n im = numpy.complex(0.0, 1.0)\n ndims = pos.shape[-1]\n nk_max = 1 + int(kmax / min(k0))\n expo = numpy.ndarray(pos.shape + (2*nk_max+1, ), numpy.complex)\n expo[:, :, :, nk_max] = numpy.complex(1.0, 0.0)\n\n for j in range(ndims):\n expo[:, :, j, nk_max+1] = numpy.exp(im*k0[j]*pos[:, :, j])\n expo[:, :, j, nk_max-1] = expo[:, :, j, nk_max+1].conjugate()\n for i in range(2, nk_max):\n expo[:, :, j, nk_max+i] = expo[:, :, j, nk_max+i-1] * expo[:, :, j, nk_max+1]\n\n for i in range(2, nk_max+1):\n expo[:, :, :, nk_max+i] = expo[:, :, :, nk_max+i-1] * expo[:, :, :, nk_max+1]\n expo[:, :, :, nk_max-i] = expo[:, 
:, :, nk_max+i].conjugate()\n\n return expo", "def calcul_v_sphere(r):\n volume = 4/3 * math.pi * (r ** 3)\n return volume", "def minimal_image(x,y,z,D):\n diff = D\n x = (x+diff/2) % D - diff/2\n y = (y+diff/2) % D - diff/2\n z = (z+diff/2) % D - diff/2\n return x, y, z", "def get_xyz(self, H, K, L):\n v7 = vec(H, K, L)\n v6 = self.Bmat * v7\n v5 = self.Umat * v6\n\n def ewald_condition(phi): return (\n norm(self.Evec)**2 - norm(self.Gmat(phi)*v5 + self.Evec)**2)\n\n phis = []\n if H == 0 and K == 0 and L == 0:\n pass\n elif optimize.fsolve(ewald_condition, 45.0, full_output=1)[2] == 1:\n phis = list(\n np.unique(\n np.around(\n [optimize.fsolve(ewald_condition, phi) % 360\n for phi in np.arange(30, 390, 15)],\n decimals=4)))\n\n def get_ij(phi):\n v4 = self.Gmat(phi) * v5\n p = norm_vec(v4 + self.Evec)\n v3 = -(self.Dvec[0, 0] / p[0, 0]) * p\n v2 = self.Dmat * (v3 + self.Dvec)\n v1 = (self.Omat * v2 / self.pixel_size) + self.Cvec\n return v1[0, 0], v1[1, 0]\n\n peaks = []\n for phi in phis:\n x, y = get_ij(phi)\n z = ((phi - self.phi_start) / self.phi_step) % 3600\n if z < 25:\n z = z + 3600\n elif z > 3625:\n z = z - 3600\n if x > 0 and x < self.shape[1] and y > 0 and y < self.shape[0]:\n peaks.append(NXPeak(x, y, z, H=H, K=K, L=L, parent=self))\n\n peaks = [peak for peak in peaks if peak.z > 0 and peak.z < 3648]\n\n return peaks", "def getRoots(self):\n a, b, c = self.getCoefficients()[2], self.getCoefficients()[1], self.getCoefficients()[0]\n delta = b**2 - 4*a*c\n if delta >= 0:\n roots = sorted([(-b - math.sqrt(delta))/(2*a), (-b + math.sqrt(delta))/(2*a)])\n else:\n roots = sorted([(-b - math.sqrt(-delta)*1j)/(2*a), (-b + math.sqrt(-delta)*1j)/(2*a)], key=lambda x: (x.real, x.imag))\n return roots", "def ellipseDesc(lps):\r\n unit = 100 #units in QualiTree are in [mm], hence Pgl is in [dm] ?\r\n\r\n if isinstance(lps, pgl.Translated):\r\n cx, cy, cz = lps.translation\r\n else:\r\n print\"missing Translated from the bounding ellipse as a Translated(Rotated(Scaled(Sphere)))\"\r\n\r\n ori = lps.geometry\r\n\r\n if isinstance(ori, pgl.Oriented):\r\n rotMat = ori.transformation().getMatrix3()\r\n az, el, roll = rotMat.eulerAnglesZYX()\r\n else:\r\n print\"missing Oriented from the bounding ellipse as a Translated(Rotated(Scaled(Sphere)))\"\r\n az = 0\r\n \r\n scal = ori.geometry\r\n\r\n if isinstance(scal, pgl.Scaled):\r\n scMat = scal.transformation().getMatrix()\r\n rx, ry, rz, rt = scMat.getDiagonal()\r\n else:\r\n print\"missing Scaled from the bounding ellipse as a Translated(Rotated(Scaled(Sphere)))\"\r\n rx=ry=rz=1\r\n\r\n #x1, y1, z1 #Conversion repère MappleT (m) à reprère Qualitree (q) : Xq=Xm Yq=Zm Zq=-Ym. 
\r\n #Due to change of coordinate axis, rotation needs - pi <-- apparently not !\r\n #return cx*unit, cz*unit, -cy*unit, rx*unit, rz*unit, ry*unit, az-3.1415927\r\n\r\n return cx*unit, cz*unit, -cy*unit, rx*unit, rz*unit, ry*unit, az", "def pointsOn4Sphere(numPts):\n points = np.zeros((2*numPts, 4))\n N = 4\n surfaceArea = N * np.pi ** (N/2) / (N/2) # for even N\n delta = np.exp(np.log(surfaceArea / numPts) / 3)\n Iter = 0\n ind = 0\n maxIter = 1000\n while ind != numPts and Iter < maxIter:\n ind = 0\n deltaW1 = delta\n w1 = 0.5 * deltaW1\n while w1 < np.pi:\n q0 = np.cos(w1)\n deltaW2 = deltaW1 / np.sin(w1)\n w2 = 0.5 * deltaW2\n while w2 < np.pi:\n q1 = np.sin(w1) * np.cos(w2)\n deltaW3 = deltaW2 / np.sin(w2)\n w3 = 0.5 * deltaW3\n while w3 < 2 * np.pi:\n q2 = np.sin(w1) * np.sin(w2) * np.cos(w3)\n q3 = np.sin(w1) * np.sin(w2) * np.sin(w3)\n points[ind, :] = np.array([q0, q1, q2, q3])\n ind += 1\n w3 += deltaW3\n w2 += deltaW2\n w1 += deltaW1\n delta *= np.exp(np.log(float(ind) / numPts) / 3)\n Iter += 1\n return points[0:numPts, :]", "def minX(self):\n self._updateExtents()\n return self._mMinX", "def cornersphere(self, x):\r\n nconstr = len(x) - 0\r\n if any(x[:nconstr] < 1):\r\n return np.NaN\r\n return sum(x**2) - nconstr", "def test_min_corner_offset(self):\n points = np.array([\n [0.1, 0.1, 0.1],\n [1.1, 2.1, 3.1],\n [1.3, 2.2, 3.4]])\n vg = VoxelGrid(1, min_corner=Vector3f(-1, -2, -3), points=points)\n centers = vg.voxel_centers()\n expected_centers = np.array([\n [0.5, 0.5, 0.5],\n [1.5, 2.5, 3.5]])\n np.testing.assert_array_almost_equal(centers, expected_centers)", "def qe(np_points, np_centers):\n a1 = np.sum(np.power(np_points, 2), axis=1)\n a2 = np.dot(np_points, np_centers.T)\n a3 = np.sum(np.power(np_centers, 2), axis=1)\n dist = - 2*a2 + a3[np.newaxis, :]\n mindist = np.min(dist, axis=1) + a1\n error = np.sum(mindist)\n return error", "def getVolumePoints(minRes, rRes, region):\n\n # when every resolution has the same bndry buffer\n maxDx = (1. 
+ 1.e-8) * lx / float(minRes)\n dr = pecRad / float(rRes)\n\n # shell distances inside dielectric\n rmin = 0.5 * math.sqrt(3.0) * maxDx\n rmax = epsRad - 3.0 * maxDx\n rIn = numpy.arange(rmin, rmax, dr)\n\n # shell distances outside dielectric\n rmin = epsRad + 3.0 * maxDx\n rmax = pecRad - 3.0 * maxDx\n rOut = numpy.arange(rmin, rmax, dr)\n\n if region == \"in\":\n rs = rIn\n elif region == \"out\":\n rs = rOut\n else:\n rs = numpy.concatenate([rIn, rOut])\n\n points = []\n for r in rs:\n dTheta = math.acos(1.0 - 0.5 * (dr / r)**2)\n thetaMin = math.asin(maxDx / r / math.sqrt(2.0))\n thetaMax = math.acos(0.5 * maxDx / r)\n for theta in numpy.arange(thetaMin, thetaMax, dTheta):\n sinTh = math.sin(theta)\n dPhi = dTheta / sinTh\n phiMin = math.asin(0.5 * maxDx / (r * sinTh))\n phiMax = math.acos(0.5 * maxDx / (r * sinTh))\n for phi in numpy.arange(phiMin, phiMax, dPhi):\n points.append([r * math.sin(theta) * math.cos(phi),\n r * math.sin(theta) * math.sin(phi),\n r * math.cos(theta)])\n return points", "def find_local_min_x(self, Ns=None):\n if Ns is None:\n Ns = self.num\n with self.fix_evaluator():\n params = np.linspace(0, np.pi, Ns)\n dx_func = lambda param: self.diff(param)[0]\n dx = [dx_func(param) for param in params]\n # roots of dx are extrema of x\n roots = find_all_roots(params, dx, func=dx_func)\n if len(roots) < 3: # need at least two maxima and a minimum\n return None\n # take the interior root with smallest x-value\n return min(roots[1:-1], key=lambda param: self(param)[0])", "def compute_minimum_scale(self):\n dt = self.dt\n\n def func_to_solve(s):\n return self.wavelet.fourier_period(s) - 2 * dt\n\n return optimize.fsolve(func_to_solve, 1)[0]", "def get_quad_points():\n points = np.array(\n [[0.333333333333333333333333333333, 0.333333333333333333333333333333],\n [0.950275662924105565450352089520, 0.024862168537947217274823955239],\n [0.024862168537947217274823955239, 0.950275662924105565450352089520],\n [0.024862168537947217274823955239, 0.024862168537947217274823955239],\n [0.171614914923835347556304795551, 0.414192542538082326221847602214],\n [0.414192542538082326221847602214, 0.171614914923835347556304795551],\n [0.414192542538082326221847602214, 0.414192542538082326221847602214],\n [0.539412243677190440263092985511, 0.230293878161404779868453507244],\n [0.230293878161404779868453507244, 0.539412243677190440263092985511],\n [0.230293878161404779868453507244, 0.230293878161404779868453507244],\n [0.772160036676532561750285570113, 0.113919981661733719124857214943],\n [0.113919981661733719124857214943, 0.772160036676532561750285570113],\n [0.113919981661733719124857214943, 0.113919981661733719124857214943],\n [0.009085399949835353883572964740, 0.495457300025082323058213517632],\n [0.495457300025082323058213517632, 0.009085399949835353883572964740],\n [0.495457300025082323058213517632, 0.495457300025082323058213517632],\n [0.062277290305886993497083640527, 0.468861354847056503251458179727],\n [0.468861354847056503251458179727, 0.062277290305886993497083640527],\n [0.468861354847056503251458179727, 0.468861354847056503251458179727],\n [0.022076289653624405142446876931, 0.851306504174348550389457672223],\n [0.022076289653624405142446876931, 0.126617206172027096933163647918],\n [0.851306504174348550389457672223, 0.022076289653624405142446876931],\n [0.851306504174348550389457672223, 0.126617206172027096933163647918],\n [0.126617206172027096933163647918, 0.022076289653624405142446876931],\n [0.126617206172027096933163647918, 0.851306504174348550389457672223],\n 
[0.018620522802520968955913511549, 0.689441970728591295496647976487],\n [0.018620522802520968955913511549, 0.291937506468887771754472382212],\n [0.689441970728591295496647976487, 0.018620522802520968955913511549],\n [0.689441970728591295496647976487, 0.291937506468887771754472382212],\n [0.291937506468887771754472382212, 0.018620522802520968955913511549],\n [0.291937506468887771754472382212, 0.689441970728591295496647976487],\n [0.096506481292159228736516560903, 0.635867859433872768286976979827],\n [0.096506481292159228736516560903, 0.267625659273967961282458816185],\n [0.635867859433872768286976979827, 0.096506481292159228736516560903],\n [0.635867859433872768286976979827, 0.267625659273967961282458816185],\n [0.267625659273967961282458816185, 0.096506481292159228736516560903],\n [0.267625659273967961282458816185, 0.635867859433872768286976979827]]);\n\n w = np.array(\n [0.051739766065744133555179145422,\n 0.008007799555564801597804123460,\n 0.008007799555564801597804123460,\n 0.008007799555564801597804123460,\n 0.046868898981821644823226732071,\n 0.046868898981821644823226732071,\n 0.046868898981821644823226732071,\n 0.046590940183976487960361770070,\n 0.046590940183976487960361770070,\n 0.046590940183976487960361770070,\n 0.031016943313796381407646220131,\n 0.031016943313796381407646220131,\n 0.031016943313796381407646220131,\n 0.010791612736631273623178240136,\n 0.010791612736631273623178240136,\n 0.010791612736631273623178240136,\n 0.032195534242431618819414482205,\n 0.032195534242431618819414482205,\n 0.032195534242431618819414482205,\n 0.015445834210701583817692900053,\n 0.015445834210701583817692900053,\n 0.015445834210701583817692900053,\n 0.015445834210701583817692900053,\n 0.015445834210701583817692900053,\n 0.015445834210701583817692900053,\n 0.017822989923178661888748319485,\n 0.017822989923178661888748319485,\n 0.017822989923178661888748319485,\n 0.017822989923178661888748319485,\n 0.017822989923178661888748319485,\n 0.017822989923178661888748319485,\n 0.037038683681384627918546472190,\n 0.037038683681384627918546472190,\n 0.037038683681384627918546472190,\n 0.037038683681384627918546472190,\n 0.037038683681384627918546472190,\n 0.037038683681384627918546472190])*0.5;\n quad_x = np.copy(points[:,0])\n quad_y = np.copy(points[:,1])\n return (quad_x, quad_y, w)", "def exp_map(b, p):\n \"\"\"\n EXP_MAP The exponential map for n-spheres\n b is the base point (vector in R^n), norm(b)=1\n p is a point on the tangent plane to the hypersphere at b (also a vector in R^n)\n\n method can be 0 or 1:\n 0: hypersphere (e.g. quaternions)\n 1: dual quaternion\n \"\"\"\n if np.allclose(b, p):\n x = b\n else:\n theta = np.linalg.norm(b - p)\n dminusbx = np.sqrt(2 - 2. * np.cos(np.pi - theta))\n l = 2. * np.sin(theta / 2)\n alpha = np.arccos((4 + dminusbx ** 2 - l ** 2) / (4 * dminusbx))\n dpb = 2. 
* np.tan(alpha)\n v = b + ((p - b) / np.linalg.norm(p - b)) * dpb\n x = ((v + b) / np.linalg.norm(v + b)) * dminusbx - b\n\n return x", "def _get_surface_color_scalars(self, mol, solvent_radius, surface_points, smooth_input):\n grid = FutamuraHash(mol)\n T = grid.T\n radii = {'C':1.75,\n 'O':1.4,\n 'N':1.55,\n 'S':1.8,\n 'P':2.0,\n 'H':1.17,\n 'Z':3.0}\n default_distance = 1.8\n print 'locating nearest atoms'\n scalars = vtk.vtkIntArray()\n scalars.SetNumberOfComponents(1)\n # now locate the intersections\n number_index_map = {}\n for ind in range(len(mol.atoms)):\n number_index_map[mol.atoms[ind].atom_number] = ind\n \n last_atom = 'None'\n if smooth_input:\n new_points = []\n ptctr = 0\n for point in surface_points:\n x_val = y_val = z_val = 0\n # figure out which bin it goes in\n for x_ind in range(0, grid.volume_count_x):\n if point[0] < grid.volume_indices_x[x_ind]:\n break\n else:\n x_val = x_ind\n for y_ind in range(grid.volume_count_y):\n if point[1] < grid.volume_indices_y[y_ind]:\n break\n else:\n y_val = y_ind\n for z_ind in range(grid.volume_count_z):\n if point[2] < grid.volume_indices_z[z_ind]:\n break\n else:\n z_val = z_ind\n\n start_array = [0,0,0]\n end_array = [0,0,0]\n # figure out starts and ends\n counts = [grid.volume_count_x, grid.volume_count_y, grid.volume_count_z]\n keys = [x_val, y_val, z_val]\n for ind in [0,1,2]:\n if keys[ind] == 0:\n start_array[ind] = 0\n end_array[ind] = 2\n elif keys[ind] == counts[ind] - 1:\n start_array[ind] = keys[ind]-1\n end_array[ind] = keys[ind]+1\n else:\n start_array[ind] = keys[ind]-1\n end_array[ind] = keys[ind]+2\n min_dist = 1000.0\n sec_dist = 1000.0\n id2 = -1\n id = -1\n escape = 0 # turns 1 once the correct atom is found\n if smooth_input == 0:\n identification_distance = 0.1\n # figure out if its in range of the last atom chosen (arbitrary, but tends to speed up the calculations)\n if last_atom != 'None':\n dist = math.sqrt(pow(point[0]-last_atom.x,2) + pow(point[1]-last_atom.y,2) + pow(point[2]-last_atom.z,2))\n dif = abs(dist - radii.get(last_atom.atom_type[0], default_distance))\n if dif < identification_distance:\n id = last_atom.atom_number # assume this is it\n escape = 1\n \n if not escape:\n # now look for atoms in the same bin as the last atom\n ky = '%s %s %s'%(x_val,y_val,z_val)\n if ky in T.keys(): # first look in this atoms bin\n for atom in T[ky]:\n # do not retrieve if type H and protonation is turned off\n if self.hydrogens_on or ((not self.hydrogens_on) and atom.atom_type[0] != 'H'):\n dist = math.sqrt(pow(point[0]-atom.x,2) + pow(point[1]-atom.y,2) + pow(point[2]-atom.z,2))\n if abs(dist - radii.get(atom.atom_type[0], default_distance)) < identification_distance:\n id = atom.atom_number # assume this is it\n escape = 1\n break\n if not escape:\n for i in range(start_array[0], end_array[0]):\n for j in range(start_array[1], end_array[1]):\n for k in range(start_array[2], end_array[2]):\n key2 = '%s %s %s'%(i,j,k)\n #if key2 != ky:\n if key2 in T.keys():\n for atom in T[key2]:\n if self.hydrogens_on or ((not self.hydrogens_on) and atom.atom_type[0] != 'H'):\n dist = math.sqrt(pow(point[0]-atom.x,2) + pow(point[1]-atom.y,2) + pow(point[2]-atom.z,2))\n if not smooth_input:\n if abs(dist - radii.get(atom.atom_type[0], default_distance)) < identification_distance:\n id = atom.atom_number\n escape = 1\n break\n elif dist < min_dist:\n min_dist = dist\n id = atom.atom_number\n else:\n if dist < min_dist:\n sec_dist = min_dist\n id2 = id\n min_dist = dist\n id = atom.atom_number\n if escape:\n break\n if 
escape:\n break\n if escape:\n break\n # assign the index\n last_atom = mol.atoms[number_index_map[id]]\n scalars.InsertTuple1(ptctr, number_index_map[id])\n # smooth the data\n fitting_back_distance = 0.2\n if smooth_input:\n x2 = point[0]\n y2 = point[1]\n z2 = point[2]\n if id2 != -1: # more than one intersection is necessary\n sec_last_atom = mol.atoms[number_index_map[id2]]\n if abs(min_dist-radii.get(last_atom.atom_type[0], default_distance)) < fitting_back_distance: # if this atom is close enough\n if abs(sec_dist-radii.get(sec_last_atom.atom_type[0], default_distance)) > 0.4: # if second atom is far enough away\n r = radii.get(last_atom.atom_type[0], default_distance)\n d = min_dist\n x = last_atom.x\n y = last_atom.y\n z = last_atom.z\n x2 = ((r/d)*(point[0]-x)) + x\n y2 = ((r/d)*(point[1]-y)) + y\n z2 = ((r/d)*(point[2]-z)) + z\n new_points.append([x2,y2,z2])\n \n ptctr += 1\n if smooth_input:\n return scalars,new_points\n else:\n return scalars", "def vincenty_direct_solution(begin_lat, begin_lon, begin_azimuth, distance, a, b, f):\n # Convert latitude, longitude, azimuth of the begining point to radians\n lat1 = math.radians(begin_lat)\n lon1 = math.radians(begin_lon)\n alfa1 = math.radians(begin_azimuth)\n\n sinAlfa1 = math.sin(alfa1)\n cosAlfa1 = math.cos(alfa1)\n\n # U1 - reduced latitude\n tanU1 = (1 - f) * math.tan(lat1)\n cosU1 = 1 / math.sqrt(1 + tanU1 * tanU1)\n sinU1 = tanU1 * cosU1\n\n # sigma1 - angular distance on the sphere from the equator to begining point\n sigma1 = math.atan2(tanU1, math.cos(alfa1))\n\n # sinAlfa - azimuth of the geodesic at the equator\n sinAlfa = cosU1 * sinAlfa1\n cosSqAlfa = 1 - sinAlfa * sinAlfa\n uSq = cosSqAlfa * (a * a - b * b) / (b * b)\n A = 1 + uSq / 16384 * (4096 + uSq * (-768 + uSq * (320 - 175 * uSq)))\n B = uSq / 1024 * (256 + uSq * (-128 + uSq * (74 - 47 * uSq)))\n\n sigma = distance / (b * A)\n sigmap = 1\n\n while (math.fabs(sigma - sigmap) > 1e-12):\n cos2sigmaM = math.cos(2 * sigma1 + sigma)\n sinSigma = math.sin(sigma)\n cosSigma = math.cos(sigma)\n dSigma = B * sinSigma * (cos2sigmaM + B / 4 * (\n cosSigma * (-1 + 2 * cos2sigmaM * cos2sigmaM) - B / 6 * cos2sigmaM * (\n -3 + 4 * sinSigma * sinSigma) * (-3 + 4 * cos2sigmaM * cos2sigmaM)))\n sigmap = sigma\n sigma = distance / (b * A) + dSigma\n\n var_aux = sinU1 * sinSigma - cosU1 * cosSigma * cosAlfa1 # Auxiliary variable\n\n # Latitude of the end point in radians\n lat2 = math.atan2(sinU1 * cosSigma + cosU1 * sinSigma * cosAlfa1,\n (1 - f) * math.sqrt(sinAlfa * sinAlfa + var_aux * var_aux))\n\n lamb = math.atan2(sinSigma * sinAlfa1, cosU1 * cosSigma - sinU1 * sinSigma * cosAlfa1)\n C = f / 16 * cosSqAlfa * (4 + f * (4 - 3 * cosSqAlfa))\n L = lamb - (1 - C) * f * sinAlfa * (\n sigma + C * sinSigma * (cos2sigmaM + C * cosSigma * (-1 + 2 * cos2sigmaM * cos2sigmaM)))\n # Longitude of the second point in radians\n lon2 = (lon1 + L + 3 * math.pi) % (2 * math.pi) - math.pi\n\n # Convert to decimal degrees\n lat2_dd = math.degrees(lat2)\n lon2_dd = math.degrees(lon2)\n\n return lat2_dd, lon2_dd", "def getVisualFieldOrigin(self):\r\n\r\n if not hasattr(self, 'finalPatchesMarked'):\r\n raise LookupError('Please mark the final patches first!!')\r\n\r\n if not hasattr(self, 'altPosMapf'):\r\n _ = self._getSignMap()\r\n\r\n try:\r\n V1 = self.finalPatchesMarked['V1'].array.astype(np.float)\r\n LM = self.finalPatchesMarked['LM'].array.astype(np.float)\r\n RL = self.finalPatchesMarked['RL'].array.astype(np.float)\r\n\r\n overlap = 0 # number of overlaping pixels\r\n iterNum = 1 # number 
of iteration\r\n while overlap < 1:\r\n # print 'Iteration number for finding overlapping pixel:', iterNum\r\n V1 = ni.morphology.binary_dilation(V1, iterations=1).astype(np.float)\r\n LM = ni.morphology.binary_dilation(LM, iterations=1).astype(np.float)\r\n RL = ni.morphology.binary_dilation(RL, iterations=1).astype(np.float)\r\n totalField = V1 + LM + RL\r\n # plt.imshow(totalField)\r\n overlap = len(np.argwhere(totalField == 3))\r\n iterNum += 1\r\n # print 'Number of overlapping pixels:', overlap\r\n # plt.show()\r\n\r\n altPosOrigin = np.mean(self.altPosMapf[totalField == 3], axis=0)\r\n aziPosOrigin = np.mean(self.aziPosMapf[totalField == 3], axis=0)\r\n\r\n except KeyError:\r\n print('Can not find necessary visual areas (V1, LM, RL) for normalization. \\nSetting origins to 0 ...')\r\n altPosOrigin = 0.\r\n aziPosOrigin = 0.\r\n\r\n return altPosOrigin, aziPosOrigin", "def A_coefficients_ellipsoid(v, DD, bDDisDelta=False):\n #v can be given as an array with X/Y/Z cartesian dimensions being the last.\n #\"\"\"\n if bDDisDelta:\n delta=DD\n else:\n delta=Ddelta_ellipsoid(dd)\n #v=_sanitise_v(v)\n #v2=np.square(v)\n #v4=np.square(v2)\n #fact2=np.multiply(0.75,np.sum(v4))-0.25\n v2 = [ v[i]*v[i] for i in range(3) ]\n v4 = [ v2[i]*v2[i] for i in range(3) ]\n fact2 = 0.25*( 3.0*(v4[0]+v4[1]+v4[2])-1.0)\n fact3 = 1.0/12.0*(delta[0]*(3*v4[0]+6*v2[1]*v2[2]-1) + delta[1]*(3*v4[1]+6*v2[0]*v2[2]-1) + delta[2]*(3*v4[2]+6*v2[0]*v2[1]-1))\n A=np.zeros(5)\n A[0]= 3*v2[1]*v2[2]\n A[1]= 3*v2[0]*v2[2]\n A[2]= 3*v2[0]*v2[1]\n A[3]= fact2-fact3\n A[4]= fact2+fact3\n return A", "def eigCent(A):\n lam,V = np.linalg.eig(A)\n v = V[:,np.argmax(lam)]\n v = v*(1./v[0])\n return v", "def vp_from_ke(m):\n return (m[0, 0]/m[2,0], m[1,0]/m[2,0])", "def ellipsoid_mean(coords, stack, meshgrid, ij_rad=7, z_rad=2):\n # Equation: (x-x0)^2 + (y-y0)^2 + a(z-z0)^2 = r^2\n r = ij_rad # r is just more intuitive for me to think about...\n a = (r ** 2) / (z_rad ** 2)\n z0, i0, j0 = coords\n valsgrid = np.sqrt((a * ((meshgrid[0] - z0) ** 2)) + ((meshgrid[1] - i0) ** 2) + ((meshgrid[2] - j0) ** 2))\n pixels = stack[valsgrid <= r]\n return pixels.mean()", "def test_extract_geometry():\r\n file_path = 'C:/Oregon_State/Spring_2019/Soft_dev_eng/StoveOpt/tests/Stove_test_Geometry.xlsx'\r\n pt1x, pt1z, pt1y, pt2x, pt2z, pt2y, pt3x, pt3z, pt3y, pt4x, pt4z, pt4y, pt5x, pt5z, pt5y, pt6x, pt6z, pt6y, pt7x, pt7z, pt7y, pt8x, pt8z, pt8y, pt9x, pt9z, pt9y, pt10x, pt10z, pt10y, pt11x, pt11z, pt11y, pt12x, pt12z, pt12y, pt13x, pt13z, pt13y, pt14x, pt14z, pt14y, pt15x, pt15z, pt15y, pt16x, pt16z, pt16y = extract_geometry(file_path)\r\n assert pt2x == 0.1\r\n assert pt2z == 0\r\n assert pt2y == 0\r\n assert pt3x == 0\r\n assert pt3z == 0.15\r\n assert pt3y == 0\r\n assert pt4x == 0.1\r\n assert pt4z == 0.15\r\n assert pt4y == 0\r\n assert pt5x == 0.1\r\n assert pt5z == 0.16\r\n assert pt5y == 0\r\n assert pt6x == 0\r\n assert pt6z == 0.16\r\n assert pt6y == 0\r\n assert pt7x == 0\r\n assert pt7z == 0.3\r\n assert pt7y == 0\r\n assert pt8x == 0.1\r\n assert pt8z == 0.3\r\n assert pt8y == 0\r\n assert pt9x == 0.17\r\n assert pt9z == 0.3\r\n assert pt9y == 0\r\n assert pt10x == -0.07\r\n assert pt10z == 0.3\r\n assert pt10y == 0\r\n assert pt11x == -0.07\r\n assert pt11z == 0.5\r\n assert pt11y == 0\r\n assert pt12x == -.04\r\n assert pt12z == 0.5\r\n assert pt12y == 0\r\n assert pt13x == 0.14\r\n assert pt13z == 0.5\r\n assert pt13y == 0\r\n assert pt14x == 0.17\r\n assert pt14z == 0.5\r\n assert pt14y == 0\r\n assert pt15x == -0.04\r\n assert 
pt15z == 0.33\r\n assert pt15y == 0\r\n assert pt16x == 0.14\r\n assert pt16z == 0.33\r\n assert pt16y == 0\r\n #assert U_100x == 1\r\n #assert U_100y == 0\r\n #assert U_100z == 0\r", "def findNearset(x,y,lon,lat):\n dist = np.sqrt( (lon - x)**2 + (lat - y)**2)\n\n return np.argwhere(dist==dist.min())[0][0]", "def mercier(self):\n\n # See Overleaf note \"Mercier criterion near the magnetic axis- detailed notes\".\n # See also \"20200604-02 Checking sign in Mercier DGeod near axis.docx\"\n\n # Shorthand:\n d_l_d_phi = self.d_l_d_phi\n B0 = self.B0\n G0 = self.G0\n p2 = self.p2\n etabar = self.etabar\n curvature = self.curvature\n sigma = self.sigma\n iotaN = self.iotaN\n iota = self.iota\n pi = np.pi\n\n #integrand = d_l_d_phi * (Y1c * Y1c + X1c * (X1c + Y1s)) / (Y1c * Y1c + (X1c + Y1s) * (X1c + Y1s))\n integrand = d_l_d_phi * (etabar*etabar*etabar*etabar + curvature*curvature*curvature*curvature*sigma*sigma + etabar*etabar*curvature*curvature) \\\n / (etabar*etabar*etabar*etabar + curvature*curvature*curvature*curvature*(1+sigma*sigma) + 2*etabar*etabar*curvature*curvature)\n\n integral = np.sum(integrand) * self.d_phi * self.nfp * 2 * pi / self.axis_length\n\n #DGeod_times_r2 = -(2 * sG * spsi * mu0 * mu0 * p2 * p2 * G0 * G0 * G0 * G0 * etabar * etabar &\n self.DGeod_times_r2 = -(2 * mu0 * mu0 * p2 * p2 * G0 * G0 * G0 * G0 * etabar * etabar \\\n / (pi * pi * pi * B0 * B0 * B0 * B0 * B0 * B0 * B0 * B0 * B0 * B0 * iotaN * iotaN)) \\\n * integral\n\n self.d2_volume_d_psi2 = 4*pi*pi*abs(G0)/(B0*B0*B0)*(3*etabar*etabar - 4*self.B20_mean/B0 + 2 * (self.G2 + iota * self.I2)/G0)\n\n self.DWell_times_r2 = (mu0 * p2 * abs(G0) / (8 * pi * pi * pi * pi * B0 * B0 * B0)) * \\\n (self.d2_volume_d_psi2 - 8 * pi * pi * mu0 * p2 * abs(G0) / (B0 * B0 * B0 * B0 * B0))\n\n self.DMerc_times_r2 = self.DWell_times_r2 + self.DGeod_times_r2", "def getBoundingBoxCenter(self, shell=False, *args, **kwargs):\n if shell:\n self.grabShell()\n uvBB = pm.polyEvaluate(boundingBoxComponent2d=True)\n uvCenter = [((uvBB[0][1] + uvBB[0][0]) / 2), ((uvBB[1][1] + uvBB[1][0]) / 2)]\n return uvCenter", "def find_hull_vertices(points: np.ndarray) -> np.ndarray:\n M = 3\n N = points.shape[0]\n for i in range(4, N):\n while ccw(points[M], points[M - 1], points[i]) >= 0:\n M -= 1\n\n M += 1\n swap(points, M, i)\n\n return points[1:M + 1]", "def argminY( self ):\n min = 1e30\n for i in range( 0, self.GetN() ):\n p = ( ROOT.Double(), ROOT.Double() )\n self.GetPoint( i, p[0], p[1] )\n if p[1] < min: min = p[1]\n return min", "def voxelize_points(points, pc_bbox_center, voxel_resolution, num_voxels_per_dim, pc_center_in_voxel_grid):\n\n # this is the voxel grid we are going to return\n voxel_grid = np.zeros((num_voxels_per_dim,\n num_voxels_per_dim,\n num_voxels_per_dim), dtype=np.bool)\n\n # take the points and convert them from meters to voxel space coords\n centered_scaled_points = np.floor(\n (points - np.array(pc_bbox_center) + np.array(\n pc_center_in_voxel_grid) * voxel_resolution) / voxel_resolution)\n\n # remove any points that are beyond the area that falls in our voxel grid\n mask = centered_scaled_points.max(axis=1) < num_voxels_per_dim\n centered_scaled_points = centered_scaled_points[mask]\n\n # if we don't have any more points that fall within our voxel grid\n # return an empty grid\n if centered_scaled_points.shape[0] == 0:\n return voxel_grid\n\n # remove any points that are outside of the region we are voxelizing\n # as they are to small.\n mask = centered_scaled_points.min(axis=1) > 0\n centered_scaled_points = 
centered_scaled_points[mask]\n\n # if we don't have any more points that fall within our voxel grid,\n # return an empty grid\n if centered_scaled_points.shape[0] == 0:\n return voxel_grid\n\n # treat our remaining points as ints, since we are already in voxel coordinate space.\n # this points shoule be things like (5, 6, 7) which represent indices in the voxel grid.\n csp_int = centered_scaled_points.astype(int)\n\n # create a mask from our set of points.\n mask = (csp_int[:, 0], csp_int[:, 1], csp_int[:, 2])\n\n # apply the mask to our voxel grid setting voxel that had points in them to be occupied\n voxel_grid[mask] = 1\n\n return voxel_grid", "def lowerlim(x, y, z, a, b, c):\n if x/a + y/b + z/c > 1:\n B = a + b + c - x - y - z\n C = a*b + a*c + b*c - a*y - a*z - b*x - b*z - c*x - c*y\n D = a*b*c - a*b*z - a*c*y - b*c*x\n r = np.roots([1,B,C,D])\n ll = r[~np.iscomplex(r) & (r>0.)]\n return ll[0].real\n else:\n return 0.", "def _get_init_chem_pot(self):\n num_singlets = len(self._ground_states) - 1\n matrix = np.zeros((num_singlets, num_singlets))\n energy_vector = np.zeros(num_singlets)\n gs_energies = self._get_gs_energies()\n for i in range(num_singlets):\n for j in range(num_singlets):\n ref_singlet = self._ground_states[0][\"cf\"][self._singlet_names[j]]\n singlet = self._ground_states[i + 1][\"cf\"][self._singlet_names[j]]\n matrix[i, j] = (ref_singlet - singlet)\n energy_ref = gs_energies[0] / len(self._ground_states[0][\"atoms\"])\n energy = gs_energies[i + 1] / len(self._ground_states[i + 1][\"atoms\"])\n energy_vector[i] = energy_ref - energy\n\n mu_boundary = np.linalg.solve(matrix, energy_vector)\n return mu_boundary", "def luneberg(self, mx, my, R):\n e = np.ones((self.nx, self.ny))\n for qx in range(mx-R, mx+R):\n for qy in range(my-R, my+R):\n r = int(math.sqrt((qx-mx)**2 + (qy-my)**2))\n if r>R: continue\n e[qx-1, qy-1] = 2 - (r/R)**2\n\n return e*const.epsilon_0", "def get_center(self):\n return center_points(np.expand_dims(self.best_box, axis=0))[0]", "def test_one_center(self):\n sv=system_vars_c().init_xyzlike([ [8, [0.0, 0.0, 0.0]]])\n atom2rcut=np.array([5.0])\n g = dft.gen_grid.Grids(sv)\n g.level = 1 # precision as implemented in pyscf\n g.radi_method=leggauss_ab\n g.build(atom2rcut=atom2rcut)\n\n #print( max( np.linalg.norm(g.coords, axis=1) ) )\n #print( g.weights.sum(), 4.0 *np.pi*5.0**3 / 3.0 )\n self.assertAlmostEqual(max( np.linalg.norm(g.coords, axis=1) ), 4.9955942742763986)\n self.assertAlmostEqual(g.weights.sum(), 4.0 *np.pi*5.0**3 / 3.0)\n self.assertEqual(len(g.weights), 6248)", "def particle_LJV(R,N,D):\n b = np.zeros(N)\n for i in range(N):\n x = R[0,np.arange(N)!=i]-R[0,i]\n y = R[1,np.arange(N)!=i]-R[1,i]\n z = R[2,np.arange(N)!=i]-R[2,i]\n [x,y,z] = minimal_image(x,y,z,D)\n c = np.stack((x,y,z))\n r = np.sqrt(np.sum(c**2,0))\n b[i] = np.sum(4*((1/r)**12-(1/r)**6))\n Uv = np.sum(b)\n return Uv", "def el2rv(mu,a,e,i,capom,om,f):\n\n prec = 1.0e-13 #user can change this if more precision needed (just runs slower)\n\n #compute the unit vector\n u = om + f\n xhat = np.cos(u)*np.cos(capom) - np.cos(i)*np.sin(capom)*np.sin(u)\n yhat = np.cos(u)*np.sin(capom) + np.cos(i)*np.cos(capom)*np.sin(u)\n zhat = np.sin(i)*np.sin(u)\n\n #compute the angular momentum vector (unit vector)\n hx = np.sin(capom)*np.sin(i)\n hy = -np.cos(capom)*np.sin(i)\n hz = np.cos(i)\n\n #assuming not parabolic, here the magnitudes of the vectors\n r = a * (1.0 - e*e) / (1.0 + e*np.cos(f))\n h = ( mu*a*(1.0 - e*e) )**0.5\n\n #position vectors\n x = r * xhat\n y = r * yhat\n z = r * 
zhat\n\n #compute components of vector theta hat\n thx = hy * zhat - hz * yhat\n thy = hz * xhat - hx * zhat\n thz = hx * yhat - hy * xhat\n\n #obtain the velocity vector's components and calculate v\n thdot = h/(r*r)\n rdot = e*mu*np.sin(f)/h\n\n vx = r * thdot * thx + rdot * xhat\n vy = r * thdot * thy + rdot * yhat\n vz = r * thdot * thz + rdot * zhat\n\n return x,y,z", "def _generate_boxcar_volume(x, radius, center):\n\n # Form cubic position array for x, y, z\n X_cube = x.copy()\n\n\n # Find all points inside boxcar inside the cube\n vol = np.sqrt((X_cube - center) ** 2 / radius ** 2)\n vol = vol <= 1\n\n return vol.astype(float)", "def get_minimum_air_volume(v_vent: np.ndarray) -> float:\n\n return v_vent.sum()", "def energy_logcenter(e_edges):\n return np.sqrt(e_edges[:-1] * e_edges[1:])", "def GetParametricCenter(self, p_float=..., p_float=..., p_float=...):\n ...", "def vincenty_direct_solution(begin_lat, begin_lon, begin_azimuth, distance, a, b, f):\n # Convert latitude, longitude, azimuth of the begining point to radians\n lat1 = math.radians(begin_lat)\n lon1 = math.radians(begin_lon)\n alfa1 = math.radians(begin_azimuth)\n\n sinAlfa1 = math.sin(alfa1)\n cosAlfa1 = math.cos(alfa1)\n \n # U1 - reduced latitude\n tanU1 = (1 - f) * math.tan(lat1)\n cosU1 = 1 / math.sqrt(1 + tanU1 * tanU1)\n sinU1 = tanU1 * cosU1\n \n # sigma1 - angular distance on the sphere from the equator to begining point\n sigma1 = math.atan2(tanU1, math.cos(alfa1))\n \n # sinAlfa - azimuth of the geodesic at the equator\n sinAlfa = cosU1 * sinAlfa1\n cosSqAlfa = 1 - sinAlfa * sinAlfa\n uSq = cosSqAlfa * (a * a - b * b) / (b * b)\n A = 1 + uSq/16384 * (4096 + uSq * (-768 + uSq * (320 - 175 * uSq)))\n B = uSq/1024 * (256 + uSq * (-128 + uSq * (74 - 47 * uSq)))\n \n sigma = distance / (b * A)\n sigmap = 1\n \n while (math.fabs(sigma - sigmap) > 1e-12):\n cos2sigmaM = math.cos(2 * sigma1 + sigma)\n sinSigma = math.sin(sigma)\n cosSigma = math.cos(sigma)\n dSigma = B*sinSigma*(cos2sigmaM+B/4*(cosSigma*(-1+2*cos2sigmaM*cos2sigmaM)-B/6*cos2sigmaM*(-3+4*sinSigma*sinSigma)*(-3+4*cos2sigmaM*cos2sigmaM))) \n sigmap = sigma\n sigma = distance / (b * A) + dSigma\n \n var_aux = sinU1 * sinSigma - cosU1 * cosSigma * cosAlfa1\n \n # Latitude of the end point in radians\n lat2 = math.atan2(sinU1 * cosSigma + cosU1 * sinSigma*cosAlfa1, (1 - f)*math.sqrt(sinAlfa * sinAlfa + var_aux*var_aux))\n \n lamb = math.atan2 (sinSigma * sinAlfa1, cosU1 * cosSigma - sinU1 * sinSigma * cosAlfa1)\n C = f / 16 * cosSqAlfa * (4 + f * (4 - 3 * cosSqAlfa))\n L = lamb - (1 - C) * f * sinAlfa *(sigma + C * sinSigma * (cos2sigmaM + C * cosSigma * (-1 + 2 * cos2sigmaM * cos2sigmaM)))\n # Longitude of the second point in radians\n lon2 = (lon1 + L +3*math.pi)%(2*math.pi) - math.pi\n \n # Convert to decimal degrees\n lat2_dd = math.degrees(lat2) \n lon2_dd = math.degrees(lon2)\n \n return lat2_dd, lon2_dd", "def propanolLowest():\n coords = [\n [-1.9554949371, 0.1467391618, 0.0031595607],\n [-0.5906278346, -0.5279387138, -0.0201649611],\n [0.5440986558, 0.4958779663, 0.0283462055],\n [0.4812068385, 1.1678478833, -0.8308000219],\n [0.4590669813, 1.0993020658, 0.9450529713],\n [1.8195161785, -0.0957487212, -0.0534239359],\n [1.9103706588, -0.7338049177, 0.6631507673],\n [-0.5004127933, -1.2028008461, 0.8364936998],\n [-0.4854009629, -1.1250023438, -0.9282499098],\n [-2.7476736372, -0.5972665554, -0.0242488945],\n [-2.0700756998, 0.8040326560, -0.8554507953],\n [-2.0722381370, 0.7410005769, 0.9069567477],\n ]\n\n symbols = [\n \"C\",\n \"C\",\n \"C\",\n 
\"H\",\n \"H\",\n \"O\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n ]\n\n atoms = []\n for i, _ in enumerate(coords):\n atoms.append(Atom(symbols[i], position=coords[i]))\n return Molecule(symbols=atoms)", "def midpoint(self) -> Point:\n l = self._line.meet(infty_hyperplane(self.dim))\n return harmonic_set(*self.vertices, l)", "def midpoint_of_points(pnts: Iterable[Point]) -> Point:\n num = len(pnts)\n x = sum(pnt.x for pnt in pnts)/num\n y = sum(pnt.y for pnt in pnts)/num\n z = sum(pnt.z for pnt in pnts)/num\n return Point(x, y, z)", "def arroots(self):\n return self.arpoly.roots()", "def sphere(self, x):\r\n # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]\r\n return sum((x+0)**2)", "def minor_extent(self) -> complex:\n return min((self.max() - self.null, self.null - self.min()))", "def calculate_zoom(self):\n distances = [geopy.distance.geodesic(self.centre_location, centroid).km for centroid in self.centroids]\n a = 4 / 20000\n distances = [1 + 4 - a * distance for distance in distances]\n print(min(distances))\n return min(distances)" ]
[ "0.62986", "0.62653875", "0.5862462", "0.5844809", "0.5837435", "0.58371854", "0.5790067", "0.5785195", "0.5721557", "0.5710913", "0.56959003", "0.5689655", "0.5679842", "0.56546134", "0.56341815", "0.56135213", "0.5597461", "0.55802417", "0.5576471", "0.55697113", "0.55599046", "0.55515724", "0.5536294", "0.5526614", "0.5506065", "0.5504257", "0.5500207", "0.5479397", "0.5460376", "0.54458547", "0.5422736", "0.54198897", "0.54151154", "0.5409069", "0.53989625", "0.5388078", "0.538427", "0.53797317", "0.5371068", "0.5366723", "0.5361806", "0.535751", "0.5357037", "0.53567636", "0.5352391", "0.5349062", "0.53483313", "0.5346856", "0.53451216", "0.5344408", "0.53426045", "0.5335079", "0.53278637", "0.5322489", "0.53223747", "0.53198385", "0.5309724", "0.53063", "0.52993417", "0.5293217", "0.52809095", "0.5276641", "0.52657944", "0.5253261", "0.5248052", "0.5244626", "0.5240697", "0.5239202", "0.5235706", "0.52338797", "0.52216834", "0.5216113", "0.5214885", "0.52124864", "0.5211912", "0.5204802", "0.5196833", "0.51964974", "0.5195072", "0.5194902", "0.518987", "0.5188738", "0.518155", "0.51794374", "0.51778316", "0.51775485", "0.5173396", "0.5170986", "0.51650393", "0.51637703", "0.5162977", "0.5162521", "0.5161916", "0.5161238", "0.5160872", "0.5158863", "0.51533467", "0.51511943", "0.51509106", "0.5149747" ]
0.6222927
2
Method called to collect data and send to Prometheus
def get_commit_time(self, metric): session = requests.Session() session.verify = False logging.debug("metric.repo_project %s" % (metric.repo_project)) logging.debug("metric.git_api %s" % (self._git_api)) git_server = self._git_api if ( "github" in git_server or "bitbucket" in git_server or "gitlab" in git_server or "gitea" in git_server ): logging.warn("Skipping non Azure DevOps server, found %s" % (git_server)) return None # Private or personal token # Fill in with your personal access token and org URL personal_access_token = self._token organization_url = self._git_api # Create a connection to the org credentials = BasicAuthentication("", personal_access_token) connection = Connection(base_url=organization_url, creds=credentials) # Get a client (the "git" client provides access to commits) git_client = connection.clients.get_git_client() commit = git_client.get_commit( commit_id=metric.commit_hash, repository_id=metric.repo_project, project=metric.repo_project, ) logging.debug("Commit %s" % ((commit.committer.date).isoformat("T", "auto"))) if hasattr(commit, "innerExepction"): # This will occur when trying to make an API call to non-Github logging.warning( "Unable to retrieve commit time for build: %s, hash: %s, url: %s. Got http code: %s" % ( metric.build_name, metric.commit_hash, metric.repo_url, str(commit.message), ) ) else: try: metric.commit_time = commit.committer.date.isoformat("T", "auto") logging.info("metric.commit_time %s" % (str(metric.commit_time)[:19])) logging.info("self._timedate_format %s" % (self._timedate_format)) metric.commit_timestamp = pelorus.convert_date_time_to_timestamp( (str(metric.commit_time)[:19]), self._timedate_format ) except Exception: logging.error( "Failed processing commit time for build %s" % metric.build_name, exc_info=True, ) logging.debug(commit) raise return metric
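The method above ends by turning the committer's ISO date into an epoch value (`metric.commit_timestamp`), which is what a Prometheus exporter would ultimately expose. The sketch below illustrates only that last step plus a toy custom collector; it is not the exporter's actual code. The format string `"%Y-%m-%dT%H:%M:%S"`, the helper's exact behavior (`pelorus.convert_date_time_to_timestamp` and `self._timedate_format` are not shown in this record), and the `commit_timestamp` metric name are all assumptions made for illustration.

# A minimal sketch under the assumptions stated above, using only stdlib datetime
# and the prometheus_client custom-collector API.
from datetime import datetime, timezone

from prometheus_client.core import GaugeMetricFamily


def convert_date_time_to_timestamp(date_time: str, fmt: str = "%Y-%m-%dT%H:%M:%S") -> float:
    # Parse the first 19 characters of the ISO committer date (mirroring the
    # str(commit_time)[:19] slice above) and return Unix seconds, UTC assumed.
    return datetime.strptime(date_time[:19], fmt).replace(tzinfo=timezone.utc).timestamp()


class CommitTimeCollector:
    # Toy custom collector: yields one gauge sample per collected build/commit pair.
    def __init__(self, metrics):
        self._metrics = metrics  # iterable of objects shaped like `metric` above

    def collect(self):
        gauge = GaugeMetricFamily(
            "commit_timestamp",  # illustrative metric name, not taken from the source
            "Unix timestamp of the commit that produced a build",
            labels=["build_name", "commit_hash"],
        )
        for m in self._metrics:
            gauge.add_metric([m.build_name, m.commit_hash], m.commit_timestamp)
        yield gauge


# Usage sketch: prometheus_client.core.REGISTRY.register(CommitTimeCollector(collected_metrics))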
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self):\r\n self.collect_data()", "def _collect_data(self) -> None:\n self.set_websocket_data()\n self.set_stratum_data()\n self.set_cache_data()\n self.collect_peer_connection_metrics()\n self.set_tx_storage_data()", "def fetch(self):\n\n\n # Update Prometheus metrics with application metrics\n self.current_requests.set(get_current_requests())\n self.total_uptime.set(get_uptime())\n self.health.state(get_health())", "def _dispatch_metrics(self, payload):\n for item in payload:\n try:\n self._ingest.send(gauges=item['gauges'], counters=item['counters'])\n except Exception as e:\n self._logger.error(\"Exception while sending payload to ingest : {0}\".format(e))", "def _process(self):\n export_collect_data(self.kwargs[\"collect\"])", "def _proc_collect(self) -> None:\n while True:\n self.process_num_threads.set(self._process.num_threads())\n self.process_memory_bytes.set(self._process.memory_info().rss)\n self.process_cpu_percent.set(self._process.cpu_percent())\n\n sleep(self.process_scrape_interval)", "def collectData(self):\n\n self.data.datahash = {} # dict of system data\n\n vmstat_dict = self._getvmstat()\n if vmstat_dict:\n self.data.datahash.update(vmstat_dict)\n\n uptime_dict = self._getuptime()\n if uptime_dict:\n self.data.datahash.update(uptime_dict)\n\n log.log( \"<system>system.collectData(): new system list created\", 7 )", "def collect(self): # pylint: disable=no-self-use\n start = time.time()\n for metric in metric_rq():\n yield metric\n\n gauge = GaugeMetricFamily(\n \"nautobot_rq_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge", "def collect_data(endless):\r\n click.echo(\"start collecting data ...\")\r\n _collect_data(endless)", "def collect_data(self):\n self.logger.info(\"Waiting for incoming data ...\")\n while True:\n item = self.in_queue.get()\n self.logger.info(\"Received data!\")\n self.collector_process_data(item)", "def _push_to_server(self) -> None:\n timestamp = int(arrow.get().float_timestamp * 1000)\n\n datapoints: List[Dict[str, Union[str, List[Tuple[float, float]]]]] = []\n\n for metric in REGISTRY.collect():\n if type(metric) == Metric and metric.type in [\"gauge\", \"counter\"]:\n if len(metric.samples) == 0:\n continue\n\n external_id = self.external_id_prefix + metric.name\n datapoints.append({\"externalId\": external_id, \"datapoints\": [(timestamp, metric.samples[0].value)]})\n\n self.cdf_client.datapoints.insert_multiple(datapoints)\n self.logger.debug(\"Pushed metrics to CDF tenant '%s'\", self._cdf_project)", "def collector_process_data(self, data):\n for c in clients:\n c.on_message(json.dumps(data))", "def _process(self):\n self.kwargs[\"collect\"].process_scan_form_data(self.kwargs[\"data\"])", "def send_metrics(self):\n metrics = self.get_metrics()\n if not metrics:\n return\n\n for mkey, metric in metrics.items():\n for mname, mval in metric.items():\n try:\n self.agent.record_custom_metric(self.convert_metric_name(mkey, mname), mval, None)\n except Exception as e:\n print_(e)", "def mymetrics(): \n _update_metric_counters()\n logging.debug(prom_objects_seen.collect())\n return flask.Response(generate_latest(), mimetype='text/plain')", "def read_callback(data=None):\n\n hits_by_domain = get_hits_by_domain()\n\n if not hits_by_domain:\n collectd.info('hits_by_domain not collected successfully')\n pass\n else:\n for key in hits_by_domain:\n metric = collectd.Values()\n metric.plugin = 
'hits_by_domain'\n metric.type = 'count'\n metric.type_instance = key\n metric.values = [hits_by_domain[key]]\n metric.dispatch()", "def process_data():\n if ARG.WRITE:\n producer = KafkaProducer(value_serializer=lambda v: json.dumps(v).encode('utf-8'),\n key_serializer=lambda v: json.dumps(v).encode('utf-8'),\n bootstrap_servers=BROKERS)\n datestruct = dict()\n fetch_counts(datestruct)\n uts = dict()\n for sub in CAT_SUBGROUPS:\n for user in CAT_SUBGROUPS[sub]:\n uts[user] = sub\n epoch_seconds = time.time()\n for user in sorted(datestruct):\n payload = {'time': epoch_seconds}\n wuser = user.split('@')[0]\n workday = call_responder('config', 'config/workday/' + wuser)\n payload['user'] = wuser\n if 'config' in workday:\n payload['organization'] = workday['config']['organization']\n if payload['organization'] == 'Connectome Annotation Team':\n payload['subgroup'] = uts[wuser] if wuser in uts else ''\n else:\n LOGGER.warning(\"Could not find user %s\", wuser)\n payload['organization'] = 'unknown'\n for key in OPERATIONS:\n payload['operation'] = key\n payload['count'] = datestruct[user][key]\n if ARG.WRITE:\n LOGGER.debug(json.dumps(payload))\n future = producer.send(ARG.TOPIC, payload, str(datetime.datetime.now()))\n try:\n future.get(timeout=10)\n except KafkaError:\n LOGGER.critical(\"Failed publishing to %s\", ARG.TOPIC)\n else:\n LOGGER.info(json.dumps(payload))", "def collect(self, article_data):\n self._parser_data_collector.send(article_data)", "def handleTelemetry(self):\n\t\tprint(\"*****************handleTelemetry\")\n\t\tself.cpuUtilPct = self.cpuUtilTask.getTelemetryValue() # Get CPU usage performance\n\t\tself.memUtilPct = self.memUtilTask.getTelemetryValue() # Get Memory usage performance\n\t\tsysData = SystemPerformanceData()\n\t\tsysData.setCpuUtilization(self.cpuUtilPct)\n\t\tsysData.setMemoryUtilization(self.memUtilPct)\n\t\tself.dataMessageListener.handleSystemPerformanceMessage(sysData)\n\t\tlogging.info('CPU utilization is %s percent, and memory utilization is %s percent.', str(self.cpuUtilPct), str(self.memUtilPct))\n\t\t# Log out the usage performance", "def _collect_data(endless=True):\r\n conf = Config.Config().get_config()\r\n\r\n Collector.DataCollector(conf.get('db', 'path'), 'config')\r\n\r\n if endless:\r\n try:\r\n while True:\r\n time.sleep(0.5)\r\n except KeyboardInterrupt:\r\n click.echo(\"Process terminated\")\r\n exit(0)", "def collect(self):\n\n collector = {}\n for gather in self.gathers:\n try:\n stats = gather.run_single_cycle(collector=collector)\n if stats:\n collector.update(stats)\n except Exception as ex:\n self._logger.exception(\n \"Exception while collecting metrics for PID: %s of type: %s. 
Details: %s\",\n self.pid,\n type(gather),\n repr(ex),\n )\n return collector", "def run(self):\n\n # TODO: Logic to get data, enforce request limits, and filter out duplicates", "def dataStats(self):\n print (\"Performing statistical analysis of the data\")\n # stuff to do", "def collect_data(self,sensation,action,reward,next_sensation):\n pass", "def gather_sample(self):\n\n for _pid in self._select_processes():\n if not self.__trackers.get(_pid):\n self.__trackers[_pid] = ProcessTracker(_pid, self._logger, self.__id)\n\n self._reset_absolute_metrics()\n\n for _tracker in self.__trackers.values():\n _metrics = _tracker.collect()\n self.record_metrics(_tracker.pid, _metrics)\n\n self._calculate_aggregated_metrics()\n self._remove_dead_processes()\n\n self.print_metrics()", "async def request_data(\r\n self,\r\n **kwargs, # pylint: disable=unused-argument\r\n ) -> None:\r\n await self._send(\"report 2\")\r\n\r\n if self.device_info.is_meter_integrated():\r\n await self._send(\"report 3\")\r\n\r\n if self.device_info.is_data_logger_integrated():\r\n await self._send(\"report 100\")", "def _start_proc_collector(self) -> None:\n thread = threading.Thread(target=self._proc_collect, name=\"ProcessMetricsCollector\", daemon=True)\n thread.start()", "def compute_metrics(self):\n pass", "def collect(self):\n if not self._btime:\n return\n\n try:\n with open(\"/proc/self/stat\", 'rb') as stat:\n parts = (stat.read().split(b')')[-1].split())\n\n self.process_metrics[\"vmem\"].set(\"\", float(parts[20]))\n self.process_metrics[\"rss\"].set(\"\", float(parts[21]) * _PAGESIZE)\n start_time_secs = float(parts[19]) / self._ticks\n self.process_metrics[\"start_time\"].set(\n \"\", start_time_secs + self._btime\n )\n utime = float(parts[11]) / self._ticks\n stime = float(parts[12]) / self._ticks\n self.process_metrics[\"cpu\"].set(\"\", utime + stime)\n except IOError:\n pass\n\n try:\n with open(\"/proc/self/limits\", 'rb') as limits:\n for line in limits:\n if line.startswith(b'Max open file'):\n self.process_metrics[\"max_fds\"].set(\n \"\", float(line.split()[3])\n )\n break\n\n self.process_metrics[\"open_fds\"].set(\n \"\", float(len(os.listdir(\"/proc/self/fd\")))\n )\n except (IOError, OSError):\n pass\n\n # Update gc metrics if enabled.\n if \"collected\" in self.process_metrics:\n for generation, stat in enumerate(gc.get_stats()):\n generation = {\"generation\": str(generation)}\n self.process_metrics[\"collected\"].set(\n generation, stat[\"collected\"]\n )\n self.process_metrics[\"uncollectable\"].set(\n generation, stat[\"uncollectable\"]\n )\n self.process_metrics[\"collections\"].set(\n generation, stat[\"collections\"]\n )", "def collect(self):\n self.status['serial'] = self.config.get('dlmconfig', 'serial')\n self.status['timestamp'] = time.strftime('%Y/%m/%d %H:%M:%S', time.localtime())\n self.status['uptime'] = system.stats.uptime()\n self.status['free_disk_space_sdcard'] = system.stats.disk_usage('root')\n self.status['free_disk_space_stick'] = system.stats.disk_usage('sda1')\n self.status['wwan_reception'] = system.interfaces.WwanInterface.signal_strength(self.config.get('network', 'iface'))", "def _send(self):\n executor_id = self.status['executor_id']\n job_id = self.status['job_id']\n call_id = self.status['call_id']\n act_id = self.status['activation_id']\n\n if self.status['type'] == '__init__':\n init_key = create_init_key(executor_id, job_id, call_id, act_id)\n self.internal_storage.put_data(init_key, '')\n\n elif self.status['type'] == '__end__':\n status_key = 
create_status_key(executor_id, job_id, call_id)\n dmpd_response_status = json.dumps(self.status)\n drs = sizeof_fmt(len(dmpd_response_status))\n logger.info(\"Storing execution stats - Size: {}\".format(drs))\n self.internal_storage.put_data(status_key, dmpd_response_status)", "def monitor(self):\n procdata = self.collect_userprocs_info()\n now = int(time.time())\n #-------------------\n proclist = []\n for name in procdata:\n mem = procdata[name]['rss']\n pcode = self.DB.get_code(name)\n proclist.append((now, pcode, mem))\n self.DB.add_proc_info(proclist)\n #-------------------\n totmem = psutil.virtual_memory()\n self.DB.add_total_mem_info(now, totmem.used, totmem.available, totmem.free)\n #-------------------\n disk = psutil.disk_usage('/')\n dinfo = {\n \"utime\" : now,\n \"total\" : disk.total,\n \"used\" : disk.used,\n \"free\" : disk.free,\n \"percent\" : disk.percent\n }\n self.DB.add_diskuse_info(dinfo)\n #-------------------\n cpu = json.dumps(psutil.cpu_percent(None, True))\n self.DB.add_total_cpu(now, cpu)\n #-------------------\n net = psutil.net_io_counters()\n ninfo = {\n \"utime\" : now,\n \"brecv\" : net.bytes_recv,\n \"bsent\" : net.bytes_sent,\n \"precv\" : net.packets_recv,\n \"psent\" : net.packets_sent,\n \"errin\" : net.errin,\n \"errin\" : net.errout\n }\n self.DB.add_net_info(ninfo)", "def collect():\n\n stats = {}\n for feed in Feed.objects:\n try:\n logger.info('Fetching from {0}...'.format(feed.ext_url))\n new_articles = fetch(feed)\n stats[feed.ext_url] = len(new_articles)\n\n except SAXException as e:\n if feed.errors is None:\n feed.errors = 0\n\n # Error with the feed, make a note.\n logger.info('Error fetching from {0}.'.format(feed.ext_url))\n feed.errors += 1\n feed.save()\n pretty_stats = json.dumps(stats, sort_keys=True, indent=4)\n notify('Corpora collection complete.', 'Total article count: {0}\\n\\nResults for this pass:\\n{1}'.format(len(Article.objects), pretty_stats))", "def collect_data(self):\n exp_conf: ec.ExperimentConfiguration\n # Disabled multiprocess run because of huge memory usage\n processes_number = 1 # self._campaign_configuration['General']['j']\n if processes_number == 1:\n self._logger.info(\"-->Evaluate experiments (sequentially)\")\n for exp_conf in tqdm.tqdm(self._exp_confs, dynamic_ncols=True):\n exp_conf.evaluate()\n if bool(self._campaign_configuration['General']['generate_plots']):\n exp_conf.generate_plots()\n self._logger.info(\"<--\")\n else:\n self._logger.info(\"-->Evaluate experiments (in parallel)\")\n pool = multiprocessing.Pool(processes_number)\n self._exp_confs = list(tqdm.tqdm(pool.imap(evaluate_wrapper, self._exp_confs), total=len(self._exp_confs)))\n if bool(self._campaign_configuration['General']['generate_plots']):\n pool = multiprocessing.Pool(processes_number)\n self._exp_confs = list(tqdm.tqdm(pool.imap(plot_wrapper, self._exp_confs), total=len(self._exp_confs)))\n self._logger.info(\"<--\")\n\n self.raw_results = {}\n for exp_conf in self._exp_confs:\n self.raw_results[tuple(exp_conf.get_signature())] = exp_conf.mapes", "def collect(self, app):\n pass", "def set_metrics(self):", "def _process(self):\n export_collect_medias(self.kwargs[\"collect\"])", "def do(self, callback_name, *args):\n logger.info(\"Monitoring on auxiliary data started\")\n value_dict = self._evaluator.evaluate(self.data_stream)\n self.add_records(self.main_loop.log, value_dict.items())\n logger.info(\"Monitoring on auxiliary data finished\")", "def do(self, callback_name, *args):\n logger.info(\"Monitoring on auxiliary data 
started\")\n value_dict = self._evaluator.evaluate(self.data_stream)\n self.add_records(self.main_loop.log, value_dict.items())\n logger.info(\"Monitoring on auxiliary data finished\")", "def get_data():\n \n data = {\n 'loadAvg1Min': 0, #load average 1 min\n 'loadAvg5Min': 0, #load average 5 min\n 'loadAvg15Min': 0, #load average 15 min\n 'cpuUsage': [], #usage distribution for each cpu\n 'memUsage': {}, #memory usage \n 'networkReads': [], #network reads per second for each interface\n 'networkWrites': [], #network writes per second for each interface\n 'diskReads': [], #disk reads per second for each disk\n 'diskWrites': [] #disk writes per second for each disk\n }\n \n #metrics that doesnt need sampling\n data['loadAvg1Min'], data['loadAvg5Min'], data['loadAvg15Min'] = get_load_avg() #get load avg\n data['memUsage'].update(get_mem_usage()) #memory usage\n \n #metrics that needs sampling\n #they are written as a generator so that we can sleep before collection again\n sampling_duration = 1\n cpu_usage_gen = get_cpu_usage(sampling_duration) #generator for cpu usage\n net_rw_gen = get_net_rw(sampling_duration) #generator for network read write\n disk_rw_gen = get_disk_rw(sampling_duration) #generator for disk read write\n \n while 1: #now start sampling, whenever we have walid data, we can exit the loop\n cpu_usage = next(cpu_usage_gen)\n net_rw = next(net_rw_gen)\n disk_rw = next(disk_rw_gen)\n \n if cpu_usage or net_rw or disk_rw: #we have valid data\n break\n \n time.sleep(sampling_duration)\n \n #append cpu usage for each cpu core\n for cpu, usage in cpu_usage.items():\n data['cpuUsage'].append({'name': cpu, 'value': usage})\n \n #append network read and write for each interface\n for interface, rw in net_rw.items():\n data['networkReads'].append({'name': interface, 'value': rw['reads']})\n data['networkWrites'].append({'name': interface, 'value': rw['writes']}) \n \n #append disk read and write for each logical disk\n for device, rw in disk_rw.items():\n data['diskReads'].append({'name': device, 'value': rw['reads']})\n data['diskWrites'].append({'name': device, 'value': rw['writes']})\n \n return data", "def do(self, callback_name, *args):\n logger.info(\"Monitoring on auxiliary data started\")\n value_dict = self._evaluator.evaluate(self.data_stream)\n for key in value_dict.keys():\n value_dict[key] *= self.coverage\n value_dict['coverage'] = self.coverage\n logging.info(\"coverage:{0}\".format(self.coverage))\n for key, value in value_dict.items():\n logging.info(\"{0}:{1}\".format(key,value))\n self.add_records(self.main_loop.log, value_dict.items())\n self.check_stop(value_dict)\n logger.info(\"Monitoring on auxiliary data finished\")", "def collect(self):\n pass", "def collect(self): # pylint: disable=no-self-use\n start = time.time()\n\n if \"jobs\" in PLUGIN_SETTINGS and PLUGIN_SETTINGS[\"jobs\"]:\n for metric in metric_jobs():\n yield metric\n\n if \"models\" in PLUGIN_SETTINGS:\n for metric in metric_models(PLUGIN_SETTINGS[\"models\"]):\n yield metric\n\n # --------------------------------------------------------------\n # Extras Function defined in configuration.py or the Regristry\n # # --------------------------------------------------------------\n if \"extras\" in PLUGIN_SETTINGS:\n for metric in collect_extras_metric(PLUGIN_SETTINGS[\"extras\"]):\n yield metric\n\n for metric in collect_extras_metric(__REGISTRY__):\n yield metric\n\n gauge = GaugeMetricFamily(\n \"nautobot_app_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration 
= time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge", "def _update_data(self):\n for attribute in [\"flow_rate\"]:\n self._data[attribute] = self._connection.measure", "def collect(self) -> Metric:\n ret = self.source()\n if ret is None:\n LOGGER.warning('Statistics are not available')\n return\n gauge = GaugeMetricFamily('wemo_device_state', 'Status of Wemo device', labels=['address', 'parameter'])\n gauge.add_metric([ret.address, 'today_kwh'], ret.today_kwh, timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'current_power_mW'], ret.current_power,\n timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'today_on_time'], ret.today_on_time, timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'on_for'], ret.on_for, timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'today_standby_time'], ret.today_standby_time,\n timestamp=ret.collection_time.timestamp())\n\n yield gauge\n\n counter = CounterMetricFamily('wemo_power_usage', 'Today power consumption', labels=['address'])\n counter.add_metric([ret.address], ret.today_kwh, timestamp=ret.collection_time.timestamp())\n yield counter", "def _collect_all(self):", "def collect_incoming_data(self, data):\n self.logger.debug('collect_incoming_data() -> (%d)\\n\"\"\"%s\"\"\"', len(data), data)\n self.received_data.append(data)", "def collect_output(self):\n pass", "def collect_output(self):\n pass", "def collect_stat(self):\n\n cnstat_dict, ratestat_dict = self.get_cnstat()\n self.cnstat_dict.update(cnstat_dict)\n self.ratestat_dict.update(ratestat_dict)", "def _start_collect_stats(sc):\n\n arg_dict = {}\n ev = sc.new_event(id=lb_const.EVENT_COLLECT_STATS_V2, data=arg_dict)\n sc.post_event(ev)", "def run():\n\twrite_fuel_data()", "def cpu_monitor():\n\n architecture = uname()[4] # This will return 'x86_64', 'aarc64' (for 64 bit arm), etc.\n if(not architecture in SUPPORTED_ARCHITECTURES):\n logerr(\"This architecture doesn't appear to be one that is supported. Consider adding it and openning\" + \n \" a pull request on github!\")\n exit()\n\n init_node(\"bthere_cpu_monitor\", anonymous=False)\n pub = Publisher(\"/bthere/cpu_data\", CPUData, queue_size=10)\n loginfo(\"Outputting to /bthere/cpu_data\")\n \n #update period should to be somewhat small since the cpu load data is average since you last checked,\n #a slower update rate will be less accurate for bursty loads and may introduce more lag than expected\n #if a load is added later in the time between updates for example.\n update_period = get_param('~update_period', 1.0)\n rate = Rate(1/float(update_period))\n loginfo(\"Publishing rate: \" + str(1.0/update_period) + \" hz\")\n\n quiet = get_param(\"~quiet\", False)\n\n #since the temperature-getting seems likely to be failure prone, try it once to check.\n able_to_get_temps = True\n\n if(isnan(get_cpu_temps(architecture)[0])):\n logwarn(\"Unable to get CPU temperatures\")\n able_to_get_temps = False\n \n last_cpu_times = []\n while not is_shutdown():\n data = CPUData()\n gated_loginfo(quiet, \"------ CPU Data ------\")\n if(able_to_get_temps):\n # If temperature data can be collected, add it to the CPUData to be published and log\n package_temp, core_temps = get_cpu_temps(architecture)\n gated_loginfo(quiet, \"CPU Package temp. (C): \" + str(package_temp))\n data.package_temp = package_temp\n if(len(core_temps) > 0):\n for core in range(len(core_temps)):\n gated_loginfo(quiet, \"CPU Core \" + str(core) + \"temp. 
(C): \" + str(core_temps[core]))\n data.core_temps = core_temps\n else:\n # If the data is unavailable just publish NaN and log\n gated_loginfo(quiet, \"CPU temperatures unavailable\")\n data.package_temp = float(\"NaN\")\n data.core_temps = [float(\"NaN\")]\n if(len(last_cpu_times) == 0): \n # If this hasn't been initialized, we just won't publish this info yet and init.\n # last_cpu_times can't just be initialized before the loop because it should (for consistency) be the same\n # time between data collections and getting the initial data before the loop would make the time between\n # data collections small and potentially make the data misleading due to burst loads.\n last_cpu_times = get_load_data()\n gated_loginfo(quiet, \"CPU load not yet available\")\n else:\n overall_load, per_cores, last_cpu_times = get_cpu_load(last_cpu_times)\n gated_loginfo(quiet, \"Overall CPU load: \" + str(round(overall_load * 100, 1)) + \"%\")\n data.overall_cpu_load = overall_load\n if(len(per_cores) > 0):\n for core in range(len(per_cores)):\n gated_loginfo(quiet, \"CPU core \" + str(core) + \" load: \" + str(round(per_cores[core] * 100, 1)) + \n \"%\")\n data.core_loads = per_cores\n \n # Add the header information:\n header = Header(stamp=Time.now())\n # The frame_id property seems to be to do with tf frames of reference. That isn't useful for something like \n # this, so just leave it empty. (this might be the wrong way to do this, but I don't have any other info.)\n # The sequential id is apparently set by the publisher.\n data.header = header\n \n pub.publish(data)\n rate.sleep()", "def measure():\n print(\"alias, timestamp, current, total, power, voltage, err_code\")\n message_str = MeasurementRequest(None).to_json()\n socket_object = UdpSocket()\n s = UDPSendThread(message_str, socket_object)\n r = UDPRecvThread(socket_object, measurement_output_parser)\n s.start()\n r.start()\n\n wait((s, r))", "def collect_data(self):\n self.lines = []\n\n while True:\n self._process_serial_data()", "def _process(self):\n self.output[\"data\"] = get_form_data(self.kwargs[\"collect\"].ona_scan_form_pk)", "def collect():\n datadir = 'data'\n if 'OUTPUT_DATA_DIR' in os.environ:\n datadir = os.environ['OUTPUT_DATA_DIR']\n\n scraper_dir = os.path.join(os.getcwd(), 'scrapers')\n scrapers = get_scraper_list(scraper_dir)\n now = datetime.now()\n total_deals = []\n for scr_instance in scrapers:\n deals = scr_instance.get_deals()\n\n # Map a timestamp on each deal\n for item in deals:\n item.update({'timestamp': now.strftime('%Y-%m-%d')})\n\n print(\"\\n Collected {0} deals for {1} \\n\\n\".format(len(deals), scr))\n\n total_deals += deals\n\n filename = '{0}_resultset.json'.format(now.strftime('%Y%m%d_%H%I%S'))\n\n fh = open(os.path.join(datadir, filename), 'w+')\n fh.write(json.dumps(total_deals))\n fh.close()", "def _monitor(self):\n # while CONF.weight == 'bw':\n while True:\n self._send_echo_request()\n self.create_link_delay()\n # self.get_loss()\n self.stats['flow'] = {}\n self.stats['port'] = {}\n for dp in self.datapaths.values():\n self.port_features.setdefault(dp.id, {})\n self.link_loss.setdefault(dp.id,{})\n self._request_stats(dp)\n # refresh data.\n self.capabilities = None\n self.best_paths = None\n hub.sleep(setting.MONITOR_PERIOD)\n self.show_stat()", "def yapasGarbageCollectorReq(self):\r\n if core.FW_conf['tracing_enabled']:\r\n core.FW_conf['trace'].yapasGarbageCollector()", "def collect(self):\n repos = requests.get('http://{}:{}/api/unstable/dosocs/repos'.format(\n 
self.config['broker_host'],self.config['broker_port'])).json()\n\n for repo in repos:\n try:\n logger.info(f'Adding Repo Labor Data for Repo: {repo}')\n self.generate_value_data(repo['repo_id'], repo['path'])\n except Exception as e:\n logger.error(f'Error occured for Repo: {repo}')\n logger.exception(e)\n\n self.register_task_completion('value')\n\n # while True:\n # time.sleep(2)\n # logger.info(f'Maintain Queue Empty: {self._maintain_queue.empty()}')\n # logger.info(f'Queue Empty: {self._queue.empty()}')\n # if not self._queue.empty():\n # message = self._queue.get()\n # logger.info(f\"Popped off message from Queue: {message.entry_info}\")\n # self.working_on = \"UPDATE\"\n # elif not self._maintain_queue.empty():\n # message = self._maintain_queue.get()\n # logger.info(f\"Popped off message from Maintain Queue: {message.entry_info}\")\n # self.working_on = \"MAINTAIN\"\n # else:\n # break\n\n # if message.type == 'EXIT':\n # break\n\n # if message.type != 'TASK':\n # raise ValueError(f'{message.type} is not a recognized task type')\n\n # if message.type == 'TASK':\n # try:\n # repos = requests.get('http://{}:{}/api/unstable/dosocs/repos'.format(\n # self.config['broker_host'],self.config['broker_port'])).json()\n\n # for repo in repos:\n # self.generate_value_data(repo['repo_id'], repo['path'])\n\n # self.register_task_completion('value')\n\n # except Exception:\n # # logger.error(\"Worker ran into an error for task: {}\\n\".format(message.entry_info['task']))\n # # logger.error(\"Error encountered: \" + str(e) + \"\\n\")\n # # # traceback.format_exc()\n # # logger.info(\"Notifying broker and logging task failure in database...\\n\")\n\n # logger.exception(f'Worker ran into an error for task {message.entry_info}')\n # self.register_task_failure(message.entry_info['repo_id'],\n # message.entry_info['task']['given']['git_url'])\n\n # # Add to history table\n # task_history = {\n # \"repo_id\": message.entry_info['repo_id'],\n # \"worker\": self.config['id'],\n # \"job_model\": message.entry_info['task']['models'][0],\n # \"oauth_id\": self.config['zombie_id'],\n # \"timestamp\": datetime.datetime.now(),\n # \"status\": \"Error\",\n # \"total_results\": self.results_counter\n # }\n\n # if self.history_id:\n # self.helper_db.execute(self.history_table.update().where(self.history_table.c.history_id==self.history_id).values(task_history))\n # else:\n # r = self.helper_db.execute(self.history_table.insert().values(task_history))\n # self.history_id = r.inserted_primary_key[0]\n\n # logger.info(f\"Recorded job error for: {message.entry_info['task']}\")\n\n # # Update job process table\n # updated_job = {\n # \"since_id_str\": message.entry_info['repo_id'],\n # \"last_count\": self.results_counter,\n # \"last_run\": datetime.datetime.now(),\n # \"analysis_state\": 0\n # }\n # self.helper_db.execute(self.job_table.update().where(self.job_table.c.job_model==message.entry_info['task']['models'][0]).values(updated_job))\n # logger.info(\"Updated job process for model: \" + message.entry_info['task']['models'][0] + \"\\n\")\n\n # # Reset results counter for next task\n # self.results_counter = 0\n # pass", "def do(self, callback_name, *args):\n logger.info(\"Monitoring on auxiliary data started\")\n value_dict = self._evaluator.evaluate(self.data_stream)\n self.add_records(self.main_loop.log, value_dict.items())\n self.check_stop(value_dict)\n logger.info(\"Monitoring on auxiliary data finished\")", "def collect_data(self):\r\n self.vcp.read(self.vcp.inWaiting())\r\n while True:\r\n data = 
self.vcp.readline()\r\n data = data.decode(\"ASCII\")\r\n timestamp = \",\" + datetime.datetime.now().strftime(\"%H:%M:%S\")\r\n data_timestamp = data + timestamp\r\n if not self.data_pause:\r\n self.update_anemometer_log.emit(data_timestamp)\r\n if self.stop_timer:\r\n break", "def prometheus_metrics(request):\n if not settings.DEBUG:\n return HttpResponseNotFound()\n\n # DEPRECATED: prometheus_multiproc_dir has been replaced by PROMETHEUS_MULTIPROC_DIR\n if \"PROMETHEUS_MULTIPROC_DIR\" in os.environ or \"prometheus_multiproc_dir\" in os.environ:\n registry = prometheus_client.CollectorRegistry()\n multiprocess.MultiProcessCollector(registry)\n else:\n registry = prometheus_client.REGISTRY\n metrics_page = prometheus_client.generate_latest(registry)\n return HttpResponse(\n metrics_page, content_type=prometheus_client.CONTENT_TYPE_LATEST\n )", "async def process_reports(self):\n features = [features for (__, features) in self.updates]\n\n # Faster way to deep flatten a list of lists compared to list comprehension\n feature_dataset = list(chain.from_iterable(features))\n\n # Training the model using all the features received from the client\n sampler = all_inclusive.Sampler(feature_dataset)\n self.algorithm.train(feature_dataset, sampler,\n Config().algorithm.cut_layer)\n\n # Test the updated model\n self.accuracy = self.trainer.test(self.testset)\n logging.info('[Server #{:d}] Global model accuracy: {:.2f}%\\n'.format(\n os.getpid(), 100 * self.accuracy))\n\n await self.wrap_up_processing_reports()", "def _counter(self):\n while True:\n # ensure counter interval is up to date\n self._read_interval_time()\n\n log.debug(\"SFlowManager._counter: sleeping for %s\", self._counter_interval)\n\n time.sleep(self._counter_interval)\n\n # get a cpu times sample\n res = resource.getrusage(resource.RUSAGE_SELF)\n\n # build and send counter structure\n csample = { 'counter_sample': {\n 'app_name': get_sys_name(),\n 'app_resources': {\n 'user_time': int(res.ru_utime * 1000),\n 'system_time': int(res.ru_stime * 1000),\n 'mem_used': 0, # @TODO\n 'mem_max': res.ru_maxrss * 1024,\n 'fd_open': 0, # @TODO do we care?\n 'fd_max': 0, # @TODO \"\"\n 'conn_open': 0, # @TODO couch/rabbit connection summary somehow\n 'conn_max': 0\n },\n 'app_workers':{\n 'workers_active': len(self._container.proc_manager.proc_sup.children),\n 'workers_idle': 0,\n 'workers_max': 1024,\n 'req_delayed': 0,\n 'req_dropped': 0\n }\n }\n }\n\n log.debug(\"Publishing counter stats: %s\" % csample)\n\n self._publish(csample)", "def _publish_stats(self):\n if self._stat_publish_event is not None:\n self._stat_publish_event.cancel()\n\n topic = LOGGER(subtopic=self._publish_topic + \"/status/cpu\")\n\n points = {}\n\n for k, v in psutil.cpu_times_percent().__dict__.items():\n points['times_percent/' + k] = {'Readings': v,\n 'Units': 'double'}\n\n points['percent'] = {'Readings': psutil.cpu_percent(),\n 'Units': 'double'}\n try:\n self.vip.pubsub.publish('pubsub', topic.format(), message=points)\n\n except Exception as e:\n _log.warn(\"Failed to publish to topic {}\".format(topic.format()))\n finally:\n # The stats publisher publishes both to the local bus and the vc\n # bus the platform specific topics.\n next_update_time = self._next_update_time(\n seconds=self._stats_publish_interval)\n\n self._stats_publish_event = self.core.schedule(\n next_update_time, self._publish_stats)", "def _report_results(self, data_start_time: float, data_end_time: float,\n data_elapsed_time: float, user_cpu: float, sys_cpu: float, extra: dict = None):\n if 
not self.__is_worker:\n raise Exception(\"_report_results must not be called outside of a worker\")\n answer = {\n 'application': 'clusterbuster-json',\n 'namespace': self._namespace(),\n 'pod': self._podname(),\n 'container': self._container(),\n 'process_id': os.getpid(),\n 'pod_create_time': self.__timing_parameters['controller_crtime'] - self.__timing_parameters['controller_basetime'],\n 'pod_start_time': self.__timing_parameters['start_time'],\n 'data_start_time': data_start_time,\n 'data_end_time': data_end_time,\n 'data_elapsed_time': data_elapsed_time,\n 'user_cpu_time': user_cpu,\n 'system_cpu_time': sys_cpu,\n 'cpu_time': user_cpu + sys_cpu,\n 'timing_parameters': self.__timing_parameters\n }\n if isinstance(extra, dict):\n for key, val in extra.items():\n answer[key] = val\n self._timestamp(f\"Report results: {self._namespace()}, {self._podname()}, {self._container()}, {os.getpid()}\")\n try:\n answer = json.dumps(self._clean_numbers(answer))\n except Exception as exc:\n self.__fail(f\"Cannot convert results to JSON: {exc}\")\n self.__do_sync_command('RSLT', answer)\n self.__reported_results = True", "def start(self):\n while True:\n LogService.log_info(\"aggregator\", \"Creating statistics\")\n self.create_statistics()\n LogService.log_info(\"aggregator\", \"Cleaning up\")\n self.cleanup_measurements()\n LogService.log_info(\"aggregator\", \"Sleeping for 60 minutes\")\n time.sleep(60*60)", "def metrics_group():", "async def process_reports(self):\n await self.aggregate_weights(self.updates)\n\n # Testing the global model accuracy\n if Config().clients.do_test:\n # Compute the average accuracy from client reports\n self.average_accuracy = self.accuracy_averaging(self.updates)\n logging.info(\n '[Server #{:d}] Average client accuracy: {:.2f}%.'.format(\n os.getpid(), 100 * self.average_accuracy))\n\n if hasattr(Config().server, 'do_test'):\n if not Config().clients.do_test or Config().server.do_test:\n # Test the updated model directly at the server\n self.accuracy = self.trainer.test(self.testset)\n logging.info(\n '[Server #{:d}] Global model accuracy: {:.2f}%\\n'.format(\n os.getpid(), 100 * self.accuracy))\n else:\n self.accuracy = self.average_accuracy\n\n await self.wrap_up_processing_reports()", "def __send_load(self, metrics):\n\n try:\n threading.Thread(target=self.client.send, args=(metrics,)).start()\n except:\n print(\"Error: unable to start thread\")", "def before_request():\n request._prometheus_metrics_request_start_time = time.time()", "def run(self):\n if self.task == 'all':\n self.produce_all_term_data()\n else:\n self.produce_next_term_data()", "def submit_metric():\n\n gson = json.loads(request.get_json())\n\n new_point = DataPoint(\n computer_name=gson[\"computer_name\"],\n cpu_percentage=gson[\"cpu_percentage\"],\n memory_percentage=gson[\"memory_percentage\"],\n timestamp=gson[\"timestamp\"]\n )\n\n with lock:\n if not instances.get(new_point.computer_name):\n instances[new_point.computer_name] = Timeline(\n maxsize=int(os.environ.get(\"COLLECTOR_BUFFER_SIZE\"))\n )\n instances[new_point.computer_name].append(new_point)\n\n return Response(status=200)", "def stats(self):", "def compute_statistics(self):", "def dispatch(self, host, obj_type, obj_instance, value):\n\n val = collectd.Values(type='gauge', plugin=self.NAME, host=host)\n val.type_instance = obj_type\n val.plugin_instance = obj_instance\n val.values = [value]\n val.dispatch()", "def statsWorker():\n logger.info('STATS: Starting. 
Will report out every {0:.1g} hours'.format(\n config.STATS_HOURS))\n while True:\n gevent.sleep(timedelta(hours=config.STATS_HOURS).total_seconds())\n logger.info('STATS: {0}'.format(stats))\n stats.resetStats()\n\n return", "def get_post_stats(self):\n stats = self.stats\n stats.results = self.job.result().get_counts(stats.iteration)\n stats.datetime = str(datetime.now())", "def metrics(self):\n self.metrics = []\n \n self.clients()\n\n if len(self.metrics) > 0:\n return self.metrics\n else:\n return []", "def monitor(self):", "def process(self):", "def process(self):", "def process(self):", "def run(self):\r\n counter = 0\r\n counter_increment = 1000 # Reporting frequency\r\n\r\n last_time = 0\r\n \r\n if get_param(\"record_queue_state\"):\r\n # Add event to query queue state.\r\n query_interval = 1\r\n report_queue_state = RecordQueueState(self.servers,\r\n self.stats_manager,\r\n query_interval)\r\n self.event_queue.put((query_interval, report_queue_state))\r\n while len(self.stats_manager.completed_jobs) < self.total_jobs:\r\n assert(not self.event_queue.empty())\r\n current_time, event = self.event_queue.get()\r\n \r\n #if current_time >= 3.0 * get_param(\"total_time\") / 4.0:\r\n # set_param(\"relative_weights\", \"1,2\")\r\n #elif current_time >= 1.0 * get_param(\"total_time\") / 2.0:\r\n # set_param(\"relative_weights\", \"1,4\")\r\n\r\n assert(current_time >= last_time)\r\n last_time = current_time\r\n\r\n if current_time > counter:\r\n counter = counter + counter_increment\r\n new_events = event.run(current_time)\r\n if new_events:\r\n for new_event in new_events:\r\n self.event_queue.put(new_event)\r\n \r\n self.stats_manager.output_stats()\r\n \r\n output_params()", "def compute_metrics(self, results: list) -> dict:", "def evaluate_data():\n try:\n # General system related info\n ram = psutil.virtual_memory()\n total_ram = round((ram.total / 1024 / 1024),2)\n free_ram = round((ram.available / 1024 / 1024),2)\n used_ram = round((ram.used / 1024 / 1024),2)\n cpu_total = psutil.cpu_count(logical=True)\n cpu_loadavg = round([x / cpu_total * 100 for x in psutil.getloadavg()][0],2)\n acs_8080 = sp.getoutput(\"netstat -an|grep -c 8080\")\n acs_8181 = sp.getoutput(\"netstat -an|grep -c 8181\")\n acs_8443 = sp.getoutput(\"netstat -an|grep -c 8443\")\n mysql = sp.getoutput(\"netstat -an|grep -c 3306\")\n oracle = sp.getoutput(\"netstat -an|grep -c 1521\")\n logging.info('General system info obtained')\n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)\n # Process specific details\n try:\n iis_pid = SystemInformation.get_pid(\"w3wp.exe\")\n iis_ram = SystemInformation.get_ram_usage(iis_pid)\n iis_cpu = SystemInformation.get_cpu_usage(iis_pid)\n java_pid = SystemInformation.get_pid(\"java.exe\")\n java_ram = SystemInformation.get_ram_usage(java_pid)\n java_cpu = SystemInformation.get_cpu_usage(java_pid)\n mysqld_pid = SystemInformation.get_pid(\"mysqld.exe\")\n mysqld_ram = SystemInformation.get_ram_usage(mysqld_pid) \n mysqld_cpu = SystemInformation.get_cpu_usage(mysqld_pid)\n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)\n\n try:\n dictionary = {}\n now = datetime.datetime.now()\n timestampt = now.strftime(\"%Y-%m-%d-%H:%M:%S\")\n fieldnames = ['timestampt','total_ram','free_ram','used_ram','cpu_total','cpu_loadavg','acs_8080','acs_8181','acs_8443','mysql','oracle','iis_ram','iis_cpu','java_ram','java_cpu','mysqld_ram','mysqld_cpu']\n for var in fieldnames:\n dictionary[var] = 
eval(var)\n \n logging.info('Data for report generated')\n return dictionary\n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)", "def emit_metrics(self):\n parse_time = time.perf_counter() - self._parsing_start_time\n Stats.gauge(\"dag_processing.total_parse_time\", parse_time)\n Stats.gauge(\"dagbag_size\", sum(stat.num_dags for stat in self._file_stats.values()))\n Stats.gauge(\n \"dag_processing.import_errors\", sum(stat.import_errors for stat in self._file_stats.values())\n )", "def born(self, data):\n lc = LoopingCall(get_metrics, None)\n lc.start(2)\n reactor.listenUDP(self.UDP_PORT, NotificationUDPProcessor())\n reactor.listenMulticast(self.MULTICAST_PORT,\n MunticastNotificationProcessor(self.MULTICAST_IP), # add multicast 'born' processing\n listenMultiple=True)\n endpoints.serverFromString(reactor, \"tcp:21999\").listen(EchoFactory())", "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def __call__(self):\n for resource in self.resources:\n self._evaluate_resource(resource)\n self.perfdata = sorted([p for p in self.perfdata if p])", "def collect_incoming_data(self, data):\n self.l.debug('data -> (%d bytes):\"%s\"', len(data), data)\n self.received_data.append(data)", "def start_monitor_loop(self):\n read_file = read_config_file.ConfigFileReader()\n\n communication_time = read_file.get_send_communication_time()\n metrics_array = read_file.get_metrics()\n\n self.add_metrics_to_monitor_object(communication_time, metrics_array)", "def step(self):\r\n self.datacollector.collect(self)\r\n self.datacollector2.collect(self)\r\n self.datacollector3.collect(self)\r\n self.datacollector4.collect(self)\r\n self.datacollector5.collect(self)\r\n self.datacollector6.collect(self)\r\n self.datacollector7.collect(self)\r\n self.datacollector8.collect(self)\r\n self.datacollector9.collect(self)\r\n self.datacollector10.collect(self)\r\n self.datacollector11.collect(self)\r\n self.datacollector12.collect(self)\r\n self.datacollector13.collect(self)\r\n\r\n self.datacollector14.collect(self)\r\n self.datacollector15.collect(self)\r\n self.datacollector16.collect(self)\r\n self.datacollector17.collect(self)\r\n self.datacollector18.collect(self)\r\n self.datacollector19.collect(self)\r\n self.datacollector20.collect(self)\r\n self.datacollector21.collect(self)\r\n self.datacollector22.collect(self)\r\n self.datacollector23.collect(self)\r\n self.datacollector24.collect(self)\r\n self.datacollector25.collect(self)\r\n self.datacollector26.collect(self)\r\n self.schedule.step()", "def _calculate_custom_data(self):\n self.data['vms'] = Vms(self.vms, self.url)", "def runAnalytics():\n #gets OAuth from the API\n analytics = get_Analytics_service()\n #get the object return from the API\n #send that object to print out useful fields\n response = get_report(analytics)\n print_response(response)", "def reporting_data(self):\n\n for publish_key in self.publish_data.get('ifc', {}):\n self._publish(\n '{}.status'.format(publish_key),\n self.publish_data.get('ifc').get(publish_key).get('Status')\n )\n self._publish(\n 
'{}.link_failure_count'.format(publish_key),\n self.publish_data.get('ifc').get(publish_key).get('Link Failure Count')\n )\n self._publish(\n '{}.active'.format(publish_key),\n self.publish_data.get('ifc').get(publish_key).get('Active')\n )\n self._publish(\n '{}.lldp_stats_unavailable'.format(publish_key),\n self.publish_data.get('ifc').get(publish_key).get('lldp_stats_unavailable')\n )\n\n for publish_key in self.publish_data.get('rules', {}):\n self._publish(\n 'mismatch.{}'.format(publish_key),\n self.publish_data.get('rules').get(publish_key)\n )\n\n self._publish('mismatch.bond', self.publish_data.get('mismatch_bond'))", "def store_info(self):\r\n _debug('Protocol: store_info' ) \r\n \r\n #Times\r\n if self.measure_type == '3PL':\r\n self.t_probe_p_s .append(self.t_probe) \r\n self.t_probe_m_s .append(self.t_probe) \r\n if self.measure_type == '4PL':\r\n self.t_probe_p_s .append(self.tp) \r\n self.t_probe_m_s .append(self.tm) \r\n \r\n self.t_pulseSequences_s.append(self.t_pulseSequences)\r\n self.t_process_s .append(self.t_process)\r\n #Total, accumulated, times\r\n self.t_tot_pulseSequences_s.append(self.t_tot_pulseSequences) \r\n self.t_tot_process_s .append(self.t_tot_process) \r\n #Rates\r\n self.Gp_guess_s .append(self.Gp_guess) #Mean of gamma+ \r\n self.Gm_guess_s .append(self.Gm_guess) #Mean of gamma- \r\n self.eGp_guess_s .append(self.eGp_guess) #Uncertainty of gamma+\r\n self.eGm_guess_s .append(self.eGm_guess) #Uncertainty of gamma- \r\n self.cov_GpGm_s .append(self.cov_GpGm) #Covariance of gamma- & gamma- \r\n #Other\r\n self.nb_iteration_s.append(self.iter)\r\n self.R_tot_s .append(self.R_tot)", "def collect_stats(self, cursor):\n metrics = self.config.get('metrics', DEFAULT_METRICS)\n if isinstance(metrics, str):\n if metrics == \"all\":\n # puffer_pool_status is only for 5.5, so we ignore that by default\n metrics = CATEGORIES.keys()\n metrics.remove('buffer_pool_stats')\n else:\n # support comma-separated list\n metrics = re.split(\"\\s*,\\s*\", metrics)\n\n self.logger.debug(\"metrics to collect: %s\" % \", \".join(metrics))\n for cat in metrics:\n if cat in CATEGORIES:\n self.add_category_stats(cat, cursor)\n else:\n self.logger.warning(\"%s is not a valid metric category\" % cat)\n\n if 'newrelic' in metrics:\n self.derive_newrelic_stats()", "def process(self):\n pass", "def on_received_report(self, data):\n nodename = data['nodename']\n\n self.timeout_intervals[nodename] = data['reporting_interval']\n self.last_reports[nodename] = time.time()" ]
[ "0.74755186", "0.72660923", "0.69965476", "0.68536365", "0.67639965", "0.66526055", "0.6617369", "0.63650906", "0.62632084", "0.6253866", "0.62457424", "0.62276083", "0.6208388", "0.61819595", "0.61701816", "0.61203456", "0.611145", "0.6091151", "0.60695875", "0.60637254", "0.6059021", "0.60576147", "0.60551256", "0.60533607", "0.59631306", "0.5947896", "0.5933082", "0.5932966", "0.5930394", "0.5929771", "0.58881336", "0.58820516", "0.5853273", "0.58045846", "0.5794309", "0.5788106", "0.5786485", "0.57801986", "0.57801986", "0.5775196", "0.57644016", "0.57319766", "0.5725396", "0.5719516", "0.57133055", "0.56841403", "0.56826323", "0.56729573", "0.56729573", "0.5670126", "0.5665863", "0.56577337", "0.5648357", "0.5618197", "0.56158173", "0.5606474", "0.5605424", "0.56027406", "0.55937964", "0.5589997", "0.55822366", "0.5579817", "0.5576488", "0.557435", "0.5568567", "0.55603623", "0.5548537", "0.55483824", "0.5545339", "0.55219024", "0.55167806", "0.5515582", "0.5506002", "0.54986537", "0.5493165", "0.5492465", "0.5491302", "0.54903394", "0.54894876", "0.54870504", "0.54693437", "0.5466702", "0.5466702", "0.5466702", "0.5460441", "0.5453567", "0.5452645", "0.54398435", "0.5437482", "0.54364884", "0.5435658", "0.5430475", "0.5430074", "0.5426355", "0.5421683", "0.541973", "0.54140204", "0.5413559", "0.53914225", "0.5381713", "0.5377507" ]
0.0
-1
requires class model name
def __init__(self, model, idx=0, seed=None): self.__logger.info("Synthesizer init") self.__logger.debug("DEBUG Message") self.fake = Faker(seed) # First initialization of Faker self.__reccntr = idx # ?? Unknown variable self.add_providers() # Add providers to the faker self.schema = [] self.is_dependent = [] for field in model.info.schema.info.fields: self.schema.append(field.name) if field.info.aux.dependent == "": self.is_dependent.append(False) else: self.is_dependent.append(True) # Cache the generator functions once self.generator_fcns = {} self.set_generators_from_proto(model) # Following extension for generating duplicate records self.__dupcntr = 0 self.__maxdup = 0 self.__dupdist = [] # List of duplicate counts self._original = [] self.duplicate = False self._expect_duplicate = False self.nduplicate_weights = None self.wrg = None self.mod = None # Generator counters/stats self.stats = {"Total": 0, "Original": 0, "Duplicate": 0} # self.h_dupdist = Histogram1D(range(10)) if model.info.aux.HasField("duplicate"): self.duplicate = True self.duplicate_cfg = dict() self.duplicate_cfg["Prob_duplicate"] = model.info.aux.duplicate.probability self.duplicate_cfg["Dist_duplicate"] = model.info.aux.duplicate.distribution self.duplicate_cfg["Max_duplicate"] = model.info.aux.duplicate.maximum self.nduplicate_weights = self.generate_duplicate_pdf() if model.info.aux.HasField("record_modifier"): self.mod = Modifier( self.fake, self.generator_fcns, self.schema, model.info.aux.record_modifier, ) self.__logger.info("") self.__logger.info("Synthesizer configured") self.__logger.info("Model: %s" % model) self.__logger.info("Schema:") self.__logger.info(pformat(self.schema)) self.__logger.info("Dataset record index: %d" % idx) if seed: self.__logger.info("Seed set: %d" % seed) self.__logger.info("Generate duplicate records:") self.__logger.info(pformat(self.duplicate)) if self.duplicate: self.__logger.info("Duplicate record probabilities") self.__logger.info(pformat(self.duplicate_cfg)) self.__logger.info("Duplicate PDF") self.__logger.info(pformat(self.nduplicate_weights)) self.__logger.info("Record modifier configuration") self.__logger.info(model.info.aux.record_modifier)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modelClass(self):\n raise NotImplementedError", "def get_model(params):\r\n module_name, class_name = params.model.name.rsplit('.', 1)\r\n i = importlib.import_module(module_name)\r\n return getattr(i, class_name)", "def model(self) -> str:\n ...", "def test_valid_model(self):\n model_cls = ModelContainer(APP_LABEL, TestModel2._meta.db_table).model_cls\n self.assertTrue(model_cls.__class__.__name__ is models.Model.__class__.__name__)", "def find_model_using_name(model_name):\n model_filename = \"models.\" + model_name + \"_model\"\n modellib = importlib.import_module(model_filename)\n model = None\n target_model_name = model_name.replace('_', '') + 'model'\n for name, cls in modellib.__dict__.items():\n if name.lower() == target_model_name.lower() \\\n and issubclass(cls, BaseModel):\n model = cls\n\n if model is None:\n print(\"In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase.\" % (model_filename, target_model_name))\n exit(0)\n\n return model", "def ClassForModel(cls, namespace, name):\n try:\n return cls.schema[namespace][name]\n except KeyError:\n raise BadModelError(\"There is no Model with name: %s\" % name)", "def get_model(name):\n # Evil reflection\n model_name = name.lower()\n model_module = importlib.import_module('.'+model_name, cfg.model_pck)\n [(_, model_class)] = inspect.getmembers(\n model_module,\n lambda c: inspect.isclass(c) and sys.modules[c.__module__] == model_module)\n\n tf.logging.debug('Found class %s', model_class)\n return model_class", "def __init__(self, model: object):\n self.model = model", "def load_model(self) -> Any:", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def set_model(self, model_name):\n pass", "def __init__(self, model):\n self.model = model", "def __init__(self, model):\n self.model = model", "def __init__(self, model):\n self.model = model", "def __init__(self, model):\n self.model = model", "def __init__(self, model: str, **kwargs):\n super().__init__(model=model)", "def get_model(model_name):\n module_name = 'strain.models.strain_' + model_name.lower()\n model_module = importlib.import_module(module_name)\n obj = getattr(model_module, model_name)\n return obj", "def get_model_by_name(cls, name):\n model_name = inflection.camelize(name) # class name of the model to use\n model = cls.models[model_name]\n return model", "def model(self) -> Type[Model]:", "def __init__(self, model: Type[ModelType]):\n self.model = model", "def __init__(self, model: Type[ModelType]):\n self.model = model", "def __init__(self, model):\n\t\tself.model = model", "def MakeModel(self):\n pass", "def get_class(klass, kind):\n return getattr(sys.modules['model'], kind, None)", "def model_class(self):\n model_name = self.model_name()\n\n if not model_name:\n return None\n\n try:\n (app, mdl) = model_name.strip().split('.')\n except ValueError:\n logger.error(f\"Invalid 'model' parameter for setting {self.key} : '{model_name}'\")\n return None\n\n app_models = apps.all_models.get(app, None)\n\n if app_models is None:\n logger.error(f\"Error retrieving model class '{model_name}' for setting '{self.key}' - no app named '{app}'\")\n return None\n\n model = app_models.get(mdl, None)\n\n if model is None:\n logger.error(f\"Error retrieving model class '{model_name}' for setting '{self.key}' - no model named '{mdl}'\")\n return None\n\n # Looks like we have found a model!\n return model", "def check_model_exists(class_name):\n if path.exists(settings.get('FALAFEL_DIR') + 
settings.get('MODELS_DIR') + '/' + class_name + '.py'):\n return True\n else:\n return False", "def create_model(self):\n pass", "def create_model(self):\n pass", "def get_model(model=gin.REQUIRED):\n return model", "def model() -> Model:\n return Model()", "def get_model(*, name: str) -> typing.Optional[typing.Type]:\n return getattr(open_alchemy.models, name, None)", "def get_model(model_name: str, *args, **kwargs):\n try:\n if '.' in model_name:\n module_name, class_name = model_name.rsplit('.', 1)\n else:\n module_name = model_name\n class_name = model_name.capitalize().replace(\"_\",\"\")\n\n model_module = import_module('.' + module_name, package='models')\n\n model_class = getattr(model_module, class_name)\n\n instance = model_class(*args, **kwargs)\n\n except (AttributeError, ModuleNotFoundError):\n raise ImportError('{} is not part of our model/architecture collection.'.format(model_name))\n else:\n if not issubclass(model_class, Model):\n raise ImportError(\"{} is not a valid model/architecture.\".format(model_class))\n\n return instance", "def load_model(self):\n pass", "def test_class_attribute():\n assert isinstance(ResRNNModel.model_name, str)\n assert ResRNNModel.model_name == 'res-RNN'\n assert ResRNNModel.file_name == 'model-{}.pt'", "def do_create(self, argv):\n if argv in self.__names:\n new_instance = self.__names[argv]()\n new_instance.save()\n print(\"{}\".format(new_instance.id))\n elif len(argv) is 0:\n print(\"** class name missing **\")\n elif argv is not \"BaseModel\":\n print(\"** class doesn't exist **\")", "def get_model(*args):\n return Model()", "def __str__(self):\n return f\"model {self._name}\"", "def make_model(name):\n module_path = '{0}.{1}'.format(matchers.__name__, name)\n module = __import__(module_path, fromlist=[''])\n classes = inspect.getmembers(module, inspect.isclass)\n classes = [c for c in classes if c[1].__module__ == module_path]\n classes = [c[1] for c in classes if c[0].lower() == name.lower()]\n assert len(classes) == 1\n return classes[0]", "def check_class_definition(cls):\n super().check_class_definition()\n\n if not cls.model:\n cls.definition_error('Must provide \"model\" attribute.')", "def classkey (self):\n return 'rmodel:%s' % (self.__class__.__name__.lower ())", "def test_model_name_type(self):\n \n model_name = get_model()[0]\n \n # Check to make sure the returned value is a string\n self.assertEqual(type(model_name), str)", "def __init__(self, model):\n self._model = model", "def do_create(self, arg):\n if not arg:\n print('** class name missing **')\n return\n args = arg.split(\" \")\n if args[0] not in self.__classes:\n print(\"** class doesn't exist **\")\n else:\n obj = eval(args[0])()\n obj.save()\n print(obj.id)", "def by_name(self, name):\n\n query = self.session.query(self.model_class)\n query = query.filter(self.model_class.name == name)\n return query.first()", "def do_create(self, line):\n if line:\n l = line.split()\n my_model = BaseModel()\n my_model.name = l[0]\n my_model.save()\n print(my_model.id)\n else:\n print(\"** class name missing **\")", "def test_model_class(self):\n db = Alchy(self.app)\n\n self.assertEquals(\n db.Model.__dict__['__init__'], alchy.model.ModelBase.__init__)\n self.assertIsInstance(\n db.Model.__dict__['query'], alchy.query.QueryProperty)", "def get_model_class(model_name, task_name):\n if task_name == 'rocstories':\n return OpenAIGPTDoubleHeadsModel if model_name == 'openai-gpt' else GPT2DoubleHeadsModel\n else:\n return OpenAIGPTLMHeadModel if model_name == 'openai-gpt' else 
GPT2LMHeadModel", "def create_models( self ):", "def importModel(model_name):\n module_path = os.path.join(path, \"models\")\n module_path = os.path.join(module_path, model_name + \".py\")\n model = importClass(model_name, model_name, module_path)\n return model", "def model_info():\n pass", "def __init__(self):\n self.model = None", "def __init__(self):\n self.model = None", "def get_model(name, **model_args):\n module = importlib.import_module('.' + name, 'models')\n return module.build_model(**model_args)", "def test_noarguments(self):\n self.assertEqual(BaseModel, type(BaseModel()))", "def get_model(self):\n\t\treturn self.object.__class__", "def get_model_class(model,baseclass=None):\n from inspect import isclass\n\n if isinstance(model,basestring):\n if model not in __model_registry:\n res = __model_registry[model.lower()]\n else:\n res = __model_registry[model]\n elif isclass(model):\n if issubclass(model,ParametricModel):\n res = model\n else:\n raise TypeError('class object is not a Model')\n elif issubclass(model.__class__,ParametricModel):\n res = model.__class__\n else:\n raise TypeError('attempted to get invalid model')\n\n if baseclass:\n if not issubclass(res,baseclass):\n raise TypeError('%s is not a subclass of %s'%(res,baseclass))\n\n return res", "def __init__(self, model: Optional[Model] = None) -> None:\n self.model = model", "def model_definition(self):\n pass", "def get_model():\n return UNISAL", "def get_model_name(self) -> str:\n raise NotImplementedError", "def retrieve_model(self, model_name):\n\t\tmodel_detail = dbop.get_model(self, model_name)\n\t\t#since the 'owner' field of model_detail is only owner's username,\n\t\t#we have to change it to a User object\n\t\t#In this case, the owner of this model is the user itself\n\t\tmodel_detail['owner'] = self\n\t\tif model_detail['model_type'] == 'SPSS Predictive Model':\n\t\t\treturn model.SPSSModel(**model_detail)\n\t\telif model_detail['model_type'] == 'DashDB In-database Model':\n\t\t\treturn model.DashdbModel(**model_detail)", "def class_path(model, variables):\n return None", "def test_get_model_method_with_missing_model(self):\n # arrange\n model_manager = ModelManager()\n\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n exception_message = \"\"\n model = None\n try:\n model = model_manager.get_model(qualified_name=\"asdf\")\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n\n # assert\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"Instance of model 'asdf' not found in ModelManager.\")", "def prepare_model(self, **kwargs):\n pass", "def __class_validation(cls):\n\n # check if this class is a subClass of Model\n if not issubclass(cls, db.Model):\n raise AttributeError(cls.__name__ + \" is not subclass of \" + db.Model.__name__)", "def get_model(self, name):\n bundle_name, model_name = name.split(\".\")\n bundle = self.bundles[bundle_name]\n model = bundle.models[name]\n return model", "def test_override_model_class(self):\n class MyModelBase(object):\n def testing(self):\n return 'testing'\n\n Model = declarative_base(cls=MyModelBase)\n\n class Foo(Model):\n __tablename__ = 'foo'\n _id = Column(types.Integer(), primary_key=True)\n name = Column(types.String())\n\n db = Alchy(self.app, Model=Model)\n\n self.assertTrue(issubclass(db.Model, MyModelBase),\n 'db.Model should be a subclass of MyModelBase')\n\n db.create_all()\n\n self.assertEquals(db.session.query(Foo).all(), Foo.query.all(),\n 'Model classes 
should have a query property')\n\n record = Foo(name='Name')\n\n self.assertEquals(record.testing(), 'testing')", "def get_model_class(class_name, kwargs={}):\n # , Perceptron, PassiveAggressiveRegressor\n # , NuSVR, LinearSVR\n\n if class_name == 'LinearRegression':\n from sklearn.linear_model import LinearRegression\n return LinearRegression(**kwargs)\n\n if class_name == 'SGDRegressor':\n from sklearn.linear_model import SGDRegressor\n return SGDRegressor(**kwargs)\n\n if class_name == 'SVR':\n from sklearn.svm import SVR\n return SVR(**kwargs)\n\n if class_name == 'DecisionTreeRegressor':\n from sklearn.tree import DecisionTreeRegressor\n return DecisionTreeRegressor(**kwargs)\n\n if class_name == 'ExtraTreesRegressor':\n from sklearn.ensemble import ExtraTreesRegressor\n return ExtraTreesRegressor(**kwargs)\n\n if class_name == 'KNeighborsRegressor':\n from sklearn.neighbors import KNeighborsRegressor\n return KNeighborsRegressor(**kwargs)\n\n if class_name == 'MLPRegressor':\n from sklearn.neural_network import MLPRegressor\n return MLPRegressor(**kwargs)\n\n raise Exception(\"Unknown Model class\")", "def init_model(self):\n pass", "def set_model(*, name: str, model: typing.Type) -> None:\n setattr(open_alchemy.models, name, model)", "def test_get_model(self) -> None:\n get_model()", "def test_get_model_method(self):\n # arrange\n model_manager = ModelManager()\n\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n model = None\n try:\n model = model_manager.get_model(qualified_name=\"qualified_name\")\n except Exception as e:\n exception_raised = True\n\n # assert\n self.assertFalse(exception_raised)\n self.assertTrue(type(model) is MLModelMock)", "def get_model_type(self):\n pass", "def __getattr__(self, name):\n return getattr(self.model, name)", "def __init__(self, name, *model_args):\n assert name != None\n self.name = name\n self._models = {}\n self._parent = None #Model instance\n self._loader = None\n self._loaded = True\n for model_arg in model_args:\n m = self.model_class(**model_arg)\n self.add(m)", "def describe_model(ModelName=None):\n pass", "def _get_model_by_name(self):\n \n # Interpret the request data based on the expected row and column structure\n row_template = ['strData']\n col_headers = ['model_name']\n \n # Create a Pandas Data Frame for the request data\n self.request_df = utils.request_df(self.request, row_template, col_headers)\n \n # Initialize the persistent model\n self.model = PersistentModel()\n \n # Get the model name from the request dataframe\n self.model.name = self.request_df.loc[0, 'model_name']\n \n # Get the model from cache or disk\n self._get_model()\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(3)", "def test_model_import(self, iris_astore):\n model = register_model(\n iris_astore, self.MODEL_NAME, self.PROJECT_NAME, force=True\n )\n\n assert self.MODEL_NAME == model.name", "def kind_to_class(klass, kind):\n return getattr(sys.modules['model'], kind, None)", "def __init__(self, model: str, **kwargs):\n\n super().__init__(model=model, **kwargs)\n logger.info('load model done')", "def get_class(self, name):\n raise NotImplementedError", "def make_model():\n m = model_class(*argv[2:-1])\n modelobj[\"model\"] = m", "def _default_make_sa_model(model):\n name = model._meta.object_name + \".__aldjemy__\"\n return type(name, (), {\"__module__\": model.__module__})", "def get_model(self, full_name: Union[Type[Model], str], 
reference_model: Type[Model]):\n\n if isinstance(full_name, str):\n name_parts = full_name.split(\".\")\n if len(name_parts) == 1:\n return self.get_app_model(reference_model._meta.app_label, full_name)\n\n elif len(name_parts) == 2:\n return self.get_app_model(*name_parts)\n\n else:\n raise ConfigurationError('Model name needs to be in format \"app.Model\" or \"Model\"')\n\n elif inspect.isclass(full_name) and issubclass(full_name, Model):\n return full_name\n\n else:\n raise TypeError(f\"Cannot get model from {full_name}. Invalid type.\")", "def build_model(name, **model_params):\n assert name in globals().keys(),\\\n \"%s must be a model imported/defined in models/__init__.py\" % name\n return globals()[name](**model_params)", "def checkModel(self, model):\n # TODO", "def _create_model(self, key):\n pass", "def load_model(name, input_node):\n # Find the model class from its name\n all_models = models.get_models()\n net_class = [model for model in all_models if model.__name__ == name][0]\n\n # Construct and return the model\n return net_class({'data': input_node})", "def is_model(self):\n return self.model_name() is not None", "def do_create(self, line):\n try:\n tokens = split(line)\n except ValueError:\n return None\n if len(tokens) < 1:\n print(\"** class name missing **\")\n else:\n cls = models.getmodel(tokens[0])\n if cls is None:\n print(\"** class doesn't exist **\")\n else:\n instance = cls()\n models.storage.save()\n print(instance.id)", "def _get_model_from_table_name(table_name: str) -> Optional[Type[RDSModel]]:\n table_model = None\n try:\n if hasattr(Base, '_decl_class_registry'):\n models = Base._decl_class_registry.values() # sqlalchemy < 1.4\n else:\n models = Base.registry._class_registry.values()\n\n for model in models:\n if hasattr(model, '__tablename__') and model.__tablename__ == table_name:\n table_model = model\n except Exception as e:\n LOGGER.exception(f'Failed to get model for the table: {table_name} from rds model base')\n raise e\n\n return table_model", "def test_class_name(self):\n r = Review()\n r_dictionary = r.to_dict()\n self.assertIn('__class__', r_dictionary)", "def load_model(self, filename):\r\n pass", "def get_model_reference(self, model_name):\n\n print_debug(\"Geting model :\" + model_name)\n model = ModelsFactory.get(model_name=model_name)\n return model", "def real_model(request):\n return request.config.option.real_model", "def model_name(self, model_name: str):\n\n self._model_name = model_name", "def __init__(self, name, model_instance=None):\n self.cache = self._get_cache_instance()\n self.name = name\n self.model_instance = model_instance\n if self.model_instance:\n self.content_type = ContentType.objects.get_for_model(\n self.model_instance)" ]
[ "0.7560292", "0.6874529", "0.684144", "0.6818532", "0.68008655", "0.6774673", "0.67614686", "0.67368454", "0.6727387", "0.6721594", "0.6721594", "0.6721594", "0.6721594", "0.6721594", "0.6679806", "0.66507953", "0.66507953", "0.66507953", "0.66507953", "0.6616141", "0.65815794", "0.65416056", "0.6533458", "0.65214276", "0.65214276", "0.6515085", "0.649929", "0.6478649", "0.6429597", "0.6426744", "0.64206934", "0.64206934", "0.6417267", "0.6414934", "0.6406952", "0.6404933", "0.6399281", "0.63824683", "0.6378182", "0.6349488", "0.63488436", "0.63203216", "0.63162845", "0.6313436", "0.62969106", "0.6283336", "0.62731624", "0.62715876", "0.6255247", "0.62277013", "0.61535436", "0.61164176", "0.6115786", "0.61076987", "0.6103691", "0.6103691", "0.6099387", "0.60975", "0.608445", "0.6082243", "0.6076501", "0.6074577", "0.6074332", "0.60702646", "0.6064729", "0.6040726", "0.60253245", "0.6016199", "0.601463", "0.601107", "0.60095435", "0.60055864", "0.60014856", "0.5991935", "0.59891355", "0.5988128", "0.59718996", "0.5964654", "0.59565634", "0.5956018", "0.59444875", "0.59317565", "0.59292215", "0.5928449", "0.59241694", "0.5916522", "0.5914503", "0.5910774", "0.59035325", "0.5886053", "0.58854926", "0.58786464", "0.5873381", "0.5862899", "0.5862238", "0.58523667", "0.5847388", "0.58439434", "0.5843639", "0.5829242", "0.58227235" ]
0.0
-1
This method swaps out the numpy instance in the module, should it have one, to the one in the fake instance we have here.
def _swap_numpy(self, module): # Check to make sure this is not one of the string options from the YAML if not isinstance(module, str): if hasattr(module, 'numpy'): # Check if it has a self.numpy object # TODO: Replace this with the correct variable module.numpy = self.fake.numpy # Swap out with the class's instance of numpy return module # Return out the mutated module
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_reference_to_array(self):\n arr = numpy.arange(0.0, 10.0, 0.1)\n arr = numpy.reshape(arr, (25, 4))\n vtk_arr = array_handler.array2vtk(arr)\n arr1 = array_handler.vtk2array(vtk_arr)\n # Now make sure these are using the same memory.\n arr[0][0] = 100.0\n self.assertEqual(arr[0][0], arr1[0][0])\n self.assertEqual(arr.shape, arr1.shape)", "def test_inplace_set_value(self):\r\n dtype = self.dtype\r\n if dtype is None:\r\n dtype = theano.config.floatX\r\n\r\n shp = (100/4,1024)#100KB\r\n\r\n x = numpy.zeros(shp, dtype=dtype)\r\n x = self.cast_value(x)\r\n x_shared = self.shared_constructor(x, borrow=True)\r\n\r\n old_data = x_shared.container.storage[0]\r\n nd = numpy.ones(shp, dtype=dtype)\r\n\r\n if x.__class__.__name__ != 'csr_matrix':\r\n #sparse matrix don't support inplace affectation\r\n x_shared.container.value[:] = nd\r\n assert (numpy.asarray(x_shared.get_value(borrow=True))==nd).all()\r\n #This should always share value!\r\n assert may_share_memory(old_data, x_shared.container.storage[0])\r\n assert may_share_memory(old_data, x_shared.get_value(borrow=True, return_internal_type=True))\r\n\r\n nd[0]+=1\r\n x_shared.container.value[0] = nd[0]\r\n assert (numpy.asarray(x_shared.get_value(borrow=True)[0])==nd[0]).all()\r\n assert (numpy.asarray(x_shared.get_value(borrow=True)[1:])==nd[1:]).all()\r\n #This should always share value!\r\n assert may_share_memory(old_data, x_shared.container.storage[0])\r\n assert may_share_memory(old_data, x_shared.get_value(borrow=True, return_internal_type=True))\r\n\r\n if x.__class__.__name__ != 'csr_matrix':\r\n #sparse matrix don't support inplace affectation\r\n nd += 1\r\n #THIS DON't DO WHAT WE EXPECT the contain of a is not updated for CudaNdarray, but it is for ndarray\r\n x_shared.get_value(borrow=True)[:] = nd\r\n #assert (numpy.asarray(x_shared.get_value(borrow=True))!=nd).all()\r\n assert may_share_memory(old_data, x_shared.container.storage[0])\r\n x_shared.get_value(borrow=True)\r\n\r\n # Test by set_value with borrow=False\r\n nd += 1\r\n old_data = x_shared.container.storage[0]\r\n x_shared.set_value(nd, borrow=False)\r\n assert numpy.allclose(self.ref_fct(x_shared.get_value(borrow=True)),\r\n self.ref_fct(self.cast_value(nd)))\r\n assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_value_inplace\r\n\r\n # Test by set_value with borrow=False when new data cast.\r\n # specificaly useful for gpu data\r\n nd += 1\r\n old_data = x_shared.container.storage[0]\r\n x_shared.set_value(self.cast_value(nd), borrow=False)\r\n assert numpy.allclose(self.ref_fct(x_shared.get_value(borrow=True)),\r\n self.ref_fct(self.cast_value(nd)))\r\n assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_cast_value_inplace\r\n\r\n # Test by set_value with borrow=True\r\n nd += 1\r\n old_data = x_shared.container.storage[0]\r\n x_shared.set_value(nd.copy(), borrow=True)\r\n assert numpy.allclose(self.ref_fct(x_shared.get_value(borrow=True)),\r\n self.ref_fct(self.cast_value(nd)))\r\n assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_value_inplace\r\n\r\n # Test by set_value with borrow=True when new data cast.\r\n nd += 1\r\n old_data = x_shared.container.storage[0]\r\n x_shared.set_value(self.cast_value(nd.copy()), borrow=True)\r\n assert numpy.allclose(self.ref_fct(x_shared.get_value(borrow=True)), self.ref_fct(self.cast_value(nd)))\r\n assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_cast_value_inplace", "def test_Numpy_import(benchmark):\n\n def 
Benchmark():\n import numpy as np\n a = np.ndarray(1)\n del a\n\n benchmark(Benchmark)", "def set_value(self, new_value, borrow=False):\n new_value = np.array(new_value, copy = not borrow)\n try:\n if self.shape != new_value.shape:\n self.resize(new_value.shape, refcheck=False)\n # refcheck is necessary to get this to work, but bypasses\n # the reference checks. Reference errors might occur if\n # a reference to this ShimmedTensorShared variable exists elsewhere,\n # and we try to access it after the resize. This is the kind\n # of thing you shouldn't do anyway with Theano variables.\n self[:] = new_value\n except IndexError:\n # Scalars will fail on the above\n assert(isscalar(new_value))\n # np.isscalar will fail on 0-dim arrays; isscalar works\n self = super(ShimmedTensorShared, self).__setitem__(None, new_value)", "def test_ndarray_copy(self):\r\n assert copy(numpy.ndarray) is numpy.ndarray\r\n assert deepcopy(numpy.ndarray) is numpy.ndarray", "def numpy(self):\n for key, value in self.__dict__.items():\n self.__dict__[key] = value.numpy()\n return self", "def test_numpy_arrays_not_copied(self):\n with PhysicsEngineHarness('tests/engineering-test.json') as physics_engine:\n state = physics_engine.get_state()\n\n engineering = state.engineering\n engineering.components[0].temperature = 777777.7\n self.assertEqual(engineering._array[2 * N_COMPONENTS], 777777.7)\n self.assertEqual(state.y0()[state.ENGINEERING_START_INDEX + 2 * N_COMPONENTS], 777777.7)", "def __setstate__(self, state):\n shape = state['_SharedNumpyArray__np_array'].shape\n dtype = state['_SharedNumpyArray__np_array'].dtype\n type_id = np_type_id_to_ctypes(dtype)\n self.__shared = RawArray(type_id, np.product(shape))\n self.__np_array = np.frombuffer(self.__shared, dtype=dtype).reshape(shape)\n np.copyto(self.__np_array, state['_SharedNumpyArray__np_array'])\n self.tag = None", "def reset(self) -> np.array:\n raise NotImplementedError", "def stub_out(self, old, new):\n self.useFixture(fixtures.MonkeyPatch(old, new))", "def stub_out(self, old, new):\n self.useFixture(fixtures.MonkeyPatch(old, new))", "def __array_wrap__(self, out_arr, context=None): #pylint: disable=no-self-use, unused-argument\n if out_arr.shape != (3,):\n out_arr = out_arr.view(np.ndarray)\n return out_arr", "def copy(self):\n obj = type(self)(self.a_n[:], domain=self.domain, name=self.name)\n if isinstance(obj.a_n, np.ndarray):\n obj.a_n = obj.a_n.copy()\n return obj", "def __setstate__(self, state):\n shape = state['_DoubleBufferedSharedNumpyArray__np_array1'].shape\n dtype = state['_DoubleBufferedSharedNumpyArray__np_array1'].dtype\n type_id = np_type_id_to_ctypes(dtype)\n self.__shared1 = RawArray(type_id, np.product(shape))\n self.__np_array1 = np.frombuffer(self.__shared1, dtype=dtype).reshape(shape)\n np.copyto(self.__np_array1, state['_DoubleBufferedSharedNumpyArray__np_array1'])\n self.__shared2 = RawArray(type_id, np.product(shape))\n self.__np_array2 = np.frombuffer(self.__shared2, dtype=dtype).reshape(shape)\n np.copyto(self.__np_array2, state['_DoubleBufferedSharedNumpyArray__np_array2'])\n self.__parity = state['_DoubleBufferedSharedNumpyArray__parity']", "def save(self, patch):\n internalSlices = self._get_internal_slices(patch.slices)\n self.array[internalSlices] = patch.array", "def tearDown(self):\n\n def reset_module(name, module):\n if module:\n sys.modules[name] = module\n else:\n sys.modules.pop(name, None)\n reset_module('simplejson', self.simplejson)\n reset_module('json', self.json)\n reload(protojson)", "def test_pickle():\r\n M = 
Module()\r\n M.x = (T.dmatrix())\r\n M.y = (T.dmatrix())\r\n a = T.dmatrix()\r\n M.f = Method([a], a + M.x + M.y)\r\n M.g = Method([a], a * M.x * M.y)\r\n\r\n mode = get_mode()\r\n m = M.make(x=numpy.zeros((4,5)), y=numpy.ones((2,3)), mode=mode)\r\n\r\n m_dup = cPickle.loads(cPickle.dumps(m, protocol=-1))\r\n\r\n assert numpy.all(m.x == m_dup.x) and numpy.all(m.y == m_dup.y)\r\n\r\n m_dup.x[0,0] = 3.142\r\n assert m_dup.f.input_storage[1].data[0,0] == 3.142\r\n assert m.x[0,0] == 0.0 #ensure that m is not aliased to m_dup\r\n\r\n #check that the unpickled version has the same argument/property aliasing\r\n assert m_dup.x is m_dup.f.input_storage[1].data\r\n assert m_dup.y is m_dup.f.input_storage[2].data\r\n assert m_dup.x is m_dup.g.input_storage[1].data\r\n assert m_dup.y is m_dup.g.input_storage[2].data", "def __init__(self, size, orig=None):\n if not cArray.cModule:\n cArray.cModule=ctypes.cdll.LoadLibrary(\"./arraylib.so\")\n #Arg & return types must be said explicitly, otherwise we are gonna get seg. faults when dealing with pointers.\n #pointers of 64 bit machines are longlong, if treated as int, they are truncated => seg. fault\n cArray.cModule.reserve_array.restype = ctypes.c_longlong\n cArray.cModule.reserve_array.argtypes = [ctypes.c_int]\n cArray.cModule.free_array.argtypes = [ctypes.c_longlong]\n cArray.cModule.and_array.argtypes = [ctypes.c_longlong,ctypes.c_longlong,ctypes.c_longlong,ctypes.c_int]\n cArray.cModule.or_array.argtypes = [ctypes.c_longlong,ctypes.c_longlong,ctypes.c_longlong,ctypes.c_int]\n cArray.cModule.not_array.argtypes = [ctypes.c_longlong,ctypes.c_int]\n cArray.cModule.get_element.argtypes = [ctypes.c_longlong,ctypes.c_int]\n cArray.cModule.set_element.argtypes = [ctypes.c_longlong,ctypes.c_int,ctypes.c_int]\n \n self.size=size\n self.arrayRef=cArray.cModule.reserve_array(ctypes.c_int(self.size))\n self.myCModule=cArray.cModule #on the destructor, cArray can not be accesed anymore, hence the object should store a ref to this.\n if orig != None:\n for i in range(size):\n self.__setitem__(i,orig[i])", "def load_build(self):\r\n Unpickler.load_build(self)\r\n if isinstance(self.stack[-1], NDArrayWrapper):\r\n if self.np is None:\r\n raise ImportError('Trying to unpickle an ndarray, '\r\n \"but numpy didn't import correctly\")\r\n nd_array_wrapper = self.stack.pop()\r\n array = nd_array_wrapper.read(self)\r\n self.stack.append(array)", "def _repackage_hidden(h: nd.NDArray):\n return h.detach()", "def from_numpy(self, a):\n raise NotImplementedError(\"from_numpy\")", "def _reset(self, env_id: np.ndarray) -> None:", "def restore_via_init(objt: _ty.Type[MyArray]) -> Restorer[BaseArray, MyArray]:\n return objt", "def numpy_extension():\n jsonpickle.ext.numpy.register_handlers()\n yield # control to the test function.\n jsonpickle.ext.numpy.unregister_handlers()", "def test_array_cache(self):\n cache = array_handler.ArrayCache()\n # Test if len works.\n self.assertEqual(len(cache), 0)\n arr = numpy.zeros(100, float)\n varr = vtk.vtkFloatArray()\n # test contains\n self.assertEqual(varr not in cache, True)\n cache.add(varr, arr)\n self.assertEqual(len(cache), 1)\n self.assertEqual(varr in cache, True)\n \n # Test the get method.\n self.assertEqual(cache.get(varr) is arr, True)\n\n # Test if the cache is cleared when the array is deleted.\n del varr\n self.assertEqual(len(cache), 0)", "def __array_wrap__(self, result, **kwargs):\n\n return self.__class__(result, self.shape)", "def set_value(self, value, borrow=False):\r\n if not borrow:\r\n #TODO: check for 
cuda_ndarray type\r\n if not isinstance(value, numpy.ndarray):\r\n # in case this is a cuda_ndarray, we copy it\r\n value = copy.deepcopy(value)\r\n self.container.value = value # this will copy a numpy ndarray\r", "def reset(self) -> List[int]:\n self.array = deepcopy(self.original)\n return self.array", "def copy(self, old):\n self.h = old.h\n self.L_h = old.L_h\n\n self.d = np.arange(1,self.L_h+1)\n\n self.it = old.it\n self.N_first = old.N_first\n self.la = old.la\n self.a = old.a\n self.e = np.copy(old.e)\n self.e2 = old.e2\n\n self.P = old.P\n self.alpha_g = np.copy(old.alpha_g)\n self.A = np.copy(old.A)\n self.sigma2 = old.sigma2\n self.mu = np.copy(old.mu)\n self.R = np.copy(old.R)\n\n self.b = np.copy(old.mu)\n self.w = np.copy(old.w)\n self.pie = np.copy(old.pie)\n self.pi = np.copy(old.pi)\n self.p = np.copy(old.p)\n\n self.mu_pad = np.copy(old.mu_pad)\n self.M_mu = np.copy(old.M_mu)\n self.R_pad = np.copy(old.R_pad)\n #self.M_R = np.copy(old.M_R)\n\n self.half_pie_var = np.copy(old.half_pie_var)\n self.half_pie_var_pad = np.copy(old.half_pie_var_pad)\n self.M_half_pie_var_pad = np.copy(old.M_half_pie_var_pad)\n self.pie_var = np.copy(old.pie_var)\n\n self.rev_A = np.copy(old.rev_A)\n\n self.LP = old.LP\n self.LP_list = old.LP_list\n self.la_list = old.la_list\n self.a_list = old.a_list\n self.sigma2_list = old.sigma2_list\n self.A_list = old.A_list", "def _numpy_transform(fqdn, value):\n import numpy\n return _package_transform(numpy, fqdn, value)", "def reset(self):\r\n self.A = np.zeros(self.A.shape)", "def test_setitem_rightvalue_ndarray_fails():\n a = numpy.arange(3 * 4 * 5)\n a.resize((3, 4, 5))\n a = theano._asarray(a, dtype='float32')\n _a = cuda_ndarray.CudaNdarray(a)\n\n b = theano._asarray([7, 8, 9, 10], dtype='float32')\n _b = cuda_ndarray.CudaNdarray(b)\n b5 = theano._asarray([7, 8, 9, 10, 11], dtype='float32')\n _b5 = cuda_ndarray.CudaNdarray(b)\n\n # attempt to assign the ndarray b with setitem\n _a[:, :, 1] = _b\n a[:, :, 1] = b\n assert numpy.allclose(numpy.asarray(_a), a)\n\n # test direct transfert from numpy to contiguous region\n # attempt to assign the ndarray b with setitem\n # same number of dim\n mat = numpy.random.rand(4, 5).astype('float32')\n _a[2, :, :] = mat\n a[2, :, :] = mat\n assert numpy.allclose(numpy.asarray(_a), a)\n\n # without same number of dim\n try:\n _a[0, :, :] = mat\n #a[0, :, :] = mat\n #assert numpy.allclose(numpy.asarray(_a), a)\n except ValueError as e:\n pass\n\n # test direct transfert from numpy with broadcast\n _a[0, :, :] = b5\n a[0, :, :] = b5\n assert numpy.allclose(numpy.asarray(_a), a)\n\n # test direct transfert from numpy to not contiguous region\n # attempt to assign the ndarray b with setitem\n _a[:, :, 2] = b\n a[:, :, 2] = b\n assert numpy.allclose(numpy.asarray(_a), a)", "def test_setitem_rightvalue_ndarray_fails():\r\n a = numpy.arange(3 * 4 * 5)\r\n a.resize((3, 4, 5))\r\n a = theano._asarray(a, dtype='float32')\r\n _a = cuda_ndarray.CudaNdarray(a)\r\n\r\n b = theano._asarray([7, 8, 9, 10], dtype='float32')\r\n _b = cuda_ndarray.CudaNdarray(b)\r\n b5 = theano._asarray([7, 8, 9, 10, 11], dtype='float32')\r\n _b5 = cuda_ndarray.CudaNdarray(b)\r\n\r\n # attempt to assign the ndarray b with setitem\r\n _a[:, :, 1] = _b\r\n a[:, :, 1] = b\r\n assert numpy.allclose(numpy.asarray(_a), a)\r\n\r\n #test direct transfert from numpy to contiguous region\r\n # attempt to assign the ndarray b with setitem\r\n # same number of dim\r\n mat = numpy.random.rand(4, 5).astype('float32')\r\n _a[2, :, :] = mat\r\n a[2, :, :] = 
mat\r\n assert numpy.allclose(numpy.asarray(_a), a)\r\n\r\n # without same number of dim\r\n try:\r\n _a[0, :, :] = mat\r\n #a[0, :, :] = mat\r\n #assert numpy.allclose(numpy.asarray(_a), a)\r\n except ValueError, e:\r\n pass\r\n\r\n #test direct transfert from numpy with broadcast\r\n _a[0, :, :] = b5\r\n a[0, :, :] = b5\r\n assert numpy.allclose(numpy.asarray(_a), a)\r\n\r\n #test direct transfert from numpy to not contiguous region\r\n # attempt to assign the ndarray b with setitem\r\n _a[:, :, 2] = b\r\n a[:, :, 2] = b\r\n assert numpy.allclose(numpy.asarray(_a), a)", "def __init__(self, strict=True):\n self.strict = strict\n self.testwithoutnp = test_without_numpy()", "def _setModule(self, module):\n self._module = module\n # copy the original module for exploration\n self.n_values = deepcopy(module)\n self.n_values._params[:] = 0", "def _reset_module(m):\n raise NotImplementedError", "def __init__(self):\n self._array = None", "def copy(self):\n dyn = type(self)(self._hyperparams)\n dyn.Fm = np.copy(self.Fm)\n dyn.fv = np.copy(self.fv)\n dyn.dyn_covar = np.copy(self.dyn_covar)\n return dyn", "def copy(self):\n dyn = type(self)(self._hyperparams)\n dyn.Fm = np.copy(self.Fm)\n dyn.fv = np.copy(self.fv)\n dyn.dyn_covar = np.copy(self.dyn_covar)\n return dyn", "def _resize(self, cap): # nonpublic utitity\n B = self._make_array(cap) # new (bigger) array\n for k in range(self._size): # for each existing value\n B[k] = self._Array[k]\n self._Array = B # use the bigger array\n self._capacity = cap", "def unpatch(cls):\n xml.dom.minidom.Element = cls._original_element", "def __array_interface__(self):\n ...", "def __array_interface__(self):\n ...", "def test_get_value(self):\r\n dtype = self.dtype\r\n if dtype is None:\r\n dtype = theano.config.floatX\r\n\r\n rng = numpy.random.RandomState(utt.fetch_seed())\r\n x_orig = numpy.asarray(rng.uniform(0,1,[2,4]),dtype=dtype)\r\n x_cast = self.cast_value(x_orig)\r\n if self.shared_constructor_accept_ndarray:\r\n x_shared = self.shared_constructor(x_orig, borrow = False)\r\n assert isinstance(x_shared.get_value(), x_orig.__class__)\r\n\r\n x_shared = self.shared_constructor(x_cast, borrow = False)\r\n assert isinstance(x_shared.get_value(), x_cast.__class__)", "def _patch_implementation(self, original, *args, **kwargs):\n pass", "def _original_data(self, data: np.ndarray):\n if self._raw_data is None:\n self._raw_data = data", "def tearDown(self):\n builtins.__import__ = self.original_imports", "def reset_state(self):\n self.s = np.copy(self.s_i)", "def ma2np(self):\n try:\n self.mask = self.Zarr.mask\n self.Zarr = ma.getdata(self.Zarr)\n except: print 'Data array is already numpy array'\n return", "def get_array_module(arr):\n # TODO: also check for __array_interface__ attribute and not\n # __cuda_array_interface__?\n if have_cupy:\n return cupy.get_array_module(arr)\n else:\n return np", "def __getstate__(self):\n state = self.__dict__.copy()\n del state['_DoubleBufferedSharedNumpyArray__shared1']\n del state['_DoubleBufferedSharedNumpyArray__shared2']\n return state", "def clone(self):", "def copy(self, other):\n assert isinstance(other, Xray)\n try:\n self.time = np.copy(other.time)\n self.Pbrem = np.copy(other.Pbrem)\n self.bang = np.copy(other.bang)\n except:\n pass", "def __init__(self, orig):\n self.orig = orig", "def _copy_from_NdArray(vecObj, NdArray):\n vecObj.getNdArray()[:] = NdArray\n return", "def with_numpy(func):\r\n return func", "def reindex(self):\n if self.channels is None:\n return\n\n self.data = None\n\n keep_indices = 
self.channels.new_indices_in_old()\n self.channels.reindex()\n\n if self.parms is not None:\n self.parms = self.integration.get_dependents(\n self.get_config_name())\n\n channel_attributes = self.channel_dependent_attributes\n\n for attribute, value in self.__dict__.items():\n if attribute not in channel_attributes:\n continue\n if not isinstance(value, np.ndarray):\n continue\n setattr(self, attribute, value[keep_indices])", "def reset(self):\n rich_obs = super(ColoredCostmapRandomAisleTurnEnv, self).reset()\n obs = rich_obs.costmap.get_data() # pylint: disable=no-member\n obs = np.expand_dims(obs, -1)\n return obs", "def clone_rand(self):", "def create_new_array(self,shape=(26,26),replace=False,replace_with=-1):\r\n if replace==False:\r\n return np.zeros(shape)\r\n else:\r\n array=np.zeros(shape)\r\n for i in range(0,len(array)):\r\n array[i]=replace_with\r\n return array", "def clone_zero(self):", "def reset(self):\n self.mat = np.zeros(9).reshape(3,3).astype(np.int32)\n return self.mat", "def build_remotely(new_args):\n log.debug(\"Building StorageNumpy object with %s\", new_args)\n return StorageNumpy(new_args.storage_id)", "def wrap(b):\n if b is None:\n return b\n b = torch.stack(b, 0).t().contiguous()\n if self.cuda:\n b = b.cuda()\n # b = Variable(b, volatile=self.volatile)\n b = Variable(b)\n return b", "def __del__(self) -> None:\n if not self._is_from_numpy_array and image_utils:\n # __del__ can be executed during interpreter shutdown, therefore\n # image_utils may not be available.\n # See https://docs.python.org/3/reference/datamodel.html#object.__del__\n image_utils.image_data_free(self._image_data)", "def originalData(self): \n self.__exampleIndices = array(list(range(0, self.__numExamples)))", "def set_doctest_env(doctest_namespace):\n doctest_namespace['numpy'] = numpy", "def original(self) -> Any:\n raise NotImplementedError", "def test_renderer_works_correctly_with_numpy_array(self):\n data = numpy.array([1])\n rendered = self.renderer.render(\n data=data, media_type=\"application/json\", renderer_context={}\n )\n reloaded = orjson.loads(rendered)\n\n self.assertEqual(reloaded, data)", "def assign(ary, out):\n\n from . import _bh\n\n if not np.isscalar(ary):\n (ary, out) = broadcast_arrays(ary, out)[0]\n # We ignore self assignments\n if _bh.same_view(ary, out):\n return\n\n # Assigning empty arrays doesn't do anything\n if hasattr(ary, \"size\"):\n if ary.size == 0:\n return\n if hasattr(out, \"size\"):\n if out.size == 0:\n return\n\n # We use a tmp array if the in-/out-put has memory conflicts\n if overlap_conflict(out, ary):\n tmp = array_create.empty_like(out)\n assign(ary, tmp)\n return assign(tmp, out)\n\n if bhary.check(out):\n _bh.ufunc(UFUNCS[\"identity\"].info['id'], (out, ary))\n else:\n if bhary.check(ary):\n if \"BH_SYNC_WARN\" in os.environ:\n import warnings\n warnings.warn(\"BH_SYNC_WARN: Copying the array to NumPy\", RuntimeWarning, stacklevel=2)\n ary = ary.copy2numpy()\n out[...] 
= ary", "def test_make_legacy_dataset_from_numpy():\n current_dir = os.path.dirname(os.path.abspath(__file__))\n # legacy_dataset is a dataset in the legacy format kept around for testing purposes.\n data_dir = os.path.join(current_dir, \"legacy_dataset\")\n dataset = dc.data.DiskDataset(data_dir)\n assert dataset.legacy_metadata\n assert len(dataset.metadata_df.columns) == 4\n assert list(dataset.metadata_df.columns) == ['ids', 'X', 'y', 'w']\n\n # Test constructor reload works for legacy format\n dataset2 = dc.data.DiskDataset(dataset.data_dir)\n assert dataset2.legacy_metadata\n assert len(dataset2.metadata_df.columns) == 4\n assert list(dataset2.metadata_df.columns) == ['ids', 'X', 'y', 'w']", "def copy(self, copy_meta_data=False):\n if self.meta_data is not None:\n if copy_meta_data:\n new_meta_data = (self.meta_data[0].copy(),\n self.meta_data[1].copy())\n else:\n new_meta_data = self.meta_data\n else:\n new_meta_data = None\n return xndarray(self.data.copy(), self.axes_names[:],\n self.axes_domains.copy(),\n self.value_label, new_meta_data)", "def testOverrideModule(self):\n\t\tc = Controller()\n\t\tc.override(os, 'getsid', classmethod(c.mock()))\n\t\tc.restore()", "def testOverrideModule(self):\n\t\tc = Controller()\n\t\tc.override(os, 'getsid', classmethod(c.mock()))\n\t\tc.restore()", "def reinit_pose(self, Xnew):\n self.X = sym.Matrix([[Xnew[0]], [Xnew[1]], [Xnew[2]]])", "def test02(self):\n b = bcolz.arange(self.N, rootdir=self.rootdir)\n b.resize(0)\n a = np.arange(0)\n # print \"b->\", `b`\n assert_array_equal(a, b[:], \"Arrays are not equal\")", "def reset(self):\n\n # Implement your reset method here\n # return observation\n self._state = np.random.uniform(-1, 1, size=(2,))\n observation = np.copy(self._state)\n return observation", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_2i3I().pack(_x.manip_return_code, _x.object_grabber_return_code, _x.des_gripper_pose.header.seq, _x.des_gripper_pose.header.stamp.secs, _x.des_gripper_pose.header.stamp.nsecs))\n _x = self.des_gripper_pose.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_7di3I().pack(_x.des_gripper_pose.pose.position.x, _x.des_gripper_pose.pose.position.y, _x.des_gripper_pose.pose.position.z, _x.des_gripper_pose.pose.orientation.x, _x.des_gripper_pose.pose.orientation.y, _x.des_gripper_pose.pose.orientation.z, _x.des_gripper_pose.pose.orientation.w, _x.object_finder_return_code, _x.object_pose.header.seq, _x.object_pose.header.stamp.secs, _x.object_pose.header.stamp.nsecs))\n _x = self.object_pose.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_7d().pack(_x.object_pose.pose.position.x, _x.object_pose.pose.position.y, _x.object_pose.pose.position.z, _x.object_pose.pose.orientation.x, _x.object_pose.pose.orientation.y, _x.object_pose.pose.orientation.z, _x.object_pose.pose.orientation.w))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def reset(self):\n newPerm = 
randomUtils.randomPermutation(self.xArray.tolist(),self)\n self.pot = np.asarray(newPerm)", "def _copy_data_from(self, original):\n raise NotImplementedError()", "def test_op_setslice_array_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=float)\n b = a + 2.5\n expect = numpy.empty_like(b)\n old_b = numpy.empty_like(b)\n expect[:] = b[:]\n old_b[:] = b[:]\n\n offl_a = stream.bind(a)\n offl_a[:] = b\n offl_a.update_host()\n stream.sync()\n\n self.assertTrue((b == old_b).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_b))\n self.assertTrue((a == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(a, expect))", "def test_as_float_array():\n X = np.ones((3, 10), dtype=np.int32)\n X = X + np.arange(10, dtype=np.int32)\n # Checks that the return type is ok\n X2 = as_float_array(X, copy=False)\n np.testing.assert_equal(X2.dtype, np.float32)\n # Another test\n X = X.astype(np.int64)\n X2 = as_float_array(X, copy=True)\n # Checking that the array wasn't overwritten\n assert as_float_array(X, False) is not X\n # Checking that the new type is ok\n np.testing.assert_equal(X2.dtype, np.float64)\n # Here, X is of the right type, it shouldn't be modified\n X = np.ones((3, 2), dtype=np.float32)\n assert as_float_array(X, copy=False) is X", "def _datacopied(arr, original):\n if arr is original:\n return False\n if not isinstance(original, numpy.ndarray) and hasattr(original, '__array__'):\n return False\n return arr.base is None", "def test_override_array(self):\n i, j, k, l = dimify('i j k l')\n shape = tuple(d.size for d in (i, j, k, l))\n a = symbol(name='a', dimensions=(i, j, k, l), value=2.)\n a1 = np.zeros(shape=shape, dtype=np.float32) + 3.\n a2 = np.zeros(shape=shape, dtype=np.float32) + 4.\n op = Operator(Eq(a, a + 3))\n op()\n op(a=a1)\n op(a=a2)\n shape = [d.size for d in [i, j, k, l]]\n\n assert(np.allclose(a.data, np.zeros(shape) + 5))\n assert(np.allclose(a1, np.zeros(shape) + 6))\n assert(np.allclose(a2, np.zeros(shape) + 7))", "def test_convert_numpy_to_libsvm(self):\n\n file = tempfile.NamedTemporaryFile(delete=False)\n\n # write to temporary files\n write_data_to_xlearn_format(self.X, self.y, file.name)\n\n # load data back and compare if they are the same as original data\n X_true, y_true = load_svmlight_file(file.name)\n file.close()\n if os.path.exists(file.name):\n os.remove(file.name)\n\n assert np.all(np.isclose(self.X, X_true.todense()))\n assert np.all(self.y.ravel() == y_true.ravel())", "def _resize(self, new_cap):\n new_array = ba(new_cap)\n\n for i in range(self.count):\n new_array[i] = self.the_array[i]\n\n self.the_array = new_array\n self.capacity = new_cap", "def teardown_simulate(self):\n self.positions = self.calibrated_positions", "def array(self, src) -> None:\n self.set_array(src)", "def setData(self,newdata):\n self.record(inspect.currentframe())\n if np.shape(newdata) == np.shape(self.data):\n self.data = np.copy(newdata)", "def _datacopied(arr, original):\n if arr is original:\n return False\n if not isinstance(original, np.ndarray) and hasattr(original, '__array__'):\n return False\n return arr.base is None", "def copy(a):\n return array(a, copy=True)", "def test_op_isub_offload_array_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=float)\n o = a + 1.3\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n 
old_o[:] = o[:]\n expect = a - o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_a -= offl_o\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old_a).all(),\n \"Array operand must be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def replace(self, old, new, count=None):\n return asarray(replace(self, old, new, count))", "def opaque_module(self, modobj):\n for var, val in modobj.__dict__.iteritems():\n if isinstance(val, type(Lumpy)):\n self.opaque_class(val)", "def wrap(self, ds):\n\n self.ds = ds\n self._attrs = ds.attrs\n self.ds.attrs = {}\n\n print('Wrapped an existing xarray dataset. Class attributes taken from the dataset. Dataset attrs cleared.')", "def backup_shape(self):\n\n self.shape_backup = np.copy(self.shape)", "def __init__(self):\n self.array = None\n pass", "def test_op_isub_array_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=float)\n o = a + 1.3\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a - o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_a -= o\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old_a).all(),\n \"Array operand must be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def clone(self, reflect_y = False):\n clone = copy.deepcopy(self)\n if reflect_y:\n # change the locations of all points in the test_clone\n for mass in clone.all_mass_objects:\n mass.geometry = mass.geometry.reflect_y()\n return clone", "def reset(self):\n self.observation = self.initial_observation.copy()\n self.sim = copy.deepcopy(self.initial_sim)\n return self.observation" ]
[ "0.60902596", "0.58744705", "0.5683738", "0.5663783", "0.5609355", "0.5535437", "0.5494269", "0.54620075", "0.54172635", "0.5362005", "0.5362005", "0.5336442", "0.5305379", "0.530221", "0.5182793", "0.51734614", "0.5172819", "0.51510024", "0.51288235", "0.5113124", "0.50953704", "0.5092387", "0.5091938", "0.5086598", "0.5063424", "0.5052794", "0.50387406", "0.5032744", "0.5023723", "0.5018826", "0.50171524", "0.5014905", "0.50050807", "0.50023127", "0.50002116", "0.5000138", "0.49917677", "0.49892923", "0.49892923", "0.49869993", "0.49840617", "0.49811664", "0.49811664", "0.49744594", "0.49721897", "0.4967865", "0.49677482", "0.4964082", "0.4959114", "0.49555597", "0.4943118", "0.4937685", "0.49376273", "0.49346533", "0.49269626", "0.4926928", "0.49170253", "0.4906517", "0.48968396", "0.48966014", "0.48909143", "0.487359", "0.4872992", "0.4869792", "0.485289", "0.48502788", "0.48478004", "0.48441544", "0.48373914", "0.48279315", "0.48239258", "0.48229647", "0.4812183", "0.4812183", "0.4810512", "0.4809325", "0.4799855", "0.47852504", "0.47849494", "0.4776661", "0.47759104", "0.47722167", "0.47691604", "0.47690588", "0.4762699", "0.4759289", "0.47537994", "0.47537208", "0.47492984", "0.47490922", "0.4746832", "0.47460148", "0.47449166", "0.4736617", "0.47323525", "0.47309476", "0.47300386", "0.47293606", "0.4727868", "0.47079876" ]
0.765654
0
This method injects the providers into the faker instance.
def add_providers(self): str_providers = PROVIDERS[0] # Providers, called by name live_providers = PROVIDERS[1] # Providers, provided as a live module for providers in PROVIDERS: # Iterate over the types of providers for provider in providers: # Iterate over all the methods # Inject those into faker, and swap the numpy instance self.fake.add_faker(self._swap_numpy(provider[0]), provider[1])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, config: Config) -> None:\n self.config = config\n\n faker_config = self.config.faker\n self.faker = Faker(locale=faker_config.locale)\n\n self.fakes = {}", "def setup_provider(self):\n pass", "def add_providers_deped(self):\n # This gives direct access to the module's main class called Provider.\n klasses = [\n provider.Provider for provider in PROVIDERS] # Accessing the PROVIDERS. Check this method out, see how it operates.\n for k in klasses:\n self.fake.add_provider(k)", "def faker() -> Faker:\n\n return Faker()", "def fake_init():\n return Faker()", "def create_providers(cls) -> Iterable['BaseProvider']:\n return []", "def _load_providers(self, **kwargs):\n return super()._load_providers(providers=\"TIProviders\", **kwargs)", "def setUp(self):\n self.factory = PhoneFactory()", "def _fixture_setup(self):\n pass", "def addAllFactories(self) -> None:\n ...", "def setUp(self):\n patientgen = PatientsGenerator(0, 1, 0, 'a')\n self.record = patientgen.data.find('record')\n self.gender_sex = patientgen.gender_sex_list\n self.ethnicities = patientgen.ethnicity_list\n # self.female_names = patientgen.data_generator.first_names_female\n # self.male_names = patientgen.data_generator.first_names_male\n # self.last_names = patientgen.data_generator.last_names", "def register(self, provider):\n for entry in dir(provider):\n try:\n provider_function = type(provider).__dict__[entry]\n factory_provider = getattr(provider_function, 'factory_provider', None)\n if factory_provider:\n provided_type, singleton = factory_provider\n if callable(provider_function): # A function or member function\n # if it's a bound method, this will get the bound version\n provider_member = getattr(provider, entry)\n self.add_factory(provided_type, provider_member, singleton)\n elif hasattr(provider_function, '__get__'):\n # this is a property or non-callable descriptor:\n self.add_factory(\n provided_type,\n functools.partial(provider_function.__get__, provider, provider),\n singleton,\n )\n else:\n self.add_service(provided_type, provider_function)\n except KeyError:\n pass", "def run_providers(self, argv):\n\n for name, provider in self.providermanager:\n provider = provider(self)\n self.produce_output(provider.title,\n provider.location,\n provider.run(argv))", "def _build_observation_providers(self) -> Dict[str, ObservationProvider]:\n pass", "def setUp(self):\n self.users = [UserFactory.create() for i in range(5)]", "def setUpClass(cls):\n super(EmotionTest, cls).setUpClass()\n user = UserFactory(username='dan', email='dan@dan.net')\n user.set_password('password')\n user.first_name = 'Dan'\n user.last_name = 'Theman'\n user.save()\n cls.dan = user\n\n for _ in range(10):\n user = UserFactory.create()\n user.set_password(factory.Faker('password'))\n user.save()", "def initialize_client():\n logging.info('Initializing Sendgrid provider')\n sendgrid_authentication, sendgrid_username = get_provider_credentials('sendgrid') \n sendgrid_provider = SendGridProvider(sendgrid_authentication, sendgrid_username)\n\n logging.info('Initializing Mailgun provider')\n mailgun_authentication, mailgun_domain = get_provider_credentials('mailgun')\n mailgun_provider = MailGunProvider(mailgun_authentication, mailgun_domain)\n\n logging.info('Registering providers')\n client.register_provider(sendgrid_provider, 10)\n client.register_provider(mailgun_provider, 20)", "def _setup_random_gen(\n self,\n probabilities: List[float],\n random_nums: List[int]\n ) -> None:\n RandomGen._probabilities = probabilities\n 
RandomGen._random_nums = random_nums\n self._random_gen = RandomGen()", "def setup(self):\n for gen in self._generators:\n gen.setup()", "def _load_drivers(self):\n self.drivers, self.default_provider = service_base.load_drivers(\n taas_consts.TAAS, self)", "def provider_setup(cls, args, config):\n if len(args) < 1:\n print \"USAGE: molns provider setup name\"\n print \"\\tCreates a new provider with the given name.\"\n return\n # check if provider exists\n try:\n provider_obj = config.get_object(args[0], kind='Provider')\n except DatastoreException:\n # ask provider type\n print \"Select a provider type:\"\n for n, p in enumerate(VALID_PROVIDER_TYPES):\n print \"\\t[{0}] {1}\".format(n, p)\n while True:\n try:\n provider_ndx = int(raw_input_default(\"Enter the number of type:\", default='0'))\n provider_type = VALID_PROVIDER_TYPES[provider_ndx]\n break\n except (ValueError, IndexError):\n pass\n logging.debug(\"Provider type '{0}'\".format(provider_type))\n # Create provider\n try:\n provider_obj = config.create_object(name=args[0], ptype=provider_type, kind='Provider')\n except DatastoreException as e:\n logging.exception(e)\n print e\n return\n print \"Enter configuration for provider {0}:\".format(args[0])\n setup_object(provider_obj)\n config.save_object(provider_obj, kind='Provider')\n\n cls.provider_initialize(args[0], config)", "def install_providers():\n host = env.host_string\n providers = get_providers(host)\n for provider in providers.values():\n if getattr(provider, 'manager', None) is not None:\n provider.manager.install()\n\n provider.install()", "def populate_fixtures():\n languages()\n words()", "def setup(self):\n for gen in self._feature_stats_generators:\n gen.setup()", "def test__get_faker_anonymize_list(self):\n # Run\n transformer = Mock()\n transformer.anonymize = ['email']\n\n result = CategoricalTransformer._get_faker(transformer)\n\n # Asserts\n self.assertEqual(\n result.__name__,\n 'faker',\n \"Expected faker function\"\n )", "def setUp(self):\n UsuarioFactory.create()\n self.user = Usuario.objects.get(username='admin')\n ProyectoFactory.lider_proyecto = self.user\n ProyectoFactory.create()\n FaseFactory.proyecto = Proyecto.objects.get(nombre='Proyecto01')\n FaseFactory.create()\n TipoItemFactory.fase = Fase.objects.get(nombre='Fase01')\n TipoItemFactory.create()\n self.factory = RequestFactory()", "async def setup(self, context: InjectionContext):", "def setUp(self):\n self.factory = RequestFactory()\n StaffProfile.objects.rebuild()\n self.manager = mommy.make(\n \"auth.User\", first_name=\"Jane\", last_name=\"Ndoe\", email=\"jane@example.com\"\n )\n self.user = mommy.make(\n \"auth.User\", first_name=\"Bob\", last_name=\"Ndoe\", email=\"bob@example.com\"\n )\n manager_mommy = Recipe(StaffProfile, lft=None, rght=None, user=self.manager)\n staff_mommy = Recipe(StaffProfile, lft=None, rght=None, user=self.user)\n self.manager_profile = manager_mommy.make()\n self.staffprofile = staff_mommy.make()", "def setup(self, registers):\n \"\"\" tasks before any generation functions are called \"\"\"\n pass", "def setUp(self):\n password = factory.Faker('pystr', min_chars=8, max_chars=16)\n self.user = UserFactory.create(password=password)\n self.token = Token.objects.create(user=self.user)\n self.factory = APIRequestFactory()\n\n # set up the data\n store = StoreFactory(user=self.user)\n material = MaterialFactory()\n self.material_stock = MaterialStockFactory(\n store=store, material=material, current_capacity=20, max_capacity=100\n )", "def setup_method(cls):\n 
seed()", "def _initialize(self):\n configured_providers = self.domain.config[\"DATABASES\"]\n provider_objects = {}\n\n if configured_providers and isinstance(configured_providers, dict):\n if \"default\" not in configured_providers:\n raise ConfigurationError(\"You must define a 'default' provider\")\n\n for provider_name, conn_info in configured_providers.items():\n provider_full_path = conn_info[\"PROVIDER\"]\n provider_module, provider_class = provider_full_path.rsplit(\n \".\", maxsplit=1\n )\n\n provider_cls = getattr(\n importlib.import_module(provider_module), provider_class\n )\n provider = provider_cls(provider_name, self.domain, conn_info)\n\n provider_objects[provider_name] = provider\n\n self._providers = provider_objects", "def test_register(self):\n self._configure_testshib_provider()\n self._test_register()", "def fixture_andy():\n yield Person(name=\"Andy\", age=12, hobbies=[\"Star Wars\", \"Bicycles\"])", "def setUp(self):\n test_helpers.patch_environ(self)\n test_helpers.patch(self, [\n 'clusterfuzz._internal.bot.fuzzers.engine_common.decide_with_probability'\n ])\n self.mock.decide_with_probability.return_value = True\n\n data = []\n\n strategy1 = data_types.FuzzStrategyProbability()\n strategy1.strategy_name = 'random_max_len,value_profile,'\n strategy1.probability = 1\n strategy1.engine = 'libFuzzer'\n data.append(strategy1)\n ndb.put_multi(data)\n\n distribution = fuzz_task.get_strategy_distribution_from_ndb()\n\n environment.set_value('USE_BANDIT_STRATEGY_SELECTION', True)\n environment.set_value('STRATEGY_SELECTION_DISTRIBUTION', distribution)", "def setUp(self):\n self.ec = EntryChanger()\n self.ec.first_name = 'Bob'\n self.ec.last_name = 'Harvey'\n self.ec.all_names = True", "def setUp(self):\n self.manager, self.proxy = tests.utils.setup_xmlrpc()\n self.proxy.provider.register(\n PROVIDER_ID, USERNAME, PASSWORD, URL, TENANT, PROVIDER_TYPE\n )", "def setUp(self):\n\n self.client = get_client()\n self.fake = Faker()\n self.sim = Simulate()\n\n self.generate_authorizations(10)", "def setup(self):\n log.msg(\"Fetching required net test inputs...\")\n for net_test_loader in self.netTestLoaders:\n yield self.fetchAndVerifyNetTestInput(net_test_loader)\n\n if self.bouncer:\n log.msg(\"Looking up test helpers...\")\n yield self.lookupTestHelpers()", "def setUp(self):\n self.validator = Validator()\n self.users = Users()", "def test__get_faker_anonymize_not_tuple_or_list(self):\n # Run\n transformer = Mock()\n transformer.anonymize = 'email'\n\n result = CategoricalTransformer._get_faker(transformer)\n\n # Asserts\n self.assertEqual(\n result.__name__,\n 'faker',\n \"Expected faker function\"\n )", "def get_factory():", "def useSystemRandomSeeds(process):\n from IOMC.RandomEngine.RandomServiceHelper import RandomNumberServiceHelper\n randSvc = RandomNumberServiceHelper(process.RandomNumberGeneratorService) \n randSvc.populate()\n\n return process", "def generator_setup():\n PaaSPureGenerator()", "def parameter_providers(self, parameter_providers):\n\n self._parameter_providers = parameter_providers", "def _fetch_providers(self, looking_for, providers=None):\n if providers is None:\n providers = self._reverse_mapping.get(looking_for, [])\n default_providers = []\n atom_providers = []\n for p in providers:\n if p.name in (_TRANSIENT_PROVIDER, self.injector_name):\n default_providers.append(p)\n else:\n atom_providers.append(p)\n return default_providers, atom_providers", "def setUp(self):\n self.factory = RequestFactory()\n with 
patch(\"bookwyrm.suggested_users.rerank_suggestions_task.delay\"), patch(\n \"bookwyrm.activitystreams.populate_stream_task.delay\"\n ), patch(\"bookwyrm.lists_stream.populate_lists_task.delay\"):\n self.local_user = models.User.objects.create_user(\n \"mouse@your.domain.here\",\n \"mouse@mouse.com\",\n \"password\",\n local=True,\n localname=\"mouse\",\n two_factor_auth=False,\n )\n self.rat = models.User.objects.create_user(\n \"rat@your.domain.here\",\n \"rat@rat.com\",\n \"password\",\n local=True,\n localname=\"rat\",\n )\n self.badger = models.User.objects.create_user(\n \"badger@your.domain.here\",\n \"badger@badger.com\",\n \"password\",\n local=True,\n localname=\"badger\",\n two_factor_auth=True,\n )\n self.anonymous_user = AnonymousUser\n self.anonymous_user.is_authenticated = False\n models.SiteSettings.objects.create(id=1, require_confirm_email=False)", "def setUp(self) -> None:\n\n self.federal_client = FederalRegister()", "def fixture_pandy():\n yield Person(name=\"Pandy\", age=12, hobbies=[\"Fortnite\"])", "def setUp(self):\n guid = self.__class__.__name__ + '-' + str(uuid.uuid4())\n self.flavor_name = guid + 'name'\n\n self.nova = nova_utils.nova_client(self.os_creds, self.os_session)\n\n # Initialize for cleanup\n self.flavor_creator = None", "def provider(name):\n def wrapper(cls):\n def wrapped(init):\n def register_event_callbacks(self):\n # NOTE(morganfainberg): A provider who has an implicit\n # dependency on other providers may utilize the event callback\n # mechanism to react to any changes in those providers. This is\n # performed at the .provider() mechanism so that we can ensure\n # that the callback is only ever called once and guaranteed\n # to be on the properly configured and instantiated backend.\n if not hasattr(self, 'event_callbacks'):\n return\n\n if not isinstance(self.event_callbacks, dict):\n msg = _('event_callbacks must be a dict')\n raise ValueError(msg)\n\n for event in self.event_callbacks:\n if not isinstance(self.event_callbacks[event], dict):\n msg = _('event_callbacks[%s] must be a dict') % event\n raise ValueError(msg)\n for resource_type in self.event_callbacks[event]:\n # Make sure we register the provider for each event it\n # cares to call back.\n callbacks = self.event_callbacks[event][resource_type]\n if not callbacks:\n continue\n if not hasattr(callbacks, '__iter__'):\n # ensure the callback information is a list\n # allowing multiple callbacks to exist\n callbacks = [callbacks]\n notifications.register_event_callback(event,\n resource_type,\n callbacks)\n\n def __wrapped_init__(self, *args, **kwargs):\n \"\"\"Initialize the wrapped object and add it to the registry.\"\"\"\n init(self, *args, **kwargs)\n REGISTRY[name] = self\n register_event_callbacks(self)\n\n resolve_future_dependencies(__provider_name=name)\n\n return __wrapped_init__\n\n cls.__init__ = wrapped(cls.__init__)\n _factories[name] = cls\n return cls\n return wrapper", "def setUp(self):\n self.family = Family()\n self.decoder = Decoder()\n self.data1 = ['Atya', 'Sister-In-Law']\n self.data2 = ['Satya', 'Ketu', 'Male']", "async def init_provider(self):\n self.dsp_name = \"OpenStack\"\n await self._provider.init(image_names=self.config[\"images\"].values())", "def registered_providers():\n return list(_DEFAULT_PROVIDER.providers)", "def _register_factory(self):\n for name, info in self._plugins.items():\n if info['priority']:\n factory = getattr(info['plugin'], 'factory', None)\n if callable(factory):\n registry[info['factory']] = info['plugin'].factory\n 
registry.freeze()", "def setup_platform(hass, config, add_entities, discovery_info=None):\n _LOGGER.debug(\"Initializing fritzbox temperature sensors\")\n devices = []\n fritz_list = hass.data[FRITZBOX_DOMAIN]\n\n for fritz in fritz_list:\n device_list = fritz.get_devices()\n for device in device_list:\n if (device.has_temperature_sensor\n and not device.has_switch\n and not device.has_thermostat):\n devices.append(FritzBoxTempSensor(device, fritz))\n\n add_entities(devices)", "def setUp(self):\n test_helpers.patch_environ(self)\n\n data = []\n\n strategy1 = data_types.FuzzStrategyProbability()\n strategy1.strategy_name = 'fork,corpus_subset,'\n strategy1.probability = 0.33\n strategy1.engine = 'libFuzzer'\n data.append(strategy1)\n\n strategy2 = data_types.FuzzStrategyProbability()\n strategy2.strategy_name = 'random_max_len,value_profile,'\n strategy2.probability = 0.34\n strategy2.engine = 'libFuzzer'\n data.append(strategy2)\n ndb.put_multi(data)\n\n distribution = fuzz_task.get_strategy_distribution_from_ndb()\n\n environment.set_value('USE_BANDIT_STRATEGY_SELECTION', True)\n environment.set_value('STRATEGY_SELECTION_DISTRIBUTION', distribution)", "def register_provides(self, provider_protocol, protocol):\n\n self.register_factory(\n no_adapter_necessary, provider_protocol, protocol\n )\n\n return", "def Inject(self,injector):\n pass", "def setUp(self):\n\n # Allocates users\n self.users = []\n self.user_session_tokens = []\n\n # Template for creating users\n user_template = {\n \"clientId\": 2,\n \"username\": \"user\",\n \"pwd\": \"password\",\n \"nameLast\": \"Last\",\n \"nameFirst\": \"First\",\n \"email\": \"user@gmail.com\",\n \"phone\": \"123-4567\",\n \"profile_picture_path\": \"/\",\n \"timezoneDefault\": \"EST\",\n \"languageDefault\": \"English\"\n }\n\n # Creates 'n' users and stores them\n n = 3\n for i in range(0, n):\n user = deepcopy(user_template)\n user['username'] += randstr()\n user['email'] += randstr()\n handler.user_create(event=user, context=None)\n self.users.append(user)\n self.user_session_tokens.append(None)", "def test_multiple_factories(self, mocker):\n def _make_factory_with_apikey(apikey, *_, **__):\n return SplitFactory(apikey, {}, True)\n\n factory_module_logger = mocker.Mock()\n build_in_memory = mocker.Mock()\n build_in_memory.side_effect = _make_factory_with_apikey\n build_redis = mocker.Mock()\n build_redis.side_effect = _make_factory_with_apikey\n build_uwsgi = mocker.Mock()\n build_uwsgi.side_effect = _make_factory_with_apikey\n build_localhost = mocker.Mock()\n build_localhost.side_effect = _make_factory_with_apikey\n mocker.patch('splitio.client.factory._LOGGER', new=factory_module_logger)\n mocker.patch('splitio.client.factory._build_in_memory_factory', new=build_in_memory)\n mocker.patch('splitio.client.factory._build_redis_factory', new=build_redis)\n mocker.patch('splitio.client.factory._build_uwsgi_factory', new=build_uwsgi)\n mocker.patch('splitio.client.factory._build_localhost_factory', new=build_localhost)\n\n _INSTANTIATED_FACTORIES.clear() # Clear all factory counters for testing purposes\n\n factory1 = get_factory('some_api_key')\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 1\n assert factory_module_logger.warning.mock_calls == []\n\n factory2 = get_factory('some_api_key')\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 2\n assert factory_module_logger.warning.mock_calls == [mocker.call(\n \"factory instantiation: You already have %d %s with this API Key. 
\"\n \"We recommend keeping only one instance of the factory at all times \"\n \"(Singleton pattern) and reusing it throughout your application.\",\n 1,\n 'factory'\n )]\n\n factory_module_logger.reset_mock()\n factory3 = get_factory('some_api_key')\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 3\n assert factory_module_logger.warning.mock_calls == [mocker.call(\n \"factory instantiation: You already have %d %s with this API Key. \"\n \"We recommend keeping only one instance of the factory at all times \"\n \"(Singleton pattern) and reusing it throughout your application.\",\n 2,\n 'factories'\n )]\n\n factory_module_logger.reset_mock()\n factory4 = get_factory('some_other_api_key')\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 3\n assert _INSTANTIATED_FACTORIES['some_other_api_key'] == 1\n assert factory_module_logger.warning.mock_calls == [mocker.call(\n \"factory instantiation: You already have an instance of the Split factory. \"\n \"Make sure you definitely want this additional instance. \"\n \"We recommend keeping only one instance of the factory at all times \"\n \"(Singleton pattern) and reusing it throughout your application.\"\n )]\n\n event = threading.Event()\n factory1.destroy(event)\n event.wait()\n assert _INSTANTIATED_FACTORIES['some_other_api_key'] == 1\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 2\n factory2.destroy()\n factory3.destroy()\n factory4.destroy()", "def __init__(self, animal_factory=None):\n self.pet_factory = animal_factory", "def __init__(self, animal_factory=None):\n self.pet_factory = animal_factory", "def provider(self, provider):\n\n self._provider = provider", "def setUp(self):\n self.prod_1 = Product.objects.create(\n pk=1,\n ean='3350033118072',\n name='test 1',\n category='cat 1',\n image='product_default.png',\n nutriscore='u'\n )\n\n self.user_1 = User.objects.create_user(\n pk=1,\n username='Fav Models Unit Test 1',\n email='boggusmail@boggusmail.net'\n )\n\n self.fav_1 = Favourite.objects.create(\n pk=1,\n date_added='2019-12-20 09:00:00',\n user=self.user_1,\n product=self.prod_1\n )", "def setUp(self):\n current_date = date.today()\n name = 'name'\n possible_meals = [Meal(date=current_date, name=name)]\n self.possible_meals_choices = [(possible_meal.id, possible_meal.name)\n for possible_meal in possible_meals]", "def setUp(self):\n self.factory = RequestFactory()", "def setUp(self):\n self.factory = RequestFactory()", "def setUp(self):\n self.factory = RequestFactory()", "def setUp(self):\n test_helpers.patch_environ(self)\n test_helpers.patch(self, [\n 'clusterfuzz._internal.bot.fuzzers.engine_common.decide_with_probability'\n ])\n self.mock.decide_with_probability.return_value = True\n\n data = []\n\n strategy1 = data_types.FuzzStrategyProbability()\n strategy1.strategy_name = 'corpus_subset,'\n strategy1.probability = 1\n strategy1.engine = 'afl'\n data.append(strategy1)\n ndb.put_multi(data)\n\n distribution = fuzz_task.get_strategy_distribution_from_ndb()\n\n environment.set_value('USE_BANDIT_STRATEGY_SELECTION', True)\n environment.set_value('STRATEGY_SELECTION_DISTRIBUTION', distribution)", "def provider(self, provider: Provider) -> None:\n self._provider = provider", "def _get_factories(self):\n return self._factories", "def setUp(self):\n self.ec = EntryChanger()\n self.ecm = EntryChangeMock()\n self.db = DatabaseIntermediary()\n\n entry_list = self.db.return_all()\n self.ec.entry = entry_list[0]\n\n names = [('Albert', 'Smith'), ('Aoraki', 'Cook'), ('First', 'Last'),\n ('Bob', 'Hatfield'), ('Dillen', 'Jones')]\n 
self.ec.names = names", "def setUp(self):\n self.setup_beets()", "def fixture_candy():\n yield Person(name=\"Candy\", age=13, hobbies=[\"Gardening\"])", "def setup():\n find_modules('alerters')\n find_modules('watchers')\n find_modules('auditors')", "def __init__(self, allow_replace=False):\n self.providers = {}\n self.allow_replace = allow_replace", "def setUp(cls):\n cls.place = Place()\n cls.place.city_id = \"hawaii808\"\n cls.place.user_id = \"modern123\"\n cls.place.name = \"The Modern Honolulu\"\n cls.place.description = \"The heart of Waikiki\"\n cls.place.number_rooms = 375\n cls.place.number_bathrooms = 1\n cls.place.max_guest = 10000\n cls.place.price_by_night = 300\n cls.place.latitude = 21.306944\n cls.place.longitude = -157.858337\n cls.place.amenity_ids = [\"amenity321\"]", "def _load_drivers(self):\n self.drivers, self.default_provider = service_base.load_drivers(\n 'L2GW', self)", "def setUp(self):\n self.setUpPyfakefs()\n self.fake_os = fake_filesystem.FakeOsModule(self.fs)\n\n populate_fakefs(self)", "def test_all_providers(self, mock_provider, monkeypatch):\n\n def mock_providers():\n return ['mock']\n\n monkeypatch.setattr(notifiers, 'all_providers', mock_providers)\n\n assert 'mock' in notifiers.all_providers()", "def setUpTestData(cls):\n cls.user = UserFactory()\n cls.auth = AuthFactory()\n\n cls.device = TOTPDevice.objects.create(user=cls.user)\n cls.relate = TOTPDevice.challenge.objects.create(\n device=cls.device, token=cls.auth\n )\n\n cls.algorithm = TOTPAlgorithm()", "def setup_method(self, method):\n self.user1 = UserFactory()\n self.user2 = UserFactory()\n self.budget1 = BudgetFactory(name=\"test1\", creator=self.user1)\n self.budget2 = BudgetFactory(name=\"budget0\", creator=self.user1)\n self.budget3 = BudgetFactory(name=\"third\", creator=self.user2)\n self.budget4 = BudgetFactory(name=\"fourth\", creator=self.user2, users=[self.user1])\n TransactionFactory(budget=self.budget1, amount=Decimal('30'), user=self.user1, category='test')\n TransactionFactory(budget=self.budget1, amount=Decimal('12.20'), user=self.user1)\n TransactionFactory(budget=self.budget1, amount=Decimal('-34.85'), user=self.user1)\n TransactionFactory(budget=self.budget4, user=self.user1)\n TransactionFactory(budget=self.budget4, user=self.user2)\n TransactionFactory(budget=self.budget4, user=self.user2)", "def setUp(self):\n\n self.sold = Soldier(0, 0)\n self.R = Random(seed)", "def setUp(self):\n self.user = User.objects.create_user(**USER)\n self.user_a = User.objects.create_user(**USER_A)\n self.user_b = User.objects.create_user(**USER_B)\n self.author = Author.objects.create(\n user = self.user,\n displayname=\"Jimmy\",\n github_username = GITHUB_USERNAME,\n bio = BIO,\n host = HOST)\n self.author_a = Author.objects.create(\n user = self.user_a,\n displayname=\"Bobby\",\n github_username = GITHUB_USERNAME,\n bio = BIO,\n host = HOST)\n self.author_b = Author.objects.create(\n user = self.user_b,\n displayname=\"Drake\",\n github_username = GITHUB_USERNAME,\n bio = BIO,\n host = HOST)\n\n c.token_credentials(self.author)", "def setUpFixture(self):\n pass", "def setup():\n\n self.zorp_mock = Mock()\n\n for name, func in six.iteritems(self._get_zorp_mock_methods()):\n self.zorp_mock.server.registry.put(name, func)\n\n self.zorp_mock.start()", "def test_multiple_factories(self, mocker):\n sdk_ready_flag = threading.Event()\n\n def _init(self, ready_flag, some, auth_api, streaming_enabled, telemetry_runtime_producer, telemetry_init_consumer, sse_url=None):\n self._ready_flag = 
ready_flag\n self._synchronizer = mocker.Mock(spec=Synchronizer)\n self._streaming_enabled = False\n self._telemetry_runtime_producer = telemetry_runtime_producer\n self._telemetry_init_consumer = telemetry_init_consumer\n mocker.patch('splitio.sync.manager.Manager.__init__', new=_init)\n\n def _start(self, *args, **kwargs):\n sdk_ready_flag.set()\n mocker.patch('splitio.sync.manager.Manager.start', new=_start)\n\n def _stop(self, *args, **kwargs):\n pass\n mocker.patch('splitio.sync.manager.Manager.stop', new=_stop)\n\n mockManager = Manager(sdk_ready_flag, mocker.Mock(), mocker.Mock(), False, mocker.Mock(), mocker.Mock())\n\n def _make_factory_with_apikey(apikey, *_, **__):\n return SplitFactory(apikey, {}, True, mocker.Mock(spec=ImpressionsManager), mockManager, mocker.Mock(), mocker.Mock(), mocker.Mock())\n\n factory_module_logger = mocker.Mock()\n build_in_memory = mocker.Mock()\n build_in_memory.side_effect = _make_factory_with_apikey\n build_redis = mocker.Mock()\n build_redis.side_effect = _make_factory_with_apikey\n build_localhost = mocker.Mock()\n build_localhost.side_effect = _make_factory_with_apikey\n mocker.patch('splitio.client.factory._LOGGER', new=factory_module_logger)\n mocker.patch('splitio.client.factory._build_in_memory_factory', new=build_in_memory)\n mocker.patch('splitio.client.factory._build_redis_factory', new=build_redis)\n mocker.patch('splitio.client.factory._build_localhost_factory', new=build_localhost)\n\n _INSTANTIATED_FACTORIES.clear() # Clear all factory counters for testing purposes\n\n factory1 = get_factory('some_api_key')\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 1\n assert factory_module_logger.warning.mock_calls == []\n\n factory2 = get_factory('some_api_key')\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 2\n assert factory_module_logger.warning.mock_calls == [mocker.call(\n \"factory instantiation: You already have %d %s with this SDK Key. \"\n \"We recommend keeping only one instance of the factory at all times \"\n \"(Singleton pattern) and reusing it throughout your application.\",\n 1,\n 'factory'\n )]\n\n factory_module_logger.reset_mock()\n factory3 = get_factory('some_api_key')\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 3\n assert factory_module_logger.warning.mock_calls == [mocker.call(\n \"factory instantiation: You already have %d %s with this SDK Key. \"\n \"We recommend keeping only one instance of the factory at all times \"\n \"(Singleton pattern) and reusing it throughout your application.\",\n 2,\n 'factories'\n )]\n\n factory_module_logger.reset_mock()\n factory4 = get_factory('some_other_api_key')\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 3\n assert _INSTANTIATED_FACTORIES['some_other_api_key'] == 1\n assert factory_module_logger.warning.mock_calls == [mocker.call(\n \"factory instantiation: You already have an instance of the Split factory. \"\n \"Make sure you definitely want this additional instance. 
\"\n \"We recommend keeping only one instance of the factory at all times \"\n \"(Singleton pattern) and reusing it throughout your application.\"\n )]\n\n event = threading.Event()\n factory1.destroy(event)\n event.wait()\n assert _INSTANTIATED_FACTORIES['some_other_api_key'] == 1\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 2\n factory2.destroy()\n factory3.destroy()\n factory4.destroy()", "def setUp(self):\n from emotion_emotions.views import RecordEmotions\n\n user = UserFactory(username='dan', email='dan@dan.net')\n user.set_password('password')\n user.first_name = 'Dan'\n user.last_name = 'Theman'\n user.save()\n self.dan = user\n\n emotion = EmotionFactory(user=user)\n emotion.save()\n\n self.request = RequestFactory()\n\n mock_emotions = [\n {'scores': {\n 'anger': 1.00,\n 'contempt': 1.00,\n 'disgust': 1.00,\n 'fear': 1.00,\n 'happiness': 1.00,\n 'neutral': 1.00,\n 'sadness': 1.00,\n 'surprise': 1.00,\n }}\n ]\n\n emotion_patcher = patch.object(RecordEmotions, 'get_emotion_data',\n return_value=mock_emotions)\n self.mock_emotion_api_call = emotion_patcher.start()\n self.addCleanup(emotion_patcher.stop)\n self.emotion_patcher = emotion_patcher", "def fixtures():", "def provider_and_mock_one(monkeypatch, provider_one, mock_inner_provider):\n mock_make_provider = mock.Mock(return_value=mock_inner_provider)\n monkeypatch.setattr(provider_one, 'make_provider', mock_make_provider)\n return provider_one, mock_inner_provider", "def setup_platform(hass, config, add_devices, discovery_info=None):\n name = config.get(CONF_NAME)\n api_key = config.get(CONF_API_KEY)\n origin = config.get(CONF_ORIGIN)\n options = config.get(CONF_OPTIONS)\n display_zone = config.get(CONF_DISPLAY_ZONE)\n\n add_devices([GoogleGeocode(hass, origin, name, api_key, options, display_zone)])", "def register_factory(factory, iface, requires, name):", "def setUp(self):\n super(TranscriptionsTest, self).setUp()\n mommy.make_recipe('grunt.seed', _quantity=2)", "def setUp(self):\n self.factory = RequestFactory()\n with patch(\"bookwyrm.suggested_users.rerank_suggestions_task.delay\"), patch(\n \"bookwyrm.activitystreams.populate_stream_task.delay\"\n ), patch(\"bookwyrm.lists_stream.populate_lists_task.delay\"):\n self.local_user = models.User.objects.create_user(\n \"mouse@example.com\",\n \"mouse@mouse.mouse\",\n \"mouseword\",\n local=True,\n localname=\"mouse\",\n )\n with patch(\"bookwyrm.models.user.set_remote_server.delay\"):\n self.remote_user = models.User.objects.create_user(\n \"rat\",\n \"rat@rat.rat\",\n \"ratword\",\n remote_id=\"http://example.com/rat\",\n local=False,\n )\n self.book = models.Edition.objects.create(\n title=\"Test Book\",\n parent_work=models.Work.objects.create(title=\"Test work\"),\n )", "def test__get_faker_anonymize_tuple(self):\n # Setup\n\n # Run\n transformer = Mock()\n transformer.anonymize = ('email',)\n\n result = CategoricalTransformer._get_faker(transformer)\n\n # Asserts\n self.assertEqual(\n result.__name__,\n 'faker',\n \"Expected faker function\"\n )", "def setUp(self):\n self.my_model1 = BaseModel()\n self.my_model1.name = \"hello\"\n self.my_model1.number = 9\n self.my_model2 = BaseModel()\n self.my_model2.name = \"goodbye\"\n self.my_model2.number = 19\n self.mock_stdin = create_autospec(sys.stdin)\n self.mock_stdout = create_autospec(sys.stdout)", "def registerInParameterFactory(self) -> None:\n ...", "def setup_models(self):\n pass", "def setup_data(es_with_collector):\n country_uk = constants.Country.united_kingdom.value.id\n country_us = 
constants.Country.united_states.value.id\n uk_region = constants.UKRegion.south_east.value.id\n CompanyFactory(\n name='abc defg ltd',\n trading_names=['helm', 'nop'],\n address_1='1 Fake Lane',\n address_town='Downtown',\n address_country_id=country_uk,\n uk_region_id=uk_region,\n )\n CompanyFactory(\n name='abc defg us ltd',\n trading_names=['helm', 'nop', 'qrs'],\n address_1='1 Fake Lane',\n address_town='Downtown',\n address_country_id=country_us,\n registered_address_country_id=country_us,\n )\n es_with_collector.flush_and_refresh()" ]
[ "0.63692963", "0.6351762", "0.61550707", "0.6035626", "0.5997692", "0.5795709", "0.57771325", "0.57227266", "0.55226743", "0.54858613", "0.5434935", "0.54322845", "0.53593737", "0.5339375", "0.5336239", "0.53179175", "0.529026", "0.5267008", "0.52436227", "0.52424914", "0.5205812", "0.520212", "0.51944804", "0.51875806", "0.51826084", "0.51672924", "0.5128174", "0.5126979", "0.512609", "0.5119085", "0.51090384", "0.51074046", "0.50593597", "0.5020356", "0.5016015", "0.50050324", "0.50050086", "0.50021285", "0.49988207", "0.49944812", "0.49714658", "0.49712577", "0.49698272", "0.49555042", "0.4953483", "0.4941731", "0.4924869", "0.49236166", "0.4917947", "0.4917633", "0.4915543", "0.4911619", "0.49047524", "0.48994932", "0.48950493", "0.48904225", "0.48893875", "0.4887271", "0.48845187", "0.48766536", "0.48683575", "0.4857099", "0.4857099", "0.48447916", "0.48421255", "0.48400968", "0.48281884", "0.48281884", "0.48281884", "0.4816729", "0.48153317", "0.4814843", "0.48127586", "0.48098323", "0.48095128", "0.48088637", "0.4796123", "0.47827488", "0.47764355", "0.47700217", "0.47647458", "0.47584456", "0.47577783", "0.47461608", "0.4741222", "0.4738983", "0.47332585", "0.47275385", "0.47259042", "0.47167385", "0.47148666", "0.4714042", "0.47120017", "0.47109488", "0.47065884", "0.4702189", "0.47004387", "0.4696122", "0.46911597", "0.4690415" ]
0.77991426
0
Add custom providers. Now deprecated to allow for the injection of the new methods.
def add_providers_deped(self): # This gives direct access to the module's main class called Provider. klasses = [ provider.Provider for provider in PROVIDERS] # Accessing the PROVIDERS. Check this method out, see how it operates. for k in klasses: self.fake.add_provider(k)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_providers(self):\n str_providers = PROVIDERS[0] # Providers, called by name\n live_providers = PROVIDERS[1] # Providers, provided as a live module\n for providers in PROVIDERS: # Iterate over the types of providers\n for provider in providers: # Iterate over all the methods\n # Inject those into faker, and swap the numpy instance\n self.fake.add_faker(self._swap_numpy(provider[0]), provider[1])", "def register(self, provider):\n for entry in dir(provider):\n try:\n provider_function = type(provider).__dict__[entry]\n factory_provider = getattr(provider_function, 'factory_provider', None)\n if factory_provider:\n provided_type, singleton = factory_provider\n if callable(provider_function): # A function or member function\n # if it's a bound method, this will get the bound version\n provider_member = getattr(provider, entry)\n self.add_factory(provided_type, provider_member, singleton)\n elif hasattr(provider_function, '__get__'):\n # this is a property or non-callable descriptor:\n self.add_factory(\n provided_type,\n functools.partial(provider_function.__get__, provider, provider),\n singleton,\n )\n else:\n self.add_service(provided_type, provider_function)\n except KeyError:\n pass", "def _load_providers(self, **kwargs):\n return super()._load_providers(providers=\"TIProviders\", **kwargs)", "def setup_provider(self):\n pass", "def create_providers(cls) -> Iterable['BaseProvider']:\n return []", "def parameter_providers(self, parameter_providers):\n\n self._parameter_providers = parameter_providers", "def add_provider(self, provider):\n # Check for backend name clashes, emitting a warning.\n current_backends = {str(backend) for backend in self.available_backends()}\n added_backends = {str(backend) for backend in provider.available_backends()}\n common_backends = added_backends.intersection(current_backends)\n\n # checks for equality of provider instances, based on the __eq__ method\n if provider not in self.providers:\n self.providers.append(provider)\n if common_backends:\n logger.warning(\n 'The backend names \"%s\" of this provider are already in use. '\n 'Refer to documentation for `available_backends()` and `unregister()`.',\n list(common_backends))\n return provider\n else:\n warnings.warn(\"Skipping registration: The provider is already registered.\")\n return self.providers[self.providers.index(provider)]", "def _invoke_providers(self, method, *args, **kwargs):\n which = kwargs.pop('which', self.providers.values())\n bubble = kwargs.pop('bubble', False)\n if isinstance(which, dict):\n coroutines = [getattr(p, method)(*args) for p, args in which.items()]\n which = which.keys()\n else:\n coroutines = [getattr(p, method)(*args, **kwargs) for p in which]\n\n # Convert all the coroutines to Task objects and attach the providers\n # to them, so we know for which provider the given task applied to, since\n # the done list below may not be in the same order we passed.\n tasks = []\n for provider, coroutine in zip(which, coroutines):\n task = asyncio.Task(coroutine)\n task.provider = provider\n tasks.append(task)\n\n done, pending = yield from asyncio.wait(tasks)\n\n # Remove exceptions from the results list and log them.\n results = []\n for task in done:\n # Provider method could have raised an asynchronous exception,\n # in which case accessing future.result() will raise. 
If bubble is False\n # (default) then we catch it and log it rather than letting it\n # bubble up.\n try:\n results.append((task.provider, task.result()))\n except Exception as e:\n if bubble:\n raise\n log.exception('%s from provider %s failed', method, provider.NAME)\n return results", "def set_provider_defaults(provider: str, config: Dict[str, Any]) -> None:\n\n class SetProviderDefaults:\n @st.hookimpl\n def get_provider_config(self, name, params, registry):\n if name != provider:\n return None\n conf = config.copy()\n conf.update(params)\n return conf\n\n st.registry.register(SetProviderDefaults())", "def provider(name):\n def wrapper(cls):\n def wrapped(init):\n def register_event_callbacks(self):\n # NOTE(morganfainberg): A provider who has an implicit\n # dependency on other providers may utilize the event callback\n # mechanism to react to any changes in those providers. This is\n # performed at the .provider() mechanism so that we can ensure\n # that the callback is only ever called once and guaranteed\n # to be on the properly configured and instantiated backend.\n if not hasattr(self, 'event_callbacks'):\n return\n\n if not isinstance(self.event_callbacks, dict):\n msg = _('event_callbacks must be a dict')\n raise ValueError(msg)\n\n for event in self.event_callbacks:\n if not isinstance(self.event_callbacks[event], dict):\n msg = _('event_callbacks[%s] must be a dict') % event\n raise ValueError(msg)\n for resource_type in self.event_callbacks[event]:\n # Make sure we register the provider for each event it\n # cares to call back.\n callbacks = self.event_callbacks[event][resource_type]\n if not callbacks:\n continue\n if not hasattr(callbacks, '__iter__'):\n # ensure the callback information is a list\n # allowing multiple callbacks to exist\n callbacks = [callbacks]\n notifications.register_event_callback(event,\n resource_type,\n callbacks)\n\n def __wrapped_init__(self, *args, **kwargs):\n \"\"\"Initialize the wrapped object and add it to the registry.\"\"\"\n init(self, *args, **kwargs)\n REGISTRY[name] = self\n register_event_callbacks(self)\n\n resolve_future_dependencies(__provider_name=name)\n\n return __wrapped_init__\n\n cls.__init__ = wrapped(cls.__init__)\n _factories[name] = cls\n return cls\n return wrapper", "def _build_observation_providers(self) -> Dict[str, ObservationProvider]:\n pass", "def install_providers():\n host = env.host_string\n providers = get_providers(host)\n for provider in providers.values():\n if getattr(provider, 'manager', None) is not None:\n provider.manager.install()\n\n provider.install()", "def _load_commands(self):\n\n entry_points = pkg_resources.iter_entry_points(\n config.PROVIDER_EP_NAMESPACE)\n for entry_point in entry_points:\n self.logger.debug('found provider %r', entry_point.name)\n self._commands[entry_point.name] = entry_point.load()", "def provide(self, feature, provider, suspend_callable=False, *args, **kwargs):\n if not self.allow_replace:\n assert feature not in self.providers, \"Duplicate feature: %r\" % feature\n if callable(provider) and not suspend_callable:\n def call():\n return provider(*args, **kwargs)\n else:\n def call():\n return provider\n self.providers[feature] = call", "def _fetch_providers(self, looking_for, providers=None):\n if providers is None:\n providers = self._reverse_mapping.get(looking_for, [])\n default_providers = []\n atom_providers = []\n for p in providers:\n if p.name in (_TRANSIENT_PROVIDER, self.injector_name):\n default_providers.append(p)\n else:\n atom_providers.append(p)\n return 
default_providers, atom_providers", "def get_add_providers() -> List[Union[Type[ProviderApi], Type[AddProviderInterface]]]:\n return [p for p in ProviderFactory.get_providers() if issubclass(p, AddProviderInterface)]", "def __init__(self,\n *args,\n **kwargs):\n if not is_testrpc_available():\n raise Exception(\"`TestRPCProvider` requires the `eth-testrpc` package to be installed\")\n from testrpc.rpc import RPCMethods\n\n self.rpc_methods = RPCMethods()\n super(BaseProvider, self).__init__(*args, **kwargs)", "def run_providers(self, argv):\n\n for name, provider in self.providermanager:\n provider = provider(self)\n self.produce_output(provider.title,\n provider.location,\n provider.run(argv))", "def register_provides(self, provider_protocol, protocol):\n\n self.register_factory(\n no_adapter_necessary, provider_protocol, protocol\n )\n\n return", "def augment(self, *args, **kwargs):\n pass", "def provider(self, provider):\n\n self._provider = provider", "def add_or_update_provider(self, provider_name, provider_type, endpoints, zone, provider_region,\n validate_provider_auth = True, initiate_refresh = True):\n zone_id = self.find_zone_by_name(zone or 'default')\n # check if provider with the same name already exists\n provider_id = self.find_provider_by_name(provider_name)\n if provider_id: # provider exists\n existing_config = self.get_provider_config(provider_id)\n\n # ManageIQ Euwe / CFME 5.7 API and older versions don't support certificate authority field in endpoint.\n # If it wasn't returned from existing provider configuration this means it is either unsupported or null,\n # in both cases we can remove null/empty certificate_authority from endpoints we want to update.\n self.filter_unsupported_fields_from_config(endpoints, existing_config['endpoints'], {'certificate_authority'})\n\n updates = self.required_updates(provider_id, endpoints, zone_id, provider_region, existing_config)\n\n if not updates:\n return dict(changed=self.changed,\n msg=\"Provider %s already exists\" % provider_name)\n\n old_validation_details = self.auths_validation_details(provider_id)\n operation = \"update\"\n self.update_provider(provider_id, provider_name, endpoints, zone_id, provider_region)\n roles_with_changes = set(updates[\"Added\"]) | set(updates[\"Updated\"])\n else: # provider doesn't exists, adding it to manageiq\n\n # ManageIQ Euwe / CFME 5.7 API and older versions don't support certificate authority field in endpoint.\n # filter empty fields if none on creation - No existing endpoints for new provider\n self.filter_unsupported_fields_from_config(endpoints, [{}], {'certificate_authority'})\n updates = None\n old_validation_details = {}\n operation = \"addition\"\n provider_id = self.add_new_provider(provider_name, provider_type,\n endpoints, zone_id, provider_region)\n roles_with_changes = [e['endpoint']['role'] for e in endpoints]\n\n if validate_provider_auth:\n authtypes_to_verify = []\n for e in endpoints:\n if e['endpoint']['role'] in roles_with_changes:\n authtypes_to_verify.append(e['authentication']['authtype'])\n result, details = self.verify_authenticaion_validation(provider_id, old_validation_details, authtypes_to_verify)\n else:\n result = \"Skipped Validation\"\n details = result\n\n if result == \"Invalid\":\n self.module.fail_json(msg=\"Failed to Validate provider authentication after {operation}. 
details: {details}\".format(operation=operation, details=details))\n elif result == \"Valid\" or result == \"Skipped Validation\":\n if initiate_refresh:\n self.refresh_provider(provider_id)\n message = \"Successful {operation} of {provider} provider. Authentication: {validation}. Refreshing provider inventory\".format(operation=operation, provider=provider_name, validation=details)\n else:\n message = \"Successful {operation} of {provider} provider. Authentication: {validation}.\".format(operation=operation, provider=provider_name, validation=details)\n elif result == \"Timed out\":\n message = \"Provider {provider} validation after {operation} timed out. Authentication: {validation}\".format(operation=operation, provider=provider_name, validation=details)\n return dict(\n provider_id=provider_id,\n changed=self.changed,\n msg=message,\n updates=updates\n )", "def install(self, provider):\n pass # pragma: no cover", "def registered_providers():\n return list(_DEFAULT_PROVIDER.providers)", "def register_entrypoints(self):\n for entrypoint in entrypoints.get_group_all(\"mlflow.run_context_provider\"):\n try:\n self.register(entrypoint.load())\n except (AttributeError, ImportError) as exc:\n warnings.warn(\n 'Failure attempting to register context provider \"{}\": {}'.format(\n entrypoint.name, str(exc)\n ),\n stacklevel=2\n )", "def _add_metrics_to_metrics_provider(cls, mp, metrics):\n providers_info = cls._METRICS_PROVIDER_INFO[mp.type][mp.namespace][\"providers\"]\n provided_metrics = next(\n provider_info[\"provided_metrics\"]\n for provider_info in providers_info\n if provider_info[\"name\"] == mp.name\n )\n\n # Check if the provided metrics are equal to the metrics\n num_metrics = len(metrics)\n if len(provided_metrics) != num_metrics:\n raise ValueError(\n f\"Found {len(provided_metrics)} metrics for metrics provider \"\n f\"{mp.name}. Expected {num_metrics}.\"\n )\n\n # Check what type of provider is used at the moment\n if mp.type == MetricsProviderType.STATIC:\n valued_metric_class = StaticMetric\n elif mp.type == MetricsProviderType.PROMETHEUS:\n valued_metric_class = PrometheusMetric\n else:\n raise NotImplementedError()\n # Iterate through the provided metrics\n valued_metrics = []\n for i, (metric_name, metric_value) in enumerate(provided_metrics):\n metric = metrics[i]\n if metric.mp_metric_name != metric_name:\n msg = (\n f\"Unexpected name {metric.mp_metric_name}. 
Expected: {metric_name}.\"\n )\n raise ValueError(msg)\n valued_metric = valued_metric_class(metric, metric_value)\n valued_metrics.append(valued_metric)\n mp.set_valued_metrics(valued_metrics)", "def register_provides(provider_protocol, protocol):\n manager = get_global_adaptation_manager()\n return manager.register_provides(provider_protocol, protocol)", "def provider_setup(cls, args, config):\n if len(args) < 1:\n print \"USAGE: molns provider setup name\"\n print \"\\tCreates a new provider with the given name.\"\n return\n # check if provider exists\n try:\n provider_obj = config.get_object(args[0], kind='Provider')\n except DatastoreException:\n # ask provider type\n print \"Select a provider type:\"\n for n, p in enumerate(VALID_PROVIDER_TYPES):\n print \"\\t[{0}] {1}\".format(n, p)\n while True:\n try:\n provider_ndx = int(raw_input_default(\"Enter the number of type:\", default='0'))\n provider_type = VALID_PROVIDER_TYPES[provider_ndx]\n break\n except (ValueError, IndexError):\n pass\n logging.debug(\"Provider type '{0}'\".format(provider_type))\n # Create provider\n try:\n provider_obj = config.create_object(name=args[0], ptype=provider_type, kind='Provider')\n except DatastoreException as e:\n logging.exception(e)\n print e\n return\n print \"Enter configuration for provider {0}:\".format(args[0])\n setup_object(provider_obj)\n config.save_object(provider_obj, kind='Provider')\n\n cls.provider_initialize(args[0], config)", "def register(self, key: Hashable, provider: Callable):\n self._bindings[key] = provider\n self._logger(f\"{self} registered provider {key} = {provider}\")", "def register(self, funcs):\n for name, func in funcs.items():\n self.functions[name] = func", "def get_providers(obj):\n\n return scan_methods(obj, lambda attr: attr.check(Tags.PROVIDER))", "def register_method(method, clazz, *args_editor):\n \n methods = _methods.get(clazz)\n if methods: methods.append((method, args_editor))\n else: _methods[clazz] = [(method, args_editor)]", "def register_other_tools(self):\n self.add_tool(SaveAsTool)\n self.add_tool(CopyToClipboardTool)\n self.add_tool(PrintTool)\n self.add_tool(HelpTool)", "def set_execution_providers(self, execution_providers=[\"CPUExecutionProvider\"]): # noqa: B006\n self.execution_providers = execution_providers\n self.create_inference_session()", "def register_provider(args):\n if len(args) == 0:\n click.echo(\"Usage: mephisto register <provider_type> --arg1:value --arg2:value\")\n return\n\n from mephisto.core.local_database import LocalMephistoDB\n from mephisto.core.registry import get_crowd_provider_from_type\n from mephisto.core.argparse_parser import parse_arg_dict, get_extra_argument_dicts\n\n provider_type, requester_args = args[0], args[1:]\n args_dict = dict(arg.split(\":\") for arg in requester_args)\n transformed = dict(\n (key, {\"option_string\": key, \"value\": value})\n for (key, value) in args_dict.items()\n )\n\n crowd_provider = get_crowd_provider_from_type(provider_type)\n RequesterClass = crowd_provider.RequesterClass\n\n if len(requester_args) == 0:\n from tabulate import tabulate\n\n params = get_extra_argument_dicts(RequesterClass)\n for param in params:\n click.echo(param[\"desc\"])\n click.echo(tabulate(param[\"args\"].values(), headers=\"keys\"))\n return\n\n try:\n parsed_options = parse_arg_dict(RequesterClass, transformed)\n except Exception as e:\n click.echo(str(e))\n\n if \"name\" not in parsed_options:\n click.echo(\"No name was specified for the requester.\")\n\n db = LocalMephistoDB()\n requesters = 
db.find_requesters(requester_name=parsed_options[\"name\"])\n if len(requesters) == 0:\n requester = RequesterClass.new(db, parsed_options[\"name\"])\n else:\n requester = requesters[0]\n try:\n requester.register(parsed_options)\n click.echo(\"Registered successfully.\")\n except Exception as e:\n click.echo(str(e))", "def register():\n PLUGINS = dict()\n def decorator(func):\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n value = func(*args, **kwargs)\n PLUGINS[func.__name__] = func\n return value\n return wrapper\n return decorator", "def Inject(self,injector):\n pass", "def provider(self, provider: Provider) -> None:\n self._provider = provider", "def before(hook_name, methods, kwargs):\n for hookimpl in methods:\n self._plugin2calls[hookimpl.plugin].add(hook_name)", "def add(self, replace_types: Iterable[T_StringSerializable] = (), cls: type = None):\n\n def decorator(cls):\n self.types.append(cls)\n for t in replace_types:\n self.replaces.add((t, cls))\n return cls\n\n if cls:\n decorator(cls)\n return\n\n return decorator", "def addCustomTests(self, tests):\n pass", "def _import_custom(self, custom_modules):\n for filter_module in custom_modules:\n info('Loading {}'.format(filter_module))\n funs = module_utils.get_all_functions(filter_module)\n for fun_name, fun in funs.items():\n if fun_name.startswith('function'):\n import_name = '_'.join(fun_name.split('_')[1:])\n debug('Adding function {}'.format(import_name))\n self._functions[import_name] = fun\n elif fun_name.startswith('filter'):\n import_name = '_'.join(fun_name.split('_')[1:])\n debug('Adding filter {}'.format(import_name))\n self._filters[import_name] = fun", "def AddProviderFlag(parser):\n help_text = \"\"\"\\\n Database provider, for managed databases.\n \"\"\"\n choices = ['RDS', 'CLOUDSQL']\n parser.add_argument('--provider', help=help_text, choices=choices)", "def register(*args, provider_class=IBMQProvider, **kwargs):\n # Try to autodiscover credentials if not passed.\n if not args and not kwargs and provider_class == IBMQProvider:\n kwargs = credentials.discover_credentials().get(\n credentials.get_account_name(IBMQProvider)) or {}\n if not kwargs:\n raise QISKitError(\n 'No IBMQ credentials found. Please pass them explicitly or '\n 'store them before calling register() with store_credentials()')\n\n try:\n provider = provider_class(*args, **kwargs)\n except Exception as ex:\n raise QISKitError(\"Couldn't instantiate provider! 
Error: {0}\".format(ex))\n\n _DEFAULT_PROVIDER.add_provider(provider)\n return provider", "def _enable(cls, provider):\r\n if provider.NAME in cls._ENABLED:\r\n raise ValueError('Provider %s already enabled' % provider.NAME)\r\n cls._ENABLED[provider.NAME] = provider", "def __new__(mcls, *args, **kwargs):\n provider_type = kwargs[\"metrics_provider\"].spec.type\n return object.__new__(mcls.registry[provider_type])", "def __wrapped_init__(self, *args, **kwargs):\n init(self, *args, **kwargs)\n REGISTRY[name] = self\n register_event_callbacks(self)\n\n resolve_future_dependencies(__provider_name=name)", "def __init__(self, allow_replace=False):\n self.providers = {}\n self.allow_replace = allow_replace", "def provider_features(self, provider_config=None):\n pcr_class = self.server.message_factory.get_response_type(\n \"configuration_endpoint\"\n )\n\n _provider_info = pcr_class(**self.default_capabilities)\n _provider_info[\"scopes_supported\"] = self.scopes\n\n sign_algs = list(jws.SIGNER_ALGS.keys())\n sign_algs.remove(\"none\")\n sign_algs = sorted(sign_algs, key=cmp_to_key(sort_sign_alg))\n\n _pat1 = \"{}_endpoint_auth_signing_alg_values_supported\"\n _pat2 = \"{}_endpoint_auth_methods_supported\"\n for typ in [\"token\", \"revocation\", \"introspection\"]:\n _provider_info[_pat1.format(typ)] = sign_algs\n _provider_info[_pat2.format(typ)] = AUTH_METHODS_SUPPORTED\n\n if provider_config:\n _provider_info.update(provider_config)\n\n return _provider_info", "def get_providers(providers: list, provider_type: str = 'Author') -> list:\n return [Node('Provider', name=provider, type=provider_type) for provider in providers]", "def get_add_provider(uri):\n for provider in ProviderFactory.get_add_providers():\n try:\n supports = provider.supports(uri) # type: ignore[union-attr]\n except BaseException as e:\n communication.warn(f\"Couldn't test add provider {provider.__class__.__name__}: {e}\")\n else:\n if supports:\n return provider(uri=uri) # type: ignore[call-arg]\n\n raise errors.DatasetProviderNotFound(uri=uri)", "def __init__(self, provider_class, provider_type, label, origin, config):\n super(Provider, self).__init__()\n\n self.created = datetime.datetime.now()\n \"\"\"datetime: The creation time of this document\"\"\"\n\n self.modified = datetime.datetime.now()\n \"\"\"datetime: The last modified time of this document\"\"\"\n\n self.provider_class = ProviderClass(provider_class)\n \"\"\"ProviderClass: The class of provider, either compute or storage\"\"\"\n\n self.provider_type = provider_type\n \"\"\"str: The type (or host) of the provider. (e.g. static, gcloud, etc)\"\"\"\n\n self.label = label\n \"\"\"str: The human-readable provider label\"\"\"\n\n self.origin = origin\n \"\"\"dict: The origin (e.g. 
user) of the provider\"\"\"\n\n self.config = config\n \"\"\"dict: The provider-specific configuration\"\"\"", "def register_curve_tools(self):\n self.add_tool(SignalStatsTool)\n self.add_tool(AntiAliasingTool)\n self.add_tool(AxisScaleTool)", "def add_hook(self, method, args=None, kwargs=None):\n self.hook.append((method, args, kwargs))", "def add_methods(self):\n for name in self.WRAP_METHODS_LIST: self.add_method_list(name)\n for name in self.WRAP_METHODS_NDA: self.add_method_nda(name)\n for name in self.WRAP_METHODS_2NDA: self.add_method_double_nda(name)", "def inject(self, request: BaseRequest, args_view: list, kwargs_view: dict):", "def test_all_providers(self, mock_provider, monkeypatch):\n\n def mock_providers():\n return ['mock']\n\n monkeypatch.setattr(notifiers, 'all_providers', mock_providers)\n\n assert 'mock' in notifiers.all_providers()", "def serviceProvider(self, iTag, srvType, cb, *args):\r\n if cb and not callable(cb):\r\n raise TypeError('Callback has to be callable.')\r\n\r\n return ServiceProvider(self, iTag, srvType, cb, args)", "def _add_config_arg(self, type_, content_type, name, default=None,\n required=False, methods=ALL_HTTP_METHODS):\n if methods == '*':\n methods = ALL_HTTP_METHODS\n arg = type_(methods, content_type, name, default, required)\n for method in methods:\n differentiator = (method, content_type)\n if not self.contains(type_, differentiator):\n self.register(type_, Registry(), differentiator)\n registry = self.get(type_, differentiator)\n registry.register(type_, arg, name)", "def add_member_function(cls, methodName, newMethod):\n cls.add_registration_code('def(\"%s\",%s)'%(methodName, newMethod), True)", "def resolve_future_dependencies(__provider_name=None):\n new_providers = dict()\n if __provider_name:\n # A provider was registered, so take care of any objects depending on\n # it.\n targets = _future_dependencies.pop(__provider_name, [])\n targets.extend(_future_optionals.pop(__provider_name, []))\n\n for target in targets:\n setattr(target, __provider_name, REGISTRY[__provider_name])\n\n return\n\n # Resolve optional dependencies, sets the attribute to None if there's no\n # provider registered.\n for dependency, targets in six.iteritems(_future_optionals.copy()):\n provider = REGISTRY.get(dependency)\n if provider is None:\n factory = _factories.get(dependency)\n if factory:\n provider = factory()\n REGISTRY[dependency] = provider\n new_providers[dependency] = provider\n for target in targets:\n setattr(target, dependency, provider)\n\n # Resolve future dependencies, raises UnresolvableDependencyException if\n # there's no provider registered.\n try:\n for dependency, targets in six.iteritems(_future_dependencies.copy()):\n if dependency not in REGISTRY:\n # a Class was registered that could fulfill the dependency, but\n # it has not yet been initialized.\n factory = _factories.get(dependency)\n if factory:\n provider = factory()\n REGISTRY[dependency] = provider\n new_providers[dependency] = provider\n else:\n raise UnresolvableDependencyException(dependency)\n\n for target in targets:\n setattr(target, dependency, REGISTRY[dependency])\n finally:\n _future_dependencies.clear()\n return new_providers", "def add_loaders(self, loaders):\n # type: (List[AbstractTemplateLoader]) -> None\n self.runtime_configuration_builder.add_loaders(loaders)", "def svc_provider(self, svc_provider):\n\n self._svc_provider = svc_provider", "def _add_method(cls: type) -> Callable:\n\n def decorator(func):\n func.enable = lambda: _method_enable(\n cls, 
[_plugin_funcname(func)], func\n )\n func.disable = lambda: _method_disable(\n cls, [_plugin_funcname(func)], func\n )\n return func\n\n return decorator", "def watch_providers(self, **kwargs):\n\n path = self._get_movie_id_path('watch_providers')\n resp = self._get_method(path, kwargs)\n return resp", "def register(cls):\n register(cls, cls.provided_class)", "def add_new_provider(self, provider_name, provider_type, endpoints, zone_id, provider_region):\n try:\n result = self.client.post(self.providers_url, name=provider_name,\n type=ManageIQProvider.PROVIDER_TYPES[provider_type],\n zone={'id': zone_id},\n connection_configurations=endpoints,\n provider_region=provider_region)\n provider_id = result['results'][0]['id']\n self.changed = True\n except Exception as e:\n self.module.fail_json(msg=\"Failed to add provider. Error: {!r}\".format(e))\n return provider_id", "def add_adapter(self, func):\n self.adapter = func", "def addAllFactories(self) -> None:\n ...", "def get_providers(cls, values=None):\n rv = super(PaymentGatewaySelf, cls).get_providers()\n self_record = ('self', 'Self')\n if self_record not in rv:\n rv.append(self_record)\n return rv", "def load_commands():\n register_plugin(configure_client_details)\n register_plugin(search_venues)", "def provider_and_mock_two(monkeypatch, provider_two, mock_inner_provider):\n mock_make_provider = mock.Mock(return_value=mock_inner_provider)\n monkeypatch.setattr(provider_two, 'make_provider', mock_make_provider)\n return provider_two, mock_inner_provider", "def inject(self, *gens):\n raise NotImplementedError", "def install():\n ArticleDataProvider.register()\n ProductDataProvider.register()", "def add_custom(self, func: Callable, opset: OpsetVersion) -> None:\n self._functions.add_custom(opset, func)", "def extend(self, *args, **kwargs): # real signature unknown\n pass", "def get_providers(self):\n return [\"Rainfall\", \"Average Rainfall Sea\", \"Average Rainfall Land\"]", "def add_plugin(self, plugin, callables, jobs, shutdowns, urls) -> None:\n self._plugins[plugin.name] = plugin\n self.register_callables(callables)\n self.register_jobs(jobs)\n self.register_shutdowns(shutdowns)\n self.register_urls(urls)", "def _load_drivers(self):\n self.drivers, self.default_provider = service_base.load_drivers(\n taas_consts.TAAS, self)", "def get_providers(self):\n return [\"Temperature\", \"Average Temperature Sea\", \"Average Temperature Land\"]", "def test_register(self):\n self._configure_testshib_provider()\n self._test_register()", "def register(check_environ=False):\n from mundi.loader import register\n from mundi.types.region import REGION_PLUGINS\n\n if check_environ:\n import os\n\n if os.environ.get(\"MUNDI_DEMOGRAPHY\", \"on\").lower() in (\"off\", \"false\", \"no\"):\n return\n\n for k, v in FUNCTIONS.items():\n register(k, v)\n\n REGION_PLUGINS[\"population\"] = lambda x: population(x.id)\n REGION_PLUGINS[\"age_distribution\"] = lambda x: age_distribution(x.id)\n REGION_PLUGINS[\"age_pyramid\"] = lambda x: age_pyramid(x.id)", "def add_to_apply_calls(self, func, *args, **kwargs):\n pass", "def provides(self):\n raise NotImplementedError()", "def __init__(self, methods=None):\n methods = methods or {}\n for name, method in methods.items():\n setattr(self, name, method)", "def _install(self):\n # Default implementation\n for pm_name, package in self._provider_package.items():\n if helpers[pm_name]:\n helpers[pm_name].install_package(package)\n return\n raise self.unsure_how_to_install()", "def register(self, callback, filters = 
[]):\n\t\tself.callbacks.append((callback, filters))\n\t\tself.events[str(callback)] = []", "def provider_list(cls, args, config):\n # print \"MOLNSProvider.provider_list(args={0}, config={1})\".format(args, config)\n providers = config.list_objects(kind='Provider')\n if len(providers) == 0:\n print \"No providers configured\"\n else:\n table_data = []\n for p in providers:\n table_data.append([p.name, p.type])\n # table_print(['name', 'type'], table_data)\n r = {'type': 'table', 'column_names': ['name', 'type'], 'data': table_data}\n return r", "def provider_and_mock_one(monkeypatch, provider_one, mock_inner_provider):\n mock_make_provider = mock.Mock(return_value=mock_inner_provider)\n monkeypatch.setattr(provider_one, 'make_provider', mock_make_provider)\n return provider_one, mock_inner_provider", "def inject_extensions(self, extensions: Dict[str, str]) -> None:\n self.extensions = extensions", "def _add_services(self):\n this_service = {'name': '{{ metadata.package }}'}\n other_services = [\n {'name': 'mysql',\n 'location': 'cs:percona-cluster',\n 'constraints': {'mem': '3072M'}},\n {'name': 'rabbitmq-server'},\n {'name': 'keystone'},\n {'name': 'manila'}\n ]\n super(ManilaPluginCharmDeployment, self)._add_services(\n this_service, other_services)", "def get_providers(cls, values=None):\n rv = super(PaymentGatewayStripe, cls).get_providers()\n stripe_record = ('stripe', 'Stripe')\n if stripe_record not in rv:\n rv.append(stripe_record)\n return rv", "def create_data_providers():\n prov_dict = {}\n with custom_mp_config(\n get_test_data_path().parent.joinpath(\"msticpyconfig-test.yaml\")\n ):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=UserWarning)\n if _KQL_IMP_OK:\n prov_dict[\"az_sent_prov\"] = QueryProvider(\"MSSentinel\")\n prov_dict[\"mdatp_prov\"] = QueryProvider(\"MDE\")\n if _SPLUNK_IMP_OK:\n prov_dict[\"splunk_prov\"] = QueryProvider(\"Splunk\")\n prov_dict[\"ti_lookup\"] = TILookup()\n prov_dict[\"geolite\"] = GeoLiteLookup()\n\n if _IPSTACK_IMP_OK:\n prov_dict[\"ip_stack\"] = ip_stack_cls()\n return prov_dict", "def _register_services(self, pipeline):\n\n pipeline.register_service(self._aprs_service)", "def decorate(self, alias, *decorators):\n pfunc = getattr(self, alias)\n method, args, kargs = pfunc.func, pfunc.args, pfunc.keywords\n for decorator in decorators:\n method = decorator(method)\n self.register(alias, method, *args, **kargs)", "async def init_provider(self):\n self.dsp_name = \"OpenStack\"\n await self._provider.init(image_names=self.config[\"images\"].values())", "def build_provider(self) -> Provider:\n return self._provider", "def add_users(self, *users):\r\n pass", "def add_extra_login(request):\n context = RequestContext(request, {\n 'providers': provider_details()})\n return render(request, 'add-login.html', context)", "def add(*args, **kwargs): # real signature unknown\n pass" ]
[ "0.763159", "0.6670083", "0.66017246", "0.6336766", "0.62416", "0.60363275", "0.5885727", "0.5830474", "0.57857096", "0.5718912", "0.56462735", "0.56242216", "0.5617124", "0.5603441", "0.56022394", "0.5557451", "0.55536884", "0.5521999", "0.55216455", "0.55036163", "0.5493164", "0.5473934", "0.54666084", "0.5431114", "0.536523", "0.53495544", "0.5344601", "0.5293676", "0.5287814", "0.5260995", "0.5203276", "0.51965356", "0.51914227", "0.5184309", "0.518037", "0.51718366", "0.5168891", "0.51527995", "0.51436156", "0.51399094", "0.51373094", "0.5118895", "0.51037294", "0.51036704", "0.51002294", "0.5082266", "0.5072958", "0.5066481", "0.5065135", "0.50564194", "0.5051964", "0.5041869", "0.5002943", "0.49995434", "0.49934173", "0.49917632", "0.4977875", "0.49770805", "0.49765784", "0.49419668", "0.49416912", "0.49347344", "0.49331057", "0.49190065", "0.49078757", "0.49073532", "0.49002412", "0.48952842", "0.4888113", "0.48758018", "0.4872559", "0.48714754", "0.48705965", "0.48684844", "0.4863485", "0.48610446", "0.48565948", "0.48519033", "0.48409486", "0.48393652", "0.4823315", "0.48194328", "0.48068815", "0.48024666", "0.4800771", "0.47969612", "0.47968647", "0.47904798", "0.47873688", "0.47824967", "0.47767428", "0.4775224", "0.47736895", "0.4769578", "0.47607934", "0.47545236", "0.47496298", "0.4748269", "0.47300255", "0.47194463" ]
0.64158905
3
Convert field parameters to/from a message to python types; parameters which do not contain Fields are converted to python types
def get_field_parameters(self, in_parms):
    if len(in_parms) == 0:  # Check if there are params
        return None  # If that's the case, return None
    values = []  # Empty values
    is_msg = False  # Check if the param is a message
    for parm in in_parms:  # Loop over params
        if parm.type == "Field":  # If it is a message
            is_msg = True  # Set is_message to true
            continue  # Go to top of loop
        _type = eval(parm.type)  # create a type object
        value = _type(parm.value)  # Create the value, and cast it to the type
        values.append(value)  # Add that into the parameters
    if is_msg is True:  # check if is a message
        return in_parms  # Return input params
    elif len(values) == 1:  # If there is only one element
        return values[-1]  # Return just that element
    else:  # Otherwise
        return values  # Return the params
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_type(self, value, schema_type, **kwargs):", "def _ConvertFieldValuePair(self, js, message, path):\n names = []\n message_descriptor = message.DESCRIPTOR\n fields_by_json_name = dict((f.json_name, f)\n for f in message_descriptor.fields)\n for name in js:\n try:\n field = fields_by_json_name.get(name, None)\n if not field:\n field = message_descriptor.fields_by_name.get(name, None)\n if not field and _VALID_EXTENSION_NAME.match(name):\n if not message_descriptor.is_extendable:\n raise ParseError(\n 'Message type {0} does not have extensions at {1}'.format(\n message_descriptor.full_name, path))\n identifier = name[1:-1] # strip [] brackets\n # pylint: disable=protected-access\n field = message.Extensions._FindExtensionByName(identifier)\n # pylint: enable=protected-access\n if not field:\n # Try looking for extension by the message type name, dropping the\n # field name following the final . separator in full_name.\n identifier = '.'.join(identifier.split('.')[:-1])\n # pylint: disable=protected-access\n field = message.Extensions._FindExtensionByName(identifier)\n # pylint: enable=protected-access\n if not field:\n if self.ignore_unknown_fields:\n continue\n raise ParseError(\n ('Message type \"{0}\" has no field named \"{1}\" at \"{2}\".\\n'\n ' Available Fields(except extensions): \"{3}\"').format(\n message_descriptor.full_name, name, path,\n [f.json_name for f in message_descriptor.fields]))\n if name in names:\n raise ParseError('Message type \"{0}\" should not have multiple '\n '\"{1}\" fields at \"{2}\".'.format(\n message.DESCRIPTOR.full_name, name, path))\n names.append(name)\n value = js[name]\n # Check no other oneof field is parsed.\n if field.containing_oneof is not None and value is not None:\n oneof_name = field.containing_oneof.name\n if oneof_name in names:\n raise ParseError('Message type \"{0}\" should not have multiple '\n '\"{1}\" oneof fields at \"{2}\".'.format(\n message.DESCRIPTOR.full_name, oneof_name,\n path))\n names.append(oneof_name)\n\n if value is None:\n if (field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE\n and field.message_type.full_name == 'google.protobuf.Value'):\n sub_message = getattr(message, field.name)\n sub_message.null_value = 0\n elif (field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM\n and field.enum_type.full_name == 'google.protobuf.NullValue'):\n setattr(message, field.name, 0)\n else:\n message.ClearField(field.name)\n continue\n\n # Parse field value.\n if _IsMapEntry(field):\n message.ClearField(field.name)\n self._ConvertMapFieldValue(value, message, field,\n '{0}.{1}'.format(path, name))\n elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:\n message.ClearField(field.name)\n if not isinstance(value, list):\n raise ParseError('repeated field {0} must be in [] which is '\n '{1} at {2}'.format(name, value, path))\n if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:\n # Repeated message field.\n for index, item in enumerate(value):\n sub_message = getattr(message, field.name).add()\n # None is a null_value in Value.\n if (item is None and\n sub_message.DESCRIPTOR.full_name != 'google.protobuf.Value'):\n raise ParseError('null is not allowed to be used as an element'\n ' in a repeated field at {0}.{1}[{2}]'.format(\n path, name, index))\n self.ConvertMessage(item, sub_message,\n '{0}.{1}[{2}]'.format(path, name, index))\n else:\n # Repeated scalar field.\n for index, item in enumerate(value):\n if item is None:\n raise ParseError('null is not allowed to be used as an element'\n ' 
in a repeated field at {0}.{1}[{2}]'.format(\n path, name, index))\n getattr(message, field.name).append(\n _ConvertScalarFieldValue(\n item, field, '{0}.{1}[{2}]'.format(path, name, index)))\n elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:\n if field.is_extension:\n sub_message = message.Extensions[field]\n else:\n sub_message = getattr(message, field.name)\n sub_message.SetInParent()\n self.ConvertMessage(value, sub_message, '{0}.{1}'.format(path, name))\n else:\n if field.is_extension:\n message.Extensions[field] = _ConvertScalarFieldValue(\n value, field, '{0}.{1}'.format(path, name))\n else:\n setattr(\n message, field.name,\n _ConvertScalarFieldValue(value, field,\n '{0}.{1}'.format(path, name)))\n except ParseError as e:\n if field and field.containing_oneof is None:\n raise ParseError('Failed to parse {0} field: {1}.'.format(name, e))\n else:\n raise ParseError(str(e))\n except ValueError as e:\n raise ParseError('Failed to parse {0} field: {1}.'.format(name, e))\n except TypeError as e:\n raise ParseError('Failed to parse {0} field: {1}.'.format(name, e))", "def _ConvertGenericMessage(self, value, message, path):\n # Duration, Timestamp, FieldMask have a FromJsonString method to do the\n # conversion. Users can also call the method directly.\n try:\n message.FromJsonString(value)\n except ValueError as e:\n raise ParseError('{0} at {1}'.format(e, path))", "def pack_field(self, value):\n if isinstance(value, str):\n return self.pack_str(value)\n elif isinstance(value, unicode):\n return self.pack_unicode(value, self.charset, self.errors)\n elif isinstance(value, int):\n return self.pack_int(value)\n elif isinstance(value, long):\n return self.pack_long(value)\n else:\n raise TypeError(\"Invalid argument type '%s'. Only 'str', 'int' or long expected\" % (type(value).__name__))", "def test_to_message_from_dto(self):\n fields = [('id', None)]\n FooEvent = message_factory(NamedTuple('FooEvent', fields))\n dto = FooEvent(id=1)\n message = to_message_from_dto(dto)\n\n assert message['class'] == 'FooEvent'\n assert message['data']['id'] == 1", "def _ConvertValueMessage(self, value, message, path):\n if isinstance(value, dict):\n self._ConvertStructMessage(value, message.struct_value, path)\n elif isinstance(value, list):\n self._ConvertListValueMessage(value, message.list_value, path)\n elif value is None:\n message.null_value = 0\n elif isinstance(value, bool):\n message.bool_value = value\n elif isinstance(value, str):\n message.string_value = value\n elif isinstance(value, _INT_OR_FLOAT):\n message.number_value = value\n else:\n raise ParseError('Value {0} has unexpected type {1} at {2}'.format(\n value, type(value), path))", "def test_from_message_to_dto(self, mock_factory):\n message = Message({'class': 'FooClass', 'data': {'foo': 'bar'}})\n from_message_to_dto(message)\n\n assert mock_factory.call_args[0][0].__name__ == 'FooClass'\n assert mock_factory.call_args[0][0]._fields == ('foo', 'Meta')", "def _ConvertWrapperMessage(self, value, message, path):\n field = message.DESCRIPTOR.fields_by_name['value']\n setattr(\n message, 'value',\n _ConvertScalarFieldValue(value, field, path='{0}.value'.format(path)))", "def to_field(obj):\r\n\r\n\r\n if isinstance(obj, Field):\r\n field = obj\r\n else:\r\n d = { \"storage_type\": \"unknown\" }\r\n\r\n if isinstance(obj, basestring):\r\n d[\"name\"] = obj\r\n elif type(obj) == tuple or type(obj) == list:\r\n d[\"name\"] = obj[0]\r\n try:\r\n d[\"storage_type\"] = obj[1]\r\n try:\r\n d[\"analytical_type\"] = obj[2]\r\n 
except:\r\n pass\r\n except:\r\n pass\r\n else: # assume dictionary\r\n d[\"name\"] = obj[\"name\"]\r\n d[\"label\"] = obj.get(\"label\")\r\n d[\"storage_type\"] = obj.get(\"storage_type\")\r\n d[\"analytical_type\"] = obj.get(\"analytical_type\")\r\n d[\"adapter_storage_type\"] = obj.get(\"adapter_storage_type\")\r\n\r\n if \"analytical_type\" not in d:\r\n storage_type = d.get(\"storage_type\")\r\n if storage_type:\r\n deftype = default_analytical_types.get(storage_type)\r\n d[\"analytical_type\"] = deftype or \"typeless\"\r\n else:\r\n d[\"analytical_type\"] = \"typeless\"\r\n\r\n field = Field(**d)\r\n return field", "def testWrongTypeAssignment(self):\n self.assertRaises(messages.ValidationError,\n protojson.decode_message,\n MyMessage, '{\"a_string\": 10}')", "def _convert_field_type(row):\n return row", "def validate_fields(cls, message_type: str, attachment_data: dict) -> None:", "def field_type_converter(self, old_type):\n\n if old_type == 'String':\n new_type = 'Text'\n elif old_type == 'Integer':\n new_type = 'Short'\n elif old_type == 'Date':\n new_type = 'Date'\n elif old_type == 'GlobalID':\n new_type = 'GUID'\n else:\n new_type = 'Double'\n return new_type", "def normalize_blob_fields(blob_fields, params):\n new_blob_fields = {}\n if isinstance(blob_fields, (str, unicode)):\n blob_fields = [blob_fields]\n if isinstance(blob_fields, (list, tuple)):\n for bf in blob_fields:\n field_name = None\n field_type = None\n if isinstance(bf, dict):\n field_name = bf.get(\"field_name\")\n field_type = bf.get(\"field_type\")\n elif isinstance(bf, (str, unicode)):\n field_name = bf\n else:\n raise Exception(\"Unsupported blob field config type %s.\" % type(bf))\n if field_type == None:\n if isinstance(params.get(field_name), (str, unicode)):\n field_type = \"clob\"\n else:\n field_type = \"blob\"\n elif isinstance(field_type, (str, unicode)):\n if field_type in [\"BLOB\", \"CLOB\", \"blob\", \"clob\"]:\n field_type = field_type.lower()\n else:\n raise Exception(\"Unsuported lob type %s.\" % field_type)\n else:\n raise Exception(\"Unsuported field_type %s.\" % type(field_type))\n new_blob_fields[field_name] = field_type\n return new_blob_fields\n elif isinstance(blob_fields, dict):\n return blob_fields\n else:\n raise Exception(\"Unsuported blob_fields types %s.\" % type(blob_fields))", "def _convert(string, type, message):\n try:\n return type(string)\n except ValueError as e:\n print(e)\n raise CharmmPSFError('Could not convert %s' % message)", "def __post_init__(self):\n for field in dataclasses.fields(self):\n value = getattr(self, field.name)\n if not isinstance(value, field.type) and value:\n try:\n setattr(self, field.name, field.type(value))\n except ValueError:\n raise ValueError(f\"Expected {field.name} \"\n f\"to be {field.type}, \"\n f\"got {repr(value)}\")", "def _make_serializable(self, field):\n if isinstance(field, datetime):\n return str(field)\n elif isinstance(field, Decimal):\n return float(field)\n else:\n return field", "def convert_fields(fields, _fields):\n mapper = {\n \"id\": \"local_id\",\n \"local_id\": \"id\"\n }\n fields = deepcopy(fields)\n for field in fields:\n if field['name'] in _fields:\n field['name'] = mapper[field['name']]\n return fields", "def __parse_message_as(msg_type: type, msg_str: str) -> Any:\n # parse the message\n msg_dict = json.loads(msg_str)\n\n # the type specified in the message needs to match\n # the type we are parsing as\n assert msg_dict[MSG_TYPE_NAME] == msg_type.__name__, \\\n f\"Message type did not match the parsing type,\" 
\\\n f\"parsing the message as type {msg_type.__name__},\" \\\n f\"but get a message of type {msg_dict[MSG_TYPE_NAME]}\"\n\n # remove the message type information, and create the object\n del msg_dict[MSG_TYPE_NAME]\n return msg_type(**msg_dict)", "def convert_json_field_to_generic_scalar(field, registry=None):\n\n return OperationArgsType(description=field.help_text, required=not field.null)", "def _RegularMessageToJsonObject(self, message, js):\n fields = message.ListFields()\n\n try:\n for field, value in fields:\n if self.preserving_proto_field_name:\n name = field.name\n else:\n name = field.json_name\n if _IsMapEntry(field):\n # Convert a map field.\n v_field = field.message_type.fields_by_name['value']\n js_map = {}\n for key in value:\n if isinstance(key, bool):\n if key:\n recorded_key = 'true'\n else:\n recorded_key = 'false'\n else:\n recorded_key = str(key)\n js_map[recorded_key] = self._FieldToJsonObject(\n v_field, value[key])\n js[name] = js_map\n elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:\n # Convert a repeated field.\n js[name] = [self._FieldToJsonObject(field, k)\n for k in value]\n elif field.is_extension:\n name = '[%s]' % field.full_name\n js[name] = self._FieldToJsonObject(field, value)\n else:\n js[name] = self._FieldToJsonObject(field, value)\n\n # Serialize default value if including_default_value_fields is True.\n if self.including_default_value_fields:\n message_descriptor = message.DESCRIPTOR\n for field in message_descriptor.fields:\n # Singular message fields and oneof fields will not be affected.\n if ((field.label != descriptor.FieldDescriptor.LABEL_REPEATED and\n field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE) or\n field.containing_oneof):\n continue\n if self.preserving_proto_field_name:\n name = field.name\n else:\n name = field.json_name\n if name in js:\n # Skip the field which has been serialized already.\n continue\n if _IsMapEntry(field):\n js[name] = {}\n elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:\n js[name] = []\n else:\n js[name] = self._FieldToJsonObject(field, field.default_value)\n\n except ValueError as e:\n raise SerializeToJsonError(\n 'Failed to serialize {0} field: {1}.'.format(field.name, e))\n\n return js", "def feast_value_type_to_python_type(field_value_proto: ProtoValue) -> Any:\n field_value_dict = MessageToDict(field_value_proto)\n\n for k, v in field_value_dict.items():\n if k == \"int64Val\":\n return int(v)\n if k == \"bytesVal\":\n return bytes(v)\n if (k == \"int64ListVal\") or (k == \"int32ListVal\"):\n return [int(item) for item in v[\"val\"]]\n if (k == \"floatListVal\") or (k == \"doubleListVal\"):\n return [float(item) for item in v[\"val\"]]\n if k == \"stringListVal\":\n return [str(item) for item in v[\"val\"]]\n if k == \"bytesListVal\":\n return [bytes(item) for item in v[\"val\"]]\n if k == \"boolListVal\":\n return [bool(item) for item in v[\"val\"]]\n\n if k in [\"int32Val\", \"floatVal\", \"doubleVal\", \"stringVal\", \"boolVal\"]:\n return v\n else:\n raise TypeError(\n f\"Casting to Python native type for type {k} failed. 
\"\n f\"Type {k} not found\"\n )", "def _decode(self, msgCls, data):\r\n rosMsg = msgCls()\r\n\r\n for (slotName, slotType) in zip(rosMsg.__slots__, rosMsg._slot_types):\r\n if slotName not in data:\r\n continue\r\n\r\n if '[]' == slotType[-2:]:\r\n listBool = True\r\n slotType = slotType[:-2]\r\n else:\r\n listBool = False\r\n\r\n field = data[slotName]\r\n\r\n if listBool and not isinstance(field, (list, tuple)):\r\n raise TypeError('Given data does not match the definition of '\r\n 'the ROS message.')\r\n\r\n if slotType == 'string':\r\n convFunc = _stringify\r\n elif slotType in self._BASE_TYPES:\r\n convFunc = self._BASE_TYPES[slotType]\r\n elif slotType in self._SPECIAL_TYPES:\r\n convFunc = self._SPECIAL_TYPES[slotType]().decode\r\n elif slotType in self._customTypes and _checkIsStringIO(field):\r\n convFunc = self._customTypes[slotType][0]().decode\r\n else:\r\n convFunc = partial(self._decode,\r\n self._loader.loadMsg(*slotType.split('/')))\r\n\r\n if listBool:\r\n convFunc = partial(map, convFunc)\r\n\r\n setattr(rosMsg, slotName, convFunc(field))\r\n\r\n return rosMsg", "def _to_known_field(cls, field_name: str, value) -> (Column, dict):\n field_names = field_name.split(\".\", maxsplit=1)\n if len(field_names) == 2:\n for field in cls.__fields__:\n if field.name == field_names[0] and field.field_type == dict:\n return field, {field_names[1]: value}\n return None, None", "async def load_message(obj, msg_type, msg=None, field_archiver=None):\n msg = msg_type() if msg is None else msg\n\n fields = msg_type.f_specs() if msg_type else msg.__class__.f_specs()\n for field in fields:\n await load_message_field(obj, msg, field, field_archiver=field_archiver)\n\n return msg", "def __init__(self, raw_field: Dict):\n self.name = raw_field.get(\"name\")\n self.description = raw_field.get(\"description\")\n self.args: Dict[str, Argument] = Schema.parse_arguments(raw_field.get(\"args\", []))\n self.type: TypeDefer = TypeDefer(raw_field.get(\"type\")) if raw_field.get(\"type\") is not None else None\n self.is_deprecated: bool = raw_field.get(\"isDeprecated\")\n self.deprecation_reason: str = raw_field.get(\"deprecationReason\")", "def __set__(self, message_instance, value):\n t = self.type\n if isinstance(t, type) and issubclass(t, Message):\n if self.repeated:\n if value and isinstance(value, (list, tuple)):\n value = [(t(**v) if isinstance(v, dict) else v)\n for v in value]\n elif isinstance(value, dict):\n value = t(**value)\n super(MessageField, self).__set__(message_instance, value)", "def _cast_field(self, cast_to, value):\n if cast_to in (int, long, str):\n return cast_to(value)\n elif cast_to == unicode:\n try:\n value = value.decode(self.charset, self.errors)\n except UnicodeEncodeError, e:\n raise InvalidData(\"Error encoding unicode value '%s': %s\" % (repr(value), e))\n\n return value\n elif cast_to in (any, bytes):\n return value\n else:\n raise TypeError(\"Invalid field type %s\" % (cast_to))", "def _decode_field(self, field_type, field_data, subcontent=None):\n # check wire type\n wt_schema = self.__class__.FIELD_WIRE_TYPE[field_type]\n wt_data = field_data['wire_type']\n if wt_schema != wt_data:\n raise TypeError(\n 'Wire type mismatch (expect {0} but got {1})'\\\n .format(wt_schema, wt_data)\n )\n\n field_decoded = None\n\n # the actual decoding process\n # nested structure\n if field_type == 'a' and subcontent:\n self.logger.debug('_decode_field(): nested field begin')\n if self._kv_fmt:\n field_decoded = dict(self._decode_wire(\n io.BytesIO(field_data['data']),\n subcontent\n 
))\n else:\n field_decoded = tuple(self._decode_wire(\n io.BytesIO(field_data['data']),\n subcontent\n ))\n self.logger.debug('_decode_field(): nested field end')\n\n # string, unsigned vint (2sc)\n elif field_type in 'aT':\n field_decoded = field_data['data']\n\n # unicode\n elif field_type in 'U':\n field_decoded = field_data['data'].decode('utf-8')\n\n # vint (zigzag)\n elif field_type == 'z':\n field_decoded = self._vint_dezigzagify(field_data['data'])\n\n # signed 2sc\n elif field_type == 't':\n field_decoded = self._vint_2sctosigned(field_data['data'])\n\n # fixed, float, double\n elif field_type in 'iIfdqQ':\n field_decoded = struct.unpack(\n '<{0}'.format(field_type), field_data['data']\n )[0]\n\n # boolean\n elif field_type == 'b':\n if field_data['data'] == 0:\n field_decoded = False\n else:\n field_decoded = True\n\n return field_decoded", "def message_to_type(raw: bytes) -> Message:\n try:\n return __type_to_message[raw[0]]\n except KeyError:\n raise InvalidType()", "def convert_data_types(fields, src_db='mysql', dest_db='postgres'):\n\n data_type_map = {\n 'mysql': {\n 'postgres': {\n 'date': 'date',\n 'tinyint': 'smallint',\n 'smallint': 'smallint',\n 'mediumint': 'integer',\n 'int': 'bigint',\n 'bigint': 'numeric',\n 'float': 'real',\n 'double': 'double precision',\n 'tinytext': 'varchar',\n 'mediumtext': 'varchar',\n 'longtext': 'varchar',\n 'varchar': 'varchar',\n 'text': 'varchar',\n 'char': 'char',\n 'binary': 'bytea',\n 'varbinary': 'bytea',\n 'tinyblob': 'bytea',\n 'blob': 'bytea',\n 'mediumblob': 'bytea',\n 'longblob': 'bytea',\n 'datetime': 'timestamp',\n 'time': 'time',\n 'decimal': 'decimal',\n 'json': 'jsonb'\n }\n }\n }\n\n for elem in fields:\n elem['data_type'] = data_type_map[src_db][dest_db][elem['data_type']]\n\n if elem['data_type'] == 'decimal':\n elem['data_type'] += f'''{int(elem['numeric_precision']), int(elem['numeric_scale'])}'''\n\n fields = {e['column_name']: e['data_type'] for e in fields}\n\n return fields", "def _decode_message(self, label: str, buf, typedef=None, pos=0, end=None, group=False):\n print(str(pos) + \" decode_message \" + label)\n if end is None:\n end = len(buf)\n\n if typedef is None:\n typedef = {}\n else:\n # Don't want to accidentally modify the original\n typedef = copy.deepcopy(typedef)\n output = {}\n\n while pos < end:\n oldpos = pos\n tag, pos = decoder._DecodeVarint(buf, pos)\n try:\n field_number, wire_type = wire_format.UnpackTag(tag)\n except Exception as exc:\n raise (ValueError,\n 'Could not read valid tag at pos %d. 
Ensure it is a valid protobuf message: %s'\n % (pos-len(tag), exc), sys.exc_info()[2])\n # Convert to str\n field_number = str(field_number)\n orig_field_number = field_number\n \n field_typedef = None\n if field_number in typedef:\n field_typedef = typedef[field_number]\n else:\n field_typedef = {}\n field_typedef['type'] = self.wire_type_defaults[wire_type]\n field_type = field_typedef['type']\n if self.debug:\n ft = field_type\n if ft == None:\n ft = \"None\"\n print(\"@\" + str(oldpos) + \"-\" + str(pos-1) + \":\" + label + \" field_number \" +\n str(field_number) +\n \" wire_type \" + str(wire_type) +\n \" field_type \" + str(ft))\n # If field_type is None, its either an unsupported wire type, length delim or group\n # length delim we have to try and decode first\n field_out = None\n if field_type == 'LD':\n field_out, pos = self.decode_message_LD(label, buf, pos, field_typedef)\n elif field_type == 'endGroup':\n # TODO Should probably match the field_number to START_GROUP\n if not group:\n raise ValueError(\"Found END_GROUP before START_GROUP\")\n # exit out\n return output, typedef, pos\n elif field_type == 'message':\n field_out, pos = self.decode_message_message(\n label, buf, pos, field_typedef, field_number)\n elif field_type == 'group':\n group_typedef = None\n # Check for a anonymous type\n if 'group_typedef' in field_typedef:\n group_typedef = field_typedef['group_typedef']\n field_out, group_typedef, pos = self.decode_group(\n label, buf, group_typedef, pos)\n # Save type definition\n field_typedef['group_typedef'] = group_typedef\n else:\n # Verify wiretype matches\n if self.wiretypes[field_type] != wire_type:\n raise ValueError(\"Invalid wiretype for field number %s. %s is not wiretype %s\"\n % (field_number, field_type, wire_type))\n # Simple type, just look up the decoder\n field_out, pos = self.decoders[field_type](buf, pos)\n field_typedef['type'] = field_type\n if 'name' not in field_typedef:\n field_typedef['name'] = ''\n field_key = field_number\n if '-' not in field_number and 'name' in field_typedef and field_typedef['name'] != '':\n field_key = field_typedef['name']\n # Deal with repeats\n if field_key in output:\n if isinstance(field_out, list):\n if isinstance(output[field_number], list):\n output[field_key] += field_out\n else:\n output[field_key] = field_out.append(output[field_key])\n else:\n if isinstance(output[field_number], list):\n output[field_key].append(field_out)\n else:\n output[field_key] = [output[field_key], field_out]\n else:\n output[field_key] = field_out\n typedef[orig_field_number] = field_typedef\n if self.debug:\n print(str(field_key) + \" field_out:\" + str(field_out))\n if pos > end:\n raise decoder._DecodeError(\"Invalid Message Length, pos=\" +\n str(pos) + \" end=\" + str(end))\n # Should never hit here as a group\n if group:\n raise ValueError(\"Got START_GROUP with no END_GROUP.\")\n print(\"decode_message finish \" + str(pos))\n return output, typedef, pos", "def _proto2object(\n proto: UpdateGroupMessage_PB,\n ) -> \"UpdateGroupMessage\":\n\n return UpdateGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def mongo_to_python_type(field, data):\n if isinstance(field, ObjectIdField):\n return str(data)\n elif isinstance(field, DecimalField):\n return data\n elif isinstance(field, BooleanField):\n return data\n else:\n return str(data)", "def _from_cpp(self, str_msg, cls):\n msg = cls()\n result 
= msg.deserialize(str_msg)\n return result", "def to_internal(value):\n if isinstance(value, bool):\n return types.Bool((value,))\n if isinstance(value, int):\n return types.Int((value,))\n if isinstance(value, float):\n return types.Float((value,))\n if isinstance(value, six.string_types):\n return types.String((value,))", "def intialize_from_fields(self):\n raise NotImplementedError", "def _ConvertStructMessage(self, value, message, path):\n if not isinstance(value, dict):\n raise ParseError('Struct must be in a dict which is {0} at {1}'.format(\n value, path))\n # Clear will mark the struct as modified so it will be created even if\n # there are no values.\n message.Clear()\n for key in value:\n self._ConvertValueMessage(value[key], message.fields[key],\n '{0}.{1}'.format(path, key))\n return", "def _ConvertMapFieldValue(self, value, message, field, path):\n if not isinstance(value, dict):\n raise ParseError(\n 'Map field {0} must be in a dict which is {1} at {2}'.format(\n field.name, value, path))\n key_field = field.message_type.fields_by_name['key']\n value_field = field.message_type.fields_by_name['value']\n for key in value:\n key_value = _ConvertScalarFieldValue(key, key_field,\n '{0}.key'.format(path), True)\n if value_field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:\n self.ConvertMessage(value[key],\n getattr(message, field.name)[key_value],\n '{0}[{1}]'.format(path, key_value))\n else:\n getattr(message, field.name)[key_value] = _ConvertScalarFieldValue(\n value[key], value_field, path='{0}[{1}]'.format(path, key_value))", "def testUnrecognizedFieldWrongFormat(self):\n\n class SimpleMessage(messages.Message):\n value = messages.IntegerField(1)\n\n message = SimpleMessage(value=3)\n message.set_unrecognized_field('from_json', 'test', messages.Variant.STRING)\n\n encoded = protobuf.encode_message(message)\n expected = (\n chr((1 << protobuf._WIRE_TYPE_BITS) | protobuf._Encoder.NUMERIC) +\n chr(3))\n self.assertEquals(encoded, expected)", "def testDecodeWrongWireFormat(self):\n class ExpectedProto(messages.Message):\n value = messages.StringField(1)\n\n class WrongVariant(messages.Message):\n value = messages.IntegerField(1)\n\n original = WrongVariant()\n original.value = 10\n self.assertErrorIs(messages.DecodeError,\n 'Expected wire type STRING but found NUMERIC',\n protobuf.decode_message,\n ExpectedProto,\n protobuf.encode_message(original))", "def populate_data_from_message(self, msg):\n for field in self:\n try:\n setattr(field, 'data', getattr(msg, field.name))\n except:\n continue", "def __extract_fields(self):\n for name, stuff in self.data.items():\n if stuff == (): # Empty tuple == 1 bit, value of 0\n self.fields.append(Field(name=name, value=0, size=1))\n elif isinstance(stuff, int): # int == specified value, value of 0\n self.fields.append(Field(name=name, value=0, size=stuff))\n elif isinstance(stuff, str): # str == specified value, value of 0\n pattern = re.compile(\"[0-9]+[bB]\")\n if pattern.match(stuff):\n if \"b\" in stuff: # bits specified\n size = int(stuff[:stuff.lower().index(\"b\")])\n self.fields.append(Field(name=name, value=0, size=size))\n elif \"B\" in stuff: # Bytes specified\n size = int(stuff[:stuff.lower().index(\"b\")]) * 8\n self.fields.append(Field(name=name, value=0, size=size))\n else: # No other string option, so must have been one of the \"vary\" constants from above.\n self.fields.append(Field(name=name, value=stuff, size=\"vary\"))\n elif isinstance(stuff, tuple) or isinstance(stuff, list): # specified value and size.\n if 
isinstance(stuff[0], str):\n if \"b\" in stuff[0]: # Bits\n size = int(stuff[0][:stuff[0].lower().index(\"b\")])\n # if not self.__check_bit_size(stuff[1], size):\n # raise Exception(\"error. \" + str(stuff[1]) + \" cannot be fit in \" + str(size) + \" bits.\")\n self.fields.append(Field(name=name, value=stuff[1], size=size))\n elif \"B\" in stuff[0]: # Bytes\n size = int(stuff[0][:stuff[0].lower().index(\"b\")]) * 8\n # if not self.__check_bit_size(stuff[1], size):\n # raise Exception(\"error. \" + str(stuff[1]) + \" cannot be fit in \" + str(size) + \" bits.\")\n self.fields.append(Field(name=name, value=stuff[1], size=size))\n elif stuff[0].lower() == NULL_TERMINATE:\n self.fields.append(Field(name=name, value=stuff[1], size=NULL_TERMINATE))\n elif stuff[0].lower() == PREFIX_LENGTH:\n self.fields.append(Field(name=name, value=stuff[1], size=PREFIX_LENGTH))\n elif stuff[0].lower() == PREFIX_LEN_NULL_TERM:\n self.fields.append(Field(name=name, value=stuff[1], size=PREFIX_LEN_NULL_TERM))\n elif stuff[0].lower() == IPv4:\n self.fields.append(Field(name=name, value=stuff[1], size=IPv4))\n elif isinstance(stuff[0], int):\n # if not self.__check_bit_size(stuff[1], stuff[0]):\n # raise Exception(\"error. \" + str(stuff[1]) + \" cannot be fit in \" + str(stuff[0]) + \" bits.\")\n self.fields.append(Field(name=name, value=stuff[1], size=stuff[0]))", "def _proto2object(\n proto: GetGroupsMessage_PB,\n ) -> \"GetGroupsMessage\":\n\n return GetGroupsMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "async def dump_message(obj, msg, field_archiver=None):\n mtype = msg.__class__\n fields = mtype.f_specs()\n\n obj = collections.OrderedDict() if obj is None else get_elem(obj)\n for field in fields:\n await dump_message_field(obj, msg=msg, field=field, field_archiver=field_archiver)\n return obj", "def normalize_params(params):\n # Collect a set of all fields\n fields = set()\n for p in params:\n fields.update(p)\n fields = sorted(fields)\n\n params2 = list(pluck(fields, params, MISSING))\n # Non-basic types (including MISSING) are unique to their id\n tokens = [\n tuple(x if isinstance(x, (int, float, str)) else id(x) for x in p)\n for p in params2\n ]\n\n return fields, tokens, params2", "def _field2parameter(self, field, *, name, location):\n ret = {\"in\": location, \"name\": name}\n\n partial = getattr(field.parent, \"partial\", False)\n ret[\"required\"] = field.required and (\n not partial or (is_collection(partial) and field.name not in partial)\n )\n\n prop = self.field2property(field)\n multiple = isinstance(field, marshmallow.fields.List)\n\n if self.openapi_version.major < 3:\n if multiple:\n ret[\"collectionFormat\"] = \"multi\"\n ret.update(prop)\n else:\n if multiple:\n ret[\"explode\"] = True\n ret[\"style\"] = \"form\"\n if prop.get(\"description\", None):\n ret[\"description\"] = prop.pop(\"description\")\n ret[\"schema\"] = prop\n return ret", "def _proto2object(\n proto: CreateGroupMessage_PB,\n ) -> \"CreateGroupMessage\":\n\n return CreateGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def value_to_message(self, value):\n if not isinstance(value, self.type):\n raise EncodeError('Expected type %s, got %s: %r' %\n (self.type.__name__,\n type(value).__name__,\n value))\n return value", "def _FieldToJsonObject(self, 
field, value):\n if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:\n return self._MessageToJsonObject(value)\n elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:\n if self.use_integers_for_enums:\n return value\n if field.enum_type.full_name == 'google.protobuf.NullValue':\n return None\n enum_value = field.enum_type.values_by_number.get(value, None)\n if enum_value is not None:\n return enum_value.name\n else:\n if field.file.syntax == 'proto3':\n return value\n raise SerializeToJsonError('Enum field contains an integer value '\n 'which can not mapped to an enum value.')\n elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:\n if field.type == descriptor.FieldDescriptor.TYPE_BYTES:\n # Use base64 Data encoding for bytes\n return base64.b64encode(value).decode('utf-8')\n else:\n return value\n elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:\n return bool(value)\n elif field.cpp_type in _INT64_TYPES:\n return str(value)\n elif field.cpp_type in _FLOAT_TYPES:\n if math.isinf(value):\n if value < 0.0:\n return _NEG_INFINITY\n else:\n return _INFINITY\n if math.isnan(value):\n return _NAN\n if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_FLOAT:\n if self.float_format:\n return float(format(value, self.float_format))\n else:\n return type_checkers.ToShortestFloat(value)\n\n return value", "def _cast_types(args):\n\targs.x_val = None if args.x_val == 'None' else int(args.x_val)\n\targs.test_size = float(args.test_size)\n\targs.alpha = float(args.alpha)\n\targs.fit_prior = (args.fit_prior in ['True', \"True\", 'true', \"true\"])\n\n\t# class_prior - array like type (problem to convert)\n\tif args.class_prior == \"None\" or args.class_prior == 'None':\n\t\targs.class_prior = None\n\n\t# --------- #\n\treturn args", "def _translate_param_value(self, value, param_spec: dict, upas: dict):\n # if param_spec.text_options.valid_ws_types exists and has entries, then its an object input\n # test if value is an UPA and translate it to get its original object name.\n\n # types:\n field_type = param_spec[\"field_type\"]\n if field_type == \"text\":\n valid_ws_types = param_spec.get(\"text_options\", {}).get(\"valid_ws_types\", [])\n if len(valid_ws_types) > 0 and value:\n if isinstance(value, list):\n value = [upas[v][1] if v in upas else v for v in value]\n else:\n value = upas[value][1] if value in upas else value\n return value", "def convertafield(field_comm, field_val, field_iddname):\n convinidd = ConvInIDD()\n field_typ = field_comm.get(\"type\", [None])[0]\n conv = convinidd.conv_dict().get(field_typ, convinidd.no_type)\n return conv(field_val, field_iddname)", "def _break_down(self, buf, type_override=None, id_override=None):\n assert (id_override is not None and type_override is not None) or\\\n (id_override is None and type_override is None),\\\n 'Field ID and type must be both specified in headerless mode'\n\n while 1:\n field = {}\n if type_override is not None:\n f_type = type_override\n f_id = id_override\n else:\n # if no more data, stop and return\n try:\n f_type, f_id = self._decode_header(buf)\n except EOFError:\n break\n\n self.logger.debug(\n \"_break_down():field #%d pbtype #%d\", f_id, f_type\n )\n try:\n if f_type == 0: # vint\n field['data'] = self._decode_vint(buf)\n elif f_type == 1: # 64-bit\n field['data'] = self._read_fixed(buf, 8)\n elif f_type == 2: # str\n field['data'] = self._decode_str(buf)\n elif f_type == 5: # 32-bit\n field['data'] = self._read_fixed(buf, 4)\n else:\n self.logger.warning(\n 
\"_break_down():Ignore unknown type #%d\", f_type\n )\n continue\n except EndOfMessage as e:\n if type_override is None or e.partial:\n raise CodecError('Unexpected end of message while decoding field {0}'.format(f_id))\n else:\n break\n field['id'] = f_id\n field['wire_type'] = f_type\n yield field", "def test_proto_plus_to_protobuf_raises_type_error(self):\n wrong_type = dict()\n self.assertRaises(\n TypeError, util.convert_proto_plus_to_protobuf, wrong_type\n )", "def make_tuple(*fields):\n fields2 = []\n for (idx, f) in zip(range(len(fields)), fields):\n if isinstance(f, (_StructField, pb.StructField)):\n fields2.append(f)\n if isinstance(f, pb.SQLType):\n fields2.append(pb.StructField(\n field_name=\"_%s\" % str(idx),\n field_type=f))\n raise ValueError(\"Could not understand type %s for %s\" % (type(f), f))\n return StructType(fields2)", "def FromWireFormat(self, value):\n pass", "def convertAttributeProto(onnx_arg):\n if onnx_arg.HasField('f'):\n return onnx_arg.f\n elif onnx_arg.HasField('i'):\n return onnx_arg.i\n elif onnx_arg.HasField('s'):\n return str(onnx_arg.s, 'utf-8') \\\n if sys.version_info[0] >= 3 else onnx_arg.s\n elif onnx_arg.HasField('t'):\n return onnx_arg.t # this is a proto!\n elif onnx_arg.floats:\n return list(onnx_arg.floats)\n elif onnx_arg.ints:\n return list(onnx_arg.ints)\n elif onnx_arg.strings:\n str_list = list(onnx_arg.strings)\n if sys.version_info[0] >= 3:\n str_list = map(lambda x: str(x, 'utf-8'), str_list)\n return str_list\n else:\n raise ValueError(\"Unsupported ONNX attribute: {}\".format(onnx_arg))", "def struct(*fields : SetupVal) -> SetupVal:\n for field in fields:\n if not isinstance(field, SetupVal):\n raise ValueError('struct expected a SetupVal, but got {field!r}')\n return StructVal(list(fields))", "def field_to_object(value):\n mapping = {\n str: StringField,\n int: IntField,\n list: ListField,\n dict: DictField,\n datetime.datetime: DateField,\n }\n return mapping.get(type(value), AnyField)(value)", "def __convert( source ):\n # Just in case things get this far but we don't know about the record\n if source['recordType'] not in definitions.RECORDS:\n return {\n 'rec_type': source['recordType']\n }\n\n # Create a flat wrapper\n record = estreamer.common.Flatdict( source )\n\n # Transform\n output = __selectWithNewKeys( record )\n\n return output", "def __parse_field(self, field, tuple_descriptor, alias_on_complex_types, make_visible):\r\n alias = None\r\n field_type = None\r\n return_type = None\r\n underlying_fields = None\r\n aggregate_factory = None\r\n literal_value = None\r\n func_factory = None\r\n fields_to_verify = []\r\n parsed_fds = []\r\n field_backup = list(field)\r\n self.__clean_list(field)\r\n \r\n # parse aliases if they exist\r\n if (len(field) >= 4) and (field[-2] == QueryTokens.AS):\r\n alias = field[-1]\r\n field = field[:-2]\r\n if (field[0] == QueryTokens.STRING_LITERAL) or \\\r\n (field[0] == QueryTokens.INTEGER_LITERAL) or \\\r\n (field[0] == QueryTokens.FLOAT_LITERAL): \r\n alias = self.unnamed_operator_name()\r\n underlying_fields = []\r\n field_type = FieldType.LITERAL\r\n literal_value = field[1]\r\n if field[0] == QueryTokens.STRING_LITERAL:\r\n return_type = ReturnType.STRING\r\n elif field[0] == QueryTokens.INTEGER_LITERAL:\r\n return_type = ReturnType.INTEGER\r\n literal_value = int(literal_value)\r\n elif field[0] == QueryTokens.FLOAT_LITERAL:\r\n return_type = ReturnType.FLOAT\r\n literal_value = float(literal_value)\r\n elif field[0] == QueryTokens.COLUMN_NAME: # field or alias\r\n if alias == 
None:\r\n alias = field[1]\r\n field_descriptor = tuple_descriptor.get_descriptor(field[1])\r\n if field_descriptor == None: # underlying field not yet defined. mark to check later\r\n field_type = FieldType.UNDEFINED\r\n underlying_fields = [field[1]]\r\n # check alias and underlying once this process is done to\r\n # find yet-undefined fields\r\n fields_to_verify.append(field[1])\r\n fields_to_verify.append(alias)\r\n else: # field found, copy information\r\n field_type = field_descriptor.field_type\r\n return_type = field_descriptor.return_type\r\n underlying_fields = field_descriptor.underlying_fields\r\n aggregate_factory = field_descriptor.aggregate_factory\r\n func_factory = field_descriptor.func_factory\r\n elif field[0] == QueryTokens.FUNCTION_OR_AGGREGATE: # function or aggregate \r\n if alias == None:\r\n if alias_on_complex_types:\r\n raise QueryException(\"Must specify alias (AS clause) for '%s'\" % (field[1]))\r\n else:\r\n alias = self.unnamed_operator_name()\r\n underlying_field_list = field[2:]\r\n underlying_fields = []\r\n for underlying in underlying_field_list:\r\n (parsed_fd_list, parsed_verify) = self.__parse_field(underlying, tuple_descriptor, False, False)\r\n for parsed_fd in parsed_fd_list:\r\n parsed_fd.visible = False\r\n fields_to_verify.extend(parsed_verify)\r\n parsed_fds.extend(parsed_fd_list)\r\n underlying_fields.append(parsed_fd_list[0].alias)\r\n aggregate_factory = get_aggregate_factory(field[1])\r\n if aggregate_factory != None: # found an aggregate function\r\n field_type = FieldType.AGGREGATE\r\n return_type = ReturnType.FLOAT\r\n else:\r\n function_information = self.function_registry.get_function(field[1])\r\n if function_information != None:\r\n field_type = FieldType.FUNCTION\r\n func_factory = function_information.func_factory\r\n return_type = function_information.return_type\r\n else:\r\n raise QueryException(\"'%s' is neither an aggregate or a registered function\" % (field[1]))\r\n else:\r\n raise QueryException(\"Empty field clause found: %s\" % (\"\".join(field_backup)))\r\n fd = FieldDescriptor(alias, underlying_fields, field_type, return_type, aggregate_factory, func_factory, literal_value)\r\n fd.visible = make_visible\r\n parsed_fds.insert(0, fd)\r\n return (parsed_fds, fields_to_verify)", "def test_make_user_message_all_fields(self):\n msg_helper = MessageHelper()\n msg_fields = {\n 'content': 'outbound message',\n 'from_addr': 'from',\n 'from_addr_type': 'msisdn',\n 'to_addr': 'to',\n 'to_addr_type': 'mxit_id',\n 'group': '#channel',\n 'provider': 'MY-MNO',\n 'session_event': TransportUserMessage.SESSION_NEW,\n 'transport_type': 'irc',\n 'transport_name': 'vuminet',\n 'transport_metadata': {'foo': 'bar'},\n 'helper_metadata': {'foo': {}},\n 'in_reply_to': 'ccf9c2b9b1e94433be20d157e82786fe',\n 'timestamp': datetime.utcnow(),\n 'message_id': 'bbf9c2b9b1e94433be20d157e82786ed',\n 'endpoint': 'foo_ep',\n }\n msg = msg_helper.make_user_message(**msg_fields)\n expected_fields = msg_fields.copy()\n expected_fields.update({\n 'message_type': TransportUserMessage.MESSAGE_TYPE,\n 'message_version': TransportUserMessage.MESSAGE_VERSION,\n 'routing_metadata': {\n 'endpoint_name': expected_fields.pop('endpoint'),\n }\n })\n self.assertEqual(expected_fields, msg.payload)", "async def dump_message_field(obj, msg, field, field_archiver=None):\n fname, ftype, params = field[0], field[1], field[2:]\n fvalue = getattr(msg, fname, None)\n field_archiver = field_archiver if field_archiver else dump_field\n return await field_archiver(eref(obj, fname, 
True), fvalue, ftype, params)", "def message_to_python(self, raw_message):\n return self.Message(self, raw_message)", "def get_proto_fields():\n raise NotImplementedError()", "def _propagate_types(self):\n pass", "def _ConvertAnyMessage(self, value, message, path):\n if isinstance(value, dict) and not value:\n return\n try:\n type_url = value['@type']\n except KeyError:\n raise ParseError(\n '@type is missing when parsing any message at {0}'.format(path))\n\n try:\n sub_message = _CreateMessageFromTypeUrl(type_url, self.descriptor_pool)\n except TypeError as e:\n raise ParseError('{0} at {1}'.format(e, path))\n message_descriptor = sub_message.DESCRIPTOR\n full_name = message_descriptor.full_name\n if _IsWrapperMessage(message_descriptor):\n self._ConvertWrapperMessage(value['value'], sub_message,\n '{0}.value'.format(path))\n elif full_name in _WKTJSONMETHODS:\n methodcaller(_WKTJSONMETHODS[full_name][1], value['value'], sub_message,\n '{0}.value'.format(path))(\n self)\n else:\n del value['@type']\n self._ConvertFieldValuePair(value, sub_message, path)\n value['@type'] = type_url\n # Sets Any message\n message.value = sub_message.SerializeToString()\n message.type_url = type_url", "async def load_message_field(obj, msg, field, field_archiver=None):\n fname, ftype, params = field[0], field[1], field[2:]\n field_archiver = field_archiver if field_archiver else load_field\n await field_archiver(obj[fname], ftype, params, eref(msg, fname))", "def test_parameters_message(self):\n expected_topic = self.factory.common_topic + WAPMF.PARAMETERS\n values = {\n \"bool_parameter\": False,\n \"int_parameter\": 1,\n \"float_parameter\": 13.37,\n \"string_parameter\": \"foo\",\n }\n expected_payload = json.dumps(values)\n expected_message = Message(expected_topic, expected_payload)\n\n serialized_message = self.factory.make_from_parameters(values)\n\n self.assertEqual(expected_message, serialized_message)", "def _map_onto(self, field_struct, value, options=None):\n if isinstance(value, list):\n # Fill 'repeated' structure\n # a.b = [1, 2]\n # a.b.add() = 1\n # a.b.add() = 2\n for sub in value:\n if hasattr(field_struct, \"add\"):\n nested = field_struct.add()\n # Composite lists will never\n # need to be set by us\n self._map_onto(nested, sub)\n elif hasattr(field_struct, 'append'):\n # Scalar lists will always\n # need to be set by us\n field_struct.append(self._process_value(sub))\n if options:\n self._check_field_length(field_struct, sub, options)\n else:\n raise FieldWrongTypeException(\"Tried to map illegal structure \" +\n str(value) +\n \" onto an object/message field.\")\n elif isinstance(value, dict):\n # Fill message structure\n # a.b = {c: 1, d: 2}\n # a.b.c = 1\n # a.b.d = 2\n for key in value:\n nested = getattr(field_struct, key)\n r = self._map_onto(nested, value[key], self._get_options(field_struct, key))\n if r:\n self._checked_set(field_struct, key, r[0])\n elif isinstance(value, tuple):\n # Fill message structure (in order)\n # a.b = (1, 2)\n # a.b.c = 1\n # a.b.d = 2\n if not hasattr(field_struct, 'DESCRIPTOR'):\n raise FieldWrongTypeException(\"Tried to map illegal structure \" +\n str(value) +\n \" onto a list/repeated field.\")\n fields = field_struct.DESCRIPTOR.fields\n for i in range(len(value)):\n nested = getattr(field_struct, fields[i].name)\n r = self._map_onto(nested, value[i], self._get_options(field_struct, fields[i].name))\n if r:\n self._checked_set(field_struct, fields[i].name, r[0])\n else:\n return [self._process_value(value), ]", "def make_field(field):\n\n if 
\"time\" in field:\n return TimeField(field)\n if \"zd\" in field:\n return RadianField(field)\n else:\n return SimpleField(field)", "def testConvertIntegerToFloat(self):\n message = protojson.decode_message(MyMessage, '{\"a_float\": 10}')\n\n self.assertTrue(isinstance(message.a_float, float))\n self.assertEquals(10.0, message.a_float)", "def _StructMessageToJsonObject(self, message):\n fields = message.fields\n ret = {}\n for key in fields:\n ret[key] = self._ValueMessageToJsonObject(fields[key])\n return ret", "def fpext(self, typ):", "def type_fields(self, res, op_item):\n result = []\n cast_func = {}\n header = res[0]\n for heading in header:\n cast_func[heading] = DataType.str\n\n if \"field_type\" in op_item:\n for f, p in findall(FIELD_TYPE_RE, op_item[\"field_type\"]):\n cast_func[p] = self.dt.get_func(f)\n first = True\n for row in res[1:]:\n new_row = []\n for idx in range(len(header)):\n\n heading = header[idx]\n cur_value = row[idx]\n if type(cur_value) is tuple:\n cur_value = cur_value[1]\n if heading == \"timespan\" and first:\n first = False\n new_row.append((cast_func[heading](cur_value), cur_value))\n\n result.append(new_row)\n\n return [header] + result", "def convertParams(name, params, to_string=False):\r\n \r\n new_params = {}\r\n \r\n for key, value in params.items():\r\n \r\n validator = RadiusAuthRestHandler.FIELD_VALIDATORS.get(key)\r\n\r\n if validator is not None:\r\n if to_string:\r\n new_params[key] = validator.to_string(key, value)\r\n else:\r\n new_params[key] = validator.to_python(key, value)\r\n else:\r\n new_params[key] = value\r\n\r\n return new_params", "def test_to_python(self):\n field = MultiLingualTextField()\n self.assertEqual(None, field.to_python(None), \"to_python of None should always return None.\")\n\n content = json.dumps({\n \"nb\": \"test-nb\",\n \"en\": \"test-en\",\n })\n\n structure = MultiLingualTextStructure(content, use_default_for_empty=True)\n self.assertEqual(structure, field.to_python(structure), \"to_python of a MultiLingualTextStructure object should\"\n \" return the object. As this is already the correct \"\n \"representation.\")\n\n result_from_string = field.to_python(content)\n self.assertEqual(MultiLingualTextStructure, type(result_from_string),\n \"to_python of a string should return the respective MultiLingualTextStructure object.\")\n self.assertEqual(\"test-nb\", result_from_string[\"nb\"])\n self.assertEqual(\"test-en\", result_from_string[\"en\"])", "def _get_fields(self,\n field_pbs: Sequence[descriptor_pb2.FieldDescriptorProto],\n address: metadata.Address, path: Tuple[int, ...],\n oneofs: Optional[Dict[str, wrappers.Oneof]] = None\n ) -> Dict[str, wrappers.Field]:\n # Iterate over the fields and collect them into a dictionary.\n #\n # The saving of the enum and message types rely on protocol buffers'\n # naming rules to trust that they will never collide.\n #\n # Note: If this field is a recursive reference to its own message,\n # then the message will not be in `api_messages` yet (because the\n # message wrapper is not yet created, because it needs this object\n # first) and this will be None. 
This case is addressed in the\n # `_load_message` method.\n answer: Dict[str, wrappers.Field] = collections.OrderedDict()\n for i, field_pb in enumerate(field_pbs):\n is_oneof = oneofs and field_pb.HasField('oneof_index')\n oneof_name = nth(\n (oneofs or {}).keys(),\n field_pb.oneof_index\n ) if is_oneof else None\n\n field = wrappers.Field(\n field_pb=field_pb,\n enum=self.api_enums.get(field_pb.type_name.lstrip('.')),\n message=self.api_messages.get(field_pb.type_name.lstrip('.')),\n meta=metadata.Metadata(\n address=address.child(field_pb.name, path + (i,)),\n documentation=self.docs.get(path + (i,), self.EMPTY),\n ),\n oneof=oneof_name,\n )\n answer[field.name] = field\n\n # Done; return the answer.\n return answer", "def test_fixed_type():\n name = \"a_fixed_field\"\n namespace = \"md5\"\n aliases = [\"md5\", \"hash\"]\n default = types.Fixed(16, namespace=namespace, aliases=aliases)\n python_type = types.Fixed\n field = fields.AvroField(name, python_type, default)\n\n expected = {\n \"name\": name,\n \"type\": {\n \"type\": \"fixed\",\n \"name\": name,\n \"size\": default.size,\n \"namespace\": namespace,\n \"aliases\": aliases,\n },\n }\n\n assert expected == field.to_dict()", "def convertfields(key_comm, obj, inblock=None):\n # f_ stands for field_\n convinidd = ConvInIDD()\n if not inblock:\n inblock = [\"does not start with N\"] * len(obj)\n for i, (f_comm, f_val, f_iddname) in enumerate(zip(key_comm, obj, inblock)):\n if i == 0:\n # inblock[0] is the iddobject key. No conversion here\n pass\n else:\n obj[i] = convertafield(f_comm, f_val, f_iddname)\n return obj", "def from_msg(cls, msg):\n if cls._debug:\n log.debug('msg=%s', msg)\n key, seq_s, uuid, prop_s, body = msg\n key = key if key else None\n seq = struct.unpack('!q', seq_s)[0]\n body = body if body else None\n if body:\n body = pipeline.load(body)\n #body = json.loads(body_s)\n #prop = json.loads(prop_s)\n prop = pipeline.load(prop_s)\n return cls(seq, uuid=uuid, key=key, properties=prop, body=body)", "def _get_field_type_converter(pipeline_builder):\n converter_config = [\n {\n 'fields': ['/id'],\n 'targetType': 'LONG',\n 'dataLocale': 'en,US'\n }\n ]\n field_type_converter = pipeline_builder.add_stage('Field Type Converter')\n field_type_converter.set_attributes(conversion_method='BY_FIELD',\n field_type_converter_configs=converter_config)\n return field_type_converter, pipeline_builder", "def _python_value_to_proto_value(feast_value_type, value) -> ProtoValue:\n\n # Detect list type and handle separately\n if \"list\" in feast_value_type.name.lower():\n\n if feast_value_type == ValueType.FLOAT_LIST:\n return ProtoValue(\n float_list_val=FloatList(\n val=[\n item\n if type(item) in [np.float32, np.float64]\n else _type_err(item, np.float32)\n for item in value\n ]\n )\n )\n\n if feast_value_type == ValueType.DOUBLE_LIST:\n return ProtoValue(\n double_list_val=DoubleList(\n val=[\n item\n if type(item) in [np.float64, np.float32]\n else _type_err(item, np.float64)\n for item in value\n ]\n )\n )\n\n if feast_value_type == ValueType.INT32_LIST:\n return ProtoValue(\n int32_list_val=Int32List(\n val=[\n item if type(item) is np.int32 else _type_err(item, np.int32)\n for item in value\n ]\n )\n )\n\n if feast_value_type == ValueType.INT64_LIST:\n return ProtoValue(\n int64_list_val=Int64List(\n val=[\n item\n if type(item) in [np.int64, np.int32]\n else _type_err(item, np.int64)\n for item in value\n ]\n )\n )\n\n if feast_value_type == ValueType.STRING_LIST:\n return ProtoValue(\n string_list_val=StringList(\n val=[\n 
item\n if type(item) in [np.str_, str]\n else _type_err(item, np.str_)\n for item in value\n ]\n )\n )\n\n if feast_value_type == ValueType.BOOL_LIST:\n return ProtoValue(\n bool_list_val=BoolList(\n val=[\n item\n if type(item) in [np.bool_, bool]\n else _type_err(item, np.bool_)\n for item in value\n ]\n )\n )\n\n if feast_value_type == ValueType.BYTES_LIST:\n return ProtoValue(\n bytes_list_val=BytesList(\n val=[\n item\n if type(item) in [np.bytes_, bytes]\n else _type_err(item, np.bytes_)\n for item in value\n ]\n )\n )\n\n # Handle scalar types below\n else:\n if pd.isnull(value):\n return ProtoValue()\n elif feast_value_type == ValueType.INT32:\n return ProtoValue(int32_val=int(value))\n elif feast_value_type == ValueType.INT64:\n return ProtoValue(int64_val=int(value))\n elif feast_value_type == ValueType.FLOAT:\n return ProtoValue(float_val=float(value))\n elif feast_value_type == ValueType.DOUBLE:\n assert type(value) is float or np.float64\n return ProtoValue(double_val=value)\n elif feast_value_type == ValueType.STRING:\n return ProtoValue(string_val=str(value))\n elif feast_value_type == ValueType.BYTES:\n assert type(value) is bytes\n return ProtoValue(bytes_val=value)\n elif feast_value_type == ValueType.BOOL:\n assert type(value) is bool\n return ProtoValue(bool_val=value)\n\n raise Exception(f\"Unsupported data type: ${str(type(value))}\")", "def _to_cpp(self, msg):\n buf = BytesIO()\n msg.serialize(buf)\n value = buf.getvalue()\n return value", "def _get_typed_arg_value(self, given_value, param_def, strict):\n param_type = param_def[\"type\"]\n if isinstance(given_value, unicode):\n # Convert all unicode to str in UTF-8\n given_value = given_value.encode(\"utf8\") # Make all unicode into str\n\n if isinstance(given_value, IonObjectBase) and (given_value._get_type() == param_type or\n param_type in given_value._get_extends()):\n return given_value\n elif is_ion_object_dict(given_value) and (param_type == \"NoneType\" or hasattr(objects, param_type)):\n return self.create_ion_object(given_value)\n elif param_type in (\"str\", \"bool\", \"int\", \"float\", \"list\", \"dict\", \"NoneType\"):\n arg_val = get_typed_value(given_value, targettype=param_type, strict=strict)\n return arg_val\n else:\n raise BadRequest(\"Cannot convert param value to type %s\" % param_type)", "def ConvertMessage(self, value, message, path):\n self.recursion_depth += 1\n if self.recursion_depth > self.max_recursion_depth:\n raise ParseError('Message too deep. 
Max recursion depth is {0}'.format(\n self.max_recursion_depth))\n message_descriptor = message.DESCRIPTOR\n full_name = message_descriptor.full_name\n if not path:\n path = message_descriptor.name\n if _IsWrapperMessage(message_descriptor):\n self._ConvertWrapperMessage(value, message, path)\n elif full_name in _WKTJSONMETHODS:\n methodcaller(_WKTJSONMETHODS[full_name][1], value, message, path)(self)\n else:\n self._ConvertFieldValuePair(value, message, path)\n self.recursion_depth -= 1", "def parse_and_decode(cls, data: bytes) -> \"Message\":\n if len(data) < cls.calc_size() + 1:\n raise NotEnoughData()\n if data[0] != cls.type:\n raise InvalidType()\n\n return cls(*unpack('<' + cls.fmt, data[1:cls.calc_size() + 1]))", "def decode_message(self, buf, message_type=None):\n self.debugStack = 0\n value, typedef, _ = self._decode_message(\"\", buf, message_type)\n return value, typedef", "def test_decode_missing_field():\n graph = create_object_graph(\"example\", testing=True)\n codec = graph.pubsub_message_schema_registry.find(DerivedSchema.MEDIA_TYPE)\n message = dumps({\n \"mediaType\": DerivedSchema.MEDIA_TYPE,\n })\n assert_that(\n calling(codec.decode).with_args(message),\n raises(ValidationError, r\".*DerivedSchema\"),\n )", "def data_to_msg(self, data):\n fields_names = [self.primary_key] + [field.name for field in self.fields]\n data_dict = {}\n for idx, field in enumerate(fields_names):\n data_dict[field] = data[idx]\n return MsgWithTag.from_dict(data_dict)", "def _to_message_record(parsed):\n return MessageRecord(record_type=parsed.record_type,\n transaction_sequence_n=parsed.transaction_sequence_n,\n record_sequence_n=parsed.record_sequence_n,\n message_type=parsed.message_type,\n message_text=parsed.message_text,\n original_record_sequence_n=parsed.sequence_n,\n message_record_type=parsed.message_record_type,\n message_level=parsed.message_level,\n validation_n=parsed.validation)", "def fromString(line: Union[bytes, str]) -> SBSMessage:\n if isinstance(line, bytes):\n line = line.decode()\n\n values = line.rstrip(DELIMITER).split(\",\")\n\n if len(FieldNames) != len(values):\n raise Exception(\n \"Incorrect number of msg fields. \"\n f\"Expected {len(FieldNames)}, got {len(values)}. 
\"\n f\"values={values}, line={line}\"\n )\n\n attrs = {}\n for k, v in zip(FieldNames, values):\n v = v.strip() # remove any surrounding spaces\n if v:\n # perform type conversion if necessary\n if k in IntegerFields:\n v = int(v)\n elif k in FloatFields:\n v = float(v)\n elif k in BooleanFields:\n v = True if v == \"1\" else False\n elif k in DateFields:\n Y, M, D = [int(i) for i in v.split(\"/\")]\n v = datetime.date(Y, M, D)\n elif k in TimeFields:\n H, M, S = v.split(\":\")\n S, F = S.split(\".\")\n microsecond = int(int(F) * 1e3)\n v = datetime.time(\n hour=int(H), minute=int(M), second=int(S), microsecond=microsecond\n )\n # elif k in StringFields:\n # v = v.strip()\n # else:\n # # field is expected to be a string field\n # logger.warning(\n # 'Unexpected field name: {}'.format(k))\n else:\n v = None\n\n attrs[k] = v\n\n return SBSMessage(**attrs)", "def checkField(fromFieldType, toFieldType, delimiter):\n\n if fromFieldType == \"String\":\n if not toFieldType == \"String\":\n arcpy.AddError(\"Copy To Field must be of type text when Read From Field is of type text.\")\n else:\n if not toFieldType == \"String\":\n if delimiter != \"\":\n arcpy.AddError(\"Copy To Field must be of type text when Read From Field is of type numeric or date and you are using a delimiter.\")\n\n if delimiter == \"\":\n if fromFieldType == \"SmallInteger\":\n if not toFieldType in [\"Integer\", \"SmallInteger\", \"Float\", \"Double\"]:\n if toFieldType == \"Date\":\n arcpy.AddError(\"Copy To Field must be of type text.\")\n\n if fromFieldType == \"Integer\":\n if toFieldType in [\"SmallInteger\", \"Integer\", \"Float\", \"Double\", \"Date\"]:\n arcpy.AddError(\"Copy To Field must be of type text.\")\n\n else:\n if fromFieldType in [\"Float\", \"Double\" , \"Date\"]:\n if toFieldType in [\"Integer\", \"SmallInteger\", \"Float\", \"Double\" , \"Date\"]:\n arcpy.AddError(\"Copy To Field must be of type text.\")", "def _fields_to_dict(fields_in):\n dict_out = {}\n\n for key, val in fields_in.items():\n param = {}\n param['default'] = val.missing\n param['type'] = type(val.missing)\n if key == 'files' or key == 'urls':\n param['type'] = str\n\n val_help = val.metadata['description']\n if 'enum' in val.metadata.keys():\n val_help = \"{}. 
Choices: {}\".format(val_help, \n val.metadata['enum'])\n param['help'] = val_help\n\n try:\n val_req = val.required\n except:\n val_req = False\n param['required'] = val_req\n\n dict_out[key] = param\n return dict_out", "def process_MESSAGE_TYPE_EMG(self, raw):\n\n pass", "def test_convert_protobuf_to_proto_plus(self):\n protobuf = ProtobufFixture()\n converted = util.convert_protobuf_to_proto_plus(protobuf)\n # Assert that the converted proto is an instance of the Message\n # wrapper class.\n self.assertIsInstance(converted, proto.Message)", "def from_any_pb(pb_type, any_pb):\n msg = pb_type()\n if not any_pb.Unpack(msg):\n raise TypeError(\n 'Could not convert {} to {}'.format(\n any_pb.__class__.__name__, pb_type.__name__))\n\n return msg", "def field_type(f, default=MISSING, *, unwrap=True) -> Union[tuple, Any]:\n return _field_type(f, TYPE, default, unwrap=unwrap)", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .entity import Entity\n from .mail_search_folder import MailSearchFolder\n from .message import Message\n from .message_rule import MessageRule\n from .multi_value_legacy_extended_property import MultiValueLegacyExtendedProperty\n from .single_value_legacy_extended_property import SingleValueLegacyExtendedProperty\n\n from .entity import Entity\n from .mail_search_folder import MailSearchFolder\n from .message import Message\n from .message_rule import MessageRule\n from .multi_value_legacy_extended_property import MultiValueLegacyExtendedProperty\n from .single_value_legacy_extended_property import SingleValueLegacyExtendedProperty\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"childFolderCount\": lambda n : setattr(self, 'child_folder_count', n.get_int_value()),\n \"childFolders\": lambda n : setattr(self, 'child_folders', n.get_collection_of_object_values(MailFolder)),\n \"displayName\": lambda n : setattr(self, 'display_name', n.get_str_value()),\n \"isHidden\": lambda n : setattr(self, 'is_hidden', n.get_bool_value()),\n \"messageRules\": lambda n : setattr(self, 'message_rules', n.get_collection_of_object_values(MessageRule)),\n \"messages\": lambda n : setattr(self, 'messages', n.get_collection_of_object_values(Message)),\n \"multiValueExtendedProperties\": lambda n : setattr(self, 'multi_value_extended_properties', n.get_collection_of_object_values(MultiValueLegacyExtendedProperty)),\n \"parentFolderId\": lambda n : setattr(self, 'parent_folder_id', n.get_str_value()),\n \"singleValueExtendedProperties\": lambda n : setattr(self, 'single_value_extended_properties', n.get_collection_of_object_values(SingleValueLegacyExtendedProperty)),\n \"totalItemCount\": lambda n : setattr(self, 'total_item_count', n.get_int_value()),\n \"unreadItemCount\": lambda n : setattr(self, 'unread_item_count', n.get_int_value()),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields" ]
[ "0.5889569", "0.58535415", "0.5846825", "0.5840448", "0.58211553", "0.5739531", "0.5697468", "0.5670201", "0.5669158", "0.558879", "0.5576237", "0.5560981", "0.5541033", "0.5484416", "0.5465832", "0.54647964", "0.54605037", "0.5457013", "0.54551595", "0.54478323", "0.5439084", "0.54163504", "0.5412242", "0.5402496", "0.5397866", "0.5395045", "0.5393574", "0.5387875", "0.53606963", "0.535017", "0.53481925", "0.5342748", "0.53331727", "0.53284407", "0.5306772", "0.52983826", "0.5278419", "0.5277192", "0.5274084", "0.526558", "0.5261353", "0.5254191", "0.52447987", "0.5240428", "0.5238055", "0.5234739", "0.52281046", "0.5226112", "0.5211711", "0.52116644", "0.519751", "0.5193299", "0.5191959", "0.51870877", "0.5185765", "0.51856333", "0.5184296", "0.5181251", "0.51725787", "0.5164492", "0.5163703", "0.5160412", "0.51474315", "0.5145145", "0.5142984", "0.5139818", "0.5139703", "0.51379913", "0.51328176", "0.51304656", "0.51215935", "0.512031", "0.51093805", "0.50980955", "0.5097477", "0.5087476", "0.5085605", "0.50770676", "0.50748706", "0.5074567", "0.50734055", "0.5070529", "0.507018", "0.50679934", "0.506768", "0.50645167", "0.5063809", "0.50608045", "0.50464165", "0.5041312", "0.5035751", "0.5026782", "0.5023855", "0.5009148", "0.5008945", "0.5007217", "0.50061524", "0.49956217", "0.49944204", "0.4990779" ]
0.5692826
7
This method sets the generators from a protocol buffer. This does... not sure yet
def set_generators_from_proto(self, table): self.__logger.info("Setting Generator functions from Msg") # Let us know for field in table.info.schema.info.fields: # Iterate over the fields in the proto # Let us know what field is being accessed self.__logger.info("Gathering fakers %s", field.name) if field.info.aux.dependent != "": # If the aux.dependent field is empty continue # Goto the top of the loop if the above is the case self.__logger.info("Independent field %s", field) # Getting the field # Get the parameters out of the generator parms = self.get_field_parameters( field.info.aux.generator.parameters) # Grab our params # Load this data into the system, associate field with params and generator self.generator_fcns[field.name] = (field.info.aux.generator.name, parms) self.__logger.debug(parms) # Tell us self.__logger.debug(field.info.aux.generator.name) # Tell us self.__logger.debug(self.generator_fcns[field.name]) # Tell us
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_generator(self, gen):\n self.generator = gen", "def setGenerators(self):\n shape = (self.input_shape[0],self.input_shape[1])\n self.trainGen,self.validateGen = getBatchGenerators(self.batch_size,\n self.dataPath,\n shape,\n self.classMap,\n self.regression)", "def setup(self):\n for gen in self._generators:\n gen.setup()", "def _config_generator(self, **kwargs):\n self.__logger.info(kwargs)\n generator = GeneratorFactory(self.generator_type, **kwargs)\n self._msg.input.generator.config.CopyFrom(generator.to_msg())", "def __init__(self, gen):\n self.gen = gen", "def __call__(self, gen) -> Generator:\n self.stream = gen\n self.prepare()\n return self.stream", "def _build_generators(self) -> None:\n try:\n for generator in self._datasource_config[\"batch_kwargs_generators\"].keys():\n self.get_batch_kwargs_generator(generator)\n except KeyError:\n pass", "def _generators(self):\n return self.free_group.generators", "def set_paths_gen(self, paths_gen): #w:\r\n self.paths_gen = paths_gen", "def __iter__(self):\n yield from self.gen", "def generator_setup():\n PaaSPureGenerator()", "def _define_generators(self):\n\t\treturn {\n\t\t \"transaction_id\" : Mgcp._generate_uint32,\n\t\t \"connection_id\" : Mgcp._generate_uint32,\n\t\t \"request_id\" : Mgcp._generate_uint32,\n\t\t \"timestamp\" : Mgcp._generate_timestamp\n\t\t}", "def __init__(\n self,\n models,\n tgt_dict,\n tgt_dict_mt,\n beam_size=1,\n beam_size_mt=1,\n max_len_a=0,\n max_len_b=200,\n max_len_a_mt=0,\n max_len_b_mt=200,\n max_len=0,\n min_len=1,\n normalize_scores=True,\n len_penalty=1.0,\n len_penalty_mt=1.0,\n unk_penalty=0.0,\n temperature=1.0,\n match_source_len=False,\n no_repeat_ngram_size=0,\n eos=None,\n eos_mt=None,\n symbols_to_strip_from_output=None,\n lm_model=None,\n lm_weight=1.0,\n ):\n super().__init__()\n\n from examples.speech_to_speech.unity.sequence_generator import SequenceGenerator\n\n self.generator = SequenceGenerator(\n models,\n tgt_dict,\n beam_size=beam_size,\n max_len_a=max_len_a,\n max_len_b=max_len_b,\n max_len=max_len,\n min_len=min_len,\n normalize_scores=normalize_scores,\n len_penalty=len_penalty,\n unk_penalty=unk_penalty,\n temperature=temperature,\n match_source_len=match_source_len,\n no_repeat_ngram_size=no_repeat_ngram_size,\n search_strategy=search.BeamSearch(tgt_dict),\n eos=eos,\n symbols_to_strip_from_output=symbols_to_strip_from_output,\n lm_model=lm_model,\n lm_weight=lm_weight,\n )\n self.eos = self.generator.eos\n\n self.generator_mt = SequenceGenerator(\n models,\n tgt_dict_mt,\n beam_size=beam_size_mt,\n max_len_a=max_len_a_mt,\n max_len_b=max_len_b_mt,\n max_len=max_len,\n min_len=min_len,\n normalize_scores=normalize_scores,\n len_penalty=len_penalty_mt,\n unk_penalty=unk_penalty,\n temperature=temperature,\n match_source_len=match_source_len,\n no_repeat_ngram_size=no_repeat_ngram_size,\n search_strategy=search.BeamSearch(tgt_dict_mt),\n eos=eos_mt,\n symbols_to_strip_from_output=symbols_to_strip_from_output,\n )", "def testGeneratorType(self):", "def _populate_next(self, graph, *args, yield_response_gen=False, **kwargs):\n\n if yield_response_gen:\n kwargs['yield_response_gen'] = yield_response_gen\n format, *header_chunks, (resp, gen) = self.data_next(**kwargs)\n self._populate(graph, header_chunks)\n yield format\n yield from header_chunks\n yield resp, gen\n else:\n generator = self.data_next(**kwargs)\n format = next(generator)\n self._populate(graph, generator)", "def inputs(self, bytes_gen: InputType) -> None:\n if hasattr(bytes_gen, '__call__'):\n self._inputs 
= bytes_gen()\n else:\n self._inputs = bytes_gen", "def _initialize_generator(self,gen,obj=None):\n # CEBALERT: use a dictionary to hold these things.\n if hasattr(obj,\"_Dynamic_time_fn\"):\n gen._Dynamic_time_fn = obj._Dynamic_time_fn\n\n gen._Dynamic_last = None\n # CEB: I'd use None for this, except can't compare a fixedpoint\n # number with None (e.g. 1>None but FixedPoint(1)>None can't be done)\n gen._Dynamic_time = -1\n\n gen._saved_Dynamic_last = []\n gen._saved_Dynamic_time = []", "def semigroup_generators(self):", "def generators(self):\n return self._generators", "def initialize(self, protocol, subset='train'):\n\n self.batches_ = []\n\n for current_file in getattr(protocol, subset)():\n\n # create a dummy protocol that contains only one file\n dummy = get_dummy_protocol(current_file)\n\n # initialize batch generator for current file\n generator = SpeechSegmentGenerator(self.feature_extraction,\n per_label=self.per_label, per_fold=self.per_fold,\n duration=self.duration, min_duration=self.min_duration,\n max_duration=self.max_duration,\n label_min_duration=self.label_min_duration, parallel=1)\n\n # keep track of it\n self.batches_.append(generator(dummy, subset='train'))", "def registerPlotGenerator(self, generator):\n self.gen = generator", "def generator(self, random, args):\n\t\traise NotImplementedError", "def __init__(self, model_name, logger=None, gpu_ids=None):\n super().__init__(model_name, 'generator', logger, gpu_ids)", "def prepare_gen(self, targets):\r\n pass", "def fit_generator(self, generator, *args, **kwargs):\n self.model.fit_generator(\n generator,\n *args, **kwargs\n )", "def testgen(self):\n self.parse()\n self.generate()", "def _init_data_generators(self, ephase, list_iterstore, dict_iterstore):\n # Fetch input data generators\n Xin_gen, Xin_val_gen = \\\n self.callbacks['get_input_data_generators'](ephase, list_iterstore, dict_iterstore)\n\n # Fetch target data generators\n Xt_gen, Xt_val_gen = \\\n self.callbacks['get_target_data_generators'](ephase, list_iterstore, dict_iterstore)\n\n if Xin_gen is None:\n raise Exception('Required data generators are unavailable')\n\n if (ephase == NNModelPhase.PREDICT or ephase == NNModelPhase.TEST):\n if Xin_val_gen is not None or Xt_val_gen is not None:\n raise Exception('In NNModelPhase.PREDICT|TEST, `X2_gen` is unused. 
But it is not None.')\n\n # Sync the data generators (input, target)\n Xin_gen.sync_tgt_generator(Xt_gen)\n if (Xin_val_gen is not None):\n Xin_val_gen.sync_tgt_generator(Xt_val_gen)\n\n return Xin_gen, Xin_val_gen", "def generator(self, random, args):\r\n raise NotImplementedError", "def gen(self):\n raise NotImplementedError(\"(%s).gen\" % self)", "def update_generator_parameters(self):\n gens = self.get_attribute('generator_parameters')\n if not len (gens):\n genInfo = generator_parameters()\n else:\n genInfo = generator_parameters(gens[-1])\n genInfo.set_attribute('submission_details', self._json_base__get_submission_details())\n genInfo.set_attribute('version', genInfo.get_attribute('version')+1)\n\n gens.append(genInfo.json())\n self.set_attribute('generator_parameters', gens)", "def create_buffers(self):", "def _dataset_split_generators(self):\n raise NotImplementedError()", "def get_load_generator(self):\n raise NotImplementedError", "def _initialize_buffers(self) -> None:", "def generate(self):\n pass", "def __init__(self, generator: FormulaGenerator = NumberGenerator(),\n power_generator: FormulaGenerator = TokenGenerator(\"2\")):\n super().__init__([generator])\n self.power_generator = power_generator", "def get_generator(self, file_generator, batch_size=None, **kwargs):\n raise NotImplementedError('')", "def get_generator_class(self) -> Any:", "def generate(self, generate):\n\n self._generate = generate", "def _build_generators(self, mode='eval'):\n # Stacking encoded inputs\n with tf.variable_scope(f'{get_current_scope()}encodings/stacked/'):\n x_encoded_stack = tf.stack(self._x_encoded, axis=3)\n self._x_encoded_stack = tf.reshape(\n x_encoded_stack,\n tf.concat([tf.shape(x_encoded_stack)[:2], [self._num_dims_generator * self.num_tracks]], 0)\n )\n\n # Building the Feedback module\n with tf.variable_scope(f'{get_current_scope()}feedback/'):\n self._feedback_layer.build(is_train=self._is_train)\n\n self._x_feedback, self._feedback_final_state = self._apply_feedback(\n self._x_encoded_stack,\n single_step=False\n )\n\n # Building the per-track Generators\n for i in range(self.num_tracks):\n with tf.variable_scope(f'generator_inputs/{self.tracks[i]}'):\n inputs = tf.concat([self._x_encoded[i], self._x_feedback], axis=-1)\n generator_inputs = inputs[:, 0:-1, :]\n\n with tf.variable_scope(f'generator_targets/{self.tracks[i]}'):\n generator_targets = self._x_encoded[i][:, 1:, :]\n\n self.generators[i].build(x=generator_inputs, y=generator_targets,\n lengths=self._lengths, is_train=self._is_train, mode=mode)\n\n # Saving trainable Feedback module variable\n self._trainable_feedback_variables = self._feedback_layer.trainable_variables", "def __build_generators(self, x, y, split=0.9):\n\n # Sanity check\n assert len(x) == len(y)\n\n # Split dataset into train and validation sets\n cut = int(split * len(x))\n x_train = x[:cut]\n x_valid = x[cut:]\n y_train = y[:cut]\n y_valid = y[cut:]\n\n if self.input_type == \"mols\":\n self.__train_gen = HetSmilesGenerator(\n x_train,\n None,\n self.smilesvec1,\n self.smilesvec2,\n batch_size=self.batch_size,\n shuffle=True,\n )\n\n self.__valid_gen = HetSmilesGenerator(\n x_valid,\n None,\n self.smilesvec1,\n self.smilesvec2,\n batch_size=self.batch_size,\n shuffle=True,\n )\n\n else:\n self.__train_gen = DescriptorGenerator(\n x_train,\n y_train,\n self.smilesvec1,\n self.smilesvec2,\n batch_size=self.batch_size,\n shuffle=True,\n )\n\n self.__valid_gen = DescriptorGenerator(\n x_valid,\n y_valid,\n self.smilesvec1,\n self.smilesvec2,\n 
batch_size=self.batch_size,\n shuffle=True,\n )\n\n # Calculate number of batches per training/validation epoch\n train_samples = len(x_train)\n valid_samples = len(x_valid)\n self.__steps_per_epoch = train_samples // self.batch_size\n self.__validation_steps = valid_samples // self.batch_size\n\n print(\n \"Model received %d train samples and %d validation samples.\"\n % (train_samples, valid_samples)\n )", "def gen(num_batches,\n batch_size,\n seq_width,\n min_len,\n max_len):\n for batch_num in range(num_batches):\n\n # All batches have the same sequence length\n seq_len = random.randint(min_len, max_len)\n seq = np.random.binomial(1, 0.5, (seq_len, batch_size, seq_width))\n seq = Variable(torch.from_numpy(seq))\n\n # The input includes an additional channel used for the delimiter\n inp = Variable(torch.zeros(seq_len + 1, batch_size, seq_width + 1))\n inp[:seq_len, :, :seq_width] = seq\n inp[seq_len, :, seq_width] = 1.0 # delimiter in our control channel\n outp = seq.clone()\n\n yield batch_num+1, inp.float().to(params.device), outp.float().to(params.device)", "def generate(self):\n pass", "def generate(self):\n pass", "def generate(self):\n pass", "def fromgenotype(self):\n\t\tpass", "def __prepair_patterns(self):\n\n\t\tgen = self.randlib.Generate\n\t\tbuff_type = c_ubyte * (self.max_buff_size * 512)\n\t\tself.pat_array = []\n\t\t\n\t\tfor i in range(self.buffer_cnt):\n\t\t\ttemp = buff_type()\n\t\t\tgen(temp, self.max_buff_size * 512, self.seed+i) \n\t\t\tself.pat_array.append(temp)", "def __set__(self,obj,val):\n super(Dynamic,self).__set__(obj,val)\n\n dynamic = callable(val) \n if dynamic: self._initialize_generator(val,obj)\n if not obj: self._set_instantiate(dynamic)", "def generator(self):\n return [None, 1]", "def fit_generator(self, generator: \"DataGenerator\", nb_epochs: int = 20, **kwargs) -> None:\n raise NotImplementedError", "def setgenerate(self, generate, genproclimit=None):\n if genproclimit is None:\n return self.proxy.setgenerate(generate)\n else:\n return self.proxy.setgenerate(generate, genproclimit)", "def generator(T):\n \n class generator_interposer(T):\n \n def __init__(self, *args, **kwargs):\n gen_i = self._get_int()\n \n # Capture the instantiation location\n frame = inspect.stack()[1]\n gen_i.srcinfo_inst = SourceInfo(frame.filename, frame.lineno)\n\n # Call the user's constructor \n with gen_i:\n super().__init__(*args, **kwargs)\n\n self._int_field_info = field_info() \n if gen_i.ctor_level == 0:\n self.build_model()\n \n pass\n\n # Add the interposer class \n ret = type(T.__name__, (generator_interposer,), dict())\n\n if not hasattr(T, \"_gen_init\"):\n def __getattribute__(self, a):\n ret = object.__getattribute__(self, a)\n \n if isinstance(ret, type_base) and not is_raw_mode():\n # We're not in an expression, so the user\n # wants the value of this field\n ret = ret.get_val()\n \n return ret\n \n def __setattr__(self, field, val):\n try:\n # Retrieve the field object so we can check if it's \n # a type_base object. 
This will throw an exception\n # if the field doesn't exist\n fo = object.__getattribute__(self, field)\n except:\n object.__setattr__(self, field, val)\n else:\n if isinstance(fo, type_base):\n if not is_raw_mode():\n # We're not in an expression context, so the \n # user really wants us to set the actual value\n # of the field\n fo.set_val(val)\n else:\n raise Exception(\"Attempting to use '=' in a constraint\")\n else:\n object.__setattr__(self, field, val) \n \n def randomize(self):\n model = self.get_model()\n Randomizer.do_randomize([model])\n \n def build_field_model(self, name):\n if self._int_field_info.model is None:\n model = FieldCompositeModel(name, self._int_field_info.is_rand, self)\n self._int_field_info.model = model\n \n # Iterate through the fields and constraints\n # First, assign IDs to each of the randomized fields\n with expr_mode():\n for f in dir(self):\n if not f.startswith(\"__\") and not f.startswith(\"_int\"):\n fo = getattr(self, f)\n \n if hasattr(fo, \"_int_field_info\"):\n if fo._int_field_info.model is None:\n fo._int_field_info.model = fo.build_field_model(f)\n\n model.add_field(fo._int_field_info.model)\n \n # Now, elaborate the constraints\n for f in dir(self):\n if not f.startswith(\"__\") and not f.startswith(\"_int\"):\n fo = getattr(self, f)\n if isinstance(fo, constraint_t):\n clear_exprs()\n push_constraint_scope(ConstraintBlockModel(f))\n try:\n fo.c(self)\n except Exception as e:\n print(\"Exception while processing constraint: \" + str(e))\n raise e\n fo.set_model(pop_constraint_scope())\n model.add_constraint(fo.model)\n clear_exprs()\n \n self._int_field_info.model.name = name\n return self._int_field_info.model\n \n def get_model(self):\n with expr_mode():\n if self._int_field_info.model is None:\n self._int_field_info.model = self.build_field_model(None)\n \n return self._int_field_info.model\n \n def _get_int(self):\n if not hasattr(self, \"_gen_int\"):\n self._gen_int = GeneratorInt()\n return self._gen_int\n \n setattr(T, \"__getattribute__\", __getattribute__)\n setattr(T, \"__setattr__\", __setattr__)\n setattr(T, \"randomize\", randomize)\n# setattr(T, \"randomize_with\", randomize_with)\n setattr(T, \"build_field_model\", build_field_model)\n setattr(T, \"get_model\", get_model)\n# setattr(T, \"__enter__\", __enter__)\n# setattr(T, \"__exit__\", __exit__)\n# setattr(T, \"do_pre_randomize\", do_pre_randomize)\n# setattr(T, \"do_post_randomize\", do_post_randomize)\n setattr(T, \"_int_field_info\", field_info(True))\n setattr(T, \"_get_int\", _get_int)\n setattr(T, \"_ro_init\", True)\n \n \n \n \n return ret", "def generate(self):", "def register_generator(self, generator):\n product_type = generator().product_type()\n self.generator_map[product_type] = generator", "def generator(self):\n return self._generator", "def load_generator(\n ckpt, is_stylegan1, G_res, out_size, noconst, latent_dim, n_mlp, channel_multiplier, dataparallel, base_res_factor\n):\n if is_stylegan1:\n generator = G_style(output_size=out_size, checkpoint=ckpt).cuda()\n else:\n generator = Generator(\n G_res,\n latent_dim,\n n_mlp,\n channel_multiplier=channel_multiplier,\n constant_input=not noconst,\n checkpoint=ckpt,\n output_size=out_size,\n base_res_factor=base_res_factor,\n ).cuda()\n if dataparallel:\n generator = th.nn.DataParallel(generator)\n return generator", "def generate(self):\n raise NotImplementedError", "def feed_dict_generator(self):\n pass", "def feed_dict_generator(self):\n pass", "def _setup_random_gen(\n self,\n probabilities: List[float],\n 
random_nums: List[int]\n ) -> None:\n RandomGen._probabilities = probabilities\n RandomGen._random_nums = random_nums\n self._random_gen = RandomGen()", "def _prepare_message(self, model, data, data_val, kwargs, generator=False):\n self._check_compile(model, kwargs)\n kwargs = self._check_serialize(kwargs)\n gen_setup = []\n\n if generator:\n nb_data_chunks = [get_nb_chunks(d) for d in data]\n nb_data_val_chunks = [get_nb_chunks(dv) for dv in data_val]\n for d_c, dv_c in szip(nb_data_chunks, nb_data_val_chunks):\n is_val_one = dv_c == 1\n is_train_one = d_c == 1\n\n if dv_c is not None:\n # many to one\n if d_c > dv_c and is_val_one:\n gen_setup.append(1)\n\n # one to many\n elif d_c < dv_c and is_train_one:\n gen_setup.append(2)\n\n # equal\n elif d_c == dv_c:\n gen_setup.append(3)\n\n else: # pragma: no cover\n Exception('Nb batches in train generator and'\n 'validation generator not compatible')\n\n data_hash = cm.create_gen_hash(data)\n data, data_val = pickle_gen(data, data_val)\n else:\n data_hash = cm.create_data_hash(data)\n\n return data, data_val, data_hash, gen_setup", "def example_generator(self, mode: str):\n raise NotImplementedError", "def __iter__(self) -> Generator:\r\n yield from self.sequence", "def __iter__(self):\n yield from self.url.generator", "def parse(self):\n gen = self.v6_gen() # read from workers\n gen = self.tuple_gen(gen) # convert v6->tuple\n gen = self.batch_gen(gen) # assemble into batches\n for b in gen:\n yield b", "def _generate(self, **kwargs):\n # Converted to numpy array by _validate. Simply assign to correct attribute\n self._samples = self.parameter_schema['parameter_samples']\n super()._generate()", "def generate(self):\r\n raise NotImplementedError", "def fillBuffer():\n buff[bufferCounter].next = dataIn", "def build(self):\n # add ops for generator (A->B) to graph\n self.G = Generator(channels=self.opt.channels, ngf=self.opt.ngf, norm_type=self.opt.layer_norm_type,\n init_type=self.opt.weight_init_type, init_gain=self.opt.weight_init_gain,\n training=self.training, name='G')\n\n if self.training:\n # add ops for other generator (B->A) and discriminators to graph\n self.F = Generator(channels=self.opt.channels, ngf=self.opt.ngf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.weight_init_type,\n init_gain=self.opt.weight_init_gain, training=self.training, name='F')\n self.D_A = Discriminator(channels=self.opt.channels, ndf=self.opt.ndf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.weight_init_type,\n init_gain=self.opt.weight_init_gain, training=self.training, name='D_A')\n self.D_B = Discriminator(channels=self.opt.channels, ndf=self.opt.ndf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.weight_init_type,\n init_gain=self.opt.weight_init_gain, training=self.training, name='D_B')\n\n # generate fake images\n fakeB = self.G(self.realA)\n fakeA = self.F(self.realB, self.rand_mask)\n\n # generate reconstructed images\n reconstructedA = self.F(fakeB, self.last_mask)\n reconstructedB = self.G(fakeA)\n\n # generate identity mapping images\n identA = self.G(self.realB)\n identB = self.F(self.realA, self.mask_non_shadow)\n\n tf.summary.image('A/original', batch_convert_2_int(self.realA))\n tf.summary.image('B/original', batch_convert_2_int(self.realB))\n tf.summary.image('A/generated', batch_convert_2_int(fakeA))\n tf.summary.image('B/generated', batch_convert_2_int(fakeB))\n tf.summary.image('A/reconstructed', batch_convert_2_int(reconstructedA))\n tf.summary.image('B/reconstructed', 
batch_convert_2_int(reconstructedB))\n\n # add loss ops to graph\n Gen_loss, D_A_loss, D_B_loss = self.__loss(fakeA, fakeB, reconstructedA,\n reconstructedB, identA, identB)\n\n # add optimizer ops to graph\n optimizers = self.__optimizers(Gen_loss, D_A_loss, D_B_loss)\n\n return fakeA, fakeB, optimizers, Gen_loss, D_A_loss, D_B_loss\n else: # only need generator from A->B during testing\n fakeB = self.G(self.realA)\n return fakeB", "def generators(self) -> List[Generator]:\n return self._generators", "def testExplicitGeneratorUsage(self):\n\t\tc = Controller()\n\t\tx = c.mock()\n\t\tx.g(8, 9)\n\t\tc.generator()\n\t\tc.setReturn(10)\n\t\tc.setReturn(11)\n\t\tc.replay()\n\t\tself.failUnless([k for k in x.g(8, 9)] == [10, 11])", "def testGenerator(self,):\n return tf.data.Dataset.from_generator(self.testData, \\\n output_types=(tf.float32, tf.float32, tf.float32), \\\n output_shapes=(tf.TensorShape(self.config_model[\"input_shape\"]), \\\n tf.TensorShape(list(self.headoutput_shape[1:4]) + \\\n [len(self.anchor_boxes), \\\n 7+len(self.config_data[\"all_classes\"])]), \\\n tf.TensorShape([self.config_data[\"max_boxes_per_frame\"], 7]) \\\n ), )", "def trainGenerator(self,):\n return tf.data.Dataset.from_generator(self.trainData, \\\n output_types=(tf.float32, tf.float32, tf.float32), \\\n output_shapes=(tf.TensorShape(self.config_model[\"input_shape\"]), \\\n tf.TensorShape(list(self.headoutput_shape[1:4]) + \\\n [len(self.anchor_boxes), \\\n 7+len(self.config_data[\"all_classes\"])]), \\\n tf.TensorShape([self.config_data[\"max_boxes_per_frame\"], 7]) \\\n ), )", "def __init__( self, generator):\n DictObject.__init__( self, generator.generate_dict())", "def makegenerators(self, adata, val_split, batch_size, splitseed):\n \n return countloader(adata.obsm[self.embed_name], adata.X[:, adata.var['Variance Type'] == self.mode], adata.obs['size factors'], \n val_split, batch_size, splitseed)", "def get_generators(patch_size, batch_size, preprocess_func, output_reshape_func, num_validation, train_processes,\n train_cache, train_data_dir='data/train/'):\n\n dirs = util.get_data_list(train_data_dir)\n labels = util.parse_labels_months()\n train_paths, validation_paths = util.train_validation_split(dirs, labels)\n # generate train batch loader\n train_data_loader = CTBatchLoader(train_paths, batch_size, patch_size, num_threads_in_multithreaded=1,\n preprocess_func=preprocess_func)\n\n train_transforms = get_train_transform(patch_size)\n train_data_generator = MultiThreadedAugmenter(train_data_loader, train_transforms, num_processes=train_processes,\n num_cached_per_queue=train_cache, seeds=None, pin_memory=False)\n\n # wrapper to be compatible with keras\n train_generator_keras = KerasGenerator(train_data_generator, output_reshapefunc=output_reshape_func)\n\n # generate validation batch loader\n valid_data_loader = CTBatchLoader(validation_paths, num_validation, patch_size,\n num_threads_in_multithreaded=1, preprocess_func=preprocess_func)\n valid_transforms = get_valid_transform(patch_size)\n valid_data_generator = MultiThreadedAugmenter(valid_data_loader, valid_transforms, num_processes=1,\n num_cached_per_queue=1, seeds=None, pin_memory=False)\n # wrapper to be compatible with keras\n valid_generator_keras = KerasGenerator(valid_data_generator, output_reshape_func, 1)\n\n return train_generator_keras, valid_generator_keras", "def __init__(self, proto):\n self.proto = proto", "def getGenerators(self) -> list:\n return self.state[GENERATORS]", "def init_batch(self):\n pass", "def __init__(self, 
generators: List[Generator] = None, evaluators: List[Evaluator] = None): # noqa: E501\n self.swagger_types = {\n 'generators': List[Generator],\n 'evaluators': List[Evaluator]\n }\n\n self.attribute_map = {\n 'generators': 'generators',\n 'evaluators': 'evaluators'\n }\n self._generators = generators\n self._evaluators = evaluators", "def disable_buffering(self) -> None:\n self._next = partial(next, self._gen)\n self.buffered = False", "def _generate(self, **kwargs):\n super()._generate(**kwargs)", "def _generate(self, **kwargs):\n super()._generate(**kwargs)", "def _generate(self, **kwargs):\n super()._generate(**kwargs)", "def test_generator(self):\n args = Args()\n args.files = ['tests/xproto/test.xproto']\n args.target = 'tests/xtarget/test.xtarget'\n args.output = 'tests/out/dir/'\n args.write_to_file = \"target\"\n args.dest_file = None\n args.dest_extension = None\n\n expected_args = Args()\n expected_args.files = [os.path.abspath(os.getcwd() + '/' + args.files[0])]\n expected_args.target = os.path.abspath(os.getcwd() + '/' + args.target)\n expected_args.output = os.path.abspath(os.getcwd() + '/' + args.output)\n\n with patch(\"xosgenx.xosgen.XOSGenerator.generate\") as generator:\n XosGen.init(args)\n actual_args = generator.call_args[0][0]\n self.assertEqual(actual_args.files, expected_args.files)\n self.assertEqual(actual_args.target, expected_args.target)\n self.assertEqual(actual_args.output, expected_args.output)", "def generator(self):\n\n # generates speech turns long enough to contain at least one segment\n speech_turns = super(SpeechTurnSubSegmentGenerator, self).generator()\n\n # number of speech turns per \"speech turn batch\"\n if self.per_fold is not None:\n n_speech_turns = self.per_label * self.per_fold\n else:\n n_speech_turns = self.per_label * len(self.data_)\n\n endOfBatch = EndOfBatch()\n while True:\n\n # for each speech turn in batch\n for z in range(n_speech_turns):\n speech_turn = next(speech_turns)\n\n # for each segment in speech turn\n for X in self.iter_segments_(speech_turn['X']):\n\n # all but 'X' fields are left unchanged\n segment = dict(speech_turn)\n segment['X'] = X\n\n # remember that this segment belongs to this speech turn\n segment['z'] = z\n\n yield segment\n\n # let `batchify` know that the \"segment batch\" is complete\n yield endOfBatch", "def test_generators_get(self):\n response = self.client().get('/generators')\n self.assertEqual(response.status_code, 200)\n loaded_generators = GeneratorRegister.from_json(response.data.decode())\n\n self.assertEqual(loaded_generators, self.handler.generators)\n\n self.handler.generators = None\n response = self.client().get('/generators')\n self.assertEqual(response.status_code, 404)", "def __init__(self, buffer_size, batch_size, num_agents, seed):\n self.memory = deque(maxlen=buffer_size) # internal memory (deque)\n self.batch_size = batch_size\n self.num_agents = num_agents\n self.seed = random.seed(seed)", "def _generate(self, **kwargs):\n self._samples = numpy.array(list(itertools.product(*self.parameter_schema.values())), dtype=object)\n super()._generate()", "def generators(self, generators: List[Generator]):\n if generators is None:\n raise ValueError(\"Invalid value for `generators`, must not be `None`\") # noqa: E501\n\n self._generators = generators", "def _split_generators(\n self,\n dl_manager: tfds.download.DownloadManager\n ) -> Iterable[tfds.core.SplitGenerator]:\n split_generators = []\n if 'train' in self.builder_config.supported_modes:\n split_generators.append(\n 
tfds.core.SplitGenerator(\n name=tfds.Split.TRAIN,\n gen_kwargs={\n 'mode': 'train',\n },\n ),\n )\n if 'validation' in self.builder_config.supported_modes:\n split_generators.append(\n tfds.core.SplitGenerator(\n name=tfds.Split.VALIDATION,\n gen_kwargs={\n 'mode': 'validation',\n },\n ),\n )\n if 'test' in self.builder_config.supported_modes:\n split_generators.append(\n tfds.core.SplitGenerator(\n name=tfds.Split.TEST,\n gen_kwargs={\n 'mode': 'test',\n },\n ),\n )\n return split_generators", "def setup(self):\n for gen in self._feature_stats_generators:\n gen.setup()", "def __iter__(self):\n return self.new_generator()", "def pkt_gen(self):\n for i in range(self.num_pkts):\n # create the test packets\n pkt = Ether()/IP()/TCP()/'hello there pretty world!!!'\n rank = random.sample(range(0, 100), 1)[0]\n pkt_id = i\n tuser = Tuser(len(pkt), 0b00000001, 0b00000100, rank, pkt_id)\n print ('@ {:.2f} - Send: {} || {}'.format(self.env.now, pkt.summary(), tuser))\n # write the pkt and metadata into storage\n self.pkt_in_pipe.put((pkt, tuser))\n\n # wait for 10 cycles\n #for j in range(PREAMBLE + len(pkt) + IFG):\n yield self.wait_line_clks(self.PREAMBLE + len(pkt) + self.IFG)", "def model_setup(self):\n self.DNN = SganMLP(self.settings.number_of_bins)\n self.D = SganMLP(self.settings.number_of_bins)\n self.G = Generator()", "def test_create_generator(self) -> None:\n res = generate.create_generator(self._config)\n self.assertIsInstance(res, generate.GenerateDataImpl)", "def create_generators(cfg, backbone):\n if cfg.anchor_params:\n if 'small' in cfg.anchor_params:\n anchor_params = AnchorParameters.small\n else:\n anchor_params = None\n else:\n anchor_params = None\n\n common_args = {\n 'batch_size': cfg.batchsize,\n 'config': None,\n 'image_min_side': cfg.image_size[0],\n 'image_max_side': cfg.image_size[1],\n 'filter_annotations_enabled': False,\n 'preprocess_image': backbone.preprocess_image,\n 'normalize_radar': cfg.normalize_radar,\n 'camera_dropout': cfg.dropout_image,\n 'radar_dropout': cfg.dropout_radar,\n 'channels': cfg.channels,\n 'distance': cfg.distance_detection,\n 'sample_selection': cfg.sample_selection,\n 'only_radar_annotated': cfg.only_radar_annotated,\n 'n_sweeps': cfg.n_sweeps,\n 'noise_filter': cfg.noise_filter_cfg,\n 'noise_filter_threshold': cfg.noise_filter_threshold,\n 'noisy_image_method': cfg.noisy_image_method,\n 'noise_factor': cfg.noise_factor,\n 'perfect_noise_filter': cfg.noise_filter_perfect,\n 'radar_projection_height': cfg.radar_projection_height,\n 'noise_category_selection': None if cfg.class_weights is None else cfg.class_weights.keys(),\n 'inference': cfg.inference,\n 'anchor_params': anchor_params,\n }\n\n # create random transform generator for augmenting training data\n if cfg.random_transform:\n transform_generator = random_transform_generator(\n min_rotation=-0.1,\n max_rotation=0.1,\n min_translation=(-0.1, -0.1),\n max_translation=(0.1, 0.1),\n min_shear=-0.1,\n max_shear=0.1,\n min_scaling=(0.9, 0.9),\n max_scaling=(1.1, 1.1),\n flip_x_chance=0.5,\n flip_y_chance=0.0,\n )\n else:\n transform_generator = random_transform_generator(flip_x_chance=0.5)\n\n category_mapping = cfg.category_mapping\n\n if 'nuscenes' in cfg.data_set:\n # import here to prevent unnecessary dependency on nuscenes\n from crfnet.data_processing.generator.nuscenes_generator import NuscenesGenerator\n from nuscenes.nuscenes import NuScenes\n\n if 'mini' in cfg.data_set:\n nusc = NuScenes(version='v1.0-mini', dataroot=cfg.data_path, verbose=True)\n else:\n try:\n nusc = 
NuScenes(version='v1.0-trainval', dataroot=cfg.data_path, verbose=True)\n except ValueError:\n nusc = NuScenes(version='v1.0-mini', dataroot=cfg.data_path, verbose=True)\n\n\n if 'debug' in cfg.scene_selection or 'mini' in cfg.data_set:\n scenes = Scenes.debug\n else:\n scenes = Scenes.default\n\n train_generator = NuscenesGenerator(\n nusc,\n scene_indices=scenes.train,\n transform_generator=transform_generator,\n category_mapping=category_mapping,\n compute_anchor_targets=anchor_targets_bbox,\n compute_shapes=guess_shapes,\n shuffle_groups=True,\n group_method='random',\n **common_args\n )\n\n # no dropouts in validation\n common_args['camera_dropout'] = 0\n common_args['radar_dropout'] = 0\n\n validation_generator = NuscenesGenerator(\n nusc,\n scene_indices=scenes.val,\n category_mapping=category_mapping,\n compute_anchor_targets=anchor_targets_bbox,\n compute_shapes=guess_shapes,\n **common_args\n )\n\n test_generator = NuscenesGenerator(\n nusc,\n scene_indices=scenes.test,\n category_mapping=category_mapping,\n compute_anchor_targets=anchor_targets_bbox,\n compute_shapes=guess_shapes,\n **common_args\n )\n\n test_night_generator = NuscenesGenerator(\n nusc,\n scene_indices=scenes.test_night,\n category_mapping=category_mapping,\n compute_anchor_targets=anchor_targets_bbox,\n compute_shapes=guess_shapes,\n **common_args\n )\n\n test_rain_generator = NuscenesGenerator(\n nusc,\n scene_indices=scenes.test_rain,\n category_mapping=category_mapping,\n compute_anchor_targets=anchor_targets_bbox,\n compute_shapes=guess_shapes,\n **common_args\n )\n return train_generator, validation_generator, test_generator, test_night_generator, test_rain_generator\n else:\n raise ValueError('Invalid data type received: {}'.format(cfg.data_set))", "def initialize_random_number_generator(self,question_type):\n\t\tself.generator.seed(self.generate_index(self.magic, self.level, self.problem_id, question_type))", "def generated(self, generated):\n\n self._generated = generated", "def __getGenerator(self):\n\n return \"{\\\\*\\\\generator %s;}\\n\" % fix(self.generator)" ]
[ "0.6843186", "0.6817292", "0.623496", "0.61516976", "0.5911144", "0.5882668", "0.5799233", "0.56797016", "0.5637793", "0.561574", "0.560835", "0.5581869", "0.55451405", "0.5529971", "0.552042", "0.5490284", "0.54892546", "0.5484604", "0.54761255", "0.54730356", "0.5460637", "0.54278845", "0.54165554", "0.538063", "0.5357414", "0.53445476", "0.53444463", "0.5327144", "0.53267354", "0.53182125", "0.53124446", "0.53106064", "0.5289871", "0.5287127", "0.52797186", "0.5267638", "0.52517605", "0.5248484", "0.5239909", "0.5230168", "0.5219424", "0.5199184", "0.51840806", "0.51840806", "0.51840806", "0.5177885", "0.51439404", "0.51306826", "0.51305926", "0.50873923", "0.50819486", "0.5079681", "0.5077888", "0.50720996", "0.50693876", "0.5064068", "0.5054888", "0.5051817", "0.5051817", "0.5049304", "0.50437367", "0.5040232", "0.5038924", "0.5033541", "0.5013806", "0.50137067", "0.5009079", "0.50076187", "0.4995572", "0.49922937", "0.49867305", "0.49829653", "0.49785343", "0.4975632", "0.4974596", "0.49732113", "0.49664792", "0.49663538", "0.49633703", "0.49393174", "0.49269268", "0.49149594", "0.49149594", "0.49149594", "0.49057907", "0.49033165", "0.48931977", "0.48919126", "0.48884353", "0.48633626", "0.48450878", "0.4841855", "0.48390964", "0.48357832", "0.48333257", "0.48321876", "0.48318997", "0.4825366", "0.4824736", "0.48243952" ]
0.6150093
4
Create a map of duplicates and probabilities according to a pdf (i.e. uniform) and store for reuse on each original event. Current version taken directly from FEBRL; needs review b/c the number of duplicates stored starts at 2?
def generate_duplicate_pdf(self): num_dup = 1 prob_sum = 0.0 prob_list = [(num_dup, prob_sum)] max_dups = self.duplicate_cfg["Max_duplicate"] uniform_val = 1.0 / float(max_dups) for i in range(max_dups - 1): num_dup += 1 prob_list.append((num_dup, uniform_val + prob_list[-1][1])) return prob_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def multinomial_pmf(sample, probabilities):\r\n # TODO\r\n a=[]\r\n b=[]\r\n i=0\r\n key_list=[]\r\n value_list=[]\r\n for key,value in sample.items():\r\n key_list.append(key)\r\n value_list.append(value)\r\n b=list(sample)\r\n while i< len(b):\r\n a.append(probabilities.keys()[probabilities.values().index(value_list[i])])\r\n\r\n\r\n return a", "def filna_dict(mes):\n key = [f'pdf_{count + 1}' for count in range(mes)]\n value = ['stans.pdf' for count in range(mes)]\n filna_tobe_inserted = dict(zip(key, value))\n return filna_tobe_inserted", "def filna_dict(mes):\n key = [f'pdf_{count+1}'for count in range(mes)]\n value = ['stans.pdf'for count in range(mes)]\n filna_tobe_inserted = dict(zip(key,value))\n return filna_tobe_inserted", "def calc_prob(data):\n total = len(data)\n frequencies = sorted(Counter(data).items())\n probabilities = OrderedDict()\n for (key, value) in frequencies:\n probabilities[key] = value / total\n return probabilities", "def prob_dist(line1, line2, model):\n vocab = set(counts_un.keys())\n probs = dict()\n for line3 in vocab:\n probs[line3] = model.get_trigram_prob(line1, line2, line3)\n return probs", "def create_probability_object(self):\n self.update_frequencies(self)\n prob_dict = {}\n for symbol in self.all_symbols.keys():\n prob_dict[symbol] = self.all_symbols[symbol] / self.total_symbols\n return prob_dict\n # self.prob_dict = prob_dict", "def get_probability(letters, n):\n return {l: c/n for l, c in letters.items()}", "def _compute_register_probs(cls, num_values, probability):\n bits = np.arange(1, num_values + 1)\n probs = scipy.stats.geom.pmf(bits, probability)\n\n return probs / sum(probs)", "def probability(prods, prod_dict_As, count_dict):\n for p in prods:\n if p not in prod_dict_As:\n raise Exception(\"Think we cannot make the product {}.\".format(p))\n # Argh, Python, this is a reference!\n #possible_As = prod_dict_As[prods[0]]\n possible_As = set( prod_dict_As[prods[0]] )\n for p in prods[1:]:\n possible_As &= prod_dict_As[p]\n ret = []\n for A in possible_As:\n count = 1\n for p in prods:\n count *= count_dict[(p,A)]\n ret.append((A,count))\n return ret", "def init_probability_dict(self):\n for x in xrange(0,10):\n self.class_probabilities[x] = self.init_probability_2d()", "def generate_pdf_training_data(cls):\n sz = cls.test_set_size\n _x = np.zeros((sz, cls.state_size))\n _y = np.zeros((sz, cls.action_size))\n u = dict()\n u[str(_x[0])] = True\n for _i in range(0, sz):\n _pdf = np.random.randint(100, size=cls.action_size)\n _pdf = _pdf / np.sum(_pdf)\n _x[_i] = np.random.randint(3, size=cls.action_size)\n while str(_x[_i]) in u:\n _x[_i] = np.random.randint(3, size=cls.action_size)\n u[str(_x[_i])] = True\n _y[_i] = _pdf\n return _x, _y", "def generate_transition_bigram_probabilities(transition_unigram_counts, transition_bigram_counts):\r\n\ttransition_bigram_probabilities = dict()\r\n\tfor tag_bigram in transition_bigram_counts:\r\n\t\ttransition_bigram_probabilities[tag_bigram] = float(transition_bigram_counts[tag_bigram])/transition_unigram_counts[tag_bigram[0]]\r\n\treturn transition_bigram_probabilities", "def _create_freq_dist(self):\r\n freq_dict = dict()\r\n\r\n for element in self.data:\r\n if element in freq_dict:\r\n freq_dict[element] += 1\r\n else:\r\n freq_dict[element] = 1\r\n\r\n return freq_dict", "def get_ngramlogprobs(freqdict):\n return", "def sample_pagerank(corpus, damping_factor, n):\n probabilities = dict()\n samples = []\n\n # Random first sample\n page = random.choice(list(corpus.keys()))\n samples.append(page)\n 
\n # Remaining samples after first\n for i in range(n-1):\n p = transition_model(corpus, page, damping_factor)\n page = random.choices(list(p.keys()), weights=list(p.values()), k=1)[0]\n samples.append(page)\n\n # Count\n for p in corpus.keys():\n probabilities[p] = samples.count(p) / n\n\n return probabilities", "def prob4():\n\n\n N = 500000\n random_draws = np.random.multivariate_normal(mean = [-1,1], cov =[[1,0],[0,1]], size = N)\n\n h = lambda x: x[0] < -1 and x[1] > 1\n f = lambda x: stats.multivariate_normal(mean = [ 0, 0]).pdf(x)\n g = lambda x: stats.multivariate_normal(mean = [-1, 1]).pdf(x)\n\n probability = [h(random_draws[i]) * f(random_draws[i]) / g(random_draws[i]) for i in range(N)]\n\n return 1./N * np.sum(probability)", "def unigram_model(list_of_words, unigram_count, N=count_token()):\n d = pd.read_csv(unigram_count)\n proba_dict = {list_of_words[i]: (d[el].values[0] / float(N)) if el in d.columns.values else 0.0 for i, el in enumerate(list_of_words) }\n return proba_dict", "def calcProbability(self):\n for attribute in self.attributes:\n index = self.F2I[attribute]\n features = set([self.train[i][0][index] for i in range(len(self.train))])\n for feature in features:\n #all the true and false\n result_t = list(filter(lambda x: x[1]== True, self.train))\n total_t = len(result_t)\n result_f = list(filter(lambda x: x[1]== False, self.train))\n total_f= len(result_f)\n #the probability for the feature if its true or false\n t = len(list(filter(lambda x: x[0][index] == feature, result_t)))\n f = len(list(filter(lambda x: x[0][index] == feature, result_f)))\n prob_yes= t/total_t\n prob_no = f/total_f\n #assign the probabilities to the dictionaries\n self.probs_yes[(index,feature)] = prob_yes\n self.probs_no[(index,feature)] = prob_no", "def probability(self):\r\n \r\n my_dict = dict()\r\n \r\n for i in self.__dtmc:\r\n \r\n sum_Pij = float(sum([self.__dtmc[i][j] for j in self.__dtmc[i]]))\r\n \r\n if sum_Pij == 0:\r\n \r\n my_dict[i] = dict()\r\n \r\n elif sum_Pij > 0:\r\n \r\n if i not in my_dict:\r\n \r\n my_dict[i] = dict()\r\n \r\n for j in self.__dtmc[i]:\r\n \r\n Pij = self.__dtmc[i][j] / sum_Pij\r\n \r\n my_dict[i][j] = Pij\r\n \r\n return my_dict", "def calculate_prior_probability(y):\n unique, counts = np.unique(y, return_counts=True)\n u_c = dict(zip(unique, counts))\n instances = len(y)\n for u in u_c:\n u_c[u] = float(u_c[u] / instances)\n return u_c", "def _calculate_measurement_probs(measurements):\n total_mes = len(measurements)\n unique_mes = [list(x) for x in {tuple(x) for x in measurements}]\n total_unique_mes = len(unique_mes)\n len_qubits = len(unique_mes[0])\n measurements_probabilities = {}\n for i in range(total_unique_mes):\n strqubits = ''\n for qubit_idx in range(len_qubits):\n strqubits += str(unique_mes[i][qubit_idx])\n prob = measurements.count(unique_mes[i]) / total_mes\n measurements_probabilities[strqubits] = prob\n\n return measurements_probabilities", "def probabilityGet(NS,NH,SList,HList):\n global PS\n global PH\n PS = NS/(NS+NH) #probability of Spam\n PH = NH/(NS+NH) #probability of Ham\n AllPSpam = {} \n AllPHam = {}\n\n lambd = input(\"Choose a value for your lambda: \\n(a) 0.05 \\n(b) 0.5 \\n(c) 1 \\n(d) 2 \\nEnter letter of your choice: \") #Changeable lambda\n if lambd == 'a':\n lam= 0.05\n elif lambd == 'b':\n lam = 0.5\n elif lambd == 'd':\n lam = 2\n else:\n lam = 1\n\n for every_word,count in SList.items(): #computes probability of words in spam \n print(every_word, count)\n L_Spam = (count+lam)/(NS+(5000*lam))\n 
AllPSpam[every_word] = L_Spam #contains all the probability of everyword in Spam\n for every_word,count in HList.items(): #computes probability of words in ham\n L_Ham = (count+lam)/(NH+(5000*lam))\n AllPHam[every_word] = L_Ham #contains all the probability of everyword in Ham\n print(\"Testing of emails now begins!\")\n testingPhase(AllPSpam, AllPHam)", "def entropy_permutation_test(ordered_pitch_types, single_pitch_pdf, conditional_joint_probabilities, total_transitions,\n n=1000):\n pitch_types, pitch_probabilities = zip(*single_pitch_pdf.items())\n permutation_entropies = []\n progress = progressbar.ProgressBar()\n\n for test_number in progress(xrange(n)):\n # create the new matrix\n permutation_counts = {}\n for first_pitch_type in ordered_pitch_types:\n permutation_counts[first_pitch_type] = {}\n for second_pitch_type in ordered_pitch_types:\n permutation_counts[first_pitch_type][second_pitch_type] = 0\n\n pitch_permutation = numpy.random.choice(pitch_types, total_transitions, p=pitch_probabilities)\n current_pitch = numpy.random.choice(pitch_types, p=pitch_probabilities)\n for next_pitch in pitch_permutation:\n permutation_counts[current_pitch][next_pitch] += 1\n current_pitch = next_pitch\n\n joint_probabilities, _, _ = joint_probabilities_from_transitions(ordered_pitch_types, permutation_counts)\n permutation_entropies.append(entropy_from_probability_matrix(joint_probabilities))\n\n joint_entropy = entropy_from_probability_matrix(conditional_joint_probabilities)\n # print 'Mean', numpy.mean(permutation_entropies)\n # print 'Standard deviation', numpy.std(permutation_entropies)\n # tdof, tloc, tscale = stats.t.fit(permutation_entropies)\n # print 'DF', tdof, 'Loc (mean)', tloc, 'Scale (SD)', tscale\n # t_score = (joint_entropy - tloc) / tscale\n # print stats.t.cdf(joint_entropy, df=tdof, loc=tloc, scale=tscale)\n\n mean, stddev = stats.norm.fit(permutation_entropies)\n print 'Mean = {mean}\\t StdDev = {stddev}'.format(mean=mean, stddev=stddev)\n z_score = (joint_entropy - mean) / stddev\n p_value = stats.norm.cdf(joint_entropy, mean, stddev)\n print 'The joint entropy has a Z-score of {z_score} which gives a P-value of {p_value}'.format(z_score=z_score,\n p_value=p_value)\n return z_score, p_value", "def frecuencia_abs(seq) -> dict:\n hist = {}\n for i in seq:\n hist[i] = hist.get(i, 0) + 1\n return hist", "def sdd(events,probs):\n \n import random\n nprobs=[x*1000 for x in probs] #so, here i multiply each float in 'probs' by 1000 and store the products in 'nprobs'\n newlist=[]\n for a in range(len(events)) : #then, in this loop, i create a list (newlist), in which each event appears 1000*its probability times\n b=nprobs[a]\n b=int(b)\n for c in range(b) :\n newlist.append(events[a]) \n return (random.choice(newlist)) #and finally, i ramdonly sample ", "def prime_error_rate_dic(aa_order):\n aa_error_rate_dic = {}\n for i in aa_order:\n #first element of definitions are the from mutation rate\n #and the second element is the to mutation rate\n aa_error_rate_dic[i] = [0.0, 0.0]\n return aa_error_rate_dic", "def distribution_probability(self, game):\n dist_probability = {}\n\n total_visits = sum(self.root.n_a.values())\n\n for action, visits in self.root.n_a.items():\n dist_probability[action] = visits/total_visits\n return dist_probability", "def counts_to_probs(some_dict, num):\n new_d = dict()\n for key in some_dict:\n value = some_dict[key]\n new_d[key] = value/num\n return new_d", "def compute_empirical_distribution(values):\n distribution = {}\n\n # 
-------------------------------------------------------------------------\n # YOUR CODE HERE\n #\n for value in values:\n if value not in distribution:\n distribution[value] = 1\n else:\n distribution[value] += 1\n \n total = len(values)\n for v in distribution.keys():\n distribution[v] /= total\n \n\n #\n # END OF YOUR CODE\n # -------------------------------------------------------------------------\n\n return distribution", "def question_1(patient):\n result = {}\n for disease, symptoms in patient.symptoms.iteritems():\n prob = calculate_probability(disease, symptoms)\n result[disease.name] = \"%.4f\" % prob\n return result", "def calcul_ngram_pmi(ngram_freq,ngram_keys,n):\r\n if isinstance(n,collections.abc.Iterable):\r\n mi = {}\r\n for ni in n:\r\n mi = {**mi,**calcul_ngram_pmi(ngram_freq,ngram_keys,ni)}\r\n return mi\r\n\r\n if n!=1:\r\n target_ngrams = ngram_keys[n]\r\n else:\r\n target_ngrams = [l for l in ngram_keys[n] if ToolWord().is_english_word(l[0])] \r\n\r\n n1_totalcount = sum([ngram_freq[k] for k in ngram_keys[1] if k in ngram_freq])\r\n target_n_total_count = sum([ngram_freq[k] for k in ngram_keys[n] if k in ngram_freq])\r\n mi = {}\r\n for target_ngram in target_ngrams:\r\n target_ngrams_freq = ngram_freq[target_ngram]\r\n joint_proba = target_ngrams_freq/target_n_total_count\r\n indep_proba = reduce(mul,[ngram_freq[(char,)] for char in target_ngram])/((n1_totalcount)**n)\r\n pmi = math.log(joint_proba/indep_proba,hp.e) \r\n ami = pmi/len(target_ngram) \r\n mi[target_ngram] = (pmi,ami)\r\n return mi", "def frequentOneItem(self):\n\n candidate = {}\n # global finalPatterns, minSup, Database\n # self.minSup = self.minSup\n for i in range(len(self.Database)):\n for j in range(len(self.Database[i])):\n if self.Database[i][j] not in candidate:\n candidate[self.Database[i][j]] = [i]\n else:\n candidate[self.Database[i][j]] += [i]\n self.finalPatterns = {keys: value for keys, value in candidate.items() if len(value) >= self.minSup}\n #print(candidate)", "def get_probability(fields, dic):\r\n sum_ = sum(dic.values())\r\n p = 0.0\r\n for f in fields:\r\n value = dic.get(f, 0.0) + 0.0001\r\n p = p + math.log(float(value)/float(sum_))\r\n return p", "def primes(n):\n primfac = {}\n primfac = defaultdict(lambda: 0, primfac)\n while (n % 2) == 0:\n primfac[2] += 1 \n n //= 2\n d = 3\n while d*d <= n:\n while (n % d) == 0:\n primfac[d] += 1 # supposing you want multiple factors repeated\n n //= d\n d += 2\n if n > 1:\n primfac[n] = 1\n return primfac", "def prior(training_data, label_list):\n\n smooth = 1 # smoothing factor\n logprob = {}\n # TODO: add your code here\n numfile1 = 0\n numfile2 = 0\n for dic in training_data:\n if(dic[\"label\"] == label_list[0]):\n numfile1 += 1\n elif(dic[\"label\"] == label_list[1]):\n numfile2 += 1\n numtotal = numfile1 + numfile2\n\n prob1 = (numfile1+smooth)/(numtotal+2)\n prob2 = (numfile2 + smooth) / (numtotal + 2)\n\n logprob[label_list[0]] = math.log(prob1)\n logprob[label_list[1]] = math.log(prob2)\n\n\n return logprob", "def probabilities(doc, doc_length, prob_dict):\n\tfor elem in doc:\n\t\tdoc[elem] = doc[elem]/doc_length\n\tfor key in doc.keys():\n\t\tif key in stop_words:\n\t\t\tdoc.pop(key)\n\tfor key in doc.keys():\n\t\ttry:\n\t\t\tdoc[key] = prob_dict[key]\n\t\texcept KeyError:\n\t\t\tdoc[key] = 0.0\n\t\t\t#doc[key] = doc[key]/doc_length\n\treturn doc", "def MAP(cpts, obs, terms):\r\n\r\n # a list to store the computed probabilities\r\n all_sums = []\r\n # initialize all terms to false\r\n for value in range(len(terms)):\r\n 
terms[value] = [terms[value], '0']\r\n search_array = terms + obs\r\n # if all terms are being watched, just call MPE\r\n if len(search_array) == len(cpts):\r\n return MPE(cpts, obs)\r\n # we need to know what terms we aren't interested in so we start with \r\n # or terms and observations and note the variables that appear in CPT but\r\n # not in those\r\n dont_count = []\r\n for var in cpts:\r\n if [var[0], '0'] not in search_array and [var[0], '1'] not in search_array:\r\n dont_count.append(var[0])\r\n terms.append([var[0],'1'])\r\n # sort the terms to ensure correct ordering\r\n terms.sort()\r\n # creates a list of all possible bit strings\r\n # just an easy way to create all possible truth assignments\r\n seq = [\"\".join(seq) for seq in itertools.product(\"01\", repeat=len(terms))]\r\n # loop through all possible truth assignments\r\n for j in range(len(seq)):\r\n # we initialize at probability = 100%\r\n chance = 1\r\n # assign the truth values\r\n for k in range(len(seq[j])):\r\n terms[k][1] = seq[j][k]\r\n # this computes the probability using the chaining rule\r\n for i in range(len(terms)):\r\n new_terms = terms[:-i-1] + obs\r\n new_terms.sort()\r\n chance *= probability(cpts,terms[-i-1], new_terms)\r\n # add the probabilities to our list\r\n all_sums.append(chance)\r\n combine = []\r\n # note all variables which weren't in obs or Vs\r\n for i in dont_count:\r\n combine.append(terms.index([i,'1']))\r\n # this will store the final probabilities\r\n final_array = [0] * len(seq)\r\n # another complicated looking loop, it just serves to combine probabilities\r\n # for example, if we have a CPT with x_1, x_2, x_3, x_4 and we observe \r\n # x_1 to be true and have Vs = [x_3, x_4] then we need to combine the \r\n # probabilities that are the same except for x_2 = true vs false\r\n for loc in combine:\r\n for sequence in range(len(seq)):\r\n for alt_sequence in range(sequence+1,len(seq)):\r\n if (seq[sequence][:loc] + seq[sequence][loc+1:]) == (seq[alt_sequence][:loc] + seq[alt_sequence][loc+1:]):\r\n final_array[sequence] = all_sums[sequence] + all_sums[alt_sequence]\r\n\r\n # get the truth assignment for the highest probability\r\n location = seq[final_array.index(max(final_array))]\r\n truth_assignment = []\r\n # place the truth assignment in a more readable fashion\r\n for value in range(len(terms)):\r\n if terms[value] in search_array:\r\n if location[value] == '0':\r\n truth_assignment.append(terms[value][0]+ ' = False')\r\n else:\r\n truth_assignment.append(terms[value][0]+ ' = True')\r\n return (truth_assignment)", "def question_2(patient):\n result = {}\n for disease, symptoms in patient.symptoms.iteritems():\n symptoms_list = generate_all_symptoms(symptoms)\n if not symptoms_list: # there are no unknowns\n symptoms_list = [symptoms]\n max_prob = 0.0\n min_prob = 1.0\n for sym_list in symptoms_list:\n prob = calculate_probability(disease, sym_list)\n if prob > max_prob:\n max_prob = prob\n if prob < min_prob:\n min_prob = prob\n min_str = \"%.4f\" % min_prob\n max_str = \"%.4f\" % max_prob\n result[disease.name] = [min_str, max_str]\n patient.set_max_prob(disease, max_prob)\n patient.set_min_prob(disease, min_prob)\n return result", "def prob4():\n#raise NotImplementedError(\"Problem 4 Incomplete\")\n h = lambda x : x[0] < -1 and x[1] > 1\n f = lambda x : stats.multivariate_normal.pdf(x,mean=np.array([0,0]),cov=np.eye(2))\n g = lambda x : stats.multivariate_normal.pdf(x,mean=np.array([-1,1]),cov=np.eye(2))\n X = 
np.random.multivariate_normal(mean=np.array([-1,1]),cov=np.eye(2),size=10000)\n return 1./10000*np.sum(np.apply_along_axis(h,1,X)*np.apply_along_axis(f,1,X)/np.apply_along_axis(g,1,X))", "def get_prob_for_distributions(p):\n w1 = p[0]\n mu1 = p[1]\n sigma1 = p[2]\n w2 = p[3]\n mu2 = p[4]\n sigma2 = p[5]\n w3 = p[6]\n mu3 = p[7]\n sigma3 = p[8]\n dist_range = (0, 4.330310991999920844e+01)\n x = np.linspace(dist_range[0], dist_range[1], 1000)\n A1 = np.array(w1 * mlab.normpdf(x, mu1, sigma1)).sum()\n A2 = np.array(w2 * mlab.normpdf(x, mu2, sigma2)).sum()\n A3 = np.array(w3 * mlab.normpdf(x, mu3, sigma3)).sum()\n p1 = A1 / (A1 + A2 + A3)\n p2 = A2 / (A1 + A2 + A3)\n p3 = A3 / (A1 + A2 + A3)\n return p1, p2, p3", "def dealer_probs():\n # Pdf of any current hand (value, hard) and final value; p(v_f | v_c) where v_f = final value, v_c = current value\n probabilities = {}\n\n # End nodes: (value, True) for value >= 17 and (value, False) for value > 17\n # Dependencies (in order of increasing requirements):\n # Hard values, value >= 11, possiblity of bust, no possibility of going soft with an ace (value, True) depends on (value', True) for 17 > value' > value\n # Soft values, 17 >= value >= 11 (value, False) depends on (value', False) for 17 >= value' > value, (value', True) for 17 > value' > 11\n # Hard values, 11 > value >= 2 , no possibility of bust, possibility of going soft with an ace (value, True) depends on (value', True) for 17 > value' > value and (value', False) for 17 >= value' > 13\n\n\n # End nodes\n for value in xrange(17, 22):\n probabilities[(value, True)] = {value: 1.0}\n if value == 17: continue # on soft 17, dealer will still hit\n probabilities[(value, False)] = {value: 1.0}\n\n # Hard values, 17 > value >= 11, possibility of bust, no possibility of going soft with an ace\n for value in xrange(16, 10, -1):\n probabilities[(value, True)] = {}\n current_prob = probabilities[(value, True)]\n for next_card in xrange(1, min(10, 21-value)+1):\n next_prob = probabilities[(value + next_card, True)]\n for end_val in next_prob:\n current_prob[end_val] = current_prob.get(end_val, 0) + next_prob[end_val] * PROBABILITIES[next_card]\n\n # Soft values, 17 >= value >= 11\n for value in xrange(17, 10, -1):\n probabilities[(value, False)] = {}\n current_prob = probabilities[(value, False)]\n for next_card in xrange(1, 11):\n next_value = value + next_card\n hard = False\n if next_value > 21:\n next_value -= 10\n hard = True\n next_prob = probabilities[(next_value, hard)]\n for end_val in next_prob:\n current_prob[end_val] = current_prob.get(end_val, 0) + next_prob[end_val] * PROBABILITIES[next_card]\n\n # Hard values, 11 > value >= 2, no possibility of bust, possibility of going soft with an ace\n for value in xrange(10, 1, -1):\n probabilities[(value, True)] = {}\n current_prob = probabilities[(value, True)]\n for next_card in xrange(2, 12):\n next_value = value + next_card\n hard = (next_card != 11)\n next_prob = probabilities[(next_value, hard)]\n for end_val in next_prob:\n current_prob[end_val] = current_prob.get(end_val, 0) + next_prob[end_val] * PROBABILITIES[next_card]\n\n return probabilities", "def generate_transition_trigram_probabilities(transition_bigram_counts, transition_trigram_counts):\r\n\ttransition_trigram_probabilities = dict()\r\n\tfor tag_trigram in transition_trigram_counts:\r\n\t\ttransition_trigram_probabilities[tag_trigram] = float(transition_trigram_counts[tag_trigram])/transition_bigram_counts[(tag_trigram[0], tag_trigram[1])]\r\n\treturn 
transition_trigram_probabilities", "def get_population_prob_dist(population, prev_word_array, review_batch):\n prob_dist = {}\n\n for individual in population:\n prob_dist[individual] = _prob_of_next_word(individual, prev_word_array, review_batch)\n\n return prob_dist", "def calcTFdict(doc):\n\n TFDict = {}\n\n #counts number of appearances of term in document\n for term in doc:\n if term in TFDict.keys():\n TFDict[term] +=1\n else:\n TFDict[term] = 1\n\n #Computing tf for each term\n for key in TFDict:\n TFDict[key] = TFDict[key]/len(doc)\n\n return TFDict", "def generate_perfomances(x):\n\n\ty=OrderedDict()\n\n\t# Cycles through both the values and the keys in the dictionary, creating a new dictionary which has within it\n\t# The names coupled with their normal distribution values\n\n\tfor xValues,names in zip(x.values(),x): y[names]=random.normalvariate(xValues[0],xValues[1])\n\treturn y", "def build_probdist(prob_filename):\n fits_file = fits.open(prob_filename)\n \n z_hdu = fits_file[1]\n p_hdu = fits_file[2]\n \n z_data = z_hdu.data\n p_data = np.exp(p_hdu.data)\n \n return ProbDist(z_data, p_data)", "def make_prob_dictionary(to_read, probabilities):\n\tf = open(to_read)\n\tfor i in f:\n\t\tx = i.strip().split()\n\t\tprobabilities[x[0][:-1]] = float(x[1])\n\tf.close()\n\treturn probabilities", "def frequency(walks):\n\tP_m = {}\n\tfor walk in walks:\n\t\tfor item in walk:\n\t\t\ttry:\n\t\t\t\tP_m[item] += 1\n\t\t\texcept:\n\t\t\t\tP_m[item] = 1\n\tfor key, value in P_m.items():\n\t\tP_m[key] = value**0.75\n\treturn P_m", "def occupation_distribution(data):", "def create_n_1_gram_map(self) -> Dict[str, List[str]]:\n assert self.count_map is not None, 'count map is not initialized'\n # assert self.n_grams > 1, 'n-grams must be greater than 1 in order to create n_1 gram map'\n\n res: Dict[str, List[str]] = {}\n for sequence in self.model:\n sequence: str = cast(str, sequence)\n n_minus_1_grams = self.get_n_minus_1_grams(sequence)\n if n_minus_1_grams not in res:\n res[n_minus_1_grams] = []\n res[n_minus_1_grams].append(sequence)\n\n self.n_1_gram_map = res\n return res", "def calcIDFDict(countDict, numfiles):\n\n IDFDict = {}\n for term in countDict:\n IDFDict[term] = math.log(numfiles / countDict[term])\n\n return IDFDict", "def compute_IDF(doc_info):\n number_of_docs = len(doc_info)\n idf_table = {}\n\n for idx, doc in enumerate(doc_info):\n for word in doc['freq_dict']:\n if word not in idf_table:\n idf_table[word] = 1\n else:\n idf_table[word] += 1\n\n for word in idf_table.keys():\n idf_table[word] = math.log(number_of_docs/idf_table[word])\n\n return idf_table", "def build_distributions(self):\n res = {}\n n_partitions = self.partition_num\n partition_num = 1\n # each part size\n partition_size = int(math.floor(self.size / n_partitions))\n\n for n in range(int(partition_size), self.size + 1, int(partition_size)):\n if self.learn_start <= n <= self.priority_size:\n distribution = {}\n # P(i) = (rank i) ^ (-alpha) / sum ((rank i) ^ (-alpha))\n pdf = list(\n map(lambda x: math.pow(x, -self.alpha), range(1, n + 1))\n )\n pdf_sum = math.fsum(pdf)\n distribution['pdf'] = list(map(lambda x: x / pdf_sum, pdf))\n # split to k segment, and than uniform sample in each k\n # set k = batch_size, each segment has total probability is 1 / batch_size\n # strata_ends keep each segment start pos and end pos\n cdf = np.cumsum(distribution['pdf'])\n strata_ends = {1: 0, self.batch_size + 1: n}\n step = 1.0 / self.batch_size\n index = 1\n for s in range(2, self.batch_size + 1):\n while cdf[index] < 
step:\n index += 1\n strata_ends[s] = index\n step += 1.0 / self.batch_size\n\n distribution['strata_ends'] = strata_ends\n\n res[partition_num] = distribution\n\n partition_num += 1\n\n return res", "def compute_probabilities():\n global total_spam_words, total_ham_words\n total_words = total_spam_words+total_ham_words\n unique_words = len(all_dict)\n print(\"Training Set Description: \")\n len_ham = len(ham_file_list)\n len_spam = len(spam_file_list)\n print(\"SPAM EMAILS: \",len_spam)\n print(\"HAM EMAILS: \",len_ham)\n print(\"Total words: \",total_words)\n print(\"Training...\")\n \n spam_probability = math.log((len_spam)/(len_spam+len_ham))\n ham_probability = math.log((len_ham)/(len_spam+len_ham))\n \n \n \n output_file = open(\"nbmodel.txt\", \"w+\", encoding=\"latin-1\")\n output_file.write(\"model_params \"+str(spam_probability)+\" \"+str(ham_probability)+\"\\n\")\n \n nbmodel = {}\n nbmodel[\"model_params\"] = (spam_probability,ham_probability)\n for word in all_dict.keys():\n spam_count = 1\n if word in spam_dict:\n spam_count+= spam_dict[word]\n \n word_spam_probability = math.log(spam_count / (total_spam_words+unique_words))\n \n ham_count = 1\n if word in ham_dict:\n ham_count+= ham_dict[word]\n \n word_ham_probability = math.log(ham_count / (total_ham_words+unique_words))\n \n output_file.write(word+\" \"+str(word_spam_probability)+\" \"+str(word_ham_probability)+\"\\n\")\n nbmodel[word] = (word_spam_probability, word_ham_probability) \n \n print(\"nbmodel.txt generated successfully...\")\n print(\"SPAM Probability: \",spam_probability)\n print(\"HAM Probability: \",ham_probability)\n output_file.close()", "def custom_pdf(self, cum_probs, values):\n rnd_num = random()\n for p in range(len(cum_probs)):\n if rnd_num < cum_probs[p]:\n return values[p]", "def distr(self,X):\r\n return {x:X.count(x) for x in set(X)}", "def _get_marginal_pdfs( res, nbins=51, verbose=True ):\n\tvparam_names = res.vparam_names\n\tweights = res.weights\n\tsamples = res.samples\n\n\tpdfdict = {}\n\n\tfor param in vparam_names :\n\t\tipar = vparam_names.index( param )\n\t\tparamvals = samples[:,ipar]\n\n\t\tif nbins>1:\n\t\t\tif param in res.bounds :\n\t\t\t\tparvalmin, parvalmax = res.bounds[param]\n\t\t\telse :\n\t\t\t\tparvalmin, parvalmax = 0.99*paramvals.min(), 1.01*paramvals.max()\n\t\t\tparambins = np.linspace( parvalmin, parvalmax, nbins, endpoint=True ).flatten()\n\t\t\tbinindices = np.digitize( paramvals, parambins )\n\n\t\t\t# we estimate the marginalized pdf by summing the weights of all points in the bin,\n\t\t\t# where the weight of each point is the prior volume at that point times the\n\t\t\t# likelihood, divided by the total evidence\n\t\t\tpdf = np.array( [ weights[np.where( binindices==ibin )].sum() for ibin in range(len(parambins)) ] )\n\t\telse :\n\t\t\tparambins = None\n\t\t\tpdf = None\n\n\n\t\tmean = (weights * samples[:,ipar]).sum()\n\t\t#print(samples[:,ipar]-mean)\n\t\t#print(weights)\n\t\tstd = np.sqrt( (weights * (samples[:,ipar]-mean)**2 ).sum() )\n\n\n\t\tpdfdict[param] = (parambins,pdf,mean,std,res.logz)\n\n\t\tif verbose :\n\t\t\tif np.abs(std)>=0.1:\n\t\t\t\tprint( ' <%s> = %.2f +- %.2f'%( param, np.round(mean,2), np.round(std,2)) )\n\t\t\telif np.abs(std)>=0.01:\n\t\t\t\tprint( ' <%s> = %.3f +- %.3f'%( param, np.round(mean,3), np.round(std,3)) )\n\t\t\telif np.abs(std)>=0.001:\n\t\t\t\tprint( ' <%s> = %.4f +- %.4f'%( param, np.round(mean,4), np.round(std,4)) )\n\t\t\telse :\n\t\t\t\tprint( ' <%s> = %.3e +- %.3e'%( param, mean, std) )\n\n\n\t\tif param == 'x0' 
:\n\t\t\tsalt2 = sncosmo.Model( source='salt2')\n\t\t\tsalt2.source.set_peakmag( 0., 'bessellb', 'ab' )\n\t\t\tx0_AB0 = salt2.get('x0')\n\t\t\tmBmean = -2.5*np.log10( mean / x0_AB0 )\n\t\t\tmBstd = 2.5*np.log10( np.e ) * std / mean\n\t\t\tmBbins = -2.5*np.log10( parambins / x0_AB0 )\n\n\t\t\tpdfdict['mB'] = ( mBbins, pdf, mBmean, mBstd )\n\t\t\tif verbose:\n\t\t\t\tprint( ' <%s> = %.3f +- %.3f'%( 'mB', np.round(mBmean,3), np.round(mBstd,3)) )\n\n\treturn( pdfdict )", "def learn_distributions(file_lists_by_category):\n ### TODO: Write your code here\n\n #get word frequncies in each email category\n #key:word, value: number of occurences in this email loader\n spam_dict = util.get_word_freq(file_lists_by_category[0])\n ham_dict = util.get_word_freq(file_lists_by_category[1])\n\n #get total length of each email loader\n spam_length = sum(spam_dict.values())\n ham_length = sum(ham_dict.values())\n\n #get the length of the dictionary: D\n dict_D = util.Counter()\n for key in spam_dict:\n dict_D[key] += spam_dict[key]\n for key in ham_dict:\n dict_D[key] += ham_dict[key]\n D = len(dict_D)\n\n spam_distribution = {}\n ham_distribution = {}\n #get the distributions of two email loaders\n for i in dict_D:\n spam_distribution[i] = (spam_dict[i] + 1) / (D + spam_length)\n\n for i in dict_D:\n ham_distribution[i] = (ham_dict[i] + 1) / (D + ham_length)\n #create the required tuple\n probabilities_by_category = (spam_distribution, ham_distribution)\n return probabilities_by_category", "def probability(structure,seq, react=None):\n return energy_to_proba(get_ens_energy(seq,react),get_stru_energy(structure,seq,react))", "def doc_prob(self, doc, cat):\n features = self.get_features(doc) \n # Multiply the probabilities of all the features together\n p = Decimal(1)\n for f in features:\n p *= Decimal(str(self.weighted_prob(f, cat, self.feature_prob))) \n return p", "def histogram_equalize(img):\n\n img_copy = np.copy(img)\n\n elements,counts = np.unique(img_copy,return_counts=True)\n pdf = counts/counts.sum()\n cdf = np.cumsum(pdf)\n new_values = cdf * 255\n\n old_new_map = dict(zip(elements,new_values))\n\n img_new = np.zeros(img_copy.shape)\n for i in old_new_map:\n img_new[img_copy == i] = old_new_map[i]\n\n return img_new", "def compute_probability_for(fixation):\n probabilities = np.zeros(Number_of_locs) #MOD Number_of_locs deleted\n for possible_target_location in xrange(Number_of_locs): #MOD Number_of_locs deleted\n probabilities[possible_target_location] = integrate.quad(\n integral_function,\n -np.inf, np.inf,\n args=(possible_target_location,Dprime_map[fixation]),\n epsabs=0,\n limit=100,\n full_output=1\n )[0] #MOD Dprime_map deleted\n return np.sum(Post_probs * probabilities) #MOD Post_probs deleted", "def spamrisk_map(spam_wc, not_spam_wc, total_wc):\n risk_map = dict()\n spam_length = 0\n for w, v in spam_wc.iteritems():\n spam_length += v\n not_spam_length = 0\n for w, v in not_spam_wc.iteritems():\n not_spam_length += v\n total_length = not_spam_length + spam_length\n\n for word, value in total_wc.iteritems():\n\n if word not in spam_wc and word in not_spam_wc:\n risk_map[word] = 0.01\n elif word in spam_wc and word not in not_spam_wc:\n risk_map[word] = 0.99\n else:\n g = float(not_spam_wc[word] * 2)\n b = float(spam_wc[word])\n risk_map[word] = ( b / spam_length ) / ( ( g / not_spam_length) +(b / spam_length) ) \n\n return risk_map", "def probabilities(self):\n raise NotImplementedError", "def generate_probabilities(self):\n k = 1\n v= 10\n for g in self.class_probabilities:\n curr_list = 
self.class_probabilities[g]\n for l in range(0,28):\n for w in range(0,28):\n total = float(curr_list[l][w][0] + curr_list[l][w][1] + curr_list[l][w][2])\n curr_list[l][w][0] = (float(curr_list[l][w][0])+k)/(total + k*v) \n curr_list[l][w][1] = (float(curr_list[l][w][1])+k)/(total + k*v)\n curr_list[l][w][2] = (float(curr_list[l][w][2])+k)/(total + k*v)\n curr_list[l][w][3] = curr_list[l][w][0] + curr_list[l][w][1] + curr_list[l][w][2]", "def pdf(self,x):\n return self.categoricalDist.pdf(x)", "def phrase_scoring_ranking(phrases,model,dataset,bitext):\n e_phrases = []\n f_phrases = []\n count = 0\n f_phrase_count = {}\n e_phrase_count = {} #not needed\n #e_f_pair_count = {} #e words as rows and f words as columns\n f_e_pair_count = {} #e words as rows and f words as columns\n for phrase_set in phrases:\n for phrase in phrase_set:\n e_phrases.append(phrase[3])\n f_phrases.append(phrase[2])\n if phrase[2] in f_phrase_count:\n f_phrase_count[phrase[2]] += 1\n else:\n f_phrase_count[phrase[2]] = 1\n if phrase[2] in f_e_pair_count:\n if phrase[3] in f_e_pair_count[phrase[2]]:\n f_e_pair_count[phrase[2]][phrase[3]] += 1\n else:\n f_e_pair_count[phrase[2]][phrase[3]] = 1\n else:\n f_e_pair_count[phrase[2]]={}\n f_e_pair_count[phrase[2]][phrase[3]] = 1\n\n e_phrases = list(set(e_phrases))\n f_phrases = list(set(f_phrases))\n ep_count = len(e_phrases)\n fp_count = len(f_phrases)\n #pmatrix = np.empty(ep_count*fp_count) # ######Not needed if dictionary is used\n #pmatrix = pmatrix.reshape(ep_count,fp_count)\n #pmatrix.fill(0)\n ef_prob_dict = {}\n for e in e_phrases:\n for f in f_phrases:\n ef_count =count_fe_pair(e,f,f_e_pair_count)# f_e_pair_count[e][f]\n f_count = f_phrase_count[f]\n e_idx = e_phrases.index(e) ###Check the count logic again\n f_idx = f_phrases.index(f)\n pair_prob = ef_count/f_count\n #pmatrix[e_idx][f_idx] = pair_prob\n if f in f_e_pair_count:\n if e in f_e_pair_count[f]:\n if f in ef_prob_dict:\n ef_prob_dict[f][e]=pair_prob\n else:\n ef_prob_dict[f] = {}\n ef_prob_dict[f][e] = pair_prob\n\n #if pmatrix[e_idx][f_idx] != 0:\n # print(e,f,ef_count,f_count,pair_prob)\n return ef_prob_dict", "def sample_pagerank(corpus, damping_factor, n):\n data = []\n\n #Choosing a random page from the corpus and adding to data\n sample = random.choice(list(corpus.keys()))\n data.append(sample)\n\n for _ in range(n-1):\n prob_distrib = transition_model(corpus, sample, damping_factor)\n\n #Choosing a page from the corpus based on transition model and adding to data\n sample = np.random.choice(list(prob_distrib.keys()), p=list(prob_distrib.values()))\n data.append(sample)\n\n #Dividing the number of times each page was visited by numebr of samples \n pagerank = {k : v/n for k, v in Counter(data).items()}\n\n return pagerank", "def get_Pre_Succ(I):\n #Docs = I.docs\n #Docs_id = Docs.keys()\n Docs = I.getIndex().all_ids_\n Docs_id = [ int(float(k)) for k in Docs] \n N_pgs = len(Docs_id)\n Index_P = { id:idx for idx,id in enumerate(Docs_id)}\n Counter_Index_P = { idx:id for idx,id in enumerate(Docs_id)}\n \n print \"\\nBuilding Pi...\"\n Succ = { Index_P[p]:(I.getLinksForDoc(p),len(I.getLinksForDoc(p))) for p in Docs_id }\n P = {}\n for e in Succ:\n succ_e,l_e = Succ[e]\n for s in succ_e: \n if Index_P.get(s,\"Unknown_Doc_id\") not in P:\n P[Index_P.get(s,\"Unknown_Doc_id\")] = set()\n P[Index_P.get(s,\"Unknown_Doc_id\")].add(e) \n \n return P,Succ,Index_P,Counter_Index_P,N_pgs", "def ind_sim(n,CV,BV,N,p,d): \n dic={}\n dic2={}\n for i in range(N):\n Bt=random.choices('HL', weights=(p,1-p), k=n)\n 
pb=[round((1-p), 5) if x=='L' else p for x in Bt] \n Ct=random.choices('HL', weights=(p,1-p), k=n)\n pc=[round((1-p), 5) if x=='L' else p for x in Ct] \n [npvt,pr]=NPV(Bt,Ct,BV,CV,d,np.prod(pb),np.prod(pc))\n if npvt in dic.keys():\n dic[npvt] += 1\n else:\n dic[npvt] = 1\n dic2[npvt] =pr\n return (dic, dic2)", "def build_distributions(self):\n res = {}\n n_partitions = self.partition_num\n partition_num = 1\n # each part size\n partition_size = int(math.floor(self.size / n_partitions))\n\n for n in range(partition_size, self.size + 1, partition_size):\n if self.learning_starts <= n <= self.priority_size:\n distribution = {}\n # P(i) = (rank i) ^ (-alpha) / sum ((rank i) ^ (-alpha))\n pdf = list(\n map(lambda x: math.pow(x, -self.alpha), range(1, n + 1))\n )\n pdf_sum = math.fsum(pdf)\n distribution['pdf'] = list(map(lambda x: x / pdf_sum, pdf))\n # split to k segment, and than uniform sample in each k\n # set k = batch_size, each segment has total probability is 1 / batch_size\n # strata_ends keep each segment start pos and end pos\n cdf = np.cumsum(distribution['pdf'])\n strata_ends = {1: 0, self.batch_size + 1: n}\n step = 1 / float(self.batch_size)\n index = 1\n for s in range(2, self.batch_size + 1):\n while cdf[index] < step:\n index += 1\n strata_ends[s] = index\n step += 1 / float(self.batch_size)\n\n distribution['strata_ends'] = strata_ends\n\n res[partition_num] = distribution\n\n partition_num += 1\n\n return res", "def sample_pagerank(corpus, damping_factor, n):\n all_pages = []\n first_sample_prob = random.randint(0, len(corpus) - 1)\n distribution_count = dict()\n\n for u in corpus:\n distribution_count[u] = 0\n all_pages.append(u)\n\n sample = all_pages[first_sample_prob]\n for i in range(n - 1): # n - 1 because first sample was already calculated\n selection_bucket = dict()\n selection_start = 0.0\n sample_distribution = transition_model(corpus, sample, damping_factor)\n sample_prob = random.random()\n for u in sample_distribution:\n floor = selection_start\n ceiling = selection_start + sample_distribution[u]\n selection_start = ceiling\n selection_bucket[u] = [floor, ceiling]\n for u in selection_bucket:\n v = selection_bucket[u]\n if v[0] < sample_prob < v[1]:\n sample = u\n distribution_count[u] += 1\n distribution = dict()\n for u in distribution_count:\n distribution[u] = float(distribution_count[u]) / n\n\n return distribution", "def get_distribution(doc):\n word_count = {}\n word_count = clean(doc, word_count)\n factor = 1.0 / sum(word_count.values())\n dist = {k: v * factor for k, v in word_count.items()}\n return dist", "def probability(self, samples):\n pass", "def dictionary(word_list):\n word_list.append(\"\")\n for i in range(len(word_list)-2):\n prob_dict.setdefault(word_list[i], []).append(word_list[i+1])", "def factorize(n:int,primesDict:dict = primesDict):\r\n\r\n \r\n if isPrime(n,primesDict):\r\n return {n:1}\r\n\r\n factors = {}\r\n\r\n lastPrime = getLastPrime(primesDict)\r\n print (lastPrime,\"Lastprimes\")\r\n if lastPrime < n:\r\n print (\"Creating DictS\")\r\n\r\n prma(n,lastPrime,primesDict)\r\n\r\n for i in primesDict:\r\n if n%i == 0 :\r\n count = 0\r\n while n % i**(count+1) == 0 :\r\n count+=1 \r\n factors[i]= count\r\n\r\n return factors", "def graphQualityPerPosition(inputFastq):\n\n histD = {}\n\n count = 0\n for (titleStr, seqStr, qualityStr) in FastqIterator(inputFastq):\n count += 1\n if count < 200000:\n continue\n if count > 1200000:\n break\n\n qInts = convertQualityStr(qualityStr) \n for i in range(len(qInts)):\n q = qInts[i]\n if q 
< 0 or q > 40:\n raise Exception(\"Invalid quality value %s at position %s of %s\" % (q, i, qualityStr))\n\n if not histD.has_key(i):\n histD[i] = [0]*41\n\n histD[i][q] += 1\n\n print \"Histogram of quality score per position\"\n allk = histD.keys()\n allk.sort()\n for k in allk:\n print \"%s|\" % k, \"|\".join(str(x) for x in histD[k])", "def iterate_pagerank(corpus, damping_factor):\n distribution = dict()\n corpus_length = len(corpus)\n for u in corpus: #On first iteration, each page is equally likely.\n distribution[u] = 1.0 / corpus_length\n\n difference = 1.0\n max_difference = 0.0\n while ( difference > 0.001 ):\n old_distribution = distribution.copy()\n for u in corpus: #Page we are currently looking at\n prob = (1.0 - damping_factor) / corpus_length\n for x in corpus:\n if u == x:\n continue\n if u in corpus[x]:\n links = list(corpus[x])\n prob += damping_factor * (distribution[x] / len(links))\n distribution[u] = prob\n difference = abs(distribution[u] - old_distribution[u])\n if difference > max_difference: max_difference = difference\n return distribution", "def test_most_probable_value(self):\n with Pandas() as pd:\n if pd is None:\n return\n with Numpy() as np: # noqa\n if numpy is None:\n return\n sys.stderr.write(\"\\n\")\n\n df1 = pd.DataFrame(\n {'A': [0, 1, 2, 3, 4, 3, 2, 1, 1, 1], 'C': ['f1', 'f3', 'f4', 'f3', 'f4', 'f2', 'f2', 'f1', 'f3', 'f4']})\n df2 = pd.DataFrame(\n {'A': [2, 3, 4, 5, 7, 4, 6, 5, 7, 8], 'C': ['f7', 'f3', 'f5', 'f8', 'f9', 'f2', 'f3', 'f6', 'f7', 'f7']})\n\n # building 1d-, 2d-, and 3d-histogram (iteratively)\n hist0 = hg.Categorize(unit('C'))\n hist1 = hg.Categorize(unit('C'))\n hist2 = hg.SparselyBin(origin=0.0, binWidth=1.0, quantity=unit('A'))\n hist3 = hg.SparselyBin(origin=0.0, binWidth=1.0, quantity=unit('A'))\n\n # fill them\n hist0.fill.numpy(df1)\n hist1.fill.numpy(df2)\n hist2.fill.numpy(df1)\n hist3.fill.numpy(df2)\n\n assert hist0.mpv == 'f3'\n assert hist1.mpv == 'f7'\n assert hist2.mpv == 1.5\n assert hist3.mpv == 4.5", "def construct_ngrams_dict(ngrams_list):\n counts = {}\n\n for t in ngrams_list:\n key = hash_function(t)\n if key in counts:\n counts[key] += 1\n else:\n counts[key] = 1\n return counts", "def prob_density_func(xs,norm=True,data_range='data'):\n if data_range=='data':\n dist_keys = set(xs)\n elif data_range=='ext_data':\n dist_keys = range(min(xs),max(xs)+1)\n else:\n dist_keys = data_range\n \n pdf = dict([(k,0.0) for k in dist_keys])\n for x in xs:\n pdf[x] += 1.0\n if norm:\n pdf.update([(k,pdf[k]/sum(pdf.values())) for k in pdf.keys()])\n return pdf", "def edit_probs(result):\n for i in range(TOP_E):\n p = result.data[i][1]\n p = round(p, 4)\n # p_str = str(p)[1:]\n result.data[i][1] = p\n\n return result", "def _compute_register_probs(cls, num_values):\n probs = -np.log((np.arange(num_values) + 1) / (num_values + 1))\n return probs / sum(probs)", "def compute_idfs(documents):\n idfs = dict()\n total_num_documents = len(documents)\n words = set(word for sublist in documents.values() for word in sublist)\n \n for word in words:\n num_documents_containing_word = 0\n \n for document in documents.values():\n if word in document:\n num_documents_containing_word += 1\n \n idf = math.log(total_num_documents / num_documents_containing_word)\n idfs[word] = idf\n\n return idfs", "def calculate_tftd(pl_with_duplicates):\n # print(pl_with_duplicates)\n counter = collections.Counter(pl_with_duplicates)\n pl_tftd = [[int(docId), counter[docId]] for docId in counter.keys()]\n return pl_tftd", "def iterate_pagerank(corpus, 
damping_factor):\n # List all pages in corpus\n pages = list(corpus.keys())\n # {p: i}\n links = dict()\n\n # Fix corpus\n for p in corpus.keys():\n # If no links, then it has one link for every page in corpus\n if corpus[p] == set():\n corpus[p] = set(pages)\n \n for page in pages:\n links[page] = []\n for p in corpus.keys():\n if page in corpus[p]:\n links[page].append(p)\n #print(corpus)\n #print(links)\n\n probabilities = dict()\n updated_probabilities = dict()\n\n # Initial PR = 1/N\n for p in corpus.keys():\n probabilities[p] = 1 / len(corpus.keys())\n updated_probabilities[p] = float(0)\n\n # PR differences\n d = {k: abs(probabilities[k] - updated_probabilities[k]) for k in probabilities if k in updated_probabilities}\n\n # Recalculate\n i = 0\n p_corpus = (1 - damping_factor) / len(corpus)\n while max(d.values()) > 0.001:\n for p in corpus.keys():\n p_link = 0\n # Links\n for lp in links[p]:\n if (i % 2) == 0:\n p_link += (probabilities[lp] / len(corpus[lp]))\n else:\n p_link += (updated_probabilities[lp] / len(corpus[lp]))\n pr = p_corpus + (damping_factor * p_link)\n\n # Update probabilities or updated_probabilities dictionary\n if (i % 2) == 0:\n updated_probabilities[p] = pr\n else:\n probabilities[p] = pr\n \n # Increase count\n i += 1\n\n # Update differences dictionary\n d = {k: abs(probabilities[k] - updated_probabilities[k]) for k in probabilities if k in updated_probabilities}\n #print(\"P\", \"\\033[93m {}\\033[00m\" .format(probabilities))\n #print(\"UP\", \"\\033[96m {}\\033[00m\" .format(updated_probabilities))\n #print(\"D\", \"\\033[91m {}\\033[00m\" .format(d))\n\n # When PR's do not change by > 0.001\n return probabilities", "def get_gold_probdist():\n\n # Read in the dataset as a pandas dataframe.\n card_data_annot = gspd.read_in_categorised()\n\n # Based on the frequencies of each category in the data, create probability distribution and return.\n probdist_dict = gspd.freq_dist_to_prob_dist(card_data_annot)\n return probdist_dict", "def probability(N_dr, L_opmin, L_opmax, L_min, L_max, L_d):\n opening_nomullignas = []\n opening_withmullignas = []\n sum_nomulligans = 0\n sum_withmulligans = 0\n mulligan_coeff = 0\n\n for i in range(L_opmin, min(L_opmax + 1, 8)): # first make a list of tuples of the form:\n # (number_of_lands_in_opening_hand, probability_of_drawing_such_a_hand)\n a = hypergeom(i, 7, 60, L_d)\n opening_nomullignas.append((i, a))\n mulligan_coeff = mulligan_coeff + a # this will be used later for calculating the probability of\n # taking the mulligan and is used as a coefficient before the mulligan sum\n for (x, y) in opening_nomullignas: # use the list of tuples to calculate the first part of equation 5\n partial_nomulligans = 0\n for j in range(L_min - x, L_max - x + 1):\n partial_nomulligans = partial_nomulligans + hypergeom(j, N_dr, 53, L_d - x)\n sum_nomulligans = sum_nomulligans + partial_nomulligans * y\n\n mulligan_coeff = 1 - mulligan_coeff # probability of mulliganing\n for i in range(L_opmin, min(L_opmax + 1, 7)): # doing the same thing as before, but drawing 6 instead of 7 cards\n a = hypergeom(i, 6, 60, L_d)\n opening_withmullignas.append((i, a))\n\n for (x, y) in opening_withmullignas:\n partial_withmulligans = 0\n for j in range(L_min - x, L_max - x + 1):\n partial_withmulligans = partial_withmulligans + hypergeom(j, N_dr, 54, L_d - x)\n sum_withmulligans = sum_withmulligans + partial_withmulligans * y\n total_withmulligans = mulligan_coeff * sum_withmulligans\n\n return total_withmulligans + sum_nomulligans", "def 
Probability(rating1, rating2):\n return 1.0 * 1.0 / (1 + 1.0 * math.pow(10, 1.0 * (rating1 - rating2) / 400))", "def estimate_probabilities(previous_n_gram, n_gram_counts, n_plus1_gram_counts, vocabulary, k=1.0):\r\n \r\n previous_n_gram = tuple(previous_n_gram)\r\n \r\n # add <e> <unk> to the vocabulary\r\n # <s> is not needed since it should not appear as the next word\r\n vocabulary = vocabulary + [\"<e>\", \"<unk>\"]\r\n vocabulary_size = len(vocabulary)\r\n \r\n probabilities = {}\r\n for word in vocabulary:\r\n probability = estimate_probability(word, previous_n_gram, \r\n n_gram_counts, n_plus1_gram_counts, \r\n vocabulary_size, k=k)\r\n probabilities[word] = probability\r\n\r\n return probabilities", "def tabulate_pdf(self):\n\n from mitsuba.core import Float, Vector2f, ScalarVector2f\n\n extents = self.bounds.extents()\n endpoint = self.bounds.max - extents / ScalarVector2f(self.res)\n\n # Compute a set of nodes where the PDF should be evaluated\n x, y = ek.meshgrid(\n ek.linspace(Float, self.bounds.min.x, endpoint.x, self.res.x),\n ek.linspace(Float, self.bounds.min.y, endpoint.y, self.res.y)\n )\n\n endpoint = extents / ScalarVector2f(self.res)\n eps = 1e-4\n nx = ek.linspace(Float, eps, endpoint.x * (1 - eps), self.ires)\n ny = ek.linspace(Float, eps, endpoint.y * (1 - eps), self.ires)\n wx = [1 / (self.ires - 1)] * self.ires\n wy = [1 / (self.ires - 1)] * self.ires\n wx[0] = wx[-1] = wx[0] * .5\n wy[0] = wy[-1] = wy[0] * .5\n\n integral = 0\n\n self.histogram_start = time.time()\n for yi, dy in enumerate(ny):\n for xi, dx in enumerate(nx):\n xy = self.domain.map_forward(Vector2f(x + dx, y + dy))\n pdf = self.pdf_func(xy)\n integral = ek.fmadd(pdf, wx[xi] * wy[yi], integral)\n self.histogram_end = time.time()\n\n self.pdf = integral * (ek.hprod(extents / ScalarVector2f(self.res))\n * self.sample_count)\n\n # A few sanity checks\n pdf_min = ek.hmin(self.pdf) / self.sample_count\n if not pdf_min >= 0:\n self._log('Failure: Encountered a cell with a '\n 'negative PDF value: %f' % pdf_min)\n self.fail = True\n\n self.pdf_sum = ek.hsum(self.pdf) / self.sample_count\n if self.pdf_sum > 1.1:\n self._log('Failure: PDF integrates to a value greater '\n 'than 1.0: %f' % self.pdf_sum)\n self.fail = True", "def get_expected_probability(probabilities):\n\n expected = dict()\n for a, b in cwr(probabilities.keys(), 2):\n if a == b:\n expected[\"\".join(sorted([a, b]))] = probabilities[a] * probabilities[b]\n else:\n expected[\"\".join(sorted([a, b]))] = 2 * (probabilities[a] * probabilities[b])\n\n return expected", "def get_pr_totals(articles, ambiguous_forms, uri_pr, skip_nils, ambiguous_only):\n total_per_form=get_freq_totals(articles, ambiguous_forms, skip_nils, ambiguous_only)\n form_pageranks=defaultdict(dict)\n for form, meanings in total_per_form.items():\n if ambiguous_only and form not in ambiguous_forms:\n continue\n #for uri, total in meanings.items():\n #acc_per_form_meaning[system][form][uri]=correct_per_form[form][uri]/total\n for uri in meanings.keys():\n if uri in uri_pr:\n form_pageranks[form][uri]=uri_pr[uri]\n return form_pageranks", "def PSPLdict():\n pspl_dict = {}\n # individual files\n PSPLs = glob.glob(\"./msresist/data/PSPL/*.csv\")\n for sp in PSPLs:\n if sp == \"./msresist/data/PSPL/pssm_data.csv\":\n continue\n sp_mat = pd.read_csv(sp).sort_values(by=\"Unnamed: 0\")\n\n if sp_mat.shape[0] > 20: # Remove profiling of fixed pY and pT, include only natural AA\n assert np.all(sp_mat.iloc[:-2, 0] == AAlist), \"aa don't match\"\n sp_mat = sp_mat.iloc[:-2, 1:].values\n 
else:\n assert np.all(sp_mat.iloc[:, 0] == AAlist), \"aa don't match\"\n sp_mat = sp_mat.iloc[:, 1:].values\n\n if np.all(sp_mat >= 0):\n sp_mat = np.log2(sp_mat)\n\n pspl_dict[sp.split(\"PSPL/\")[1].split(\".csv\")[0]] = sp_mat\n\n # NetPhores PSPL results\n f = pd.read_csv(\"msresist/data/PSPL/pssm_data.csv\", header=None)\n matIDX = [np.arange(16) + i for i in range(0, f.shape[0], 16)]\n for ii in matIDX:\n kin = f.iloc[ii[0], 0]\n mat = f.iloc[ii[1:], :].T\n mat.columns = np.arange(mat.shape[1])\n mat = mat.iloc[:-1, 2:12].drop(8, axis=1).astype(\"float64\").values\n mat = np.ma.log2(mat)\n mat = mat.filled(0)\n mat = np.clip(mat, a_min=0, a_max=3)\n pspl_dict[kin] = mat\n\n return pspl_dict", "def create_count_map(self) -> Dict[int, int]:\n res: Dict[int, int] = {}\n for sequence_data in self.model.values():\n sequence_data: NGramsSequence = cast(NGramsSequence, sequence_data)\n for count in sequence_data.next_count.values():\n count: int = cast(int, count)\n if count not in res:\n res[count] = 0\n res[count] += 1\n self.count_map = res\n logger.success('created count map')\n return res", "def pre_build_idf_table(self):\r\n doc_per_word_table = dict() # in how many documents does a word occur\r\n \r\n for doc in self.documents:\r\n # converting list to set will delete any duplicate words\r\n doc = self.preprocess_document(doc)\r\n doc_words = set(self.word_tokenize_preprocessed(doc))\r\n\r\n for word in doc_words:\r\n if word in doc_per_word_table:\r\n doc_per_word_table[word] += 1\r\n else:\r\n doc_per_word_table[word] = 1\r\n\r\n total_documents = len(self.documents)\r\n idf_table = dict()\r\n\r\n for word in doc_per_word_table:\r\n idf_table[word] = math.log2(total_documents / float(doc_per_word_table[word]))\r\n\r\n return idf_table", "def PRGA(tab):\n i = 0\n j = 0\n while True:\n i = (i + 1) % MOD\n j = (j + tab[i]) % MOD\n\n tab[i], tab[j] = tab[j], tab[i]\n K = tab[(tab[i] + tab[j]) % MOD]\n yield K", "def _pade_frequencies(num: int):\n num = 2*num\n a = -np.diagflat(range(1, 2*num, 2))\n b = np.zeros_like(a, dtype=np.float_)\n np.fill_diagonal(b[1:, :], 0.5)\n np.fill_diagonal(b[:, 1:], 0.5)\n eig, v = linalg.eig(a, b=b, overwrite_a=True, overwrite_b=True)\n sort = np.argsort(eig)\n izp = 1j*eig[sort]\n resids = (0.25*v[0]*np.linalg.inv(v)[:, 0]*eig**2)[sort]\n assert np.allclose(-izp[:num//2][::-1], izp[num//2:])\n assert np.allclose(resids[:num//2][::-1], resids[num//2:])\n assert np.all(~np.iscomplex(resids))\n return izp[num//2:], resids.real[num//2:]", "def compute_idfs(documents):\n idf={}\n words={}\n # idf= no.of doc/no. of doc in which it lies\n for doc in documents:\n for wrd in set(documents[doc]):\n if wrd.lower() not in words:\n words[wrd.lower()]=0\n words[wrd.lower()]+=1 \n for word in words:\n idf[word]=len(documents)/words[word]\n return idf", "def draw(probs, occ_rep, Nsamp):\n Ns, L = occ_rep.shape\n shots= np.empty((Nsamp,L))\n results = np.random.choice(list(range(len(probs))), Nsamp, p=probs)\n for ii in range(Nsamp):\n shots[ii, : ] = occ_rep[results[ii], :]\n return shots" ]
[ "0.6266391", "0.6231039", "0.6222904", "0.6209188", "0.62075746", "0.60514987", "0.60154533", "0.59842813", "0.59767723", "0.5969421", "0.5939433", "0.58987904", "0.58945656", "0.58854735", "0.58664787", "0.5817961", "0.57828134", "0.5771365", "0.5759954", "0.5755793", "0.57386154", "0.57328147", "0.5705914", "0.5696651", "0.5670643", "0.5664535", "0.5656928", "0.56161195", "0.5610407", "0.5603261", "0.55979073", "0.5591729", "0.5591487", "0.5588176", "0.5571931", "0.5557059", "0.55467325", "0.5538656", "0.55374914", "0.5516762", "0.5516424", "0.55156845", "0.5511292", "0.5496555", "0.5496114", "0.5487536", "0.54872304", "0.54870445", "0.5486652", "0.548508", "0.5484305", "0.5482165", "0.54793835", "0.5473399", "0.5461445", "0.5457154", "0.54478526", "0.54444695", "0.54432917", "0.5442365", "0.54228073", "0.54212564", "0.5419907", "0.53963155", "0.5394618", "0.5393484", "0.5389921", "0.53890085", "0.5387849", "0.5387446", "0.5385966", "0.53847283", "0.5382418", "0.5362347", "0.536211", "0.5360683", "0.5358989", "0.5358226", "0.5356124", "0.5353728", "0.53536326", "0.53534293", "0.5342802", "0.53403294", "0.53360736", "0.5331935", "0.5323172", "0.53191036", "0.53087753", "0.5308092", "0.530797", "0.5304306", "0.52855045", "0.5282324", "0.52810264", "0.52776366", "0.527671", "0.52678907", "0.52670556", "0.52667767" ]
0.74976236
0
Setter method for original
def reset_original(self): self._original = [] # Empty out self._originals
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setOriginal(self,neworiginal):\n\t\tself.original = neworiginal;", "def original(self, original):\n self._original = original", "def original(self) -> Any:\n raise NotImplementedError", "def __init__(self, orig):\n self.orig = orig", "def update_original_data(self):\n pass", "def set_from_original(self):\n self.image = self.orig_image\n self.update_img()\n self.update_size()", "def _patch_implementation(self, original, *args, **kwargs):\n pass", "def original(self):\n return self._original", "def original(self):\n return self._original", "def __setattr__(self, name, value):\n try:\n orig = object.__getattribute__(self, 'orig')\n except AttributeError:\n object.__setattr__(self, name, value)\n else:\n object.__setattr__(orig, name, value)", "def getOriginal(self,):\n\t\treturn self.original;", "def set_attribute(self, attr, value):\n super().set_attribute(attr, value) # Keep this line, it triggers the parent class method.\n setattr(self, attr, value)", "def _copy_data_from(self, original):\n raise NotImplementedError()", "def _save_state_as_orig(self):\n self._orig = None\n self._orig = deepcopy(self)", "def original_price(self, original_price):\n\n self._original_price = original_price", "def set_as_overriden(self):\n raise NotImplementedError(\n \"{} Method `set_as_overriden` not implemented!\".format(repr(self))\n )", "def __set__(self, obj, value):\r\n pass", "def _set_attributes(self):", "def restore(self):\n if self.obj:\n for attrib in self.attribs:\n setattr(self.obj, attrib, getattr(self, attrib))", "def modified(self):\n raise NotImplementedError", "def __originate__(self):\n self.pos_to_num = deepcopy(self.o_pos_to_num)\n self.num_to_pos = deepcopy(self.o_num_to_pos)", "def orig_obj(self):\n return self._orig_obj", "def orig_obj(self):\n return self._orig_obj", "def orig_obj(self):\n return self._orig_obj", "def __setattr__(self, attr, value):\n super().__setattr__(attr, value)", "def copy(self):", "def _modifyProperty(self,dec):\n pass", "def copy(self):\r\n return self.replace()", "def copy(self): # real signature unknown; restored from __doc__\n pass", "def copy(self): # real signature unknown; restored from __doc__\n pass", "def copy(self): # real signature unknown; restored from __doc__\n pass", "def __set__(self, instance, val):\n raise AttributeError(\"Can't set attribute\")", "def _original_data(self, data: np.ndarray):\n if self._raw_data is None:\n self._raw_data = data", "def copy(self):\n return super().copy()", "def update_inplace_from(self, other):\n self.__dict__ = other.__dict__.copy()", "def __copy__(self):\n raise NotImplementedError", "def tweak(self):\n\n return tweak_base(self)", "def pre_set(self, value):\r\n return value", "def __set__(self, instance, value):\r\n if instance:\r\n return instance._values[self.column.column_name].setval(value)\r\n else:\r\n raise AttributeError('cannot reassign column values')", "def exogenous_change(self):\n pass", "def exogenous_change(self):\n pass", "def exogenous_change(self):\n pass", "def __set__(self, obj, value):\n\n return setattr(obj, '_' + self.name, value)", "def clone(self, **kwargs):\n return super(AttentionWrapperState, self)._replace(**kwargs)", "def set(self, obj, value):\n pass", "def restore(self):\n raise NotImplementedError", "def makeOverrides(self):\n\t\tself.overridesWithValues = self.dataOverrides", "def set_value (self):\n raise NotImplementedError", "def override__args(self,arg,value):\n self.__args[arg] = value", "def transform(self, original_input):\n raise NotImplementedError()", "def 
__set__(self, model_instance, value):\r\n raise ValueError, 'Virtual property is read-only'", "def __copy__(self):\n return type(self)(self.value)", "def __copy__(self):\n return self.copy()", "def old(self, old):\n\n self._old = old", "def copy(self):\n pass", "def copy(self):\n pass", "def copy(self):\n pass", "def set_field( self, data ):\n self.val[:] = data[:]\n return", "def replace(self, *args, **kwargs): # real signature unknown\r\n pass", "def __copy__(self, *args, **kwargs):\n return self.copy()", "def set_proxy(self):", "def proxy_set(self, value):\n setter = getattr(self, self.proxy_setter)\n return setter(value)", "def __setitem__(self):\n raise ValueError(\"Dataset objects are immutable\")", "def copy(self):\n # YOUR CODE HERE\n raise NotImplementedError()", "def _set_origin_value(self, origin):\n self.origin_value = origin", "def _backup_line(self):\n if self._orig_line is None:\n self._orig_line = self._line", "def set(self, obj, value):\n raise NotImplementedError", "def copy(self):\r\n raise Exception, \"not implemented\"", "def setData(self,newData):\r\n pass", "def __setattr__(self, name, value):\n super(Message, self).__setattr__(name, value)\n if name not in ('bcc', '_dirty', '_processed'): \n self.__dict__['_dirty'] = True", "def clone(self) -> \"set_default\":\n return type(self)(self.value)", "def __copy__(self, *args, **kwargs): # real signature unknown\n pass", "def __copy__(self, *args, **kwargs): # real signature unknown\n pass", "def _set_coords_copy(self,coords):\n return self.copy()._set_coords_inplace(coords)", "def update_proxy(self, instance, value):\n self.value = value", "def revert(self):\n original = getattr(self, \"_original\", None)\n if not original:\n return\n\n if hasattr(self, \"output\"):\n output = self.output\n keep_output = True\n else:\n keep_output = False\n\n del self._original\n\n self.__dict__ = original.__dict__\n\n if keep_output:\n self.output = output", "def explore_original(self):\n pass", "def set(self, U):\n pass", "def set(self, U):\n pass", "def wrap(cls, orig):\n # hack to give the timestamp this class' specialized methods\n orig.__class__ = cls\n return orig", "def setSharp(self,sharp):\n super(self.__class__, self).setSharp(self, sharp)", "def overwrite_original_file(self, value):\n self.__overwrite_original_file = value", "def updateFromContext(self, other):\n value = self.valueType.set(self.value, other.value)\n self.set(value)\n self.origins.extend(other.origins)", "def original_sub_id(self, original_sub_id):\n\n self._original_sub_id = original_sub_id", "def _fset(self, value):\n # type: (...) 
-> None\n rtype = type_\n if isinstance(type_, TypeVar):\n type_map = dict(\n zip(self.__parameters__, self.__orig_class__.__args__)\n )\n rtype = type_map[type_]\n vars(self)[private_attr] = cast(rtype, value)", "def __adjust(self, *args):\n return \"adjust\"", "def Set(self) -> None:", "def __setattr__(self, attr, value):", "def override(self):\n return None", "def set(self, **kwargs):\n raise NotImplementedError", "def __set__(self, instance, value):\n raise AttributeError(\"A Default Property is Read only\")", "def transform(self, X, copy=...):\n ...", "def transform(self, X, copy=...):\n ...", "def transform(self, X, copy=...):\n ...", "def copyOf(cls, original):\n newSource = CraftDamageSource(original.translationIndex)\n # Check ignoresArmor\n if original.ignoresArmor():\n newSource.setIgnoreArmor()\n # Check magic\n if original.isMagic():\n newSource.setMagic()\n # Check fire\n if original.isExplosion():\n newSource.setExplosion()\n return newSource", "def copy(self):\n copied = super().copy()\n copied.anonymize()\n return copied", "def set_py(self, value):\n pass", "def pre_revert(self):", "def _update_proxy(self, change):\n # The superclass handler implementation is sufficient.\n super(AbstractItemView, self)._update_proxy(change)", "def fset(self, value):\n message = \"Overriding a constant value is an illegal operation: {0} = {1}.\".format(\n name.__name__,\n value)\n raise TypeError(message)" ]
[ "0.8130244", "0.8047649", "0.7397236", "0.730968", "0.6972755", "0.68666035", "0.6818994", "0.67645526", "0.67645526", "0.6702549", "0.6689829", "0.6665114", "0.6546259", "0.6500433", "0.64820886", "0.64437467", "0.6417436", "0.6361053", "0.63502926", "0.6325794", "0.6284538", "0.6282416", "0.6282416", "0.6282416", "0.62746584", "0.6216856", "0.61496", "0.61461294", "0.6127255", "0.6127255", "0.6127255", "0.61167186", "0.61080843", "0.6093877", "0.60340893", "0.6031542", "0.60135156", "0.6006188", "0.6006039", "0.6002202", "0.6002202", "0.6002202", "0.59819347", "0.5952958", "0.5943124", "0.59147006", "0.59143114", "0.5902095", "0.5897437", "0.58949274", "0.5885246", "0.5860751", "0.5854436", "0.5854269", "0.584152", "0.584152", "0.584152", "0.58216435", "0.5815073", "0.58135486", "0.58127767", "0.58127236", "0.58061314", "0.58038145", "0.58025795", "0.580156", "0.5801076", "0.57992494", "0.5794609", "0.57763267", "0.5767538", "0.5734254", "0.5734254", "0.57204103", "0.5711552", "0.5710563", "0.5708994", "0.5705624", "0.5705624", "0.56819856", "0.5675748", "0.56744117", "0.5668513", "0.5662943", "0.5662439", "0.5660252", "0.5653791", "0.56497926", "0.56392753", "0.5625525", "0.561601", "0.561121", "0.561121", "0.561121", "0.56094384", "0.5607116", "0.56065476", "0.55996686", "0.5596766", "0.559315" ]
0.5829867
57
Create an original record.
def generate_original(self):
        fakers = self.schema  # Get the schema; Not sure what this is
        self.reset_original()  # Set the self._original value to be empty
        self.__logger.debug("generate_original()")  # Let us know
        self.__logger.debug("Event ID %d" % self.record_count)  # Let us know
        darr = []  # Data array
        for i, fake in enumerate(fakers):  # Enumerate the fakers
            if self.is_dependent[i] is True:  # Skip over if there is a dependent
                continue  # Skipper
            if self.generator_fcns[fake][1] is None:  # Check if there are params
                value = self.fake.fake(
                    self.generator_fcns[fake][0], params=None)  # Create fake datum
                darr.append(value)  # Append the data to the list
            else:  # If there are
                self.__logger.info(self.generator_fcns[fake][1])
                value = self.fake.fake(
                    self.generator_fcns[fake][0],
                    self.generator_fcns[fake][1])  # Create fake datum
                if isinstance(value, list):  # If it is a list
                    darr.extend(value)  # Extend
                else:  # Otherwise
                    if value not in darr:
                        darr.append(value)  # Just append the value
        self.record_counter()  # Count the number of records
        self.cache_original(darr)  # Cache the results
        return darr  # Return the data array
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createRecord(self):\n self.dto.getRecord().append(self.controller.createNewObj())\n print(\"Record added.\")", "def _inmate_record_get_or_create(self):\n raise NotImplementedError('_inmate_record_get_or_create needs to be implemented with the new format')", "def create_record(self, zone_id, record, record_type, data, ttl=60):\r\n self.record.createObject({\r\n 'domainId': zone_id,\r\n 'ttl': ttl,\r\n 'host': record,\r\n 'type': record_type,\r\n 'data': data})", "def create_new_record(account,userName,password):\n new_record = Records(account,userName,password)\n return new_record", "def create(self, identity, data=None, record=None, **kwargs):\n record.metadata = data.get('metadata', {})", "def create_record(record, force=True, dry_run=False):\n errors = \"\"\n\n if _collection_in_record(record, 'institution'):\n json = strip_empty_values(institutions.do(record))\n elif _collection_in_record(record, 'experiment'):\n json = strip_empty_values(experiments.do(record))\n elif _collection_in_record(record, 'journals'):\n json = strip_empty_values(journals.do(record))\n elif _collection_in_record(record, 'hepnames'):\n json = strip_empty_values(hepnames.do(record))\n elif _collection_in_record(record, 'job') or \\\n _collection_in_record(record, 'jobhidden'):\n json = strip_empty_values(jobs.do(record))\n elif _collection_in_record(record, 'conferences'):\n json = strip_empty_values(conferences.do(record))\n else:\n json = strip_empty_values(hep.do(record))\n\n if dry_run:\n return errors, json\n\n return json\n # control_number = json.get('control_number', json.get('recid'))\n # if control_number:\n # control_number = int(control_number)\n\n # if force and control_number:\n # # Searches if record already exists.\n # with db.session.begin_nested():\n # record = Record.get_record(control_number)\n # if record is None:\n # # Adds the record to the db session.\n # record = Record.create(json, _id=control_number)\n # else:\n # record.update(json)\n # record.commit()\n # db.session.commit()\n # logger.info(\"Elaborated record {}\".format(control_number))\n # return errors, dict(record)", "def cli_add_record(record_data):\n new_record = None\n try:\n new_record = api.insert_record( record_data)\n except DuplicateRecord as error:\n debug(\"%(error)s\" % locals())\n print \"Adding new record failed. %(error)s\" % locals()\n return None\n except MissingRequiredInformaton as error:\n debug(\"%(error)s\" % locals())\n print \"Adding new record failed. %(error)s\" % locals()\n return None\n\n return new_record", "def create_record(self, kind, content, reaction=None):\n with db.transaction():\n rv = Record(\n kind=kind,\n user=self,\n reaction=reaction,\n content=content\n )\n return rv", "def create_record(self, context, record):\n record = self.dns_manager.create_record(context, record)\n return record", "def create_record(\n cls, dump, model, pid_provider, legacy_id_key=\"legacy_recid\"\n ):\n import ipdb\n\n ipdb.set_trace()\n\n if legacy_id_key is None:\n legacy_id_key = \"pid\"\n try:\n with db.session.begin_nested():\n record_uuid = uuid.uuid4()\n provider = pid_provider.create(\n object_type=\"rec\",\n object_uuid=record_uuid,\n )\n dump[\"pid\"] = provider.pid.pid_value\n record = model.create(dump, record_uuid)\n record.commit()\n db.session.commit()\n return record\n except IlsValidationError as e:\n click.secho(\"VALIDATION ERROR\", fg=\"blue\")\n click.secho(\n \"RECID {0} did not pass validation. 
ERROR: \\n {1}\".format(\n dump[legacy_id_key],\n [\n \"{0}: {1}\".format(\n error.res[\"field\"], error.res[\"message\"]\n )\n for error in e.errors\n ],\n ).join(\"\\n\"),\n fg=\"blue\",\n )\n click.secho(e.original_exception.message, fg=\"blue\")\n db.session.rollback()\n raise e", "def test_create_record(self):\n pass", "def _create_internal(self, data, commit=True):\n input_data = self.to_model(data)\n self.validate_all(input_data)\n if not input_data:\n raise UnprocessableEntity(\"Can not create using empty data.\")\n entity = self.model(**input_data)\n db_session.add(entity)\n if commit:\n db_session.commit()\n else:\n db_session.flush() # Flush to get id of created entity.\n \n return entity", "def new_record(self, values=None):\n return Record(schema=self.table_schema, values=values)", "def create(self, identity, data=None, record=None, errors=None, **kwargs):\n record.custom_fields = data.get(\"custom_fields\", {})", "def create(self, **kwargs):\n obj = self.model(**kwargs)\n self._for_write = True\n obj.save(force_insert=True, using=self.db, skip_moderation=True)\n return obj", "def create(cls, dump, model, pid_provider, legacy_id_key=\"legacy_recid\"):\n record = cls.create_record(\n dump, model, pid_provider, legacy_id_key=legacy_id_key\n )\n return record", "def new_archive_record(self, event):\n dbmanager = self.engine.db_binder.get_manager(self.data_binding)\n dbmanager.addRecord(event.record)", "def create(self, key, record, overwrite=False):\n if key in self.db and not overwrite:\n raise ValueError(\"A record for key \\\"%s\\\" already exists.\" % key)\n self.db[key] = copy(record)", "def new_record(self, name: str = None, new_record: dict = None, notes: str = None, created_by: str = None,\n my_conn: Optional[dict] = None, t_log: Optional[TimeLogger] = None, verbose: bool = False):\n\n if my_conn is None:\n my_conn = self.my_conn\n else:\n self.my_conn = my_conn\n\n table_name = self.table_name\n self.all_records = \\\n generic_new_record_db(\n table_name=table_name, name=name, notes=notes, new_record=new_record, my_conn=my_conn, t_log=t_log,\n data_df=self.all_records, created_by=created_by, verbose=verbose\n )", "def _post_record(\n self, ret_record_args, error, cost, start_time, end_time, record\n ):\n\n ret_record_args['main_error'] = str(error)\n ret_record_args['calls'] = record\n ret_record_args['cost'] = cost\n ret_record_args['perf'] = Perf(start_time=start_time, end_time=end_time)\n ret_record_args['app_id'] = self.app_id\n ret_record_args['tags'] = self.tags\n\n ret_record = Record(**ret_record_args)\n\n if error is not None:\n if self.feedback_mode == FeedbackMode.WITH_APP:\n self._handle_error(record=ret_record, error=error)\n\n elif self.feedback_mode in [FeedbackMode.DEFERRED,\n FeedbackMode.WITH_APP_THREAD]:\n TP().runlater(\n self._handle_error, record=ret_record, error=error\n )\n\n raise error\n\n if self.feedback_mode == FeedbackMode.WITH_APP:\n self._handle_record(record=ret_record)\n\n elif self.feedback_mode in [FeedbackMode.DEFERRED,\n FeedbackMode.WITH_APP_THREAD]:\n TP().runlater(self._handle_record, record=ret_record)\n\n return ret_record", "def _create_report_record(self, trade, common_object, reset_period,\n nominal, provision, short_end_rate, forward_rate):\n pass", "def new_archive_record(self, event):\n # If the record was software generated, then any corrections have\n # already been applied in the LOOP packet.\n if event.origin != 'software':\n for obs_type in self.corrections:\n try:\n event.record[obs_type] = 
eval(self.corrections[obs_type], None, event.record)\n except (TypeError, NameError):\n pass\n except ValueError, e:\n syslog.syslog(syslog.LOG_ERR, \"engine: StdCalibration archive error %s\" % e)", "def _new_record():\n nonlocal key\n nonlocal value_list\n nonlocal record\n nonlocal origin\n nonlocal field_offset_map\n key = None\n value_list = None\n if source is not None:\n origin = Origin(source, None, None)\n field_offset_map = {}\n record = RFC822Record(data_cls(), origin, data_cls(), field_offset_map)", "def create_record(self, name, zone, type, data, extra=None):\n id = \"id-%s\" % (name)\n\n zone = self.get_zone(zone_id=zone.id)\n\n if id in self._zones[zone.id][\"records\"]:\n raise RecordAlreadyExistsError(record_id=id, value=None, driver=self)\n\n record = Record(id=id, name=name, type=type, data=data, extra=extra, zone=zone, driver=self)\n self._zones[zone.id][\"records\"][id] = record\n return record", "def createNewRecord(eventID=None):\n eventID=cleanRecordID(eventID)\n rec = None\n if eventID > 0:\n #test that countEvent record exits\n cnt = CountEvent.query.filter(CountEvent.ID == eventID).count()\n if cnt > 0:\n rec = Assignment(eventID,getUID())\n db.session.add(rec)\n else:\n flash(printException(\"Invalid countEvent ID during Count Event creation.\",\"error\"))\n \n return rec", "def create(cls, **kwargs):\r\n return cls().fill(**kwargs).save()", "def create_record(self, context, payload):\n access_token = util.get_access_token(context[\"headers\"])\n record = ZohorecruitRecord(**payload)\n endpoint = f\"{record.module}\"\n record_data = self.retrieve_record_body(record)\n response = util.rest(\"POST\",endpoint,access_token,record_data)\n return json.loads(response.text)", "def abstract_create(self, model, params):\n # we check that the given fields exist\n self.check_fields_existence(model, params.keys())\n\n # then we create the record after preparing params\n return self.env[model].sudo().create(self._prepare_params(params))", "def create_record(self, name, zone, type, data, extra=None):\n if (extra is None) or (\"entry\" not in extra):\n # If no entry is specified, we look for an available one. 
If all\n # are full, raise error.\n record_id = self._get_available_record_entry(zone)\n if not record_id:\n raise WorldWideDNSError(value=\"All record entries are full\", driver=zone.driver)\n else:\n record_id = extra.get(\"entry\")\n if name == \"\":\n name = \"@\"\n if type not in self.RECORD_TYPE_MAP:\n raise RecordError(\n value=\"Record type is not allowed\",\n driver=zone.driver,\n record_id=record_id,\n )\n extra = {\n \"S%s\" % record_id: name,\n \"T%s\" % record_id: type,\n \"D%s\" % record_id: data,\n }\n zone = self.update_zone(zone, zone.domain, extra=extra)\n record = self.get_record(zone.id, record_id)\n return record", "def newRecord(self, projSerial, repairType, repairedBy, date, repairNote):\n # Generate new ID.\n newRepairID = self.getNextIndex(\"repairID\")\n \n # Insert new record.\n self.insert((newRepairID,) +\n (projSerial, repairType, repairedBy, date, repairNote))\n\n return newRepairID", "def create(self, **kwargs):\n return self.save(self.new(**kwargs))", "def add_record(self, record):\n pass", "def create(self,**extra_fields):\r\n print(extra_fields)\r\n data = self.model(**extra_fields)\r\n data.save(using=self._db)", "def create():", "def create():", "def add_a_record(self, record):\n '''\n doc = { \"P/N\": record,#record.get_PN(),\n \"supplier\": \"\",\n \"inventory\": \"\",\n \"specification\": \"\",\n \"description\": \"\",\n \"OEM\": \"\",\n \"tags\": [\"mongodb\", \"python\", \"pymongo\"],\n \"date\": datetime.datetime.utcnow()}'''\n self.collection.insert(record)", "def save_record(record):\n record. save_details()", "def add_record(self, record: Optional[Record] = None, **kwargs):\n\n if record is None:\n record = Record(**kwargs)\n else:\n record.update(**kwargs)\n\n return self.db.insert_record(record=record)", "def NewRecord(self, default={}):\n return HEP.JSONReferenceObject(self.data.get('metadata', {}).get('new_record', default))", "def create(self, identity, data=None, record=None, **kwargs):\n self._populate_access_and_validate(identity, data, record, **kwargs)", "def record_create_for_project(project_id, values):\n values['project_id'] = project_id\n values['created_at'] = datetime.datetime.utcnow()\n values['updated_at'] = datetime.datetime.utcnow()\n\n session = get_session()\n with session.begin():\n record_ref = models.ProjectAccountRecord()\n record_ref.update(values)\n record_ref.save(session=session)\n\n return record_ref", "def create(self, **kwargs):\n reverse_one_to_one_fields = frozenset(kwargs).intersection(\n self.model._meta._reverse_one_to_one_field_names\n )\n if reverse_one_to_one_fields:\n raise ValueError(\n \"The following fields do not exist in this model: %s\"\n % \", \".join(reverse_one_to_one_fields)\n )\n\n obj = self.model(**kwargs)\n self._for_write = True\n obj.save(force_insert=True, using=self.db)\n return obj", "def create(cls, *args, **kwargs):\r\n return cls(*args, **kwargs).save()", "def create(self):\n pass", "def create(self):\n pass", "def create(self):\n pass", "def updated_full_record(full_record):\n full_record[\"access\"][\"status\"] = \"embargoed\"\n full_record[\"created\"] = \"2023-03-23T00:00:00.000000+00:00\"\n full_record[\"id\"] = \"abcde-fghij\"\n full_record[\"metadata\"][\"resource_type\"][\"id\"] = \"other\"\n\n return full_record", "def create(self, **attributes):\n return self.save(self.model(**attributes))", "def add_record(self, record: Dict, src_name: SourceName) -> None:\n concept_id = record[\"concept_id\"]\n record[\"src_name\"] = src_name.value\n label_and_type = 
f\"{concept_id.lower()}##identity\"\n record[\"label_and_type\"] = label_and_type\n record[\"item_type\"] = \"identity\"\n try:\n self.batch.put_item(Item=record)\n except ClientError as e:\n logger.error(\n \"boto3 client error on add_record for \"\n f\"{concept_id}: {e.response['Error']['Message']}\"\n )\n for attr_type, item_type in ITEM_TYPES.items():\n if attr_type in record:\n value = record.get(attr_type)\n if not value:\n continue\n if isinstance(value, str):\n items = [value.lower()]\n else:\n items = {item.lower() for item in value}\n for item in items:\n self._add_ref_record(\n item, record[\"concept_id\"], item_type, src_name\n )", "def create(self):\n\n pass", "def addRecord(self):\n\n ## Saving recorded entries to the CRM and Mailings Database\n print(\"Saving entries to the CRM and Mailings database...\")\n db_connection.executeQuery(\"INSERT INTO dbo.CRM (f_name, l_name, company, address, city, county, state, zip, primary_phone, secondary_phone, email_address) VALUES ('\" + self.first_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.last_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.crm_company_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.address.title() + \"', '\" + self.city.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.county.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.state_code.upper() + \"', '\" + str(self.zip_code) + \"', '\" + self.phone_number + \"', '\" + self.phone_number_2 + \"' , '\" + self.email_address + \"'); COMMIT\")\n db_connection.executeQuery(\"INSERT INTO dbo.Mailings (name, company, address) VALUES ('\" + self.first_name.replace(\"\\'\", \"\\'\\'\").title() + \" \" + self.last_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.company_name.replace(\"\\'\", \"\\'\\'\").title() + \"','\" + self.address + \" \" + self.city.title() + \" \" + self.county.title() + \" \" + self.state_code.upper() + \" \" + str(self.zip_code) + \"'); COMMIT\")", "def create_record(self, name, zone, type, data, extra=None):\n params = {\"type\": self.RECORD_TYPE_MAP[type], \"name\": name, \"data\": data}\n if extra:\n try:\n params[\"priority\"] = extra[\"priority\"]\n except KeyError:\n params[\"priority\"] = None\n try:\n params[\"port\"] = extra[\"port\"]\n except KeyError:\n params[\"port\"] = None\n try:\n params[\"weight\"] = extra[\"weight\"]\n except KeyError:\n params[\"weight\"] = None\n\n if \"ttl\" in extra:\n params[\"ttl\"] = extra[\"ttl\"]\n\n res = self.connection.request(\n \"/v2/domains/%s/records\" % zone.id, data=json.dumps(params), method=\"POST\"\n )\n\n return Record(\n id=res.object[\"domain_record\"][\"id\"],\n name=res.object[\"domain_record\"][\"name\"],\n type=type,\n data=data,\n zone=zone,\n ttl=res.object[\"domain_record\"].get(\"ttl\", None),\n driver=self,\n extra=extra,\n )", "def _create(self, model_obj):\n conn = self._get_session()\n\n try:\n conn.add(model_obj)\n except DatabaseError as exc:\n logger.error(f\"Error while creating: {exc}\")\n raise\n finally:\n if not current_uow:\n conn.commit()\n conn.close()\n\n return model_obj", "def create_record_data_for_removal(\n record, trx_type: str, removal_date: dt.datetime, credit_value: float\n) -> Dict:\n record_data = {\n \"trx_type\": trx_type,\n \"artist\": \"; \".join([artist.artist_name for artist in record.artists]),\n \"title\": record.title,\n \"genre\": record.genre.genre_name,\n \"label\": \"; \".join([label.label_name for label in record.labels]),\n \"year\": record.year,\n \"record_format\": 
record.record_format.format_name,\n \"vinyl_color\": record.vinyl_color,\n \"lim_edition\": record.lim_edition,\n \"number\": record.number,\n \"remarks\": record.remarks,\n \"purchase_date\": str(record.purchase_date),\n \"price\": record.price,\n \"rating\": record.rating,\n \"is_digitized\": record.is_digitized,\n \"is_active\": 0,\n \"removal_date\": removal_date,\n \"credit_value\": credit_value,\n }\n return record_data", "def write(self):\n if not self._table: raise ValueError ( \"_table is Null\" )\n if self._isnew:\n for m in self._modified_values:\n self._modified_values[m] = self._table[m].val_py2sql(self._modified_values[m])\n \n try:\n rec = CFG.CX.insert ( CFG.DB.SCHEMA + \".\" + self._table.name,\n self._modified_values )\n #this will automatically re-read the data from the db, to take all changes\n #done by triggers and default values into account.\n self._objectid = rec['objectid']\n\n #print \"Record # {0} inserted into {1}.\".format(self._objectid, self._table.name)\n self.raiseEvent ( \"record_added\", self )\n \n except pg.DatabaseError, e:\n print \"Error inserting record.\"\n raise Record.DataManipulationError ( \"Inserting a new record into '{0}'\".format(self._table.name),\n str(self._modified_values),\n e)\n elif self._ismodified:\n \n for m in self._modified_values: \n self._modified_values[m] = self._table[m].val_py2sql(self._modified_values[m])\n \n self._modified_values['objectid'] = self._objectid\n del self.TextCache[self._objectid]\n try:\n rec = CFG.CX.update ( CFG.DB.SCHEMA + \".\" + self._table.name,\n self._modified_values )\n self.read() \n self.raiseEvent ( \"record_saved\", self )\n except pg.DatabaseError, e:\n print \"Error updating record\"\n raise Record.DataManipulationError ( \"Updating record {1} of '{0}'\".format(self._table.name, self._objectid),\n str(self._modified_values),\n e)", "def _create(self, model_obj: Any):\n conn = self.provider.get_connection()\n\n try:\n model_obj.save(\n refresh=True,\n index=self.model_cls._index._name,\n using=conn,\n )\n except Exception as exc:\n logger.error(f\"Error while creating: {exc}\")\n raise\n\n return model_obj", "def create(self):", "def __create_record(self, id, message_reference_beginning):\n\n instrument = self.get_random_instrument()\n\n message_reference = self.__create_message_reference(\n message_reference_beginning, id)\n function = self.__get_function()\n message_creation_timestamp = datetime.now(timezone.utc)\n linked_message = \\\n self.__get_linked_message(self.message_reference_list)\n # message_reference is added to message_reference_list after\n # generating linked_message, otherwise the linked_message\n # could be this settlement instruction's own message reference\n self.message_reference_list.append(message_reference)\n linkage_type = self.__get_linkage_type()\n place_of_trade = self.__get_place_of_trade()\n trade_datetime = datetime.now(timezone.utc)\n deal_price = self.__get_deal_price()\n currency = self.create_currency()\n isin = self.__get_isin(instrument)\n place_of_listing = self.__get_place_of_listing(instrument)\n quantity = self.create_random_integer()\n party_bic = self.create_random_string(10)\n party_iban = self.__get_party_iban()\n account_type = self.__get_account_type()\n safekeeper_bic = self.create_random_string(10)\n settlement_type = self.__get_settlement_type()\n counterparty_bic = self.create_random_string(10)\n counterparty_iban = self.__get_counterparty_iban()\n settlement_date = self.__get_settlement_date()\n instruction_type = 
self.__get_instruction_type()\n status = self.__get_status()\n\n record = {\n 'message_reference': message_reference,\n 'function': function,\n 'message_creation_timestamp': message_creation_timestamp,\n 'linked_message': linked_message,\n 'linkage_type': linkage_type,\n 'place_of_trade': place_of_trade,\n 'trade_datetime': trade_datetime,\n 'deal_price': deal_price,\n 'currency': currency,\n 'isin': isin,\n 'place_of_listing': place_of_listing,\n 'quantity': quantity,\n 'party_bic': party_bic,\n 'party_iban': party_iban,\n 'account_type': account_type,\n 'safekeeper_bic': safekeeper_bic,\n 'settlement_type': settlement_type,\n 'counterparty_bic': counterparty_bic,\n 'counterparty_iban': counterparty_iban,\n 'settlement_date': settlement_date,\n 'instruction_type': instruction_type,\n 'status': status\n }\n\n for key, value in self.create_dummy_field_generator():\n record[key] = value\n\n return record", "def create(self, validated_data):", "def _to_acknowledgement_record(parsed):\n return AcknowledgementRecord(record_type=parsed.record_type,\n transaction_sequence_n=parsed.transaction_sequence_n,\n record_sequence_n=parsed.record_sequence_n,\n original_group_id=parsed.group_id,\n original_transaction_sequence_n=parsed.original_transaction_sequence_n,\n original_transaction_type=parsed.original_transaction_type,\n transaction_status=parsed.transaction_status,\n creation_date_time=parsed.creation_date_time,\n processing_date=parsed.processing_date,\n creation_title=parsed.creation_title,\n submitter_creation_n=parsed.submitter_creation_n,\n recipient_creation_n=parsed.recipient_creation_n)", "def create_item(obj: endpoint_model):\n # should this error if exists?\n new_obj = db.save(obj)\n return new_obj", "def save(self, record):\n pass", "def create_record_w_file(client, record, headers):\n # Create draft\n record[\"files\"] = {\"enabled\": True}\n response = client.post(\"/records\", json=record, headers=headers)\n assert response.status_code == 201\n recid = response.json[\"id\"]\n\n # Attach a file to it\n response = client.post(\n f\"/records/{recid}/draft/files\", headers=headers, json=[{\"key\": \"test.pdf\"}]\n )\n assert response.status_code == 201\n response = client.put(\n f\"/records/{recid}/draft/files/test.pdf/content\",\n headers={\n \"content-type\": \"application/octet-stream\",\n \"accept\": \"application/json\",\n },\n data=BytesIO(b\"testfile\"),\n )\n assert response.status_code == 200\n response = client.post(\n f\"/records/{recid}/draft/files/test.pdf/commit\", headers=headers\n )\n assert response.status_code == 200\n\n # Publish it\n response = client.post(f\"/records/{recid}/draft/actions/publish\", headers=headers)\n assert response.status_code == 202\n\n return recid", "def record_create_for_user(project_id, user_id, values):\n values['project_id'] = project_id\n values['user_id'] = user_id\n\n session = get_session()\n with session.begin():\n record_ref = models.UserAccountRecord()\n record_ref.update(values)\n record_ref.save(session=session)", "def _create(self, **attributes: Dict[str, object]) -> str:\n pass", "def save_record(rec,err_list=[]):\n if validate_form(rec):\n # Set the sign of qty based on transaction type\n if not rec.qty:\n rec.qty = 0\n else:\n rec.qty = abs(rec.qty)\n \n if rec.trx_type.lower() == \"remove\" and rec.qty != 0:\n rec.qty = rec.qty * -1\n \n Transaction(g.db).save(rec)\n try:\n g.db.commit()\n #Save the date and comment to session\n session['last_trx'] = {\"created\":rec.created,\"note\":rec.note}\n return True\n \n except 
Exception as e:\n err_list.append(printException('Error attempting to save Transaction record',str(e)))\n \n g.db.rollback()\n return False", "def create(self, identity, data=None, record=None, **kwargs):\n self._populate_access_and_validate(identity, data, record, **kwargs)\n self._init_owners(identity, record, **kwargs)", "def save_ehr_record(self, ehr_record, patient_record, record_moved=False):\n self._check_index_service()\n drf = self._get_drivers_factory(self.ehr_repository)\n ehr_record.bind_to_patient(patient_record)\n if ehr_record.is_persistent:\n if record_moved:\n ehr_record = self.version_manager.update_field(ehr_record, 'patient_id',\n ehr_record.patient_id, 'last_update')\n else:\n raise OperationNotAllowedError('An already mapped record can\\'t be assigned to a patient')\n else:\n # saving a new record, this is the first revision\n ehr_record.increase_version()\n # calculate and set the structure ID for the given record\n self._set_structure_id(ehr_record)\n with drf.get_driver() as driver:\n try:\n driver.add_record(driver.encode_record(ehr_record))\n except Exception, e:\n # if a new structure was created, delete it (reference counter is 0)\n self.index_service.check_structure_counter(ehr_record.structure_id)\n raise e\n self.index_service.increase_structure_counter(ehr_record.structure_id)\n patient_record = self._add_ehr_record(patient_record, ehr_record)\n return ehr_record, patient_record", "async def create(\n self,\n invocation_record: MutationUseCaseInvocationRecord[UseCaseArgs],\n ) -> None:", "def create(self, collection_id, parent_id, record, id_generator=None,\n unique_fields=None, id_field=DEFAULT_ID_FIELD,\n modified_field=DEFAULT_MODIFIED_FIELD,\n auth=None):\n obj = self.collection.serialize(record)\n obj.parent_id = parent_id\n setattr(obj, modified_field, datetime.datetime.utcnow())\n try:\n Session.add(obj)\n Session.flush()\n except IntegrityError as e:\n logger.exception('Object %s for collection %s raised %s', record, self.collection, e)\n process_unicity_error(e, Session, self.collection, record)\n # TODO: store new timestamps date\n return self.collection.deserialize(obj)", "def create(self, *args, **kwargs):\n obj, created = self.get_or_create(stub=self.model.STUB_DEFAULT)\n if not created:\n with transaction.atomic():\n obj.delete()\n obj = self.create(stub=self.model.STUB_DEFAULT)\n return obj", "def _put_assume_new(self, _id=None, **data):\n if _id is None:\n _id = str(uuid4())\n doc = dict(_id=_id, **data)\n try:\n current_doc = self._db.create_document(doc, throw_on_exists=True)\n except couchdb.http.ResourceConflict:\n # TODO: _rev is in header, don't need to get entire doc\n # Don't use self.get, don't want to actually download an attachment\n current_doc = self._db.get(_id)\n current_doc.update(doc)\n current_doc.save()\n return current_doc", "def create_event(record, user):\n # If the event is cancelled, we don't need to store it\n if record['status'] == 'cancelled':\n return\n time_zone = pytz.timezone(user.cal_meta_data.time_zone or 'UTC')\n event = Event()\n event.user = user\n event.event_id = record['id']\n event.summary = record['summary']\n event.description = record.get('description', '')\n event.location = record.get('location', '')\n event.is_creator = record['creator'].get('self', False)\n if not event.is_creator:\n event.creator_email = record['creator'].get('email', '')\n # Defaulting below field to False but it will be updated once we process\n # attendees list\n event.is_attendee = False\n\n start, end = record['start'], 
record['end']\n if start.get('dateTime'):\n event.start_datetime = parser.parse(start['dateTime'])\n else:\n event.start_datetime = time_zone.localize(parser.parse(start['date']))\n if end.get('dateTime'):\n event.end_datetime = parser.parse(end['dateTime'])\n else:\n event.end_datetime = time_zone.localize(parser.parse(end['date']))\n event.created_at = parser.parse(record['created'])\n event.save()\n create_attendees(event, record.get('attendees', []))\n return event", "def create_historical_record(self, instance, history_type):\n history_date = getattr(instance, '_history_date', now())\n history_changeset = self.get_history_changeset(instance)\n manager = getattr(instance, self.manager_name)\n attrs = {}\n for field in instance._meta.fields:\n attrs[field.attname] = getattr(instance, field.attname)\n\n for field_name in self.additional_fields:\n loader = getattr(self, 'get_%s_value' % field_name)\n value = loader(instance, type)\n attrs[field_name] = value\n\n manager.create(\n history_date=history_date, history_type=history_type,\n history_changeset=history_changeset, **attrs)", "def create(self):\n if self.id:\n raise ResourceAlreadyCreatedError\n data_dict = {k: v for k, v in self.to_dict().items() if v is not None}\n result = self._client.raw_post(self.managing_endpoint, data_dict, 201)\n # TODO: update object from result\n self._id = result.get(\"id\", None)", "def create_model(self, form):\n try:\n model = self.model()\n form.populate_obj(model)\n checksum = models.sha256_checksum(model.full_path)\n checksum_m = models.get_or_create(self.session, models.Checksum, value=checksum)[0]\n instance = self.session.query(self.model).filter_by(checksum=checksum_m).first()\n if instance:\n model = instance\n self.session.add(model)\n self._on_model_change(form, model, True)\n self.session.commit()\n except Exception as ex:\n if not self.handle_view_exception(ex):\n flash(gettext('Failed to create record. 
%(error)s', error=str(ex)), 'error')\n logger.exception('Failed to create record.')\n self.session.rollback()\n return False\n else:\n self.after_model_change(form, model, True)\n return model", "async def create(self, payload):\n\n async with self.db.manager.database.transaction():\n obj = await self._expand(await self.db.create(**payload))\n self.log.info(f\"New {self.db_model_name}: {obj}\")\n return obj", "def create(self, data):\n raise NotImplementedError", "def apply_ruling(self, ruling, record):\r\n record.update(ruling)\r\n return record", "def create_data_record(self, data_dict):\n source_dict = deepcopy(data_dict)\n assert not self.is_conflicting_keys(data_dict,\n self.default_values), \"Conflicting keys between default_values and extra_values\"\n source_dict.update(self.default_values)\n return {\n '_index': self.get_full_index(),\n '_type': 'python_log',\n '_source': source_dict\n }", "def emit(self, record):\n try:\n record.created = datetime.fromtimestamp(record.created)\n self._collection.insert_one(self.format(record))\n except InvalidDocument as e:\n logging.error(\"Unable to save log record: %s\", e.message,\n exc_info=True)", "def create(self):\n ...", "def create(self, *args, **kwargs):\n pass", "def createOr(self):\n return _libsbml.FbcOr_createOr(self)", "def new(self):\n\n if not hasattr(self, 'required_attribs'):\n self.required_attribs = []\n\n # sanity check\n for req_var in self.required_attribs:\n if req_var not in self.kwargs:\n err = \"The '%s' kwarg is required when creating new %s!\"\n msg = err % (req_var, self.collection)\n self.logger.error(msg)\n self.logger.error('Incoming kwargs dict: %s' % self.kwargs)\n raise ValueError(msg)\n\n # do it\n self.logger.warn('Creating new %s record!' % self.collection)\n\n for req_var in self.required_attribs:\n setattr(self, req_var, self.kwargs[req_var])\n\n self.created_on = datetime.now()\n self.updated_on = datetime.now()\n self.created_by = flask_login.current_user._id\n self._id = self.mdb.insert({})\n\n try:\n self.save()\n except pymongo.errors.DuplicateKeyError as e:\n self.mdb.remove({'_id': self._id})\n self.logger.error(e)\n self.logger.error('Cannot create asset: %s' % self)\n raise ValueError('Duplicate key error prevented asset creation!')", "def _add_record(self, holder_barcode, plate, holder_img, pins_img):\n guid = str(uuid.uuid4())\n self._store_writer.to_image(pins_img, holder_img, guid)\n img_path = self._store_writer.get_img_path()\n holder_image_path = self._store_writer.get_holder_img_path()\n record = Record.from_plate(holder_barcode, plate, img_path, holder_image_path)\n\n self.records.append(record)\n self._process_change()", "async def create(self, **state):\n connection = state.pop(self.connection_kwarg, None)\n obj = self.model(**state)\n await obj.save(force_insert=True, connection=connection)\n return obj", "def create(self):\n\n raise NotImplementedError", "def store(self, cursor: sqlite3.Cursor, record: ModelledTable) -> bool:\n\n if not isinstance(record, self.record):\n raise Exception(\"Wrong type\")\n\n fields = list(self.table_fields.keys())\n data: Dict[str, Any] = {}\n\n for field in fields:\n data[field] = getattr(record, field)\n\n for _field, (_attr, _model) in self.foreigners.items():\n data[_field] = data[_attr][_field]\n del data[_attr]\n\n if data[self.id_field] is None:\n fields.remove(self.id_field)\n del data[self.id_field]\n else:\n fields.append(self.id_field)\n\n sql = (\n f\"INSERT OR REPLACE INTO [{self.table}] ([{'], ['.join(fields)}])\"\n f\" VALUES (:{', 
:'.join(fields)})\"\n )\n\n _LOGGER.debug(sql)\n _LOGGER.debug(data)\n\n cursor.execute(sql, data)\n\n setattr(record, self.id_field, cursor.lastrowid)\n\n return True", "def new_archive_record(self, event):\n # No need to do anything if the record is already in the target\n # unit system\n if event.record['usUnits'] == self.target_unit: return\n # Perform the conversion\n converted_record = self.converter.convertDict(event.record)\n # Add the new unit system\n converted_record['usUnits'] = self.target_unit\n # Replace the old record with the new, converted record\n event.record = converted_record", "def insert_record(self):\r\n try:\r\n db.session.add(self)\r\n db.session.commit()\r\n return {\"error\": False, \"id\": self.id}\r\n except exc.SQLAlchemyError as e: # pragma: no cover\r\n # print(e)\r\n # print(sys.exc_info())\r\n db.session.rollback()\r\n return {\"error\": True}\r\n finally:\r\n db.session.close()", "def perform_create(self, serializer):\n extra_data = self.get_additional_data(True)\n serializer.save(**extra_data)", "def insert_or_update(self, table, record):\n try:\n request = s.query(table=table, query={'sys_id': record['sys_id']})\n #request.get_single()\n response = request.update(record)\n print >> sys.stderr, 'update'\n except NoResults:\n # Record does not exist so create it\n response = self.snow.insert(table=table, payload=record)\n print >> sys.stderr, 'create'\n return response", "def test_patch_record(self):\n pass", "def create(cls, **validated_data):\n instance = cls(**validated_data)\n if isinstance(instance, cls):\n db.session.add(instance)\n try:\n db.session.commit()\n return instance\n except Exception as error:\n db.session.rollback()\n print(error.args)\n return None", "def new_archive_record(self, event):\n now = int(time.time() + 0.5)\n delta = now - event.record['dateTime']\n if delta > event.record['interval'] * 60:\n logdbg(\"Skipping record: time difference %s too big\" % delta)\n return\n if self.last_ts is not None:\n self.save_data(self.get_data(now, self.last_ts))\n self.last_ts = now\n #-- TBD: make this tunable on/off via variable\n #-- if self.max_age is not None:\n #-- self.prune_data(now - self.max_age)", "def add_record(self):\n if not self.record_exists(self.args.date):\n record = self.create_record()\n self.records.append(record)\n self.write_json_file(self.records_file, self.records)\n return True\n return False", "def prepare_record_data_for_DB_insert(record_data: Dict) -> Dict:\n if record_data[\"artist\"] is None or record_data[\"title\"] is None:\n raise AssertionError(\"Artist and / or Title cannot be None.\")\n\n artist_list = [art.strip() for art in record_data[\"artist\"].split(\";\")]\n artist_country_list = [\n co.strip() for co in record_data[\"artist_country\"].split(\";\")\n ]\n label_list = [lab.strip() for lab in record_data[\"label\"].split(\";\")]\n\n if len(artist_list) != len(artist_country_list):\n raise AssertionError(\n \"Need the same number of artists and artist countries.\"\n )\n\n record_data[\"artist\"] = artist_list\n record_data[\"artist_country\"] = artist_country_list\n record_data[\"label\"] = label_list\n return record_data", "def create_nautobot_record(nautobot_model, ids: Mapping, attrs: Mapping, multivalue_attrs: Mapping):\n try:\n record = nautobot_model(**ids, **attrs)\n record.clean()\n record.save()\n for attr, value in multivalue_attrs.items():\n getattr(record, attr).set(value)\n return record\n except IntegrityError as exc:\n logger.error(\n \"Nautobot reported a database integrity 
error\",\n action=\"create\",\n exception=str(exc),\n model=nautobot_model,\n model_data=dict(**ids, **attrs, **multivalue_attrs),\n )\n except DjangoValidationError as exc:\n logger.error(\n \"Nautobot reported a data validation error - check your source data\",\n action=\"create\",\n exception=str(exc),\n model=nautobot_model,\n model_data=dict(**ids, **attrs, **multivalue_attrs),\n )\n except ObjectDoesNotExist as exc: # Including RelatedObjectDoesNotExist\n logger.error(\n \"Nautobot reported an error about a missing required object\",\n action=\"create\",\n exception=str(exc),\n model=nautobot_model,\n model_data=dict(**ids, **attrs, **multivalue_attrs),\n )\n\n return None", "def create():\n pass", "def createOr(self):\n return _libsbml.FbcAnd_createOr(self)" ]
[ "0.66684353", "0.6392694", "0.61336666", "0.6101538", "0.6050459", "0.6033388", "0.5997482", "0.5970368", "0.5938527", "0.59184444", "0.5904673", "0.5903052", "0.58274496", "0.5815766", "0.5784867", "0.57735556", "0.57151145", "0.5684695", "0.5666976", "0.5666811", "0.5662072", "0.5658043", "0.56156427", "0.5602748", "0.55915666", "0.5588513", "0.55806524", "0.5561766", "0.55617636", "0.5553409", "0.5548835", "0.5547606", "0.5535782", "0.5533844", "0.5533844", "0.5533744", "0.55319613", "0.55113703", "0.54984975", "0.54984486", "0.5474879", "0.5448471", "0.54268664", "0.5421097", "0.5421097", "0.5421097", "0.5419918", "0.53989255", "0.5392647", "0.53878254", "0.5368432", "0.536593", "0.5363449", "0.53535575", "0.5346288", "0.5338726", "0.5326715", "0.53158635", "0.5302905", "0.5296164", "0.52901316", "0.52863383", "0.52828294", "0.52827185", "0.52708834", "0.52703935", "0.5265293", "0.52643514", "0.52623945", "0.5261045", "0.52560407", "0.5253312", "0.52527636", "0.52417225", "0.52130014", "0.52122664", "0.52098376", "0.52071685", "0.52050036", "0.52024657", "0.5200044", "0.51933974", "0.51884323", "0.51875913", "0.51737213", "0.5171412", "0.515898", "0.5157902", "0.515608", "0.5151933", "0.5144869", "0.51422614", "0.5132738", "0.51284605", "0.51244056", "0.512335", "0.51162714", "0.51106286", "0.51041096", "0.5102775", "0.51026726" ]
0.0
-1
Determines whether the original record will be duplicated. Gets the maximum number of duplicated records to generate.
def expect_duplicate(self):
        # Reset everything for this record
        self._expect_duplicate = False
        self.__dupcntr = 0
        self.__maxdup = 0
        # Get the probability to generate duplicate for next record
        if self.fake.random.random() < self.duplicate_cfg["Prob_duplicate"]:
            self._expect_duplicate = True
            self.__maxdup = self.random_select_ndups()
        else:
            self._expect_duplicate = False
            self.__maxdup = 0
        self.__logger.debug("expect_duplicate ndups: %d", self.__maxdup)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_duplicate(self):\n return bool(self.duplicated)", "def isRepeated(self):\n return self._field.label == FieldDescriptor.LABEL_REPEATED", "def is_duplicate(self, **kwargs):\n return len(list(self.c.select(**kwargs))) > 0", "def process_duplicate_rows(self):\n pass", "def is_repetition(self):\n return self.id == 1", "def Get_dup_records(ds,key_var):\n temp = ds.groupby([key_var]).agg({key_var:'count'}).rename(columns={key_var:'Freq'}).reset_index()\n temp = temp[temp['Freq']>1]\n print(\"Total Duplicate records:: \" +str(temp.shape[0]))\n\n return temp", "def testDuplicate(self,permutations=True):\n # This algorithm is faster than encode,\n # but for nplex=2 enmagic2 would probably still be faster.\n if permutations:\n C = self.copy()\n C.sort(axis=1)\n else:\n C = self\n ind = sortByColumns(C)\n C = C.take(ind,axis=0)\n ok = (C != roll(C,1,axis=0)).any(axis=1)\n if not ok[0]: # all duplicates -> should result in one unique element\n ok[0] = True\n return ind,ok", "def test_add_dup(self):\n for i in range(3):\n self.datastore.save(self.trans)\n\n eq_(1, self.datastore._collection.count())", "def test_are_duplicates_length(self):\n rules = [\n pd.Series({\"A\": \"high\", \"B\": Bounds(lower=1, upper=1), \"C\": Bounds(lower=1, upper=1), \"Class\": \"apple\"},\n name=1),\n pd.Series({\"B\": Bounds(lower=1, upper=1), \"C\": Bounds(lower=1, upper=1),\n \"Class\": \"apple\"}, name=2)\n ]\n duplicate = _are_duplicates(rules[0], rules[1])\n self.assertTrue(duplicate is False)", "def test_duplicate_entries(self):", "def has_duplicates(l):\r\n return len(set(l)) < len(l)", "def duplicates_marked_reciprocally():\n ids = FRAMEWORKS_DF['CURATED-COFs ID'].str\n messages = []\n\n for _index, row in FRAMEWORKS_DF.iterrows():\n if row['Duplicate found'] != 'none':\n original_id = row['CURATED-COFs ID']\n duplicate_id = row['Duplicate found']\n duplicate_row = FRAMEWORKS_DF.loc[FRAMEWORKS_DF['CURATED-COFs ID'] == duplicate_id ]\n if not len(duplicate_row) == 1:\n messages.append(f'Found row without reciprocal duplicate mark:\\n{row}')\n\n duplicate_row_original_id = duplicate_row['Duplicate found'].values[0]\n if not duplicate_row['Duplicate found'].values[0] == original_id:\n messages.append(f'Duplicate row lists ID {duplicate_row_original_id}, expected {original_id}')\n\n if messages:\n print('\\n'.join(messages))\n sys.exit(1)\n\n print('Rows marked as duplicates go both ways.')", "def duplicate_record_check(cur):\n # get all created tables from db\n cur.execute(\"SELECT * FROM information_schema.tables WHERE table_schema='public'\")\n result = cur.fetchall()\n\n # create list of tables\n table_list = [table[2] for table in result]\n\n print('Checking tables for duplicate records...')\n\n # check each table for duplicates\n for table_name in table_list:\n cur.execute(f\"SELECT COUNT(*) FROM {table_name}\")\n row_count = cur.fetchall()\n cur.execute(f\"SELECT DISTINCT COUNT(*) FROM {table_name}\")\n distinct_count = cur.fetchall()\n if row_count[0][0] == distinct_count[0][0]:\n print(f\"GREAT, no duplicate records found in {table_name}!\")\n elif distinct_count[0][0] < row_count[0][0]:\n print(f\"WARNING, duplicate records found! 
{distinct_count[0][0]}\"\n f\"distinct record count is less than total record count of {row_count[0][0]}\")", "def is_unique(self, field):\n old_length = len(self.archive)\n self.archive.add(self.create_hash(field))\n return len(self.archive) > old_length", "def __numRecordsMoreThanMax(self, numRecords):\n return numRecords > self.maxRecordCount", "def check_no_duplicates(examples):\n return len(examples) == len(set(examples))", "def test_duplicated_gaitid(self):\n idaa_index = 6\n\n upload_program = program.ProgramUpload(idaa_program=self.idaa_json['value'][idaa_index]['fields'],\n msr_country_codes_list=msr_country_codes_list, msr_gaitid_list=msr_gaitid_list, duplicated_gaitids=self.duplicated_gaitids\n )\n\n self.assertFalse(upload_program.is_valid())\n self.assertTrue(upload_program.has_discrepancy('duplicate_gaitid'))", "def single_records(df,\n key_cols=['report_date', 'plant_id_eia', 'generator_id']):\n len_1 = len(df)\n len_2 = len(df.drop_duplicates(subset=key_cols))\n return bool(len_1 == len_2)", "def test_phonebook_with_duplicate_entries_is_inconsostent(self):\n self.phonebook.add(\"Bob\", \"12345\")\n self.phonebook.add(\"Mary\", \"12345\")\n self.assertFalse(self.phonebook.is_consistent())", "def dupable_matches_required(self):\n return 2", "def is_unique(self):\r\n return self._unique", "def get_duplicate_rows(df):\n\treturn df.duplicated().sum()", "def duplicate_and_unique_movies(dataset, index_):\r\n for row in dataset.values():\r\n \r\n key=row[index_]\r\n if key in review_max.keys():\r\n num=review_max[key]\r\n num+=1\r\n review_max[key]=num\r\n else:\r\n review_max[key]=1\r\n \r\n movies_clean=[num for num in review_max.values() if num>1]", "def check_duplicate(self, state):\n pass", "def _test_sampdup(t):\n return t.shape[1] != len(set(t.ids(axis='sample')))", "def test_identify_duplicates_4(self):\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = \"replace\"\n ticket1.phage_id = \"Trixie\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = 2\n ticket2.type = \"replace\"\n ticket2.phage_id = \"Trixie\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)", "def _recalculate_ticket(self, ticket):\n ids = self._get_dups_recursively(ticket.id)\n\n dups = \", \".join([str(i) for i in ids])\n dup_count = len(ids)\n\n if ticket.values.get('dups', None) == dups \\\n and int(ticket.values.get('dup_count', '')) == dup_count:\n return False\n\n self.env.log.debug('Recalculated ticket %s with dups %s (%d)' % (\n ticket.id, dups, dup_count))\n\n ticket['dups'] = dups\n ticket['dup_count'] = str(dup_count)\n\n # delete fields if there are no dups\n if dup_count == 0:\n ticket['dups'] = None\n ticket['dup_count'] = None\n\n return True", "def _is_duplicated_rule(self, table_entry: TableEntry) -> bool:\n te_hash = _hash(table_entry)\n if te_hash in self.table_entries: # avoiding duplicated ipv4 forwarding rules\n return True", "def test_identify_duplicates_6(self):\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = \"replace\"\n ticket1.phage_id = \"Trixie\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = 1\n ticket2.type = \"replace\"\n ticket2.phage_id = \"Trixie\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n 
tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 1)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)", "def is_article_duplicate(cls, article):\n return cls.db.hkeys(\"article_map\").count(article.link) != 0", "def check_duplicates(in_file, sep_type=\"\", header_rows=0):\n\n if sep_type==\"\":\n data=pd.read_csv(in_file, skiprows=header_rows, header=None, delim_whitespace=True) \n else:\n data=pd.read_csv(in_file, skiprows=header_rows, header=None, sep=sep_type)\n\n dup=data.duplicated(keep='first')\n dup_True=np.where(dup==True)\n len_dup_True_indx=len(dup_True[0])\n\n if len_dup_True_indx == 0:\n print(\"No duplicated rows in %s\" %(in_file))\n else:\n print(\"%i duplicated rows found in %s\" %(len_dup_True_indx, in_file))", "def test_identify_duplicates_3(self):\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = \"replace\"\n ticket1.phage_id = \"L5\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = 1\n ticket2.type = \"replace\"\n ticket2.phage_id = \"Trixie\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 1)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)", "def test_identify_duplicates_2(self):\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = \"none\"\n ticket1.type = \"replace\"\n ticket1.phage_id = \"none\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = \"none\"\n ticket2.type = \"replace\"\n ticket2.phage_id = \"none\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)", "def only_once(self) -> bool:\n return self.times == 1", "def test_duplicates():\n\n conn = psycopg2.connect(host=\"sculptor.stat.cmu.edu\", database=c.DB_USER,\n user=c.DB_USER, password=c.DB_PASSWORD)\n cur = conn.cursor()\n cur.execute(\"\"\" SELECT COUNT(CONCAT(song_title, ' ', artist_name)) \n FROM songs \"\"\")\n count1 = cur.fetchone()[0]\n cur.execute(\"\"\" SELECT COUNT(DISTINCT CONCAT(song_title, ' ', artist_name))\n FROM songs \"\"\")\n count2 = cur.fetchone()[0]\n assert count1-count2 == 0", "def test_identify_duplicates_1(self):\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = \"replace\"\n ticket1.phage_id = \"Trixie\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = 2\n ticket2.type = \"replace\"\n ticket2.phage_id = \"L5\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)", "def duplicated(self, **kwargs):\n return DataFrameDefault.register(pandas.DataFrame.duplicated)(self, **kwargs)", "def is_duplicate(self, url):\n dupl_check_sql = '''\n SELECT url FROM {} WHERE url=?\n '''.format(\n self.tablename\n )\n with self.conn:\n return self.conn.execute(dupl_check_sql, (url,)).fetchone()", "def test_duplicate_ids():\n assert query_row(db_conf, 'osm_buildings', 51001)['type'] == 'way'\n assert query_row(db_conf, 'osm_buildings', -51001)['type'] == 'mp'\n assert query_row(db_conf, 
'osm_buildings', 51011)['type'] == 'way'\n assert query_row(db_conf, 'osm_buildings', -51011)['type'] == 'mp'", "def _check_duplicate_trans(self):\n transactions_set = set(self._transactions)\n return len(transactions_set) == len(self._transactions)", "def worksheet_has_duplicate_keys(self, ws, fn):\n self.is_not_used()\n results = {}\n\n for x in ws.iter_rows(2, ws.max_row, values_only=True): # enumerate our worksheet keys\n key = x[0]\n if key in results: # see if key is already in the dictionary\n results[key] = results[key] + 1 # if yes then increment found counter\n else:\n results[key] = 1 # key wasn't in the dictionary so add it now\n\n for key, value in list(results.items()): # enumerate our keys\n if results[key] == 1: # if value > 1 then it is a duplicate key\n del results[key] # not a duplicate so remove from dictionary\n else:\n results[key] = 'occurrences: ' + str(value)\n\n if len(results.keys()) > 0:\n self.error(\n '[{}] ({}) contains the following duplicate keys in the first column:'.format(fn.upper(), ws.title))\n self.error(str(results))\n return True\n else:\n return False", "def contains_duplicate_full_slow_set(self, nums: List[int]) -> bool:\n return len(nums) != len(set(nums))", "def has_new_entry(self):\n if self.new_entry:\n self.new_entry -= 1\n return True", "def IsDuplicate(self, header, payload_string, cur_time): # pylint: disable=unused-argument\n last_seq = self._recv_seq_nums[(header.source, header.type)]\n last_time = self._recv_times[(header.source, header.type)]\n cur_seq = header.sequence\n\n # Sequence numbers expire after maximum latency.\n if cur_time - last_time < aio_header.AIO_EXPIRATION_TIME_US * 1e-6:\n # Expected duplication.\n if cur_seq == last_seq:\n return True\n # Out of order.\n if (cur_seq - last_seq) % 2**16 > aio_header.AIO_ACCEPTANCE_WINDOW:\n return True\n return False", "def test_max_number_of_records(self):\n self._config['Number of examples'] = '2'\n result = self._gen.generate(\n example=self._example,\n model=self._model,\n dataset=self._dataset,\n config=self._config)\n self.assertLen(result, 2)", "def try_create_uniqe_title(self,title,owner):\n if self.valid_title(title):\n for i in range (1,20):\n new_title=title+\"_\"+str(i)\n if self.unique_title(new_title,owner):\n return new_title\n return False\n else:\n return False", "def _get_duplicate_xml_record_id(self, records):\n all_records = {}\n for record in records:\n record_id = \"%s/%s_noupdate_%s\" % (\n record.attrib.get('section', ''),\n record.attrib.get('id', ''),\n record.getparent().attrib.get('noupdate', '0'),\n )\n all_records.setdefault(record_id, []).append(record)\n # Remove all keys which not duplicated\n records = {}\n for key, items in all_records.items():\n if not len(items) < 2:\n records[key] = items\n return records", "def test_create_replicated_mapping_file(self):\r\n # 3 replicates, with two extra samples in the mapping file.\r\n obs = qiime.simsam.create_replicated_mapping_file(self.map_f, 3,\r\n self.otu_table.SampleIds)\r\n self.assertEqual(obs, exp_rep_map_lines)\r\n\r\n # Must specify at least one replicate.\r\n self.assertRaises(ValueError,\r\n qiime.simsam.create_replicated_mapping_file, self.map_f, 0,\r\n self.otu_table.SampleIds)", "def is_real_dupe(dupe_lst):\n if len(dupe_lst) >= 10:\n return True\n first_composer = dupe_lst[0]['composer']\n\n if len(dupe_lst) > 2:\n for d in dupe_lst:\n if d['composer'] != first_composer:\n return True\n return False", "def __delete_duplicates(self):\n log = logging.getLogger()\n log.debug(\"\\n---> Duplicate 
check <---\")\n\n chromosomes = list(set(self.chromosomes))\n diff = self.size - len(chromosomes)\n\n if diff > 0:\n log.debug(\"---> Duplicate(s) found! <---\")\n for i in range(diff):\n chromosomes.append(\n Chromosome(self.__generate_random_gene_sequence(),\n self.environment))\n else:\n log.debug(\"---> No duplicates found! <---\")\n\n self.chromosomes = chromosomes", "def _check_duplicate_notes(self, tokens, curr_note, step) -> bool:\n same_note_cnt = 0\n idx = step - 3\n while idx > 0:\n prev_note = self._get_num(self.tgt_dict.string(tokens[0, idx : idx + 1]))\n if prev_note != curr_note:\n break\n same_note_cnt += 1\n idx -= 4\n\n if same_note_cnt > _config.PitchPara.Max_Same_Pitch.value:\n return True\n return False", "def test_check_bc_duplicates_var_len_no_dupes(self):\r\n\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGTA', 'AAAA', '1', 's1&data'],\r\n ['s2', 'ACGT', 'TAAA', '2', 's2_data']]\r\n errors = []\r\n\r\n errors = check_bc_duplicates(header,\r\n mapping_data,\r\n errors,\r\n has_barcodes=True,\r\n variable_len_barcodes=True,\r\n added_demultiplex_field=None)\r\n\r\n # combination of primer seq and barcodes to match largest barcode\r\n # present is ACGTA and ACGTT, so should not get a duplicate hit.\r\n expected_errors = []\r\n\r\n self.assertEqual(errors, expected_errors)", "def duplicates_existing_address(self, new_address):\n\n addresses = Address.objects.filter(customer=self.user)\n\n for address in addresses:\n match = True\n for field in self.cleaned_data:\n value = self.cleaned_data[field]\n address_value = getattr(address, field)\n if value != address_value:\n match = False\n break\n if match:\n self.set_most_current_address(address)\n return False\n\n else:\n return True", "def isduplicate(a, b):\n db = bibtexparser.loads(a+'\\n'+b)\n e1, e2 = db.entries\n refs = Biblio()\n return refs.eq(e1, e2)", "def duplicated(list):\n u, c = np.unique(list, return_counts=True)\n dup = u[c > 1]\n return dup", "def check_for_duplicate_phone_numbers(d):\n\n print('# This function is under maintenance. Please try again later.')\n return d", "def _raise_if_duplicates(counts: Dict[str, int]) -> None:\n duplicates: List[str] = []\n for nickname, count in counts.items():\n if count > 1:\n duplicates.append(nickname)\n if len(duplicates) > 0:\n # TODO This is not always nickname\n raise ValueError(f'\\'nickname\\' not unique {duplicates}')", "def test_check_bc_duplicates_var_len_dupes(self):\r\n\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGTA', 'AAAA', '1', 's1&data'],\r\n ['s2', 'ACGT', 'AAAA', '2', 's2_data']]\r\n errors = []\r\n\r\n errors = check_bc_duplicates(header,\r\n mapping_data,\r\n errors,\r\n has_barcodes=True,\r\n variable_len_barcodes=True,\r\n added_demultiplex_field=None)\r\n\r\n # Barcode 1 is the largest, with 5 nts, is sequence ACGTA. 
When the\r\n # last base at 5' end of primer is added to barcode 2, there is a\r\n # duplicate, as this is also ACGTA.\r\n expected_errors = [\r\n 'Duplicate barcode and primer fragment sequence ACGTA found.\\t1,1',\r\n 'Duplicate barcode and primer fragment sequence ACGTA found.\\t2,1']\r\n\r\n self.assertEqual(errors, expected_errors)", "def try_create_uniqe_title(self,title,plan_id):\n if self.valid_title(title):\n for i in range (1,20):\n new_title=title+\"_\"+str(i)\n if self.unique_title(new_title,plan_id):\n return new_title\n return False\n else:\n return False", "def check_sparkdf_find_dupes(sparkdf,columns):\n\n\treturn sparkdf.groupBy(columns).count().where('count>1').sort('count', ascending=False)", "def remove_duplicates(self) -> bool:\n return self._remove_duplicates", "def isduplicate(self, a, b):\n db = bibtexparser.loads(a+'\\n'+b)\n e1, e2 = db.entries\n refs = Biblio(similarity=self.similarity)\n return refs.eq(e1, e2)", "def _is_duplicate (asin, current_list):\n\n dup = False\n for m in current_list:\n try:\n if unicode(asin) == m['sku']:\n dup = True\n break\n except KeyError:\n pass\n return dup", "def duplicate_ages(self):\n if len(self.models) > 1:\n for i in range(len(self.models)-1):\n if self.models[i].glb[iage] == self.models[i+1].glb[iage]:\n return [True, self.models[i].name, self.models[i+1].name]\n return [False,]\n elif len(self.models) == 1:\n return [True, self.models[0].name, self.models[0].name]", "def is_unique(self, field):\n return field.scheme.is_unique", "def find_duplicates(lst):\n \"*** YOUR CODE HERE ***\"\n return len( set(lst) ) != len(lst)", "def is_generate_per_split(self):\n return True", "def check_for_duplicate_TISK(df_dict_new) -> int:\n return int(df_dict_new[\"DM_duplicate_TISK\"].iloc[0, 0])", "def test_check_for_existing_reaction_keeps_identical_reactions_with_duplicate_flag(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n spcA = Species().from_smiles('[H]')\n spcB = Species().from_smiles('C=C[CH2]C')\n spcC = Species().from_smiles('C=C=CC')\n spcD = Species().from_smiles('[H][H]')\n spcA.label = '[H]'\n spcB.label = 'C=C[CH2]C'\n spcC.label = 'C=C=CC'\n spcD.label = '[H][H]'\n spcB.generate_resonance_structures()\n\n cerm.add_species_to_core(spcA)\n cerm.add_species_to_core(spcB)\n cerm.add_species_to_core(spcC)\n cerm.add_species_to_core(spcD)\n\n reaction_in_model = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Csd', 'H'],\n duplicate=True)\n reaction_in_model.reactants.sort()\n reaction_in_model.products.sort()\n\n reaction_to_add = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Cs12345', 'H'],\n duplicate=True)\n cerm.add_reaction_to_core(reaction_in_model)\n cerm.register_reaction(reaction_in_model)\n\n found, rxn = cerm.check_for_existing_reaction(reaction_to_add)\n\n self.assertFalse(found, 'check_for_existing_reaction failed to identify duplicate template reactions')", "def case_duplicate(item):\n\n data = item.data\n case_number = data.get(\"case_number\")\n person_id = data.get(\"person_id\")\n\n table = item.table\n if case_number:\n query = (table.case_number == case_number) & \\\n (table.deleted != True)\n else:\n disease_id = data.get(\"disease_id\")\n if person_id and disease_id:\n query = (table.disease_id == disease_id) & \\\n (table.person_id == person_id) & \\\n (table.deleted != True)\n else:\n return\n\n duplicate = current.db(query).select(table.id,\n table.person_id,\n 
limitby=(0, 1)).first()\n if duplicate:\n item.data.person_id = duplicate.person_id\n item.id = duplicate.id\n item.method = item.METHOD.UPDATE", "def check_for_duplicate_subject_identifier(self):\n pass", "def has_duplicates(list):\n for i in list:\n if list.count(i) > 1:\n return True\n else:\n return False", "def isduplicate(self, a, b):\n open(self.mybib, 'w').write(a)\n open(self.otherbib, 'w').write(b)\n res = sp.call('papers add {} --bibtex {} --update-key --mode r --debug'.format(self.otherbib, self.mybib), shell=True)\n return res != 0", "def line_frame_number_unique(self, line):\n hash_code = get_parent_hash(line)\n if hash_code == \"p\":\n return True\n\n # this is a pythonic way of doing\n if self.find_list_for_new_line(line) is None:\n return True\n\n return False", "def prepare_duplication(self):\n for field in self.fields:\n ofield = self.fields[field]\n\n if self.duplicate:\n if ofield.primary_key:\n self.exclude_field(field)\n continue\n\n if not self.auto_fields:\n # add others if needed\n if hasattr(ofield, 'auto_now') or \\\n hasattr(ofield, 'auto_now_add'):\n if ofield.auto_now or ofield.auto_now_add:\n self.exclude_field(field)\n continue", "def test_last_value_replicated(self):\n input_ = [\n self.indicator_record(date=datetime.date(2006, 9, 9),\n end_date=datetime.date(2006, 10, 9),\n value=0.1576),\n self.indicator_record(date=datetime.date(2006, 9, 10),\n end_date=datetime.date(2006, 10, 10),\n value=0.1890),\n self.indicator_record(date=datetime.date(2006, 9, 11),\n end_date=datetime.date(2006, 10, 11),\n value=0.2244),\n ]\n output = self.expander._daily_three_field_indicator_expander(input_)\n\n last_value_copied = [input_[-1].value == record.value for record in output[2:]]\n\n self.assertTrue(all(last_value_copied))", "def is_duplicate(kml, collection):\n\t\n\tresults = [ i for i in collection.find({'date': kml['date']}) ]\n\tif results:\n\t\tprint('\\nDuplicate found! 
%s\\n' % item)\n\t\treturn True\n\telse:\n\t\treturn False", "def tie_exists(self):\n return len(self.marks) == 9", "def duplicates_skipped(self) -> int:\n with self.lock():\n return self._duplicates_skipped", "def is_unique(x):\n return len(set(x)) == len(x)", "def check_unique(self):\n pass", "def handle_duplicates(self, database):\n number_of_duplicates = 0\n number_of_merged = 0\n if not database.session:\n logger.error(\"no database session\")\n return (number_of_duplicates, number_of_merged)\n\n # return if this workout already has been checked\n if self.is_duplicate_with or self.manual_check_required_with:\n logger.debug(\"dup check - no check, since this workout is marked: {}\".format(self))\n return (number_of_duplicates, number_of_merged)\n\n # return if this workout does not have start_time set, since the following checks are based on it\n if not self.start_time or not self.duration_sec:\n return (number_of_duplicates, number_of_merged)\n\n # potential duplicate if time is overlapping\n # this workout |-----------------|\n # 1st potential duplicate in db |-----------------|\n # 2nd potential duplicate in db |------------------------|\n # 3rd potential duplicate in db |----------------|\n # 4th potential duplicate in db |---------|\n # (Remark to line 2 of 1st filter: needed to use database functions, \n # because modifiers like timedelta do not work with sqlalchemy sql attributes)\n # TODO handle timezones (needed for sqlite strftime)\n duplicates = database.session.query(Workout)\\\n .filter(or_(and_(Workout.start_time < self.start_time,\n func.strftime('%s', Workout.start_time, 'utc') + Workout.duration_sec >= self.start_time.timestamp()),\n and_(Workout.start_time >= self.start_time,\n Workout.start_time < (self.start_time + datetime.timedelta(seconds=int(self.duration_sec))))))\\\n .filter(Workout.is_duplicate_with == None)\\\n .filter(Workout.manual_check_required_with == None)\\\n .all()\n\n if len(duplicates) <= 1: \n return (number_of_duplicates, number_of_merged)\n\n # find overlapping workouts of different sports -> set manual_check_required_with\n for duplicate in duplicates:\n if duplicate.sport_id != self.sport_id:\n self.manual_check_required_with = duplicate.id\n logger.debug(\"dup check - workout marked to be checked: {}\".format(duplicate))\n duplicates.remove(duplicate)\n if len(duplicates) <= 1: \n return (number_of_duplicates, number_of_merged)\n\n # find overlapping workouts of same sports (they are duplicate workouts) -> now find the leading workout\n leading_workout = None\n # Step 1: if one of the duplicates is a previously merged one, use it as the leading workout\n for duplicate in duplicates:\n if duplicate.source and duplicate.source == \"MERGED WORKOUT\":\n leading_workout = duplicate\n logger.debug(\"Found leading workout in step 1: {}\".format(leading_workout))\n break\n # Step 2: else if one of the duplicates is from Zwift, prefer it as the leading workout\n if not leading_workout:\n for duplicate in duplicates:\n if duplicate.name and \"Zwift\" in duplicate.name:\n leading_workout = duplicate\n logger.debug(\"Found leading workout in step 2: {}\".format(leading_workout))\n break\n # Step 3: else if one of the duplicates is a Garmin import, prefer it as the leading workout\n if not leading_workout:\n for duplicate in duplicates:\n if duplicate.source and \"Garmin\" in duplicate.source:\n leading_workout = duplicate\n logger.debug(\"Found leading workout in step 3: {}\".format(leading_workout))\n break\n # Step 4: else use this workout as the 
leading workout\n if not leading_workout:\n leading_workout = self\n logger.debug(\"Found leading workout in step 4: {}\".format(leading_workout))\n\n # create a new workout that will be treated as the leading one. Mark the duplicates \n if leading_workout.source == \"MERGED WORKOUT\":\n merged_workout = leading_workout\n else:\n merged_workout = Workout(source=\"MERGED WORKOUT\", external_id=datetime.datetime.now().timestamp())\n number_of_merged += 1\n merged_workout._merge_attributes(leading_workout)\n logger.debug(\"dup check - merged workout with leading: {}\".format(merged_workout))\n merged_workout.add(database)\n leading_workout.is_duplicate_with = merged_workout.id\n number_of_duplicates += 1\n\n for duplicate in duplicates:\n if duplicate is leading_workout:\n # already merged above\n continue\n if duplicate.is_duplicate_with == merged_workout.id:\n # already merged\n continue\n merged_workout._merge_attributes(duplicate)\n logger.debug(\"dup check - merged workout duplicate: {}\".format(merged_workout))\n duplicate.is_duplicate_with = merged_workout.id\n number_of_duplicates += 1\n logger.debug(\"dup check - duplicate workout marked: {}\".format(duplicate))\n\n return (number_of_duplicates, number_of_merged)", "def has_duplicates_set(L):\r\n return len(L) != len(set(L))", "def test_no_duplicates(self):\n with Historical_ROAs_Table() as t:\n sql = f\"SELECT DISTINCT({','.join(t.columns[:-1])}) FROM {t.name}\"\n distinct = len(t.execute(sql))\n sql = f\"SELECT * FROM {t.name}\"\n assert len(t.execute(sql)) == distinct", "def check_repeat(db, record):\n models = [TechRepublicData, SecurityNewsData, PyjobData, RedditData]\n temp = db.query(*models)\n\n for model in models:\n if temp.filter(model.title == record.title).count():\n return True", "def clean_duplicated_identifiers(rows):\n\n logger.info('Cleaning duplicates')\n unique_identifiers = []\n c = 0\n for row in rows:\n c += 1\n idf = row['identifier']\n logger.info(f'Searching duplicates {c} {idf}')\n if idf not in unique_identifiers:\n unique_identifiers.append(idf)\n yield row\n else:\n row['is_duplicate'] = True\n logger.info(f'{idf} is duplicated')\n yield row", "def _check_duplicate_xml_record_id(self):\n self.msg_args = []\n xml_records = []\n for fname, section in self._get_manifest_referenced_files().items():\n if os.path.splitext(fname)[1].lower() != '.xml':\n continue\n fname = os.path.join(self.module_path, fname)\n for xml_record in self.get_xml_records(fname):\n xml_record.attrib['section'] = section\n xml_records.append(xml_record)\n for name, fobjs in \\\n self._get_duplicate_xml_record_id(xml_records).items():\n self.msg_args.append((\n \"%s:%d\" % (os.path.relpath(fobjs[0].base, self.module_path),\n fobjs[0].sourceline),\n name,\n ', '.join([os.path.relpath(fobj.base, self.module_path) +\n ':' + str(fobj.sourceline)\n for fobj in fobjs[1:]]),\n ))\n if self.msg_args:\n return False\n return True", "def is_new_adjustment_charge_created(self):\n current_number_of_records = self.get_number_of_adjustment_charges_in_the_grid()\n if int(current_number_of_records) > int(self.number_of_adjustment_records_before_create):\n return True\n else:\n return False", "def n_replicates(self):\n return self.data.n_replicates.values", "def test_check_variable_len_bcs_dups(self):\r\n\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGTA', 'AAAA', '1', 's1&data'],\r\n ['s2', 'ACGT', 'TAAA', '2', 's2_data']]\r\n errors = []\r\n\r\n errors = 
check_variable_len_bcs_dups(header,\r\n mapping_data,\r\n errors)\r\n\r\n # combination of primer seq and barcodes to match largest barcode\r\n # present is ACGTA and ACGTT, so should not get a duplicate hit.\r\n expected_errors = []\r\n\r\n self.assertEqual(errors, expected_errors)\r\n\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGTA', 'AAAA', '1', 's1&data'],\r\n ['s2', 'ACGT', 'AAAA', '2', 's2_data']]\r\n errors = []\r\n\r\n errors = check_variable_len_bcs_dups(header,\r\n mapping_data,\r\n errors)\r\n\r\n # Barcode 1 is the largest, with 5 nts, is sequence ACGTA. When the\r\n # last base at 5' end of primer is added to barcode 2, there is a\r\n # duplicate, as this is also ACGTA.\r\n expected_errors = [\r\n 'Duplicate barcode and primer fragment sequence ACGTA found.\\t1,1',\r\n 'Duplicate barcode and primer fragment sequence ACGTA found.\\t2,1']\r\n\r\n self.assertEqual(errors, expected_errors)", "def get_unique(self):\n return self.serie.nunique()", "def find_duplicates():\n return AppServer.service.find_duplicated_files()", "def resolve_duplicates(df, verbose=1, **kargs):\n dfx = []\n codes_multirow = []\n # criterion = kargs.get('criterion', 'length')\n\n N0 = df.shape[0]\n col_value = LoincMTRT.col_value\n\n for code, dfe in df.groupby([LoincMTRT.col_code, ]): \n n = dfe.shape[0]\n\n if n == 1: \n dfx.append(dfe)\n else: \n codes_multirow.append(code)\n \n # --- determine which row to use\n\n col_new = 'length'\n assert not col_new in dfe.columns\n dfe[col_new] = dfe[col_value].apply(len)\n dfe = dfe.loc[dfe[col_new]==np.max(dfe[col_new])].iloc[[0]] # use double bracket to keep as dataframe\n \n assert dfe.shape[0] == 1\n dfx.append(dfe)\n\n df = pd.concat(dfx, ignore_index=True)\n print(\"(resolve_duplicates) sample size before/after: {} -> {}\".format(N0, df.shape[0]))\n\n return df", "def is_unique(self, id, items):\r\n copies = 0\r\n for i in items:\r\n if type(i) is dict:\r\n if i['id'] == id:\r\n copies = copies + 1\r\n else:\r\n if i.id == id:\r\n copies = copies + 1\r\n if copies >= 2:\r\n return False\r\n else:\r\n return True", "def is_duplicates(trajs):\n if len(trajs) < 2:\n return False \n for j in range(len(trajs)-1):\n for i in range(j+1, len(trajs)):\n R = (trajs[i].get_slice()[:,:2]==trajs[j].get_slice()[:,:2])\n if isinstance(R, bool):\n if R:\n return True \n elif R.all():\n return True \n else:\n pass\n return False", "def _check_duplicate_id_csv(self):\n all_csv_ids = []\n self.msg_args = []\n for csv_file_rel in self.filter_files_ext('csv', relpath=True):\n csv_file = os.path.join(self.module_path, csv_file_rel)\n if os.path.basename(csv_file) == 'ir.model.access.csv':\n all_csv_ids.extend(self.get_field_csv(csv_file))\n duplicated_ids_csv = self.get_duplicated_items(all_csv_ids)\n for duplicated_id_csv in duplicated_ids_csv:\n self.msg_args.append((csv_file_rel, duplicated_id_csv))\n if duplicated_ids_csv:\n return False\n return True", "def log_suppression(self, timestamp):\n self.repeats += 1\n if timestamp > self.timestamp:\n self.timestamp = timestamp\n self.save()", "def _check_duplicates(self):\n # check variables\n counter = Counter(self.variables())\n duplicates = [key for key, value in counter.items() if value > 1]\n if duplicates != []:\n raise DuplicateVariables(duplicates)\n\n # check parameters\n counter = Counter(self.parameters())\n duplicates = [key for key, value in counter.items() if value > 1]\n if duplicates != []:\n raise 
DuplicateParameters(duplicates)", "def has_duplicates_dict(L):\r\n unique = {}\r\n for e in L:\r\n if e in unique:\r\n return True\r\n unique[e] = 0\r\n return False", "def contains_duplicate(self, nums: List[int]) -> bool:\n if not nums:\n return\n\n nums.sort()\n\n if len(nums) == 1:\n return False\n\n for i in range(1, len(nums)):\n if nums[i - 1] == nums[i]:\n return True\n return False" ]
[ "0.675923", "0.63455653", "0.6291391", "0.5996474", "0.59781253", "0.5973371", "0.59171003", "0.5903125", "0.5873092", "0.58724254", "0.5841418", "0.58190286", "0.5798888", "0.5789265", "0.5784065", "0.57638514", "0.5761442", "0.57307065", "0.5723805", "0.5719764", "0.57114965", "0.5710191", "0.569416", "0.5692535", "0.5682265", "0.5623792", "0.5603852", "0.55927265", "0.5586643", "0.558243", "0.5562996", "0.55500257", "0.5507584", "0.5486521", "0.54840857", "0.5483224", "0.54698604", "0.54648685", "0.5454467", "0.5452944", "0.54489857", "0.54465854", "0.54456663", "0.5439426", "0.54254144", "0.54210484", "0.5419223", "0.54141474", "0.5409246", "0.5404819", "0.5401974", "0.5382877", "0.53723", "0.5364783", "0.53621864", "0.53531885", "0.5352737", "0.5346413", "0.53275156", "0.53252363", "0.5321689", "0.5317793", "0.53176206", "0.53142715", "0.5307754", "0.5303955", "0.53030527", "0.530241", "0.53001916", "0.52968276", "0.5289544", "0.5279963", "0.5278695", "0.5275937", "0.5273651", "0.52649647", "0.52647114", "0.5264621", "0.52640676", "0.52628607", "0.52626544", "0.5252148", "0.5251901", "0.5247686", "0.5240119", "0.52337474", "0.52321076", "0.52141273", "0.5212495", "0.5203045", "0.5201696", "0.5200495", "0.5197135", "0.51963705", "0.5194943", "0.519377", "0.5184501", "0.5183153", "0.51828825", "0.5174745" ]
0.7407912
0
Load data if it has already been created; create it otherwise.
def load_data(): if 'data' not in os.listdir('.'): os.mkdir('data') if 'id_to_word.pkl' not in os.listdir('data'): print('Loading data...') (x_train, y_train), (x_val, y_val) = imdb.load_data(num_words=max_features, skip_top=20, index_from=3) word_to_id = imdb.get_word_index() word_to_id ={k:(v+3) for k,v in word_to_id.items()} word_to_id["<PAD>"] = 0 word_to_id["<START>"] = 1 word_to_id["<UNK>"] = 2 id_to_word = {value:key for key,value in word_to_id.items()} print(len(x_train), 'train sequences') print(len(x_val), 'test sequences') print('Pad sequences (samples x time)') x_train = sequence.pad_sequences(x_train, maxlen=maxlen) x_val = sequence.pad_sequences(x_val, maxlen=maxlen) y_train = np.eye(2)[y_train] y_val = np.eye(2)[y_val] np.save('./data/x_train.npy', x_train) np.save('./data/y_train.npy', y_train) np.save('./data/x_val.npy', x_val) np.save('./data/y_val.npy', y_val) with open('data/id_to_word.pkl','wb') as f: pickle.dump(id_to_word, f) else: x_train, y_train, x_val, y_val = np.load('data/x_train.npy'),np.load('data/y_train.npy'),np.load('data/x_val.npy'),np.load('data/y_val.npy') with open('data/id_to_word.pkl','rb') as f: id_to_word = pickle.load(f) return x_train, y_train, x_val, y_val, id_to_word
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_data(self) -> None:", "def load_data(self):", "def load_data(self):\n raise NotImplementedError()", "def _load(self):\n if self.file_path.exists():\n with open(self.file_path) as fid:\n self.data = json.load(fid)", "def _load_data(self):\n if self._api_response.status_code == 200:\n self._dataset = self._api_response.json()\n self._fill_day_dicts()", "def _load(self, create):\n if not self.db.has_key('size'):\n if create:\n # It's a new database, initialize the special keys\n self.db['head'] = 0\n self.db['count'] = 0\n self.db['size'] = self.newSize\n\n # Cache the special keys\n self.head = self.db['head']\n self.count = self.db['count']\n self.size = self.db['size']\n self._loaded = True", "def _load_training_data(self):\n self._save_training_data()", "def load_data(self, data):\n self.data = data\n self.validate()", "def ensure_dataset_loaded(self, name):\n if name not in self.datasets:\n print(f'Loading dataset \"{name}\"')\n pd_data = pd.read_excel(self.datafiles[name])\n data = pd.DataFrame.to_dict(pd_data, 'records')\n self.datasets[name] = data", "def post_load(self, data):\n return data", "def _load_test_data(self):\n self._save_test_data()", "def load_data(self):\n if self.debug:\n print(\"Loading data\")", "def load_data(self):\n (\n self._market_status,\n self._selection_status,\n self._available_to_back,\n self._available_to_lay,\n self._traded_volume\n ) = _load_market_data(self.zip_file)\n\n self.data_is_loaded = True", "def load(self):\n if not self.exist:\n self.create()\n\n with open(self.file_path, encoding=Config.ENCODING) as file:\n self.data = json.load(file)", "def _load_data(self):\n\n # This allows a simulated dataset to use the same constructor.\n if self.input_file is None:\n return\n\n logging.info(f\"Loading data from file {self.input_file}\")\n\n # Load the dataset.\n if os.path.isdir(self.input_file):\n self.data = get_matrix_from_mtx(self.input_file)\n else:\n self.data = get_matrix_from_h5(self.input_file)", "def _loadData(self, data):\n Movie._loadData(self, data)\n PlexHistory._loadData(self, data)", "def load_data(self):\n self.tif_file = self._find_tif_file()\n if self.with_labeling is not None:\n self.colabel_file = self._find_colabeled_file()\n self.colabel_stack = self._load_colabeled_img()\n self.dff, self.indices = self._populate_dff_data()\n self.loaded = True", "def create(self):\n\t\tif self.isInitialized():\n\t\t\tself.Loaded = self.loader.create()", "def data_loaded_check(self):\n return True", "def _load_data(self):\n logging.warning('-> loading EMPTY data...')", "def test_create_data(self):\n process = Process.objects.filter(slug=\"test-min\").latest()\n data = Data.objects.create(\n name=\"Test data\",\n contributor=self.contributor,\n process=process,\n )\n\n data.refresh_from_db()\n self.assertEqual(data.status, Data.STATUS_DONE)", "def load_data(self):\n super(MudderyObjectCreater, self).load_data()\n \n data = self.get_data_record()\n if not data:\n return\n \n # set common object's info\n self.obj_list = {}\n\n for obj in data.obj_list.split(\",\"):\n obj_key = \"\"\n number = 0\n arg = obj.split(\":\", 1)\n if len(arg) == 1:\n obj_key = arg[0]\n number = 1\n elif len(arg) >= 2:\n obj_key = arg[0]\n number = int(arg[1])\n\n self.obj_list[obj_key] = number", "def load_data(data):\n\n # Agencies\n name = data['name']\n slug = Agency.slug_for(name)\n\n a, created = Agency.objects.get_or_create(slug=slug, name=name)\n\n a.abbreviation = data['abbreviation']\n a.description = data.get('description')\n a.keywords = 
data.get('keywords')\n a.common_requests = data.get('common_requests', [])\n a.no_records_about = data.get('no_records_about', [])\n\n # Only has a single, main branch/office\n if len(data['departments']) == 1:\n dept_rec = data['departments'][0]\n contactable_fields(a, dept_rec)\n\n a.save()\n add_request_time_statistics(data, a)\n\n # Offices\n if len(data['departments']) > 1:\n for dept_rec in data['departments']:\n if dept_rec.get('top_level'):\n # This is actually an agency\n sub_agency_name = dept_rec['name']\n sub_agency_slug = Agency.slug_for(sub_agency_name)\n\n sub_agency, created = Agency.objects.get_or_create(\n slug=sub_agency_slug, name=sub_agency_name)\n sub_agency.parent = a\n\n abbreviation = build_abbreviation(sub_agency_name)\n sub_agency.abbreviation = abbreviation\n sub_agency.description = dept_rec.get('description')\n sub_agency.keywords = dept_rec.get('keywords')\n sub_agency.common_requests = dept_rec.get(\n 'common_requests', [])\n sub_agency.no_records_about = dept_rec.get(\n 'no_records_about', [])\n contactable_fields(sub_agency, dept_rec)\n sub_agency.save()\n add_request_time_statistics(dept_rec, sub_agency)\n else:\n # Just an office\n office_name = dept_rec['name']\n office_slug = Office.slug_for(office_name)\n full_slug = slug + '--' + office_slug\n\n o, created = Office.objects.get_or_create(\n agency=a, slug=full_slug)\n\n o.office_slug = office_slug\n o.name = office_name\n contactable_fields(o, dept_rec)\n o.save()\n add_request_time_statistics(dept_rec, a, o)", "def data(self):\n file_name = join(PARENT_BASE_DIR, '.files', 'data.data.json')\n if isfile(file_name):\n debug(f'{file_name} file is exist.')\n debug(f'try for load {file_name} file ->->->->->->->->->->')\n start_load_file = time()\n with open(file_name, 'r', encoding='utf-8')as file:\n data = file.read()\n data = loads(data)\n debug(f'load file - [runtime: {time() - start_load_file}] <-<-<-<-<-<-<-<-<-<-')\n return data, 'data exist.'\n else:\n debug(f'{file_name} file is not exist.')\n return None, 'data not exist in \"base directory/.files/data.data.json\"'", "def load_data(self):\n\n self._load_train_data()\n self._load_test_data()", "def _loadData(self, data):\n Movie._loadData(self, data)\n PlexSession._loadData(self, data)", "def load():\n\n # Path for the cache-file.\n cache_path = os.path.join(data_dir, \"collisions.pkl\")\n\n # If the DataSet-object already exists in a cache-file\n # then load it, otherwise create a new object and save\n # it to the cache-file so it can be loaded the next time.\n dataset = load_cached(cache_path=cache_path,\n in_dir=data_dir)\n\n return dataset", "def loadData(self, data):\n\n #Grab the guide settings in case we want to use them here (and are not stored in data arg)\n existing_data = self.saveData()\n existing_data.update(data)\n data = existing_data\n\n super(OSSMouthGuide, self).loadData( data )\n\n self.loadAllObjectData(data, \"Control\")\n self.loadAllObjectData(data, \"Transform\")\n\n\n return True", "def build_data_set(self):\n if not self.assert_data_correct():\n self.download_all_data()\n self.unpack_rename_data()\n self.split_data_characters()\n self.clean_data_fragments()\n self.create_font_data()\n if not self.assert_train_augmented():\n self.augment_train_data()\n if not self.assert_style_data_correct():\n self.download_style_data()\n self.unpack_rename_data()", "def load_data(self):\n try:\n self.manager.load()\n except error:\n show_error_message(title='Initialization error!',\n message='File lords.sdb was not found!')\n else:\n 
self.update_widgets_values()", "def load_or_create_db(self):\n try:\n with open(self._filename, 'rb') as f:\n self.db = pickle.load(f)\n except FileNotFoundError:\n pass", "def load_data(self):\n self.data = self.read_var(self.datavar)\n self.test_shape(self.datavar, self.data.shape, 2)", "def __initDataFromImages(self):\n #Check if the local_db exist\n initial_dirs = os.listdir(os.getcwd())\n is_db_empty = False\n if len(os.listdir(self.base_dir)) == 1: #Empty here means no person data\n [images_dir] = os.listdir(self.base_dir)\n is_db_empty = images_dir == cfg.local[\"IMG_DIR\"]\n if cfg.local[\"DEFAULT_IMGS_DIR\"] in initial_dirs and is_db_empty:\n default_path = os.path.join(os.getcwd(), cfg.local[\"DEFAULT_IMGS_DIR\"])\n self.X, self.y = loadDataFromImagesPath(self.detector, default_path)\n self.le = LabelEncoder()\n #Nothing relate to mapping name to dir here, we don't care about\n #This data because of the user doesn't exist in the database\n self.__savePreProcessedData()", "def _load_data(self):\n\n if not self._cache.exists(config.DATAFRAME_SONG_DATA):\n source_path = os.path.join(config.S3_SONG_DATA, 'A/A/A/*.json') # Note: song database is way big, so we get only a slice of it.\n dataframe = self._get_spark_session().read.json(source_path)\n self._cache.set_source(config.DATAFRAME_SONG_DATA, dataframe)", "def _load(self) -> None:\n self.record = self._saved_record\n self.counter = self._saved_counter\n self.current_objects = self._saved_objects", "def _load(self):\n\n # This can happen when the object is not loaded yet\n # Usually when __init__ calls super().__init__()\n # and OrderSource starts initializing the instance attributes\n if not hasattr(self, \"_data\"):\n return\n\n if self._data is None:\n try:\n self._data = self.storage.load(basket=self)\n except BasketCompatibilityError as error:\n msg = _(\"Basket loading failed: Incompatible basket (%s).\")\n messages.error(self.request, msg % error)\n self.storage.delete(basket=self)\n self._data = self.storage.load(basket=self)\n self.dirty = False\n self.uncache()\n return self._data", "def Load(self, path=None, create=False):\n # Clear the data, since we want to load new data, this ensures we dont see the wrong\n # data/path combination in case of an exception on load\n self.data = None\n\n # If we didnt get it as an arg, use our stored path\n if not path:\n path = self.path\n # Else, store the path so we know where the data came from. 
Destroying previous data info\n else:\n self.path = path\n\n\n # If path not a valid file\n if not os.path.isfile(path):\n # If we want to create missing data, create an Empty List and save it\n if create:\n self.data = []\n self.Save()\n\n # Else, no creation so Raise an error\n else:\n raise Exception('Couldnt load Timeline Data object, path is not a file: %s' % path)\n\n # Else, load the data\n else:\n self.data = yaml.load(open(path))\n\n return self.data", "def _loadData(self, data):\n Episode._loadData(self, data)\n PlexHistory._loadData(self, data)", "def load(self) -> None:\n self._load_data()\n self._load_poses()\n self._load_timestamps()", "def load_data(self, read_shelf):\n if read_shelf:\n try:\n # Attempt reading pre-shelved objects first\n self.__read_shelf()\n except Exception as e:\n print(f'Exception while reading the data shelf ({e})')\n # Otherwise, read data from the the json files\n self.__read_json()\n else:\n self.__read_json()", "def prepare_data(self):\n try:\n self.train_dataset = self.datasets['train']\n self.val_dataset = self.datasets['val']\n try:\n self.test_dataset = self.datasets['test']\n except:\n pass\n except Exception as e:\n print('Data was not succesfully prepared:', e)", "def before_dataobj_create(self, dataobj):", "def _loadData(self, data):\n self._data = data\n self.id = utils.cast(int, data.attrib.get('id'))\n self.accountID = utils.cast(int, data.attrib.get('accountID'))\n self.serverId = utils.cast(int, data.attrib.get('serverId'))\n self.machineIdentifier = data.attrib.get('machineIdentifier')\n self.name = data.attrib.get('name')\n self.lastSeenAt = utils.toDatetime(data.attrib.get('lastSeenAt'))\n self.numLibraries = utils.cast(int, data.attrib.get('numLibraries'))\n self.allLibraries = utils.cast(bool, data.attrib.get('allLibraries'))\n self.owned = utils.cast(bool, data.attrib.get('owned'))\n self.pending = utils.cast(bool, data.attrib.get('pending'))", "def check_for_new_data(self):\n return", "def _loadData(self, data):\n self._data = data\n self.createdAt = utils.toDatetime(data.attrib.get('createdAt'))\n self.email = data.attrib.get('email')\n self.friend = utils.cast(bool, data.attrib.get('friend'))\n self.friendlyName = data.attrib.get('friendlyName')\n self.home = utils.cast(bool, data.attrib.get('home'))\n self.id = utils.cast(int, data.attrib.get('id'))\n self.server = utils.cast(bool, data.attrib.get('server'))\n self.servers = self.findItems(data, MyPlexServerShare)\n self.thumb = data.attrib.get('thumb')\n self.username = data.attrib.get('username', '')\n for server in self.servers:\n server.accountID = self.id", "def load_new_data():\n require('settings', provided_by=[production, staging])\n \n maintenance_up()\n load_data()\n maintenance_down()", "def _load_data(self, filename):\n if not os.path.isfile(filename):\n return False\n\n with open(filename) as f:\n data = pickle.load(f)\n if data:\n self.profiles = data['profiles']\n self.user_data = data['user_data']\n self.api_data = data['api_data']\n else:\n return False", "def __post_init__(self):\n # ------------------------------------------------------------ 01\n # if path exists load data dict from it\n # that is sync with contents on disk\n if self.path.exists():\n _hashable_dict_from_disk = \\\n m.FrozenDict.from_yaml(self.path.read_text())\n # update internal dict from HashableDict loaded from disk\n self.__dict__.update(\n _hashable_dict_from_disk.get()\n )\n\n # ------------------------------------------------------------ 02\n # start syncing i.e. 
any updates via __setattr__ will be synced\n # to disc\n self.internal.start_syncing = True", "def preload(self):\n # load the objects\n for otype, fname in self.TYPE2NAME.items():\n if fname:\n path = os.path.join(self.anodir, fname + \".gz\")\n if os.path.isfile(path):\n with gzip.open(path, \"rt\") as handler:\n for line in handler:\n omap = json.loads(line)\n cls = self.TYPE2CLASS[otype]\n item = cls.from_map(omap, self)\n self.caches[otype][item.id] = item", "def _loadData(self, data):\n Episode._loadData(self, data)\n PlexSession._loadData(self, data)", "def load(self):\n settings_path = os.path.join(self.file_path, \"__file_data.json\")\n if os.path.exists( settings_path ):\n self.fileList = simplejson.loads( open( settings_path, 'r' ).read() )\n\n settings_path = os.path.join(self.file_path, \"__user_data.json\")\n if os.path.exists( settings_path ):\n self.userList = simplejson.loads( open( settings_path, 'r' ).read() )", "def load_data(self):\n if not os.path.isfile(\"{}/OFF_data.json\".format(settings.DIR_PATH)):\n self.request_constructor(settings.R_COLLECTION['category'], 'NULL', 'tags')\n self.crawl_data('category')\n i = 0\n for item in self.categories:\n i += 1\n cat = item.get(\"name\")\n self.request_constructor(settings.R_COLLECTION['product'], cat, 'products')\n self.crawl_data('product')\n\n self.data = {\"categories\": self.categories, \"products\": self.products}\n self.save_data('OFF_data.json')\n else:\n with open(\"{}/OFF_data.json\".format(settings.DIR_PATH), 'r') as f:\n self.data = json.load(f)\n self.categories = self.data[\"categories\"]\n self.products = self.data[\"products\"]\n return self.categories, self.products", "def load_and_fix(self):\n # Read in json\n self.read_json()\n\n if self.size_to_load:\n self.data = self.data[:self.size_to_load]\n\n # Add names from database given _bsn:\n self.extend_dataframe_with_personnames()\n\n # Clean rows in the data_frame where the names column is empty - > thus no response from the database\n self.clean_none_response()\n\n # Fix path from A09.pdf to A09.json\n self.fix_path()\n\n # Get the correct names from the database response\n self.parse_names_from_response()\n\n print(\" --- Final Shape Data ---\")\n print(self.data.shape)\n print(list(self.data))\n\n # Save pickled object in ./data map\n self.save_obj(self.data, self.file_name_to_save)", "def _load_data(self):\n if self._name in BALANCE_DATASET:\n _loader = dataset_loaders[self._name]\n xnp, y = _loader()\n\n # Train - Test split\n gen = ShuffleSplit(n_splits=1, random_state=42, test_size=self._test_size).split(xnp)\n train_idx, test_idx = next(gen)\n\n # Train data.\n self.xnp_tr = xnp[train_idx]\n self.y_tr = y[train_idx]\n # Test data.\n self.xnp_ts = xnp[test_idx]\n self.y_ts = y[test_idx]\n\n else:\n _loader = dataset_loaders[self._name]\n xnp, xp, y = _loader()\n # self.xnp, self.xp, self.y = _loader()\n\n # Train - Test split\n gen = ShuffleSplit(n_splits=1, random_state=42, test_size=self._test_size).split(xnp)\n train_idx, test_idx = next(gen)\n\n # Train data.\n self.xnp_tr = xnp[train_idx]\n self.xp_tr = xp[train_idx]\n self.y_tr = y[train_idx]\n # Test data.\n self.xnp_ts = xnp[test_idx]\n self.xp_ts = xp[test_idx]\n self.y_ts = y[test_idx]", "def _reload(self):\n if os.path.exists(self.filename):\n self.data = pd.read_csv(self.filename)\n else:\n self.data = pd.DataFrame(columns=self.unique_keys)\n\n # Set these default values\n # if 'weight_rescale' not in self.data.columns:\n # self.data['weight_rescale'] = 'none'\n # if 'norm' not in 
self.data.columns:\n # self.data['norm'] = 'softmax'\n # if 'update' not in self.data.columns:\n # self.data['update'] = 'all'\n # if 'replay' not in self.data.columns:\n # self.data['replay'] = False\n if 'debug' not in self.data.columns:\n self.data['debug'] = False\n\n # if 'tie' not in self.data.columns:\n # self.data['tie'] = False\n\n if 'update_length' not in self.data.columns:\n self.data['update_length'] = 0\n # for key in self.unique_keys:\n # self.data[key] = np.nan\n # Remaining set to None\n # for k in self.check_keys:\n # if k not in self.data.columns:\n # self.data[k] = None", "def create_data_table(self):\n print(\"CREATE DATA\")\n #data_i = sqlite3.connect('data.db', check_same_thread=False)\n data_i = sqlite3.connect('data::memory:', check_same_thread=False)\n data_cursor = data_i.cursor()\n data_cursor.execute(\"SELECT count(name) FROM sqlite_master WHERE type='table' AND name='localdata'\")\n if data_cursor.fetchone()[0]==1:\n return\n data_cursor.execute(\"\"\"\n CREATE TABLE localdata (\n data_sig text,\n data text,\n checkin_time text,\n owner_verifying_sig text\n )\n \"\"\")\n data_i.commit()\n data_i.close()", "def create(self, data):\n raise NotImplementedError", "def load(self) -> None:\n data = get_dictionary()\n if 'error' in data:\n quit()\n self.data = data", "def load(self):\n #self.df = read_file(\"../data/yelp_academic_dataset_user.json\") #Full Data.\n self.df = read_file(\"../data/user300.json\") #For local machine.\n #self.get_friend_list()\n #self.save_friend_nodes()", "def _init_data(self, data):\n assert type(data) is dict, \"dict expected: %r\" % type(data)\n assert len(data) is 1, \"size of dict should be 1: %r\" % len(data)\n self._name = data.keys()[0]\n self._data = np.asarray(data[self._name])\n self._set = True", "def mk_data(self):\n self.data = self.DEFAULTS.copy()\n\n for template in self.raw_data.get('extends', []):\n template_data = self.load_template(template)\n self.data.update(template_data)\n\n self.data.update(self.raw_data)\n\n str_replace(self.data)\n\n if self.data.get('redirect_stderr'):\n self.data.pop('stderr')", "def _process_data(self):\n assert not hasattr(self, 'changes'), '_process_data called twice.'\n assert hasattr(self, 'errors'), (\n '_process_data not called by is_valid().')\n r_by_t = Collection.resource_by_type\n\n # Create and load collection of new data\n new_collection = Collection()\n for rtype, items in self.data.items():\n resource_cls = r_by_t.get(rtype)\n if resource_cls:\n for seq, json_api_item in enumerate(items):\n item = json_api_item.copy()\n links = item.pop('links', {})\n item.update(links)\n resource = self.load_resource(resource_cls, item)\n resource._seq = seq\n new_collection.add(resource)\n\n # Create native representation of current feature data\n current_collection = Collection(DjangoResourceClient())\n feature_serializer = ViewFeatureSerializer(context=self.context)\n current_feature = feature_serializer.to_representation(self.feature)\n current_extra = current_feature.pop('_view_extra')\n del current_extra['meta']\n\n # Load feature into new and current collection\n current_feature_resource = self.load_resource(\n r_by_t['features'], current_feature)\n current_collection.add(current_feature_resource)\n current_feature.update(self.feature._in_extra)\n current_feature['id'] = str(current_feature['id'])\n resource_feature = self.load_resource(\n r_by_t['features'], current_feature)\n resource_feature._seq = None\n new_collection.add(resource_feature)\n\n # Populate collection of current data\n 
for rtype, items in current_extra.items():\n resource_cls = r_by_t[rtype]\n for item in items:\n resource = self.load_resource(resource_cls, item)\n current_collection.add(resource)\n\n # Add existing items not explicit in PUT content\n # This avoids 'delete' changes\n new_items = new_collection.get_all_by_data_id()\n for data_id, item in current_collection.get_all_by_data_id().items():\n if data_id not in new_items:\n rtype = item._resource_type\n resource = r_by_t[rtype]()\n json_api_rep = item.to_json_api()\n json_api_rep[rtype]['id'] = item.id.id\n resource.from_json_api(json_api_rep)\n resource._seq = None\n new_collection.add(resource)\n\n # Add existing items used in new collection to current collection\n # This avoids incorrect 'new' changes\n existing_items = current_collection.get_all_by_data_id()\n for data_id, item in new_collection.get_all_by_data_id().items():\n if item.id:\n item_id = item.id.id\n int_id = None\n existing_item = existing_items.get(data_id)\n try:\n int_id = int(item_id)\n except ValueError:\n pass\n if int_id and (existing_item is None):\n rtype = item._resource_type\n resource_cls = r_by_t[rtype]\n model_cls, serializer_cls = view_cls_by_name[rtype]\n obj = model_cls.objects.get(id=int_id)\n serializer = serializer_cls()\n data = serializer.to_representation(obj)\n resource = self.load_resource(resource_cls, data)\n current_collection.add(resource)\n\n # Load the diff\n self.changeset = CollectionChangeset(\n current_collection, new_collection)\n assert not self.changeset.changes.get('deleted'), (\n 'Existing items were not added, so deletions found:\\n%s'\n % self.changes['deleted'])", "def loadData(self, file):\n self.data = batchImport(file, self.ps)", "def load_data(self):\n df= self.read_file()\n for row,col in df.iterrows():\n Employeeid = int(col['Empolyeeid'])\n Employee_Name = col['Employee_Name']\n Age = col['Age']\n Salary = col['Salary']\n self.table.put_item(\n Item={\n \"Employeeid\":Employeeid,\n \"Employee_Name\": Employee_Name,\n \"Age\": Age,\n \"Salary\": Salary\n }\n )\n return True", "def _load(self, data):\n raise NotImplementedError(\"Don't know how to load the task\")", "def load_data(self):\n return self._load_data", "def prepare_data(self):\n import subprocess\n # Download coco data set into dir specified by config then /data/coco\n subprocess.call([f\"{get_original_cwd()}/bin/fetch_dataset.sh\", f\"{self.dir}/data/coco\", f\"{get_original_cwd()}\"])\n # subprocess.call([f\"bin/fetch_dataset.sh\", f\"{self.dir}/data/coco\"])\n task = \"instances\" if self.instance else \"person_keypoints\"\n register_coco_instances(\"train\", {}, f\"{self.dir}/data/coco/{task}_train2014.json\",\n f\"{self.dir}/data/coco/train2014\")\n register_coco_instances(\"val\", {}, f\"{self.dir}/data/coco/{task}_minival2014.json\",\n f\"{self.dir}/data/coco/val2014\")\n register_coco_instances(\"test\", {}, f\"{self.dir}/data/coco/{task}_valminusminival2014.json\",\n f\"{self.dir}/data/coco/val2014\")", "def load_cleaned_data(self):\n try:\n self.train = pd.read_pickle('../input/train_clean.pkl')\n self.test = pd.read_pickle('../input/test_clean.pkl')\n except FileNotFoundError:\n self.load_raw_data()", "def __init__(self,path):\n self.path = path\n self.data = {}\n self.hasChanged = False\n #--Load\n if os.path.exists(self.path):\n ins = open(self.path)\n inData = compat.uncpickle(ins)\n self.data.update(inData)", "def db_override_user_data(self):\n util.log(\"Clearing old user data\", util.LogLevel.Info)\n self.db.db_clear_data_user()\n util.log(\"Attempt 
loading user data to database\", util.LogLevel.Info)\n start = time.time()\n # Library\n for card in self.library.values():\n self.db.lib_card_add(card)\n # Tags\n for tag, card_ids in self.tags.items():\n self.db.tag_new(tag)\n for card_id in card_ids:\n self.db.tag_card_add(tag, card_id)\n # Wants\n for list_name, cards in self.wants.items():\n self.db.wants_new(list_name)\n for card in cards:\n self.db.wants_card_add(list_name, card.multiverse_id)\n end = time.time()\n util.log(\"Finished in {}s\".format(str(round(end - start, 3))), util.LogLevel.Info)\n self.push_status(\"User data imported\")", "def generate_data(self):\n self.remove_hdf5_file()\n hdf5_handler = self.create_hdf5_file()\n self.populate_hdf5_file(hdf5_handler, self.dataset)", "def _load_cache(self):\n logger.debug(\"Loading coherence data for %s from cache\", self.w1)\n\n assert self.variant_unit is None, \"Cannot load from cache once variant_unit has been set\"\n with open(self._cache_key) as f:\n self.rows = json.load(f)\n\n self._already_generated = True\n logger.debug(\"Loaded {} rows from cache ({})\".format(len(self.rows), self._cache_key))", "def create_data_base():\n\n\tscript_files = []\n\tjson_files = []\n\t\n\t# get script files list\n\tfor file in os.listdir(\"learned_objects_scripts/\"):\n\t\tif file.endswith(\".script\"):\n\t\t\tscript_files.append(file)\n\n\t# get json files list\n\tfor file in os.listdir(\"object_models/\"):\n\t\tif file.endswith(\".json\"):\n\t\t\tjson_files.append(file)\n\t\n\t# create json file for new objects\n\tmodel_created = False\n\tfor file in script_files:\n\t\tif \"{}.json\".format(file[:-7]) not in json_files:\n\t\t\twith open(\"object_models/{}.json\".format(file[:-7]), 'w') as outfile:\n\t\t\t\tobj_model = object_script_to_model(\"learned_objects_scripts/\" + file)\n\t\t\t\tjson.dump(obj_model, outfile)\n\t\t\t\tmodel_created = True\n\t\t\t\tprint(\"model created for\", file)\n\tif not model_created:\n\t\tprint(\"data base is already up to date\")", "def _create_data():\n tf.logging.info(\"Create records..\")\n train, val, test = util.load_data(data_dir, FLAGS[\"is_aug\"])\n tf.logging.info(\"Dataset size: Train-{} Test-{} Val-{}\".format(len(train), len(test), len(val)))\n return train, val, test", "def load_or_generate_data(self) -> None:\n x = np.linspace(0, 10, self.n_samples).reshape(-1, 1)\n y_sin = np.sin(x * 1.5)\n noise = np.random.randn(*x.shape)\n y = (y_sin + noise).reshape(x.shape[0], 1)\n self.x, self.y = x, y", "def __post_init__(self):\n # Only do this if source_data already exists (not during its own initialization)\n if \"SOURCE_DATA\" in globals():\n for data_field in fields(self):\n setattr(self, data_field.name, getattr(SOURCE_DATA, data_field.name))", "def _pre_construct(self, data):\n logging.info(\"pre constructing (enter)\")\n self.ids = collections.defaultdict(set)\n self.collecting = True\n pre_construct_data = self.construct(data)\n self.collecting = False\n logging.info(\"pre constructing (exit)\")\n return pre_construct_data", "def loads(self, data):\n self._id = data.get('id', -1)\n self._created = data.get('created', 0) # datetime.strptime(data.get('created', '1970-01-01T00:00:00'), '%Y-%m-%dT%H:%M:%S').timestamp()\n self._stage = data.get('stage', 0) # self.stage_from_str(data.get('stage', ''))\n self._dir = data.get('direction', 0) # self.direction_from_str(data.get('direction', ''))\n self._timeframe = data.get('timeframe') # timeframe_from_str(data.get('timeframe', 't'))\n self._expiry = data.get('expiry', 0) # 
datetime.strptime(data.get('expiry', '1970-01-01T00:00:00'), '%Y-%m-%dT%H:%M:%S').timestamp()", "def load_data_pickle(self, load_full=False):\n self.train = pd.read_pickle('../input/train_mod.pkl')\n self.test = pd.read_pickle('../input/test_mod.pkl')\n if load_full:\n self.train_full = pd.read_pickle('../input/train_full_mod.pkl')", "def process_data(self):\n num_records = len(self.records_data)\n for i in range(len(self.keys)):\n student_key = self.keys[i]\n if (i < num_records):\n self._load_student_record(student_key,\n self.records_data[i])", "def _load(self, load_dict):\n self._data_ = load_dict", "def load_dataset(self):\n # Get all the files in the directory\n file_list = self.get_file_list()\n\n # Concatenate the data corresponding to a list of files\n data = self.concatenate_file_data(file_list)\n\n # Shuffle the data and create the training and the validation datasets\n data = self.shuffle_data_dictionary(data)\n self.training_dataset, self.validation_dataset = self.split_data_into_training_and_validation(data)", "def add(self, data, check_exists=True): # pragma: no cover\n raise NotImplementedError", "def prepare_data(self) -> None:\n if (self.root).is_dir():\n logger.info(\"Found the dataset.\")\n else:\n download_and_extract(self.root, DOWNLOAD_INFO)", "def importData():\n #importChallengeDataToDB()\n importTrendingDataToDB()", "def load_data(self):\n @Logger.runtime\n def process_coords():\n \"\"\"\n The placement of locations on our minimap is crucial. Panda3D objects however have a coordinate range from\n -1 to 1 on all axis, meaning that if we read a coordinate of a location from some image processing software\n by hand, we have to transform those coordinates into coordinates Panda would understand. This function does\n just that.\n :return: Normalized coordinates of location coordinates.\n \"\"\"\n split_coords = row[\"map_coord\"].split(',')\n map_x, map_y = [int(i) for i in split_coords]\n map_x_normed = ((map_x*2) / self.MINIMAP_DIM) - 1\n map_y_normed = -(((map_y*2) / self.MINIMAP_DIM) - 1)\n return map_x_normed, map_y_normed\n\n @Logger.runtime\n def process_texture():\n texture_path = Path(\"resource/textures/{}\".format(row[\"texture\"]))\n texture = self.loader.loadTexture(texture_path)\n return texture\n\n # the cylinder is loaded here but it does not yet show up, until it's specifically asked to\n self.scene_3d_model = self.loader.loadModel(self.PATHS[\"3D_SCENE_MODEL\"])\n\n try:\n with open(self.PATHS[\"LOCATIONS_DB\"], \"r\") as l_file:\n data = csv.DictReader(l_file, delimiter=\"|\")\n for row in data:\n id = int(row[\"id\"])\n x, y = process_coords()\n neighbors = [int(neighbor_id) for neighbor_id in row[\"neighbors\"].split(',')]\n texture = process_texture()\n location = Location(id, x, y, neighbors, texture)\n location.reparentTo(self.render2d)\n self.locations.append(location)\n Logger.log_info('The locations_db has been loaded')\n except:\n Logger.error('{} file not found!'.format(self.PATHS[\"LOCATIONS_DB\"]))\n\n self.active_location = self.locations[0]", "def postLoad(self):\n pass", "def _load_data(self):\n self.mapper = Mapper()\n self.mapper.generate_vocabulary(self.review_summary_file)\n self.X_fwd, self.X_bwd, self.Y = self.mapper.get_tensor(reverseflag=True)\n # Store all the mapper values in a dict for later recovery\n self.mapper_dict = dict()\n self.mapper_dict['seq_length'] = self.mapper.get_seq_length()\n self.mapper_dict['vocab_size'] = self.mapper.get_vocabulary_size()\n self.mapper_dict['rev_map'] = self.mapper.get_reverse_map()\n # 
Split into test and train data\n self._split_train_tst()", "def load_data(self, data):\n self._load_raw_data = data", "def prepare_data(self):", "def load_data():\r\n print ('Loadng all the file one time......')\r\n if not os.path.exists('cifar.pkl'):\r\n set_data()\r\n with open('cifar.pkl', 'rb') as cifar_pickle:\r\n data = six.moves.cPickle.load(cifar_pickle)\r\n return data", "def insert_data(self):\n # Make a connexion with a mock database\n self.generate_data_collection()", "async def _process_create_data(self, data: dict) -> dict:\n return self.SCHEMA(data)", "def load(self):\n self.data = pd.read_pickle(self.DATE_PKL)\n self.data.index.name = DATE_COL\n\n for hname, h in self.handlers.items():\n print(\"Loading %s\" % hname)\n cur_out = '../'+h.out_path\n df = pd.read_pickle(cur_out).resample('D').ffill() # make daily and forward fill the values\n if hname in self.data.columns:\n # getting to a distinct column:\n i = 2\n while \"%s_%s\" % (hname, i) in self.data.columns:\n i += 1\n print(\"warning: %s was already in the data set, instead we merged new column as %s\" %\n (hname, hname + '_%s' % i))\n self.data = self.data.join(df, how='left', rsuffix=\"_%s\" % i)\n else:\n self.data = self.data.join(df, how='left')", "def loadData(path,filename):\n\tfilepath = path + filename\n\ttry:\n\t\t#print('try1')\n\t\twith open(filepath+\".pickle\",\"rb\") as handle:\n\t\t\tallVideoData = pickle.load(handle)\n\t\ttry:\n\t\t\tmetadata = allVideoData[0]\n\t\t\tdata = allVideoData[1]\n\t\texcept:\n\t\t\tmetadata = allVideoData\n\t\t\tdata = __initializeData()\n\t\t\tprint(\"WARNING\")\n\t\t\tprint(\"warning: no data attached to metadata, initializing empty set\")\n\t\t\ttime.sleep(1)\n\t\treturn metadata,data\n\texcept:\n\t\tprint('no file {} exists yet'.format(filepath+\".pickle\"))\n\t\tprint('if writeMetadata has already been used, be sure to save it with saveData()')\n\t\ttime.sleep(1)\n\t\tmetadata = False\n\t\treturn metadata,__initializeData()", "def _loadData(self, data):\n Clip._loadData(self, data)\n PlexHistory._loadData(self, data)", "def _load_data(self):\n\n def __correct_car_make(car_make):\n \"\"\" Corrects given make names to a standard make name. 
\"\"\"\n ## define model corrections\n correct_makes = {\n 'chevroelt': 'chevrolet',\n 'chevy': 'chevrolet',\n 'maxda': 'mazda',\n 'mercedes-benz': 'mercedes',\n 'toyouta': 'toyota',\n 'vokswagen': 'volkswagen',\n 'vw': 'volkswagen'\n }\n ## return corrected make\n return correct_makes[car_make] if car_make in correct_makes.keys() else car_make\n\n logger.debug('checking auto-mpg.data.txt')\n if not path.exists('auto-mpg.data.txt'):\n ## file not present, get it\n logger.debug('getting auto-mpg.data.txt')\n self._get_data()\n if not path.exists('auto-mpg.clean.txt'):\n ## file not present, clean it\n self._clean_data()\n \n ## we got the data and we cleaned it\n logger.debug('checking auto-mpg.clean.txt')\n try:\n with open('auto-mpg.clean.txt', 'r') as clean_data:\n logger.debug('auto-mpg.clean.txt exists')\n ## counter for auto objects\n counter = 0\n logger.debug('Parsing auto-mpg.clean.txt into AutoMPG objects')\n for auto_record in csv.reader(clean_data, delimiter= ' ', skipinitialspace= True):\n ## split the car name into 2 tokens\n split = auto_record[8].replace('\\'', '').split(' ', 1)\n ## handle the case for 'subaru'\n if len(split) < 2:\n make = f'{split[0]}'\n auto = Record(auto_record[0], auto_record[6], __correct_car_make(make), '')\n elif len(split) == 2:\n make = f'{split[0]}'\n model = f'{split[1]}'\n auto = Record(auto_record[0], auto_record[6], __correct_car_make(make), model)\n counter += 1\n ## append the auto object\n self.data.append(AutoMPG(auto.make, auto.model, auto.year, auto.mpg))\n except Exception as e:\n logger.info(f'Error occurred: {e}')", "def _load_data(self, event):\n if self.parent is not None:\n wx.PostEvent(self.parent, NewLoadDataEvent())", "def load_models(self):\n logger.info('Loading {name} data'.format(name=self.__class__.__name__))\n for type_name, type_ in self.data_dict.iteritems():\n # An exclude for correlations. Isn't created nor has an ID.\n if type_name == \"correlations_main\":\n continue\n task_response = self.do_task(\n self.types[type_name],\n type_['taskId']\n )\n self.data_dict[type_name]['job_id'] = json.loads(\n task_response.content\n )['JobId']\n logger.info(\n 'Load {name} response: '.format(name=type_name) +\n task_response.content\n )\n\n print(\"Loaded model\")", "def readDataFromFile(self):\n #import pdb; pdb.set_trace()\n if self.wantAnyDbm:\n try:\n if os.path.exists(self.filepath):\n self.data = anydbm.open(self.filepath,'w')\n self.notify.debug('Opening existing anydbm database at: %s.' % \\\n (self.filepath,))\n else:\n self.data = anydbm.open(self.filepath,'c')\n self.notify.debug('Creating new anydbm database at: %s.' % \\\n (self.filepath,))\n except anydbm.error:\n self.notify.warning('Cannot open anydbm database at: %s.' % \\\n (self.filepath,))\n \n else:\n try:\n # Try to open the backup file:\n file = open(self.filepath + '.bu', 'r')\n self.notify.debug('Opening backup pickle data file at %s.' % \\\n (self.filepath+'.bu',))\n # Remove the (assumed) broken file:\n if os.path.exists(self.filepath):\n os.remove(self.filepath)\n except IOError:\n # OK, there's no backup file, good.\n try:\n # Open the real file:\n file = open(self.filepath, 'r')\n self.notify.debug('Opening old pickle data file at %s..' % \\\n (self.filepath,))\n except IOError:\n # OK, there's no file.\n file = None\n self.notify.debug('New pickle data file will be written to %s.' 
% \\\n (self.filepath,))\n if file:\n data = cPickle.load(file)\n file.close()\n self.data = data\n else:\n self.data = {}", "def load_data(filename) :\r\n data = Data()\r\n data.load(filename)\r\n return data" ]
[ "0.6876719", "0.65628785", "0.6523932", "0.64879066", "0.644452", "0.6407347", "0.62683874", "0.6245403", "0.6236588", "0.6224349", "0.62203705", "0.61988556", "0.61792195", "0.6178383", "0.61401325", "0.6105626", "0.608856", "0.608699", "0.6083278", "0.60767114", "0.6059067", "0.60526735", "0.6043922", "0.60227436", "0.6014379", "0.6002285", "0.59831214", "0.59805083", "0.59703517", "0.5965779", "0.59611875", "0.59502995", "0.5936756", "0.593042", "0.5925219", "0.5924356", "0.5922307", "0.5912574", "0.59025806", "0.58980376", "0.58833617", "0.5876385", "0.58656573", "0.5863327", "0.5860841", "0.5847145", "0.5846282", "0.58345693", "0.582894", "0.5821364", "0.58143777", "0.5813094", "0.58002716", "0.5794245", "0.57920486", "0.57791847", "0.5776986", "0.57767874", "0.57662576", "0.5763156", "0.5751627", "0.5750085", "0.57416695", "0.5740415", "0.5740162", "0.5736819", "0.57171714", "0.5713387", "0.5710292", "0.56958365", "0.5684872", "0.56686616", "0.56683457", "0.5666448", "0.5665858", "0.5662549", "0.56609815", "0.56590164", "0.5656322", "0.56421995", "0.56353664", "0.5635109", "0.5626582", "0.5625573", "0.56187713", "0.5617804", "0.5617107", "0.56164134", "0.5615795", "0.5614167", "0.5605299", "0.56031835", "0.5599778", "0.55993396", "0.55955", "0.5595492", "0.5594149", "0.55932546", "0.55898714", "0.5588142", "0.5586357" ]
0.0
-1
Build the original model to be explained.
def create_original_model():
    model = Sequential()
    model.add(Embedding(max_features, embedding_dims, input_length=maxlen))
    model.add(Dropout(0.2))
    model.add(Conv1D(filters, kernel_size, padding='valid', activation='relu', strides=1))
    model.add(GlobalMaxPooling1D())
    model.add(Dense(hidden_dims))
    model.add(Dropout(0.2))
    model.add(Activation('relu'))
    model.add(Dense(2))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
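A minimal usage sketch for the document above. The hyperparameters (max_features, maxlen, embedding_dims, filters, kernel_size, hidden_dims) are module-level names assumed by the function; the values below are illustrative placeholders, not the ones used in the source.

```python
from keras.models import Sequential
from keras.layers import (Activation, Conv1D, Dense, Dropout,
                          Embedding, GlobalMaxPooling1D)

# Illustrative hyperparameters only; the original script defines its own values.
max_features, maxlen = 5000, 400        # vocabulary size, padded sequence length
embedding_dims, hidden_dims = 50, 250   # embedding dimension, dense layer width
filters, kernel_size = 250, 3           # Conv1D configuration

model = create_original_model()
model.summary()  # Embedding -> Conv1D -> GlobalMaxPooling1D -> Dense(2, softmax)
```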
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_model():", "def build_model(self):\n pass", "def build_model(self):\n pass", "def build_model(self):\n raise NotImplementedError", "def _build_model(self):\n raise NotImplementedError()", "def build_model(self) -> nn.Module:\n pass", "def _build_model(self, **kwargs):\n pass", "def build_model(self) -> DM:\n model = DM()\n model[self.modelroot] = content = DM()\n\n content['potential'] = DM()\n content['potential']['key'] = self.potential_key\n content['potential']['id'] = self.potential_id\n content['implementation'] = DM()\n content['implementation']['key'] = self.potential_LAMMPS_key\n content['implementation']['id'] = self.potential_LAMMPS_id\n\n for subset in self.subsets:\n subset.build_model(content)\n\n self._set_model(model)\n return model", "def build_model(self) -> DM:\n model = DM()\n model[self.modelroot] = content = DM()\n\n content['key'] = self.key\n content['id'] = self.id\n content['system-family'] = self.family\n for cp in self.parameters:\n content.append('calculation-parameter', DM(cp))\n\n self._set_model(model)\n return model", "def MakeModel(self):\n pass", "def build_model_fn(self):", "def build(self):\n labelled_documents = self.get_labelled_documents_queryset()\n\n self.model = self.build_model(labelled_documents)\n self.save_model()", "def build_model(config):\n # Load the pretrained model\n detr = get_detr_model(config, include_top=True, weights=\"detr\")\n detr.summary()\n return detr", "def build_model(self):\n import tensorflow as tf\n \n y = tf.nn.relu(tf.matmul(self.variables[\"input_observation\"], self.variables[\"W1\"]) + \n self.variables[\"b1\"], name=\"y1\")\n \n for i in range(self.n_hidden-1):\n y = tf.nn.relu(tf.matmul(y, self.variables[\"W\"+str(i+2)]) + \n self.variables[\"b\"+str(i+2)], name=\"y\"+str(i+2))\n \n self.variables[\"y\"] = [tf.matmul(y, self.variables[\"Wo_0\"]) + self.variables[\"bo_0\"]]\n for i in range(1, len(self.output_size)):\n self.variables[\"y\"] += [tf.matmul(y, self.variables[\"Wo_%s\"%i]) + self.variables[\"bo_%s\"%i]]", "def build_model(self) -> DM:\n\n model = DM()\n model['artifact'] = DM()\n model['artifact']['web-link'] = DM()\n if self.url is not None:\n model['artifact']['web-link']['URL'] = self.url\n if self.label is not None:\n model['artifact']['web-link']['label'] = self.label\n if self.filename is not None:\n model['artifact']['web-link']['link-text'] = self.filename\n \n self._set_model(model)\n return model", "def build_model(self, **kwargs):\n raise NotImplementedError()", "def build(self):\n self.originModel.build()\n return self", "def build(self):\n self.originModel.build()\n return self", "def build_model():\n # noise for soise sampling in NCE\n noise = build_unigram_noise(\n torch.FloatTensor(corpus.vocab.idx2count)\n )\n\n norm_term = 'auto' if args.norm_term == -1 else args.norm_term\n # setting up NCELoss modules\n if args.index_module == 'linear':\n criterion = IndexLinear(\n args.emsize,\n ntoken,\n noise=noise,\n noise_ratio=args.noise_ratio,\n norm_term=norm_term,\n loss_type=args.loss,\n reduction='none',\n )\n model = RNNModel(\n ntoken, args.emsize, args.nhid, args.nlayers,\n criterion=criterion, dropout=args.dropout,\n )\n elif args.index_module == 'gru':\n if args.nlayers != 1:\n logger.warning('Falling into one layer GRU due to Index_GRU supporting')\n nce_criterion = IndexGRU(\n ntoken, args.emsize, args.nhid,\n args.dropout,\n noise=noise,\n noise_ratio=args.noise_ratio,\n norm_term=norm_term,\n )\n model = GenModel(\n criterion=nce_criterion,\n )\n else:\n 
logger.error('The index module [%s] is not supported yet' % args.index_module)\n raise(NotImplementedError('index module not supported'))\n\n if args.cuda:\n model.cuda()\n\n logger.info('model definition:\\n %s', model)\n return model", "def build_model(self):\r\n self.images, self.labels = self.dataloader.get_model_inputs()\r\n\r\n model = SimpleModel(self.images, self.labels, output_dim=F.output_dim, scope='source_regressor')\r\n self.out, _ = model.get_model()\r\n self.get_loss()", "def _regular_build(self):\n # This overwrites define_model, is that ok?\n self.define_model = tf.make_template(self.define_model.__name__, #pylint: disable=E1101\n self.define_model,\n create_scope_now_=True)\n\n self.outputs = {}\n self.losses = {}\n self.otters = {}\n\n def _build(mode):\n outputs, losses, others = self.define_model(data_source=self.dataset[mode], mode=mode)\n self.outputs[mode] = outputs\n self.losses[mode] = losses\n self.otters[mode] = others\n if mode == 'train':\n self._build_optimizer()\n\n # TODO Move clean and summary to proper section\n self.summary_ops = {}\n if self._train_model:\n _build('train')\n summary = []\n for idx, loss in enumerate(self.losses['train']):\n summary.append(\n tf.summary.scalar(name='train/loss_{}'.format(idx), tensor=loss))\n for idx, element in enumerate(self.otters['train']):\n summary.append(\n tf.summary.scalar(name='train/otter_{}'.format(idx), tensor=element))\n self.summary_ops['train'] = tf.summary.merge(summary)\n\n if self._validate_model:\n _build('validation')\n summary = []\n for idx, loss in enumerate(self.losses['validation']):\n summary.append(\n tf.summary.scalar(name='val/loss_{}'.format(idx), tensor=loss))\n for idx, element in enumerate(self.otters['validation']):\n summary.append(\n tf.summary.scalar(name='val/otter_{}'.format(idx), tensor=element))\n self.summary_ops['validation'] = tf.summary.merge(summary)\n\n self.writer = tf.summary.FileWriter(self.output_path,\n self.session.graph)\n self.saver = tf.train.Saver()\n # TODO Add routine to save\n logging.info('Model construction complete.')", "def _build_model(self, model):\n model = model(self.state_dim, n_actions=self.n_actions)\n model.compile(loss=self._huber_loss,\n optimizer=optimizers.Adam(lr=self.learning_rate))\n return model", "def build(model_name):\n return pretrain.factory.create(model_name)", "def build_model(self):\n doc_input = Input(shape=(self.max_sent_num ,self.max_sent_length,512), dtype='float32')\n doc_in=Flatten()(doc_input)\n \n #masked3=Masking(mask_value=Special_value)(doc_input)\n \n # self.model_sent = self.build_sent_encoder()\n \n # doc_encoder= TimeDistributed(self.model_sent)(doc_in)\n \n # document_att= self.build_doc_encoder(doc_encoder)\n dense= Dense(DENSE_SIZE,activation='softmax')(doc_in)\n #doc_att = self.build_sent_encoder(sent_encoder)\n # dense the output to 2 because the result is a binary classification.\n output_tensor = Dense(3, activation='softmax', name='classification')(dense)\n # Create Sentence-level Model\n self.model = Model(doc_input, output_tensor)", "def build_model(self):\n # model type\n self.model = Sequential()\n \n # Add embedding layer for first layer\n self.model.add(Embedding(self.embeding_matrix.shape[0], self.embeding_matrix.shape[1], input_length=self.tweet_len,\n weights=[self.embeding_matrix], name='emb'))\n # Add one dimensional convolution layer\n self.model.add(Conv1D(filters=self.params[\"filters\"] , kernel_regularizer=regularizers.l2(0.01), \n kernel_size=self.params[\"kernel_size\"], 
activation=self.params[\"activation\"]))\n # Add one dimensional max pooling layer\n self.model.add(MaxPooling1D(pool_size=self.params[\"MP_pool_size\"]))\n # Add flatten layer\n self.model.add(Flatten())\n # Add dense layer to predict label\n self.model.add(Dense(1, activation=self.params[\"dense_activation\"]))\n # Compile\n self.model.compile(loss=self.params[\"loss\"] , metrics=['accuracy'] , optimizer='adam')", "def _build_model_internal(self, opts):\n assert False, 'VAE base class has no build_model method defined.'", "def gen_model():\n\n\tmodel = skipthoughts.load_model()\n\treturn model", "def build_model(self):\n input_pencil = tf.keras.Input((128,128,3))\n # generator's output\n gen_image = self.gan_generator.model(input_pencil)\n # generator's output\n x = self.gan_discriminator.model([input_pencil,gen_image])\n model = tf.keras.Model(input_pencil,[x,gen_image])\n # compiling the model\n model.compile(loss=['hinge', 'mae'], optimizer = self.optimizer,loss_weights=[1,100], metrics=['accuracy'])\n self.model = model", "def make_model():\n m = model_class(*argv[2:-1])\n modelobj[\"model\"] = m", "def build(self):\n\t\n\t\tprint 'BUILDING HOMOLOGY MODELS'\n\t\tif self.procedure != 'mutator': self.get_targets()\n\t\tself.get_templates()\n\t\tif self.procedure == 'single': self.build_model_single()\n\t\telif self.procedure == 'multi': self.build_model_multi()\n\t\telif self.procedure == 'mutator': self.build_model_mutator()", "def build_model(self):\n cfg = self.cfg\n\n print('Building model')\n self.model = SimpleNet(cfg, cfg.MODEL, 0, **cfg.MODEL.BACKBONE.PARAMS)\n self.model.to(self.device)\n print('# params: {:,}'.format(count_num_param(self.model)))\n self.optim = build_optimizer(self.model, cfg.OPTIM)\n self.sched = build_lr_scheduler(self.optim, cfg.OPTIM)\n self.register_model('model', self.model, self.optim, self.sched)\n\n fdim = self.model.fdim\n self.classifier = nn.Linear(fdim, self.num_classes)\n print('# params: {:,}'.format(count_num_param(self.classifier)))\n self.classifier.to(self.device)\n self.optim_classifier = build_optimizer(self.classifier, cfg.OPTIM)\n self.sched_classifier = build_lr_scheduler(self.optim_classifier, cfg.OPTIM)\n self.register_model('classifier', self.classifier, self.optim_classifier, self.sched_classifier)", "def build_model_from_inputs(self):\n if self.term_list is None:\n # no supplied token list -- use vocabulary of the training dataset\n # self.term_list = self.vocabulary\n # info(\"Setting bag dimension to {} from input vocabulary.\".format(len(self.term_list)))\n # will generate the vocabulary from the input\n pass\n info(f\"Building {self.name} model\")\n bagger = None\n if self.config.max_terms is not None:\n bagger = Bag(vocabulary=self.term_list, weighting=self.base_name, ngram_range=self.ngram_range, max_terms=self.config.max_terms)\n else:\n bagger = Bag(vocabulary=self.term_list, weighting=self.base_name, ngram_range=self.ngram_range)\n\n train_idx = self.indices.get_train_instances()\n texts = Text.get_strings(self.text.data.get_slice(train_idx))\n bagger.map_collection(texts, fit=True, transform=False)\n self.term_list = bagger.get_vocabulary()\n\n self.dimension = len(self.term_list)\n self.config.dimension = self.dimension", "def __build_model(self) -> Sequential:\n self.__name = 'Training model'\n input_dim, *hidden_dims, output_dim = parameters.ANET_DIMENSIONS\n\n model = Sequential()\n model.add(Input(shape=(input_dim,)))\n\n for dimension in hidden_dims:\n model.add(Dense(dimension, 
activation=self.__activation_function))\n\n model.add(Dense(output_dim, activation=softmax))\n\n model.compile(\n optimizer=(self.__optimizer(learning_rate=self.__learning_rate) if self.__learning_rate is not None else self.__optimizer()),\n loss=self.__loss_function\n )\n model.summary()\n return model", "def build_model(cls, args):\n base_architecture(args) \n return StyleGANGeneratorPretrain(args)", "def _child_build_new_model(self):\n self._build_new_gp()", "def _create_model(self):\n config = {\n \"input_features\": self.input_features,\n \"output_features\": self.output_features,\n \"combiner\": {\"type\": \"concat\", \"output_size\": 14},\n TRAINER: {\"epochs\": 2, BATCH_SIZE: 128},\n }\n return LudwigModel(config, logging_level=logging.WARN)", "def build(self):\n\n self.W_AA = self.init([self.n_atom_input_feat, self.n_hidden_AA])\n self.b_AA = model_ops.zeros(shape=[\n self.n_hidden_AA,\n ])\n\n self.W_PA = self.init([self.n_pair_input_feat, self.n_hidden_PA])\n self.b_PA = model_ops.zeros(shape=[\n self.n_hidden_PA,\n ])\n\n self.W_A = self.init([self.n_hidden_A, self.n_atom_output_feat])\n self.b_A = model_ops.zeros(shape=[\n self.n_atom_output_feat,\n ])\n\n self.trainable_weights = [\n self.W_AA, self.b_AA, self.W_PA, self.b_PA, self.W_A, self.b_A\n ]\n if self.update_pair:\n self.W_AP = self.init([self.n_atom_input_feat * 2, self.n_hidden_AP])\n self.b_AP = model_ops.zeros(shape=[\n self.n_hidden_AP,\n ])\n\n self.W_PP = self.init([self.n_pair_input_feat, self.n_hidden_PP])\n self.b_PP = model_ops.zeros(shape=[\n self.n_hidden_PP,\n ])\n\n self.W_P = self.init([self.n_hidden_P, self.n_pair_output_feat])\n self.b_P = model_ops.zeros(shape=[\n self.n_pair_output_feat,\n ])\n\n self.trainable_weights.extend(\n [self.W_AP, self.b_AP, self.W_PP, self.b_PP, self.W_P, self.b_P])", "def build_model(self):\n model = None\n if self.model_name == 'vgg16':\n model = vgg16(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'resnet':\n model = ResNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'densenet':\n model = DenseNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'mobilenet':\n model = MobileNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'efficientnet':\n model = efficientnet(input_shape=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'vit':\n model = VisionTransformer(image_size=self.ex_input_size,\n patch_size=vit_settings['patch_size'],\n num_layers=vit_settings['num_layers'],\n num_classes=num_classes,\n d_model=vit_settings['d_model'],\n num_heads=vit_settings['num_heads'],\n mlp_dim=vit_settings['mlp_dim'],\n channels=self.ex_channels,\n dropout=vit_settings['dropout']).build_VisionTransformer()\n model.load_weights(self.path_to_weights)\n model.summary()\n return model", "def buildModel( self, transformer, classifier ):\n summitAIModel = make_pipeline ( transformer , classifier )\n summitAIModel.fit ( self.X , self.y )\n joblib.dump ( summitAIModel , self.modeldump )", "def BuildModel(ANNSetup,model):\n\n if(isinstance(ANNSetup.Activ,str)):\n model.add(Dense(ANNSetup.Neurons[0], kernel_regularizer=l2(ANNSetup.Regu), activation=ANNSetup.Activ, kernel_initializer=Winit(ANNSetup.Winit), input_dim=ANNSetup.InputDim))\n if(ANNSetup.Dropout != None):\n 
model.add(Dropout(ANNSetup.Dropout))\n for i in range(1,len(ANNSetup.Neurons)):\n if(i == len(ANNSetup.Neurons)-1):\n model.add(Dense(ANNSetup.Neurons[i], kernel_initializer=Winit(ANNSetup.Winit), activation='sigmoid'))\n else:\n model.add(Dense(ANNSetup.Neurons[i], kernel_initializer=Winit(ANNSetup.Winit), activation=ANNSetup.Activ))\n else:\n model.add(Dense(ANNSetup.Neurons[0], kernel_regularizer=l2(ANNSetup.Regu), kernel_initializer=Winit(ANNSetup.Winit), input_dim=ANNSetup.InputDim))\n model.add(LeakyReLU(alpha=ANNSetup.Activ))\n if(ANNSetup.Dropout != None):\n model.add(Dropout(ANNSetup.Dropout))\n for i in range(1,len(ANNSetup.Neurons)):\n if(i == len(ANNSetup.Neurons)-1):\n model.add(Dense(ANNSetup.Neurons[i], kernel_initializer=Winit(ANNSetup.Winit), activation='sigmoid'))\n else:\n model.add(Dense(ANNSetup.Neurons[i], kernel_initializer=Winit(ANNSetup.Winit)))\n model.add(LeakyReLU(alpha=ANNSetup.Activ))\n\n return model", "def build(self):\n\n if self.model is not None:\n warnings.warn('Model is not empty and was already trained.\\n'\n 'Run purge method for deleting the model variable',\n Warning)\n\n self.purge()\n self.model = Sequential()\n for layer in self.layers:\n self.model.add(layer.toKerasFn())\n\n self.model.compile(optimizer=self.optimizer.toKerasFn(),\n loss=self.loss,\n metrics=self.metrics\n )", "def build(self):\n\n if self.model is not None:\n warnings.warn('Model is not empty and was already trained.\\n'\n 'Run purge method for deleting the model variable',\n Warning)\n\n self.purge()\n self.model = Sequential()\n for layer in self.layers:\n self.model.add(layer.toKerasFn())\n\n self.model.compile(optimizer=self.optimizer.toKerasFn(),\n loss=self.loss,\n metrics=self.metrics\n )", "def build_sys_rec_model():\n print(\"building model...\")\n model = Merchant2VecModel()\n model.train(final_training=True)\n model.save_model()", "def build_model(self):\n self.g12 = G12(conv_dim=self.g_conv_dim)\n init_weights(self.g12, init_type='normal')\n self.g21 = G21(conv_dim=self.g_conv_dim)\n init_weights(self.g21, init_type='normal')\n self.d1 = D1(conv_dim=self.d_conv_dim, use_labels=self.use_labels)\n init_weights(self.d1, init_type='normal')\n self.d2 = D2(conv_dim=self.d_conv_dim, use_labels=self.use_labels)\n init_weights(self.d2, init_type='normal')\n self.dreid = DSiamese(class_count=self.num_classes_market)\n\n g_params = list(self.g12.parameters()) + list(self.g21.parameters())\n d_params = list(self.d1.parameters()) + list(self.d2.parameters())\n dr_params = list(self.dreid.parameters())\n\n self.g_optimizer = optim.Adam(g_params, self.lr, [self.beta1, self.beta2])\n self.d_optimizer = optim.Adam(d_params, self.lr, [self.beta1, self.beta2])\n self.dr_optimizer = optim.Adam(dr_params, self.lr, [self.beta1, self.beta2])\n\n if torch.cuda.is_available():\n self.g12.cuda()\n self.g21.cuda()\n self.d1.cuda()\n self.d2.cuda()\n self.dreid.cuda()", "def build_model(self):\n if not os.path.isdir(os.path.join(self.save_dir, self.name)):\n os.mkdir(os.path.join(self.save_dir, self.name))\n self.fitted = False\n else:\n self.fitted = True\n \n if self.hidden_ratio != 1.0:\n hidden_dim_A = int(self.dimension_A * self.hidden_ratio)\n hidden_dim_V = int(self.dimension_V * self.hidden_ratio)\n hidden_dim = int((self.dimension_A + self.dimension_V) * self.hidden_ratio / 4)\n else:\n hidden_dim_A = int(self.dimension_A * 0.75)\n hidden_dim_V = int(self.dimension_V * 0.75)\n hidden_dim = int((self.dimension_A + self.dimension_V) * 0.5)\n\n input_data_A = 
Input(shape=(self.dimension_A, ), name='audio_input')\n input_data_V = Input(shape=(self.dimension_V, ), name='video_input')\n encoded_input = Input(shape=(hidden_dim, ))\n \n encoded_A = Dense(hidden_dim_A, \n activation='relu', kernel_initializer='he_uniform', \n name='audio_encoded')(input_data_A)\n encoded_V = Dense(hidden_dim_V, \n activation='relu', kernel_initializer='he_uniform', \n name='video_encoded')(input_data_V)\n\n shared = Concatenate(axis=1, name='concat')([encoded_A, encoded_V])\n if self.sparse:\n encoded = Dense(hidden_dim, \n activation='relu',\n activity_regularizer=self.sparse_regularizer,\n kernel_initializer='he_uniform', \n name='shared_repres')(shared)\n else:\n encoded = Dense(hidden_dim, \n activation='relu',\n kernel_initializer='he_uniform', \n name='shared_repres')(shared)\n \n decoded_A = Dense(hidden_dim_A, \n activation='relu', kernel_initializer='he_uniform', \n name='audio_decoded')(encoded)\n decoded_V = Dense(hidden_dim_V, \n activation='relu', kernel_initializer='he_uniform', \n name='video_decoded')(encoded)\n\n decoded_A = Dense(self.dimension_A, activation='linear',\n name='audio_recon')(decoded_A)\n decoded_V = Dense(self.dimension_V, activation='linear',\n name='video_recon')(decoded_V)\n\n self.autoencoder = Model(inputs=[input_data_A, input_data_V], outputs=[decoded_A, decoded_V])\n self.encoder = Model(inputs=[input_data_A, input_data_V], outputs=encoded)\n self.decoder_A = Model(inputs=encoded_input, \n outputs=self.autoencoder.get_layer('audio_recon')(\n self.autoencoder.get_layer('audio_decoded')(\n encoded_input)))\n self.decoder_V = Model(inputs=encoded_input, \n outputs=self.autoencoder.get_layer('video_recon')(\n self.autoencoder.get_layer('video_decoded')(\n encoded_input)))\n\n # configure model\n self.autoencoder.compile(optimizer='adam', \n loss='mse',\n metrics=[metrics.mse, metrics.mse],\n loss_weights=[0.5, 0.5])\n print(\"--\" * 20)\n print(\"autoencoder\")\n print(self.autoencoder.summary())\n print(\"--\" * 20)\n print(\"encoder\")\n print(self.encoder.summary())\n print(\"--\" * 20)\n print(\"decoder (A)\")\n print(self.decoder_A.summary())\n print(\"--\" * 20)\n print(\"decoder (V)\")\n print(self.decoder_V.summary())\n print(\"--\" * 20)\n\n plot_model(self.autoencoder, show_shapes=True, to_file=os.path.join(self.save_dir, self.name, 'bimodal_DDAE.png'))", "def test_BuildModel1(self):\n print(\"\\nTest 5: Building a Model with cloning\")\n builder = StaticBuilder(\"Clone\")\n in1 = builder.addInput(10)\n enc1 = builder.addInner(3)\n out1 = builder.addOutput(name=\"Out1\")\n out2 = builder.addOutput(name=\"Out2\")\n \n builder.addDirectedLink(in1, enc1)\n builder.addDirectedLink(enc1, out1)\n builder.addDirectedLink(enc1, out2)\n \n builder.build()", "def build_model(self):\n self.model = Sequential()\n # print self.layers[0].identifier\n # print self.layers[0].parameters\n for layer in self.layers:\n # print layer.identifier\n # print layer.parameters\n self.model.add(layer.toKerasFn())\n\n\n # super(SequentialModelWrapper, self).compile(optimizer=self.optimizer.toKerasFn(),\n # loss=self.loss,\n # metrics=self.metrics)\n self.model.compile(optimizer=self.optimizer.toKerasFn(),\n loss=self.loss,\n metrics=self.metrics)", "def build(self):\n self.build_inputs()\n self.build_image_embeddings()\n self.build_seq_embeddings()\n self.build_encoder()\n self.build_prediction_model()\n self.setup_encoder_initializer()\n self.setup_global_step()\n self.list_trainable_variables()", "def build(self) -> None:", "def 
create_model(self):\n # Create the generator and discriminators\n self.generator_lungs = self.generator_model()\n self.generator_organs = self.generator_model()\n\n self.disc_lungs = self.discriminator_model_lungs()\n self.disc_organs = self.discriminator_model_organs()\n\n # Initialize the optimizer and backend\n self.generator_optimizer = tf.keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5)\n self.discriminator_optimizer = tf.keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5)\n self.set_backend = tf.keras.backend.set_floatx('float32')\n\n # Create the summary writer\n self.create_summary_writer()\n print('Models are created.')\n return self", "def build_model(self):\n \n # initalizing generators\n self.g12 = G12(conv_dim=self.numGenFilter, domainA_channels = self.domainA_channels, domainB_channels = self.domainB_channels)\n self.g21 = G21(conv_dim=self.numGenFilter, domainA_channels = self.domainA_channels, domainB_channels = self.domainB_channels)\n \n # initializing discriminators\n self.d1 = D1(conv_dim=self.numDiscFilter, domainA_channels = self.domainA_channels, use_labels=self.use_labels)\n self.d2 = D2(conv_dim=self.numDiscFilter, domainB_channels = self.domainB_channels, use_labels=self.use_labels)\n \n\n g_params = list(self.g12.parameters()) + list(self.g21.parameters())\n d_params = list(self.d1.parameters()) + list(self.d2.parameters())\n \n self.g_optimizer = optim.Adam(g_params, self.lr, [self.beta1, self.beta2])\n self.d_optimizer = optim.Adam(d_params, self.lr, [self.beta1, self.beta2])\n \n if torch.cuda.is_available():\n self.g12.cuda()\n self.g21.cuda()\n self.d1.cuda()\n self.d2.cuda()", "def _build_model(self):\n \n #convolutional part\n conv_inputs = keras.Input(shape = self._state_shape[0])\n c1 = layers.Conv2D(filters = 4, kernel_size = 2, strides = (2,2), padding = \"same\", activation = 'relu')(conv_inputs)\n c2 = layers.Conv2D(filters = 8, kernel_size = 2, strides = (1,1), padding = \"same\", activation = 'relu')(c1)\n flat = layers.Flatten()(c2)\n\n\n #current green phase layer\n # phase_inputs = keras.Input(shape = (self._state_shape[1],))\n \n #elapsed green time layer\n elapsed_time_inputs = keras.Input(shape = (self._state_shape[2],))\n \n \n #combine elapsed time and green time layer\n # combined_green = layers.concatenate([phase_inputs, elapsed_time_inputs])\n # green_dense = layers.Dense(10, activation='relu')(elapsed_time_inputs)\n \n #combine green layer with conv layer\n all_combined = layers.concatenate([elapsed_time_inputs, flat])\n dense = layers.Dense(32, activation='relu')(all_combined)\n dense = layers.Dense(16, activation='relu')(dense)\n outputs = layers.Dense(self._output_dim, activation='linear')(dense)\n \n model = keras.Model(inputs = [conv_inputs, elapsed_time_inputs], outputs = outputs, name='simple_CNN') \n model.compile(loss=losses.mean_squared_error, optimizer=Adam(lr=self._learning_rate))\n \n return model", "def _construct_model(self):\n self.model = AutoEncoderConvolutional(self.n_latent_features, self.reduced_size)\n self.model = self.model.to(self.device, non_blocking=True)", "def _reconstruct(self, num_samples=None):", "def build(self):\n raise Exception(\" not implemented in base model\")", "def create_model(self):\n pass", "def create_model(self):\n pass", "def reconstruct_input_ext(self, model_in):", "def create_model(self):\r\n model = self.model_fn(self.flags)\r\n print(model)\r\n return model", "def build_model(cls, args, task):\n raise NotImplementedError(\"Model must implement the build_model method\")", "def 
build_model(self):\n for link in self.links:\n # if from neuron is input to graph, add it to input_neurons set\n if self.is_input_neuron(link.from_neuron_id):\n self.input_neurons.add(link.from_neuron_id)\n # add weight to neuron\n if link.to_neuron_id not in self.weights:\n self.weights[link.to_neuron_id] = []\n self.weights[link.to_neuron_id].append(link.weight)\n # add input to neuron\n if link.to_neuron_id not in self.connections:\n self.connections[link.to_neuron_id] = []\n self.connections[link.to_neuron_id].append(link.from_neuron_id)", "def build_model(self, constructor, args):\n dims = {'en': 300, 'es': 50}\n dists = {'en': 'Normal',\n 'es': 'Normal',}\n z_dim = args.model_args.get('z_dim', 64)\n h_dim = args.model_args.get('h_dim', 64)\n n_layers = args.model_args.get('n_layers', 3)\n gauss_out = (args.model != 'MultiDKS') \n encoders = {'en': models.common.DeepGaussianMLP(dims['en'], z_dim, h_dim, n_layers),\n 'es': models.common.DeepGaussianMLP(dims['es'], z_dim, h_dim, n_layers)}\n decoders = {'en': models.common.DeepGaussianMLP(z_dim, dims['en'], h_dim, n_layers),\n 'es': models.common.DeepGaussianMLP(z_dim, dims['es'], h_dim, n_layers)}\n custom_mods = [m for m in ['en', 'es'] if m in args.modalities]\n model = constructor(args.modalities,\n dims=(dims[m] for m in args.modalities),\n dists=[dists[m] for m in args.modalities],\n encoders={m: encoders[m] for m in custom_mods},\n decoders={m: decoders[m] for m in custom_mods},\n z_dim=z_dim, h_dim=h_dim,\n device=args.device, **args.model_args)\n return model", "def build_cut_model(self):\n model = None\n if self.model_name == 'vgg16':\n model = vgg16(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'resnet':\n model = ResNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'densenet':\n model = DenseNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'mobilenet':\n model = MobileNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'efficientnet':\n model = efficientnet(input_shape=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'vit':\n model = VisionTransformer(image_size=self.ex_input_size,\n patch_size=vit_settings['patch_size'],\n num_layers=vit_settings['num_layers'],\n num_classes=num_classes,\n d_model=vit_settings['d_model'],\n num_heads=vit_settings['num_heads'],\n mlp_dim=vit_settings['mlp_dim'],\n channels=self.ex_channels,\n dropout=vit_settings['dropout']).build_VisionTransformer()\n model.load_weights(self.path_to_weights)\n model = Model(model.input, model.get_layer(self.ex_last_conv_layer_name2).output)\n model.summary()\n return model", "def build_model(self):\n if self.args.network_type == 'unet':\n self.shared = models.Unet(self.args)\n else:\n raise NotImplementedError(f'Network type '\n f'`{self.args.network_type}` is not '\n f'defined')\n self.controller = models.Controller(self.args)\n\n if self.args.num_gpu == 1:\n self.shared.cuda()\n self.controller.cuda()\n elif self.args.num_gpu > 1:\n raise NotImplementedError('`num_gpu > 1` is in progress')", "def build_model(cls, args, task):\n global PAD_IDX, EOS_IDX\n # make sure all arguments are present in older models\n w2v_lm_architecture2(args)\n\n if not hasattr(args, \"max_source_positions\"):\n args.max_source_positions = 2048\n if not 
hasattr(args, \"max_target_positions\"):\n args.max_target_positions = 2048\n\n tgt_dict = task.target_dictionary\n PAD_IDX = tgt_dict.pad()\n EOS_IDX = tgt_dict.eos()\n\n encoder = cls.build_encoder(args)\n assigner = cls.build_assigner(args, encoder.d)\n lm = cls.build_lm(args, task)\n\n return cls(args, encoder, assigner, lm)", "def _build(self):", "def _build(self):", "def build(self):", "def build(self):", "def build(self):", "def create_model():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--DISC_LR', type=float, default=1e-4)\r\n parser.add_argument('--GEN_LR', type=float, default=1e-3)\r\n parser.add_argument('--GEN_BETA1', type=float, default=0.9)\r\n parser.add_argument('--GEN_BETA2', type=float, default=0.999)\r\n parser.add_argument('--IMAGE_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_EMBED_SIZE', type=int, default=2048)\r\n parser.add_argument('--WORD_EMBED_SIZE', type=int, default=512)\r\n parser.add_argument('--VOCAB_SIZE', type=int, default=1004)\r\n args, task_args = parser.parse_known_args()\r\n override_if_not_in_args('--max_steps', '1000', task_args)\r\n override_if_not_in_args('--batch_size', '64', task_args)\r\n override_if_not_in_args('--eval_set_size', '370', task_args)\r\n override_if_not_in_args('--eval_interval_secs', '2', task_args)\r\n override_if_not_in_args('--log_interval_secs', '2', task_args)\r\n override_if_not_in_args('--min_train_eval_rate', '2', task_args)\r\n\r\n return Model(args.DISC_LR, args.GEN_LR, args.GEN_BETA1, args.GEN_BETA2,\r\n args.IMAGE_SIZE, args.QUES_EMBED_SIZE, args.WORD_EMBED_SIZE,\r\n args.QUES_SIZE, args.VOCAB_SIZE), task_args", "def build_model(self):\n insts1, attrs1, rels1 = self.arg1.get_triples()\n insts2, attrs2, rels2 = self.arg2.get_triples()\n for items, shld_norm in [(insts1, True), (insts2, True), (attrs1, True),\n (attrs2, True), (rels1, False), (rels2, False)]:\n for i in range(len(items)):\n # GUROBI cant handle Unicode so step down to ASCII\n items[i] = [items[i][0].encode('ascii', 'ignore').lower(),\n items[i][1].encode('ascii', 'ignore'),\n items[i][2].encode('ascii', 'ignore')]\n # normalize concept names -- instances and attributes\n if shld_norm:\n items[i][2] = SmatchILP.normalize(items[i][2])\n\n # Attributes are same as relations\n rels1.extend(attrs1)\n rels2.extend(attrs2)\n\n log.debug(\"AMR 1 Instances:\\n %s\" % insts1)\n log.debug(\"AMR 1 Relations:\\n %s\" % rels1)\n log.debug(\"AMR 2 Instances:\\n %s\" % insts2)\n log.debug(\"AMR 2 Relations:\\n %s\" % rels2)\n\n for index, items in [(self.arg1vars, insts1), (self.arg2vars, insts2)]:\n for name, var, concept in items:\n assert name == 'instance' # relation name is instance ==> variable definition\n assert var not in index # variable name is unique\n index[var] = concept\n\n var_choices = set() # possible variable matches\n for v1 in self.arg1vars.keys():\n for v2 in self.arg2vars.keys():\n var_choices.add((v1, v2))\n\n # instances are relations too\n rels1.extend(insts1)\n rels2.extend(insts2)\n\n self.arg1size = len(rels1)\n self.arg2size = len(rels2)\n\n trpl_choices = set()\n trpl_var_consts = {}\n for name1, var11, var12 in rels1:\n id1 = \"%s:%s:%s\" % (name1, var11, var12)\n for name2, var21, var22 in rels2:\n possible = 0\n id2 = \"%s:%s:%s\" % (name2, var21, var22)\n # triple name matches && first argument to triples can be matched\n if name1 == name2 and (var11, var21) in var_choices:\n # second argument to triple can also be matched OR\n 
possible += 1\n if (var12, var22) in var_choices or (\n # they are the same concepts\n # var12 not in self.arg1vars and var22 not in self.arg2vars and\n var12 == var22):\n possible += 1\n trpl_choices.add((id1, id2))\n # constrains between variables and triples\n trpl_var_consts[id1, id2] = [(var11, var21)]\n # if second argument is also variable\n\n if (var12, var22) in var_choices:\n trpl_var_consts[id1, id2].append((var12, var22))\n log.debug('\\t %s <--> %s ? %s ' % (id1, id2, possible))\n\n # Add variables to ILP model\n model = GRBModel('Smatch ILP')\n if log.getLogger().getEffectiveLevel() >= log.INFO:\n model.Params.OutputFlag = 0 # disable output\n log.info(\"Number of possible variable matches %s\" % len(var_choices))\n log.info(\"Number of possible triple matches %s\" % len(trpl_choices))\n\n self.vars = model.addVars(var_choices, vtype=GRB.BINARY, name=\"v\")\n self.trpls = model.addVars(trpl_choices, vtype=GRB.BINARY, name=\"t\")\n\n # constraints\n for v1 in self.arg1vars:\n model.addConstr(self.vars.sum(v1, '*') <= 1, name='to max 1 var')\n for v2 in self.arg2vars:\n model.addConstr(self.vars.sum('*', v2) <= 1, name='from max 1 var')\n\n for trpl_idx, var_idxs in trpl_var_consts.items():\n for var_idx in var_idxs:\n model.addConstr(self.trpls[trpl_idx] <= self.vars[var_idx], name=\"%s::%s\" % (trpl_idx, var_idx))\n\n # objective\n model.setObjective(self.trpls.sum(), GRB.MAXIMIZE)\n self.model = model\n\n # stats for how big the problem is\n var_trpl_consts_count = sum(len(x) for x in trpl_var_consts.values())\n num_constr = len(var_choices) + len(trpl_choices) + var_trpl_consts_count\n num_vars = len(var_choices) + len(trpl_choices)\n log.info(\"ILP SIZE: %d binary variables (%d vars + %d triple vars)\" % (num_vars, len(var_choices), len(trpl_choices)))\n log.info(\"ILP SIZE: %d constraints (%d b/w arg vars and triples)\" % (num_constr, var_trpl_consts_count))", "def build_model(cfg, char_voca, word_voca=None, gazet=None, pos_voca=None):\n\n # Build Embedder\n embedder = Embedder(\n window=cfg.window,\n char_voca=char_voca,\n word_voca=word_voca,\n jaso_dim=cfg.jaso_dim,\n char_dim=cfg.char_dim,\n word_dim=cfg.word_dim,\n gazet=gazet,\n gazet_embed=True,\n pos_enc=True,\n phoneme=True,\n pos_voca_size=len(pos_voca),\n pos_dim=cfg.pos_dim)\n\n print('Total Embedding_size: ', embedder.embed_dim)\n\n\n encoder_name, decoder_name = cfg.model_name.lower().split('-')\n\n # Build Encoder\n if encoder_name == 'fnn5':\n encoder = models.Fnn5(context_len=cfg.context_len,\n in_dim=embedder.embed_dim,\n hidden_dim=cfg.hidden_dim)\n elif encoder_name == 'cnn7':\n encoder = models.Cnn7(in_dim=embedder.embed_dim,\n hidden_dim=cfg.hidden_dim)\n elif encoder_name == 'cnn8':\n encoder = models.Cnn8(context_len=cfg.context_len,\n in_dim=embedder.embed_dim,\n hidden_dim=cfg.hidden_dim)\n elif encoder_name in ['gru', 'lstm', 'sru']:\n encoder = models.RnnEncoder(context_len=cfg.context_len,\n in_dim=embedder.embed_dim,\n out_dim=cfg.hidden_dim,\n cell=encoder_name)\n else:\n raise ValueError('unknown model name: %s' % cfg.model_name)\n\n # Build Decoder\n if decoder_name.lower() == 'fc':\n decoder = models.FCDecoder(in_dim=encoder.out_dim,\n hidden_dim=cfg.hidden_dim,\n n_tags=cfg.n_tags)\n elif decoder_name in ['gru', 'lstm', 'sru']:\n decoder = models.RnnDecoder(in_dim=encoder.out_dim,\n hidden_dim=cfg.hidden_dim,\n n_tags=cfg.n_tags,\n num_layers=cfg.num_layers,\n cell=decoder_name)\n\n model = models.Ner(embedder, encoder, decoder)\n\n return model", "def build(self):\n # Clean all 
fields.\n self._clean_fields()\n\n # Build", "def build(self):\n pass", "def build(self):\n pass", "def _create_model(self):\n ref = 0 if self.m_cfg['configs']['recursive'] else -1\n out_t, l_t, models = [], [], []\n in_t = [tf.keras.Input(batch_size=self.m_cfg['configs']['batch'],\n shape=self.m_cfg['configs']['patch'])]\n for level in np.arange(self.levels):\n if not self.m_cfg['configs']['recursive'] or not level:\n lat, res, layers = self._set_level_ops(in_t[-1], level)\n opt = self._inst_optimizer()\n self.opt += [opt]\n curr_layers = sum(layers, [])\n vars = sum(list(map(lambda l: l.variables, curr_layers)), [])\n self.vars.append(vars)\n elif self.m_cfg['configs']['recursive']:\n lat, res, layers = self._set_level_ops(in_t[-1], level, layers)\n\n out_t += [res]\n l_t += [lat]\n in_t += [tf.keras.layers.Subtract()([in_t[ref], out_t[-1]])]\n\n inputs, outputs = in_t[0], [in_t[:-1], l_t, out_t]\n self.model = tf.keras.Model(inputs=inputs, outputs=outputs)\n self.loss = Losses(self.m_cfg['configs']['loss']).value", "def create_model(self):\n self.create_model_file()\n self.create_model_unit_test()\n self.add_model_to_list()\n self.readme_reminder()", "def recreate():\n path = \"./results/BEST/20190807_104745-smallv2/RESUME.yaml\"\n path = \"./results/BEST/LARGE/LARGE.yaml\"\n # import shlex\n # args = shlex.split(f\"--config {path}\")\n # sys.argv[1:] = args\n # print(sys.argv)\n config, *_ = build_model(path)\n globals().update(locals())\n\n #save_model(config)", "def create_model(self): # noqa: D103\n # reference for creation of the model https://yilundu.github.io/2016/12/24/Deep-Q-Learning-on-Space-Invaders.html\n model=Sequential()\n model.add(Flatten( input_shape=(84,84,4)))\n model.add(Dense(self.num_actions)) \n\n return model", "def build_model(self, X: pd.DataFrame, y: pd.DataFrame = None) -> pm.Model:\n idx = X.index\n \n if y is None:\n y = pd.Series(0, index=idx)\n elif self.oversample: # only if y is given\n n_pos = (y == 1).sum()\n n_neg = (y == 0).sum()\n to_add = int(np.ceil(n_neg/n_pos) - 1)\n # print(n_pos, n_neg, to_add)\n if to_add > 4:\n to_add = 4\n for i in range(to_add):\n idx = idx.append(y[y==1].index)\n X = X.loc[idx]\n y = y.loc[idx]\n \n A = X[self.v_known + self.v_oob_bio]\n B_vals = X[self.v_fuzzy]\n B_mask = (B_vals == -1).astype(int)\n C_raw = X[self.v_float_adm + self.v_float_bio]\n # C_scaled = (C_raw - self.C_mean_) / self.C_std_ \n C_scaled = np.log1p(C_raw/self.C_mean_)\n C_scaled[~np.isfinite(C_scaled)] = np.nan\n C_vals = C_scaled.fillna(0)\n C_mask = C_scaled.isnull().astype(int)\n \n coords = {\"idx\": idx, \"a\": A.columns, \"b\": B_vals.columns, \"c\": C_vals.columns}\n with pm.Model(coords=coords) as m:\n pm.Data(\"A\", A, dims=[\"idx\", \"a\"])\n pm.Data(\"B_vals\", B_vals, dims=[\"idx\", \"b\"])\n pm.Data(\"B_mask\", B_mask, dims=[\"idx\", \"b\"])\n pm.Data(\"C_vals\", C_vals, dims=[\"idx\", \"c\"])\n pm.Data(\"C_mask\", C_mask, dims=[\"idx\", \"c\"])\n pm.Data(\"y\", y, dims=[\"idx\"])\n\n pm.Normal(\"avg\", mu=0, sd=1)\n\n pm.Beta(\"h_a_incl\", alpha=1, beta=4)\n pm.Normal(\"a_coef_raw\", mu=0, sd=1, dims=[\"a\"])\n pm.Bernoulli(\"a_incl\", p=m[\"h_a_incl\"], dims=[\"a\"])\n pm.Deterministic(\"a_coef\", m['a_coef_raw'] * m['a_incl'], dims=[\"a\"])\n \n pm.Normal(\"b_vals_coef\", mu=0, sd=1, dims=[\"b\"])\n pm.Normal(\"b_mask_coef_raw\", mu=0, sd=1, dims=[\"b\"])\n pm.Beta(\"h_b_mask_incl\", alpha=1, beta=4)\n pm.Bernoulli(\"b_mask_incl\", p=m[\"h_b_mask_incl\"], dims=[\"b\"])\n pm.Deterministic(\"b_mask_coef\", m['b_mask_coef_raw'] * 
m['b_mask_incl'], dims=[\"b\"])\n \n pm.Normal(\"c_vals_coef\", mu=0, sd=1, dims=[\"c\"])\n pm.Normal(\"c_mask_coef_raw\", mu=0, sd=1, dims=[\"c\"])\n pm.Beta(\"h_c_mask_incl\", alpha=1, beta=4)\n pm.Bernoulli(\"c_mask_incl\", p=m[\"h_c_mask_incl\"], dims=[\"c\"])\n pm.Deterministic(\"c_mask_coef\", m['c_mask_coef_raw'] * m['c_mask_incl'], dims=[\"c\"])\n unprob = pm.Deterministic(\n \"logit\",\n m['avg']\n + tt.dot(m[\"A\"], m[\"a_coef\"])\n + tt.dot(m[\"B_vals\"] * (1 - m['B_mask']), m[\"b_vals_coef\"])\n + tt.dot(m[\"B_mask\"], m[\"b_mask_coef\"])\n + tt.dot(m[\"C_vals\"] * (1 - m['C_mask']), m[\"c_vals_coef\"])\n + tt.dot(m[\"C_mask\"], m[\"c_mask_coef\"])\n )\n pm.Bernoulli(\"y_pred\", p = tt.nnet.sigmoid(unprob), dims=['idx'], observed=m['y'])\n\n m.graph = pm.model_to_graphviz()\n\n return m", "def build_retrieval_model(cfg):\n logging.info(\"Building model....\")\n model = build_model(cfg.MODEL, cfg.OPTIMIZER)\n if g_pathmgr.exists(cfg.MODEL.WEIGHTS_INIT.PARAMS_FILE):\n init_weights_path = cfg.MODEL.WEIGHTS_INIT.PARAMS_FILE\n logging.info(f\"Initializing model from: {init_weights_path}\")\n weights = load_checkpoint(init_weights_path, device=torch.device(\"cuda\"))\n skip_layers = cfg.MODEL.WEIGHTS_INIT.get(\"SKIP_LAYERS\", [])\n replace_prefix = cfg.MODEL.WEIGHTS_INIT.get(\"REMOVE_PREFIX\", None)\n append_prefix = cfg.MODEL.WEIGHTS_INIT.get(\"APPEND_PREFIX\", None)\n state_dict_key_name = cfg.MODEL.WEIGHTS_INIT.get(\"STATE_DICT_KEY_NAME\", None)\n\n init_model_from_consolidated_weights(\n cfg,\n model,\n weights,\n state_dict_key_name=state_dict_key_name,\n skip_layers=skip_layers,\n replace_prefix=replace_prefix,\n append_prefix=append_prefix,\n )\n else:\n # We only throw the warning if not weights file is provided. We want to\n # benchmark the random initialization model too and hence support that.\n logging.warning(\"Model is randomly initialized....\")\n logging.info(f\"Model is:\\n {model}\")\n return model", "def build_model(self):\n self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n\n if self.config.optimizer == 'sgd':\n self.optimizer = tf.keras.optimizers.SGD(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'rms':\n self.optimizer = tf.keras.optimizers.RMSprop(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adam':\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adagrad':\n self.optimizer = tf.keras.optimizers.Adagrad(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adadelta':\n self.optimizer = tf.keras.optimizers.Adadelta(learning_rate=self.config.learning_rate)\n else:\n raise NotImplementedError(\"No support for %s optimizer\" % self.config.optimizer)\n \n if self.config.optimizer in ['rms', 'adagrad', 'adadelta']:\n with tf.device('cpu:0'):\n self.model.def_parameters()\n else:\n self.model.def_parameters()\n\n self.config.summary()\n self.config.summary_hyperparameter(self.model.model_name)", "def new(self):\n self.define_layers()\n self.model = nn.Sequential(*self.layers)\n self.model.cuda()\n self.model = orthogonal_init(self.model)\n\n # Re-count N\n self.count_params()", "def convert_to_model(self, *args):", "def initiate(self):\n # if self.opt.checkpoint_encoder:\n # self.load(self.opt.checkpoint_encoder, self.opt.checkpoint_decoder)\n # else:\n # start fresh.\n self.model = Transformer(\n self.opt.src_vocab_size,\n self.opt.tgt_vocab_size,\n self.opt.max_token_seq_len,\n 
tgt_emb_prj_weight_sharing=self.opt.proj_share_weight,\n emb_src_tgt_weight_sharing=self.opt.embs_share_weight,\n d_k=self.opt.d_k,\n d_v=self.opt.d_v,\n d_model=self.opt.d_model,\n d_word_vec=self.opt.d_word_vec,\n d_inner=self.opt.d_inner_hid,\n n_layers=self.opt.layers,\n n_head=self.opt.n_head,\n dropout=self.opt.dropout).to(self.device)\n \n for p in self.model.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)", "def test_BuildModel3(self):\n print(\"\\nTest 7: Building a more complicated Model\")\n builder = StaticBuilder(\"BreakIt\")\n in1 = builder.addInput(10)\n in2 = builder.addInput(20)\n enc1 = builder.addInner(3)\n enc2 = builder.addInner(5, num_islots=2)\n out1 = builder.addOutput()\n out2 = builder.addOutput()\n \n builder.addDirectedLink(in1, enc1)\n builder.addDirectedLink(in2, enc2, islot=0)\n builder.addDirectedLink(enc1, enc2, islot=1)\n builder.addDirectedLink(enc1, out1)\n builder.addDirectedLink(enc2, out2)\n \n builder.build()", "def _build_models(self):\n with tf.variable_scope('model'):\n meval = Model(self.hparams, mode='test')\n meval.build()\n self._saver = meval.saver\n\n self.meval = meval", "def build_model(self, text, n = 3):\n \n try:\n self.lm.build_model(text,n)\n except:\n raise\n \n self.vocab = Counter(words(text))\n\n return self.lm", "def _explain_model(self):\n raise NotImplementedError", "def read_model(self):\n f = open(self.name + '_' + 'words', 'r')\n self.words = f.read()\n f.close()\n elf.words = dict(eval(self.words))\n \n f = open(self.name + '_' + 'word_lengths', 'r')\n self.word_lengths = f.read()\n f.close()\n self.word_lengths = dict(eval(self.word_lengths))\n\n f = open(self.name + '_' + 'sentence_lengths', 'r')\n self.sentence_lengths = f.read()\n f.close()\n self.sentence_lengths = dict(eval(self.sentence_lengths))\n\n f = open(self.name + '_' + 'stems', 'r')\n self.stems = f.read()\n f.close()\n self.stems = dict(eval(self.stems))\n\n f = open(self.name + '_' + 'commas_per_sentence', 'r')\n self.commas_per_sentence = f.read()\n f.close()\n self.commas_per_sentence = dict(eval(self.commas_per_sentence))", "def build(self):\n\n self.W = self.init([self.n_atom_input_feat, self.n_output])\n self.b = model_ops.zeros(shape=[\n self.n_output,\n ])\n\n self.trainable_weights = self.W + self.b", "def model_setup(self):\n self.DNN = DgganMLP(self.settings.hidden_size)\n self.D = DgganMLP(self.settings.hidden_size)\n self.G = Generator(self.settings.hidden_size)", "def build_model(self):\n # Define input layer (states)\n states = K.layers.Input(shape=(self.state_size,), name='states')\n net = states\n # Add the hidden layers\n for layer_count in range(len(self.layer_sizes)):\n net = K.layers.Dense(units=self.layer_sizes[layer_count])(net)\n net = K.layers.Activation('relu')(net)\n if self.batch_norm_options[layer_count]:\n net = K.layers.BatchNormalization()(net)\n net = K.layers.Dropout(self.dropout_options[layer_count])(net)\n\n # Add final output layer with sigmoid activation\n actions = K.layers.Dense(units=self.action_size, activation='linear',\n name='raw_actions')(net)\n\n # Create Keras model\n self.model = K.models.Model(inputs=states, outputs=actions)\n\n # Print the created model summary\n self.logger.debug(\"Model Summery:\")\n self.model.summary(print_fn=self.logger.debug)\n\n # Define optimizer and training function\n self.optimizer = K.optimizers.Adam(lr=self.learning_rate)\n self.model.compile(loss='mse', optimizer=self.optimizer)", "def build(self):\n self.build_inputs()\n self.build_word_embeddings()\n 
self.build_encoder()\n self.build_fc()\n self.build_loss()\n self.build_global_step()", "def raw_model():\n model = cobra.Model(id_or_model=\"raw_model\", name=\"raw_model\")\n rxn_1 = cobra.Reaction(\"BIOMASS_TEST\")\n rxn_2 = cobra.Reaction(\"RXN2\")\n rxn_3 = cobra.Reaction(\"RXN3\")\n rxn_4 = cobra.Reaction(\"RXN4\")\n model.add_reactions([rxn_1, rxn_2, rxn_3, rxn_4])\n model.objective = rxn_3\n return model", "def build_refxtract_model(preprocessor, embed_size=128, hidden_size=128, device=None):\n return nn.Sequential(\n torch.nn.Embedding(preprocessor.get_vocab_size(), embed_size),\n torch.nn.Dropout(0.5),\n BiRNN(embed_size, hidden_size, 1, 4, device=device)\n )", "def construct_model(self, output_model_path):\n\n input_tensor = helper.make_tensor_value_info(\"input\", TensorProto.FLOAT, [1, 1, 7, 7])\n output_tensor = helper.make_tensor_value_info(\"output\", TensorProto.FLOAT, [1, 1, 8, 8])\n ini_w = helper.make_tensor(\"weight\", TensorProto.FLOAT, [1, 1, 2, 2], [1.0, 1.0, 1.0, 1.0])\n ini_b = helper.make_tensor(\"bias\", TensorProto.FLOAT, [1], [0.17])\n conv_tranpose_node = onnx.helper.make_node(\n \"ConvTranspose\",\n [\"input\", \"weight\", \"bias\"],\n [\"output\"],\n kernel_shape=[2, 2],\n output_padding=[0, 0],\n pads=[0, 0, 0, 0],\n strides=[1, 1],\n dilations=[1, 1],\n group=1,\n )\n graph = helper.make_graph(\n [conv_tranpose_node],\n \"conv_transpose_test\",\n [input_tensor],\n [output_tensor],\n initializer=[ini_w, ini_b],\n )\n model = helper.make_model(graph, opset_imports=[helper.make_opsetid(\"\", 13)])\n model.ir_version = 7 # use stable onnx ir version\n\n onnx.save(model, output_model_path)", "def makeModel(self):\n\n # Get the script\n modelScript = os.path.join(self.datapath, 'make3FGLxml.py')\n if not os.path.isfile(modelScript):\n # download it\n print(\"\\t=== Downloading make3FGLxml.py ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/user/make3FGLxml.py -O {}'.format(modelScript))\n\n # Create the model using Tyrel's script\n galModel = os.path.join(self.diffpath, 'gll_iem_v06.fits')\n isoModel = os.path.join(self.diffpath, 'iso_'+self.irf+'_v06.txt')\n if (not os.path.isfile(galModel)) or (not os.path.isfile(isoModel)):\n print(\"\\t=== Unable to find the diffuse models, check the variable '$FERMI_DIR' ===\")\n return\n if not os.path.isdir(self.extpath):\n print(\"\\t=== Unable to find models of extended sources, check the variable '$LATEXTDIR' ===\")\n return\n if not os.path.isfile(self.fermicat):\n # download it\n print(\"\\t=== Downloading 3FGL catalog ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/access/lat/4yr_catalog/gll_psc_v16.fit -O {}'.format(self.fermicat))\n\n os.popen(\"python {} {} {} -o {} -G {} -g 'gll_iem_v06'\\\n -I {} -i 'iso_source_v06' -e {} -r 5 -R 10 -ER 10\\\n -s 9 -m False -GIF False\".format(modelScript, self.fermicat,\n self.ft1, self.model, galModel, isoModel, self.extpath))\n\n # Add the target to the model\n tmpName = self.model + '.tmp'\n rfil = open(self.model, 'r')\n wfil = open(tmpName, 'w')\n # Copy the XML to the temporary model\n wfil.writelines([l for l in rfil.readlines() if not l=='</source_library>']) # copy everything but the last line\n wfil.write(' <source ROI_Center_Distance=\"0.00\" name=\"TARGET\" type=\"PointSource\">\\n')\n wfil.write(' <spectrum type=\"PowerLaw2\">\\n')\n wfil.write(' <parameter free=\"1\" max=\"1000\" min=\"1e-05\" name=\"Integral\" scale=\"1e-08\" value=\"0.3591824258\"/>\\n')\n wfil.write(' <parameter free=\"1\" max=\"1\" min=\"-5\" 
name=\"Index\" scale=\"1\" value=\"-2.7\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"LowerLimit\" scale=\"1\" value=\"100\"/>\\n')\n wfil.write('<parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"UpperLimit\" scale=\"1\" value=\"100000\"/>\\n')\n wfil.write(' </spectrum>\\n')\n wfil.write(' <spatialModel type=\"SkyDirFunction\">\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"RA\" scale=\"1.0\" value=\"'+str(self.ra)+'\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"DEC\" scale=\"1.0\" value=\"'+str(self.dec)+'\"/>\\n')\n wfil.write(' </spatialModel>\\n')\n wfil.write(' </source>\\n')\n wfil.write('</source_library>\\n')\n rfil.close()\n wfil.close()\n\n os.remove(self.model)\n os.rename(tmpName, self.model)\n \n print(\"\\t=== Source model {} added ===\".format(self.model))\n return", "def build(self, observation):\n raise NotImplementedError(\n 'Needs to be implemented as part of Embedder Interface')", "def _CreateSubmodel(unused_path):\n return confusion_matrices.ConfusionMatrices()" ]
[ "0.76988584", "0.7433274", "0.7433274", "0.7323292", "0.71259195", "0.69584244", "0.6808802", "0.6758032", "0.66716534", "0.65664864", "0.64607084", "0.6417122", "0.63994056", "0.6390134", "0.6358464", "0.6354998", "0.62842834", "0.62842834", "0.6279981", "0.6247293", "0.6068129", "0.60556364", "0.6050354", "0.60270905", "0.60200065", "0.60178995", "0.6015237", "0.5999892", "0.59869677", "0.59693146", "0.5965709", "0.5956064", "0.5941028", "0.5941027", "0.5929048", "0.5928227", "0.5901046", "0.5893631", "0.58860356", "0.5851409", "0.58469135", "0.58469135", "0.5844345", "0.58370733", "0.5836008", "0.5817312", "0.58110744", "0.58082336", "0.57986015", "0.5795607", "0.5783413", "0.57807153", "0.5778852", "0.5775536", "0.5768633", "0.5766623", "0.5766623", "0.5763451", "0.5759631", "0.5758932", "0.5754224", "0.574514", "0.5741029", "0.5738566", "0.5719819", "0.57059497", "0.57059497", "0.569563", "0.569563", "0.569563", "0.56932664", "0.56917906", "0.5690455", "0.56827277", "0.5682268", "0.5682268", "0.56816417", "0.56711626", "0.5667124", "0.5665806", "0.5657207", "0.56531954", "0.5651874", "0.5649468", "0.5640211", "0.563935", "0.5638574", "0.5634355", "0.5627045", "0.56264526", "0.5622104", "0.56123203", "0.56023425", "0.5601595", "0.5597607", "0.5592742", "0.55926794", "0.559153", "0.5590597", "0.5589746", "0.5589629" ]
0.0
-1
Generate the predictions of the original model on training and validation datasets. The original model is also trained if train = True.
def generate_original_preds(train = True):
    x_train, y_train, x_val, y_val, id_to_word = load_data()
    model = create_original_model()

    if train:
        filepath = "models/original.hdf5"
        checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
        callbacks_list = [checkpoint]
        model.fit(x_train, y_train, validation_data=(x_val, y_val), callbacks=callbacks_list, epochs=epochs, batch_size=batch_size)

    model.load_weights('./models/original.hdf5', by_name=True)

    pred_train = model.predict(x_train, verbose=1, batch_size=1000)
    pred_val = model.predict(x_val, verbose=1, batch_size=1000)
    if not train:
        print('The val accuracy is {}'.format(calculate_acc(pred_val, y_val)))
        print('The train accuracy is {}'.format(calculate_acc(pred_train, y_train)))
    np.save('data/pred_train.npy', pred_train)
    np.save('data/pred_val.npy', pred_val)
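The function above depends on load_data and calculate_acc helpers defined elsewhere in the source. One plausible sketch of calculate_acc, assuming predictions and labels are both (n, 2) softmax/one-hot arrays; the actual helper in the source may differ.

```python
import numpy as np

def calculate_acc(pred, y):
    # Fraction of rows where the predicted class (argmax of the softmax output)
    # matches the true class (argmax of the one-hot label).
    return np.mean(np.argmax(pred, axis=1) == np.argmax(y, axis=1))
```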
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit_predict(self):\n self.classifier = self.model\n self.classifier.fit(self.X_sample, self.y_sample)\n self.y_pred = self.classifier.predict(self.X_test)", "def get_predictions(fitted_model_filename):\n click.echo(\"Mode: predicting probabilities.\\n\")\n defaults = get_defaults()\n\n fitted_model_filename = add_extension(fitted_model_filename)\n fitted_model_path = os.path.join(defaults.OUTPUT.FITTED_MODELS_PATH, fitted_model_filename)\n new_options = [\"OUTPUT.FITTED_MODEL_PATH\", fitted_model_path]\n\n # boot_data = bootstrap(new_options, mode=\"internal_test\")\n # model = boot_data['model']\n #\n # X_test_int, y_test_int = boot_data['data']\n # internal_test_proba = model.predict_proba(X_test_int)\n # internal_test_proba = np.c_[y_test_int, internal_test_proba[:, 1]]\n\n boot_data = bootstrap(new_options, mode=\"external_test\")\n model = boot_data['model']\n X_test_ext, y_test_ext = boot_data['data']\n\n # fit scaler on train data and transform test data\n scaler = StandardScaler()\n X_train, y_train = load_data(defaults, which='train')\n\n numeric_cols = X_train.select_dtypes(include=np.float64).columns.tolist()\n scaler.fit(X_train[numeric_cols])\n X_test_ext.loc[:, numeric_cols] = scaler.transform(X_test_ext[numeric_cols])\n\n external_test_proba = model.predict_proba(X_test_ext)\n external_test_proba = np.c_[y_test_ext, external_test_proba[:, 1]]\n\n # internal_test_results_path = os.path.join(defaults.OUTPUT.PREDS_PATH, \"internal_test_preds.csv\")\n external_test_results_path = os.path.join(defaults.OUTPUT.PREDS_PATH,\n f\"external_test_preds_{fitted_model_filename.replace('.pkl', '')}.csv\")\n # pd.DataFrame(internal_test_proba, columns=['target', 'proba']).to_csv(internal_test_results_path, index=False)\n pd.DataFrame(external_test_proba, columns=['target', 'proba']).to_csv(external_test_results_path, index=False)", "def make_prediction(x_train, y_train, x_test, model):\n model.fit(x_train, y_train)\n y_predict = model.predict(x_test)\n return y_predict", "def predict(self):\n batch = get_predict_batch(1, num_rec_out=self.num_test_rec)\n self.g_model.test_batch(\n batch, self.global_step, num_rec_out=self.num_test_rec)", "def predict(self, test_batch_size=64, device='cuda', load=False, model_path=None, dataloader_num_workers=4, save_prediction=True):\n self.model.eval()\n self.device = device\n self.test_batch_size = test_batch_size\n if load:\n if model_path:\n self.load(model_path, device=self.device)\n else:\n model_path = os.path.join(path_checkpoints_dir, f\"{self.experiment_id}.pth\")\n print(f\"loaded model={model_path}\")\n self.load(model_path, device=self.device)\n if self.model is None:\n raise Exception(\"model cannot be None. 
Load or train the model before inference\")\n dataloader = self.data_module.get_test_dataloader(batch_size=self.test_batch_size, shuffle=False, num_workers=dataloader_num_workers)\n all_outputs = []\n tk0 = tqdm(enumerate(dataloader, 1), total=len(dataloader))\n for batch_id, data in tk0:\n for key, value in data.items():\n data[key] = value.to(self.device)\n # batch_outputs, batch_loss = self.model(**data)\n batch_outputs, batch_loss= self.validate_one_batch(data)\n all_outputs.append(batch_outputs.detach().cpu().numpy())\n predictions = np.concatenate(all_outputs, axis=0)\n if save_prediction:\n submission = pd.read_csv(path_sample_submission_file)\n assert submission.shape[0] == predictions.shape[0], \"unexpected behavior.code fix required\"\n submission.iloc[:, 1:] = predictions\n\n if not os.path.isdir(path_submissions_dir):\n os.mkdir(path_submissions_dir)\n submission.to_csv(os.path.join(path_submissions_dir, f\"{self.experiment_id}.csv\"), index=False)\n tk0.close()\n return predictions", "def _predict(self, test_dl: torch.utils.data.DataLoader) -> torch.Tensor:\n\n # Initialize an empty tensor to store the predicted output\n output = torch.tensor([]).to(cfg.training.device)\n # Set the model to evaluation mode (disables gradient computation and dropout)\n self.eval()\n # Disable gradient tracking for efficiency\n with torch.no_grad():\n # Iterate over the test data loader\n for x_batch in test_dl:\n # Move the batch to the appropriate device\n x_batch = x_batch.to(cfg.training.device)\n # Forward pass to obtain model predictions\n y_star = self.forward(x_batch)\n # Concatenate the predictions to the output tensor\n output = torch.cat((output, y_star), 0)\n\n # Return the tensor containing the predicted output\n return output", "def predict(model, X_testing):\n predictions = model.predict(X_testing)\n\n return predictions", "def predict(self):\n self.predicted_test_summary = []\n for step in xrange(0, self.test_size // self.test_batch_size):\n print 'Predicting Batch No.:', step\n offset = (step * self.test_batch_size) % self.test_size\n batch_data_fwd = self.X_tst_fwd[offset:(offset + self.test_batch_size), :].T\n batch_data_bwd = self.X_tst_bwd[offset:(offset + self.test_batch_size), :].T\n summary_test_out = self._predict_batch(batch_data_fwd, batch_data_bwd)\n self.predicted_test_summary.extend(summary_test_out)\n\n print 'Prediction Complete. 
Moving Forward..'\n\n # test answers\n self.test_review = self.X_tst_fwd\n self.predicted_test_summary = self.predicted_test_summary\n self.true_summary = self.Y_tst", "def predict(self): \n return self.model.predict(self.test_x)", "def test(self) -> None:\n\n self._predictions = self._lr.predict(self._X_test)", "def predict(self, x_test, y_test, model_path):\n tf.reset_default_graph()\n with tf.compat.v1.Session() as sess:\n saver = tf.compat.v1.train.import_meta_graph(model_path + \".meta\")\n saver.restore(sess, model_path)\n graph = tf.compat.v1.get_default_graph()\n x = graph.get_operation_by_name(\"x_input\").outputs[0]\n y = tf.compat.v1.get_collection(\"network_architecture\")[0]\n no_samples = x_test.shape[0]\n predictions = []\n n_iteration = no_samples // self.batch_size\n for step in range(n_iteration):\n x_batch, y_batch = get_batch_data(x_test, y_test, iter_step=step, batch_size=self.batch_size)\n preds = sess.run(y, feed_dict={x: x_batch})\n predictions.append(preds)\n return predictions", "def predict():\n\n predict_cfg = get_predict_args()\n device = get_device()\n print(device)\n\n # load checkpoint\n ckpt_path = find_ckpt_in_directory(predict_cfg.ckpt)\n ckpt = torch.load(ckpt_path, map_location=device)\n best_iter = ckpt[\"best_iter\"]\n cfg = ckpt[\"cfg\"]\n aspect = cfg[\"aspect\"]\n\n for k, v in cfg.items():\n print(\"{:20} : {:10}\".format(k, str(v)))\n\n eval_batch_size = 64\n\n print(\"Loading data\")\n dev_data = list(beer_reader(cfg[\"dev_path\"]))\n test_data = beer_annotations_reader(cfg[\"test_path\"], aspect=aspect)\n\n print(\"dev\", len(dev_data))\n print(\"test\", len(test_data))\n\n print(\"Loading pre-trained word embeddings\")\n vocab = Vocabulary()\n vectors = load_embeddings(cfg[\"embeddings\"], vocab) # required for vocab\n\n # build model\n model = build_model(cfg[\"model\"], vocab, cfg=cfg)\n\n # load parameters from checkpoint into model\n print(\"Loading saved model..\")\n model.load_state_dict(ckpt[\"state_dict\"])\n model.to(device)\n print(\"Done\")\n\n print(model)\n print_parameters(model)\n\n print(\"Evaluating\")\n dev_eval = evaluate_loss(\n model, dev_data, batch_size=eval_batch_size,\n device=device, cfg=cfg)\n test_eval = evaluate_loss(\n model, test_data, batch_size=eval_batch_size,\n device=device, cfg=cfg)\n\n if hasattr(model, \"z\"):\n path = os.path.join(\n cfg[\"save_path\"], \"final_rationales.txt\")\n test_precision, test_macro_prec = evaluate_rationale(\n model, test_data, aspect=aspect, device=device,\n batch_size=eval_batch_size, path=path)\n else:\n test_precision = 0.\n test_macro_prec = 0.\n test_eval[\"precision\"] = test_precision\n test_eval[\"macro_precision\"] = test_macro_prec\n\n dev_s = make_kv_string(dev_eval)\n test_s = make_kv_string(test_eval)\n\n print(\"best model iter {:d} dev {} test {}\".format(\n best_iter, dev_s, test_s))", "def fit_and_get_test_predictions(self, trace, tuning=True):\n pass", "def predict(self, data, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. Run build method or load model before fitting')\n return self.model.predict(data, 1, verbose) # ,steps)", "def predict(self, data, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. 
Run build method or load model before fitting')\n return self.model.predict(data, 1, verbose) # ,steps)", "def predict(self, test_dataset: Dataset) -> PredictionOutput:\n test_dataloader = self.get_test_dataloader(test_dataset)\n return self._prediction_loop(test_dataloader, description=\"Prediction\")", "def predict(self, model, x_test):\n pass", "def trainAndPredict(self):\r\n print(\"train\")\r\n filename= 'finalized_model.sav'\r\n # train the algorithm on training data and predict using the testing data\r\n model = self.svc_model.fit(self.X.T, self.Y)\r\n pickle.dump(model, open(filename, 'wb'))\r\n #model = pickle.load(open(filename, 'rb'))\r\n pred1 =model.predict(self.TestSet.T)\r\n # print the accuracy score of the model\r\n print(\"LinearSVC accuracy : \", accuracy_score(self.TestSetY, pred1, normalize=True))", "def predict(self, X_test):\n return self.model.predict(X_test)", "def _fit_predict(X_train, y_train, X_test):\n raise NotImplementedError()", "def predict(config: Config, device: torch.device, resume: Optional[ResumeInfo]) -> None:\n # pylint: disable=too-many-locals\n # Load datasets\n print(colored(\"loading training datasets:\", attrs=[\"bold\"]))\n dataset_factory = DatasetFactory()\n datasets, preprocessors = dataset_factory.create(config)\n print(f\"train: {len(datasets.train)}\")\n print(f\"val: {len(datasets.val)}\")\n print(f\"test: {len(datasets.test)}\")\n\n print(colored(\"saving question ids:\", attrs=[\"bold\"]))\n split_map = {\n \"train\": (config.training.data.train, datasets.train),\n \"val\": (config.training.data.val, datasets.val),\n \"test\": (config.training.data.test, datasets.test),\n }\n for split, (dataconfig, dataset) in split_map.items():\n root = Path(wandb.run.dir) / \"predictions\"\n if not root.exists():\n root.mkdir(parents=True)\n path = root / f\"{split}_ids.json\"\n start = int(dataconfig.subset[0] * len(dataset))\n end = int(dataconfig.subset[1] * len(dataset))\n subset = torch.utils.data.Subset(dataset, range(start, end))\n ids = [subset[i][\"question\"][\"questionId\"] for i in range(len(subset))]\n with open(path, \"w\") as file:\n json.dump(ids, file)\n\n # Create model runner\n print(colored(\"model:\", attrs=[\"bold\"]))\n runner_factory = RunnerFactory()\n runner = runner_factory.create(config, device, preprocessors, datasets, resume)\n print(f\"{runner.model=}\")\n\n print(colored(\"loading prediction datasets:\", attrs=[\"bold\"]))\n dataset_factory = DatasetFactory(training=False)\n datasets, pred_preprocessors = dataset_factory.create(config)\n print(f\"train: {len(datasets.train)}\")\n print(f\"val: {len(datasets.val)}\")\n print(f\"test: {len(datasets.test)}\")\n\n # Extend question embedding dictionary with pad vector for OOV.\n # The runner will check if a question token index is out of bounds and\n # set it to the padding index if so.\n runner.model.question_embeddings = torch.nn.Embedding.from_pretrained(\n torch.cat(\n (\n runner.model.question_embeddings.weight.data,\n torch.zeros(\n (\n len(pred_preprocessors.questions.index_to_word)\n - runner.model.question_embeddings.num_embeddings,\n runner.model.question_embeddings.embedding_dim,\n )\n ).to(device),\n ),\n dim=0,\n )\n )\n # Update datasets and preprocessors for prediction\n runner.datasets = datasets\n runner.preprocessors = pred_preprocessors\n\n print(colored(\"predicting:\", attrs=[\"bold\"]))\n runner.predict()", "def train_predict(descriptions_models,\n X_train, y_train,\n X_valid, y_valid,\n scoring=None):\n\n results = []\n for description, model in 
descriptions_models:\n\n scorer = check_scoring(model, scoring=scoring)\n result = {'description': description}\n\n # Train\n start = time.time()\n model.fit(X_train, y_train)\n result['time_train'] = time.time() - start\n\n # Predict train\n start = time.time()\n result['score_train'] = scorer(model, X_train, y_train)\n result['time_predict_train'] = time.time() - start\n\n # Predict validation\n start = time.time()\n result['score_valid'] = scorer(model, X_valid, y_valid)\n result['time_predict_valid'] = time.time() - start\n\n results.append(result)\n\n return pd.DataFrame(results)[[\n 'description', 'score_train', 'score_valid',\n 'time_train', 'time_predict_train', 'time_predict_valid']]", "def fit_and_predict(self, X_train, y_train, X_test, y_test):\n if self.feature_transform_func:\n X_train, X_test = self.feature_transform_func(X_train, X_test)\n\n self.fit(X_train, y_train)\n y_predict = self.predict(X_test)\n return self.Acu_eval(y_predict, y_test)", "def after_pred(self):\n # If training then skip\n if self.training:\n return\n\n # Get ground truths in epoch 0 i.e. start of training\n if self.epoch == 0:\n self.y_true.extend(self.y.cpu().flatten().numpy())\n\n # Get predictions from each batch and add them to prediction container\n y_pred = self.pred.detach().cpu()\n \n self.y_pred.extend(y_pred.flatten().numpy())", "def predict(self, test_data):\n return self.leader.predict(test_data)", "def make_predictions(self):\n if is_classification(self.model):\n if self.ct == None:\n prediction = self.model.predict(self.input_data.to_numpy())\n probabilities = self.model.predict_proba(self.input_data.to_numpy())\n return prediction, probabilities\n elif self.ct != None: \n prediction = self.model.predict(self.data_into_model())\n probabilities = self.model.predict_proba(self.data_into_model())\n return prediction, probabilities\n else:\n raise Exception((\"{} not supported. Please create an issue on Github\").format(self.model))\n \n else:\n if self.ct == None:\n prediction = self.model.predict(self.input_data)\n return prediction\n elif self.ct != None: \n prediction = self.model.predict(self.data_into_model())\n return prediction\n else:\n raise Exception((\"{} not supported. 
Please create an issue on Github\").format(self.self.model))", "def predict(self, test_data):\n random.seed(self.seed)\n preds = [{\"id\": instance['id'], \"prediction\": random.choice([0, 1])} for instance in test_data]\n return preds", "def make_predictions(self):\n \n self.Y = self.X.dot(self.w)", "def predict(self):\n path = self._artifact_repo.artifact_path(self._ARTIFACT_MODEL)\n model = tf.keras.models.load_model(path)\n\n _, _, x_test, y_test = self._load_data()\n x_test = tf.keras.utils.normalize(x_test, axis=1)\n\n preds = model.predict(x_test)\n self._show_cf_matrix(np.array([np.argmax(probas) for probas in preds]), y_test)", "def get_initial_predictions(tuner, input_data, output_path, model_save_name):\n\n best_model = tuner.best_estimator()\n batch_job = best_model.transformer(1, \"ml.m5.large\", output_path=output_path.as_uri(),\n model_name=model_save_name)\n batch_job.transform(input_data.as_uri())\n # TODO: Do an ls first so we can get any/all files\n output_file = output_path / 'validation.csv.out'\n with smart.open(output_file.as_uri(), 'r', transport_params={'session': boto_session}) as f:\n predictions = pd.read_csv(f, header=None)\n return predictions", "def predict(trained_model, X_test, y_test, image_name):\n if MODEL == 1:\n return predict_1(trained_model, X_test, y_test)\n elif MODEL == 3:\n if CROSS_VALIDATION:\n return cv_predict_3(trained_model, X_test, y_test)\n else:\n return predict_3(trained_model, X_test, y_test, image_name)\n elif MODEL == 2:\n return predict_2(trained_model, X_test, y_test)\n else:\n # For models 4, 5 and 6\n return predict_4(trained_model, X_test, y_test)", "def predict(self, test_data):\r\n return self.gs.predict(test_data)", "def fit_predict_model(self, X_train, y_train, X_test, pipeline):\n\n pipeline.fit(X_train, y_train)\n y_pred = pipeline.predict(X_test)\n return y_pred", "def test():\n # load dataset and model\n X, observed_y = load_data('../data/dev.txt')\n\n model = pickle.load(open('test.model', 'rb'))\n model.traverse()\n\n # predict labels for dataset\n preds = model.predict(X)\n\n # print(preds)\n # output model predictions\n np.savetxt('test.predictions', preds, fmt='%s')", "def _predict_preproc_model(self, model_cfg, model,):\n model = self._make_model(model_cfg['model_name'], databunch=self._data)\n model.model_param = model_cfg['model_param']\n model.wrapper_params = model_cfg['wrapper_params']\n return(model)", "def predict(self, scenes, tmp_dir):\n self.backend.load_model(tmp_dir)\n\n for scene in scenes:\n with scene.activate():\n labels = self.predict_scene(scene, tmp_dir)\n label_store = scene.prediction_label_store\n label_store.save(labels)\n\n if self.config.debug and self.config.predict_debug_uri:\n self.save_debug_predict_image(\n scene, self.config.predict_debug_uri)", "def predict_1(trained_model, X_test, y_test):\n # Predict with test data\n start_time = timeit.default_timer()\n test_prediction = trained_model.predict(X_test)\n end_time = timeit.default_timer()\n time = end_time - start_time\n speed = int(X_test.shape[0] / time)\n \n # Get loss and accuracy\n test_loss, test_accuracy = trained_model.evaluate(X_test, y_test)\n \n # Prepare results messages\n msg_time = \"prediction time: {:.3f}s ({}px/s)\\n\".format(time, speed)\n msg_test_loss = \"test_loss: {:.3f}\\n\".format(test_loss)\n msg_test_acc = \"test_accuracy: {:.3f}\\n\\n\".format(test_accuracy)\n \n # Write results messages\n with open(OUTPUT_FILE, 'a') as f:\n f.write(msg_time)\n f.write(msg_test_loss)\n f.write(msg_test_acc)", "def 
_predict(self, dataset):\n binary_predictions = ProxyClassifier._predict(self, dataset)\n self.ca.estimates = binary_predictions\n predictions = [ {-1: self.__predictneg,\n +1: self.__predictpos}[x] for x in binary_predictions]\n self.ca.predictions = predictions\n return predictions", "def predict(self, test_inputs, batch_size=None):\n if batch_size is None:\n num_batches = 1\n else:\n num_batches = util.ceil_divide(test_inputs.shape[0], batch_size)\n\n test_inputs = np.array_split(test_inputs, num_batches)\n pred_means = util.init_list(0.0, [num_batches])\n pred_vars = util.init_list(0.0, [num_batches])\n for i in range(num_batches):\n pred_means[i], pred_vars[i] = self.session.run(\n self.predictions, feed_dict={self.test_inputs: test_inputs[i]})\n\n return np.concatenate(pred_means, axis=0), np.concatenate(pred_vars, axis=0)", "def generate_post_preds(train = True):\n x_train, y_train, x_val, y_val = np.load('data/x_train_new.npy'),np.load('data/y_train.npy'),np.load('data/x_val_new.npy'),np.load('data/y_val.npy')\n with open('data/id_to_word.pkl','rb') as f:\n id_to_word = pickle.load(f) \n model = create_original_model()\n\n if train:\n filepath=\"./models/post.hdf5\"\n checkpoint = ModelCheckpoint(filepath, monitor='val_acc', \n verbose=1, save_best_only=True, mode='max')\n callbacks_list = [checkpoint]\n model.fit(x_train, y_train, validation_data=(x_val, y_val),callbacks = callbacks_list, epochs=epochs, batch_size=batch_size)\n\n model.load_weights('./models/post.hdf5', \n by_name=True) \n\n pred_train = model.predict(x_train,verbose = 1, batch_size = 1000)\n pred_val = model.predict(x_val,verbose = 1, batch_size = 1000)\n if not train:\n print('The val accuracy is {}'.format(calculate_acc(pred_val,y_val)))\n print('The train accuracy is {}'.format(calculate_acc(pred_train,y_train)))", "def predict(self, epochs): # noqa\n\n # Check that classifier has predict_method (e.g. 
predict_proba is not\n # always available):\n if not hasattr(self.clf, self.predict_method):\n raise NotImplementedError('%s does not have \"%s\"' % (\n self.clf, self.predict_method))\n\n # Check that at least one classifier has been trained\n if not hasattr(self, 'estimators_'):\n raise RuntimeError('Please fit models before trying to predict')\n\n # Check predict mode\n if self.predict_mode not in ['cross-validation', 'mean-prediction']:\n raise ValueError('predict_mode must be a str, \"mean-prediction\" '\n 'or \"cross-validation\"')\n\n # Check that training cv and predicting cv match\n if self.predict_mode == 'cross-validation':\n n_est_cv = [len(estimator) for estimator in self.estimators_]\n heterogeneous_cv = len(set(n_est_cv)) != 1\n mismatch_cv = n_est_cv[0] != len(self._cv_splits)\n mismatch_y = len(self.y_train_) != len(epochs)\n if heterogeneous_cv or mismatch_cv or mismatch_y:\n raise ValueError(\n 'When predict_mode = \"cross-validation\", the training '\n 'and predicting cv schemes must be identical.')\n\n # Clean attributes\n for att in ['y_pred_', 'test_times_', 'scores_', 'scorer_', 'y_true_']:\n if hasattr(self, att):\n delattr(self, att)\n _warn_once.clear() # reset self-baked warning tracker\n\n X, y, _ = _check_epochs_input(epochs, None, self.picks_)\n\n if not np.all([len(test) for train, test in self._cv_splits]):\n warn('Some folds do not have any test epochs.')\n\n # Define testing sliding window\n if self.test_times == 'diagonal':\n test_times = _DecodingTime()\n test_times['slices'] = [[s] for s in self.train_times_['slices']]\n test_times['times'] = [[s] for s in self.train_times_['times']]\n elif isinstance(self.test_times, dict):\n test_times = copy.deepcopy(self.test_times)\n else:\n raise ValueError('test_times must be a dict or \"diagonal\"')\n\n if 'slices' not in test_times:\n if 'length' not in self.train_times_.keys():\n ValueError('Need test_times[\"slices\"] with adhoc train_times.')\n # Check that same number of time sample in testing than in training\n # (otherwise it won 't be the same number of features')\n test_times['length'] = test_times.get('length',\n self.train_times_['length'])\n # Make a sliding window for each training time.\n slices_list = list()\n for _ in range(len(self.train_times_['slices'])):\n test_times_ = _sliding_window(epochs.times, test_times,\n epochs.info['sfreq'])\n slices_list += [test_times_['slices']]\n test_times = test_times_\n test_times['slices'] = slices_list\n test_times['times'] = [_set_window_time(test, epochs.times)\n for test in test_times['slices']]\n\n for train, tests in zip(self.train_times_['slices'],\n test_times['slices']):\n # The user may define irregular timing. We thus need to ensure\n # that the dimensionality of each estimator (i.e. 
training\n # time) corresponds to the dimensionality of each testing time)\n if not np.all([len(test) == len(train) for test in tests]):\n raise ValueError('train_times and test_times must '\n 'have identical lengths')\n\n # Store all testing times parameters\n self.test_times_ = test_times\n\n n_orig_epochs, _, n_times = X.shape\n\n # Subselects the to-be-predicted epochs so as to manipulate a\n # contiguous array X by using slices rather than indices.\n test_epochs = []\n if self.predict_mode == 'cross-validation':\n test_idxs = [ii for train, test in self._cv_splits for ii in test]\n start = 0\n for _, test in self._cv_splits:\n n_test_epochs = len(test)\n stop = start + n_test_epochs\n test_epochs.append(slice(start, stop, 1))\n start += n_test_epochs\n X = X[test_idxs]\n\n # Prepare parallel predictions across testing time points\n # FIXME Note that this means that TimeDecoding.predict isn't parallel\n parallel, p_func, n_jobs = parallel_func(_predict_slices, self.n_jobs)\n n_test_slice = max(len(sl) for sl in self.test_times_['slices'])\n # Loop across estimators (i.e. training times)\n n_chunks = min(n_test_slice, n_jobs)\n chunks = [np.array_split(slices, n_chunks)\n for slices in self.test_times_['slices']]\n chunks = map(list, zip(*chunks))\n\n # To minimize memory during parallelization, we apply some chunking\n y_pred = parallel(p_func(\n estimators=self.estimators_, cv_splits=self._cv_splits,\n predict_mode=self.predict_mode, predict_method=self.predict_method,\n n_orig_epochs=n_orig_epochs, test_epochs=test_epochs,\n **dict(zip(['X', 'train_times'], _chunk_data(X, chunk))))\n for chunk in chunks)\n\n # Concatenate chunks across test time dimension.\n n_tests = [len(sl) for sl in self.test_times_['slices']]\n if len(set(n_tests)) == 1: # does GAT deal with a regular array/matrix\n self.y_pred_ = np.concatenate(y_pred, axis=1)\n else:\n # Non regular testing times, y_pred is an array of arrays with\n # different lengths.\n # FIXME: should do this with numpy operators only\n self.y_pred_ = [[test for chunk in train for test in chunk]\n for train in map(list, zip(*y_pred))]\n return self.y_pred_", "def prediction(input_path=INPUT_DIR,\n output_path=OUTPUT_DIR,\n model_path=MODEL_PATH,\n test=False):\n\n X = tf.placeholder(shape=[None, chunk_size, chunk_size], dtype=tf.float32, name='input_area')\n y_inter = deepcn.deepcn(X, chunk_size, False)\n y_pred = tf.cast(tf.argmax(tf.squeeze(y_inter), -1), tf.uint8)\n\n img_ids = []\n for name in os.listdir(input_path):\n if os.path.isdir(os.path.join(input_path, name)):\n img_ids.append(name)\n all_preds = np.zeros((len(img_ids), 256, 256))\n print('num of images: ', len(img_ids))\n\n loader = tf.train.Saver()\n\n with tf.Session() as sess:\n print(\"Import model from: %s\" %model_path)\n loader.restore(sess, model_path)\n # sess.run(tf.global_variables_initializer())\n\n batch_start_pos = 0\n while batch_start_pos < len(img_ids):\n batch_size = 100\n batch_end_pos = min(batch_start_pos + batch_size, len(img_ids))\n print('predict from %s, to %s' % (batch_start_pos, batch_end_pos))\n batch = img_ids[batch_start_pos:batch_end_pos]\n pw = predict_data_wrapper.PredictWrapper(path=input_path,\n resize_size=chunk_size,\n img_ids=batch)\n input_arr = pw.ResizedTestData()\n print(\"input_arr.shape: \", input_arr.shape)\n # input test_data_batch, output prediction of shape batch_size * 256 * 256\n pred_arr = sess.run(y_pred, feed_dict={X: input_arr})\n print(\"pred_arr.shape: \", pred_arr.shape)\n all_preds[batch_start_pos:batch_end_pos] = 
pred_arr\n pw.OutputPrediction(pred_arr*100, path=output_path)\n batch_start_pos = batch_end_pos\n\n # Use all img_ids and all_preds to generate single cell split csv file\n pw = predict_data_wrapper.PredictWrapper(path=input_path,\n resize_size=chunk_size,\n img_ids=img_ids)\n pw.GenerateSubmit(all_preds, output_path, cutoff=0.5)", "def predict(self, data, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. Run build method or load model before fitting')\n # print len(data[0])\n # print type(data[0])\n # print data.shape\n return self.model.predict(data, 1, verbose) # ,steps)", "def predict(self, test_set, test_labels):\n\n with tf.Session() as self.tf_session:\n self.tf_saver.restore(self.tf_session, self.models_dir + self.model_name)\n return self.accuracy.eval({self.input_data: test_set, self.input_labels: test_labels})", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def predict(self, dataset):\n # TODO: self.model(training=False)\n # logging.info('Predicting')\n # if self.verbosity > 1:\n # print('Predicting')\n dataset = rdata.data2dataset(dataset) # Convert to dataset\n assert dataset.get_dim_input() == self.n_inputs, \\\n 'Number of covariates does not match the model %d -> %d' % (dataset.get_dim_input(), self.n_inputs)\n n_data = dataset.get_n_data()\n\n pred = self._predict(dataset=dataset) # Predict\n\n if self.isprobabilistic():\n assert pred[0].shape == (n_data, self.n_outputs)\n assert pred[1].shape == (n_data, self.n_outputs)\n else:\n assert pred.shape == (n_data, self.n_outputs)\n return pred", "def get_model_predictions(\n self,\n model: Type[Model],\n start_task_index: int = 0,\n stop_task_index: Optional[int] = None,\n batched: bool = False,\n batch_size: int = _DEFAULT_BATCH_SIZE,\n skip_validation: bool = False,\n ) -> Dict[str, Dict[str, Union[str, float]]]:\n predictions = {}\n if not batched:\n batch_size = None\n n_tasks = (stop_task_index or self.n_tasks) - start_task_index\n with tqdm(total=n_tasks) as pbar:\n if not batched:\n for support_x, support_y, query_x, metadata in self.get_tasks(\n start_task_index=start_task_index,\n stop_task_index=stop_task_index,\n ):\n query_y, scores = _parse_fit_and_predict_result(\n model.fit_and_predict(\n support_x=support_x,\n support_y=support_y,\n target_x=query_x,\n metadata=metadata,\n )\n )\n if not skip_validation:\n validate(query_y, metadata['labels'])\n predictions.update(\n _convert_fit_and_predict_result_to_predictions(\n query_y=query_y,\n scores=scores,\n query_question_ids=metadata['query_question_ids']\n )\n )\n pbar.update(1)\n else:\n for batch in grouper(\n batch_size,\n self.get_tasks(\n start_task_index=start_task_index,\n stop_task_index=stop_task_index,\n )\n ):\n support_x, support_y, query_x, metadata = zip(*(b for b in batch if b is not None))\n n_tasks_in_batch = len(support_x)\n query_y, scores = _parse_fit_and_predict_result(\n model.fit_and_predict(\n support_x=support_x,\n support_y=support_y,\n target_x=query_x,\n metadata=metadata,\n )\n )\n try:\n query_y = flatten(query_y)\n scores = flatten(scores) if scores is not None else None\n except TypeError:\n # Already flattened\n pass\n query_question_ids_flat = flatten(m['query_question_ids'] for m in metadata)\n if not skip_validation:\n validate(query_y, metadata['labels'])\n predictions.update(\n _convert_fit_and_predict_result_to_predictions(\n query_y=query_y,\n scores=scores,\n query_question_ids=query_question_ids_flat,\n )\n )\n pbar.update(n_tasks_in_batch)\n return 
predictions", "def test_fit_predict() -> None:\n mapie = MapieRegressor()\n mapie.fit(X_toy, y_toy)\n mapie.predict(X_toy)", "def correct_age_predictions(train_preds, train_age, test_preds, test_age):\n lr = LinearRegression()\n\n train_resids = np.array(train_preds - train_age)\n test_resids = np.array(test_preds - test_age)\n\n # fit model\n lr.fit(train_age[:,np.newaxis], train_resids)\n\n # predict test residuals using age\n pred_resid = lr.predict(test_age[:,np.newaxis])\n\n # correct model predictions\n corrected_predictions = test_preds - pred_resid\n\n return corrected_predictions", "def fit_predict_single_fold(\n self, train: TabularDataset, valid: TabularDataset\n ) -> Tuple[TorchBasedLinearEstimator, np.ndarray]:\n if type(train) is PandasDataset:\n train = train.to_numpy()\n valid = valid.to_numpy()\n\n model = self._infer_params()\n\n model.fit(\n train.data,\n train.target,\n train.weights,\n valid.data,\n valid.target,\n valid.weights,\n )\n\n val_pred = model.predict(valid.data)\n\n return model, val_pred", "def run(self, orig_target_df):\n\n # For each fold\n for fold_idx, (fold_training_set_df, fold_testing_set_df, fold_target_df, fold_truth_df) in enumerate(self._generate_validation_fold()):\n train_test_date_split = fold_training_set_df[\"date\"].max()\n eval_start_date = train_test_date_split - timedelta(days = self.test_nb_days)\n date_to_predict = train_test_date_split + timedelta(days = 1)\n print(\"Warning: date_to_predict offset should be computed dynamically. Currently fixed to 1.\")\n\n # For each prediction method\n for process, process_name in zip(self.process_lst, self.process_names_lst):\n print(\"Running validation for process:\", process_name, \"on fold:\", fold_idx, \"...\")\n\n # Train the model\n with open(self.data_cache_path_str + \"data_bkp.pkl\", \"wb\") as f:\n pickle.dump((fold_training_set_df, fold_testing_set_df, fold_target_df, fold_truth_df), f)\n\n y_train = fold_target_df[\"demand\"].reset_index(drop = True)\n model = process(train_test_date_split, eval_start_date)\n model.fit(fold_training_set_df, y_train)\n\n # Generate predictions for validation set\n preds = model.predict(fold_testing_set_df, date_to_predict)\n preds[\"demand\"] = (orig_target_df[\"shifted_demand\"] + preds[\"demand\"]).apply(np.expm1)\n\n # Score the predictions\n preds2 = preds.copy()\n preds2.columns = [\"id\", \"date\", \"preds\"]\n preds_rmse_by_date_df = preds2.merge(fold_truth_df, how = \"left\", on = [\"id\", \"date\"])\n preds_rmse_by_date_df = preds_rmse_by_date_df[[\"date\", \"preds\", \"demand\"]].groupby(\"date\").apply(lambda x: self._rmse(x[\"demand\"], x[\"preds\"])).reset_index()\n preds_rmse_by_date_df.columns = [\"date\", \"preds_rmse\"]\n\n best_preds_piv = preds[[\"id\", \"date\", \"demand\"]].pivot(index = \"id\", columns = \"date\", values = \"demand\").reset_index()\n truth_piv = fold_truth_df[[\"id\", \"date\", \"demand\"]].pivot(index = \"id\", columns = \"date\", values = \"demand\").reset_index()\n truth_piv.set_index(\"id\", inplace = True)\n best_preds_piv.set_index(\"id\", inplace = True)\n best_preds_piv.columns = [\"F\" + str(i) for i in range(1, 29)]\n truth_piv.columns = [\"F\" + str(i) for i in range(1, 29)]\n validation_WRMSSE = round(model.evaluator.wrmsse(best_preds_piv, truth_piv, score_only = True), 6)\n\n # Save result for later use\n self.scores[process_name].append((fold_idx, preds_rmse_by_date_df, validation_WRMSSE))\n \n if self.verbose == True: \n print(process_name, \"had a score of\", validation_WRMSSE, \"on validation 
period\", fold_testing_set_df[\"date\"].min(), \"to\", fold_testing_set_df[\"date\"].max())\n\n metrics_lst = []\n for process_name, content in self.scores.items():\n for fold_idx, preds_rmse_by_date_df, validation_WRMSSE in content:\n preds_rmse_by_date_df[\"process_name\"] = process_name\n preds_rmse_by_date_df[\"fold_idx\"] = fold_idx\n preds_rmse_by_date_df[\"WRMSSE\"] = validation_WRMSSE\n metrics_lst.append(preds_rmse_by_date_df)\n\n metrics_df = pd.concat(metrics_lst, axis = 0)\n metrics_df.set_index(\"date\", inplace = True)\n\n return metrics_df", "def __train_and_predict(self, X_train, y, X_test):\n self.model.fit(X_train, y, eval_metric='auc')\n prediction_probs = self.model.predict_proba(X_train)[:, 1]\n print \"Training auc = %f\" % roc_auc_score(y, prediction_probs)\n self.__write_csv(prediction_probs,\n X_train.shape[0], self.train_out_file)\n\n prediction_probs = self.model.predict_proba(X_test)[:, 1]\n self.__write_csv(prediction_probs,\n X_test.shape[0], self.test_out_file)\n\n self.feature_imp()", "def train(\n self, training_data: Dataset, validation_data: Optional[Dataset] = None\n ) -> Predictor:\n raise NotImplementedError", "def predict_4(trained_model, X_test, y_test):\n # Predict with test data\n start_time = timeit.default_timer()\n trained_model.predict(X_test)\n end_time = timeit.default_timer()\n time = end_time - start_time\n speed = int(X_test.shape[0] / time)\n \n # Get loss and accuracy\n test_accuracy = trained_model.score(X_test, y_test)\n \n # Prepare results messages\n msg_time = \"prediction time: {:.3f}s ({}px/s)\\n\".format(time, speed)\n msg_test_acc = \"test_accuracy: {:.3f}\\n\\n\".format(test_accuracy)\n \n # Write results messages\n with open(OUTPUT_FILE, 'a') as f:\n f.write(msg_time)\n f.write(msg_test_acc)", "def train_predict(model_list,X_train, X_test, y_train, y_test):\n P = np.zeros((y_test.shape[0], len(model_list)))\n P = pd.DataFrame(P)\n\n print(\"Fitting models.\")\n cols = list()\n for i, (name, m) in enumerate(models.items()):\n print(\"%s...\" % name, end=\" \", flush=False)\n m.fit(X_train, y_train)\n P.iloc[:, i] = m.predict_proba(X_test)[:, 1]\n cols.append(name)\n print(\"done\")\n\n P.columns = cols\n print(\"Done.\\n\")\n return P", "def generate_predictions(inputs, model, tokenizer):\n # Generate model results\n outputs = model(**inputs)\n\n # Convert logit outputs into predictions for table cells and aggregation operators\n predicted_table_cell_coords, predicted_aggregation_operators = tokenizer.convert_logits_to_predictions(\n inputs,\n outputs.logits.detach(),\n outputs.logits_aggregation.detach()\n )\n\n print(predicted_table_cell_coords)\n print(predicted_aggregation_operators)\n\n # Return values\n return predicted_table_cell_coords, predicted_aggregation_operators", "def test_predict(self):\n # Check build does not raise errors\n dataset = KDDCupDataset()\n dataset.create_fixed_samples(\n *self.data, samples_num=1, partition_sizes=self.partition_sizes)\n dataset.set_current_sample(0)\n model = self.MODEL(dataset, **self.model_arguments)\n model.fit(training_epochs=50)\n true, predictions = model.predict('test')\n expected_size = ((dataset.num_examples('test') //\n model.batch_size) * model.batch_size)\n self.assertEqual(true.shape[0], expected_size)\n self.assertEqual(true.shape, predictions.shape)", "def predict_2(trained_model, X_test, y_test):\n # Predict with test data\n start_time = timeit.default_timer()\n test_prediction = trained_model.predict(X_test)\n end_time = timeit.default_timer()\n time = end_time - 
start_time\n speed = int(X_test.shape[0] / time)\n \n # Get loss and accuracy\n test_loss, test_accuracy = trained_model.evaluate(X_test, y_test)\n \n # Prepare results messages\n msg_time = \"prediction time: {:.3f}s ({}px/s)\\n\".format(time, speed)\n msg_test_loss = \"test_loss: {:.3f}\\n\".format(test_loss)\n msg_test_acc = \"test_accuracy: {:.3f}\\n\\n\".format(test_accuracy)\n \n # Write results messages\n with open(OUTPUT_FILE, 'a') as f:\n f.write(msg_time)\n f.write(msg_test_loss)\n f.write(msg_test_acc)", "def fit_predict(self, train_x: pd.DataFrame, train_y: pd.Series, test_x: pd.DataFrame, test_y: pd.Series) -> dict:\n self.evaluator.fit(train_x, train_y, test_x, test_y)\n predictions = self.evaluator.predict(test_x)\n print(predictions)\n metrics = metrics_stat(predictions, test_y)\n return metrics", "def predict(model, dataset_info, args):\n dataset_info, model_info = fill_info_dicts(dataset_info, args)\n\n fill_pred_op_info(dataset_info, model, args, model_info)\n # fill_topic_op(args, model_info)\n\n str_ = 'Predictions of the given text data of dataset %s using different ' \\\n 'saved models:' % args.predict_dataset\n labels = [str(i) for i in dataset_info[args.predict_dataset]['labels']]\n if len(labels) == 2 or args.task == 'regression':\n # TODO currently just hard code for binary\n header = 'id\\tlabel\\t' + str(1) + '\\n'\n else:\n header = 'id\\tlabel\\t' + '\\t'.join(labels) + '\\n'\n\n saver = tf.train.Saver(max_to_keep=100)\n\n model_names = args.datasets\n if len(args.datasets) > 1:\n model_names.append('MULT')\n\n for model_name in model_names:\n # load the saved best model\n str_ += '\\nUsing the model that performs the best on (%s)\\n' % model_name\n\n output = header\n str_ += header\n\n data = []\n\n with tf.Session() as sess:\n if model_name == 'MULT':\n checkpoint_path = os.path.join(args.checkpoint_dir, 'MULT',\n 'model')\n else:\n checkpoint_path = model_info[model_name]['checkpoint_path']\n\n saver.restore(sess, checkpoint_path)\n\n dataset_name = args.predict_dataset\n\n # import pdb\n # sess.run(model_info[dataset_name]['pred_iter'].initializer)\n # batch = model_info[dataset_name]['pred_batch']\n # text, weights = sess.run([batch['text'], batch['text_weights']])\n # pdb.set_trace()\n\n _pred_op = model_info[dataset_name]['pred_pred_op']\n _pred_iter = model_info[dataset_name]['pred_iter']\n _ids, _predictions, _scores = get_all_pred_res(sess, _pred_op,\n _pred_iter, args)\n\n for id, pred, score in zip(_ids, _predictions, _scores):\n record = {\n 'id': id,\n 'label': pred\n }\n if args.task == 'classification':\n for l, s in zip(labels, score):\n record[str(l)] = s\n else:\n record['score'] = score[0]\n data.append(record)\n\n # output positive score for binary classification\n\n if len(score) == 2:\n score = str(score[1])\n else:\n score = '\\t'.join([str(i) for i in score])\n str_ += id + '\\t' + str(int(pred)) + '\\t' + score + '\\n'\n output += id + '\\t' + str(int(pred)) + '\\t' + score + '\\n'\n\n make_dir(args.predict_output_folder)\n\n with open(\n os.path.join(args.predict_output_folder, model_name) + '.tsv',\n 'w') as file:\n # for i in _predictions:\n # file.write(str(i))\n file.write(output)\n\n with open(\n os.path.join(args.predict_output_folder, model_name) + '.json',\n 'wt') as file:\n json.dump(data, file, ensure_ascii=False)\n\n logging.info(str_)", "def evaluate(self):\n\n\t\t## We should be evaluating on dev dataset as well, so commenting x_test\n\t\t#self.model_score = self.model.evaluate(self.x_test, self.y_test_oh, 
batch_size=2048)\n\t\tself.model_score = self.model.evaluate(self.x_dev, self.y_dev_oh, batch_size=2048)\n\t\tprint(\"%s score = %f\\n\" %(self.modelName, self.model_score[1]))\n\n\t\t##Saving atucal vs predicted predictions\n\t\t##np.argmax returns the index where it see's 1 in the row\n\t\t#y_pred = np.argmax(self.model.predict(self.x_test, batch_size=2048), axis=1)\n\t\ty_pred = np.argmax(self.model.predict(self.x_dev, batch_size=2048), axis=1)\n\n\t\t## vstack will stack them in 2 rows, so we use Trasnpose to get them in column stack\n\t\t#output_predict = np.vstack((np.argmax(self.y_test_oh, axis=1), y_pred)).T\n\t\toutput_predict = np.vstack((np.argmax(self.y_dev_oh, axis=1), y_pred)).T\n\t\toutputFile = self.resultDir + \"/outputPredict.csv\" \n\t\tnp.savetxt(outputFile, output_predict, fmt=\"%5.0f\", delimiter=\",\")\n\n\t\t##Error Analysis of the prediction\n\t\terrorAnalysis(outputFile)\n\n\t\treturn self.model_score", "def postprocess_model_outputs(self, predictions, expected):\n\n predictions = {k: t.numpy() for k, t in predictions.items()}\n\n return predictions, expected", "def load_and_predict(self):\n if DataLoader.data is None:\n messagebox.showerror(\"Information\", \"Data file is empty, please load the data first.\")\n return\n\n path = filedialog.askopenfilename()\n with open(path, 'rb') as file:\n Trainer.model = pickle.load(file)\n\n scale = DataLoader.data['out'].max() - DataLoader.data['out'].min()\n scaler = MinMaxScaler(feature_range=(0, 1))\n scaler.fit(DataLoader.data)\n data_scaled = pd.DataFrame(scaler.transform(DataLoader.data), columns=DataLoader.data.columns)\n\n Trainer.y_pred = batch_predict(Trainer.model, data_scaled.drop(columns=['out']))\n Trainer.y_true = data_scaled['out']\n\n self.test_rmse = scale * math.sqrt(mean_squared_error(Trainer.y_pred, Trainer.y_true))\n print(self.test_rmse)\n self.r_squared = np.corrcoef(Trainer.y_pred * scale, data_scaled['out'] * scale)[0, 1] ** 2\n print(self.r_squared)\n\n models = Trainer.model.get_models()\n param_string = f'Component Function Trained Parameters:\\n'\n for i in range(len(models)):\n param_string += \"length scale: {:.4f}\".format(models[i].kernel_.k1.length_scale) + ' ' + \\\n \"noise level: {:.4e}\".format(models[i].kernel_.k2.noise_level) + '\\n'\n param_string += f'\\nRMSE on the test set: {self.test_rmse}\\n'\n param_string += f'R^2 value on the test set: {self.r_squared}'\n display_params = ttk.Label(self, text=param_string, width=40)\n display_params.grid(row=24 + 7, column=0, columnspan=2, sticky=tk.W + tk.E)", "def model_predict(classifier, X_test:list) -> list:\n y_predict = classifier.predict(X_test)\n return y_predict", "def predict_only(self):", "def predict(self, inputs):\n return self.model.predict(inputs)", "def predictions(self, model):\n return get_predictions_from_df(\n model=model, df=self.prediction_df,\n fixed_effects=self.fixed_effects,\n random_effect=self.random_effect,\n spline=self.spline,\n offset=self.offset,\n )", "def _predict(cls, model, is_log_transformed,\n raw_actual, interpolated_actual,\n training_end=None, seasonal_feature_scoring=None, pred_date=None, order_of_diff=None,\n training_tail=None, ext_training_features=None, pred_len=None, freq=None,\n include_holidays_exog=None):\n\n import numpy as np\n import pandas as pd\n import scipy.stats as st\n from numpy.linalg import LinAlgError\n import math\n\n alpha = cls._sig_level\n alpha_extreme = cls._sig_level_extreme\n\n include_holidays_exog = include_holidays_exog if ext_training_features else 0\n\n index = 
pd.date_range(start=training_end, end=pred_date, freq=freq)[1:] # Holidays are always daily.\n\n de_obj = DataExploration()\n pred_exog = de_obj._get_exog_data(pred_date, pred_date, index) if include_holidays_exog else None\n\n if pred_exog is not None and set(pred_exog.columns.values) != set(ext_training_features):\n missing_col_list = list(set(ext_training_features) - set(pred_exog.columns.values))\n common_cols = list(set(ext_training_features).intersection(set(pred_exog.columns.values)))\n temp_df = pred_exog[common_cols]\n missing_feat_df = pd.DataFrame(np.zeros([len(pred_exog), len(missing_col_list)]),\n columns=missing_col_list, index=pred_exog.index.values)\n pred_exog = pd.concat([temp_df, missing_feat_df], axis=1)\n pred_exog = pred_exog[ext_training_features]\n\n freq = \"1\" + freq if not any(char.isdigit() for char in freq) else freq\n\n forecast_ndays = int((pred_date - pd.Timestamp(training_end)) / pd.Timedelta(freq))\n model_freshness = forecast_ndays / float(pred_len)\n\n try:\n if forecast_ndays > pred_len:\n raise ValueError('Current trained model object expired')\n\n float_min = 1e-10\n\n # set exogenous (holiday) variables for input data\n if include_holidays_exog:\n pred_exog = pred_exog.loc[pd.Timestamp(training_end) + pd.Timedelta(freq): pred_date]\n else:\n pred_exog = None\n\n if seasonal_feature_scoring:\n if not include_holidays_exog:\n pred_exog = seasonal_feature_scoring[:forecast_ndays]\n else:\n pred_exog['fourier_feature'] = seasonal_feature_scoring[:forecast_ndays]\n\n forecast = list(model.forecast(steps=forecast_ndays, alpha=alpha, exog=pred_exog))\n interpolated_training_data = list(zip(*training_tail))[1]\n\n for order in list(reversed(range(order_of_diff))):\n training_data_diff = np.diff(interpolated_training_data,\n order) if order > 0 else interpolated_training_data\n\n forecast_diff_mean = [training_data_diff[-1]]\n forecast_diff_ci = []\n\n for i in range(forecast_ndays):\n forecast_diff_mean.append(forecast_diff_mean[-1] + forecast[0][i])\n forecast_diff_ci.append([forecast_diff_mean[-1] -\n (st.norm.ppf(1 - (alpha / 2.0)) * forecast[1][i]),\n forecast_diff_mean[-1] +\n (st.norm.ppf(1 - (alpha / 2.0)) * forecast[1][i])])\n forecast[0] = forecast_diff_mean[1:]\n forecast[2] = forecast_diff_ci\n\n if is_log_transformed:\n transformed_back_forecast = np.exp(forecast[0][-1] + ((forecast[1][-1] ** 2) / 2.0)) - 1\n transformed_back_std_err = np.sqrt((np.exp(forecast[1][-1] ** 2) - 1) * (np.exp((2 * forecast[0][-1]) +\n (forecast[1][\n -1] ** 2))))\n transformed_back_CILower = transformed_back_forecast - \\\n st.norm.ppf(1 - (alpha / 2.0), 0, transformed_back_std_err) \\\n if transformed_back_std_err != 0 else transformed_back_forecast\n transformed_back_CIUpper = transformed_back_forecast + \\\n st.norm.ppf(1 - (alpha / 2.0), 0, transformed_back_std_err) \\\n if transformed_back_std_err != 0 else transformed_back_forecast\n transformed_back_interpolated_actual = float(np.exp(interpolated_actual) - 1)\n if np.sum(np.isnan(forecast[0][-1])) or np.isnan(forecast[1][-1]):\n raise ValueError('Predicted null value')\n\n if is_log_transformed:\n zscore = (transformed_back_interpolated_actual -\n transformed_back_forecast) / max(float(transformed_back_std_err), float_min)\n\n anomaly_probability = (2 * st.norm(0, 1).cdf(abs(zscore))) - 1\n if math.isnan(anomaly_probability) or math.isnan(transformed_back_CILower) \\\n or math.isnan(transformed_back_CIUpper):\n raise ValueError('Either Anomaly probability or CILower or CIUpper is NaN under log transform')\n 
down_anomaly_probability = 1 - st.norm(0, 1).cdf(zscore)\n up_anomaly_probability = st.norm(0, 1).cdf(zscore)\n\n result = {'Success': True,\n 'IsLogTransformed': is_log_transformed,\n 'LogTransformedAdjustedActual': interpolated_actual,\n 'LogTransformedPrediction': float(forecast[0][-1]),\n 'LogTransformedStdErr': float(forecast[1][-1]),\n 'LogTransformedCILower': float(forecast[2][-1][0]),\n 'LogTransformedCIUpper': float(forecast[2][-1][1]),\n 'AdjustedActual': transformed_back_interpolated_actual,\n 'Prediction': float(transformed_back_forecast) if not float(\n transformed_back_forecast) == float('inf') else 0.0,\n 'StdErr': float(transformed_back_std_err) if not float(\n transformed_back_std_err) == float('inf') else 0.0,\n 'CILower': float(transformed_back_CILower) if not float(\n transformed_back_CILower) == float('-inf') else 0.0,\n 'CIUpper': float(transformed_back_CIUpper) if not float(\n transformed_back_CIUpper) == float('inf') else 0.0,\n 'ConfLevel': float(1.0 - alpha) * 100,\n 'ExogenousHolidays': include_holidays_exog,\n 'IsAnomaly': bool(anomaly_probability > 1 - alpha),\n 'IsAnomalyExtreme': bool(anomaly_probability > 1 - alpha_extreme),\n 'AnomalyProbability': 1 if raw_actual is None else float(anomaly_probability),\n 'DownAnomalyProbability': 1 if raw_actual is None else float(down_anomaly_probability),\n 'UpAnomalyProbability': 1 if raw_actual is None else float(up_anomaly_probability),\n 'ModelFreshness': model_freshness}\n\n else:\n zscore = (interpolated_actual - forecast[0][-1]) / max(float(forecast[1][-1]), float_min)\n\n anomaly_probability = (2 * st.norm(0, 1).cdf(abs(zscore))) - 1\n if math.isnan(anomaly_probability) or math.isnan(forecast[2][-1][0]) or math.isnan(forecast[2][-1][1]):\n raise ValueError('Either Anomaly probability or CILower or CIUpper is NaN')\n\n down_anomaly_probability = 1 - st.norm(0, 1).cdf(zscore)\n up_anomaly_probability = st.norm(0, 1).cdf(zscore)\n\n result = {'Success': True,\n 'IsLogTransformed': is_log_transformed,\n 'AdjustedActual': interpolated_actual,\n 'Prediction': float(forecast[0][-1]) if not float(\n forecast[0][-1]) == float('inf') else 0.0,\n 'StdErr': float(forecast[1][-1]) if not float(\n forecast[1][-1]) == float('inf') else 0.0,\n 'CILower': float(forecast[2][-1][0]) if not float(\n forecast[2][-1][0]) == float('-inf') else 0.0,\n 'CIUpper': float(forecast[2][-1][1]) if not float(\n forecast[2][-1][1]) == float('inf') else 0.0,\n 'ConfLevel': float(1.0 - alpha) * 100,\n 'ExogenousHolidays': include_holidays_exog,\n 'IsAnomaly': bool(anomaly_probability > 1 - alpha),\n 'IsAnomalyExtreme': bool(anomaly_probability > 1 - alpha_extreme),\n 'AnomalyProbability': 1 if raw_actual is None else float(anomaly_probability),\n 'DownAnomalyProbability': 1 if raw_actual is None else float(down_anomaly_probability),\n 'UpAnomalyProbability': 1 if raw_actual is None else float(up_anomaly_probability),\n 'ModelFreshness': model_freshness}\n\n except (LinAlgError, ValueError, LADStructuralError) as e:\n result = {'Success': False,\n 'AdjustedActual': interpolated_actual,\n 'ErrorMessage': str(e)}\n\n return result", "def prediction(self, test_path, dest_path):\n logger.info(f\"prediction on files from {test_path}\")\n\n if self.train_time is None:\n self.train_time = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n\n save_path = data_functions.create_path(dest_path, self.train_time)\n save_path = data_functions.create_path(save_path, 'raw_pred')\n logger.info(f\"saving predictions to {save_path}\")\n\n test_gen = 
self.test_generator(test_path)\n for img, img_entry, orig_shape in test_gen:\n logger.info(f\"getting prediction for {img_entry.name}\")\n pred_raw = self.model.predict(img, batch_size=1)[0]\n pred_raw_resized = cv2.resize(pred_raw, orig_shape)\n\n file_name = img_entry.name.rsplit('.', 1)[0] + '.npy'\n npy_file_save_path = os.path.join(save_path, file_name)\n np.save(npy_file_save_path, pred_raw_resized, allow_pickle=True)\n\n pred_image = (255 * pred_raw_resized).astype(np.uint8)\n cv2.imwrite(os.path.join(save_path, img_entry.name), pred_image)\n\n return save_path", "def predict(data, model_predict):\n # Execute any steps you need to do before scoring\n\n # This method makes predictions against the raw, deserialized model\n #predictions = model_predict(data)\n\n data.to_csv(\"/opt/code/chemprop_folder/for_scoring.csv\", index=False)\n\n args = PredictArgs().parse_args([\n '--test_path', '/opt/chemprop_folder/for_scoring.csv',\n '--checkpoint_path', '/opt/code/model.pth',\n '--preds_path', '/opt/chemprop_folder/preds.csv'\n ])\n\n make_predictions(args)\n\n preds_df = pds.read_csv(\"/opt/chemprop_folder/preds.csv\")\n sh = str(preds_df.shape)\n print(sh)\n\n preds_df = preds_df.rename(columns = {\"p_np\": \"positive_class_label\"})\n preds_df = preds_df.drop(columns=['smiles'])\n preds_df[\"negative_class_label\"] = 1 - preds_df[\"positive_class_label\"]\n\n print(preds_df.head())\n\n # Execute any steps you need to do after scoring\n # Note: To properly send predictions back to DataRobot, the returned DataFrame should contain a\n # column for each output label for classification or a single value column for regression\n return preds_df", "def fit_predict_score(self, train_reviews: List[ParsedText],\n test_reviews: List[ParsedText], test_reviews_pred: List[ParsedText],\n **kwargs) -> List[ParsedText]:\n\n self.fit(train_texts=train_reviews, val_texts=test_reviews, **kwargs)\n test_reviews_pred = self.predict(test_reviews_pred)\n logging.info(f'Score: {self.score(texts=test_reviews, texts_pred=test_reviews_pred)}')\n return test_reviews_pred", "def predict_single_fold(self, model: TorchBasedLinearEstimator, dataset: TabularDataset) -> np.ndarray:\n pred = model.predict(dataset.data)\n\n return pred", "def predict(self, testx=None):\n if self.best_model is None:\n raise Exception(\"Train a model first\")\n\n if testx is None:\n testx = self.test_X\n\n return self._predict(testx)", "def predict(self, data, version='default'):\n if self.transform_service:\n data = self.transform_service.predict(data, version)\n return self.model_service.predict(data, version)", "def predict(self, trained_model, prediction_datetime):\n return trained_model.predict()", "def _predict(self, testX):\n pass", "def eval_epoch(self, final=False, save_predictions=False):\n t1 = time()\n output = {'tp': [], 'fp': [], 'fn': [], 'tn': [], 'loss': [], 'preds': [],'truth': [], 'true': 0,'true_sep':np.zeros(self.rel_size)}\n test_info = []\n test_result = []\n self.model.eval()\n test_iter = self.iterator(self.data['test'], batch_size=self.params['batch'], shuffle_=False)\n # preds=[]\n # truths=[]\n for batch_idx, batch in enumerate(test_iter):\n batch = self.convert_batch(batch, istrain=False, save=True)\n\n with torch.no_grad():\n loss, stats, predictions, select, pred_pairs, multi_truths, mask, _ = self.model(\n batch) # pred_pairs <#pair, relations_num>\n pred_pairs = torch.sigmoid(pred_pairs)\n\n output['loss'] += [loss.item()]\n output['tp'] += [stats['tp'].to('cpu').data.numpy()]\n output['fp'] += 
[stats['fp'].to('cpu').data.numpy()]\n output['fn'] += [stats['fn'].to('cpu').data.numpy()]\n output['tn'] += [stats['tn'].to('cpu').data.numpy()]\n output['preds'] += [predictions.to('cpu').data.numpy()]\n # preds.extend(predictions.to('cpu').data.numpy())\n # truths.extend(truth.to('cpu').data.numpy())\n\n if True:\n test_infos = batch['info'][select[0].to('cpu').data.numpy(),\n select[1].to('cpu').data.numpy(),\n select[2].to('cpu').data.numpy()][mask.to('cpu').data.numpy()]\n test_info += [test_infos]\n\n pred_pairs = pred_pairs.data.cpu().numpy()\n multi_truths = multi_truths.data.cpu().numpy()\n output['true'] += multi_truths.sum() - multi_truths[:, self.loader.label2ignore].sum()\n output['true_sep'] = output['true_sep'] +multi_truths.sum(axis=0)\n if save_predictions:\n assert test_infos.shape[0] == len(pred_pairs), print(\n \"test info=%d, pred_pair=%d\" % (len(test_infos.shape[0]), len(pred_pairs)))\n for pair_id in range(len(pred_pairs)):\n multi_truth = multi_truths[pair_id] #第pair_id个实体对的true\n for r in range(0, self.rel_size):\n if r == self.loader.label2ignore:\n continue\n\n test_result.append((int(multi_truth[r]) == 1, float(pred_pairs[pair_id][r]),\n test_infos[pair_id]['intrain'],test_infos[pair_id]['cross'], self.loader.index2rel[r], r,\n len(test_info) - 1, pair_id))\n\n\n # estimate performance\n total_loss, scores = self.performance(output)\n # pairs*rel_size*batch\n test_result.sort(key=lambda x: x[1], reverse=True)\n\n input_theta, w, f1,p,r,scores_class = self.tune_f1_theta(test_result, output['true'],output['true_sep'], self.params['input_theta'], isTest=save_predictions)\n\n t2 = time()\n if not final:\n self.test_res['loss'] += [total_loss]\n # self.test_res['score'] += [scores[self.primary_metric]]\n self.test_res['score'] += [f1]\n self.test_res['p'] = p\n self.test_res['r'] = r\n print(' TEST | LOSS = {:.05f}, '.format(total_loss), end=\"\")\n print_results(scores, scores_class, self.show_class, t2 - t1)\n # print(\"不同类别:\")\n # t = classification_report(truths, preds,target_names=[\"NA\",\"父母子女\", \"祖孙\", \"兄弟姐妹\", \"叔伯姑舅姨\", \"夫妻\", \"其他亲戚\", \"好友\", \"上下级\", \"师生\", \"合作\", \"情侣\", \"对立\", \"共现\", \"同学\", \"同门\"])\n # print(t)\n\n if save_predictions:\n\n test_result = test_result[: w + 1]\n test_result_pred = []\n test_result_info = []\n for item in test_result:\n test_result_pred.append([(item[-3], item[1])]) #预测的关系是的概率\n test_result_info.append([test_info[item[-2]][item[-1]]])\n assert (item[-3] in test_info[item[-2]][item[-1]]['rel']) == item[0], print(\"item\\n\", item, \"\\n\",\n test_info[item[-2]][\n item[-1]])\n write_errors(test_result_pred, test_result_info, self.preds_file, map_=self.loader.index2rel, type=\"theta\")\n write_preds(test_result_pred, test_result_info, self.preds_file, map_=self.loader.index2rel)\n # f1_score_t=f1_score(truths, preds, average='micro')\n # print(f1, scores['micro_f'], f1_score_t)\n\n return f1, scores['micro_f'],input_theta,p,r", "def predict(self,Xpred, nsamples=2000, tune=100, progress=True, points2=[]):\n if self.type_y=='affine':\n return self.predict_affine(Xpred, nsamples, tune, progress, points2)\n elif self.type_y=='regression':\n return self.predict_regression(Xpred, nsamples, tune, progress, points2)\n elif self.type_y=='mixed':\n return self.predict_mixed(Xpred, nsamples, tune, progress, points2)", "def predict(self):\n for track in self.tracks:\n track.predict(self.kf)\n #track.du_doan(self.kf_test)", "def evaluate(model, test_files):\n print(\"Running predictions.\")\n models = load_model(model)\n 
predictions = predict(models, test_files)\n\n # # write predictions to file\n # write_predictions(\"evaluate_out.json\",predictions)\n evaluate_individual(predictions, test_files, models)\n evaluate_overall(predictions)", "def predict(self, batched_features, model_load_dir=None):\n\n previous_mode = self._mode\n self._mode = 'predict'\n\n if model_load_dir is None:\n model_load_dir = self._save_dir\n logger.info('Model is lodded from {}'.format(model_load_dir))\n\n if not self._is_graph_build:\n logger.info('Initializing the model for prediction...')\n self.compile()\n\n gpu_options = tf.GPUOptions(allow_growth=True)\n sess_config = tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True, log_device_placement=True)\n with tf.Session(config=sess_config) as sess:\n saver = tf.train.Saver()\n logger.info(\"Getting latest checkpoint in {}\".format(model_load_dir))\n last_checkpoint = tf.train.latest_checkpoint(model_load_dir)\n logger.info(\"Attempting to load checkpoint at {}\".format(last_checkpoint))\n saver.restore(sess, last_checkpoint)\n logger.info(\"Successfully loaded {}!\".format(last_checkpoint))\n\n feed_dict = self._get_test_feed_dict(batched_features)\n y_pred = sess.run(self.prediction, feed_dict=feed_dict)\n\n self._mode = previous_mode\n return y_pred", "def test_predict(self):\n self.regression_single.predict(self.X_test)\n self.assertTrue(len(self.regression_single.y_pred))\n self.regression_boston.predict(self.boston_x_test)\n self.assertTrue(len(self.regression_boston.y_pred))", "def predict(self, data):\n return self.result.predict(data)", "def predict(dataset):\n import capsnet\n\n # Load (standardized) input data and associated file names\n test_x, _, names = _load_data(dataset)\n\n # Predict class probabilities for each model (epoch)\n at_preds, sed_preds = [], []\n\n for epoch in _determine_epochs(cfg.prediction_epochs):\n model = _load_model(epoch)\n at_pred, sed_pred = utils.timeit(\n lambda: capsnet.gccaps_predict(test_x, model),\n '[Epoch %d] Predicted class probabilities' % epoch)\n\n at_preds.append(at_pred)\n sed_preds.append(sed_pred)\n\n # Average predictions to give an overall output\n total_at_pred = np.mean(at_preds, axis=0)\n total_sed_pred = np.mean(sed_preds, axis=0)\n\n # Ensure output directory exists and set file path format\n os.makedirs(os.path.dirname(cfg.predictions_path), exist_ok=True)\n predictions_path = cfg.predictions_path.format('%s', dataset.name)\n\n # Save free parameters to disk\n utils.log_parameters({'prediction_epochs': cfg.prediction_epochs},\n os.path.join(os.path.dirname(cfg.predictions_path),\n 'parameters.json'))\n\n # Write predictions to disk\n utils.write_predictions(names, total_at_pred, predictions_path % 'at')\n utils.write_predictions(names, total_sed_pred, predictions_path % 'sed')", "def predict(self, model, context, data):\n pass", "def test_predict(self):\n\n docs = self.docs\n for m in self.models:\n preds = m.predict(docs)\n self.assertTrue(isinstance(preds, turicreate.SArray))\n self.assertEqual(len(preds), len(docs))\n self.assertEqual(preds.dtype, int)\n\n preds = m.predict(docs, output_type=\"probability\")\n self.assertTrue(isinstance(preds, turicreate.SArray))\n self.assertTrue(len(preds) == len(docs))\n s = preds.apply(lambda x: sum(x))\n self.assertTrue((s.apply(lambda x: abs(x - 1)) < 0.000001).all())\n\n # Test predictions when docs have new words\n new_docs = turicreate.SArray([{\"-1,-1\": 3.0, \"0,4\": 5.0, \"0,3\": 2.0}])\n preds = m.predict(new_docs)\n self.assertEqual(len(preds), 
len(new_docs))\n\n # Test additional burnin. Ideally we could show that things\n # converge as you increase burnin.\n preds_no_burnin = m.predict(docs, output_type=\"probability\", num_burnin=0)\n self.assertEqual(len(preds_no_burnin), len(docs))", "def test_fit_predict() -> None:\n mapie = MapieClassifier()\n mapie.fit(X_toy, y_toy)\n mapie.predict(X_toy)", "def train_model():\n return model.fit(train_images, train_labels, epochs=10, validation_data=(test_images, test_labels), shuffle='True')", "def predict_model(args):\n print(args)\n\n if args.cuda:\n print(\"=====> use gpu id: '{}'\".format(args.gpus))\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpus\n if not torch.cuda.is_available():\n raise Exception(\"no GPU found or wrong gpu id, please run without --cuda\")\n\n # build the model\n model = build_model(args.model, num_classes=args.classes)\n\n if args.cuda:\n model = model.cuda() # using GPU for inference\n cudnn.benchmark = True\n\n if not os.path.exists(args.save_seg_dir):\n os.makedirs(args.save_seg_dir)\n\n # load the test set\n datas, testLoader = build_dataset_test(args.dataset, args.num_workers, none_gt=True)\n\n if args.checkpoint:\n if os.path.isfile(args.checkpoint):\n print(\"=====> loading checkpoint '{}'\".format(args.checkpoint))\n checkpoint = torch.load(args.checkpoint)\n model.load_state_dict(checkpoint['model'])\n # model.load_state_dict(convert_state_dict(checkpoint['model']))\n else:\n print(\"=====> no checkpoint found at '{}'\".format(args.checkpoint))\n raise FileNotFoundError(\"no checkpoint found at '{}'\".format(args.checkpoint))\n\n print(\"=====> beginning testing\")\n print(\"test set length: \", len(testLoader))\n predict(args, testLoader, model)", "def make_predict_step(self):\n return self.make_eval_step()", "def sequence_predict(self, load_script=False, variant=\"predict\"):\n\n if variant != 'internal':\n # Open an existing model and get the input dataset. 
\n # Target for historical data are expected if using previous targets as a feature.\n request_data = self._get_model_and_data(ordered_data=True) \n if type(request_data) == list:\n X, y = request_data\n else:\n X = request_data\n else:\n X = self.X_test.copy()\n y = self.y_test.copy()\n\n # Scale the targets and increase stationarity if required\n if variant != 'internal' and self.model.lag_target and (self.model.scale_target or self.model.make_stationary):\n # If using differencing, we retain original y values for inversing the transformation later\n y_orig = y.values.ravel() if self.model.make_stationary=='difference' else None\n # Apply the transformer to the targets\n y = self.model.target_transformer.transform(y)\n # Drop samples where y cannot be transformed due to insufficient lags\n X = X.iloc[len(X)-len(y):]\n\n # Set the number of periods to be predicted\n prediction_periods = self.model.prediction_periods\n # Set the number of rows required for one prediction\n self.rows_per_pred = 1\n self.diff_lags = max(self.model.stationarity_lags) if self.model.lag_target and self.model.make_stationary=='difference' else 0\n # Set property depending on whether the current sample will be included as an input, or if we only use lag observations for predictions\n self.first_pred_modifier = 1 if self.model.current_sample_as_input else 0 \n\n # Check that the input data includes history to meet any lag calculation requirements\n if self.model.lags:\n # An additional lag observation is needed if previous targets are being added to the features\n self.rows_per_pred = self.model.lags+self.first_pred_modifier+1 if self.model.lag_target else self.model.lags+self.first_pred_modifier\n # If the target is being lagged and made stationary through differencing additional lag periods are required\n if self.model.lag_target and self.model.make_stationary=='difference':\n extra_msg = \" plus an additional {} periods for making the target stationary using differencing\".format(self.diff_lags)\n # For multi-step predictions we only expect lag values, not the current period's values\n # self.rows_per_pred = self.rows_per_pred-1 if prediction_periods > 1 else self.rows_per_pred\n assert len(X) >= self.rows_per_pred + self.diff_lags, \"Insufficient input data as the model requires {} lag periods for each prediction\".format(self.rows_per_pred) + extra_msg\n\n if variant != 'internal':\n # Prepare the response DataFrame\n # Initially set up with the 'model_name' and 'key' columns and the same index as request_df\n self.response = self.request_df.drop(columns=['n_features'])\n \n # Set up a list to contain predictions and probabilities if required\n predictions = []\n get_proba = False\n if variant == 'predict_proba':\n get_proba = True\n probabilities = [] \n\n # Refresh the keras model to avoid tensorflow errors\n if self.model.using_keras:\n self._keras_refresh()\n\n if prediction_periods > 1:\n if not self.model.lag_target:\n y = None\n\n # Check that we can generate 1 or more predictions of prediction_periods each\n n_samples = len(X)\n assert (n_samples - self.rows_per_pred) >= prediction_periods, \\\n \"Cannot generate predictions for {} periods with {} rows, with {} rows required for lag observations. You may need to provide more historical data or sufficient placeholder rows for future periods.\"\\\n .format(prediction_periods, n_samples, self.rows_per_pred)\n \n # For multi-step predictions we can add lag observations up front as we only use actual values\n # i.e. 
We don't use predicted y values for further predictions \n if self.model.lags or self.model.lag_target:\n X = self._add_lags(X, y=y, extrapolate=self.first_pred_modifier) \n\n # We start generating predictions from the first row as lags will already have been added to each sample\n start = 0\n else:\n # We start generating predictions from the point where we will have sufficient lag observations\n start = self.rows_per_pred\n \n if self.model.lag_target or prediction_periods > 1:\n # Get the predictions by walking forward over the data\n for i in range(start, len(X) + self.first_pred_modifier, prediction_periods): \n # For multi-step predictions we take in self.rows_per_pred rows of X to generate predictions for prediction_periods\n if prediction_periods > 1:\n batch_X = X.iloc[[i]]\n \n if not get_proba:\n # Get the prediction. \n pred = self.model.pipe.predict(batch_X)\n # Flatten the predictions for multi-step outputs and add to the list\n pred = pred.ravel().tolist()\n predictions += pred\n else:\n # Get the predicted probability for each sample \n proba = self.model.pipe.predict_proba(batch_X)\n proba = proba.reshape(-1, len(self.model.pipe.named_steps['estimator'].classes_))\n probabilities += proba.tolist()\n # For walk forward predictions with lag targets we use each prediction as input to the next prediction, with X values avaialble for future periods.\n else:\n batch_X = X.iloc[i-self.rows_per_pred : i] \n # Add lag observations\n batch_y = y.iloc[i-self.rows_per_pred : i]\n batch_X = self._add_lags(batch_X, y=batch_y, extrapolate=self.first_pred_modifier)\n\n # Get the prediction. We only get a prediction for the last sample in the batch, the remaining samples only being used to add lags.\n pred = self.model.pipe.predict(batch_X.iloc[[-1],:])\n\n # Add the prediction to the list. 
\n predictions.append(pred)\n \n # Add the prediction to y to be used as a lag target for the next prediction\n y.iloc[i - self.first_pred_modifier, 0] = pred\n\n # If probabilities need to be returned\n if get_proba:\n # Get the predicted probability for each sample \n probabilities.append(self.model.pipe.predict_proba(batch_X.iloc[[-1],:]))\n else:\n # Add lag observations to the samples if required\n if self.model.lags:\n X = self._add_lags(X, extrapolate=self.first_pred_modifier)\n\n # Get prediction for X\n predictions = self.model.pipe.predict(X)\n\n # If probabilities need to be returned\n if get_proba:\n # Get the predicted probability for each sample \n probabilities = self.model.pipe.predict_proba(X)\n \n # Set the number of placeholders needed in the response\n # These are samples for which predictions were not generated due to insufficient lag periods or for meeting multi-step prediction period requirements\n self.placeholders = self.rows_per_pred + self.diff_lags - self.first_pred_modifier\n\n # Transform probabilities to a readable string\n if get_proba:\n # Add the required number of placeholders at the start of the response list\n y = [\"\\x00\"] * self.placeholders\n \n # Truncate multi-step predictions if the (number of samples - self.rows_per_pred) is not a multiple of prediction_periods\n if prediction_periods > 1 and ((n_samples-self.rows_per_pred) % prediction_periods) > 0: \n probabilities = probabilities[:-len(probabilities)+(n_samples-self.rows_per_pred)]\n \n for a in probabilities:\n s = \"\"\n i = 0\n for b in a:\n s = s + \", {0}: {1:.3f}\".format(self.model.pipe.named_steps['estimator'].classes_[i], b)\n i += 1\n y.append(s[2:])\n\n # Prepare predictions\n else:\n if prediction_periods > 1:\n # Set the value to use for nulls\n null = np.NaN if is_numeric_dtype(np.array(predictions)) else \"\\x00\"\n\n # Truncate multi-step predictions if the (number of samples - self.placeholders) is not a multiple of prediction_periods\n if (n_samples-self.rows_per_pred) % prediction_periods > 0:\n predictions = predictions[:-len(predictions)+(n_samples-self.rows_per_pred)]\n\n # Add null values at the start of the response list to match the cardinality of the input from Qlik\n y = np.array(([null] * (self.rows_per_pred - self.first_pred_modifier)) + predictions)\n elif self.model.lag_target: \n # Remove actual values for which we did not generate predictions due to insufficient lags\n if is_numeric_dtype(y.iloc[:, 0].dtype):\n y.iloc[:self.placeholders - self.first_pred_modifier, 0] = np.NaN\n else:\n y.iloc[:self.placeholders - self.first_pred_modifier, 0] = \"\\x00\"\n # Flatten y to the expected 1D shape\n y = y.values.ravel()\n else:\n y = np.array(predictions)\n \n # Inverse transformations on the targets if required \n if variant != 'internal' and (self.model.scale_target or self.model.make_stationary):\n # Take out placeholder values before inverse transform of targets\n null_values = y[:self.rows_per_pred - self.first_pred_modifier] if prediction_periods > 1 or self.model.lag_target else []\n # Add placeholders for samples removed during differencing\n if self.model.make_stationary=='difference':\n null_values = np.append(null_values, np.repeat(null_values[0], self.diff_lags))\n y = y if len(null_values) == 0 else y[-len(predictions):]\n # Add untransformed lag values for differencing if required\n end = self.placeholders\n start = end - self.diff_lags\n y = y if y_orig is None else np.append(y_orig[start : end], y)\n\n # Apply the transformer to the test targets\n y 
= self.model.target_transformer.inverse_transform(y) \n\n # Remove lags used for making the series stationary in case of differencing\n if self.model.make_stationary == 'difference':\n y = y[self.diff_lags:]\n\n # Replace lags used for making the series stationary with nulls in case of differencing\n # if self.model.make_stationary == 'difference':\n #null = np.NaN if is_numeric_dtype(np.array(predictions)) else \"\\x00\"\n # y = np.append(np.array([null]*self.diff_lags), y[self.diff_lags:])\n \n # Add back the placeholders for lag values\n if len(null_values) > 0:\n y = np.append(null_values, y)\n \n if variant == 'internal':\n return y\n\n # Add predictions / probabilities to the response\n self.response['result'] = y\n\n # Reindex the response to reset to the original sort order\n self.response = self.response.reindex(self.original_index)\n \n if load_script:\n # If the function was called through the load script we return a Data Frame\n self._send_table_description(\"predict\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n return self.response\n \n # If the function was called through a chart expression we return a Series\n else:\n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n return self.response.loc[:,'result']", "def predict(self, X_test, **kwargs):\n\n # Normalize inputs\n if self.normalize_input:\n X_, _, _ = zero_mean_unit_var_normalization(X_test, self.X_mean, self.X_std)\n else:\n X_ = X_test\n\n # Sample a number of predictions for each given point\n # Generate mean and variance for each given point from sampled predictions\n\n X_ = torch.Tensor(X_)\n self.model.eval()\n Yt_hat = self.model(X_).data.cpu().numpy()\n\n if self.normalize_output:\n Yt_hat = zero_mean_unit_var_denormalization(Yt_hat, self.y_mean, self.y_std)\n\n logger.debug(f\"Generated final outputs array of shape {Yt_hat.shape}\")\n\n return Yt_hat", "def predict(self, instances):\r\n raise NotImplementedError", "def predict(self, num_samples = BATCH_SIZE, display_predictions = True):\n if self.model is None:\n raise Exception(\"Model is empty, either build or load it\")\n\n print(\"==> Prediction on model from\", self.model_dir)\n file_names, file_labels = self.get_sample_files(num_samples)\n self.predict_dataset = tf.data.Dataset.from_tensor_slices((file_names, file_labels))\n self.predict_dataset = self.predict_dataset.map(self.map_fn, num_parallel_calls=AUTOTUNE)\n self.predict_dataset = self.predict_dataset.batch(BATCH_SIZE)\n\n self.predictions = self.model.predict(self.predict_dataset)\n\n if display_predictions:\n self.display_predictions()", "def save_predictions(model, dataset, output_dir):\n preds = model.predict(dataset, verbose=1)\n preds = scipy.special.softmax(preds, 1) # Apply softmax\n with tf.io.gfile.GFile(os.path.join(output_dir, 'test_preds.pkl'), 'wb') as f:\n pickle.dump(preds, f)", "def predict(self, inputs):\n\n return self.model.predict(inputs)", "def predict_and_evaluate(config, workdir, ckpt_path=None):\n logging.info('Starting testing at %s', workdir)\n tf.io.gfile.makedirs(workdir)\n\n rng = jax.random.PRNGKey(config.seed)\n # Build input pipeline.\n rng, data_rng = jax.random.split(rng)\n data_rng = jax.random.fold_in(data_rng, jax.process_index())\n test_ds = []\n for split in config.dataset.test_splits:\n ds = input_pipeline.create_val_dataset(\n config.dataset, split, 
config.dataset.test_per_device_batch_size,\n config.dataset.test_pad_last_batch)\n test_ds.append(ds)\n\n # Initialize model.\n inputs = train_utils.get_init_inputs(test_ds[0])\n rng, model_rng = jax.random.split(rng)\n predict_config = models.TransformerConfig(**config.model.to_dict())\n predict_config = predict_config.replace(decode=True)\n model = models.Model(predict_config)\n state = train_utils.create_train_state(\n model, config, model_rng, inputs=inputs)\n\n writer = metric_writers.create_default_writer(\n workdir, just_logging=jax.process_index() > 0)\n\n # Set up checkpointing of the model and the input pipeline.\n checkpoint_dir = os.path.join(workdir, 'checkpoints')\n ckpt = checkpoint.MultihostCheckpoint(checkpoint_dir, max_to_keep=3)\n\n logging.info('Testing and evaluating checkpoint %s', ckpt_path)\n try:\n state = ckpt.restore(state, ckpt_path)\n except FileNotFoundError:\n state = ckpt.restore_or_initialize(state)\n step = int(state.step)\n\n p_pred_step = jax.pmap(\n functools.partial(predict_step, config=predict_config),\n axis_name='batch',\n static_broadcasted_argnums=(3,))\n p_init_cache = jax.pmap(\n functools.partial(init_cache, config=predict_config), axis_name='batch')\n\n # Distribute testing.\n state = flax_utils.replicate(state)\n with metric_writers.ensure_flushes(writer):\n test_metrics = {}\n for ds, split in zip(test_ds, config.dataset.test_splits):\n ds_metrics = evaluate_sequence_accuracy(p_pred_step, p_init_cache, state,\n ds, config, split, workdir,\n config.num_test_steps)\n ds_metrics = {f'{k}_{split}': v for k, v in ds_metrics.items()}\n test_metrics.update(ds_metrics)\n writer.write_scalars(step, test_metrics)", "def postprocess_model_outputs(self, predictions, expected):\n\n for key, val in predictions.items():\n predictions[key] = val.numpy()\n\n for key, val in expected.items():\n expected[key] = val.numpy()\n\n return predictions, expected", "def predict(self, X_test):\n\n # Predict Label 0\n i = 0\n X = X_test\n\n # Retrieve trained classifier for label 0\n clf = self.trained[i]\n\n # Make prediction\n y = clf.predict(X)\n result = np.zeros((X_test.shape[0], self.label_dim))\n\n result[:, i] = y\n\n # Concatenate result to X\n # X = sp.hstack([X, sp.csr_matrix(y)], format=\"csr\")\n\n # iterator = tqdm.trange(1, self.label_dim)\n iterator = range(1, self.label_dim)\n for i in iterator:\n # Predict Label i\n\n # Retrieve trained classifier for label i\n clf = self.trained[i]\n\n # Make prediction\n y = clf.predict(X)\n\n result[:, i] = y\n\n # Concatenate result to X\n # X = sp.hstack([X, sp.csr_matrix(y)], format=\"csr\")\n\n return result", "def make_predictions(model_choice, model_name, loader):\n\n torch.multiprocessing.set_sharing_strategy('file_system')\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n # I made a mistake in the saving script\n model_path = os.path.join('../trained_models', model_name, model_name + '.pth')\n\n if model_choice == 'baby':\n from models.BabyC3D import BabyC3D\n\n # from models.BabyC3D import Crazy\n\n model = BabyC3D()\n # model = Crazy()\n elif model_choice == 'small':\n from models.SmallC3D import SmallC3D\n\n model = SmallC3D()\n elif model_choice == 'se3cnn':\n from models.Se3cnn import Se3cnn\n\n model = Se3cnn()\n elif model_choice == 'c3d':\n from models.C3D import C3D\n\n model = C3D()\n elif model_choice == 'small_siamese':\n from models.Siamese import SmallSiamese\n\n model = SmallSiamese()\n elif model_choice == 'baby_siamese':\n from models.Siamese import 
BabySiamese\n\n model = BabySiamese()\n elif model_choice == 'babyse3cnn':\n from models.BabySe3cnn import BabySe3cnn\n\n model = BabySe3cnn()\n else:\n # Not possible because of argparse\n raise ValueError('Not a possible model')\n model.to(device)\n model = torch.nn.DataParallel(model)\n\n # import torch.optim as optim\n # optimizer = optim.Adam(None)\n # print(model, model_path)\n\n dict_results = run_model(loader, model, model_path)\n pickle.dump(dict_results, open(f'../data/post_processing/predictions/{model_name}.p', 'wb'))\n return dict_results" ]
[ "0.6959041", "0.6904541", "0.6858206", "0.674883", "0.6679141", "0.65974444", "0.654767", "0.65204763", "0.64544606", "0.6438485", "0.6437152", "0.64298964", "0.6410104", "0.640449", "0.640449", "0.6369408", "0.63413376", "0.63313776", "0.62701774", "0.6262735", "0.6240748", "0.6231877", "0.62217706", "0.6215227", "0.62140906", "0.62039584", "0.6198473", "0.61929524", "0.6186982", "0.6182213", "0.61816764", "0.6163021", "0.61606604", "0.61566806", "0.61465704", "0.6143893", "0.6138511", "0.61298186", "0.6129314", "0.612912", "0.6117833", "0.6116306", "0.61095613", "0.607476", "0.6072755", "0.60632706", "0.6061132", "0.60591114", "0.60584116", "0.60538423", "0.60496396", "0.60289437", "0.60224366", "0.6022385", "0.60113794", "0.60074264", "0.6006405", "0.59993875", "0.59945095", "0.5990978", "0.598605", "0.5977147", "0.59756404", "0.59645534", "0.59595037", "0.59581476", "0.595765", "0.5957201", "0.59487265", "0.59464115", "0.5942999", "0.594073", "0.59337157", "0.5932636", "0.5932418", "0.59292054", "0.59270704", "0.5925904", "0.5924917", "0.59228575", "0.5922555", "0.59167826", "0.5916627", "0.590919", "0.59076715", "0.5898903", "0.5896535", "0.5893563", "0.58896863", "0.5886199", "0.58826196", "0.58815044", "0.5879877", "0.58774436", "0.58725053", "0.58649004", "0.5864782", "0.58643126", "0.586229", "0.58583516" ]
0.7165115
0
Build the L2X model for selecting words.
def construct_gumbel_selector(X_ph, num_words, embedding_dims, maxlen):
    emb_layer = Embedding(num_words, embedding_dims, input_length = maxlen, name = 'emb_gumbel')
    emb = emb_layer(X_ph) #(400, 50)
    net = Dropout(0.2, name = 'dropout_gumbel')(emb)
    net = emb
    first_layer = Conv1D(100, kernel_size, padding='same', activation='relu', strides=1, name = 'conv1_gumbel')(net) # bs, 400, 100

    # global info
    net_new = GlobalMaxPooling1D(name = 'new_global_max_pooling1d_1')(first_layer) # bs, 100
    global_info = Dense(100, name = 'new_dense_1', activation='relu')(net_new) # bs, 100

    # local info
    net = Conv1D(100, 3, padding='same', activation='relu', strides=1, name = 'conv2_gumbel')(first_layer) # bs, 400, 100
    local_info = Conv1D(100, 3, padding='same', activation='relu', strides=1, name = 'conv3_gumbel')(net) # bs, 400, 100
    combined = Concatenate()([global_info,local_info])
    net = Dropout(0.2, name = 'new_dropout_2')(combined)
    net = Conv1D(100, 1, padding='same', activation='relu', strides=1, name = 'conv_last_gumbel')(net)
    logits_T = Conv1D(1, 1, padding='same', activation=None, strides=1, name = 'conv4_gumbel')(net) # bs, 400, 1

    # wanna make it bs, maxlen*num_groups
    squeeze_layer = Lambda(lambda x:tf.squeeze(x), output_shape=lambda x:x[:-1])
    logits_T_grp = Dense(maxlen*num_groups)(squeeze_layer(logits_T))
    #print(logits_T_grp.shape)
    return logits_T_grp # bs, 400* num_groups
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_model_from_inputs(self):\n if self.term_list is None:\n # no supplied token list -- use vocabulary of the training dataset\n # self.term_list = self.vocabulary\n # info(\"Setting bag dimension to {} from input vocabulary.\".format(len(self.term_list)))\n # will generate the vocabulary from the input\n pass\n info(f\"Building {self.name} model\")\n bagger = None\n if self.config.max_terms is not None:\n bagger = Bag(vocabulary=self.term_list, weighting=self.base_name, ngram_range=self.ngram_range, max_terms=self.config.max_terms)\n else:\n bagger = Bag(vocabulary=self.term_list, weighting=self.base_name, ngram_range=self.ngram_range)\n\n train_idx = self.indices.get_train_instances()\n texts = Text.get_strings(self.text.data.get_slice(train_idx))\n bagger.map_collection(texts, fit=True, transform=False)\n self.term_list = bagger.get_vocabulary()\n\n self.dimension = len(self.term_list)\n self.config.dimension = self.dimension", "def build_model():", "def build_model(self, text, n = 3):\n \n try:\n self.lm.build_model(text,n)\n except:\n raise\n \n self.vocab = Counter(words(text))\n\n return self.lm", "def build_model(self, text):\n text = '< ' * (self.n - 1) + text.replace(' . ', ' .%s ' % (' <' * (self.n - 1))) + ' >'\n tokens = self.split(text)\n self.corpus_len = len(tokens)\n self.n_grams_by_len = [{} for _ in range(self.n)]\n for i in range(len(tokens)): # for index in tokens\n for n in range(self.n): # for n-gram size from 1 to n:\n if i >= n: # if the index has advanced enough for this n\n n_gram = self.join(tokens[i - n: i + 1])\n n_grams = self.n_grams_by_len[n] # get dict for respective n\n n_grams[n_gram] = n_grams.get(n_gram, 0) + 1 # get dict for respective n\n return self.get_model()", "def build(self):\n labelled_documents = self.get_labelled_documents_queryset()\n\n self.model = self.build_model(labelled_documents)\n self.save_model()", "def generate_limittedmodel():\r\n print('Loading model')\r\n model = KeyedVectors.load_word2vec_format(BIN_NAME, binary=True)\r\n print('Model loaded!')\r\n\r\n print('Loading dot products')\r\n dp = np.load(DP_NAME)\r\n print('Dot products loaded')\r\n\r\n print('Filtering vocab')\r\n for name, vocab in list(model.vocab.items()):\r\n if dp[vocab.index] < MAX_DEGREE:\r\n del model.vocab[name]\r\n\r\n il = list(model.vocab.items())\r\n print('Sorting vocab')\r\n il.sort(key=lambda x: x[1].index)\r\n\r\n # Find the indexes of the words that are being kept\r\n print('Generating indexes')\r\n indexes = []\r\n for i in range(0, len(il)):\r\n name, vocab = il[i]\r\n indexes.append(vocab.index)\r\n model.vocab[name].index = i\r\n\r\n print('Modifying model weights')\r\n model.syn0 = model.syn0[indexes]\r\n\r\n print('Saving file')\r\n model.save_word2vec_format(SAVE_NAME, binary=True)", "def train_word2vec(self, size = 50, window = 20, min_count = 5, epochs = 40):\n\n\n # Read the entire previous data for training\n full_data = pd.read_csv(self.path_full_data, encoding = \"ISO-8859-1\")\n\n # Also read the column which we are performing analysis for\n col_data = pd.read_csv(self.path_data_col\n , encoding = \"ISO-8859-1\"\n , usecols = [self.id_col_name, self.col_name])\n \n\n # Clean the data in the column\n col_data[self.col_name] = self.cln.clean(col_data[self.col_name], typo = self.typo_ind)\n col_data.replace(np.nan, '', inplace = True)\n col_name_list = list(col_data[self.col_name].apply(lambda x: str(x).split(' ')))\n\n\n # Make a list of lists of the data\n input_list = list(full_data['response'].apply(lambda x: x.split(' 
')))\n input_list = input_list + col_name_list\n\n # Remove the responses having only one or two words\n input_list = [x for x in input_list if len(x) > 1]\n\n # Build vocabulary and train model\n model = gensim.models.Word2Vec(\n input_list,\n size = size,\n window = window,\n min_count = min_count)\n\n model.train(input_list, total_examples = len(input_list), epochs = epochs)\n\n return model", "def build_model(allidx,MAX_LENGTH,onlyArg):\n wordidx, labelidx, featuresidx, extraidx=allidx\n posidx, neridx, depidx, distanceidx, chnkidx, wikineridx, dbpedianeridx, subneridx = featuresidx\n\n main_input = Input(shape=(MAX_LENGTH,), name='main_input', dtype='int32')\n inputNodes=[main_input]\n\n w2vmodel=\"../embeddings/Domain-Word2vec.model\"\n\n embedding_matrix,EMBEDDING_DIM,vocabulary_size=prepare.wv_embedded(wordidx,w2vmodel)\n \n x = Embedding(output_dim=EMBEDDING_DIM, weights=[embedding_matrix],input_dim=vocabulary_size, input_length=MAX_LENGTH, mask_zero=False)(main_input)\n numnode=int(EMBEDDING_DIM/2)\n\n # pos embedding\n inputNodes,pos_layer=layers.embedlayer(inputNodes,\"pos_input\",posidx,MAX_LENGTH)\n x=Concatenate()([x,pos_layer])\n numnode+=int(len(posidx)/2)\n\n # ner embedding\n inputNodes,ner_layer=layers.embedlayer(inputNodes,\"ner_input\",neridx,MAX_LENGTH)\n x=Concatenate()([x,ner_layer])\n numnode+=int(len(neridx)/2)\n\n inputNodes,wikiner_layer=layers.embedlayer(inputNodes,\"wikiner_input\",wikineridx,MAX_LENGTH)\n x=Concatenate()([x,wikiner_layer])\n numnode+=int(len(wikineridx)/2)\n\n inputNodes,dbpedianer_layer=layers.embedlayer(inputNodes,\"dbpedianer_input\",dbpedianeridx,MAX_LENGTH)\n x=Concatenate()([x,dbpedianer_layer])\n numnode+=int(len(dbpedianeridx)/2)\n\n # dep embedding\n inputNodes,dep0_layer=layers.embedlayer(inputNodes,\"dep0_input\",depidx,MAX_LENGTH)\n x=Concatenate()([x,dep0_layer])\n numnode+=int(len(depidx)/2)\n\n inputNodes,dep1_layer=layers.embedlayer(inputNodes,\"dep1_input\",depidx,MAX_LENGTH)\n x=Concatenate()([x,dep1_layer])\n numnode+=int(len(depidx)/2)\n\n inputNodes,dep2_layer=layers.embedlayer(inputNodes,\"dep2_input\",depidx,MAX_LENGTH)\n x=Concatenate()([x,dep2_layer])\n numnode+=int(len(depidx)/2)\n\n # chnk embedding\n inputNodes,lvl_layer=layers.embedlayer(inputNodes,\"lvl_input\",distanceidx,MAX_LENGTH)\n x=Concatenate()([x,lvl_layer])\n numnode+=int(len(distanceidx)/2)\n\n inputNodes,chnk_layer=layers.embedlayer(inputNodes,\"chnk_input\",chnkidx,MAX_LENGTH)\n x=Concatenate()([x,chnk_layer])\n numnode+=int(len(chnkidx)/2)\n\n # wikiclass embedding\n inputNodes,subner_layer=layers.embedlayer(inputNodes,\"subner_input\",subneridx,MAX_LENGTH)\n x=Concatenate()([x,subner_layer])\n numnode+=int(len(subneridx)/2)\n\n if onlyArg:\n neartrigger_input = Input(shape=(MAX_LENGTH,), name='neartrigger_input', dtype='int32')\n inputNodes.append(neartrigger_input)\n neartrigger_layer = Embedding(output_dim=EMBEDDING_DIM, weights=[embedding_matrix],input_dim=vocabulary_size, \\\n input_length=MAX_LENGTH, mask_zero=False)(neartrigger_input)\n x=Concatenate()([x,neartrigger_layer])\n numnode+=50\n inputNodes,x,numnode=layers.extralayer(inputNodes,x,numnode,extraidx,featuresidx,MAX_LENGTH)\n\n lstm_out = Bidirectional(LSTM(numnode, dropout=0.2,return_sequences=True))(x)\n numnode=int((numnode+len(labelidx))*2/3)\n\n if onlyArg:\n lstm_out = SeqSelfAttention(attention_activation='tanh', attention_width=5)(lstm_out)\n\n lstm_out = Dropout(0.2)(lstm_out)\n out = Dense(numnode)(lstm_out)\n\n crf = CRF(len(labelidx), sparse_target=False) # CRF layer\n 
main_output=crf(out)\n loss=crf_loss #crf.loss_function\n acc=[crf_accuracy]\n\n model = Model(inputs=inputNodes, outputs=main_output) \n model.compile(loss=loss,optimizer=Adam(0.001),metrics=acc)\n model.summary()\n\n return model", "def buildWord2VecModel(lineSentencesPath, modelPath):\r\n\r\n lineSentences = LineSentence(lineSentencesPath)\r\n\r\n workers = multiprocessing.cpu_count()\r\n # initiate the model and perform the first epoch of training\r\n terms2vec = Word2Vec(lineSentences, size=100, window=5, min_count=2, sg=1, workers= workers - 1)\r\n terms2vec.save(modelPath)\r\n print(\"\\rFinished 1 epoch for {}.\".format(modelPath), end=\"\")\r\n\r\n # perform another 11 epochs of training\r\n for i in range(1, 12):\r\n terms2vec.train(lineSentences, total_examples=terms2vec.corpus_count, epochs=terms2vec.iter)\r\n terms2vec.save(modelPath)\r\n print(\"\\rFinished {} epochs for {}.\".format(i + 1, modelPath), end=\"\")\r\n print()\r\n return terms2vec", "def build_model(self):\n doc_input = Input(shape=(self.max_sent_num ,self.max_sent_length,512), dtype='float32')\n doc_in=Flatten()(doc_input)\n \n #masked3=Masking(mask_value=Special_value)(doc_input)\n \n # self.model_sent = self.build_sent_encoder()\n \n # doc_encoder= TimeDistributed(self.model_sent)(doc_in)\n \n # document_att= self.build_doc_encoder(doc_encoder)\n dense= Dense(DENSE_SIZE,activation='softmax')(doc_in)\n #doc_att = self.build_sent_encoder(sent_encoder)\n # dense the output to 2 because the result is a binary classification.\n output_tensor = Dense(3, activation='softmax', name='classification')(dense)\n # Create Sentence-level Model\n self.model = Model(doc_input, output_tensor)", "def build_model(self , text, n=3): #should be called build_model\n self.n = n\n self.vocab = Counter(words(text))\n\n tokens=tokenize(text)\n for gram in list(ngrams(tokens,self.n)):\n self.lm_dict[tuple(gram[:-1])][gram[-1]]+=1", "def build_LM(in_file):\n\tprint 'building language models...'\n\tfile_contents = open(in_file).readlines()\n\t#for each line in the file, split the language type away from the text line\n\t#split the text line into n grams and add it to the correct language type\n\t#apply smoothing to the final dictionary\n\tfor line in file_contents:\n\t\tsplit_line = line.split(' ', 1)\n\t\tlanguage_type = split_line[0]\n\t\ttext_line = split_line[1]\n\t\tline_fourgram = ngram_from_line(text_line)\n\t\tdict_update(language_type, line_fourgram)\n\tsmooth_dict()\n\t#print(\"models built with \"),\n\t#print(NUM_NGRAMS),\n\t#print(\" ngrams\")\n\treturn build_probability_model()", "def word2vec_model(sentences, size=100, min_count=5, window=5,\n negative=5, cbow=True, iterations=5, seed=0,\n workers=1):\n if cbow is True:\n sg = 0\n else:\n sg = 1\n model = Word2Vec(size=size, window=window,\n min_count=min_count, workers=workers,\n sg=sg, negative=negative, seed=seed)\n\n model.build_vocab(sentences)\n\n model.train(sentences, total_examples=model.corpus_count,\n epochs=iterations)\n return model", "def build_model():\n \n #english trained optimized pipeline for word embedding\n nlp = spacy.load(\"en_core_web_md\") # this model will give you 300D\n \n pipeline = Pipeline([\n ('features', FeatureUnion([\n ('text_pipeline', Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ])),\n \n ('embeddings_pipeline', Pipeline([\n ('vect_trans',SpacyVectorTransformer(nlp)),\n ('reduce_dim', TruncatedSVD(50)),\n ])),\n \n ])),\n \n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n 
\n parameters = {\n 'features__text_pipeline__vect__max_df': (0.5, 0.75, 1.0),\n 'features__embeddings_pipeline__reduce_dim__n_components':(50,60,70,100,120,130,150)\n }\n cv = GridSearchCV(pipeline, param_grid=parameters,cv=2)\n \n return cv", "def build_model(model_id1='bert-base-multilingual-cased',\n model_id2='bert-base-multilingual-uncased',\n max_len=192, dropout=0.2,\n **_):\n print(model_id1, model_id2)\n\n transformer1 = TFAutoModel.from_pretrained(model_id1)\n transformer2 = TFAutoModel.from_pretrained(model_id2)\n\n input_word_ids1 = Input(shape=(max_len,), dtype=tf.int32, name=\"input_word_ids1\")\n out1 = transformer1(input_word_ids1)\n\n input_word_ids2 = Input(shape=(max_len,), dtype=tf.int32, name=\"input_word_ids2\")\n out2 = transformer2(input_word_ids2)\n\n sequence_output1 = out1[0]\n sequence_output2 = out2[0]\n cls_token1 = sequence_output1[:, 0, :]\n cls_token2 = sequence_output2[:, 0, :]\n\n x = Dropout(dropout)(cls_token1) + Dropout(dropout)(cls_token2)\n out = Dense(1, activation='sigmoid')(x)\n\n model = Model(inputs=[input_word_ids1, input_word_ids2], outputs=out)\n\n return model", "def build_from_words(self, words):\n if isinstance(words, unicode):\n self.build(words)\n elif isinstance(words, list):\n flag = \"seg\"\n assert len(words) > 0\n\n word = words[0]\n if isinstance(word, unicode):\n flag = \"seg\"\n elif ((isinstance(word, list) or isinstance(word, tuple)) and\n len(word) == 2 and isinstance(word[0], unicode) and isinstance(word[1], unicode)):\n flag = \"pos\"\n elif ((isinstance(word, list) or isinstance(word, tuple)) and\n len(word) == 4 and isinstance(word[0], unicode) and isinstance(word[1], unicode)):\n flag = \"dp\"\n else:\n flag = \"unknown\"\n\n self._xml4nlp = Element('xml4nlp')\n self._note = SubElement(self._xml4nlp, 'note')\n self._doc = SubElement(self._xml4nlp, 'doc')\n\n para = SubElement(self._doc, 'para')\n sent = SubElement(para, 'sent')\n\n para.set(\"id\", \"0\")\n sent.set(\"id\", \"0\")\n\n self._clean_note()\n\n if flag == \"seg\":\n for i, word in enumerate(words):\n sent.append(Element('word', {\n 'id': unicode(i),\n 'cont': word\n }))\n sent.set('cont', (\"\".join(words)))\n self._set_word_on_note()\n elif flag == \"pos\":\n for i, word_pos in enumerate(words):\n word, pos = word_pos\n sent.append(Element('word', {\n 'id': unicode(i),\n 'cont': word,\n 'pos': pos\n }))\n sent.set('cont', (\"\".join([word[0] for word in words])))\n self._set_pos_on_note()\n elif flag == \"dp\":\n for i, rep in enumerate(words):\n word, pos, head, dep_rel = rep\n sent.append(Element('word', {\n 'id': unicode(i),\n 'cont': word,\n 'pos': pos,\n 'parent': str(int(head) - 1),\n 'relation': dep_rel\n }))\n sent.set('cont', (\"\".join([word[0] for word in words])))\n self._set_parser_on_note()\n\n self.dom = self._xml4nlp", "def create_train_model(self):\n st = LancasterStemmer()\n with open(self.data_path, encoding='utf8') as f_name:\n sentences = [[st.stem(w) for w, t in pos_tag(line.lower().split()) if 'N' in t] for line in f_name]\n sentences = [filter(lambda x: len(x) > 2, (word.strip(punctuation) for word in sentences)) for sent in sentences]\n model = Word2Vec(sentences,\n min_count=self.min_count,\n size=self.size,\n window=self.window,\n workers=4)\n model.save(self.model_path)", "def build_model(self, documents):\n self.vectorizer = TfidfVectorizer(\n stop_words='english', lowercase=True).fit(documents)\n self.vectors = self.vectorizer.transform(documents)", "def __init__(self, word2vec_model):\n self._model = word2vec_model", "def 
build_model(self):\n pass", "def build_model(self):\n pass", "def fit(self, X, y = None):\n workers = cpu_count()\n if self.workers > 0 and self.workers < workers:\n workers = self.workers\n\n if self.w2v is None:\n word2vec = Word2Vec(\n sentences = X, size = self.size, alpha = self.alpha, window = self.window,\n min_count = self.min_count, max_vocab_size = self.max_vocab_size,\n sample = self.sample, seed = self.seed, workers = workers,\n min_alpha = self.min_alpha, sg = self.sg, hs = self.hs, negative = self.negative,\n cbow_mean = self.cbow_mean, hashfxn = self.hashfxn, iter = self.iter,\n null_word = self.null_word, trim_rule = self.trim_rule,\n sorted_vocab = self.sorted_vocab, batch_words = self.batch_words)\n\n # once we’re finished training the model (i.e. no more updates, only querying)\n # we can store the word vectors and delete the model to trim unneeded model memory\n self.w2v_ = {w: vec for w, vec in zip(word2vec.wv.index2word, word2vec.wv.syn0)}\n else:\n # TODO : ??? is assuming w2v a file path generalizable\n self.w2v_ = {}\n all_words = set(w for words in X for w in words)\n\n # GLOVE pretrained weight's format\n with open(self.w2v) as f:\n for line in f:\n splitted = line.split()\n w = splitted[0]\n vector = [float(x) for x in splitted[1:]]\n if w in all_words:\n self.w2v_[w] = np.asarray(vector)\n\n if self.tfidf:\n tfidf_vec = TfidfVectorizer(analyzer = lambda x: x)\n tfidf_vec.fit(X)\n self.w2idf_ = {w: tfidf_vec.idf_[i] for w, i in tfidf_vec.vocabulary_.items()}\n\n return self", "def load_word2vec_model():\n logging.basicConfig(\n format='%(asctime)s : %(levelname)s : %(message)s', \n level=logging.INFO)\n model_path = '/playpen/home/tongn/GoogleNews-vectors-negative300.bin'\n model = KeyedVectors.load_word2vec_format(fname=model_path, binary=True)\n return model", "def build_model(num_topics=30):\n data = utils.read_wiki(\"wiki.train.tokens\")\n\n # preprocessing: remove too frequent words, stopwords ...\n logger.info(\"Start preprocessing, this will take quite some time ...\")\n list_of_tokens, bigrams = preprocess(data)\n\n id2word = corpora.Dictionary(list_of_tokens)\n id2word.filter_extremes(no_below=5, no_above=0.6, keep_n=VOCAB_SIZE)\n logger.info(f\"Done processing dataset len, vocab len {len(id2word.keys())}, {len(list_of_tokens)}\")\n \n # convert data into df vectors\n corpus = [id2word.doc2bow(tokens) for tokens in list_of_tokens]\n\n for num_topics in range(10, 100, 6):\n lda_model = LdaModel(corpus, num_topics=num_topics,\n id2word=id2word,\n passes=20,\n iterations=400,\n # alpha=[0.01]*num_topics,\n alpha=\"auto\",\n # eta=[0.01] * VOCAB_SIZE,\n eta=\"auto\")\n \n # save the model\n path = pathlib.Path(f\"{SAVING_DIR}/lda_topic_{num_topics}\")\n path.mkdir(parents=True, exist_ok=True)\n path = path / \"lda.model\"\n lda_model.save(str(path.absolute()))\n id2word.save(UNIGRAM_FILE)\n bigrams.save(BIGRAM_FILE)\n\n # visualize topics by LDAviz\n vis = gensimvis.prepare(topic_model=lda_model, corpus=corpus, dictionary=id2word)\n pathlib.Path(\"lda_vizs\").mkdir(parents=True, exist_ok=True)\n pyLDAvis.save_html(vis, f'lda_vizs/lda_visualization_{num_topics}.html')\n return id2word, bigrams, lda_model", "def prepare_lexicons(self, topnwords = 80, distance_cutoff = 0.45):\n\n model = self.train_word2vec()\n\n\n # 10 topics\n topic_dict = {0: 'academics'\n , 1: 'career'\n , 2: 'commute'\n , 3: 'diversity'\n , 4: 'community'\n , 5: 'extracurricular'\n , 6: 'facilities'\n , 7: 'finance'\n , 8: 'housing'\n , 9: 'wellness'\n }\n\n # Some important words that 
should be included under each topic\n topics = [['academic', 'exam', 'study', 'learn', 'education', 'class', 'course', 'grade', 'assignment'\n , 'degree', 'research', 'elective'\n , 'professor', 'project', 'scholarship', 'knowledge']\n , ['career', 'job', 'coop', 'employment']\n , ['commute', 'skytrain', 'transport', 'commuter']\n , ['diversity', 'diverse', 'background']\n , ['community', 'welcome', 'support', 'social', 'friend', 'fun', 'network', 'home']\n , ['extracurricular', 'club', 'sport', 'activity']\n , ['facility', 'infrastructure', 'food', 'building', 'gym']\n , ['finance', 'tuition', 'expensive']\n , ['housing', 'live', 'residence']\n , ['wellness', 'health', 'stress', 'depression', 'anxiety']]\n\n # For each topic, collect the words most similar to them in a list of lists\n topic_lexicons = []\n\n # Loop through the ten topics\n for topic in topics:\n\n temp_words = []\n\n # Loop through each word that we have given manually under each topic\n for word in topic:\n\n # Consider most similar words according to some cutoffs\n similar_words = model.wv.most_similar(positive = word, topn = topnwords)\n temp_words1 = [x for (x,y) in similar_words if y >= distance_cutoff]\n\n temp_words = temp_words + temp_words1\n\n temp_words = temp_words + topic\n\n\n # Take unique words, there might be duplicates\n topic_lexicons.append(list(set(temp_words)))\n\n # Some manual adjustments\n # Remove 'commute' from other topic\n topic_lexicons[8].remove('commute')\n\n return topic_lexicons", "def __init__(self, model_name):\n self.name = str(model_name)\n self.numwords = 0\n self.words = {} #how many types of words\n self.word_lengths = {} #how many word lengths\n self.stems = {} #how many stems\n self.sentence_lengths = {} #how many sentence lengths\n self.common_word = [] #top ten most common words", "def fit(self, X, y=None):\n for input_data in X:\n self._node_vocab.add_node(input_data[0])\n self._word_vocab.add_document(input_data[1])\n if self._use_char:\n self._char_vocab.add_documents(input_data[1])\n for data in input_data[2]:\n self._word_vocab.add_document(data)\n if self._use_char:\n self._char_vocab.add_documents(data)\n # self._label_vocab.add_node(' '.join(data)) # this results in a very big lable space (90K) \n self._label_vocab.add_document(data) # Use word indexing instead, drawbacks: BOW\n\n self._node_vocab.build()\n self._word_vocab.build()\n self._char_vocab.build()\n self._label_vocab.build()\n\n return self", "def train_word2vec_from_ES(es_config, query, model_file):\n q_docs = QueryResultDocs(es_config, query)\n model = gensim.models.Word2Vec(q_docs, workers=40)\n model.save(model_file)\n print 'model trained & saved'\n return model", "def build_model(self):\n raise NotImplementedError", "def build_model():\n nltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger', 'stopwords'])\n pipeline = Pipeline([\n ('features', FeatureUnion([\n \n ('text_pipeline', Pipeline([\n ('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer())\n ])),\n \n ('strarting_verb', StartingVerbExtractor())\n \n ])),\n\n ('clf', MultiOutputClassifier(estimator = AdaBoostClassifier(random_state = 42)))\n\n ])\n \n parameters = {\"clf__estimator__learning_rate\": [0.1, 0.5, 1.0],\n \"clf__estimator__n_estimators\": [25, 50, 75]\n }\n \n from sklearn.model_selection import GridSearchCV\n cv = GridSearchCV(pipeline, param_grid = parameters) \n \n return cv", "def get_w2v_model(sentences, num_features=300, min_word_count=40,\n num_workers=4, context=10, downsampling=1e-3):\n\n 
logger.debug(\"Trying to get the W2V model with {} features, and begins \"\n \"with '{}'.\".format(num_features, \" \".join(sentences[0][:5])))\n model_hash = md5(str(sentences) + str(num_features)).hexdigest()\n model_filename = model_hash + \".w2v.model\"\n\n if os.path.isfile(model_filename):\n logger.debug(\"Found the model in file '{}'.\".format(model_filename))\n model = Word2Vec.load(model_filename)\n else:\n logger.debug(\"Didn't find the model.\")\n logger.debug(\"Training Word2Vec model with {} sentences and \"\n \"{} features...\".format(len(sentences), num_features))\n model = Word2Vec(sentences, workers=num_workers,\n size=num_features, min_count=min_word_count,\n window=context, sample=downsampling, seed=1)\n logger.debug(\"...done.\")\n # If you don't plan to train the model any further, calling\n # init_sims will make the model much more memory-efficient.\n model.init_sims(replace=True)\n logger.debug(\"Saving model in {}.\".format(model_filename))\n model.save(model_filename)\n\n return model", "def load_word2vec_en_pretrained():\r\n log.info(\"Load W2V Model\")\r\n model = api.load(\"glove-wiki-gigaword-50\")\r\n return PreTrainedGensimEN(model)", "def build_gensim_synset_model_from_sentences(sentences, window=5):\n model = Word2Vec(\n sentences, size=100, window=5, min_count=5, workers=4,\n trim_rule=trim_rule)\n return model", "def load_word_model(self, opt):\n print('\\nloading word model...\\n')\n opt = copy.deepcopy(opt)\n opt.is_word_model = True\n if not opt.load_complete_model:\n if opt.pretrained_emb:\n dloader = W2VDataLoader(path=opt.datapath,\n train=opt.traindata,\n dev=opt.testdata,\n bos_eos=opt.bos,\n lowercase=opt.lowercase,\n shared_vocab=opt.share_vocab,\n batch_size=opt.batch_size,\n gpu=opt.gpu,\n valsplit=opt.valsplit)\n _, _, _ = dloader.return_iterators()\n self.vocab = {'src': dloader.SRC.vocab, 'tgt': dloader.TGT.vocab}\n self.mappings = dloader.mappings\n else:\n _, _, _, self.vocab, self.mappings = lib.data.create_datasets(opt)\n model, optim = lib.model.create_model((self.vocab['src'], self.vocab['tgt']), opt)\n print('Loading test data from \"%s\"' % opt.testdata)\n print('Loading training data from \"%s\"' % opt.traindata)\n print(' * Vocabulary size. source = %d; target = %d' % (len(self.vocab['src']), len(self.vocab['tgt'])))\n print(' * Maximum batch size. 
%d' % opt.batch_size)\n else:\n model = lib.model.create_model_from_complete(opt, 'word')\n optim = None\n print(model)\n return model, optim", "def generate_words_greedily(self, model, session, X, words_to_idx):\n \n Xorig_clean = self.cleanOutput(X, words_to_idx)\n \n for i in range(len(X)):#iterate over allscentences\n #set eos pointer to eos index\n p_eos = np.argwhere(np.array(X[i])==words_to_idx['<eos>'])[0][0] # 2 is eos but would be better using the dict\n while True:\n #compute predictions\n feed_dict = {self.input_x: np.array(X[i]).reshape((1,29)),\n self.input_y: np.array(X[i]).reshape((1,29))} # input_y is not needed\n \n prediction, sentence_probability = session.run([self.predictions, self.sentence_probability], feed_dict)\n \n lastpred = prediction[0,p_eos-1]\n X[i][p_eos]=lastpred\n \n p_eos += 1\n if lastpred == words_to_idx['<eos>'] or p_eos==29: break\n \n #postprocess X\n Xclean = self.cleanOutput(X, words_to_idx)\n self.create_submission_file(Xorig_clean, task='originalX')\n self.create_submission_file(Xclean, task='continuation')", "def train(self, examples):\n print(examples)\n # first we will do gensim to get word embeddings\n tokens = []\n for example in examples:\n for tuple in example:\n tokens.append([tuple[0]])\n self.model = Word2Vec(tokens, min_count=1, size=100).wv\n # shuffle the examples so that they are gone through 'randomly'\n #print(examples)\n random.shuffle(examples)\n #print(examples)\n # iterate through our examples\n for j in range(len(examples)):\n # the stored label for the previous token\n prev_label = None\n prev_word = None\n # iterate through our tokens for the example\n for i in range(len(examples[j])):\n # store our token and its label\n token = examples[j][i][0]\n y = examples[j][i][1]\n # get the features for our current token\n next_word = None\n if i <= (len(examples)-1):\n next_word = examples[j][i+1][0]\n features = self.featurize(prev_label, prev_word, token, next_word)\n # set our previous label to our current since\n # we are done featurizing and need to store it for\n # the next iteration\n prev_label = y\n # a dictionary that will store our z values\n z = {}\n # calculate our z value for every state for\n # the example we are on\n # z(state) = features * weights\n # z[state] = np.dot(features, weights[state])\n for state in self.states:\n z[state] = np.dot(features, self.weights[state])\n # store our max\n max = -1\n # store our y_hat\n y_hat = None\n # store our probabilities\n prob = {}\n # this runs softmax on our z's\n # y_hat = softmax(z)\n denom = sum(np.exp(np.array(list(z.values()))))\n for state in self.states:\n # softmax = p(state) = e^z[state] / (sum[e^z for all z's)\n # making sure this works the way I want it to, should\n # be three values\n #print(np.array(list(z.values())))\n #print(np.exp(np.array(list(z.values()))))\n prob[state] = np.exp(z[state]) / denom\n # if our current prob is greater than the others then it is our boy\n if prob[state] > max:\n # save the new prob as the max\n max = prob[state]\n # save the state as our prediction y_hat\n y_hat = state\n # this will hold our gradients for all the states\n gradients = {}\n for state in self.states:\n # gradient[state] = ((y_hat == state) - prob[state]) * features\n gradients[state] = ((y_hat == state) - prob[state]) * features\n # weights[state] -= loss * gradients\n self.weights[state] -= self.loss * gradients[state]", "def fit(self, X, y):\n catalog = []\n for title, lable in zip(X, y):\n for word in title.split():\n pair = (word, lable)\n 
catalog.append(pair)\n\n self.unique_words = Counter(catalog)\n print(\"unique_words\", self.unique_words)\n\n self.counted_dict = dict(Counter(y))\n print(\"counted_dict\", self.counted_dict)\n\n words = [word for title in X for word in title.split()]\n self.counted_words = dict(Counter(words))\n print(\"counted_words\", self.counted_words)\n\n self.model = {\n \"labels\": {},\n \"words\": {},\n }\n\n for edition in self.counted_dict:\n count = 0\n for word, label_name in self.unique_words:\n if edition == label_name:\n count += self.unique_words[(word, edition)]\n params = {\n \"label_count\": count,\n \"probability\": self.counted_dict[edition] / len(y),\n }\n self.model[\"labels\"][edition] = params\n\n for word in self.counted_words:\n params = {}\n for edition in self.counted_dict:\n nc = self.model[\"labels\"][edition][\"label_count\"]\n nic = self.unique_words.get((word, edition), 0)\n counted_len = len(self.counted_words)\n alpha = self.alpha\n smooth = (nic + alpha) / (nc + alpha * counted_len)\n params[edition] = smooth\n self.model[\"words\"][word] = params", "def generate_text(model, w2vmodel, nb_epoch, length=75, max_seq_length=20, seed=\"Rain drop drop top\"):\n global sample\n generated = ''\n sequences = seed\n\n generated += seed\n\n #clean seed\n seed=re.sub(r'<[^<]+?>', '', seed)\n #remove encoding characters like \\x86\n seed=re.sub(r'[^\\x00-\\x7f]','',seed)\n seed=re.sub(r'\\#','',seed)\n #remove punctuation\n seed=re.sub(r'[^A-Za-z0-9\\s]','',seed)\n\n #shorten if longer than max_seq_length\n seed = seed.split(' ')[:max_seq_length]\n\n word_ix_list = []\n for word in seed:\n try:\n word = word_to_ix(word,w2vmodel)\n except:\n #since we're using -1 as a null word (why we also pad with the not in vocab index), we'll use that for words that aren't in the word2vec model\n print('Warning: {0} not contained in training vocabulary. 
It will be ignored when computing output.'.format(word))\n word = word_to_ix('_UNSEEN_',w2vmodel)\n word_ix_list.append(word)\n\n #pad word_list with the unseen word2vec if shorter than max_seq_length\n word_ix_list = [word_to_ix('_UNSEEN_',w2vmodel)] * (max_seq_length-len(word_ix_list)) + word_ix_list\n\n for temp in [0.2, 0.5, .75, 1.0]:\n print('temperature: ', temp)\n for word in range(length):\n #reshape wordlist\n word_ix_list = np.asarray(word_ix_list).reshape(1,max_seq_length)\n\n #prediction = model.predict(x=word_ix_list)\n #next_ix = np.argmax(prediction)\n prediction = model.predict(x=word_ix_list,verbose=0)[0]\n next_ix = sample(prediction, temp)\n predicted_word = ix_to_word(next_ix,w2vmodel)\n\n generated += (' ' + predicted_word) #add predicted word to the generated output\n\n #remove first word from the word list to reduce the array for the max sequence length for the model\n word_ix_list = np.append(word_ix_list,next_ix)\n word_ix_list.shape\n word_ix_list = np.delete(word_ix_list,0,0)\n print(generated)\n print('-----')\n #print(generated)\n return", "def load_word2vec_model():\n model = Word2Vec.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True, norm_only=True)\n return model", "def create_embedding(self):\n self.embedding = []\n\n for index in range(1,self.args.window_size+1):\n print(\"\\nOptimization round: \" +str(index)+\"/\"+str(self.args.window_size)+\".\")\n print(\"Creating documents.\")\n clean_documents = self.walk_extracts(index)\n print(\"Fitting model.\")\n model = Word2Vec(clean_documents,\n size = self.args.dimensions,\n window = 1,\n min_count = self.args.min_count,\n sg = 1,\n workers = self.args.workers)\n\n new_embedding = self.get_embedding(model)\n self.embedding = self.embedding +[new_embedding]\n self.embedding = np.concatenate(self.embedding, axis = 1)", "def build_model():\n \n pipeline = Pipeline([\n ('features', FeatureUnion([\n\n ('text_pipeline', Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer())\n ])),\n\n ('starting_verb', StartingVerbExtractor())\n ])),\n\n ('clf', MultiOutputClassifier(AdaBoostClassifier()))\n])\n \n # specify parameters for grid search\n parameters = {\n 'features__text_pipeline__vect__ngram_range': ((1, 1), (1, 2)),\n 'features__text_pipeline__vect__max_df': (0.75, 1.0)\n }\n\n # create grid search object\n cv = GridSearchCV(pipeline, param_grid = parameters, n_jobs= 8, cv = 3, verbose = 2)\n\n return cv", "def build_vocab(self):\n if self.test_file is None:\n print('test_file is None')\n file_list = [self.train_file, self.dev_file]\n else:\n file_list = [self.train_file, self.dev_file, self.test_file]\n\n examples = []\n for file_name in file_list:\n examples += ParseExample.load_data(file_name)\n\n sents = []\n for example in examples:\n warrant0, warrant1, reason, claim, debate_meta_data, negclaim = example.get_six(type=WORD_TYPE)\n sents.append(warrant0)\n sents.append(warrant1)\n sents.append(reason)\n sents.append(claim)\n sents.append(debate_meta_data)\n\n vocab = data_utils.build_word_vocab(sents)\n\n return vocab", "def build_model(self):\n import tensorflow as tf\n \n y = tf.nn.relu(tf.matmul(self.variables[\"input_observation\"], self.variables[\"W1\"]) + \n self.variables[\"b1\"], name=\"y1\")\n \n for i in range(self.n_hidden-1):\n y = tf.nn.relu(tf.matmul(y, self.variables[\"W\"+str(i+2)]) + \n self.variables[\"b\"+str(i+2)], name=\"y\"+str(i+2))\n \n self.variables[\"y\"] = [tf.matmul(y, self.variables[\"Wo_0\"]) + self.variables[\"bo_0\"]]\n 
for i in range(1, len(self.output_size)):\n self.variables[\"y\"] += [tf.matmul(y, self.variables[\"Wo_%s\"%i]) + self.variables[\"bo_%s\"%i]]", "def train_sentence_dm(model, sentence, lbls, alpha, work=None, neu1=None, train_words=True, train_lbls=True):\n lbl_indices = [lbl.index for lbl in lbls if lbl is not None]\n lbl_sum = np_sum(model.syn0[lbl_indices], axis=0)\n lbl_len = len(lbl_indices)\n neg_labels = []\n if model.negative:\n # precompute negative labels\n neg_labels = zeros(model.negative + 1)\n neg_labels[0] = 1.\n\n for pos, word in enumerate(sentence):\n if word is None:\n continue # OOV word in the input sentence => skip\n reduced_window = random.randint(model.window) # `b` in the original doc2vec code\n start = max(0, pos - model.window + reduced_window)\n window_pos = enumerate(sentence[start : pos + model.window + 1 - reduced_window], start)\n word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]\n l1 = np_sum(model.syn0[word2_indices], axis=0) + lbl_sum # 1 x layer1_size\n if word2_indices and model.cbow_mean:\n l1 /= (len(word2_indices) + lbl_len)\n neu1e = train_cbow_pair(model, word, word2_indices, l1, alpha, neg_labels, train_words, train_words)\n if train_lbls:\n model.syn0[lbl_indices] += neu1e\n\n return len([word for word in sentence if word is not None])", "def build_bilstm(self, verbose=True):\r\n word_ids = Input(batch_shape=(None, None), dtype='int32', name='word_input')\r\n inputs = [word_ids]\r\n\r\n if self._params.use_pretrain_embedding:\r\n if verbose: logging.info(\"initial word embedding with pretrained embeddings\")\r\n if self._params.word_embedding_dim == 100:\r\n glove_file = self._params.data_dir + '/glove.6B.100d.txt'\r\n elif self._params.word_embedding_dim == 300:\r\n glove_file = self._params.data_dir + '/glove.42B.300d.txt'\r\n else:\r\n logging.error(\"we only support glove embedding with dimension 100 or 300\")\r\n raise ValueError(\"unmatch word dimension, we only support glove embedding with dimension 100 or 300\")\r\n glove_embedding_index = load_glove(glove_file, self._params.word_embedding_dim)\r\n word_vocab = self.input_processor.word_vocab.vocab\r\n glove_embeddings_matrix = np.zeros([len(word_vocab), self._params.word_embedding_dim])\r\n for word, i in word_vocab.items():\r\n vector = glove_embedding_index.get(word)\r\n if vector is not None:\r\n glove_embeddings_matrix[i] = vector\r\n \r\n word_embeddings = Embedding(input_dim=glove_embeddings_matrix.shape[0],\r\n output_dim=glove_embeddings_matrix.shape[1],\r\n trainable=False,\r\n mask_zero=True,\r\n weights=[glove_embeddings_matrix],\r\n name='word_embedding')(word_ids)\r\n else:\r\n word_embeddings = Embedding(input_dim=self._params.word_vocab_size,\r\n output_dim=self._params.word_embedding_dim,\r\n mask_zero=True,\r\n name='word_embedding')(word_ids)\r\n\r\n input_embeddings = [word_embeddings]\r\n if self._params.use_char:\r\n char_ids = Input(batch_shape=(None, None, None), dtype='int32', name='char_input')\r\n inputs.append(char_ids)\r\n if self._params.char_feature == \"lstm\":\r\n char_embeddings = Embedding(input_dim=self._params.char_vocab_size,\r\n output_dim=self._params.char_embedding_dim,\r\n mask_zero=True,\r\n name='char_embedding')(char_ids)\r\n if verbose: logging.info(\"using charcter level lstm features\")\r\n char_feas = TimeDistributed(Bidirectional(LSTM(self._params.char_lstm_size)), name=\"char_lstm\")(char_embeddings)\r\n elif self._params.char_feature == \"cnn\":\r\n # cnn do not support mask\r\n 
char_embeddings = Embedding(input_dim=self._params.char_vocab_size,\r\n output_dim=self._params.char_embedding_dim,\r\n name='char_embedding')(char_ids)\r\n if verbose: logging.info(\"using charcter level cnn features\")\r\n char_feas = char_cnn_encode(char_embeddings, self._params.n_gram_filter_sizes, self._params.n_gram_filter_nums)\r\n else:\r\n raise ValueError('char feature must be lstm or cnn')\r\n\r\n input_embeddings.append(char_feas)\r\n\r\n if self._params.use_pos:\r\n if verbose: logging.info(\"use pos tag features\")\r\n pos_ids = Input(batch_shape=(None, None), dtype='int32', name='pos_input')\r\n inputs.append(pos_ids)\r\n\r\n\r\n pos_embeddings = Embedding(input_dim=self._params.pos_vocab_size,\r\n output_dim=self._params.pos_embedding_dim,\r\n mask_zero=True,\r\n name='pos_embedding')(pos_ids)\r\n input_embeddings.append(pos_embeddings)\r\n\r\n if self._params.use_dict:\r\n if verbose: logging.info(\"use user dict features\")\r\n dict_ids = Input(batch_shape=(None, None), dtype='int32', name='dict_input')\r\n inputs.append(dict_ids)\r\n\r\n dict_embeddings = Embedding(input_dim=self._params.dict_vocab_size,\r\n output_dim=self._params.dict_embedding_dim,\r\n mask_zero=True,\r\n name='dict_embedding')(dict_ids)\r\n input_embeddings.append(dict_embeddings)\r\n\r\n input_embedding = Concatenate(name=\"input_embedding\")(input_embeddings) if len(input_embeddings)>1 else input_embeddings[0]\r\n input_embedding_ln = LayerNormalization(name='input_layer_normalization')(input_embedding)\r\n #input_embedding_bn = BatchNormalization()(input_embedding_ln)\r\n input_embedding_drop = Dropout(self._params.dropout, name=\"input_embedding_dropout\")(input_embedding_ln)\r\n\r\n z = Bidirectional(LSTM(units=self._params.main_lstm_size, return_sequences=True, dropout=0.2, recurrent_dropout=0.2),\r\n name=\"main_bilstm\")(input_embedding_drop)\r\n z = Dense(self._params.fc_dim, activation='tanh', name=\"fc_dense\")(z)\r\n\r\n if self._params.use_crf:\r\n if verbose: logging.info('use crf decode layer')\r\n crf = CRF(self._params.num_labels, sparse_target=False,\r\n learn_mode='marginal', test_mode='marginal', name='crf_out')\r\n loss = crf.loss_function\r\n pred = crf(z)\r\n else:\r\n loss = 'categorical_crossentropy'\r\n pred = Dense(self._params.num_labels, activation='softmax', name='softmax_out')(z)\r\n\r\n model = Model(inputs=inputs, outputs=pred)\r\n model.summary(print_fn=lambda x: logging.info(x + '\\n'))\r\n model.compile(loss=loss, optimizer=self._params.optimizer)\r\n\r\n self.model = model", "def retrain_model(self, new_sentences, with_punctiations):\n if with_punctiations:\n model_ = Word2Vec.load('./model/model_word2vec.bin')\n else:\n model_ = Word2Vec.load('./model/model_no_punctuation_word2vec.bin')\n\n model_.build_vocab(new_sentences, update=True)\n model_.train(new_sentences, total_examples=model_.corpus_count, epochs=model_.iter)\n\n if with_punctiations:\n model_.save('./model/model_word2vec.bin')\n else:\n model_.save('./model/model_no_punctuation_word2vec.bin')\n\n\n pass", "def build_model(X_train, Y_train):\n #Choosing a straighforward single tree model to make training tractable in terms of time\n DTC = DecisionTreeClassifier(random_state = 11)\n\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(estimator=DTC))\n ])\n\n parameters = {'clf__estimator__criterion': [\"gini\", \"entropy\"],\n 'clf__estimator__splitter': [\"best\", \"random\"],\n 'clf__estimator__max_depth': 
randint(3, 6),\n 'clf__estimator__min_samples_split': randint(2,6)}\n\n grid_obj = RandomizedSearchCV(pipeline,parameters,n_iter=5, cv=5 )\n grid_obj.fit(X_train, Y_train)\n\n return grid_obj.best_estimator_", "def build(self, lang, linearInMLP=False):\n inputLayers, concLayers = [], []\n inputToken = Input((3 + configuration['embedding']['useB1'] + configuration['embedding']['useB-1'],))\n inputLayers.append(inputToken)\n tokenEmb = Embedding(len(self.vocabulary.tokenIndices), configuration['mlp']['tokenEmb'],\n trainable=configuration['mlp']['trainable'],\n weights=self.getWeightMatrix(self.vocabulary.tokenIndices, lang))(inputToken)\n tokenFlatten = Flatten()(tokenEmb)\n concLayers.append(tokenFlatten)\n inputPos = Input((3 + configuration['embedding']['useB1'] + configuration['embedding']['useB-1'],))\n inputLayers.append(inputPos)\n posEmb = Embedding(len(self.vocabulary.posIndices), configuration['mlp']['posEmb'],\n trainable=configuration['mlp']['trainable'])(inputPos)\n posFlatten = Flatten()(posEmb)\n concLayers.append(posFlatten)\n if linearInMLP:\n linearPredInput = Input(shape=(8,))\n inputLayers.append(linearPredInput)\n concLayers.append(linearPredInput)\n\n conc = keras.layers.concatenate(concLayers) if len(concLayers) > 1 else concLayers[0]\n dense1Layer = Dense(configuration['mlp']['dense1UnitNumber'],\n activation=configuration['nn']['dense1Activation'])(conc)\n lastLayer = Dropout(configuration['mlp']['dense1Dropout'])(dense1Layer)\n if configuration['mlp2']['dense2']:\n dense2Layer = Dense(configuration['mlp2']['dense2UnitNumber'],\n activation=configuration['mlp2']['dense2Activation'])(lastLayer)\n lastLayer = Dropout(configuration['mlp2']['dense2Dropout'])(dense2Layer)\n softmaxLayer = Dense(8 if enableCategorization else 4, activation='softmax')(lastLayer)\n return inputLayers, softmaxLayer", "def word2vec_main(df: pd.DataFrame,\n window_size: int = WINDOW_SIZE,\n layer_size: int = VECTOR_DEPTH,\n negative_sample_size: int = NEG_SAMPLE_SIZE,\n num_epochs: int = NUM_EPOCHS, \n learn_rate: float = LEARN_RATE,\n dist_alpha: float = DIST_ALPHA,\n verbose: bool = VERBOSE\n ) -> tuple: \n \n # Separate the keywords and the sentences from the DataFrame\n keywords, corpus = separate_keywords_corpus(df)\n \n # Preprocess the corpus\n print(\"Commencing data preprocessing...\")\n w2vp = W2VPreprocessor(keywords, corpus)\n \n # Obtain a noise distribution containing vocabulary for use within the\n # negative sampling\n noise_distribution = get_noise_distribution(\n w2vp.preprocess_corpus, w2vp.vocabulary, dist_alpha\n )\n \n # Encode the words such that they correspond with matrix indices\n word2onehot = one_hot_vocab_encoding(w2vp)\n \n # Initialise the embedding and contex matrices\n matrices = get_matrices((w2vp.vocab_size, layer_size))\n\n # Initialise the dataset via the skipgrams methodology\n skipgram_data = obtain_skipgram_dataset(\n w2vp.preprocess_corpus, \n window_size\n )\n # print(skipgram_data.head(15))\n \n if verbose:\n print(\n f\"\\nTraining Word2vec, via skipgrams and negative sampling, \" \n f\"using the following parameters. \\n\"\n f\"Vocabulary size: {w2vp.vocab_size} \\n\"\n f\"Window size: {window_size} \\n\"\n f\"Word vector depth: {layer_size} \\n\"\n f\"Negative sample size: {negative_sample_size} \\n\"\n f\"Distribution parameter: {dist_alpha} \\n\"\n f\"Number of epochs: {num_epochs} \\n\"\n f\"Learning rate (alpha): {learn_rate} \\n\"\n )\n \n # Train the model to obtain the final embedding and context matrices. 
The\n # embedding matrix will contain the final word vectors, which can be \n # extracted using the one hot encodings of the original vocabulary\n matrices = train_w2v(\n skipgram_data, \n matrices, \n word2onehot, \n w2vp.vocabulary, \n noise_distribution,\n negative_sample_size, \n num_epochs,\n learn_rate,\n verbose\n )\n \n return word2onehot, w2vp, matrices", "def bag_of_words_model(x, y):\n target = tf.one_hot(y, NUMBER_OF_CATEGORIES, 1, 0)\n word_vectors = learn.ops.categorical_variable(x, n_classes=n_words,\n embedding_size=EMBEDDING_SIZE, name='words')\n features = tf.reduce_max(word_vectors, reduction_indices=1)\n prediction, loss = learn.models.logistic_regression(features, target)\n train_op = tf.contrib.layers.optimize_loss(\n loss, tf.contrib.framework.get_global_step(),\n optimizer='Adam', learning_rate=0.01)\n return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op", "def get_wordlists():\n\n\tCS = {'ACM', 'IEEE', 'Computer Science', 'Artificial Intelligence',\n\t\t'Pattern Recognition', 'Computer Vision', 'Machine Learning',\n\t\t'Signal Processing', 'Electrical Engineering', 'Image Processing',\n\t\t'Data Mining', 'Neural Networks', 'Computer Graphics', 'Graphics',\n\t\t'Language Processing', 'Internet', 'Intelligent Systems',\n\t\t'Robotic','Data','Software', 'Machine Vision', 'Image Analysis',\n\t\t'Scientific Computing', 'SIAM', 'Malware','World Wide Web', \n\t\t'Computational Intelligence', 'Computational Linguistics',\n\t\t'Computational linguistics','Algorithm','Computer','ITiCSE',\n\t\t'ITICSE','Machine learning','Learning','learning',\n\t\t'Artificial intelligence','CIVR','Document Analysis'}\n\n\tbio = {'Biology', 'Microbiology', 'Molecular', 'Medical', 'Biological',\n\t\t'Cancer', 'Genome', 'Bioinformatics', 'Protein', 'Biocomputing',\n\t\t'Biomedical', 'biology', 'Medicine', 'Biosystems', 'Virology',\n\t\t'Brain', 'Psychology', 'Genetics', 'Bioengineering', 'Cell',\n\t\t'Cardiology', 'Metabolic', 'Biotechnology', 'Pathogens',\n\t\t'Pathology', 'Plant', 'PLANT', 'Virus', 'Drug','Medicinal',\n\t\t'Neuro','Psych',\n\t\t'Genomic','Diseases','Endocrinology', 'Epidemiology',\n\t\t'Proteom','Biochem', 'DNA', 'Pharma', 'Biomedic', 'biomedica',\n\t\t'Neurobiological'}\n\n\tmath = {'Mathemati','Markov','Probability','Algebra','Network',\n\t\t'Topology','Optimization', 'Geometr','Statistic','Algorithm',\n\t\t'Graph ','Graphs','Combinatori','Riemann Surfaces','Permutation Groups',\n\t\t'Functional Analysis', 'SIAM','Fixed Point','Wavelet','Statistics',\n\t\t'Linear Regression','Fractal','geometry','Multivariate','Chaos',\n\t\t'mathemati','Kernel'}\n\n\tlinguistics = {}\n\n\tcomputer_vision = {}\n\n\tchemistry = {}\n\n\tphysics = {}\n\n\t# Rename \"Computer Vision\" to \"Image Processing\"?\n\ttopic_names = ['Computer Science','Biology','Mathematics','Chemistry',\n\t\t'Physics','Computer Vision','Natural Language Processing']\n\ttopics = [CS, bio, math]#, linguistics, computer_vision, chemistry, physics]\n\n\treturn {topic_names[i]:topics[i] for i in range(len(topics))}", "def select(self):\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n bestBicScore = float(\"+inf\")\n bestModel = None\n log_n_samples = np.log(sum(self.lengths))\n \n for n_components in range(self.min_n_components,self.max_n_components+1): \n logL = float(\"-inf\")\n bicScore = float(\"+inf\")\n hmm_model = None\n logging.info('BIC: Training word =%s with number of components=%d', self.this_word, n_components)\n \n try :\n hmm_model = 
GaussianHMM(n_components=n_components, covariance_type=\"diag\", \n n_iter=1000, random_state=self.random_state,verbose=False).fit(self.X, self.lengths)\n logL = hmm_model.score(self.X, self.lengths)\n # Bayesian information criteria: BIC = -2 * logL + p * logN\n # p is number of Free Parameters in the Model\n parameters = n_components * n_components + 2 * len(self.X[0]) * n_components - 1\n bicScore = -2 * logL + parameters * log_n_samples\n if bicScore < bestBicScore:\n logging.debug('BIC: found lower bic score=%f for word =%s with components=%d', bicScore, self.this_word, n_components)\n bestBicScore = bicScore\n bestModel = hmm_model\n \n except RuntimeWarning as rw:\n logging.warning('BIC: RuntimeWarning : %s', rw)\n except ValueError as ve:\n logging.warning('BIC: ValueError : %s', ve) \n \n if bestModel == None:\n return None\n \n logging.info('BIC: returning : best model with BIC score=%f for word=%s with number of components=%d', bestBicScore, self.this_word, bestModel.n_components) \n return bestModel", "def generate_corpus(model, sample):\r\n \r\n dl_corpus = []\r\n for word in sample:\r\n if word in model:\r\n dl_corpus.append(model[word])\r\n else:\r\n dl_corpus.append([0]*VECTOR_DIM)\r\n\r\n return [dl_corpus]", "def fit(self, **skip_gram_params):\n\n if 'workers' not in skip_gram_params:\n skip_gram_params['workers'] = self.workers\n\n if 'size' not in skip_gram_params:\n skip_gram_params['size'] = self.dimensions\n\n return gensim.models.Word2Vec(self.walks, **skip_gram_params)", "def build_model(self) -> nn.Module:\n pass", "def build_model_gensim(corpus, id2word, num_topics=20, validset=None):\n\n # Build LDA model\n lda_model = gensim.models.ldamulticore.LdaMulticore(corpus=corpus,\n id2word=id2word,\n num_topics=num_topics,\n random_state=100,\n eval_every=5,\n chunksize=10000, #nb of docs in each training chunk\n passes=50,\n iterations=500,\n alpha=0.001,\n per_word_topics=True,\n workers=4,)\n\n print(\"eta\",lda_model.eta)\n print(\"alpha\",lda_model.alpha)\n\n if validset:\n valid_corpus, valid_id2word, valid_data_lemmatized = validset\n print(lda_model.log_perplexity(valid_corpus, len(valid_corpus)))\n\n return lda_model", "def build_inputs(self, rebuild=False):\n from sagas.ru.ru_comparisons import build_voc, build_input_pairs\n\n if rebuild:\n print('collecting words ...')\n self.save_voc()\n print('rebuilding voc file ...')\n # _, skips=build_voc()\n build_voc()\n\n all_words=self.get_all_words()\n print('building input-method resources ...')\n addons=[]\n for word in all_words:\n key = self.get_norm_key(word)\n if len(key) > 0:\n addons.append('%s %s' % (key, word))\n\n print('merge output ...')\n build_input_pairs(addons)\n print('done.')", "def build(X, y=None):\n model = Pipeline([\n ('preprocessor',NLTKPreprocessor()),\n ('vectorizer', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf', OneVsRestClassifier(LinearSVC(C=0.9)))])\n\n model.fit(X, y)\n return model", "def build_model_fn(self):", "def input_new_phrase(self, text):\n \n x_new_tokens = [word_idx[word] for word in text.split()]\n \n pred = self.model.predict(np.array([x_new_tokens]))\n pred = np.argmax(pred, axis=-1)[0]\n \n return [[word_list[w], tags[pred]] for (w, pred) in zip(range(len(x_new)), pred)]", "def train_build(df):\n print(\"Constructing training set...\")\n recent_labels = pr.labels.get_last_keypresses() #List of strings\n labeled_df = pr.labels.apply_labels_all(df, recent_labels)\n X, y = pr.build_model.make_training_set(labeled_df)\n\n return X, y", "def fit(self, 
examples):\n\n sentences = []\n for example in examples:\n sentences += text_to_w2v_input(example)\n\n self.w2v_model = get_w2v_model(sentences)", "def build_model(category_names):\n try:\n # initialise columns to be used for data preparation purposes in the model pipeline\n message_col = 0\n\n # build a pipeline containing the feature transformations and estimator\n pipeline = Pipeline([\n\n ('features', ColumnTransformer([\n # apply message transformations\n ('text_pipeline', Pipeline([\n ('vect', CountVectorizer(tokenizer=partial(tokenize))),\n ('tfidf', TfidfTransformer())\n ]), message_col),\n\n ('starting_verb', StartingVerbExtractor(), message_col),\n\n ('category_terms', CategoryTermExtractor(category_names=category_names),\n message_col),\n\n ], remainder='drop')),\n\n # specify the estimator\n ('clf', LabelPowerset(MultinomialNB(fit_prior=True)))\n ])\n\n # parameter grid to be used for grid search\n parameters = {\n 'features__text_pipeline__vect__max_features': [10000],\n 'features__text_pipeline__tfidf__sublinear_tf': [True],\n 'features__text_pipeline__vect__ngram_range': [(1,1), (1,2)],\n 'features__text_pipeline__vect__min_df': [1],\n 'features__text_pipeline__vect__max_df': [.95],\n 'features__text_pipeline__tfidf__smooth_idf': [True],\n 'features__text_pipeline__tfidf__norm': ['l2'],\n 'clf__classifier__alpha': [0.01, 1.]\n }\n\n # perform cross validation using grid search on the pipeline described above\n cv = GridSearchCV(pipeline, param_grid=parameters, cv=5, verbose=2)\n return cv\n except:\n raise Exception(\"Could not build model.\")\n #finally:\n # return cv", "def build_model(self) -> DM:\n model = DM()\n model[self.modelroot] = content = DM()\n\n content['potential'] = DM()\n content['potential']['key'] = self.potential_key\n content['potential']['id'] = self.potential_id\n content['implementation'] = DM()\n content['implementation']['key'] = self.potential_LAMMPS_key\n content['implementation']['id'] = self.potential_LAMMPS_id\n\n for subset in self.subsets:\n subset.build_model(content)\n\n self._set_model(model)\n return model", "def build_models():\n train_models()\n return build_response.sent_ok()", "def init_word2vec():\n start = time()\n if not os.path.exists('/cs/engproj/314/proj2/trained_model/GoogleNews-vectors-negative300.bin.gz'):\n raise ValueError(\"SKIP: You need to download the google news model\")\n model = KeyedVectors.load_word2vec_format('/cs/engproj/314/proj2/trained_model/GoogleNews-vectors-negative300.bin.gz', binary=True)\n print('Cell took %.2f seconds to run.' 
% (time() - start))\n # model.init_sims(replace=True)\n global trained_model\n trained_model = model\n return", "def build_model():\n # build pipeline with count vecotrizer, tfidf and support vector machine\n pipeline_SVC = Pipeline([\n ('vect', CountVectorizer(tokenizer = tokenize)),\n ('tfidf', TfidfTransformer()),\n ('multi-clf', MultiOutputClassifier(LinearSVC()))\n ])\n\n # define parameters for gridsearch\n parameters_SVC = {\n 'vect__max_df': (.6, 1),\n 'tfidf__norm': ('l1', 'l2'),\n 'multi-clf__estimator__C': (.1, 1, 100)\n }\n\n # build parameter grid and fit data\n model = GridSearchCV(pipeline_SVC, parameters_SVC)\n\n return model", "def construct_NLP_model(self, df=None):\n import review_processing as rp\n # get words\n if df is not None:\n nitems = df.shape[0]\n col_names = df.columns.values\n if self.review_col_name not in col_names or \\\n self.sentiment_col_name not in col_names:\n sys.exit('construct_NL_model: The name {0}/{1} cannot be found'.\n format(self.review_col_name, self.sentiment_col_name))\n review_list = df[self.review_col_name].values.tolist()\n meaningful_words = map(self.review_to_meaningful_words,\n review_list)\n # Get training sentiment values\n self.sentiment = df[self.sentiment_col_name].values\n\n else:\n if self.training_file_name is None:\n sys.exit('construct_NLP_model: traning file name does not '\n 'exist')\n else:\n suffix = os.path.splitext(self.training_file_name)[1][1:]\n if suffix == 'csv':\n df = pd.read_csv(self.training_file_name)\n if self.review_col_name not in col_names or \\\n self.sentiment_col_name not in col_names::\n sys.exit('construct_NL_model: The name {0}/{1} cannot '\n ' be found'.format(self.review_col_name,\n self.sentiment_col_name))\n nitems = df.shape[0]\n review_list = df[review_col_name].values.tolist()\n meaningful_words = map(self.review_to_meaningful_words,\n review_list)\n elif suffix == 'json':\n data_dict_list = rp.load_data(self.training_file_name)\n if self.review_col_name not in data_dict_list.keys():\n sys.exit('construct_NL_model: The name {0} cannot be '\n 'found'.format(review_col_name))\n review_list = map(lambda x: x[review_col_name],\n data_dict_list)\n meaningful_words = map(self.review_to_meaningful_words,\n review_list)\n else:\n sys.exit('construct_NLP_model: file type not supported '\n 'yet!')\n\n # Training process of Bag of Worlds\n if self.NLP_model == 'BagofWords':\n print('construct_NLP_model: Creating bag of words...')\n self.vectorizer = CountVectorizer(analyzer='word',\n tokenizer=None,\n preprocessor=None,\n stop_words=None,\n max_features=self.maxfeature)\n self.train_data_features = vectorizer.fit_transform(\n meaningful_words)\n self.train_data_features = train_data_features.toarray()\n\n # vocab = vectorizer.get_feature_names()\n # dist = np.sum(train_data_features, axis=0)\n # for tag, count in zip(vocab, dist):\n # print(count, tag)\n\n else:\n sys.exit('construct_NLP_model: NLP_model type not supported yet!')", "def model(self, doc_list=None):\r\n\r\n # eta => prior for the per-topic word distribution\r\n eta = torch.ones(self.V)\r\n\r\n with pyro.plate(\"topics\", self.K):\r\n\r\n # Beta => per topic word distribution\r\n Beta = pyro.sample(f\"beta\", dist.Dirichlet(eta))\r\n\r\n # alpha => prior for the per-doc topic vector\r\n alpha = torch.ones(self.K) / self.K\r\n\r\n X_List, Theta = [], []\r\n for d in pyro.plate(\"documents\", self.D, subsample_size=self.S):\r\n\r\n # theta => per-doc topic vector\r\n theta = pyro.sample(f\"theta_{d}\", dist.Dirichlet(alpha))\r\n\r\n doc = 
None if doc_list is None else doc_list[d]\r\n\r\n with pyro.plate(f\"words_{d}\", self.N[d]):\r\n\r\n # assign a topic\r\n z_assignment = pyro.sample(\r\n f\"z_assignment_{d}\",\r\n dist.Categorical(theta)\r\n )\r\n\r\n # from that topic vec, select a word\r\n X = pyro.sample(\r\n f\"w_{d}\",\r\n dist.Categorical(Beta[z_assignment]),\r\n obs=doc\r\n )\r\n\r\n X_List.append(X)\r\n Theta.append(theta)\r\n\r\n Theta = torch.stack(Theta)\r\n\r\n return X_List, Beta, Theta", "def build_lm(self, x, y=None, mode=TRAINING, prev_h=None, step_num=None): \n one_step = False\n \n # Check parameter consistency\n if mode == LanguageModel.EVALUATION or mode == LanguageModel.TRAINING:\n assert y\n else:\n assert not y\n assert prev_h\n one_step = True\n\n # if x.ndim == 2 then \n # x = (n_steps, batch_size)\n if x.ndim == 2:\n batch_size = x.shape[1]\n # else x = (word_1, word_2, word_3, ...)\n # or x = (last_word_1, last_word_2, last_word_3, ..)\n # in this case batch_size is \n else:\n batch_size = 1\n \n if not prev_h:\n prev_h = T.alloc(np.float32(0.), batch_size, self.qdim)\n \n xe = self.approx_embedder(x)\n # Gated Encoder\n if self.step_type == \"gated\":\n f_enc = self.gated_step\n o_enc_info = [prev_h, None, None, None]\n else:\n f_enc = self.plain_step\n o_enc_info = [prev_h]\n \n # Run through all the sentence (encode everything)\n if not one_step: \n _res, _ = theano.scan(f_enc,\n sequences=[xe],\\\n outputs_info=o_enc_info) \n # Make just one step further\n else:\n _res = f_enc(xe, prev_h)\n\n h = _res[0]\n # Store last h for further use\n pre_activ = self.output_layer(h, xe)\n \n # EVALUATION : Return target_probs\n # target_probs.ndim == 3\n outputs = self.output_softmax(pre_activ)\n \n if mode == LanguageModel.EVALUATION:\n target_probs = GrabProbs(outputs, y)\n return target_probs, h, outputs\n # BEAM_SEARCH : Return output (the softmax layer) + the new hidden states\n elif mode == LanguageModel.BEAM_SEARCH:\n return outputs, h\n # SAMPLING : Return a vector of n_sample from the output layer \n # + log probabilities + the new hidden states\n elif mode == LanguageModel.SAMPLING:\n if outputs.ndim == 1:\n outputs = outputs.dimshuffle('x', 0)\n \n sample = self.trng.multinomial(pvals=outputs, dtype='int64').argmax(axis=-1)\n if outputs.ndim == 1:\n sample = sample[0]\n \n log_prob = -T.log(T.diag(outputs.T[sample]))\n return sample, log_prob, h", "def build_model():\n \n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n \n parameters = {\n 'vect__ngram_range': ((1, 1), (1, 2)),\n 'clf__estimator__min_samples_split': [2, 4],\n }\n \n cv = GridSearchCV(pipeline, param_grid=parameters)\n\n return cv", "def generate_sentence(word1, word2, length, vocab, model):\n reverse_vocab = {idx: word for word, idx in vocab.items()}\n output_string = np.zeros((1, length), dtype=np.int)\n output_string[:, 0: 2] = vocab[word1], vocab[word2]\n\n for end in range(2, length):\n start = end - 2\n output_string[:, end] = np.argmax(model(output_string[:, start:end]), axis=1)\n text = [reverse_vocab[i] for i in list(output_string[0])]\n \n print(\" \".join(text))", "def __init__(self, model_name):\n self.name = model_name\n self.words = {}\n self.word_lengths = {}\n self.sentence_lengths = {}\n self.stems = {}\n self.commas_per_sentence = {}", "def build_model():\n # noise for soise sampling in NCE\n noise = build_unigram_noise(\n torch.FloatTensor(corpus.vocab.idx2count)\n )\n\n norm_term = 
'auto' if args.norm_term == -1 else args.norm_term\n # setting up NCELoss modules\n if args.index_module == 'linear':\n criterion = IndexLinear(\n args.emsize,\n ntoken,\n noise=noise,\n noise_ratio=args.noise_ratio,\n norm_term=norm_term,\n loss_type=args.loss,\n reduction='none',\n )\n model = RNNModel(\n ntoken, args.emsize, args.nhid, args.nlayers,\n criterion=criterion, dropout=args.dropout,\n )\n elif args.index_module == 'gru':\n if args.nlayers != 1:\n logger.warning('Falling into one layer GRU due to Index_GRU supporting')\n nce_criterion = IndexGRU(\n ntoken, args.emsize, args.nhid,\n args.dropout,\n noise=noise,\n noise_ratio=args.noise_ratio,\n norm_term=norm_term,\n )\n model = GenModel(\n criterion=nce_criterion,\n )\n else:\n logger.error('The index module [%s] is not supported yet' % args.index_module)\n raise(NotImplementedError('index module not supported'))\n\n if args.cuda:\n model.cuda()\n\n logger.info('model definition:\\n %s', model)\n return model", "def build(self):\n self.build_inputs()\n self.build_word_embeddings()\n self.build_encoder()\n self.build_fc()\n self.build_loss()\n self.build_global_step()", "def _create_model(self):\n config = {\n \"input_features\": self.input_features,\n \"output_features\": self.output_features,\n \"combiner\": {\"type\": \"concat\", \"output_size\": 14},\n TRAINER: {\"epochs\": 2, BATCH_SIZE: 128},\n }\n return LudwigModel(config, logging_level=logging.WARN)", "def createModel(self, X_train, y_train):\n total_words = len(self.tokenizer.word_index) + 1\n # Create model and layers\n model = Sequential()\n model.add(Embedding(total_words, 100, input_length=self.max_sequence_len-1))\n model.add(Bidirectional(LSTM(150)))\n model.add(Dense(total_words, activation=\"softmax\"))\n # Compile model\n model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.01), metrics=['accuracy'])\n # Fit model to training data\n fitting = model.fit(X_train, y_train, epochs=100, verbose=1, callbacks=[self.callback])\n return model", "def build_sys_rec_model():\n print(\"building model...\")\n model = Merchant2VecModel()\n model.train(final_training=True)\n model.save_model()", "def build_models_DOC(train_pos_vec, train_neg_vec):\n Y = [\"pos\"]*len(train_pos_vec) + [\"neg\"]*len(train_neg_vec)\n\n # Use sklearn's GaussianNB and LogisticRegression functions to fit two models to the training data.\n # For LogisticRegression, pass no parameters\n # YOUR CODE HERE\n from sklearn import linear_model,naive_bayes\n lr = linear_model.LogisticRegression()\n lr_model = lr.fit(train_pos_vec + train_neg_vec, Y)\n\n gnb = naive_bayes.GaussianNB()\n gnb_model = gnb.fit(train_pos_vec + train_neg_vec, Y)\n \n return gnb_model, lr_model", "def read_word2vec_model():\n file_name = \"word2vec_model.txt\"\n # these are the pre-2018 lines to load a model:\n # from gensim.models.word2vec import Word2Vec\n # m = Word2Vec.load_word2vec_format(file_name, binary=False)\n \n # here are the post-2018 lines to load a model:\n from gensim.models import KeyedVectors\n print(\"Starting to load the model in \", file_name, \"...\")\n m = KeyedVectors.load_word2vec_format(file_name, binary=False)\n print(\"Model loaded.\\n\")\n\n print(\"The model built is\", m, \"\\n\")\n print(\"m.vocab has\", len(m.vocab), \"words\")\n ## The above line should print\n ## m.vocab has 43981 words\n\n print(\"Each word is a vector of size\", m.vector_size)\n ## which should tells us that each word is represented by a 300-dimensional vector\n\n print(\"\\nTry m.get_vector('hello') to see 
one...!\\n\")\n ## Once the model is built, it can't be changed without rebuilding it; we'll leave it. \n\n return m", "def generateByWord(model, voc, maxlen=20, diversity=0.5, numwords=42):\n\n text, sym_indices, indices_sym = voc\n syms = set(text)\n start_index = random.randint(0, len(text) - maxlen - 1) \n generated = ''\n sentence = text[start_index: start_index + maxlen]\n \n #generated += sentence\n generated += ' '.join(sentence)\n print('----- Generating with seed: \"' + ' '.join(sentence) + '\"')\n sys.stdout.write(generated)\n\n for i in range(numwords):\n x = np.zeros((1, maxlen, len(syms)))\n for t, sym in enumerate(sentence):\n x[0, t, sym_indices[sym]] = 1.\n \n preds = model.predict(x, verbose=0)[0]\n next_index = sample(preds, diversity)\n next_sym = indices_sym[next_index]\n generated += ' '+next_sym\n sentence.append(next_sym)\n tmpsentence = sentence[1:]\n sentence = tmpsentence\n sys.stdout.write(next_sym+' ')\n sys.stdout.flush()\n print()", "def sample(self, M):\n # Helper function to get mdls\n def recur_mdl(model, lst):\n if isinstance(model, UnigramLM): # Base case\n return\n \n recur_mdl(model.prev_mdl, lst)\n lst.append(model)\n return lst\n \n tokens = ['\\x02'] # START token\n\n # Use a helper function to generate sample tokens of length `length`\n mdls = recur_mdl(self, []) # List of models\n\n if M <= self.N: # Before model ngrams\n mdls = mdls[:M]\n else: # If reach model ngrams\n for _ in range(M - self.N + 1): # Append additional used models\n mdls.append(mdls[-1])\n\n tups = tuple('\\x02'.split()) # First word depend on '\\x02'\n for mdl in mdls: # Loop through used models\n probs = mdl.mdl[mdl.mdl['n1gram'] == tups] # Get ngrams and probability dataframe\n if len(probs.ngram) == 0: # No word to choose\n ran = '\\x03' # Append '\\x03'\n break\n else:\n random = np.random.choice(probs.ngram, p=probs.prob) # Choose token based on probs\n ran = random[-1]\n \n if mdl.N < self.N: # If still smaller than N\n tups = random\n else: # ngram models\n tups = random[1:]\n\n tokens.append(ran) # Append\n \n for _ in range(M - len(tokens)): # Fill the gap of missing due to '\\x03'\n tokens.append('\\x03')\n \n # Transform the tokens to strings\n return ' '.join(tokens)", "def train(self, X, y):\n for c in set(y):\n # prepare a list of words within each class\n self.class_words[c] = []\n\n # loop through each sentence in our training data\n for _element, _class in zip(X,y):\n # process n-grams\n _element = self.transform_ngrams(_element)\n print(_element)\n for w in _element:\n # have we not seen this word combination already?\n if w not in self.corpus_words:\n self.corpus_words[w] = 1\n else:\n self.corpus_words[w] += 1\n\n # add the word to our words in class list\n self.class_words[_class].extend([w])\n self.save()", "def _build_model(self, **kwargs):\n pass", "def generate_language(sent, vocab, model, end_tok=END_TOK):\n sent = [vocab[w] if w in vocab else vocab[\"<UNK>\"] for w in sent.split(' ')]\n ix = 0\n ix2vocab = {vocab[w]: w for w in vocab}\n gen_s = deepcopy(sent)\n while ix != 10:\n inp = np.array(sent).reshape(1, -1)\n probs = model(inp)\n # Sample from the model\n sample = np.random.multinomial(100, probs.flatten(), size=1)\n pred = np.argmax(sample)\n sent = sent[1:] + [pred]\n gen_s.append(pred)\n ix += 1\n if ix2vocab[pred] == end_tok:\n break\n return ' '.join([ix2vocab[jx] for jx in gen_s])", "def _get_model(self):\n if self.model == None:\n model_path = self.model_path\n w2v_model = gensim.models.Word2Vec.load(model_path)\n # Keep only the 
normalized vectors.\n # This saves memory but makes the model untrainable (read-only).\n w2v_model.init_sims(replace=True)\n self.model = w2v_model\n return self.model", "def train(self, x_train, y_train, w2v_size=300, w2v_window=5, w2v_min_count=1,\n w2v_epochs=100, k_max_sequence_len=500, k_batch_size=128, k_epochs=32, k_lstm_neurons=128,\n k_hidden_layer_neurons=(128, 64, 32), verbose=1):\n # Set variables\n self.w2v_size = w2v_size\n self.w2v_window = w2v_window\n self.w2v_min_count = w2v_min_count\n self.w2v_epochs = w2v_epochs\n self.k_max_sequence_len = k_max_sequence_len\n self.k_batch_size = k_batch_size\n self.k_epochs = k_epochs\n self.k_lstm_neurons = k_lstm_neurons\n self.k_hidden_layer_neurons = k_hidden_layer_neurons\n\n # split text in tokens\n x_train = [gensim.utils.simple_preprocess(text) for text in x_train]\n\n logging.info(\"Build & train Word2Vec model\")\n self.w2v_model = gensim.models.Word2Vec(min_count=self.w2v_min_count, window=self.w2v_window,\n size=self.w2v_size,\n workers=multiprocessing.cpu_count())\n self.w2v_model.build_vocab(x_train)\n self.w2v_model.train(x_train, total_examples=self.w2v_model.corpus_count, epochs=self.w2v_epochs)\n w2v_words = list(self.w2v_model.wv.vocab)\n logging.info(\"Vocabulary size: %i\" % len(w2v_words))\n logging.info(\"Word2Vec trained\")\n\n logging.info(\"Fit LabelEncoder\")\n self.label_encoder = LabelEncoder()\n y_train = self.label_encoder.fit_transform(y_train)\n self.num_classes = len(self.label_encoder.classes_)\n y_train = utils.to_categorical(y_train, self.num_classes)\n\n logging.info(\"Fit Tokenizer\")\n self.tokenizer = Tokenizer()\n self.tokenizer.fit_on_texts(x_train)\n x_train = keras.preprocessing.sequence.pad_sequences(self.tokenizer.texts_to_sequences(x_train),\n maxlen=self.k_max_sequence_len)\n num_words = len(self.tokenizer.word_index) + 1\n logging.info(\"Number of unique words: %i\" % num_words)\n\n logging.info(\"Create Embedding matrix\")\n word_index = self.tokenizer.word_index\n vocab_size = len(word_index) + 1\n embedding_matrix = np.zeros((vocab_size, self.w2v_size))\n for word, idx in word_index.items():\n if word in w2v_words:\n embedding_vector = self.w2v_model.wv.get_vector(word)\n if embedding_vector is not None:\n embedding_matrix[idx] = self.w2v_model.wv[word]\n logging.info(\"Embedding matrix: %s\" % str(embedding_matrix.shape))\n\n logging.info(\"Build Keras model\")\n logging.info('x_train shape: %s' % str(x_train.shape))\n logging.info('y_train shape: %s' % str(y_train.shape))\n\n self.k_model = Sequential()\n self.k_model.add(Embedding(vocab_size,\n self.w2v_size,\n weights=[embedding_matrix],\n input_length=self.k_max_sequence_len,\n trainable=False))\n self.k_model.add(LSTM(self.k_lstm_neurons, dropout=0.5, recurrent_dropout=0.2))\n for hidden_layer in self.k_hidden_layer_neurons:\n self.k_model.add(Dense(hidden_layer, activation='relu'))\n self.k_model.add(Dropout(0.2))\n if self.num_classes > 1:\n self.k_model.add(Dense(self.num_classes, activation='softmax'))\n else:\n self.k_model.add(Dense(self.num_classes, activation='sigmoid'))\n\n self.k_model.compile(loss='categorical_crossentropy' if self.num_classes > 1 else 'binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n logging.info(self.k_model.summary())\n\n # Callbacks\n early_stopping = EarlyStopping(monitor='acc', patience=6, verbose=0, mode='max')\n rop = ReduceLROnPlateau(monitor='acc', factor=0.1, patience=3, verbose=1, epsilon=1e-4, mode='max')\n callbacks = [early_stopping, rop]\n\n logging.info(\"Fit 
Keras model\")\n self.k_model.fit(x_train, y_train,\n batch_size=self.k_batch_size,\n epochs=self.k_epochs,\n callbacks=callbacks,\n verbose=verbose)\n\n logging.info(\"Done\")", "def load_model(self):\n try:\n self.model = Word2Vec.load(self.config[\"model_path\"])\n self.model.init_sims(replace=True)\n except Exception as e:\n print(e)\n print(\"error in model loading!\")", "def _build_model(self):\n raise NotImplementedError()", "def __init__(self):\n # parse MSR data\n test_data, sent_dict, pair_dict = parse()\n # word mover model -- take long to load the model!\n wm_model = WMD()\n # copnvert the ID->String dict to ID-> token dict\n candidate_dict = wmd_utils.sent_dict_to_tok_dict(sent_dict)\n wm_model.evaluate_model(candidate_dict, pair_dict)", "def build_vocab(self, sentences, keep_raw_vocab=False, trim_rule=None, progress_per=10000, update=False):\n print(\"build------------------\")\n self.scan_vocab(sentences, progress_per=progress_per, trim_rule=trim_rule) # initial survey\n # trim by min_count & precalculate downsampling\n self.scale_vocab(trim_rule=trim_rule, update=update)\n self.finalize_vocab(update=update)", "def lm_train(data_dir, language, fn_LM):\r\n\r\n # TODO: Implement Function\r\n\r\n language_model, unigram, bigram = {}, {}, {}\r\n CKP = \"WEAREDELETINGEND\"\r\n pre_w = CKP\r\n for root, dirs, files in os.walk(data_dir, topdown=False):\r\n for name in files:\r\n if name.endswith(language):\r\n #print(\"reading \", name)\r\n filepath = os.path.join(data_dir, name)\r\n readingfile = open(filepath, \"r\")\r\n for line in readingfile:\r\n processed = preprocess(line, language)\r\n if len(processed) != 0:\r\n tokenList = processed.split()\r\n for w in tokenList:\r\n # ======================\r\n # for unigram structure\r\n # ======================\r\n # not exist yet, initialize it at count 1\r\n if w not in unigram.keys():\r\n unigram[w] = 1\r\n else:\r\n unigram[w] += 1\r\n\r\n # ======================\r\n # for bigram structure\r\n # ======================\r\n if pre_w not in bigram.keys():\r\n bigram[pre_w] = {} # building the first words level\r\n bigram[pre_w][w] = 1\r\n else:\r\n if w not in bigram[pre_w].keys():\r\n bigram[pre_w][w] = 1\r\n else:\r\n bigram[pre_w][w] += 1\r\n pre_w = w\r\n pre_w = CKP\r\n\r\n\r\n language_model[\"uni\"] = unigram\r\n bigram.pop(CKP)\r\n bigram.pop(\"SENTEND\")\r\n language_model[\"bi\"] = bigram\r\n\r\n #Save Model\r\n with open(fn_LM+'.pickle', 'wb') as handle:\r\n pickle.dump(language_model, handle, protocol=pickle.HIGHEST_PROTOCOL)\r\n\r\n return language_model", "def read_model(self):\n \n # words dictionary\n f = open(self.name + \"_words\", 'r') \n d_str = f.read()\n f.close()\n \n d = dict(eval(d_str))\n self.words = d\n\n # word_lengths dictionary\n f = open(self.name + \"_word_lengths\", 'r') \n d_str = f.read()\n f.close()\n \n d = dict(eval(d_str))\n self.word_lengths = d\n\n # stems dictionary\n f = open(self.name + \"_stems\", 'r') \n d_str = f.read()\n f.close()\n \n d = dict(eval(d_str))\n self.stems = d\n\n # sentence_lengths dictionary\n f = open(self.name + \"_sentence_lengths\", 'r') \n d_str = f.read()\n f.close()\n \n d = dict(eval(d_str))\n self.sentence_lengths = d\n\n # ten most common words\n f = open(self.name + \"_common_word\", 'r') \n d_str = f.read()\n f.close()\n \n d = list(eval(d_str))\n self.common_word = d", "def build_model(self, X: pd.DataFrame, y: pd.DataFrame = None) -> pm.Model:\n idx = X.index\n \n if y is None:\n y = pd.Series(0, index=idx)\n elif self.oversample: # only if y is given\n 
n_pos = (y == 1).sum()\n n_neg = (y == 0).sum()\n to_add = int(np.ceil(n_neg/n_pos) - 1)\n # print(n_pos, n_neg, to_add)\n if to_add > 4:\n to_add = 4\n for i in range(to_add):\n idx = idx.append(y[y==1].index)\n X = X.loc[idx]\n y = y.loc[idx]\n \n A = X[self.v_known + self.v_oob_bio]\n B_vals = X[self.v_fuzzy]\n B_mask = (B_vals == -1).astype(int)\n C_raw = X[self.v_float_adm + self.v_float_bio]\n # C_scaled = (C_raw - self.C_mean_) / self.C_std_ \n C_scaled = np.log1p(C_raw/self.C_mean_)\n C_scaled[~np.isfinite(C_scaled)] = np.nan\n C_vals = C_scaled.fillna(0)\n C_mask = C_scaled.isnull().astype(int)\n \n coords = {\"idx\": idx, \"a\": A.columns, \"b\": B_vals.columns, \"c\": C_vals.columns}\n with pm.Model(coords=coords) as m:\n pm.Data(\"A\", A, dims=[\"idx\", \"a\"])\n pm.Data(\"B_vals\", B_vals, dims=[\"idx\", \"b\"])\n pm.Data(\"B_mask\", B_mask, dims=[\"idx\", \"b\"])\n pm.Data(\"C_vals\", C_vals, dims=[\"idx\", \"c\"])\n pm.Data(\"C_mask\", C_mask, dims=[\"idx\", \"c\"])\n pm.Data(\"y\", y, dims=[\"idx\"])\n\n pm.Normal(\"avg\", mu=0, sd=1)\n\n pm.Beta(\"h_a_incl\", alpha=1, beta=4)\n pm.Normal(\"a_coef_raw\", mu=0, sd=1, dims=[\"a\"])\n pm.Bernoulli(\"a_incl\", p=m[\"h_a_incl\"], dims=[\"a\"])\n pm.Deterministic(\"a_coef\", m['a_coef_raw'] * m['a_incl'], dims=[\"a\"])\n \n pm.Normal(\"b_vals_coef\", mu=0, sd=1, dims=[\"b\"])\n pm.Normal(\"b_mask_coef_raw\", mu=0, sd=1, dims=[\"b\"])\n pm.Beta(\"h_b_mask_incl\", alpha=1, beta=4)\n pm.Bernoulli(\"b_mask_incl\", p=m[\"h_b_mask_incl\"], dims=[\"b\"])\n pm.Deterministic(\"b_mask_coef\", m['b_mask_coef_raw'] * m['b_mask_incl'], dims=[\"b\"])\n \n pm.Normal(\"c_vals_coef\", mu=0, sd=1, dims=[\"c\"])\n pm.Normal(\"c_mask_coef_raw\", mu=0, sd=1, dims=[\"c\"])\n pm.Beta(\"h_c_mask_incl\", alpha=1, beta=4)\n pm.Bernoulli(\"c_mask_incl\", p=m[\"h_c_mask_incl\"], dims=[\"c\"])\n pm.Deterministic(\"c_mask_coef\", m['c_mask_coef_raw'] * m['c_mask_incl'], dims=[\"c\"])\n unprob = pm.Deterministic(\n \"logit\",\n m['avg']\n + tt.dot(m[\"A\"], m[\"a_coef\"])\n + tt.dot(m[\"B_vals\"] * (1 - m['B_mask']), m[\"b_vals_coef\"])\n + tt.dot(m[\"B_mask\"], m[\"b_mask_coef\"])\n + tt.dot(m[\"C_vals\"] * (1 - m['C_mask']), m[\"c_vals_coef\"])\n + tt.dot(m[\"C_mask\"], m[\"c_mask_coef\"])\n )\n pm.Bernoulli(\"y_pred\", p = tt.nnet.sigmoid(unprob), dims=['idx'], observed=m['y'])\n\n m.graph = pm.model_to_graphviz()\n\n return m", "def build_features(self, example):\n context_idxs = np.full([self._para_limit],\n fill_value=self._word_vocab[self._word_vocab.padding_token],\n dtype=np.float32)\n\n ctx_chars_idxs = np.full([self._para_limit, self._char_limit],\n fill_value=self._char_vocab[self._char_vocab.padding_token],\n dtype=np.float32)\n\n ques_idxs = np.full([self._ques_limit],\n fill_value=self._word_vocab[self._word_vocab.padding_token],\n dtype=np.float32)\n\n ques_char_idxs = np.full([self._ques_limit, self._char_limit],\n fill_value=self._char_vocab[self._char_vocab.padding_token],\n dtype=np.float32)\n\n context_len = min(len(example['context_tokens']), self._para_limit)\n context_idxs[:context_len] = self._get_words_emb(example['context_tokens'][:context_len])\n\n ques_len = min(len(example['ques_tokens']), self._ques_limit)\n ques_idxs[:ques_len] = self._get_words_emb(example['ques_tokens'][:ques_len])\n\n for i in range(0, context_len):\n char_len = min(len(example['context_chars'][i]), self._char_limit)\n ctx_chars_idxs[i, :char_len] = self._char_vocab[example['context_chars'][i][:char_len]]\n\n for i in range(0, ques_len):\n char_len = 
min(len(example['ques_chars'][i]), self._char_limit)\n ques_char_idxs[i, :char_len] = self._char_vocab[example['ques_tokens'][i][:char_len]]\n\n start, end = example['y1s'][-1], example['y2s'][-1]\n\n record = (example['id'],\n example['record_idx'],\n context_idxs,\n ques_idxs,\n ctx_chars_idxs,\n ques_char_idxs,\n start,\n end,\n example['context'],\n example['spans'])\n\n return record", "def read_model(self):\n filename = self.name + '_words'\n f = open(filename, 'r') \n d_str = f.read() \n f.close()\n d = dict(eval(d_str))\n self.words = d\n \n filename2 = self.name + '_word_lengths'\n f = open(filename2, 'r') \n d2_str = f.read() \n f.close()\n d2 = dict(eval(d2_str))\n self.word_lengths = d2\n \n filename3 = self.name + '_stems'\n f = open(filename3, 'r') \n d3_str = f.read() \n f.close()\n d3 = dict(eval(d3_str))\n self.stems = d3\n \n filename4 = self.name + '_sentence_lengths'\n f = open(filename4, 'r') \n d4_str = f.read() \n f.close()\n d4 = dict(eval(d4_str))\n self.sentence_lengths = d4\n \n filename5 = self.name + '_punctuation'\n f = open(filename5, 'r') \n d5_str = f.read() \n f.close()\n d5 = dict(eval(d5_str))\n self.punctuation = d5", "def train_use_word(self, dataset):\n pred_labels = []\n gold_labels = []\n loss_array = []\n for inputs, labels in tqdm(dataset):\n if self.config.use_gpu:\n inputs = inputs.cuda()\n labels = labels.cuda()\n mask = inputs.ne(0).byte()\n word_mask = mask.reshape(-1, mask.size(2))\n sent_mask = mask.sum(2).ne(0).byte()\n if self.l_embedding is not None:\n label_embedding = torch.tensor(self.l_embedding, dtype = torch.float)\n output, word_weights, sent_weights = self.model(x_word=inputs, word_mask=word_mask,\n sent_mask=sent_mask, label=label_embedding)\n else:\n output, word_weights, sent_weights = self.model(inputs, word_mask, sent_mask)\n if self.config.triplet:\n triplet_loss, triplet_len = self.online_triplet_loss(output, labels)\n output = self.classification_net(output)\n\n result = torch.max(output, 1)[1]\n pred_labels.extend(result.cpu().numpy().tolist())\n gold_labels.extend(labels.cpu().numpy().tolist())\n\n loss = self.criterion(output, labels)\n loss = torch.mean(loss) + triplet_loss\n loss = torch.mean(loss)\n pass\n else:\n output = self.classification_net(output)\n\n result = torch.max(output, 1)[1]\n pred_labels.extend(result.cpu().numpy().tolist())\n gold_labels.extend(labels.cpu().numpy().tolist())\n\n loss = self.criterion(output, labels)\n loss = torch.mean(loss)\n\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n\n loss_array.append(loss.cpu().item())\n return loss_array, pred_labels, gold_labels", "def trainingModel4wmd(corpus):\n model = Word2Vec(corpus, workers = nCores, size = 100, window = 300,\n min_count = 2, iter = 250)\n # model = Word2Vec(corpus)\n\n # use the following if we want to normalize the vectors\n model.init_sims(replace=True)\n\n return model", "def initialize_gensim_synset_model_with_dictionary(dictionary, window):\n result = []\n for i, word in enumerate(dictionary):\n result.append([dictionary[word]])\n model = Word2Vec(min_count=1, window=int(window), sorted_vocab=1)\n model.build_vocab(result)\n return model", "def retrain_sub_model(self):\r\n \r\n self.sub_model = self.load_weights_to_sub_model()\r\n X = np.array(self.conv4_characters_list)\r\n X = np.reshape(X, (X.shape[0]*X.shape[1], X.shape[2]))\r\n y = np.repeat(np.arange(1283), 9)\r\n \r\n opt = optimizers.Adam(lr=0.001)\r\n 
self.sub_model.compile(optimizer=opt,loss='sparse_categorical_crossentropy',metrics=['accuracy'])\r\n print(\"***Start to creat new decision model***\")\r\n self.sub_model.fit(X, y, epochs=20)\r\n print(\"***Finish***\")", "def build_input_data_from_word2vec(sentence, word2vec_vocab, word2vec_vec):\n X_data = []\n for word in sentence:\n try:\n word2vec_index = word2vec_vocab[word].index\n word_vector = word2vec_vec[word2vec_index]\n except:\n word2vec_index = word2vec_vocab['<un_known>'].index\n word_vector = word2vec_vec[word2vec_index]\n #word_vector = np.random.uniform(low=-0.25, high=0.25, size=word2vec_vec.shape[1])\n X_data.append(word_vector)\n X_data = np.asarray(X_data)\n return X_data" ]
[ "0.64706546", "0.628739", "0.61955893", "0.6154911", "0.6109166", "0.60060537", "0.5975403", "0.59434366", "0.5934024", "0.588384", "0.58455455", "0.58168155", "0.58109325", "0.57811666", "0.5767697", "0.56962854", "0.56701654", "0.56225514", "0.56121624", "0.5598342", "0.5598342", "0.55950046", "0.55468446", "0.55452704", "0.55203944", "0.55165434", "0.55068994", "0.55055916", "0.5497747", "0.5493409", "0.54776615", "0.54505044", "0.5446643", "0.5442284", "0.54390967", "0.5437491", "0.54306114", "0.54286176", "0.5421789", "0.5405538", "0.5403654", "0.53965133", "0.53890336", "0.538902", "0.53885746", "0.5386732", "0.53720725", "0.53673595", "0.53670275", "0.5359553", "0.53510547", "0.5346899", "0.5344941", "0.5328229", "0.5322742", "0.53191656", "0.53155345", "0.5315319", "0.5314005", "0.5312879", "0.53103435", "0.5308042", "0.5303527", "0.53023636", "0.52985007", "0.52970123", "0.5287681", "0.52731836", "0.52665955", "0.52652985", "0.52516836", "0.5250802", "0.52413315", "0.52404064", "0.5233667", "0.52197033", "0.52168727", "0.5196292", "0.5196133", "0.5194301", "0.5190913", "0.51891136", "0.5183784", "0.5179275", "0.5171996", "0.51692325", "0.51666677", "0.5165432", "0.5163803", "0.5163407", "0.51629657", "0.51626676", "0.51614463", "0.5160957", "0.51591384", "0.5150989", "0.51503605", "0.51450896", "0.51446", "0.51441854", "0.5141087" ]
0.0
-1
Generate scores on features on the validation set by L2X. Train the L2X model with variational approaches if train = True.
def L2X(train = True): print('Loading dataset...') x_train, y_train, x_val, y_val, id_to_word = load_data() #pred_train = np.load('data/pred_train.npy') #pred_val = np.load('data/pred_val.npy') print('Creating model...') # P(S|X) with tf.variable_scope('selection_model'): X_ph = Input(shape=(maxlen,), dtype='int32') logits_T_grp = construct_gumbel_selector(X_ph, max_features, embedding_dims, maxlen) # bs, max_len * num_groups tau = 0.5 T = Sample_Concrete(tau, k, num_feature=maxlen, num_groups=num_groups)(logits_T_grp) T = Reshape((maxlen, num_groups))(T) T = Permute((2, 1))(T) # bs, num_groups, max_len # q(X_S) with tf.variable_scope('prediction_model'): emb2 = Embedding(max_features, embedding_dims, input_length=maxlen)(X_ph) # emb2 bs, max_len, 50 # apply the matrix trick as before # here the output size of matmul layer is different from before net = matmul_layer([T, emb2]) # bs, num_groups, 50 #print(net.shape) net = Conv1D(1, 1, padding='same', activation=None, strides=1, name = 'merge_channel')(net) # bs, num_groups, 1 # net = Mean(net) # bs, 50 input_group = Flatten()(net) # bs, num_groups # num_groups = K.int_shape(input_group)[1] # here we add instance wise f-s again!!!! net = Dense(100, activation='relu', name = 's/dense1', kernel_regularizer=regularizers.l2(1e-3))(input_group) net = Dense(100, activation='relu', name = 's/dense2', kernel_regularizer=regularizers.l2(1e-3))(net) logits = Dense(num_groups)(net) # A tensor of shape, [batch_size, max_sents, 100] samples = Sample_Concrete_Original(tau, num_vital_group, name='group_importance')(logits) new_input_group = Multiply()([input_group, samples]) net = Dense(hidden_dims, activation='relu')(new_input_group) preds = Dense(2, activation='softmax', name = 'new_dense')(net) model = Model(inputs=X_ph, outputs=preds) model.summary() model.compile(loss='categorical_crossentropy', optimizer='rmsprop',#optimizer, metrics=['acc']) #train_acc = np.mean(np.argmax(pred_train, axis = 1)==np.argmax(y_train, axis = 1)) #val_acc = np.mean(np.argmax(pred_val, axis = 1)==np.argmax(y_val, axis = 1)) #print('The train and validation accuracy of the original model is {} and {}'.format(train_acc, val_acc)) if train: filepath="models/l2x.hdf5" checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max') callbacks_list = [checkpoint] st = time.time() model.fit(x_train, y_train, validation_data=(x_val, y_val), callbacks = callbacks_list, epochs=epochs, batch_size=batch_size) duration = time.time() - st print('Training time is {}'.format(duration)) model.load_weights('models/l2x.hdf5', by_name=True) pred_model = Model(X_ph, [T, samples]) pred_model.summary() pred_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc']) st = time.time() #scores = pred_model.predict(x_val, # verbose = 1, batch_size = batch_size)[:,:,0] #scores = np.reshape(scores, [scores.shape[0], maxlen]) scores_t, group_importances_t = pred_model.predict(x_train, verbose = 1, batch_size = batch_size) scores_v, group_importances_v = pred_model.predict(x_val, verbose = 1, batch_size = batch_size) return scores_t, group_importances_t, scores_v, group_importances_v, x_val
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train(self,features,y):\r\n \r\n if self.learn_type == \"nn\":\r\n #generate supervised dataset\r\n return(self.learner.train_on_batch(features,y))\r\n elif self.learn_type == \"linear\":\r\n grad = 0\r\n n = len(features)\r\n for i in range(n):\r\n #sum over the instances to get an estimate of the gradient\r\n print((y[i] - self.learner.activate(features[i])))\r\n grad -= (y[i] - self.learner.activate(features[i])) * \\\r\n self.learner.grad(features[i])\r\n grad /= n\r\n #update paramter\r\n param = np.copy(self.learner.param)\r\n self.learner.param = param - self.alpha * grad\r\n #print(self.learner.param)\r", "def train(self):\n feature = Feature(trained=False)\n classifier = LogisticRegression(\n penalty='l2',\n max_iter=100,\n solver='liblinear',\n random_state=self.RAND_SEED)\n\n true_labels = []\n predicted_labels = []\n\n for subj in self.subjects:\n print(subj)\n # preprocess training and testing set\n self.dataset_gen(subject=subj, valid=False)\n\n # train and predict\n pipeline_steps = [('vectorized', feature.vector)]\n if self.istfidf:\n pipeline_steps.append(('tf-idf', feature.tfidftransform))\n if self.islda == 'small':\n pipeline_steps.append(('lda', feature.ldatransform_small))\n elif self.islda == 'large':\n pipeline_steps.append(('lda', feature.ldatransform_large))\n else:\n pass\n if self.isnorm:\n pipeline_steps.append(('scalar', StandardScaler(with_mean=False)))\n pipeline_steps.append(('clf', classifier))\n model = Pipeline(steps=pipeline_steps)\n\n model.fit(self.X_train, self.y_train)\n\n predicted = model.predict(self.X_test)\n # hamming\n predicted_labels.append(predicted)\n true_labels.append(self.y_test)\n\n true_matrix, pred_matrix = np.array(true_labels, int).T, np.array(predicted_labels, int).T\n true_matrix[true_matrix == -1] = 0\n pred_matrix[pred_matrix == -1] = 0\n\n evaluation = Evaluation(self.subjects)\n evaluation.model_evaluate(true_matrix=true_matrix, pred_matrix=pred_matrix, model_name=self.modelname)", "def sklearn_train() -> None:\n cross_validate(args=SklearnTrainArgs().parse_args(), train_func=run_sklearn)", "def train(self):\n\t\t# Helper: Early stopping.\n\t\tearly_stopper = EarlyStopping(patience=2, verbose = 1)\n\t\tself.model.fit(data.x_train, data.y_train,\n\t\t\t\t\t\tbatch_size=data.batch_size,\n\t\t\t\t\t\tepochs=10000, # using early stopping, so no real limit\n\t\t\t\t\t\tverbose=1,\n\t\t\t\t\t\tvalidation_split=0.05,\n\t\t\t\t\t\tcallbacks=[early_stopper])\n\n\t\tscore = self.model.evaluate(data.x_test, data.y_test, verbose=1)\n\n\t\treturn score[1] # 1 is accuracy. 
0 is loss.", "def train(self, X, y, X_cv=None, y_cv=None, verbose=False):\n np.random.seed(seed=self.randomSeed)\n\n n, m = X.shape # n samples, m features\n if self.loss == 'linear':\n w = np.array([np.zeros(m)]).T # dim: m by 1\n elif self.loss == 'logistic':\n w = np.array([np.random.rand(m)]).T / 1 # dim: m by 1\n # w = np.array([np.zeros(m)]).T # dim: m by 1\n elif self.loss == 'perceptron':\n w = np.array([np.random.rand(m)]).T # dim: m by 1\n # w = np.array([np.zeros(m)]).T # dim: m by 1\n elif self.loss == 'svm':\n w = np.array([np.random.rand(m)]).T / 5 # dim: m by 1\n # w = np.array([np.zeros(m)]).T # dim: m by 1\n\n for i in range(1, self.iteration + 1):\n gradient = self.computeGradient(X, y, w)\n w = w - self.learning_rate * gradient / n\n Error, Acc = self.evaluate(X, y, w)\n self.trainError.append(Error)\n self.trainAcc.append(Acc)\n self.w.append(w)\n # evaluate on the cross-validation set\n if self.CV == True:\n tmp_cv_Error, tmp_cv_Acc = self.evaluate(X_cv, y_cv, w)\n self.cvError.append(tmp_cv_Error)\n self.cvAcc.append(tmp_cv_Acc)\n\n # print current process\n if verbose == True and self.showFreq != 0 and i % self.showFreq == 0:\n print(str(i) + \"th Iteration, \", \"Error: \", Error, \" Accuracy : \", Acc)\n if self.CV == True:\n print(\"Cross-Validation: \", \"Error: \", tmp_cv_Error, \" Accuracy : \", tmp_cv_Acc)\n if verbose == True:\n print(\"Reach the Maximum Iteration : \" + str(i) + \"th Iteration\")\n bestError, bestAcc, bestW = self.getBest(\"Accuracy\")\n print(\"Best Training Error: \", bestError, \" Highest Training Accuracy : \", bestAcc)\n if self.CV == True:\n best_cv_Error, best_cv_Acc = self.evaluate(X_cv, y_cv, bestW)\n print(\"Best Development Error: \", best_cv_Error, \" Highest Development Accuracy : \", best_cv_Acc)", "def _train_and_evaluate(estimator, output_dir):\n \n \"\"\"X_train, y_train =utils._feature_label_split(df_train,\"is_churn\",\"msno\")\n df_val = utils.read_from_bigquery(\"amiable-octane-267022.kkbox.output_val_1\",\"amiable-octane-267022\")\n X_val, y_val =utils._feature_label_split(df_val,\"is_churn\",\"msno\")\"\"\"\n \n df_train=utils.over_sample(\"amiable-octane-267022.kkbox.output_train_1\",\"amiable-octane-267022\")\n X_train, y_train =utils._feature_label_split(df_train,\"is_churn\",\"msno\")\n df_val=utils.over_sample(\"amiable-octane-267022.kkbox.output_val_1\",\"amiable-octane-267022\")\n X_val, y_val =utils._feature_label_split(df_val,\"is_churn\",\"msno\")\n\n estimator.fit(X_train, y_train)\n f1_scorer = make_scorer(f1_score)\n accuracy_scorer =make_scorer(accuracy_score)\n\n if metadata.HYPERPARAMTER_TUNING:\n scores=model_selection.cross_val_score(estimator, X_val, y_val, cv=3,scoring=f1_scorer)\n #,scoring=f1_scorer\n\n logging.info('Score: %s', scores)\n\n #tune hyper\n hpt = hypertune.HyperTune()\n hpt.report_hyperparameter_tuning_metric(\n hyperparameter_metric_tag='F1_SCORE',\n metric_value=np.mean(scores),\n global_step=10000)\n \n#joblib.dump(estimator, 'model.joblib')\n\n # Write model and eval metrics to `output_dir`\n model_output_path = os.path.join(output_dir, 'model',metadata.MODEL_FILE_NAME)\n \n utils.dump_object(estimator, model_output_path)", "def train_all(X_train_fuse, Y_train, X_dev_fuse, Y_dev, R_train, R_dev, hyperparams):", "def train_model_and_score(X,y_train):\n scaler = MinMaxScaler()\n X_scaled = scaler.fit_transform(X)\n\n #chose model\n # model = RandomForestClassifier()\n model = GradientBoostingClassifier()\n\n #split train/test\n x_train,x_test,y_train,y_test = 
train_test_split(X_scaled,y_train,test_size=0.33,random_state =42)\n\n #train\n model.fit(x_train,y_train)\n\n #evaluation\n sc = model.score(x_test,y_test), model.score(x_train,y_train)\n\n print(sc)\n\n return model,sc", "def learn(self, Xtrain, ytrain):", "def my_impl_variational(in_train, in_test, labels):\n X_train = []\n X_test = []\n for lab in labels:\n for datum in in_train[lab]:\n X_train.append([datum, lab])\n for datum in in_test[lab]:\n X_test.append([datum, lab])\n Variationer_learn(X_train, 500, 1, 0.01, X_test, labels)", "def train_model(lrmodel, X, Y, devX, devY, devscores):\n done = False\n best = -1.0\n r = np.arange(1,6)\n \n while not done:\n # Every 100 epochs, check Pearson on development set\n lrmodel.fit(X, Y, verbose=2, shuffle=False, validation_data=(devX, devY))\n yhat = np.dot(lrmodel.predict_proba(devX, verbose=2), r)\n score = pearsonr(yhat, devscores)[0]\n if score > best:\n print score\n best = score\n bestlrmodel = prepare_model(ninputs=X.shape[1])\n bestlrmodel.set_weights(lrmodel.get_weights())\n else:\n done = True\n\n yhat = np.dot(bestlrmodel.predict_proba(devX, verbose=2), r)\n score = pearsonr(yhat, devscores)[0]\n print 'Dev Pearson: ' + str(score)\n return bestlrmodel", "def train(self, dataset): \n dataset = dataset[dataset[:,-1].argsort()] # Sort the dataset by classes.\n #print dataset\n \n ########\n # Compute p(y=1) for all ys.\n ########\n label_counts = np.bincount(dataset[:,-1]) # Get the number of occurrences of each class, sorted. \n self.p_ys = np.log(label_counts * 1.0 / len(dataset)) # Compute probs. \n \n ########\n # Compute p(x|y) for all x,y.\n ########\n self.feature_count = len(dataset[0]) - 1 \n self.class_count = len(label_counts)\n \n self.p_xi_given_ys = np.zeros((self.class_count, self.feature_count)) # Initialize matrix\n start_index = 0\n for i in range(self.class_count): # Loop over each class \n end_index = start_index + label_counts[i] # end of this class index \n class_word_counts = np.sum(dataset[start_index:end_index,:-1]) # sum all words of class i \n denominator = class_word_counts + self.alpha * self.feature_count # Here we add the feature_count as Laplace smoothing\n \n for j in range(self.feature_count): # Loop over each feature\n single_word_count = np.sum(dataset[start_index:end_index,j]) # sum number times word j appears in class i \n numerator = single_word_count + self.alpha\n self.p_xi_given_ys[i][j] = log(numerator * 1.0 / denominator) # Compute p(xi|y)\n \n start_index = end_index", "def train_with_validation_provided(self, features, labels, validation_features, validation_labels):\n pass", "def buildAndTrain(trainingData):\n\tname = trainingData.drop(['count', 'casual', 'registered'], axis=1).columns\n\ttarget = trainingData['count'].values\n\tfeature = trainingData.drop(['count', 'casual', 'registered'], axis=1).values\n\t# feature scaling\n\tfeature_scaled = preprocessing.scale(feature)\n\t# 0.5 cross validate\n\tcv = cross_validation.ShuffleSplit(len(feature_scaled), n_iter=5, test_size=0.2, random_state=0)\n\t# build model, then training and get accuracy of it\n\tprint('\\n---------岭回归结果--------\\n')\n\tfor train, test in cv:\n\t\tregLR = linear_model.Ridge().fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tregLR.score(feature_scaled[train], target[train]),\n\t\t regLR.score(feature_scaled[test], target[test])))\n\tprint('\\n---------svm结果--------\\n')\n\tfor train, test in cv:\n\t\tregSvm = 
svm.SVR().fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregSvm.score(feature_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregSvm.score(feature_scaled[test], target[test])))\n\tprint('\\n---------随机森林结果--------\\n')\n\tfor train, test in cv:\n\t\tregRF = RandomForestRegressor(n_estimators=100).fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRF.score(feature_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRF.score(feature_scaled[test], target[test])))\n\t# reduce some low correction feature\n\tfeatureReduced = trainingData.drop(['count', 'casual', 'registered', 'holiday', 'workingday', 'day'], axis=1).values\n\tfeatureReduced_scaled = preprocessing.scale(featureReduced)\n\tprint('\\n---------减少特征维度以避免过拟合后的随机森林结果--------\\n')\n\tfor train, test in cv:\n\t\tregRFImpr = RandomForestRegressor(n_estimators=100).fit(featureReduced_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFImpr.score(featureReduced_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFImpr.score(featureReduced_scaled[test], target[test])))\n\t# use grid search algorithm to improve random forest regression\n\tX_train, X_test, y_train, y_test = cross_validation.train_test_split(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeature_scaled, target, test_size=0.2, random_state=0)\n\ttuned_parameters = [{'n_estimators': [10,100,500], 'max_depth': [2,3,4,5,6,7,8,9,10]}]\n\tscores = ['r2']\n\n\tfor score in scores:\n\t\tprint(score)\n\t\tclf = GridSearchCV(RandomForestRegressor(), tuned_parameters, cv=5, scoring=score)\n\t\tclf.fit(X_train, y_train)\n\t\tprint(clf.best_estimator_)\n\t\tprint('each parameter combination is ')\n\t\tfor params, mean_score, scores in clf.grid_scores_:\n\t\t\tprint('{0:.3f} (+/-{1:.03f}) for {2}'.format(mean_score, scores.std()/2, params))\n\n\tprint('--------最优参数下的随机森林结果--------')\n\tfor train, test in cv:\n\t\tregRFBest = RandomForestRegressor(n_estimators=100, max_depth=10).fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFBest.score(feature_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFBest.score(feature_scaled[test], target[test])))\n\treturn regRFBest, feature_scaled, target", "def train_with_validation_provided(self, features, labels, val_features, val_labels):\n hist = self.model.fit(\n features, labels, batch_size=self.config['training']['batch_size'],\n epochs=self.config['training']['epochs'],\n validation_data=(val_features, val_labels),\n validation_freq=self.config['training']['validation_frequency'],\n callbacks=[TensorBoard(log_dir=self.config['model']['tensorboard_dir'])])\n return hist", "def train(self, X, y):\n tf.logging.set_verbosity(\n tf.logging.INFO) # comment if you don't want to display the information during training/evaluation\n\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=self.params[\"TEST_SIZE\"], random_state=42, stratify=y)\n\n self.label_list = y.unique()\n\n train_features = self.sentences_to_features(X_train, y_train)\n test_features = self.sentences_to_features(X_test, y_test)\n if DEBUG:\n print(\"Transformation to features completed\")\n\n num_train_steps = int(\n len(train_features) / self.params[\"BATCH_SIZE\"] * 
self.params[\"NUM_TRAIN_EPOCHS\"])\n num_warmup_steps = int(\n num_train_steps * self.params[\"WARMUP_PROPORTION\"])\n\n run_config = self.run_config_builder()\n model_fn = self.model_fn_builder(len(self.label_list), self.params[\"LEARNING_RATE\"], num_train_steps,\n num_warmup_steps)\n self.estimator = self.estimator_builder(model_fn, run_config)\n\n train_input_fn = bert.run_classifier.input_fn_builder(features=train_features,\n seq_length=self.params[\"MAX_SEQ_LENGTH\"],\n is_training=True, drop_remainder=False)\n if DEBUG:\n print(\"Beginning Training!\")\n current_time = time.time()\n self.estimator.train(input_fn=train_input_fn,\n max_steps=num_train_steps)\n if DEBUG:\n print(\"Training took time :\", time.time() - current_time,\n \"s, or \", (time.time() - current_time) / 60, \"min\")\n\n self.classifier_trained = True\n\n test_input_fn = run_classifier.input_fn_builder(features=test_features,\n seq_length=self.params[\"MAX_SEQ_LENGTH\"],\n is_training=False, drop_remainder=False)\n\n # apply model on test set and print all metrics\n if DEBUG:\n print(\"Evaluating\")\n self.estimator.evaluate(input_fn=test_input_fn, steps=None)", "def train(self, X, y):\n lagrange_multipliers = self._compute_multipliers(X, y)\n return self._construct_predictor(X, y, lagrange_multipliers)", "def fitting_scoring(features, cv=5, verbose=False, is_RFE_mode=False, n_dims_RFE=1):\n # N-fold cross-validation\n num_fold = cv\n accuracy = [0] * num_fold\n for i in range(num_fold):\n training_set = list()\n training_labels = list()\n testing_set = list()\n testing_labels = list()\n for family in features:\n feature_mat = features.get(family)\n if verbose: print(family, \"sample size:\", len(feature_mat))\n\n fold_start = i * int(len(feature_mat) / num_fold)\n fold_end = fold_start + int(len(feature_mat) / num_fold) - 1\n\n # separate training and testing set\n for j in range(len(feature_mat)):\n if fold_start <= j <= fold_end:\n testing_set.append(feature_mat[j])\n testing_labels.append(family)\n else:\n training_set.append(feature_mat[j])\n training_labels.append(family)\n\n p_res = None\n X_new = None\n X_mask = None\n if is_RFE_mode:\n clf = svm.SVC(kernel='linear')\n clf_reduced = RFE(clf, n_dims_RFE, step=1)\n clf_reduced = clf_reduced.fit(training_set, training_labels)\n X_new = clf_reduced.transform(training_set)\n X_mask = clf_reduced.get_support()\n p_res = clf_reduced.predict(testing_set)\n else:\n clf = svm.SVC()\n clf.fit(training_set, training_labels)\n p_res = clf.predict(testing_set)\n\n accuracy[i] = 0\n for j in range(len(p_res)):\n if p_res[j] == testing_labels[j]:\n accuracy[i] += 1\n accuracy[i] = (accuracy[i] / len(p_res)) * 100\n\n if is_RFE_mode:\n if verbose: print('n_dims:', n_dims_RFE, accuracy)\n return np.mean(accuracy), X_new, X_mask\n\n return np.mean(accuracy)", "def run_lgr():\n num_folds = 5\n with pd.HDFStore('./OT_clr_train_LGG_grade.h5') as store:\n X = store['expression'].values\n Y = store['labels'].values\n\n # standardize expression\n mu = np.mean(X, axis=0)\n std = np.std(X, axis=0)\n X = (X-mu)/std\n\n # define CVmodel to manage hyperparameter selection\n cvmodel = CVmodel(LogisticRegressor_skl,\n [1e-6, 1e-5, 1e-4, 1e-3, 1e-2,1e-1,1,10,100,1000], 'C^-1',\n solver = 'lbfgs', max_iter=5000, multi_class='auto')\n\n # define Predictor object to manage nested CV\n lg_predictor = Predictor(cvmodel,scorers.accuracy_scorer)\n\n # cross validate\n lg_cross_validation_scores = \\\n lg_predictor.cross_validate(X, Y,\n outer_folds=num_folds, inner_folds=num_folds)\n 
logger.info('Logistic Regression cross-validation = {0:.3f}'.format(\n np.mean(lg_cross_validation_scores)))", "def trainRegressionModel(X,y):\n # # instantiate a logistic regression model, and fit with X and y\n # model = LogisticRegression()\n # model = model.fit(X, y)\n # # check the accuracy on the training set\n # print(model.score(X, y))\n #X['intercept'] = 1.0\n #del X['isCapitalized']\n #del X['isNN']\n #del X['isNNP']\n #del X['isJJ']\n #del X['isUpper']\n #del X['isPrecedingIN']\n logit = sm.Logit(y, X)\n result = logit.fit()\n print(result.summary())\n print(result.conf_int())\n model = LogisticRegression()\n model = model.fit(X, y)\n print(model.score(X, y))\n print(y.mean())\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)\n model2 = LogisticRegression()\n model2.fit(X_train, y_train)\n # predict class labels for the test set\n predicted = model.predict(X_test)\n print(predicted)\n for i in predicted:\n if i==1:\n print(\"Test:\"+str(i))\n print(max(predicted))\n #generate class probabilities\n probs = model2.predict_proba(X_test)\n print(probs)\n # generate evaluation metrics\n print(\"Accuracy: \"+str(metrics.accuracy_score(y_test, predicted)))\n print(\"AUC: \"+str(metrics.roc_auc_score(y_test, probs[:, 1])))\n print(metrics.confusion_matrix(y_test, predicted))\n print(metrics.classification_report(y_test, predicted))\n\n from sklearn.cross_validation import cross_val_score\n # evaluate the model using 10-fold cross-validation\n scores = cross_val_score(LogisticRegression(), X, y, scoring='accuracy', cv=10)\n print(scores)\n print(scores.mean())", "def fit(self, X_train, y_train, X_test=None, y_test=None):\n\n self.initialize_weights_and_bias(X_train)\n\n # for progress formatting\n epoch_strlen = len(str(self.epochs))\n self.eval_ = {'cost_train': [], \n 'cost_test': [], \n 'train_preform': [], \n 'valid_preform': [],\n 'train_preform_r2': [], \n 'valid_preform_r2': []}\n\n # iterate over training epochs\n for epoch in range(self.epochs):\n\n # Includes forward + backward prop.\n self._minibatch_sgd( X_train, y_train)\n\n # Evaluation after each epoch during training\n z_h, a_h, z_out, a_out = self._forwardprop(X_train)\n _, _, _, a_out_test = self._forwardprop(X_test)\n\n y_train_pred = self.predict(X_train)\n y_test_pred = self.predict(X_test)\n\n y_test = y_test.reshape((len(y_test),1))\n y_train = y_train.reshape((len(y_train),1))\n\n y_test = standardicing_responce(y_test)\n y_test_pred = standardicing_responce(y_test_pred)\n \n y_train = standardicing_responce(y_train)\n y_train_pred = standardicing_responce(y_train) \n \n train_preform = mean_squared_error(y_train, y_train_pred) \n valid_preform = mean_squared_error(y_test, y_test_pred)\n \n train_preform_r2 = r2_score(y_train, y_train_pred) \n valid_preform_r2 = r2_score(y_test, y_test_pred)\n\n self.eval_['train_preform'].append(train_preform)\n self.eval_['valid_preform'].append(valid_preform)\n self.eval_['train_preform_r2'].append(train_preform_r2)\n self.eval_['valid_preform_r2'].append(valid_preform_r2)\n\n # Calculate the error in the output\n self.model_error = np.subtract(y_train, y_train_pred)\n \n return self", "def train_model(X_train, y_train):\n rgs = linear_model.Lasso(alpha=0.1)\n rgs.fit(X_train, y_train)\n return rgs", "def train(self, dataset): \n dataset = dataset[dataset[:,-1].argsort()] # Sort the dataset by classes.\n #print dataset\n \n ########\n # Compute p(y=1) for all ys.\n ########\n label_counts = np.bincount(dataset[:,-1]) # Get the number of 
occurrences of each class, sorted. \n self.p_ys = label_counts * 1.0 / len(dataset) # Compute probs. \n \n ########\n # Compute p(x|y) for all x,y.\n ########\n self.feature_count = len(dataset[0]) - 1 \n self.class_count = len(label_counts)\n \n self.p_xi_given_ys = np.zeros((self.class_count, self.feature_count)) # Initialize matrix\n start_index = 0\n for i in range(self.class_count): # Loop over each class \n end_index = start_index + label_counts[i] # end of this class index \n denominator = label_counts[i] + 2.0 * self.alpha\n \n for j in range(self.feature_count): # Loop over each feature\n numerator = np.sum(dataset[start_index:end_index,j]) + self.alpha # Sum number of times word j = 1 in class i\n self.p_xi_given_ys[i][j] = numerator * 1.0 / denominator # Compute p(xi|y)\n \n start_index = end_index", "def fit_lr_model(df, X_train, y_train, X_test, y_test, mask_test):\n print(\"**** LINEAR REGRESSION ****\")\n lin_mod = sm.OLS(y_train, sm.add_constant(X_train))\n fit_lin = lin_mod.fit()\n print(fit_lin.summary())\n\n y_pred_test = fit_lin.predict(sm.add_constant(X_test))\n df_test = pd.concat([df[mask_test][['player','wkts','year1_wkts_pm']].reset_index(),\n pd.DataFrame(y_pred_test).reset_index()],axis=1,)\n df_test = df_test.drop('index',axis=1)\n df_test.columns = ['player','wkts','wkts_baseline','wkts_exp']\n\n df_by_player = df_test.groupby('player').sum()\n\n print('Explained Variance (LR model): ' + str(explained_variance_score(df_by_player.wkts,df_by_player.wkts_exp)))\n print('Explained Variance (Baseline): ' + str(explained_variance_score(df_by_player.wkts,df_by_player.wkts_baseline)))\n print('----')\n print('Mean Squared Error (LR model): ' + str(mean_squared_error(df_by_player.wkts,df_by_player.wkts_exp)))\n print('Mean Squared Error (Baseline): ' + str(mean_squared_error(df_by_player.wkts,df_by_player.wkts_baseline)))\n print('----')\n print(' ')", "def stage_2_training(solver):\n positive = np.load(\"./processed_data/train/raw/train_p.npy\")\n sub_u_negative = np.load(\"./processed_data/train/sub_u_negative.npy\")\n unlabeled_negative = np.load(\"./processed_data/train/unlabeled_negative.npy\")\n\n # only use the sub-u set for training\n train_p_subu = np.concatenate([positive, sub_u_negative])\n np.random.shuffle(train_p_subu)\n x_train_p_subu = train_p_subu[:, :-1]\n y_train_p_subu = train_p_subu[:, -1]\n classifier = LogisticRegression(solver=solver,\n class_weight='balanced', penalty='l2', max_iter=_max_iteration, C=_l2_coefficient)\n classifier.fit(x_train_p_subu, y_train_p_subu)\n\n image_dir = _stage2_result_path + \"/\" + solver + \"/sub_u/\"\n result_p = np.array(classifier.predict_proba(positive[:, :-1])[:, 1])\n plt.hist(result_p, bins=_bins_num)\n plt.savefig(image_dir + \"train_positive.png\")\n plt.show()\n result_sub_u = np.array(classifier.predict_proba(sub_u_negative[:, :-1])[:,1])\n plt.hist(result_sub_u, bins=_bins_num)\n plt.savefig(image_dir + \"train_sub_u.png\")\n plt.show()\n model_path = _stage2_result_path + \"/\" + solver + \"/sub_u/logistic.pkl\"\n joblib.dump(classifier, model_path)\n\n # use negative instances from the whole unlabeled set for training\n train_p_unlabeled = np.concatenate([positive, unlabeled_negative])\n np.random.shuffle(train_p_unlabeled)\n x_train_p_unlabeled = train_p_unlabeled[:, :-1]\n y_train_p_unlabeled = train_p_unlabeled[:, -1]\n classifier = LogisticRegression(solver=solver,\n class_weight='balanced', penalty='l2', max_iter=_max_iteration, C=_l2_coefficient)\n classifier.fit(x_train_p_unlabeled, 
y_train_p_unlabeled)\n result_p = np.array(classifier.predict_proba(positive[:, :-1])[:, 1])\n image_dir = _stage2_result_path + \"/\" + solver + \"/unlabeled/\"\n plt.hist(result_p, bins=_bins_num)\n plt.savefig(image_dir + \"train_positive.png\")\n plt.show()\n result_unlabeled = np.array(classifier.predict_proba(unlabeled_negative[:, :-1])[:,1])\n plt.hist(result_unlabeled, _bins_num)\n plt.savefig(image_dir + \"train_unlabeled.png\")\n plt.show()\n model_path = _stage2_result_path + \"/\" + solver + \"/unlabeled/logistic.pkl\"\n joblib.dump(classifier, model_path)", "def fit(self, X_train, y_train):\n \n # Number of examples where y = 0,1\n No_y_train_1 = np.sum(y_train)\n No_y_train_0 = y_train.shape[0] - No_y_train_1\n \n #Ratio of Number of examples where y=0,1 and the total number of examples\n self.theta_0 = No_y_train_0/y_train.shape[0]\n self.theta_1 = No_y_train_1/y_train.shape[0]\n \n #Ratio of Number of examples where x_j =1 and y=0,1 and Number of examples where y=0,1 respectively\n No_inst_j1 = X_train.T.dot(y_train.reshape([-1,1])) \n No_inst_j0 = X_train.T.dot(1-y_train.reshape([-1,1]))\n \n #Whether or not laplace smoothing is implemented or not\n if self.l_smooth:\n self.prob1 = (No_inst_j1 + 1)/(No_y_train_1 + 2)\n self.prob0 = (No_inst_j0 + 1)/(No_y_train_0 + 2)\n else:\n self.prob1 = No_inst_j1/No_y_train_1\n self.prob0 = No_inst_j0/No_y_train_0\n \n return self", "def mlr(df, exp_vars, resp_var, \n method='ols', \n fit_intercept=True,\n kcv=3,\n normalize=False):\n from sklearn import cross_validation\n from sklearn.linear_model import LinearRegression, RidgeCV\n from sklearn.linear_model import LassoCV, ElasticNetCV\n from sklearn.metrics import r2_score\n from sklearn.utils import resample\n import matplotlib.pyplot as plt\n import seaborn as sn\n import pandas as pd\n import numpy as np\n \n # Separate data\n X = df[exp_vars]\n y = df[resp_var]\n \n # Setup model\n if method == 'ols':\n model = LinearRegression(fit_intercept=fit_intercept, \n normalize=normalize)\n elif method == 'lasso':\n model = LassoCV(fit_intercept=fit_intercept, \n normalize=normalize, \n max_iter=10000,\n cv=kcv)\n elif method == 'ridge':\n model = RidgeCV(fit_intercept=fit_intercept, \n normalize=normalize, \n alphas=np.logspace(-10, 10, 21))\n elif method == 'el-net':\n model = ElasticNetCV(l1_ratio=[.1, .5, .7, .9, .95, .99, 1],\n fit_intercept=fit_intercept, \n normalize=normalize,\n cv=kcv)\n else:\n raise ValueError('\"method\" parameter must be in [\"ols\", \"lasso\", \"ridge\", \"el-net\"]')\n \n # k-fold cross validation\n #cv_scores = cross_validation.cross_val_score(model, X, y, cv=kcv, scoring='r2')\n #print 'Mean r2 from %s-fold CV: %.3f\\n' % (kcv, cv_scores.mean())\n \n # Train model on full dataset\n model.fit(X, y)\n \n # Get y-hat\n y_pred = model.predict(X)\n \n # r2 based on calibration data\n r2 = r2_score(y, y_pred)\n print 'r2:', r2\n print ''\n \n # Summary of model\n print model\n print ''\n \n if method == 'lasso':\n print 'Lasso alpha:', model.alpha_\n print ''\n elif method == 'ridge':\n print 'Ridge alpha:', model.alpha_\n print ''\n elif method == 'el-net':\n print 'Elastic net alpha:', model.alpha_ \n print 'Elastic net L1 ratio:', model.l1_ratio_ \n print ''\n else: # OLS\n pass\n \n # Plot\n fig = plt.figure(figsize=(15,15))\n \n # Paired points for each site\n ax1 = plt.subplot2grid((2,2), (0,0), colspan=2)\n ax1.plot(range(0, len(X.index)), y, 'ro', label='Observed')\n ax1.plot(range(0, len(X.index)), y_pred, 'b^', label='Modelled')\n \n 
ax1.set_xticks(range(0, len(X.index)))\n ax1.set_xticklabels(X.index, rotation=90, fontsize=12)\n ax1.set_xlim(0, len(X.index)-1)\n \n ax1.set_xlabel('Site code', fontsize=16)\n ax1.set_ylabel(resp_var)\n ax1.set_title('Points paired for each location', fontsize=20)\n ax1.legend(loc='best', fontsize=16)\n \n # Modelled versus observed\n ax2 = plt.subplot2grid((2,2), (1,0), colspan=1)\n ax2.plot(y, y_pred, 'ro')\n ax2.set_xlabel('Observed', fontsize=16)\n ax2.set_ylabel('Modelled', fontsize=16)\n ax2.set_title('Modelled versus observed', fontsize=20)\n \n # Hist of residuals\n ax3 = plt.subplot2grid((2,2), (1,1), colspan=1)\n sn.distplot(y - y_pred, kde=True, ax=ax3)\n ax3.set_title('Histogram of residuals', fontsize=20)\n \n plt.tight_layout()\n \n # Get param estimates\n params = pd.Series(model.coef_, index=X.columns)\n\n # Estimate confidence using bootstrap\n # i.e. what is the std. dev. of the estimates for each parameter\n # based on 1000 resamplings\n err = np.std([model.fit(*resample(X, y)).coef_ for i in range(1000)], \n axis=0)\n\n # Build df\n res = pd.DataFrame({'effect':params,\n 'error':2*err})\n\n # Rough indicator of significance: are the estimated values more than\n # 2 std. devs. from 0 (~95% CI?). NB: this assumnes the \"marginal posterior\" \n # is normal, which I haven't tested for and which quite possibly isn't true\n # - use with care! \n res['signif'] = np.abs(res['effect']) > res['error']\n \n return res", "def train_and_evaluate(model, train_data, val_data, optimizer, scheduler, params, model_dir, restore_dir=None):\n # reload weights from restore_dir if specified\n if restore_dir is not None:\n model = BertForSequenceTagging.from_pretrained(tagger_model_dir)\n \n best_val_f1 = 0.0\n patience_counter = 0\n\n for epoch in range(1, params.epoch_num + 1):\n # Run one epoch\n logging.info(\"Epoch {}/{}\".format(epoch, params.epoch_num))\n\n # Compute number of batches in one epoch\n params.train_steps = params.train_size // params.batch_size\n params.val_steps = params.val_size // params.batch_size\n\n # data iterator for training\n train_data_iterator = data_loader.data_iterator(train_data, shuffle=True)\n\n # Train for one epoch on training set\n train_epoch(model, train_data_iterator, optimizer, scheduler, params)\n\n # data iterator for evaluation\n # train_data_iterator = data_loader.data_iterator(train_data, shuffle=False)\n val_data_iterator = data_loader.data_iterator(val_data, shuffle=False)\n\n # Evaluate for one epoch on training set and validation set\n # params.eval_steps = params.train_steps\n # train_metrics = evaluate(model, train_data_iterator, params, mark='Train') # callback train f1\n params.eval_steps = params.val_steps\n val_metrics = evaluate(model, val_data_iterator, params, mark='Val')\n \n val_f1 = val_metrics['f1']\n improve_f1 = val_f1 - best_val_f1\n if improve_f1 > 1e-5: \n logging.info(\"- Found new best F1\")\n best_val_f1 = val_f1\n model.save_pretrained(model_dir)\n if improve_f1 < params.patience:\n patience_counter += 1\n else:\n patience_counter = 0\n else:\n patience_counter += 1\n\n # Early stopping and logging best f1\n if (patience_counter >= params.patience_num and epoch > params.min_epoch_num) or epoch == params.epoch_num:\n logging.info(\"Best val f1: {:05.2f}\".format(best_val_f1))\n break", "def train( self, trainingData, trainingLabels, validationData, validationLabels ):\n\n self.features = trainingData[0].keys() # could be useful later\n # DO NOT ZERO OUT YOUR WEIGHTS BEFORE STARTING TRAINING, OR\n # THE AUTOGRADER WILL 
LIKELY DEDUCT POINTS.\n for iteration in range(self.max_iterations):\n #pdb.set_trace() # this is a breakpoint so you can check the format of the data\n print (\"Starting iteration \", iteration, \"...\")\n for i in range(len(trainingData)):#training data\n max = -10000000\n for j in range(len(self.weights)):\n prod = np.dot(self.weights[j], trainingData[i]) # this would be x0 (on the first pass) (xj)\n if (prod > max):\n max=prod # in max we store the distance to the instance closest to the one we are iterating over\n indclase=j # store the index of the class we predict it belongs to\n\n if(indclase != trainingLabels[i]):\n # recompute the weights\n self.weights[trainingLabels[i]] += trainingData[i] # this adds to all the weights of the true class at once\n #pdb.set_trace() # this is a breakpoint so you can check the format of the data\n self.weights[indclase] -= trainingData[i] # this subtracts from all the weights of the predicted class at once\n\n\n\n\n\n ########################################################################################\n # 1. i is the index of an example (an item, f(x) of an example) from the training set.\n # 2. So each pass of this loop handles a single example;\n # for each example you will compute the dot product (dotProduct) w*item\n # NOTES: Remember that each example is represented by several features, i.e. it is a feature vector with as many entries as the attribute self.features indicates.\n # So each example has dimension 1 row by self.features columns.\n # The dimension of the vector w is also self.features, i.e. there will be as many weights w_rasgo inside w as there are features in each example item\n # Remember also that this is a multiclass classification in this case. There are as many classes as the attribute self.legalLabels indicates\n #########################################################################################", "def train2(self):\n for epoch in range(self.epochs):\n print \"epoch: \", epoch\n self.train(self.D)\n self.alpha -= 0.002 # decrease the learning rate\n self.min_alpha = model.alpha # fix the learning rate, no decay", "def main():\n df = prepro_last()\n X, y = train_build(df)\n fit_store(X, y)", "def eval_perf_train(model, X_train=None, y_train=None):\n\n # if X_train != None and y_train != None:\n\n y_hat_train = model.predict(X_train)\n \n train_mae = metrics.mean_absolute_error(y_train, y_hat_train)\n train_mse = metrics.mean_squared_error(y_train, y_hat_train)\n train_rmse = np.sqrt(metrics.mean_squared_error(y_train, y_hat_train))\n train_r = metrics.r2_score(y_train, y_hat_train)\n\n print('Evaluating Performance on Training Data:\\n')\n print(f'Train Mean Absolute Error: {train_mae:,.2f}')\n print(f'Train Mean Squared Error: {train_mse:,.2f}\\n')\n print(f'Train Root Mean Squared Error: {train_rmse:,.2f}')\n print(f'Train R-Square Value: {round(train_r,2)}')\n\n # if X_test != None and y_test != None:\n\n # y_hat_test = model.predict(X_test)\n\n # test_mae = metrics.mean_absolute_error(y_test, y_hat_test)\n # test_mse = metrics.mean_squared_error(y_test, y_hat_test)\n # test_rmse = np.sqrt(metrics.mean_squared_error(y_test, y_hat_test))\n # test_r = metrics.r2_score(y_test, y_hat_test)\n\n # print('Evaluating Performance on Testing Data:\\n')\n # print(f'Test Mean Absolute Error: {test_mae:,.2f}')\n # print(f'Test Mean Squared Error: {test_mse:,.2f}\\n')\n # print(f'Test Root Mean Squared Error: {test_rmse:,.2f}')\n # print(f'Test R-Square Value: {round(test_r,2)}')", "def 
cross_validation(self, x, t):\n # Initialize accuracy / hyperparameters\n best_accuracy = 0.0\n best_reg = 0.0\n\n # Cross-validation 80-20\n N = x.shape[0]\n N_train = int(math.floor(0.8 * N))\n\n # Initialize the grid search hyperparameters\n min_reg = 0.001\n max_reg = 1000\n log_min_reg = np.log(min_reg)\n log_max_reg = np.log(max_reg)\n reg_list = np.logspace(log_min_reg, log_max_reg, num=7, base=math.e)\n\n for reg in reg_list:\n accuracy = np.zeros((self.k_fold))\n for i in range(self.k_fold):\n map_index = list(zip(x, t))\n random.shuffle(map_index)\n random_x, random_t = zip(*map_index)\n\n train_x = random_x[:N_train]\n valid_x = random_x[N_train:]\n train_t = random_t[:N_train]\n valid_t = random_t[N_train:]\n\n self.model = sklearn.neural_network.MLPClassifier(self.hidden_layer_sizes,\n activation=self.activation,\n alpha=reg, max_iter=1000, \n random_state=self.random_state)\n self.train(train_x, train_t)\n accuracy[i] = self.model.score(valid_x, valid_t)\n\n mean_accuracy = np.mean(accuracy)\n # print(mean_accuracy)\n if mean_accuracy > best_accuracy:\n best_accuracy = mean_accuracy\n best_reg = reg\n print(\"The new best hyperparameters are : \", best_reg)\n\n print(\"Best hyperparameters are : \", best_reg)\n print(\"Valid Accuracy :\", best_accuracy)\n self.reg = best_reg\n self.model = sklearn.neural_network.MLPClassifier(self.hidden_layer_sizes,\n activation=self.activation,\n alpha=best_reg, max_iter=1000, \n random_state=self.random_state)\n self.train(x, t)", "def train(self, epoch, evaluate=True):\n\n # training data\n start = time.time()\n train_stats = self.compute_epoch(self.training_data, False)\n train_loss, train_acc = train_stats\n\n self.train_losses.append(train_loss)\n self.train_accs.append(train_acc)\n\n if self.opt.verbose:\n print(' - (Training) perplexity: {perplexity: 8.5f}, accuracy: {accu:3.3f} %, '\n 'elapse: {elapse:3.3f} min'.format(\n perplexity=math.exp(min(train_loss, 100)), accu=100*train_acc,\n elapse=(time.time()-start)/60))\n\n if evaluate:\n # validation data\n with torch.no_grad():\n valid_stats = self.compute_epoch(self.validation_data, True)\n valid_loss, valid_acc = valid_stats\n\n self.valid_losses.append(valid_loss)\n self.valid_accs.append(valid_acc)\n\n if self.opt.verbose:\n print(' - (Validation) perplexity: {perplexity: 8.5f}, accuracy: {accu:3.3f} %, '\n 'elapse: {elapse:3.3f} min'.format(\n perplexity=math.exp(min(valid_loss, 100)), accu=100*valid_acc,\n elapse=(time.time()-start)/60))\n\n return self", "def train_val_training(X_train, y_train, model):\n # set pach where trained models will be saved to \n savepath = Path('/home/kwaygo/Documents/NUS/SPH6004/P2/SPH6004_P2/models/Regression')\n checkpoint_name = os.path.join(savepath, 'Weights-{epoch:03d}--{val_loss:.5f}.hdf5' ) \n # define callbacks\n cp = ModelCheckpoint(checkpoint_name, monitor='val_loss', verbose = 1, save_best_only = True, mode ='auto')\n es = EarlyStopping(monitor='val_loss', patience= 4, verbose=1)\n callbacks_list = [es, cp]\n # start training\n hist = model.fit(X_train, y_train, epochs=500, batch_size=500, validation_split = 0.2, callbacks=callbacks_list) \n \n print(\"[INFO] avg. 
ICU LOS of train set: {}, std ICU LOS of test set: {}\".format(np.mean(y_train), np.std(y_train)))\n # plot training History \n plotHist(hist)\n return model", "def train_model_regression(X, X_test, y, params, folds, model_type='lgb', eval_metric='mae', columns=None,\r\n plot_feature_importance=False, model=None,\r\n verbose=10000, early_stopping_rounds=200, n_estimators=50000):\r\n columns = X.columns if columns is None else columns\r\n X_test = X_test[columns]\r\n\r\n # to set up scoring parameters\r\n metrics_dict = {'mae': {'lgb_metric_name': 'mae',\r\n 'catboost_metric_name': 'MAE',\r\n 'sklearn_scoring_function': metrics.mean_absolute_error},\r\n 'group_mae': {'lgb_metric_name': 'mae',\r\n 'catboost_metric_name': 'MAE',\r\n 'scoring_function': group_mean_log_mae},\r\n 'mse': {'lgb_metric_name': 'mse',\r\n 'catboost_metric_name': 'MSE',\r\n 'sklearn_scoring_function': metrics.mean_squared_error}\r\n }\r\n\r\n result_dict = {}\r\n\r\n # out-of-fold predictions on train data\r\n oof = np.zeros(len(X))\r\n\r\n # averaged predictions on train data\r\n prediction = np.zeros(len(X_test))\r\n\r\n # list of scores on folds\r\n scores = []\r\n feature_importance = pd.DataFrame()\r\n\r\n # split and train on folds\r\n for fold_n, (train_index, valid_index) in enumerate(folds.split(X)):\r\n print(f'Fold {fold_n + 1} started at {time.ctime()}')\r\n if type(X) == np.ndarray:\r\n X_train, X_valid = X[columns][train_index], X[columns][valid_index]\r\n y_train, y_valid = y[train_index], y[valid_index]\r\n else:\r\n X_train, X_valid = X[columns].iloc[train_index], X[columns].iloc[valid_index]\r\n y_train, y_valid = y.iloc[train_index], y.iloc[valid_index]\r\n\r\n if model_type == 'lgb':\r\n model = lgb.LGBMRegressor(**params, n_estimators=n_estimators, n_jobs=-1)\r\n model.fit(X_train, y_train,\r\n eval_set=[(X_train, y_train), (X_valid, y_valid)],\r\n eval_metric=metrics_dict[eval_metric]['lgb_metric_name'],\r\n verbose=verbose, early_stopping_rounds=early_stopping_rounds)\r\n\r\n y_pred_valid = model.predict(X_valid)\r\n y_pred = model.predict(X_test, num_iteration=model.best_iteration_)\r\n\r\n if model_type == 'xgb':\r\n train_data = xgb.DMatrix(data=X_train, label=y_train, feature_names=X.columns)\r\n valid_data = xgb.DMatrix(data=X_valid, label=y_valid, feature_names=X.columns)\r\n\r\n watchlist = [(train_data, 'train'), (valid_data, 'valid_data')]\r\n model = xgb.train(dtrain=train_data, num_boost_round=20000, evals=watchlist, early_stopping_rounds=200,\r\n verbose_eval=verbose, params=params)\r\n y_pred_valid = model.predict(xgb.DMatrix(X_valid, feature_names=X.columns),\r\n ntree_limit=model.best_ntree_limit)\r\n y_pred = model.predict(xgb.DMatrix(X_test, feature_names=X.columns), ntree_limit=model.best_ntree_limit)\r\n\r\n if model_type == 'sklearn':\r\n model = model\r\n model.fit(X_train, y_train)\r\n\r\n y_pred_valid = model.predict(X_valid).reshape(-1, )\r\n score = metrics_dict[eval_metric]['sklearn_scoring_function'](y_valid, y_pred_valid)\r\n print(f'Fold {fold_n}. 
{eval_metric}: {score:.4f}.')\r\n print('')\r\n\r\n y_pred = model.predict(X_test).reshape(-1, )\r\n\r\n if model_type == 'cat':\r\n model = CatBoostRegressor(iterations=20000, eval_metric=metrics_dict[eval_metric]['catboost_metric_name'],\r\n **params,\r\n loss_function=metrics_dict[eval_metric]['catboost_metric_name'])\r\n model.fit(X_train, y_train, eval_set=(X_valid, y_valid), cat_features=[], use_best_model=True,\r\n verbose=False)\r\n\r\n y_pred_valid = model.predict(X_valid)\r\n y_pred = model.predict(X_test)\r\n\r\n oof[valid_index] = y_pred_valid.reshape(-1, )\r\n if eval_metric != 'group_mae':\r\n scores.append(metrics_dict[eval_metric]['sklearn_scoring_function'](y_valid, y_pred_valid))\r\n else:\r\n scores.append(metrics_dict[eval_metric]['scoring_function'](y_valid, y_pred_valid, X_valid['type']))\r\n\r\n prediction += y_pred\r\n\r\n if model_type == 'lgb' and plot_feature_importance:\r\n # feature importance\r\n fold_importance = pd.DataFrame()\r\n fold_importance[\"feature\"] = columns\r\n fold_importance[\"importance\"] = model.feature_importances_\r\n fold_importance[\"fold\"] = fold_n + 1\r\n feature_importance = pd.concat([feature_importance, fold_importance], axis=0)\r\n\r\n prediction /= folds.n_splits\r\n\r\n print('CV mean score: {0:.4f}, std: {1:.4f}.'.format(np.mean(scores), np.std(scores)))\r\n\r\n result_dict['oof'] = oof\r\n result_dict['prediction'] = prediction\r\n result_dict['scores'] = scores\r\n\r\n # if model_type == 'lgb':\r\n # if plot_feature_importance:\r\n # feature_importance[\"importance\"] /= folds.n_splits\r\n # cols = feature_importance[[\"feature\", \"importance\"]].groupby(\"feature\").mean().sort_values(\r\n # by=\"importance\", ascending=False)[:50].index\r\n #\r\n # best_features = feature_importance.loc[feature_importance.feature.isin(cols)]\r\n #\r\n # plt.figure(figsize=(16, 12));\r\n # sns.barplot(x=\"importance\", y=\"feature\", data=best_features.sort_values(by=\"importance\", ascending=False));\r\n # plt.title('LGB Features (avg over folds)');\r\n #\r\n # result_dict['feature_importance'] = feature_importance\r\n\r\n return result_dict", "def test(self, verbose=False):\n\t\tif not self.trained: self.train()\n\t\tloss = self.compute_loss(self.w0, self.w, 'test')\n\t\tprint('Mean log loss of TEST data:', loss)", "def trainModel( self, featureTrain, classTrain):", "def fit(self, X_train, y_train):\n if X_train.shape[0] != y_train.shape[0]:\n raise ValueError\n self._fitted = True\n X_train = np.hstack((np.ones((X_train.shape[0], 1)), X_train))\n num_measures = X_train.shape[0]\n num_features = X_train.shape[1]\n indices = np.arange(num_measures)\n self.theta = np.zeros(num_features)\n vt = np.zeros_like(self.theta)\n for i in range(self.max_iter):\n np.random.shuffle(indices)\n X_batch = np.take(X_train, indices[:self.batch_size], axis=0)\n y_batch = np.take(y_train, indices[:self.batch_size])\n y_proba = self.predict_proba(X_batch)\n vt = self.gamma * vt + self.lambda_coef * self.grad(X_batch, y_batch)\n self.theta -= vt", "def crossValidationKfold(automodel, \r\n X, y,\r\n params_automl : dict = {},\r\n score_function = accuracy_score,\r\n cv : int = 3,\r\n shuffle: bool = True,\r\n verbose : bool = True,\r\n allmetrics: bool = False):\r\n if(isinstance(X, pd.DataFrame) or isinstance(y, pd.DataFrame)):\r\n X = X.values\r\n y = y.values\r\n skf = StratifiedKFold(n_splits = cv, \r\n shuffle = shuffle, \r\n random_state = 42)\r\n if(allmetrics):\r\n train_scores = {'accuracy' : [], \r\n 'roc_auc': [], \r\n 'f1' : [], \r\n 'recall' : [], 
\r\n 'precision': []}\r\n test_scores = {'accuracy' : [], \r\n 'roc_auc': [], \r\n 'f1' : [], \r\n 'recall' : [], \r\n 'precision': []}\r\n else:\r\n train_scores = np.empty((cv, ))\r\n test_scores = np.empty((cv, ))\r\n for idx, (idx_tr, idx_ts) in enumerate(skf.split(X, y)):\r\n X_tr, X_ts = X[idx_tr], X[idx_ts]\r\n y_tr, y_ts = y[idx_tr], y[idx_ts] \r\n am = automodel(**params_automl)\r\n am.fit(X_tr, y_tr)\r\n if(not allmetrics):\r\n \r\n train_scores[idx] = score_function(am.predict(X_tr), y_tr)\r\n test_scores[idx] = score_function(am.predict(X_ts), y_ts)\r\n if(verbose):\r\n print('it: {} train score: {:.3f}, val score: {:.3f}'.format(idx, \r\n train_scores[idx],\r\n test_scores[idx]))\r\n else:\r\n train_current = {}\r\n test_current = {}\r\n for name, metric in all_metrics_classifications.items():\r\n train_current[name] = metric(am.predict(X_tr), y_tr)\r\n test_current[name] = metric(am.predict(X_ts), y_ts)\r\n train_scores[name].append(train_current[name])\r\n test_scores[name].append(test_current[name])\r\n \r\n if(verbose):\r\n print('it: {} train scores: {}, val scores: {}'.format(idx, train_current,\r\n test_current))\r\n\r\n if(not allmetrics):\r\n return test_scores.mean(), test_scores.std()\r\n else:\r\n # -- calculate means of all metrics-- #\r\n return dict(map(lambda kv: (kv[0], np.asarray(kv[1]).mean()), test_scores.items()))", "def run(test_xs=None, test_ys=None, num_samples=10000, verbose=True):\n\n # Data\n (train_xs, train_ys), (val_xs, val_ys) = _get_review_data(path=\"../data/review_10k.csv\", num_samples=num_samples)\n if verbose:\n print(\"\\n[Example of xs]: [\\\"{}...\\\", \\\"{}...\\\", ...]\\n[Example of ys]: [{}, {}, ...]\".format(\n train_xs[0][:70], train_xs[1][:70], train_ys[0], train_ys[1]))\n print(\"\\n[Num Train]: {}\\n[Num Test]: {}\".format(len(train_ys), len(val_ys)))\n\n # Create bow representation of train set\n my_vocab, train_bows = create_bow(train_xs, msg_prefix=\"\\n[Train]\")\n assert isinstance(my_vocab, dict)\n assert isinstance(train_bows, list) or isinstance(train_bows, np.ndarray) or isinstance(train_bows, tuple)\n if verbose:\n print(\"\\n[Vocab]: {} words\".format(len(my_vocab)))\n\n # You can see hyper-parameters (train_kwargs) that can be tuned in the document below.\n # https://scikit-learn.org/stable/modules/classes.html.\n #train_kwargs = dict(verbose=1, penalty='l2', solver=\"saga\") # liblinear, saga\n #clf = LogisticRegression(**train_kwargs)\n clf = MLPClassifier(activation='relu', solver='adam', hidden_layer_sizes=(3,), verbose=1, alpha=0.01, tol=0.001)\n clf.fit(train_bows, train_ys)\n assert hasattr(clf, \"predict\")\n # Create bow representation of validation set\n _, val_bows = create_bow(val_xs, vocab=my_vocab, msg_prefix=\"\\n[Validation]\")\n\n # Evaluation\n val_preds = clf.predict(val_bows)\n val_accuracy = accuracy_score(val_ys, val_preds)\n if verbose:\n print(\"\\n[Validation] Accuracy: {}\".format(val_accuracy))\n _get_example_of_errors(val_xs, val_preds, val_ys)\n\n # Grading: Do not modify below lines.\n if test_xs is not None:\n _, test_bows = create_bow(test_xs, vocab=my_vocab, msg_prefix=\"\\n[Test]\")\n test_preds = clf.predict(test_bows)\n return {\"clf\": clf, \"val_accuracy\": val_accuracy, \"test_accuracy\": accuracy_score(test_ys, test_preds)}\n else:\n return {\"clf\": clf}", "def train_models(self, clf, silent, feature_names=None, target_names=None, live=False):\n X_train, X_test, y_train, y_test = self.X_train, self.X_test, self.y_train, self.y_test\n t0 = time()\n clf.fit(X_train, y_train)\n 
train_time = time() - t0\n pred = clf.predict(X_test)\n test_time = time() - t0\n accuracy = metrics.accuracy_score(y_test, pred)\n fbeta = metrics.fbeta_score(y_test, pred,1,labels=self.dataset['label'].unique(),average='weighted')\n name = clf.name[0]\n if False:\n score_stats = f'Model : {name} | Score : {accuracy} | F-beta : {fbeta}'\n print(score_stats)\n\n if self.best_score_ledger[name][0] < accuracy:\n last = self.best_score_ledger[name][0]\n print(name)\n self.best_score_ledger[name] = [accuracy,fbeta]\n score_stats = f'Model : {name} | Score : {accuracy} | F-beta : {fbeta}'\n print(self.stemmer, ' ', self.transform)\n print(score_stats)\n\n if accuracy > self.best_models[name] and last != 0.0 and self.tuning_depth in ['normal','maximal']:\n new_model,score = self.hyperparameter_tuning(name,clf)\n if score > accuracy:\n self.best_score_ledger[name][0] = score\n clf = new_model\n dump(clf, os.path.join(os.getcwd(), self.file_term, 'models', f'{\"_\".join([self.uid_base, name])}'))\n\n\n\n if not silent:\n if hasattr(clf, 'coef_'):\n print(\"dimensionality: %d\" % clf.coef_.shape[1])\n print(\"density: %f\" % density(clf.coef_))\n\n if True and feature_names is not None:\n print(\"top 10 keywords per class:\")\n for i, label in enumerate(target_names):\n top10 = np.argsort(clf.coef_[i])[-10:]\n print(trim(\"%s: %s\" % (label, \" \".join(feature_names[top10]))))\n print()\n\n if True:\n print(\"classification report:\")\n print(metrics.classification_report(y_test, pred,\n target_names=target_names))\n\n if True:\n print(\"confusion matrix:\")\n print(metrics.confusion_matrix(y_test, pred))\n # if no model exists for the current settings, create one by default. Prevents issues if models are deleted.\n elif not os.path.exists(\n os.path.join(os.getcwd(), self.file_term, 'models', f'{\"_\".join([self.uid_base, name])}')):\n dump(clf, os.path.join(os.getcwd(), self.file_term, 'models', f'{\"_\".join([self.uid_base, name])}'))\n clf_descr = str(clf).split('(')[0]\n return clf_descr, accuracy, train_time, test_time", "def train_model(X_train, y_train, X_valid, y_valid, params=None, model_type='lgb', \r\n model_path_name='lgb', plot_feature_importance=False, model=None):\r\n def lgb_f1_score(y_true, y_pred):\r\n y_pred = np.round(y_pred)\r\n return 'f1', f1_score(y_true, y_pred), True\r\n\r\n scores = []\r\n feature_importance = pd.DataFrame()\r\n print('Started at', time.ctime())\r\n \r\n \r\n if model_type == 'lgb':\r\n \r\n model = lgb.LGBMClassifier(**params, n_estimators=50000, n_jobs=-1)\r\n model.fit(X_train, y_train, eval_set=(X_valid, y_valid), \r\n eval_metric=lgb_f1_score, early_stopping_rounds=300)\r\n \r\n y_pred_valid = model.predict(X_valid)\r\n \r\n if model_type == 'cat':\r\n model = cb.CatBoost(iterations=20000, **params)\r\n model.fit(X_train, y_train, eval_set=(X_valid, y_valid), cat_features=[], use_best_model=True, verbose=False)\r\n y_pred_valid = model.predict(X_valid)\r\n\r\n #save the model\r\n joblib.dump(model, model_path_name)\r\n \r\n scores.append(f1_score(y_valid, y_pred_valid)) \r\n \r\n if model_type == 'lgb':\r\n # feature importance\r\n fold_importance = pd.DataFrame()\r\n fold_importance[\"feature\"] = X_train.columns\r\n fold_importance[\"importance\"] = model.feature_importances_\r\n feature_importance = pd.concat([feature_importance, fold_importance], axis=0)\r\n \r\n print('score: {0:.4f}.'.format(np.mean(scores)))\r\n\r\n if model_type == 'lgb':\r\n feature_importance[\"importance\"]\r\n if plot_feature_importance:\r\n cols = 
feature_importance[[\"feature\", \"importance\"]].groupby(\"feature\").mean().sort_values(\r\n by=\"importance\", ascending=False)[:50].index\r\n\r\n best_features = feature_importance.loc[feature_importance.feature.isin(cols)]\r\n\r\n #sns.barplot(x=\"importance\", y=\"feature\", data=best_features.sort_values(by=\"importance\", ascending=False));\r\n \r\n return feature_importance, np.mean(scores)\r\n return np.mean(scores)\r\n \r\n else:\r\n return np.mean(scores)", "def train(self):\n df = self.df\n self.scaler = MinMaxScaler()\n self.scaler.fit(df)\n df[df.columns] = self.scaler.transform(df)\n\n\n X_train, y_train = get_X_y(df, self.n_days, self.length , self.style)\n X_train = np.array(X_train)\n X_train.shape = (X_train.shape[0], X_train.shape[2])\n\n self.clf = LogisticRegression().fit(X_train, y_train)\n\n #es = EarlyStopping(monitor = 'accuracy',mode = 'min' , verbose = 1, patience = 100, restore_best_weights = True)", "def train_model(algorithm, X_train, y_train, X_test, y_test, cv_type='rand', transformation_type='tf'):\n \n model = algorithm(X_train, y_train, cv_type=cv_type)\n model_preds = model.predict(X_test)\n model_score = f1_score(y_test, model_preds, average='weighted')\n \n return model, model_score, transformation_type", "def train(self, examples):\n print(examples)\n # first we will do gensim to get word embeddings\n tokens = []\n for example in examples:\n for tuple in example:\n tokens.append([tuple[0]])\n self.model = Word2Vec(tokens, min_count=1, size=100).wv\n # shuffle the examples so that they are gone through 'randomly'\n #print(examples)\n random.shuffle(examples)\n #print(examples)\n # iterate through our examples\n for j in range(len(examples)):\n # the stored label for the previous token\n prev_label = None\n prev_word = None\n # iterate through our tokens for the example\n for i in range(len(examples[j])):\n # store our token and its label\n token = examples[j][i][0]\n y = examples[j][i][1]\n # get the features for our current token\n next_word = None\n if i <= (len(examples)-1):\n next_word = examples[j][i+1][0]\n features = self.featurize(prev_label, prev_word, token, next_word)\n # set our previous label to our current since\n # we are done featurizing and need to store it for\n # the next iteration\n prev_label = y\n # a dictionary that will store our z values\n z = {}\n # calculate our z value for every state for\n # the example we are on\n # z(state) = features * weights\n # z[state] = np.dot(features, weights[state])\n for state in self.states:\n z[state] = np.dot(features, self.weights[state])\n # store our max\n max = -1\n # store our y_hat\n y_hat = None\n # store our probabilities\n prob = {}\n # this runs softmax on our z's\n # y_hat = softmax(z)\n denom = sum(np.exp(np.array(list(z.values()))))\n for state in self.states:\n # softmax = p(state) = e^z[state] / (sum[e^z for all z's)\n # making sure this works the way I want it to, should\n # be three values\n #print(np.array(list(z.values())))\n #print(np.exp(np.array(list(z.values()))))\n prob[state] = np.exp(z[state]) / denom\n # if our current prob is greater than the others then it is our boy\n if prob[state] > max:\n # save the new prob as the max\n max = prob[state]\n # save the state as our prediction y_hat\n y_hat = state\n # this will hold our gradients for all the states\n gradients = {}\n for state in self.states:\n # gradient[state] = ((y_hat == state) - prob[state]) * features\n gradients[state] = ((y_hat == state) - prob[state]) * features\n # weights[state] -= loss * 
gradients\n self.weights[state] -= self.loss * gradients[state]", "def train(self, x, y):\n try:\n t_start = time.time()\n self.managerlogger.logger.info(\"start lr..\")\n self._model.fit(x, y)\n self.managerlogger.logger.info(\"finished lr!\")\n t_end = time.time()\n self.managerlogger.logger.info(\"lr train time: %s\" % (t_end - t_start))\n return runstatus.RunStatus.SUCC\n except Exception as err:\n self.managerlogger.logger.error(\"lr train error: %s \" % err)\n self.errorlogger.logger.error(\"lr train error:\\n %s \" % traceback.format_exc())\n return runstatus.RunStatus.FAILED", "def train_standard(self, X, y, lambd, n_iter):\n self.n_rows = X.shape[0]\n if lambd == 0:\n # lambd = TOLERANCE\n logging.warning(\"calling regularization with zero lambda\")\n self.n = X.shape[1]\n theta = np.array([np.random.normal() for j in xrange(0, self.n)])\n prev_error = 0\n denom = [np.linalg.norm(X[:, k], 2)**2 for k in xrange(0, len(theta))]\n for i in xrange(1, n_iter):\n ind = np.ones(len(theta), dtype=bool)\n for k in xrange(0, len(theta)):\n ind[k] = False\n r = y - np.dot(X[:, ind], theta[ind])\n num = np.dot(np.transpose(X[:, k]), r)\n if denom[k] == 0:\n theta[k] = 0\n continue\n temp = num / denom[k]\n theta[k] = self.soft_threshold(temp, (2.0*self.alpha*lambd)/denom[k])\n ind[k] = True\n errors1 = y - np.dot(X, theta)\n train_error = np.sqrt(1/(1.0*len(errors1))*sum(np.square(errors1)))\n if abs(prev_error - train_error) < TOLERANCE:\n logging.info(\"converged at iteration %s\", i)\n break\n else:\n prev_error = train_error\n return theta", "def train_and_score_pipeline(pipeline, automl, full_X_train, full_y_train):\n start = time.time()\n cv_data = []\n logger.info(\"\\tStarting cross validation\")\n X_pd = _convert_woodwork_types_wrapper(full_X_train.to_dataframe())\n y_pd = _convert_woodwork_types_wrapper(full_y_train.to_series())\n y_pd_encoded = y_pd\n # Encode target for classification problems so that we can support float targets. This is okay because we only use split to get the indices to split on\n if is_classification(automl.problem_type):\n y_mapping = {original_target: encoded_target for (encoded_target, original_target) in enumerate(y_pd.value_counts().index)}\n y_pd_encoded = y_pd.map(y_mapping)\n for i, (train, valid) in enumerate(automl.data_splitter.split(X_pd, y_pd_encoded)):\n if pipeline.model_family == ModelFamily.ENSEMBLE and i > 0:\n # Stacked ensembles do CV internally, so we do not run CV here for performance reasons.\n logger.debug(f\"Skipping fold {i} because CV for stacked ensembles is not supported.\")\n break\n logger.debug(f\"\\t\\tTraining and scoring on fold {i}\")\n X_train, X_valid = full_X_train.iloc[train], full_X_train.iloc[valid]\n y_train, y_valid = full_y_train.iloc[train], full_y_train.iloc[valid]\n if is_binary(automl.problem_type) or is_multiclass(automl.problem_type):\n diff_train = set(np.setdiff1d(full_y_train.to_series(), y_train.to_series()))\n diff_valid = set(np.setdiff1d(full_y_train.to_series(), y_valid.to_series()))\n diff_string = f\"Missing target values in the training set after data split: {diff_train}. 
\" if diff_train else \"\"\n diff_string += f\"Missing target values in the validation set after data split: {diff_valid}.\" if diff_valid else \"\"\n if diff_string:\n raise Exception(diff_string)\n objectives_to_score = [automl.objective] + automl.additional_objectives\n cv_pipeline = None\n try:\n logger.debug(f\"\\t\\t\\tFold {i}: starting training\")\n cv_pipeline = EngineBase.train_pipeline(pipeline, X_train, y_train, automl.optimize_thresholds, automl.objective)\n logger.debug(f\"\\t\\t\\tFold {i}: finished training\")\n if automl.optimize_thresholds and pipeline.can_tune_threshold_with_objective(automl.objective) and automl.objective.can_optimize_threshold:\n logger.debug(f\"\\t\\t\\tFold {i}: Optimal threshold found ({cv_pipeline.threshold:.3f})\")\n logger.debug(f\"\\t\\t\\tFold {i}: Scoring trained pipeline\")\n scores = cv_pipeline.score(X_valid, y_valid, objectives=objectives_to_score)\n logger.debug(f\"\\t\\t\\tFold {i}: {automl.objective.name} score: {scores[automl.objective.name]:.3f}\")\n score = scores[automl.objective.name]\n except Exception as e:\n if automl.error_callback is not None:\n automl.error_callback(exception=e, traceback=traceback.format_tb(sys.exc_info()[2]), automl=automl,\n fold_num=i, pipeline=pipeline)\n if isinstance(e, PipelineScoreError):\n nan_scores = {objective: np.nan for objective in e.exceptions}\n scores = {**nan_scores, **e.scored_successfully}\n scores = OrderedDict({o.name: scores[o.name] for o in [automl.objective] + automl.additional_objectives})\n score = scores[automl.objective.name]\n else:\n score = np.nan\n scores = OrderedDict(zip([n.name for n in automl.additional_objectives], [np.nan] * len(automl.additional_objectives)))\n\n ordered_scores = OrderedDict()\n ordered_scores.update({automl.objective.name: score})\n ordered_scores.update(scores)\n ordered_scores.update({\"# Training\": y_train.shape[0]})\n ordered_scores.update({\"# Validation\": y_valid.shape[0]})\n\n evaluation_entry = {\"all_objective_scores\": ordered_scores, \"score\": score, 'binary_classification_threshold': None}\n if is_binary(automl.problem_type) and cv_pipeline is not None and cv_pipeline.threshold is not None:\n evaluation_entry['binary_classification_threshold'] = cv_pipeline.threshold\n cv_data.append(evaluation_entry)\n training_time = time.time() - start\n cv_scores = pd.Series([fold['score'] for fold in cv_data])\n cv_score_mean = cv_scores.mean()\n logger.info(f\"\\tFinished cross validation - mean {automl.objective.name}: {cv_score_mean:.3f}\")\n return {'cv_data': cv_data, 'training_time': training_time, 'cv_scores': cv_scores, 'cv_score_mean': cv_score_mean}", "def train():\n # YOUR TRAINING CODE GOES HERE", "def eval_additional_scores(self, **kwargs):\n self.model.eval()\n self.likelihood.eval()\n\n X_train_torch = torch.from_numpy(kwargs[\"X_train\"]).to(self.device)\n y_train_torch = torch.from_numpy(kwargs[\"y_train\"]).to(self.device)\n mll = gpytorch.mlls.VariationalELBO(self.likelihood, self.model, num_data=y_train_torch.numel())\n\n with torch.no_grad(), gpytorch.settings.num_likelihood_samples(self.num_likelihood_samples):\n f_pred = self.model(X_train_torch)\n elbo = mll(f_pred, y_train_torch).item()\n\n return {\n \"elbo\": elbo\n }", "def train(self):\n not_improved_count = 0\n best_validation_fscore = 0.0\n\n for epoch in range(self.start_epoch, self.max_epochs + 1):\n # Perform one training epoch and output training metrics\n training_metrics = self.run_epoch(epoch, self.train_data_loader, training=True)\n 
self.logger.info(\"Training epoch {} finished.\".format(epoch))\n self.log_metrics(training_metrics)\n\n # Perform one validation epoch and output validation metrics\n validation_metrics = self.run_epoch(epoch, self.valid_data_loader, training=False)\n self.logger.info(\"Validation epoch {} finished.\".format(epoch))\n self.log_metrics(validation_metrics)\n\n # Check if model is new best according to validation F1 score\n improved = validation_metrics[\"fscore\"] > best_validation_fscore\n if improved:\n best_validation_fscore = validation_metrics[\"fscore\"]\n not_improved_count = 0\n else:\n not_improved_count += 1\n\n if improved or epoch % self.save_period == 0:\n self._save_checkpoint(epoch, is_best=improved)\n\n if not_improved_count > self.early_stop and epoch >= self.min_epochs:\n self.logger.info(\"Validation performance didn\\'t improve for {} epochs. \"\n \"Training stops.\".format(self.early_stop))\n break", "def train(models, X_train, y_train, X_test, y_test):\n \n # Train and test each model in a for lop\n accuracies = []\n \n for model in models:\n clf = model.fit(X_train, y_train) # Train\n score = clf.score(X_test, y_test) # Test\n accuracies.append(score)\n\n return accuracies", "def fit(self, X_train, y_train, lr, epochs, batch_size, X_val, y_val, optimiser='sga',\n visualise_training=False, avg_ll=False):\n assert optimiser in ['sga', 'newton'], \"Invalid optimiser!\"\n\n X_orig_train = X_train\n X_orig_val = X_val\n\n if self.basis_function is not None:\n X_train = self.basis_function(X_train, *self.basis_function_args)\n X_val = self.basis_function(X_val, *self.basis_function_args)\n\n X_train = ones_for_bias_trick(X_train)\n X_val = ones_for_bias_trick(X_val)\n\n weights = np.random.randn(X_train.shape[1]) * 0.5\n train_log_likelihoods = []\n test_log_likelihoods = []\n train_accs = []\n test_accs = []\n steps_per_epoch = math.ceil(X_train.shape[0]/batch_size)\n\n for epoch in range(epochs):\n print(\"Epoch:\", epoch)\n\n for step in range(steps_per_epoch):\n X_batch, y_batch = self.create_batches(X_train, y_train, batch_size, step)\n gradient = self.compute_gradient(X_batch, y_batch, weights)\n weights = weights + lr * gradient\n\n self.weights = weights\n\n if visualise_training:\n train_log_likelihoods.append(self.compute_log_likelihood(X_train,\n y_train,\n weights,\n avg=avg_ll))\n test_log_likelihoods.append(self.compute_log_likelihood(X_val,\n y_val,\n weights,\n avg=avg_ll))\n train_accs.append(self.compute_accuracy(X_orig_train, y_train))\n test_accs.append(self.compute_accuracy(X_orig_val, y_val))\n\n if visualise_training:\n plt.figure(1)\n plt.plot(np.arange(1, epochs+1), train_log_likelihoods, label='Training')\n plt.plot(np.arange(1, epochs+1), test_log_likelihoods, label='Test')\n plt.legend()\n plt.show()\n\n plt.figure(2)\n plt.plot(np.arange(1, epochs+1), train_accs, label='Training')\n plt.plot(np.arange(1, epochs + 1), test_accs, label='Test')\n plt.legend()\n plt.show()", "def fit(self, train_data_tuple, valid_data_tuple=None):\n n_total = train_data_tuple[0].size\n batch_loader = RatingsMiniBatchIterator(\n *train_data_tuple,\n batch_size=self.batch_size,\n random_state=self.random_state)\n\n self.trace_epoch = []\n self.trace_loss = []\n self.trace_smooth_loss = []\n self.trace_mae_train = []\n self.trace_mae_valid = []\n\n self.all_loss = []\n\n ## Store list of L1 gradient norms for each parameter\n self.trace_norm_per_param = dict()\n for key in self.param_dict.keys():\n self.trace_norm_per_param[key] = list()\n 
self.trace_smooth_norm_per_param = dict()\n for key in self.param_dict.keys():\n self.trace_smooth_norm_per_param[key] = list()\n\n for epoch_count in range(self.n_epochs): \n epoch = 1.0 * epoch_count\n batch_loader.shuffle()\n\n for i, batch_tuple in enumerate(batch_loader):\n\n ## Compute loss and gradient\n # loss : scalar float\n # grad_dict : dict\n # Keys are string names of individual parameters\n # Values are autograd-generated numpy arrays\n loss, grad_dict = self.calc_loss_and_grad_wrt_parameter_dict(\n self.param_dict, batch_tuple)\n\n ## Rescale loss and gradient vectors\n # So we always estimate the *per-example loss*\n n_per_batch = batch_tuple[0].size\n scale = 1.0 / n_per_batch\n loss *= scale\n for key, arr in grad_dict.items():\n arr *= scale\n self.all_loss.append(loss)\n\n ## Periodically report progress to stdout\n ## & write to internal state attributes: self.trace_*\n do_report_now = self.check_if_report_progress_now(\n epoch_count, self.n_epochs, i, batch_loader.n_batches)\n if do_report_now:\n self.trace_epoch.append(epoch)\n self.trace_loss.append(loss)\n\n # Compute MAE/MSE metrics on training and validation data\n train_perf_dict = self.evaluate_perf_metrics(*train_data_tuple)\n valid_perf_dict = self.evaluate_perf_metrics(*valid_data_tuple)\n self.trace_mae_train.append(train_perf_dict['mae'])\n self.trace_mae_valid.append(valid_perf_dict['mae'])\n\n # Compute 'smoothed' loss by averaging over last B batches\n # Might remove some of the stochasticity in using only the\n # loss from most recent batch.\n smooth_loss = np.mean(self.all_loss[-batch_loader.n_batches:])\n self.trace_smooth_loss.append(smooth_loss)\n\n # Compute L1 norm of gradient of each parameter\n avg_grad_norm_str_list = []\n for key, arr in grad_dict.items():\n norm = np.mean(np.abs(arr))\n self.trace_norm_per_param[key].append(norm)\n cur_norm_str = \"grad_wrt_%s %11.5f\" % (key, norm)\n avg_grad_norm_str_list.append(cur_norm_str)\n avg_grad_norm_str = ' | '.join(avg_grad_norm_str_list)\n\n print(\"epoch %11.3f | loss_total % 11.5f | train_MAE % 11.5f | valid_MAE % 11.5f | %s\" % (\n epoch, loss if epoch <= 2 else smooth_loss,\n train_perf_dict['mae'], valid_perf_dict['mae'],\n avg_grad_norm_str))\n\n ## Update each parameter by taking step in direction of gradient\n epoch += n_per_batch / n_total \n for key, arr in self.param_dict.items():\n arr[:] = arr - self.step_size * grad_dict[key]\n\n # That's all folks.", "def relabelling(run):\n np.random.seed((run ** 5 + 1323002) % 123123) # np.random.seed() alternatively\n\n Xtr, Str, Xts, Yts = data_cache[dset]\n X_train, X_val, y_train, y_val = train_test_split(Xtr, Str, test_size=prop)\n # clf1 is the first classifier while clf2 is the second\n if dset == 2:\n clf1 = svm.SVC(C=2.5, gamma=0.000225, probability=True, max_iter=max_itera)\n else:\n clf1 = svm.SVC(gamma='scale', probability=True, max_iter=max_itera)\n if run == 1:\n print(\"learn pre training model:\")\n clf1.fit(X_train, y_train)\n if run == 1:\n print(\"calculating weighting and fit final model:\")\n bb = clf1.predict_proba(X_train)\n nn = len(y_train)\n ind = np.where(abs(bb[:, 1] - y_train) >= 0.5)\n y_train[ind] = 1 - y_train[ind]\n ind_p = int(nn / 3)\n ind5 = np.hstack((np.argsort(-bb[:, 1])[0:ind_p], np.argsort(-bb[:, 0])[0:ind_p]))\n if dset == 2:\n clf2 = svm.SVC(gamma=0.000225, max_iter=max_itera)\n else:\n clf2 = svm.SVC(gamma=0.00865, max_iter=max_itera)\n clf2.fit(X_train[ind5, :], y_train[ind5])\n return clf2.score(Xts, Yts)", "def experiment_models(train, test, 
train_target, test_target):\n # Linear models\n linear_models = [(LinearRegression, {\"n_jobs\": -1}),\n (Lasso, {\"alpha\": 3}),\n (Ridge, {\"alpha\": 3}),\n (LinearSVR, {\"random_state\": 0, \"tol\": 1e-5})]\n\n # Add polynomial features\n poly = preprocessing.PolynomialFeatures(2)\n\n # scaler\n scaler = preprocessing.StandardScaler().fit(train)\n\n print(\"Use linear models with linear features\")\n for model_ in linear_models:\n scaled_train = scaler.transform(train)\n scaled_test = scaler.transform(test)\n model = model_[0](**model_[1])\n model.fit(scaled_train, train_target.to_numpy())\n train_pred = model.predict(scaled_train)\n valid_pred = model.predict(scaled_test)\n print(\"=========================================\")\n print(f\"Model : {model_}\")\n compute_metrics(train_pred, train_target, valid_pred, test_target)\n print(\"=========================================\")\n\n print(\"Use linear models with polynomial features\")\n train = poly.fit_transform(train)\n test = poly.transform(test)\n scaler = preprocessing.StandardScaler().fit(train)\n for model_ in linear_models:\n scaled_train = scaler.transform(train)\n scaled_test = scaler.transform(test)\n model = model_[0](**model_[1])\n model.fit(scaled_train, train_target.to_numpy())\n train_pred = model.predict(scaled_train)\n valid_pred = model.predict(scaled_test)\n print(\"=========================================\")\n print(f\"Model : {model_}\")\n compute_metrics(train_pred, train_target, valid_pred, test_target)\n print(\"=========================================\")", "def house_prices_mlp(args: Args) -> Tuple[Score, Args]:\n (\n test_data_full,\n train_transformed,\n eval_transformed,\n test_transformed,\n cardinalities,\n ) = house_prices_data()\n train_dataset = get_dataset(\n x_num=train_transformed.X_num,\n x_cat=train_transformed.X_cat.astype(int),\n y_data=train_transformed.y,\n batch_size=args.batch_size,\n buffer_size=len(train_transformed.y),\n single_batch=args.single_batch,\n )\n eval_dataset = get_dataset(\n x_num=eval_transformed.X_num,\n x_cat=eval_transformed.X_cat.astype(int),\n y_data=eval_transformed.y,\n batch_size=len(eval_transformed.y),\n buffer_size=len(eval_transformed.y),\n single_batch=False,\n )\n model = CustomMLP(\n layer_sizes=[args.hidden_size for _ in range(args.n_layers)] + [1],\n vocab_sizes=[card + 1 for card in cardinalities],\n embed_size=args.embed_size,\n dropout_rate=args.dropout_rate,\n dropout=args.dropout_enabled,\n bias=train_transformed.y.mean(),\n batch_norm=args.batch_norm,\n residuals=args.resnet,\n )\n trained_params, eval_loss = train(\n rng=random.PRNGKey(12345),\n model=model,\n optimizer=optax.adamw(\n args.lr,\n weight_decay=args.decay_rate,\n mask=lambda params: jax.tree_map(lambda x: x.ndim > 1, params),\n )\n if args.weight_decay\n else optax.adam(args.lr),\n train_dataset=train_dataset,\n eval_dataset=eval_dataset,\n num_epochs=args.n_epochs,\n cat_input_shape=(args.batch_size, train_transformed.X_cat.shape[1],),\n num_input_shape=(args.batch_size, train_transformed.X_num.shape[1],),\n hist_every=1,\n print_every=args.print_every,\n )\n print(f\"Evaluation RMSE after training: {eval_loss:.3f}\")\n rng = random.PRNGKey(1513241)\n predictions = jnp.exp(\n model.apply(\n trained_params,\n test_transformed.X_num,\n test_transformed.X_cat.astype(int),\n rngs={\"dropout\": rng},\n train=False,\n )\n )\n print(\n f\"predictions mean: {predictions.mean():.3f}, std: {predictions.std():.3f}, min: {predictions.min():.3f}, max: {predictions.max():.3f}.\"\n )\n sub_name = 
f\"submission_{hash(args)}\"\n submit_predictions(\n sub_name,\n predictions=predictions,\n id_col=test_data_full.loc[:, \"Id\"].values,\n )\n return eval_loss, sub_name", "def train_model_cross_validation(model, train_docs, test_docs, nb_iter, output_dir, spacy_type = True, nb_folds = 5):\n\n print(output_dir)\n os.mkdir(output_dir) # creating the output directory\n print(\" ============= TRAINING MODEL ===========================\")\n\n\n # tuple conversion (the tuple type is lost when dataframe -> excel -> dataframe)\n\n #docs['annotations'] = [[tuple(ann) for ann in annotations] for annotations in docs['annotations'].to_numpy()]\n\n\n # cross validation :\n\n models = []\n all_scores = []\n\n kf = KFold(n_splits=nb_folds)\n c = 0\n for train_index, val_index in kf.split(train_docs):\n\n train_data = train_docs.iloc[train_index, :]\n val_data = train_docs.iloc[val_index, :]\n\n # spacy_format\n TRAIN_DATA = [(text, {'entities': entities}) for [text, entities] in train_data[['text', 'annotations']].to_numpy()]\n\n # trim entities : leading whitespace make the model bug\n TRAIN_DATA = trim_entity_spans(TRAIN_DATA)\n\n # loading of the model\n nlp = model\n\n optimizer = nlp.begin_training()\n\n # get names of other pipes to disable them during training\n pipe_exceptions = [\"ner\" ] #\"trf_wordpiecer\", \"trf_tok2vec\"\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions]\n\n scores = []\n\n # training\n with nlp.disable_pipes(*other_pipes): # only train NER\n\n if not spacy_type : # add the other labels\n ner = nlp.get_pipe(\"ner\")\n ner.add_label('AGE_RELATED')\n ner.add_label('DURATION')\n ner.add_label('FREQUENCY')\n ner.add_label('OTHER')\n\n for i in range(nb_iter):\n\n print('Iteration ', i)\n print()\n losses = {}\n random.shuffle(TRAIN_DATA) # ??\n\n path = ''\n if spacy_type:\n path = 'spacy_model_' + str(c) + '_fold'\n else:\n path = 'all_types_model_' + str(c) + '_fold'\n\n batches = minibatch(TRAIN_DATA, size=1) #compounding(4.0, 20.0, 1.001)\n\n for batch in batches:\n texts, annotations = zip(*batch)\n try:\n nlp.update(texts, annotations, sgd = optimizer, drop=0.5, losses = losses)\n print(\"Losses\", losses)\n except Exception as e:\n print(e)\n #print(text)\n\n tp_g, fp_g, fn_g, p, r, f, pt, rt, ft, type_dict = test_model(test_docs, nlp)\n scores += [(p, r, r, pt, rt, ft)]\n print()\n print()\n\n # test the trained model\n test_model(val_data, nlp)\n\n df_scores = pd.DataFrame(scores, columns = ['span_precision', 'span_recall', 'span_f1', 'type_precision', 'type_recall', 'type_f1'])\n df_scores.to_excel(output_dir + '/' + path + '.xlsx')\n\n\n models += [nlp]\n all_scores += [scores]\n # save model to output directory\n if output_dir is not None:\n nlp.to_disk(output_dir + '/' + path)\n print(\"Saved model to\", output_dir + '/' + path)\n\n c += 1\n\n return models, all_scores", "def _evaluate_during_fit(self, test_loader, epoch):", "def train(self, train_dataset):\n\n # check fine_tuning option\n model_path = os.path.join(self.check_point, 'model.pt')\n if self.fine_tune and not os.path.exists(model_path):\n raise Exception('Cannot find %s.' 
%model_path)\n elif self.fine_tune and os.path.exists(model_path):\n if self.verbose:\n pass\n self.model = torch.load(model_path)\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)\n \n # capture best model\n best_val_psnr = -1\n best_model_state = self.model.state_dict()\n\n # Train the model\n for epoch in range(self.num_epochs):\n self._epoch_step(train_dataset, epoch)\n self.scheduler.step()\n\n\n\n # capture running PSNR on train and val dataset\n train_psnr, train_ssim, _, _ = self._check_PSNR(train_dataset)\n self.hist_train_psnr.append(train_psnr)\n \n\n \n # write the model to hard-disk for testing\n if not os.path.exists(self.check_point):\n os.makedirs(self.check_point)\n model_path = os.path.join(self.check_point, 'model.pt')\n torch.save(self.model, model_path)", "def train(self, x_train_unvec, y_train, x_val_unvec, y_val, verbose=True):\n # early stopping by monitoring validation loss\n # custom implementation to accomodate batch data processing & training\n class EarlyStopping(Exception):\n pass\n try:\n best_val_loss = float('inf')\n patience_counter = self._patience\n\n for epoch in range(1, self._max_num_epoch + 1):\n epoch_start = time.time()\n\n if verbose:\n print('\\n', 'Epoch {} start:'.format(epoch))\n print('{} train batches'.format(\n int(ceil(float(len(x_train_unvec)) / self._batch_size))))\n\n # train by batch\n for i, (x, y) in enumerate(\n zip(chunks(x_train_unvec, self._batch_size),\n chunks(y_train, self._batch_size))):\n if i % 250 == 0 and verbose:\n print('-- train batch {}'.format(i))\n\n assert len(x) == len(y) # chunk sizes should be equal\n x = self.process_x(x)\n y = self.process_y(y)\n\n self._model.train_on_batch(x, y)\n\n if verbose:\n print('{} val batches'.format(\n int(ceil(float(len(x_val_unvec)) / self._batch_size))))\n\n # validation by batch\n y_val_probas = np.empty([0, self._num_class])\n for i, (x, y) in enumerate(\n zip(chunks(x_val_unvec, self._batch_size),\n chunks(y_val, self._batch_size))):\n if i % 250 == 0 and verbose:\n print('-- val batch {}'.format(i))\n\n assert len(x) == len(y) # chunk sizes should be equal\n x = self.process_x(x)\n y = self.process_y(y)\n\n batch_probas = self._model.predict_proba(\n x, batch_size=self._batch_size, verbose=0)\n y_val_probas = np.append(\n y_val_probas, batch_probas, axis=0)\n\n val_loss = log_loss(y_val, y_val_probas,\n labels=range(self._num_class))\n\n if verbose:\n print('Epoch {} / loss: {:.3f} / time: {:.3f} s'\n .format(epoch, val_loss, time.time() - epoch_start))\n\n # trigger early stopping (do not save current model)\n if val_loss >= best_val_loss:\n if patience_counter == 0:\n if verbose:\n print('Early stopping on epoch {}'.format(epoch))\n raise EarlyStopping\n patience_counter -= 1\n # continue training, go onto next epoch\n else:\n patience_counter = self._patience\n best_val_loss = val_loss\n best_epoch = epoch\n model_weights = self._model.get_weights() # save best model\n\n if verbose:\n print('Hit max number of training epochs: {}'\n .format(self._max_num_epoch))\n raise EarlyStopping\n\n except EarlyStopping:\n if verbose:\n print('Best epoch was epoch {}'.format(best_epoch))\n # load most recent model weights from prior to early stopping\n self._model.set_weights(model_weights)", "def train(self, X, y, X_val, y_val,\n alpha=1e-3, alpha_decay=0.95,\n reg=1e-5, num_iters=100,\n batch_size=100):\n num_train = X.shape[0]\n iteration = max(num_train / batch_size, 1)\n\n loss_train = []\n train_acc = []\n val_acc = []\n\n for it in 
range(num_iters):\n print('iteration '+str(it))\n data_batch = None\n label_batch = None\n \n #########################################################################\n # create a random batch of data and labels for\n indx = np.random.permutation(len(X))\n data, labels = X[indx], y[indx]\n data_batch = data[0:batch_size]\n label_batch = labels[0:batch_size]\n #########################################################################\n pass\n #########################################################################\n # END OF YOUR CODE #\n #########################################################################\n # calculate loss and gradients\n loss, gradient = self.loss(data_batch, y=label_batch, reg=reg)\n loss_train.append(loss)\n #########################################################################\n # update weights and biases which stored in the slef.p_net regarding \n # to gradient dictionary.\n self.p_net['W1'] -= alpha * gradient['W1']\n self.p_net['b1'] -= alpha * gradient['b1']\n self.p_net['W2'] -= alpha * gradient['W2']\n self.p_net['b2'] -= alpha * gradient['b2']\n #########################################################################\n pass\n #########################################################################\n # END OF YOUR CODE #\n #########################################################################\n if it % 100 == 0:\n print ('iteration %d / %d: loss %f' % (it, num_iters, loss))\n \n if it % iteration == 0:\n # Check accuracy\n train_acc_ = (self.predict(data_batch) == label_batch).mean()\n val_acc_ = (self.predict(X_val) == y_val).mean()\n train_acc.append(train_acc_)\n val_acc.append(val_acc_)\n\n alpha *= alpha_decay\n\n return {\n 'loss_train': loss_train,\n 'train_acc': train_acc,\n 'val_acc': val_acc,\n }", "def learn(self, Xtrain, ytrain):\n # Dividing by numsamples before adding ridge regularization\n # to make the regularization parameter not dependent on numsamples\n numsamples = Xtrain.shape[0]\n Xless = Xtrain[:,self.params['features']]\n y = ytrain[:, np.newaxis]\n #self.weights = np.dot(np.dot(np.transpose(Xless), np.linalg.inv(np.dot(Xless, np.transpose(Xless))/numsamples) / numsamples), y) / numsamples\n #Solves with respect to w for the equation Xless * w = y: it computes the pseudo inverse, using singular values internally, for the matri Xlessx, avoiding the original singular matrix error.\n self.weights = np.linalg.lstsq(Xless, y)[0]", "def train(features, outputs, test_features, test_outputs, model, params_grid=None, verbose=2):\n\tstart = time.time()\n\topt_model = fine_tune(features, outputs, model, params_grid=params_grid)\n\tscore = opt_model.score(test_features, test_outputs)\n\tend = time.time()\n\tprint(\"Time to fine tune on %s model: %f\"%(type(model), score)) \n\tprint(\"Optimal model parameters: %s\"%(opt_model.best_estimator_))\n\tprint(\"Best fine tune score: %f\"%(opt_model.best_score_))\n\tprint(\"Accuracy of model on test set: %f\"%(score))\n\treturn opt_model.best_estimator_", "def model_evaluate(model,x_train,n_y_array,x_val, vald_array):\n\n scores = model.evaluate(x_train, n_y_array, verbose=1)\n\n scores2 = model.evaluate(x_val, vald_array, verbose=1)\n\n\n print(\"for traininf set\")\n\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\n\n print(\"%s: %.2f%%\" % (model.metrics_names[0], scores[0]))\n\n\n\n print(\"for validation set : \") \n\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores2[1]*100))\n\n print(\"%s: %.2f%%\" % (model.metrics_names[0], scores2[0]))", "def train(self, df, 
feature, max_range, extra=False, defender=False):\n\n df2 = self._train_preprocess(df, feature, extra)\n\n # No need for names anymore\n if defender:\n df2 = df2.drop([\"Player Id\"], axis=1)\n\n # Instantiate the models\n self.rfrg = RandomForestRegressor(n_estimators=1000, n_jobs=-1, random_state=69420)\n\n if not defender:\n self.gbrg = LGBMRegressor(n_estimators=1000, learning_rate=0.01)\n\n # Then, perform regression -> This is to see how it performs over weeks\n mean_error1 = []\n mean_error2 = []\n\n for week in range(max_range - 5, max_range):\n train = df2[df2['week'] < week]\n val = df2[df2['week'] == week]\n\n x_train, x_test = train.drop([feature], axis=1), val.drop([feature], axis=1)\n y_train, y_test = train[feature].values, val[feature].values\n\n self.rfrg.fit(x_train, y_train)\n preds1 = self.rfrg.predict(x_test)\n error1 = rmsle(y_test, preds1)\n print('Week %d - Error for Random Forest %.5f' % (week, error1))\n\n mean_error1.append(error1)\n if not defender:\n self.gbrg.fit(x_train, np.log1p(y_train))\n preds2 = np.expm1(self.gbrg.predict(x_test))\n error2 = rmsle(y_test, preds2)\n print('Week %d - Error for Gradient Boosting %.5f' % (week, error2))\n mean_error2.append(error2)\n\n print()\n print()\n print(\"Feature statistics:\")\n print(f\"Min value for feature {feature}: {df[feature].min()}\")\n print(f\"Max value for feature {feature}: {df[feature].max()}\")\n print(f\"Mean value for feature {feature}: {df[feature].mean()}\")\n print(f\"Standard deviation for feature {feature}: {df[feature].std()}\")\n print()\n print(\"Results\")\n print('Mean Error for Random Forest = %.5f' % np.mean(mean_error1))\n\n # Note: the final model is trained on every week and stored in self.model!\n final_xtrain = df2.drop([feature], axis=1)\n final_ytrain = df2[feature].values\n self.rfrg.fit(final_xtrain, final_ytrain)\n\n if not defender:\n print('Mean Error for Gradient Boosting = %.5f' % np.mean(mean_error2))\n self.gbrg.fit(final_xtrain, np.log1p(final_ytrain))", "def test_evaluate():\n X_train, X_test, y_train, y_test = src.load()\n clf, score = src.train(X_train, y_train)\n test_score = src.evaluate(clf, X_test, y_test)\n assert isinstance(test_score, float)", "def evaluate(self):\n self.training = False", "def cross_validate(X, Y, folds=5):\n\n log = LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,\n intercept_scaling=1, max_iter=200, multi_class='ovr', n_jobs=3,\n penalty='l2', random_state=None, solver='liblinear', tol=0.0001,\n verbose=0, warm_start=False)\n \n\n \n\n\n scores_log = [] \n scores_forest = []\n index = np.arange(X.shape[0])\n score_log = 0\n score_forest = 0\n \n for i in range(folds):\n score_log = 0\n score_forest = 0\n \n test_index = np.random.choice(index, int(X.shape[0]*1/folds),replace=False)\n index = np.setdiff1d(np.arange(X.shape[0]),test_index)\n \n test_x = X[test_index]\n test_y = Y[test_index]\n\n log.fit(X[index],Y[index])\n pred_log = log.predict(test_x)\n \n ran.fit(X[index],Y[index])\n pred_ran = ran.predict(test_x)\n \n for i in range(len(test_y)):\n if(pred_log[i] == test_y[i]):\n score_log += 1\n if(pred_ran[i] == test_y[i]):\n score_forest += 1\n scores_log.append(score_log/len(test_y))\n scores_forest.append(score_forest/len(test_y))\n \n\n return (np.mean(scores_log),np.mean(scores_forest))", "def eval(self):\n self.train(mode=False)", "def validation():\n global SEARCH_REGION\n predictor = None\n response = []\n count = 0\n for chromosome in train_set:\n if chromosome not in configs.chromosome_list:\n 
continue\n for inf in train_set[chromosome]:\n strand = inf[3]\n if strand == 1:\n base = inf[0] - SEARCH_REGION\n final = inf[0] \n else:\n base = inf[1]\n final = inf[1] + SEARCH_REGION\n value = inf[2]\n if base < 0:\n continue\n result = fectch_predictor_avg(chromosome, base, final)\n if result is None:\n continue\n response.append(value)\n if predictor is None:\n predictor = result\n else:\n predictor = np.vstack((predictor, result))\n count += 1\n print(\"in train:\", predictor.shape)\n response = np.asarray(response).T\n regr = linear_model.LinearRegression()\n regr.fit(predictor, response)\n \n pre_response = regr.predict(predictor)\n adj_r2 = util.adj_r2_score(response, pre_response,count,state_n)\n r2 = sklearn.metrics.r2_score(response, pre_response)\n configs.toLog(\"train r2:{}\".format(r2))\n configs.toLog(\"train adjr2:{}\".format(adj_r2))\n\n predictor = None\n response = [] \n count = 0\n for chromosome in test_set:\n if chromosome not in configs.chromosome_list:\n continue\n for inf in test_set[chromosome]:\n strand = inf[3]\n if strand == 1:\n base = inf[0] - SEARCH_REGION\n final = inf[0] \n else:\n base = inf[1]\n final = inf[1] + SEARCH_REGION\n value = inf[2]\n if base < 0:\n continue\n result = fectch_predictor_avg(chromosome, base, final)\n if result is None:\n continue\n response.append(value)\n if predictor is None:\n predictor = result\n else:\n predictor = np.vstack((predictor, result))\n count += 1\n print(\"in test:\", predictor.shape)\n pre_response = regr.predict(predictor)\n adj_r2 = util.adj_r2_score(response, pre_response, count, state_n)\n r2 = sklearn.metrics.r2_score(response, pre_response)\n configs.toLog(\"test r2:{}\".format(r2))\n configs.toLog(\"test adjr2:{}\".format(adj_r2))", "def compute_beta_vae_sklearn(ground_truth_data,\n representation_function,\n random_state,\n batch_size=gin.REQUIRED,\n num_train=gin.REQUIRED,\n num_eval=gin.REQUIRED):\n logging.info(\"Generating training set.\")\n train_points, train_labels = _generate_training_batch(\n ground_truth_data, representation_function, batch_size, num_train,\n random_state)\n\n logging.info(\"Training sklearn model.\")\n model = linear_model.LogisticRegression(random_state=random_state)\n model.fit(train_points, train_labels)\n\n logging.info(\"Evaluate training set accuracy.\")\n train_accuracy = model.score(train_points, train_labels)\n train_accuracy = np.mean(model.predict(train_points) == train_labels)\n logging.info(\"Training set accuracy: %.2g\", train_accuracy)\n\n logging.info(\"Generating evaluation set.\")\n eval_points, eval_labels = _generate_training_batch(\n ground_truth_data, representation_function, batch_size, num_eval,\n random_state)\n\n logging.info(\"Evaluate evaluation set accuracy.\")\n eval_accuracy = model.score(eval_points, eval_labels)\n logging.info(\"Evaluation set accuracy: %.2g\", eval_accuracy)\n scores_dict = {}\n scores_dict[\"train_accuracy\"] = train_accuracy\n scores_dict[\"eval_accuracy\"] = eval_accuracy\n return scores_dict", "def fit(self):\n accuracy = 0\n no_improvement = 0\n epochs = trange(self.args.epochs, desc=\"Accuracy\")\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.learning_rate)\n self.model.train()\n for epoch in epochs:\n self.optimizer.zero_grad()\n prediction = self.model(self.propagation_matrix, self.features)\n loss = torch.nn.functional.nll_loss(prediction[self.train_nodes], self.target[self.train_nodes])\n loss = loss + self.args.lambd*torch.sum(self.model.page_rank_convolution_1.weight_matrix**2)\n 
loss.backward()\n self.optimizer.step()\n new_accuracy = self.score(self.validation_nodes)\n epochs.set_description(\"Validation Accuracy: %g\" % round(new_accuracy,4))\n if new_accuracy < accuracy:\n no_improvement = no_improvement + 1\n if no_improvement == self.args.early_stopping:\n epochs.close()\n break\n else:\n no_improvement = 0\n accuracy = new_accuracy \n acc = self.score(self.test_nodes)\n print(\"\\nTest accuracy: \" + str(round(acc,4)) )", "def main():\n \n # The following 5 command lines can be outcommented if the features are already created.\n # There is no need to process the data every single time.\n # Fine tuning the learning algorythm is much faster without that extra step.\n \n # by reading the train dataset the feature index is created.\n # First calling of the processdata function\n # Data limited to 300000\n featureIndexes = processData(os.path.join(dataFolder,\"avito_train.tsv\"), itemsLimit=600000)\n print \"featureIndex generated!\"\n print len(featureIndexes)\n\n # Trainfeature is created using the indexfeatures...\n # Second calling of the processdata function\n trainFeatures, trainTargets, trainItemIds, trainPrices, trainUrls, trainPhones, trainEmails, trainLength = processData(os.path.join(dataFolder,\"avito_train.tsv\"), itemsLimit=600000) # Original itemsLimit=300000\n\n # Building the test dataset... just like the training...\n testFeatures, testItemIds, testPrices, testUrls, testPhones, testEmails, testLength = processData(os.path.join(dataFolder,\"avito_test.tsv\"), featureIndexes)\n\n # Dumping data into file...\n # joblib.dump((trainFeatures, trainTargets, trainItemIds, testFeatures, testItemIds), os.path.join(dataFolder,\"train_data.pkl\"))\n joblib.dump((trainFeatures,trainTargets,trainItemIds,trainPrices,trainUrls,trainPhones,trainEmails,trainLength,\n testFeatures, testItemIds,testPrices,testUrls,testPhones,testEmails,testLength), os.path.join(dataFolder,\"SeparatedByCategory.pkl\"))\n\n\n # loading data pack...\n # trainFeatures, trainTargets, trainItemIds, testFeatures, testItemIds = joblib.load(os.path.join(dataFolder,\"train_data.pkl\"))\n\n #logging.info(\"Feature preparation done, fitting model...\")\n\n # Stochastic gradient model", "def LogisticRegression_self_test(X_train, X_test, y_train, y_test, learning_rates, epochs, iteration):\n\n\t# scoping number of training samples\n\n\tn_inputs = X_train.shape[0]\n\tn_features = X_train.shape[1]\n\n\t\n\n\teta_ = 1e-12\n\tbeta_opt = np.random.randn(X_train.shape[1], 2)\n\tcalc_beta_GD, norm = GradientDescent(X_train, beta_opt, y_train, iteration, eta_)\n\tprob_GD, predict_GD= Probability_GD(X_test, calc_beta_GD) #defining values to be between 0 and 1\n\t#yPred_GD = (predict_GD >= 0.5).astype(int) # converting to just 0 or 1\n\n\t#Define Logistic regression\n\tclf = LogisticRegression(solver='lbfgs', max_iter=1e5)\n\tclf = clf.fit(X_train, np.ravel(y_train))\n\tpred_sklearn = clf.predict(X_test)\n\tprob_sklearn = clf.predict_proba(X_test)\n\t#print(prob_sklearn)\n\n\t#for eta in np.logspace(np.log10(1e-6), np.log10(1e0), 7):\n\taccuracy = np.zeros(len(learning_rates))\n\tauc_score = np.zeros(len(learning_rates))\n\n\tfor i, eta in enumerate(learning_rates):\n\t\tbeta_SGD = stochastic_gradient_descent(X_train, beta_opt, y_train, eta, epochs, iteration)\n\t\tprob_SGD, predict_SGD= Probability(X_test, beta_SGD) #defining values to be between 0 and 1\n\t\t\n\t\t\n\t\taccuracy[i] = metrics.accuracy_score(y_test, predict_SGD)\n\t\tauc_score[i] = metrics.roc_auc_score(y_test, 
predict_SGD)\n\t\tdifference = y_test - predict_SGD\n\n\t\t\n\n\t\tif i> 0 and auc_score[i] > auc_score[i-1]:\n\t\t\tbest_pred_SGD= predict_SGD\n\t\t\tbest_prob_SGD = prob_SGD\n\t\n\n\t\tprint('Accuracy {}, learning rate= {}, iterations = {}'.format(accuracy[i], eta, iteration))\n\t\n\t\tprint('Auc score: {}'.format(auc_score[i]))\n\n\n\t\t\"\"\"\n\t\tplt.plot(yPred, label='predict')\n\t\tplt.plot(optimal_beta, label ='optimal beta')\n\t\tplt.plot(y_test, label='test')\n\t\tplt.show()\n\t\t\"\"\"\n\n\tsns.set()\n\tsns.heatmap(pd.DataFrame(accuracy), annot= True, fmt='.4g')\n\tplt.title('Grid-search for logistic regression')\n\tplt.ylabel('Learning rate: $\\\\eta$')\n\tplt.xlabel('Regularization Term: $\\\\lambda$')\n\t#plt.xticks(ticks=np.arange(len(learning_rates)) + 0.5, labels=learning_rates)\n\t#plt.yticks(ticks=np.arange(len(lambda_values)) + 0.5, labels=lambda_values)\n\tb, t = plt.ylim() # discover the values for bottom and top\n\tb += 0.5 # Add 0.5 to the bottom\n\tt -= 0.5 # Subtract 0.5 from the top\n\tplt.ylim(b, t) # update the ylim(bottom, top) values\n\t#plt.savefig('accuracy_logreg.png')\n\tplt.show()\n\n\tsns.heatmap(pd.DataFrame(auc_score), annot= True, fmt='.4g')\n\tplt.title('Grid-search for logistic regression')\n\tplt.ylabel('Learning rate: $\\\\eta$')\n\tplt.xlabel('Regularization Term: $\\\\lambda$')\n\t#plt.xticks(ticks=np.arange(len(learning_rates)) + 0.5, labels=learning_rates)\n\t#plt.yticks(ticks=np.arange(len(lambda_values)) + 0.5, labels=lambda_values)\n\tb, t = plt.ylim() # discover the values for bottom and top\n\tb += 0.5 # Add 0.5 to the bottom\n\tt -= 0.5 # Subtract 0.5 from the top\n\tplt.ylim(b, t) # update the ylim(bottom, top) values\n\t#plt.savefig('auc_score_logreg.png')\n\tplt.show()\n\n\t#plot confusion matrix\n\tConfusion_Matrix(y_test, predict_GD)\n\t#Confusion_Matrix(y_test, best_pred_SGD)\n\t#Confusion_Matrix(y_test, pred_sklearn)\n\n\t#diff = np.concatenate((1- predict, predict), axis=1)\n\n\tdiff_sklearn = np.concatenate((1- prob_sklearn, prob_sklearn), axis=1)\n\tdiff_GD = np.concatenate((1- prob_GD, prob_GD), axis=1)\n\tdiff_SGD = np.concatenate((1- best_prob_SGD, best_prob_SGD), axis=1)\n\n\t#plot roc curves\n\tplot_roc(y_test, prob_sklearn)\n\tplot_roc(y_test, diff_SGD)\n\tplot_roc(y_test, prob_GD)\n\tplt.show()\n\n\t#plot cumulative gain curves\n\tplot_cumulative_gain(y_test, prob_sklearn)\n\tax = plot_cumulative_gain(y_test, diff_SGD)\n\tplot_cumulative_gain(y_test, prob_GD)\n\t#plt.show()\n\n\n\n\t\"\"\"\n\t#plot roc curves\n\tplot_roc(y_test, diff_sklearn, plot_micro=False, plot_macro= False)\n\tplot_roc(y_test, diff_GD, plot_micro=False, plot_macro= False)\n\tplot_roc(y_test, diff_SGD, plot_micro=False, plot_macro= False)\n\tplt.show()\n\n\t#plot cumulative gain curves\n\tplot_cumulative_gain(y_test, diff_sklearn)\n\tplot_cumulative_gain(y_test, diff_GD)\n\tplot_cumulative_gain(y_test, diff_SGD)\n\tplt.show()\t\n\n\t\"\"\"\n\n\tmodel_curve = auc_score\n\tarea_baseline = 0.5\n\tarea_ratio = (model_curve - area_baseline)/(area_baseline)\n\tprint('Area Ratio:',area_ratio)\n\n\n\treturn accuracy, learning_rates", "def learn(self, Xtrain, ytrain):\n self.min = np.amin(ytrain)\n self.max = np.amax(ytrain)", "def score_dataset(X_train, X_valid, y_train, y_valid):\r\n model = RandomForestRegressor(n_estimators=100, random_state=0)\r\n model.fit(X_train, y_train)\r\n preds = model.predict(X_valid)\r\n score = mean_absolute_error(y_valid, preds)\r\n return score", "def fit(self, X, Y):\n np.random.seed(40)\n self.num_samples=X.shape[0]\n 
self.layers_sizes.insert(0,X.shape[1])\n self.initialize_parameters()\n variable=self.num_epochs//5\n\n # loop for epochs\n for vv in range(self.num_epochs):\n # creating batches of dataset of specified batch size\n X,Y=shuffle(X,Y,random_state=vv)\n num_batches=X.shape[0]//self.batch_size\n train_x=np.vsplit(X,num_batches)\n train_y=np.vsplit(Y,num_batches)\n train_cost=0\n \n for i in range(num_batches):\n # iterating over batches and applying forward and backward propagation\n # and determining training cost (cross entropy loss) for every batch\n # and averaging them to give a generalised loss\n A,d_collection=self.forward(train_x[i])\n train_cost+=(-np.mean(train_y[i]*np.log(np.transpose(A))))/num_batches\n derivatives=self.backward(train_x[i],train_y[i],d_collection)\n\n self.weight_update(derivatives)\n \n if vv%variable==0:\n print(\"Accuracy score:\",self.score(X,Y))\n \n # adding both training and testing losses in a list to plot in further ques\n self.training_loss_values.append(train_cost)\n test_cost=-np.mean(self.YTEST*np.log(np.transpose(self.predict_proba(self.XTEST))))\n self.testing_loss_values.append(test_cost)\n return self", "def kFoldCrossValidation(self, n_splits ):\n X = self.X\n y = self.y\n\n k_fold = KFold(n_splits)\n model = self.model\n\n for train, test in k_fold.split(X):\n model.fit(X[train], y[train])\n p = model.predict( X[test] )\n # Add line for scores\n\n return model #return scores here?", "def compute_sklearn_features():\n text_dir = 'text_model'\n emb_dir = 'embedding_weights'\n filename = 'glove.6B.50d.txt'\n emb_name = 'glove'\n emotions = ['happy', 'sad', 'angry', 'scared', 'disgusted', 'surprised']\n post_size = 200\n df_all, word_to_id, embedding = preprocess_df(text_dir, emb_dir, filename, emb_name, emotions, post_size)\n\n X = np.stack(df_all['text_list'])\n y = df_all['search_query'].values\n\n id_to_word = {i: k for k, i in word_to_id.iteritems()}\n config = {'word_to_id': word_to_id,\n 'id_to_word': id_to_word,\n 'batch_size': 128,\n 'vocab_size': len(word_to_id),\n 'embedding_dim': embedding.shape[1],\n 'post_size': post_size,\n 'fc1_size': 16,\n 'nb_emotions': len(emotions),\n 'dropout': 1.0, # Proba to keep neurons\n 'max_grad_norm': 5.0, # Maximum norm of gradient\n 'init_scale': 0.1, # Weights initialization scale\n 'initial_lr': 1e-3,\n 'lr_decay': 0.5,\n 'max_epoch_no_decay': 2, # Number of epochs without decaying learning rate\n 'nb_epochs': 10} # Maximum number of epochs\n \n tf.reset_default_graph()\n with tf.Session() as sess:\n print('Computing sklearn features:')\n init_scale = config['init_scale']\n initializer = tf.random_uniform_initializer(-init_scale, init_scale) \n with tf.variable_scope('Model', reuse=None, initializer=initializer):\n config['nb_epochs'] = 1\n m_train = WordModel(config)\n sess.run(tf.global_variables_initializer())\n sess.run(m_train.embedding_init, feed_dict={m_train.embedding_placeholder: embedding})\n\n batch_size = m_train.config['batch_size']\n initial_lr = m_train.config['initial_lr']\n \n nb_batches = X.shape[0] / batch_size\n dropout_param = 1.0\n ops = m_train.h1\n \n sess.run(tf.assign(m_train.learning_rate, initial_lr))\n\n X, y = _shuffling(X, y)\n X_reshaped = X[: (nb_batches * batch_size), :].reshape((nb_batches, batch_size, -1))\n y_reshaped = y[: (nb_batches * batch_size)].reshape((nb_batches, batch_size))\n h1_list = []\n for i in range(nb_batches):\n curr_input = X_reshaped[i, :, :]\n curr_target = y_reshaped[i, :]\n h1_features = sess.run(ops, feed_dict={m_train.input_data: curr_input, 
\n m_train.target: curr_target,\n m_train.keep_prob: dropout_param})\n h1_list.append(h1_features)\n\n X_sklearn = np.vstack(h1_list)\n y_sklearn = y_reshaped.reshape((-1))\n print('Finished')\n return X_sklearn, y_sklearn", "def train(self, inputs, targets, validation_data, num_epochs, regularizer_type=None):\n for k in xrange(num_epochs):\n loss = 0\n # Forward pass\n a1, probs = self._feed_forward(inputs)\n \n # Backpropagation\n dWxh, dWhy, dbh, dby = self._back_propagation(inputs, targets, a1, probs,len(inputs))\n\n # Perform the parameter update with gradient descent\n self.Wxh += -self.learning_rate * dWxh\n self.bh += -self.learning_rate * dbh\n self.Why += -self.learning_rate * dWhy\n self.by += -self.learning_rate * dby \n \n\n # validation using the validation data\n\n validation_inputs = validation_data[0]\n validation_targets = validation_data[1]\n\n print 'Validation'\n\n # Forward pass\n a1, probs = self._feed_forward(validation_inputs)\n\n # Backpropagation\n dWxh, dWhy, dbh, dby = self._back_propagation(validation_inputs, validation_targets, a1, probs,len(validation_inputs))\n\n if regularizer_type == 'L2':\n dWhy = self.reg_lambda * self.Why\n dWxh = self.reg_lambda * self.Wxh\n\n # Perform the parameter update with gradient descent\n self.Wxh += -self.learning_rate * dWxh\n self.bh += -self.learning_rate * dbh\n self.Why += -self.learning_rate * dWhy\n self.by += -self.learning_rate * dby \n\n if k%1 == 0:\n print \"Epoch \" + str(k) + \" : Loss = \" + str(self._calc_smooth_loss(loss, len(inputs), regularizer_type))\n\n #self.save('models.pkl')", "def _score_fn(self, unused_context_features, group_features, mode, unused_params, unused_config):\n with tf.compat.v1.name_scope(\"input_layer\"):\n group_input = [\n tf.compat.v1.layers.flatten(group_features[name])\n for name in sorted(self.example_feature_columns())\n ]\n\n # if self.sparse_features:\n # self.sparse_emb_inputlist = [\n # tf.compat.v1.layers.flatten(group_features[name])\n # for name in self.sparse_features\n # ]\n\n self.group_input = group_input\n input_layer = tf.concat(self.group_input, 1)\n tf.compat.v1.summary.scalar(\"input_sparsity\",\n tf.nn.zero_fraction(input_layer))\n tf.compat.v1.summary.scalar(\"input_max\",\n tf.reduce_max(input_tensor=input_layer))\n tf.compat.v1.summary.scalar(\"input_min\",\n tf.reduce_min(input_tensor=input_layer))\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n cur_layer = tf.compat.v1.layers.batch_normalization(\n input_layer, training=is_training)\n for i, layer_width in enumerate(int(d) for d in self.hidden_layer_dims):\n cur_layer = tf.compat.v1.layers.dense(cur_layer, units=layer_width)\n cur_layer = tf.compat.v1.layers.batch_normalization(\n cur_layer, training=is_training)\n cur_layer = tf.nn.relu(cur_layer)\n tf.compat.v1.summary.scalar(\"fully_connected_{}_sparsity\".format(i),\n tf.nn.zero_fraction(cur_layer))\n\n cur_layer = tf.compat.v1.layers.dropout(\n cur_layer, rate=self.dropout_rate, training=is_training)\n logits = tf.compat.v1.layers.dense(cur_layer, units=self.group_size)\n self.logits = logits\n\n if self._use_multi_head():\n # Duplicate the logits for both heads.\n return {_PRIMARY_HEAD: logits, _SECONDARY_HEAD: logits}\n else:\n return logits", "def score_features(self, features, predictor, cv_fold, verbose=0):\n # First we optimise the hyper parameters:\n # data has 4 keys but only 2 (x_train and y_train) will be used for the optimization\n best_params = optimize_hyper_parameters(features, predictor, cv_fold, verbose)\n 
predictor.set_hyper_parameters(best_params)\n\n # Then we fit the predictor:\n predictor.fit(features)\n\n # Afterwards, we generate the prediction\n y_pred = predictor.predict(features)\n\n # Finally, we compute the metrics:\n metric_res = score_prediction(features['y_test'], y_pred)\n\n self.predictor = predictor\n\n return metric_res, best_params", "def compute_score_fast(verbose=1):\n res = []\n\n batch = math.ceil(len(train) / LINEAR_ASSIGNMENT_SEGMENT_SIZE)\n for start in range(0, len(train), batch):\n end = min(len(train), start + batch)\n train_batch = train[start:end]\n\n features = branch_model.predict_generator(FeatureGen(train_batch, verbose=verbose), max_queue_size=12, workers=6, verbose=0)\n score = head_model.predict_generator(ScoreGen(features, verbose=verbose), max_queue_size=12, workers=6, verbose=0)\n score = score_reshape(score, features)\n\n res.append(score)\n\n return res", "def learn(self):\n Xt = np.append(np.ones((self.X.shape[0], 1)), self.X, axis=1)\n Yt = self.Y * 2 - 1\n\n w = np.ones(Xt.shape[1]) # avoiding random init, for debugging\n lw = [[] for k in range(len(w))]\n \n for iter in range(self.max_steps):\n P = Yt * np.dot(Xt, w)\n M = np.where(P <= 0)[0] # indices of misclassified datapoints\n\n if len(M) == 0: \n self.logger.debug(\"Found linearly separable hyperplane!\")\n break\n\n if self.is_stochastic:\n # just pick one randomly from M\n M = [M[random.randint(0, len(M)-1)]]\n\n grad = -1 * np.sum((Yt[M] * Xt[M].T), axis=1) / len(M)\n\n if self.reg_constant > 0:\n grad += self.reg_constant * w\n \n eta = self.step_size * 10000 / (10000 + iter)\n \n w = w - grad * eta\n \n if iter % 100 == 0:\n for k in range(len(w)):\n lw[k].append(w[k])\n \n if iter % 1000 == 0:\n self.logger.debug(\"Iter %s:\\t %f %f %f\" %(iter, w[0], w[1], w[2]))\n \n self.logger.debug(\"Iterations: %s\" %(iter))\n\n# x_range = range(len(lw[0]))\n# fig = plt.figure()\n# ax1 = fig.add_subplot(111) \n# for j, lwn in enumerate(lw):\n# if j % 3 >= 2: # plot an arbitrary subset of features\n# a = w[j]\n# ax1.plot(x_range, [(x-a) for x in lwn], label=str(j))\n# \n# plt.xlabel(\"Iteration\")\n# plt.ylabel(\"Feature weight\")\n# plt.show()\n \n #self.logger.debug(\"%s\" % np.array2string(w, precision=2, separator=','))\n \n self.w = w", "def fit(self, Y, STATUS, ntop=100, nrecent=100, nmax=400, ntopmu=100, ntopvar=100, nkmeans=300, nkeamnsdata=5000,\n lam=1e-6):\n X = self.X\n untested = [i for i in range(self.n) if STATUS[i] == 0]\n tested = [i for i in range(self.n) if STATUS[i] == 2]\n ytested = Y[tested].reshape(-1)\n self.y_max = np.max(ytested)\n # each 10 fits we update the hyperparameters, otherwise we just update the data which is a lot faster\n if np.mod(self.update_counter, self.updates_per_big_fit) == 0:\n print('fitting hyperparameters')\n # how many training points are there\n ntested = len(tested)\n # if more than nmax we will subsample and use the subsample to fit hyperparametesr\n if ntested > nmax:\n # subsample is uniion of 100 best points, 100 most recent points and then random points \n top = list(np.argsort(ytested)[-ntop:])\n recent = list(range(ntested - nrecent, ntested))\n topandrecent = list(set(top + recent))\n rand = list(\n np.random.choice([i for i in range(ntested) if i not in topandrecent], nmax - len(topandrecent),\n False))\n testedtrain = topandrecent + rand\n ytrain = ytested[testedtrain]\n train = [tested[i] for i in testedtrain]\n else:\n train = tested\n ytrain = ytested\n \n # use GPy code to fit hyperparameters to minimize NLL on train data\n mfy = 
GPy.mappings.Constant(input_dim=self.d, output_dim=1) # fit dense GPy model to this data\n ky = GPy.kern.RBF(self.d, ARD=True, lengthscale=np.ones(self.d))\n self.GP = GPy.models.GPRegression(X[train], ytrain.reshape(-1, 1), kernel=ky, mean_function=mfy)\n self.GP.optimize('bfgs')\n # strip out fitted hyperparameters from GPy model, because cant do high(ish) dim sparse inference\n self.mu = self.GP.flattened_parameters[0]\n self.a = self.GP.flattened_parameters[1]\n self.l = self.GP.flattened_parameters[2]\n self.b = self.GP.flattened_parameters[3]\n # selecting inducing points for sparse inference \n print('selecting inducing points')\n # get prediction from GPy model \n self.py = self.GP.predict(X)\n # points with 100 highest means\n topmu = [untested[i] for i in np.argsort(self.py[0][untested].reshape(-1))[-ntopmu:]]\n # points with 100 highest uncertatinty\n topvar = [untested[i] for i in np.argsort(self.py[1][untested].reshape(-1))[-ntopvar:]]\n # combine with train set above to give nystrom inducing points (inducing points that are also actual trainingdata points) \n nystrom = topmu + topvar + train\n # also get some inducing points spread throughout domain by using kmeans\n # kmeans is very slow on full dataset so choose random subset \n # also scale using length scales l so that kmeans uses approproate distance measure\n kms = KMeans(n_clusters=nkmeans, max_iter=5).fit(\n np.divide(X[list(np.random.choice(untested, nkeamnsdata))], self.l))\n # matrix of inducing points \n self.M = np.vstack((X[nystrom], np.multiply(kms.cluster_centers_, self.l)))\n # dragons...\n # email james.l.hook@gmail.com if this bit goes wrong!\n print('fitting sparse model')\n DXM = euclidean_distances(np.divide(X, self.l), np.divide(self.M, self.l), squared=True)\n self.SIG_XM = self.a * np.exp(-DXM / 2)\n DMM = euclidean_distances(np.divide(self.M, self.l), np.divide(self.M, self.l), squared=True)\n self.SIG_MM = self.a * np.exp(-DMM / 2) + np.identity(self.M.shape[0]) * lam * self.a\n self.B = self.a + self.b - np.sum(np.multiply(np.linalg.solve(self.SIG_MM, self.SIG_XM.T), self.SIG_XM.T),0)\n K = np.matmul(self.SIG_XM[tested].T, np.divide(self.SIG_XM[tested], self.B[tested].reshape(-1, 1)))\n self.SIG_MM_pos = self.SIG_MM - K + np.matmul(K, np.linalg.solve(K + self.SIG_MM, K))\n J = np.matmul(self.SIG_XM[tested].T, np.divide(ytested - self.mu, self.B[tested]))\n self.mu_M_pos = self.mu + J - np.matmul(K, np.linalg.solve(K + self.SIG_MM, J))\n else:\n K = np.matmul(self.SIG_XM[tested].T, np.divide(self.SIG_XM[tested], self.B[tested].reshape(-1, 1)))\n self.SIG_MM_pos = self.SIG_MM - K + np.matmul(K, np.linalg.solve(K + self.SIG_MM, K))\n J = np.matmul(self.SIG_XM[tested].T, np.divide(ytested - self.mu, self.B[tested]))\n self.mu_M_pos = self.mu + J - np.matmul(K, np.linalg.solve(K + self.SIG_MM, J))\n self.update_counter += 1\n \"\"\" \n key attributes updated by fit \n \n self.SIG_XM : prior covarience matrix between data and inducing points\n self.SIG_MM : prior covarience matrix at inducing points\n \n self.SIG_MM_pos : posterior covarience matrix at inducing points\n self.mu_M_pos : posterior mean at inducing points \n \n \"\"\"", "def learn(self, Xtrain, ytrain):\n numsamples = Xtrain.shape[0]\n Xless = Xtrain[:,self.params['features']]\n self.weights = np.dot(np.dot(np.linalg.inv(np.dot(Xless.T,Xless)/numsamples + (self.params['regwgt'] * np.identity(np.shape(Xless)[1]))), Xless.T),ytrain)/numsamples", "def train(self, X_train, Y_train, X_test = None, Y_test = None, epochs = 100, batch_size = 32, 
learning_rate = 0.005):\n m_train = X_train.shape[1]\n for epoch in range(epochs + 1):\n batch = np.arange(0, m_train)\n np.random.shuffle(batch)\n for k in range(m_train // batch_size + 1):\n if k * batch_size < m_train:\n X_mini_batch = X_train[:,batch[k * batch_size:(k + 1) * batch_size]]\n Y_mini_batch = Y_train[:,batch[k * batch_size:(k + 1) * batch_size]]\n self.update_weights(X_mini_batch, Y_mini_batch, learning_rate)\n \n if epoch % 10 == 0: \n # Loss function\n A2 = self.feedforward(X_train)\n cost = (1 / m_train) * np.sum(-np.multiply(Y_train, np.log(A2)) - np.multiply(1 - Y_train, np.log(1 - A2)))\n print(f\"epoch:{epoch}, Cost: {cost}, \", end = '')\n # Accutacy on training data\n if X_test is not None and Y_test is not None:\n A2_test = self.feedforward(X_test)\n class_pred = A2_test.argmax(axis = 0)\n class_actual = Y_test.argmax(axis = 0)\n acc = sum(class_actual == class_pred)\n print(f\"accuracy:{acc}/{X_test.shape[1]}\")", "def learning(\n cfg: OmegaConf,\n training_data_loader: torch.utils.data.DataLoader,\n validation_data_loader: torch.utils.data.DataLoader,\n model: SupervisedModel,\n) -> None:\n\n local_rank = cfg[\"distributed\"][\"local_rank\"]\n num_gpus = cfg[\"distributed\"][\"world_size\"]\n epochs = cfg[\"parameter\"][\"epochs\"]\n num_training_samples = len(training_data_loader.dataset.data)\n steps_per_epoch = int(num_training_samples / (cfg[\"experiment\"][\"batches\"] * num_gpus)) # because the drop=True\n total_steps = cfg[\"parameter\"][\"epochs\"] * steps_per_epoch\n warmup_steps = cfg[\"parameter\"][\"warmup_epochs\"] * steps_per_epoch\n current_step = 0\n\n best_metric = np.finfo(np.float64).max\n\n optimizer = torch.optim.SGD(\n params=model.parameters(),\n lr=calculate_initial_lr(cfg),\n momentum=cfg[\"parameter\"][\"momentum\"],\n nesterov=False,\n weight_decay=cfg[\"experiment\"][\"decay\"]\n )\n\n # https://github.com/google-research/simclr/blob/master/lars_optimizer.py#L26\n optimizer = LARC(optimizer=optimizer, trust_coefficient=0.001, clip=False)\n\n cos_lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n optimizer.optim,\n T_max=total_steps - warmup_steps,\n )\n\n for epoch in range(1, epochs + 1):\n # training\n model.train()\n training_data_loader.sampler.set_epoch(epoch)\n\n for data, targets in training_data_loader:\n # adjust learning rate by applying linear warming\n if current_step <= warmup_steps:\n lr = calculate_lr(cfg, warmup_steps, current_step)\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] = lr\n\n optimizer.zero_grad()\n data, targets = data.to(local_rank), targets.to(local_rank)\n unnormalized_features = model(data)\n loss = torch.nn.functional.cross_entropy(unnormalized_features, targets)\n loss.backward()\n optimizer.step()\n\n # adjust learning rate by applying cosine annealing\n if current_step > warmup_steps:\n cos_lr_scheduler.step()\n\n current_step += 1\n\n if local_rank == 0:\n logger_line = \"Epoch:{}/{} progress:{:.3f} loss:{:.3f}, lr:{:.7f}\".format(\n epoch, epochs, epoch / epochs, loss.item(), optimizer.param_groups[0][\"lr\"]\n )\n\n # During warmup phase, we skip validation\n sum_val_loss, num_val_corrects = validation(validation_data_loader, model, local_rank)\n\n torch.distributed.barrier()\n torch.distributed.reduce(sum_val_loss, dst=0)\n torch.distributed.reduce(num_val_corrects, dst=0)\n\n num_val_samples = len(validation_data_loader.dataset)\n\n # logging and save checkpoint\n if local_rank == 0:\n\n validation_loss = sum_val_loss.item() / num_val_samples\n validation_acc = 
num_val_corrects.item() / num_val_samples\n\n logging.info(logger_line + \" val loss:{:.3f}, val acc:{:.2f}%\".format(validation_loss, validation_acc * 100.))\n\n if cfg[\"parameter\"][\"metric\"] == \"loss\":\n metric = validation_loss\n else:\n metric = 1. - validation_acc\n\n if metric <= best_metric:\n if \"save_fname\" in locals():\n if os.path.exists(save_fname):\n os.remove(save_fname)\n\n save_fname = \"epoch={}-{}\".format(epoch, cfg[\"experiment\"][\"output_model_name\"])\n torch.save(model.state_dict(), save_fname)", "def train(self, X, y):", "def learn(self, Xtrain, ytrain):\n # Ensure ytrain is {-1,1}\n yt = np.copy(ytrain)\n yt[yt == 0] = -1\n\n # Dividing by numsamples before adding ridge regularization\n # for additional stability; this also makes the\n # regularization parameter not dependent on numsamples\n # if want regularization disappear with more samples, must pass\n # such a regularization parameter lambda/t\n numsamples = Xtrain.shape[0]\n self.weights = np.dot(np.dot(np.linalg.pinv(np.add(np.dot(Xtrain.T,Xtrain)/numsamples,self.params['regwgt']*np.identity(Xtrain.shape[1]))), Xtrain.T),yt)/numsamples", "def training(self):\r\n self.model, self.voc = svm_clf_training('all', self.dataset)\r\n return 0", "def epoch_train(self, rng_key, svi_state):\n def body_fn(i, val):\n rng_key_i = random.fold_in(rng_key, i)\n rng_key_i, rng_key_ls, rng_key_var, rng_key_sigma = random.split(rng_key_i, 4)\n\n loss_sum, svi_state = val # val -- svi_state\n\n # directly draw sample from the GP for a random lengthscale\n length_i = numpyro.sample(\"length\", dist.InverseGamma(1,.1), rng_key=rng_key_ls)\n var_i = numpyro.sample(\"var\", dist.LogNormal(0,0.1), rng_key=rng_key_var)\n sigma_i = numpyro.sample(\"noise\", dist.HalfNormal(0.1), rng_key=rng_key_sigma)\n batch = self.gp_predictive(rng_key_i, self.x\n , ls=length_i, var=var_i, sigma=sigma_i\n )\n\n # `update` returns (svi_state, loss)\n svi_state, loss = self.svi.update(svi_state, batch['y']) \n loss_sum += loss # / self.batch_size\n return loss_sum, svi_state\n\n return lax.fori_loop(0, self.num_train, body_fn, (0.0, svi_state))", "def fit(self, model, dl_train, dl_val, verbose=True):\n\n self.verbose = verbose\n\n optimizer = model.configure_optimizers()\n train_loss_epochs = []\n train_acc_epochs = []\n val_acc_epochs = []\n train_time = 0.0\n val_time = 0.0\n\n for e in range(self.nb_epochs):\n loss_train = []\n acc_train = []\n for batch_idx, batch in enumerate(dl_train):\n start_time = time.time()\n model.train()\n optimizer.zero_grad()\n loss, acc = model.training_step(batch, batch_idx)\n loss.backward()\n optimizer.step()\n end_time = time.time()\n train_time_batch = end_time - start_time\n train_time += train_time_batch\n\n loss_train.append(loss.item())\n acc_train.append(acc)\n\n loss_val = []\n acc_val = []\n\n if self.verbose:\n for batch_idx, batch in enumerate(dl_val):\n model.eval()\n start_time = time.time()\n with torch.no_grad():\n loss, acc = model.validation_step(batch, batch_idx)\n end_time = time.time()\n val_time_batch = end_time - start_time\n val_time += val_time_batch\n loss_val.append(loss.item())\n acc_val.append(acc)\n avg_loss_train = round(sum(loss_train) / len(loss_train), 2)\n avg_acc_train = round(sum(acc_train) / len(acc_train), 2)\n train_loss_epochs.append(avg_loss_train)\n train_acc_epochs.append(avg_acc_train)\n\n avg_loss_val = round(sum(loss_val) / len(loss_val), 2)\n avg_acc_val = round(sum(acc_val) / len(acc_val), 2)\n val_acc_epochs.append(avg_acc_val)\n print(\n f\"# Epoch 
{e+1}/{self.nb_epochs}:\\t loss={avg_loss_train}\\t loss_val={avg_loss_val}\\t acc_val={avg_acc_val}\"\n )\n\n # Write to tensor board\n # self.tb.add_scalar(\"Training loss\", avg_loss_train, e)\n # self.tb.add_scalar(\"Training accuracy\", avg_acc_train, e)\n # self.tb.add_scalar(\"Validation loss\", avg_loss_val, e)\n # self.tb.add_scalar(\"Validation accuracy\", avg_acc_val, e)\n\n # self.tb.close()\n end_time = time.time()\n\n print(f\"train time {train_time} s\")\n\n if self.verbose:\n print(f\"validation time {val_time} s\")\n return train_loss_epochs, train_acc_epochs, val_acc_epochs", "def _train(self):\n self.network.train() # note that here we are calling torch.nn.Module train class method\n epochs_since_improvement = 0\n best_params = None\n self.calculate_validation_loss()\n best_validation_loss = self.validation_average_loss\n\n while epochs_since_improvement < 10:\n self.train_epoch()\n self.calculate_validation_loss()\n if self.validation_average_loss < best_validation_loss:\n epochs_since_improvement = 0\n best_validation_loss = self.validation_average_loss\n best_params = self.network.state_dict()\n else:\n epochs_since_improvement += 1\n LOGGER.info(\"Epochs since improvement in validation_loss: {} \\n\".format(epochs_since_improvement))\n if self.maximum_epochs_allowed is not None and self.epochs_trained >= self.maximum_epochs_allowed:\n break\n LOGGER.info(\"Training complete after {} epochs \\n\".format(self.epochs_trained))\n LOGGER.info(\"Best training loss achieved: {} \\n\".format(self.training_average_loss))\n LOGGER.info(\"Best validation loss achieved: {}\".format(self.validation_average_loss))\n self.learned_params = best_params\n self.network.load_state_dict(best_params)", "def run_CV(X,y,model,func, n_splits = 3, how = 'up', categorical = 'label_encoder'):\n logloss = []\n skf = StratifiedKFold(n_splits = n_splits, random_state = 144)\n for i, (train_idx, val_idx) in enumerate(skf.split(X,y)):\n X_train, X_val = X.iloc[train_idx], X.iloc[val_idx]\n y_train, y_val = y[train_idx], y[val_idx]\n\n # # SMOTE\n # X_train = X_train.drop('poor', axis = 1) # drop target\n # cat_columns = X_train.select_dtypes(['object']).columns\n # X_train[cat_columns] = X_train[cat_columns].apply(LabelEncoder().fit_transform)\n # orig_cols = X_train.columns # SMOTE will return a numpy array. 
Store the column names here to recreate the dataframe for feature engineering/transforms below\n # X_train, y_train = SMOTE().fit_sample(X_train,y_train)\n # # recreate dataframe\n # X_train = pd.DataFrame(X_train, columns = orig_cols)\n\n if how is not None:\n # resample to balance data\n X_resampled = resample_data(X_train, how = how)\n # store the targets now that they are balanced\n y_train = X_resampled['poor']\n # drop target from train\n X_train = X_resampled.drop('poor', axis = 1)\n X_val.drop('poor', axis = 1, inplace = True)\n # print(X_val.columns.values)\n ####### feature engineering goes blow this comment:\n \n func(X_train)\n func(X_val)\n \n ###### end feature eng\n X_train = pre_process_data(X_train, normalize_num='standardize', categorical = categorical)\n assert X_train.shape[0] == y_train.shape[0]\n\n model.fit(X_train, y_train)\n # standardize X_val to predict\n X_val = pre_process_data(X_val,normalize_num= 'standardize', enforce_cols=X_train.columns, categorical = categorical)\n preds = model.predict_proba(X_val)\n \n logloss.append(log_loss(y_val, preds[:,1]))\n \n return logloss", "def _fit_and_score(estimator, X, modality, y, scorer, train, test, verbose,\n parameters, return_train_score=False,\n return_parameters=False, return_n_test_samples=False,\n return_times=False):\n\n X = X[modality]\n\n # Adjust length of sample weights\n # fit_params = fit_params if fit_params is not None else {}\n # fit_params = dict([(k, _index_param_value(X, v, train))\n # for k, v in fit_params.items()])\n\n train_scores = {}\n if parameters is not None:\n estimator.set_params(**parameters)\n\n start_time = time.time()\n\n X_train, y_train = _safe_split(estimator, X, y, train)\n X_test, y_test = _safe_split(estimator, X, y, test, train)\n\n valid_train = [i for i, x in enumerate(X_train) if ~np.any(np.isnan(x))]\n X_train = [x for i, x in enumerate(X_train) if i in valid_train]\n y_train = [y_ for i, y_ in enumerate(y_train) if i in valid_train]\n valid_test = [i for i, x in enumerate(X_test) if ~np.any(np.isnan(x))]\n X_test = [x for i, x in enumerate(X_test) if i in valid_test]\n y_test = [y_ for i, y_ in enumerate(y_test) if i in valid_test]\n\n is_multimetric = not callable(scorer)\n\n if y_train is None:\n # estimator.fit(X_train, **fit_params)\n estimator.fit(X_train)\n else:\n # estimator.fit(X_train, y_train, **fit_params)\n estimator.fit(X_train, y_train)\n\n fit_time = time.time() - start_time\n # _score will return dict if is_multimetric is True\n if y_test:\n test_scores = _score(estimator, X_test, y_test, scorer, is_multimetric)\n else:\n test_scores = dict(score=np.nan)\n\n score_time = time.time() - start_time - fit_time\n if return_train_score:\n train_scores = _score(estimator, X_train, y_train, scorer,\n is_multimetric)\n\n ret = [train_scores, test_scores] if return_train_score else [test_scores]\n\n if return_n_test_samples:\n ret.append(_num_samples(X_test))\n if return_times:\n ret.extend([fit_time, score_time])\n if return_parameters:\n ret.append(parameters)\n return ret", "def _cross_valid_feature(max_fold, num_movies, num_users, lamda, subplt):\n loss_train_vector = [.0]*31\n loss_cross_vector = [.0]*31\n cost_train_per_fold = [.0]*max_fold\n cost_cross_per_fold = [.0]*max_fold\n for i in range(1, 31, 1):\n for k in range(1, max_fold + 1, 1):\n cost_train_per_fold[k-1], cost_cross_per_fold[k-1] = (\n _cross_valid_fold(k, num_movies, num_users,\n i, lamda))\n loss_train_vector[i] = np.mean(cost_train_per_fold)\n loss_cross_vector[i] = 
np.mean(cost_cross_per_fold)\n\n # draw the Loss v.s num_feature graph\n subplt.plot(loss_train_vector, \"r\")\n subplt.plot(loss_cross_vector, \"b\")\n v1 = np.array(loss_cross_vector)\n v2 = np.array(loss_train_vector)\n v3 = v1 + v2\n sel_feature = np.argmin(v3[1:]) + 1\n subplt.plot(v3, \"g\", label=\"lambda=\"+str(lamda))\n plt.axis([1, 30, 0, 1.2*max(v3)])\n return sel_feature", "def cross_validate_model(self, X_train, y_train):\n\n\t\t# Build a stratified k-fold cross-validator object\n\t\tskf = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)\n\n\t\t'''\n\t\tEvaluate the score by cross-validation\n\t\tThis fits the classification model on the training data, according to the cross-validator\n\t\tand reports the scores.\n\t\tAlternative: sklearn.model_selection.cross_validate\n\t\t'''\n\t\tscores = cross_val_score(self.classifier, X_train, y_train, scoring='accuracy', cv=skf)\n\n\t\tprint(\"%.2f seconds: Cross-validation finished\" % time.process_time())\n\n\t\t# Log the cross-validation scores, the mean score and the 95% confidence interval, according to:\n\t\t# http://scikit-learn.org/stable/modules/cross_validation.html#computing-cross-validated-metrics\n\t\t# https://en.wikipedia.org/wiki/Standard_error#Assumptions_and_usage\n\t\t# print(\"Scores = %s\" % scores)\n\t\t# print(\"Accuracy: %0.2f (±%0.2f)\" % (scores.mean()*100, scores.std()*2*100))\n\t\t# ↳ https://docs.scipy.org/doc/numpy/reference/generated/numpy.std.html", "def train(self, warning=True):\n self.learner = ActiveLearner(estimator=self.model, X_training=self.x_t, y_training=self.y_t)\n # Evaluate zero-point performance\n self.evaluate(warning=warning)" ]
[ "0.654589", "0.64421695", "0.63593584", "0.63469756", "0.62618685", "0.62167436", "0.6188725", "0.6184751", "0.6142719", "0.6141688", "0.60937035", "0.60724396", "0.6038608", "0.6014961", "0.5990709", "0.59876823", "0.59866655", "0.59690255", "0.5963425", "0.5955654", "0.59464085", "0.5944214", "0.5943549", "0.59426737", "0.5939648", "0.59261805", "0.59038794", "0.5903101", "0.589493", "0.5889993", "0.5888294", "0.58876777", "0.5887593", "0.5887448", "0.58868784", "0.5881668", "0.5859218", "0.5857252", "0.5851073", "0.584552", "0.584257", "0.5840434", "0.5824812", "0.5824385", "0.5821228", "0.5817255", "0.58162266", "0.5806092", "0.58057076", "0.58001816", "0.5796467", "0.5788079", "0.5787368", "0.5783551", "0.57832247", "0.5782509", "0.57811034", "0.5776429", "0.577444", "0.5771535", "0.57710105", "0.5770706", "0.57674295", "0.57616526", "0.576106", "0.5760657", "0.57593024", "0.57558745", "0.5754845", "0.57547027", "0.57504165", "0.5749616", "0.5746822", "0.5745824", "0.5745024", "0.57439613", "0.5739091", "0.57383555", "0.5736953", "0.5736632", "0.5730852", "0.5725392", "0.5724992", "0.5718114", "0.5715448", "0.5711391", "0.5711234", "0.57108593", "0.57080257", "0.57057697", "0.570428", "0.5703256", "0.56993943", "0.56962025", "0.5695346", "0.5692707", "0.5692269", "0.5691251", "0.56889415", "0.568859", "0.56880444" ]
0.0
-1
Generate the predictions of the original model on training and validation datasets. The original model is also trained if train = True.
def generate_post_preds(train = True):
    x_train, y_train, x_val, y_val = np.load('data/x_train_new.npy'),np.load('data/y_train.npy'),np.load('data/x_val_new.npy'),np.load('data/y_val.npy')
    with open('data/id_to_word.pkl','rb') as f:
        id_to_word = pickle.load(f)
    model = create_original_model()

    if train:
        filepath="./models/post.hdf5"
        checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
        callbacks_list = [checkpoint]
        model.fit(x_train, y_train, validation_data=(x_val, y_val),callbacks = callbacks_list, epochs=epochs, batch_size=batch_size)

    model.load_weights('./models/post.hdf5', by_name=True)

    pred_train = model.predict(x_train,verbose = 1, batch_size = 1000)
    pred_val = model.predict(x_val,verbose = 1, batch_size = 1000)
    if not train:
        print('The val accuracy is {}'.format(calculate_acc(pred_val,y_val)))
        print('The train accuracy is {}'.format(calculate_acc(pred_train,y_train)))
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "def generate_original_preds(train = True):\n x_train, y_train, x_val, y_val, id_to_word = load_data() \n model = create_original_model()\n\n if train:\n filepath=\"models/original.hdf5\"\n checkpoint = ModelCheckpoint(filepath, monitor='val_acc', \n verbose=1, save_best_only=True, mode='max')\n callbacks_list = [checkpoint]\n model.fit(x_train, y_train, validation_data=(x_val, y_val),callbacks = callbacks_list, epochs=epochs, batch_size=batch_size)\n\n model.load_weights('./models/original.hdf5', \n by_name=True) \n\n pred_train = model.predict(x_train,verbose = 1, batch_size = 1000)\n pred_val = model.predict(x_val,verbose = 1, batch_size = 1000)\n if not train:\n print('The val accuracy is {}'.format(calculate_acc(pred_val,y_val)))\n print('The train accuracy is {}'.format(calculate_acc(pred_train,y_train)))\n\n\n np.save('data/pred_train.npy', pred_train)\n np.save('data/pred_val.npy', pred_val)", "def fit_predict(self):\n self.classifier = self.model\n self.classifier.fit(self.X_sample, self.y_sample)\n self.y_pred = self.classifier.predict(self.X_test)", "def get_predictions(fitted_model_filename):\n click.echo(\"Mode: predicting probabilities.\\n\")\n defaults = get_defaults()\n\n fitted_model_filename = add_extension(fitted_model_filename)\n fitted_model_path = os.path.join(defaults.OUTPUT.FITTED_MODELS_PATH, fitted_model_filename)\n new_options = [\"OUTPUT.FITTED_MODEL_PATH\", fitted_model_path]\n\n # boot_data = bootstrap(new_options, mode=\"internal_test\")\n # model = boot_data['model']\n #\n # X_test_int, y_test_int = boot_data['data']\n # internal_test_proba = model.predict_proba(X_test_int)\n # internal_test_proba = np.c_[y_test_int, internal_test_proba[:, 1]]\n\n boot_data = bootstrap(new_options, mode=\"external_test\")\n model = boot_data['model']\n X_test_ext, y_test_ext = boot_data['data']\n\n # fit scaler on train data and transform test data\n scaler = StandardScaler()\n X_train, y_train = load_data(defaults, which='train')\n\n numeric_cols = X_train.select_dtypes(include=np.float64).columns.tolist()\n scaler.fit(X_train[numeric_cols])\n X_test_ext.loc[:, numeric_cols] = scaler.transform(X_test_ext[numeric_cols])\n\n external_test_proba = model.predict_proba(X_test_ext)\n external_test_proba = np.c_[y_test_ext, external_test_proba[:, 1]]\n\n # internal_test_results_path = os.path.join(defaults.OUTPUT.PREDS_PATH, \"internal_test_preds.csv\")\n external_test_results_path = os.path.join(defaults.OUTPUT.PREDS_PATH,\n f\"external_test_preds_{fitted_model_filename.replace('.pkl', '')}.csv\")\n # pd.DataFrame(internal_test_proba, columns=['target', 'proba']).to_csv(internal_test_results_path, index=False)\n pd.DataFrame(external_test_proba, columns=['target', 'proba']).to_csv(external_test_results_path, index=False)", "def make_prediction(x_train, y_train, x_test, model):\n model.fit(x_train, y_train)\n y_predict = model.predict(x_test)\n return y_predict", "def predict(self):\n batch = get_predict_batch(1, num_rec_out=self.num_test_rec)\n self.g_model.test_batch(\n batch, self.global_step, num_rec_out=self.num_test_rec)", "def predict(self, test_batch_size=64, device='cuda', load=False, model_path=None, dataloader_num_workers=4, save_prediction=True):\n self.model.eval()\n self.device = device\n self.test_batch_size = test_batch_size\n if load:\n if model_path:\n self.load(model_path, device=self.device)\n else:\n model_path = os.path.join(path_checkpoints_dir, f\"{self.experiment_id}.pth\")\n print(f\"loaded model={model_path}\")\n self.load(model_path, 
device=self.device)\n if self.model is None:\n raise Exception(\"model cannot be None. Load or train the model before inference\")\n dataloader = self.data_module.get_test_dataloader(batch_size=self.test_batch_size, shuffle=False, num_workers=dataloader_num_workers)\n all_outputs = []\n tk0 = tqdm(enumerate(dataloader, 1), total=len(dataloader))\n for batch_id, data in tk0:\n for key, value in data.items():\n data[key] = value.to(self.device)\n # batch_outputs, batch_loss = self.model(**data)\n batch_outputs, batch_loss= self.validate_one_batch(data)\n all_outputs.append(batch_outputs.detach().cpu().numpy())\n predictions = np.concatenate(all_outputs, axis=0)\n if save_prediction:\n submission = pd.read_csv(path_sample_submission_file)\n assert submission.shape[0] == predictions.shape[0], \"unexpected behavior.code fix required\"\n submission.iloc[:, 1:] = predictions\n\n if not os.path.isdir(path_submissions_dir):\n os.mkdir(path_submissions_dir)\n submission.to_csv(os.path.join(path_submissions_dir, f\"{self.experiment_id}.csv\"), index=False)\n tk0.close()\n return predictions", "def _predict(self, test_dl: torch.utils.data.DataLoader) -> torch.Tensor:\n\n # Initialize an empty tensor to store the predicted output\n output = torch.tensor([]).to(cfg.training.device)\n # Set the model to evaluation mode (disables gradient computation and dropout)\n self.eval()\n # Disable gradient tracking for efficiency\n with torch.no_grad():\n # Iterate over the test data loader\n for x_batch in test_dl:\n # Move the batch to the appropriate device\n x_batch = x_batch.to(cfg.training.device)\n # Forward pass to obtain model predictions\n y_star = self.forward(x_batch)\n # Concatenate the predictions to the output tensor\n output = torch.cat((output, y_star), 0)\n\n # Return the tensor containing the predicted output\n return output", "def predict(model, X_testing):\n predictions = model.predict(X_testing)\n\n return predictions", "def predict(self):\n self.predicted_test_summary = []\n for step in xrange(0, self.test_size // self.test_batch_size):\n print 'Predicting Batch No.:', step\n offset = (step * self.test_batch_size) % self.test_size\n batch_data_fwd = self.X_tst_fwd[offset:(offset + self.test_batch_size), :].T\n batch_data_bwd = self.X_tst_bwd[offset:(offset + self.test_batch_size), :].T\n summary_test_out = self._predict_batch(batch_data_fwd, batch_data_bwd)\n self.predicted_test_summary.extend(summary_test_out)\n\n print 'Prediction Complete. 
Moving Forward..'\n\n # test answers\n self.test_review = self.X_tst_fwd\n self.predicted_test_summary = self.predicted_test_summary\n self.true_summary = self.Y_tst", "def predict(self): \n return self.model.predict(self.test_x)", "def test(self) -> None:\n\n self._predictions = self._lr.predict(self._X_test)", "def predict(self, x_test, y_test, model_path):\n tf.reset_default_graph()\n with tf.compat.v1.Session() as sess:\n saver = tf.compat.v1.train.import_meta_graph(model_path + \".meta\")\n saver.restore(sess, model_path)\n graph = tf.compat.v1.get_default_graph()\n x = graph.get_operation_by_name(\"x_input\").outputs[0]\n y = tf.compat.v1.get_collection(\"network_architecture\")[0]\n no_samples = x_test.shape[0]\n predictions = []\n n_iteration = no_samples // self.batch_size\n for step in range(n_iteration):\n x_batch, y_batch = get_batch_data(x_test, y_test, iter_step=step, batch_size=self.batch_size)\n preds = sess.run(y, feed_dict={x: x_batch})\n predictions.append(preds)\n return predictions", "def predict():\n\n predict_cfg = get_predict_args()\n device = get_device()\n print(device)\n\n # load checkpoint\n ckpt_path = find_ckpt_in_directory(predict_cfg.ckpt)\n ckpt = torch.load(ckpt_path, map_location=device)\n best_iter = ckpt[\"best_iter\"]\n cfg = ckpt[\"cfg\"]\n aspect = cfg[\"aspect\"]\n\n for k, v in cfg.items():\n print(\"{:20} : {:10}\".format(k, str(v)))\n\n eval_batch_size = 64\n\n print(\"Loading data\")\n dev_data = list(beer_reader(cfg[\"dev_path\"]))\n test_data = beer_annotations_reader(cfg[\"test_path\"], aspect=aspect)\n\n print(\"dev\", len(dev_data))\n print(\"test\", len(test_data))\n\n print(\"Loading pre-trained word embeddings\")\n vocab = Vocabulary()\n vectors = load_embeddings(cfg[\"embeddings\"], vocab) # required for vocab\n\n # build model\n model = build_model(cfg[\"model\"], vocab, cfg=cfg)\n\n # load parameters from checkpoint into model\n print(\"Loading saved model..\")\n model.load_state_dict(ckpt[\"state_dict\"])\n model.to(device)\n print(\"Done\")\n\n print(model)\n print_parameters(model)\n\n print(\"Evaluating\")\n dev_eval = evaluate_loss(\n model, dev_data, batch_size=eval_batch_size,\n device=device, cfg=cfg)\n test_eval = evaluate_loss(\n model, test_data, batch_size=eval_batch_size,\n device=device, cfg=cfg)\n\n if hasattr(model, \"z\"):\n path = os.path.join(\n cfg[\"save_path\"], \"final_rationales.txt\")\n test_precision, test_macro_prec = evaluate_rationale(\n model, test_data, aspect=aspect, device=device,\n batch_size=eval_batch_size, path=path)\n else:\n test_precision = 0.\n test_macro_prec = 0.\n test_eval[\"precision\"] = test_precision\n test_eval[\"macro_precision\"] = test_macro_prec\n\n dev_s = make_kv_string(dev_eval)\n test_s = make_kv_string(test_eval)\n\n print(\"best model iter {:d} dev {} test {}\".format(\n best_iter, dev_s, test_s))", "def fit_and_get_test_predictions(self, trace, tuning=True):\n pass", "def predict(self, data, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. Run build method or load model before fitting')\n return self.model.predict(data, 1, verbose) # ,steps)", "def predict(self, data, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. 
Run build method or load model before fitting')\n return self.model.predict(data, 1, verbose) # ,steps)", "def predict(self, test_dataset: Dataset) -> PredictionOutput:\n test_dataloader = self.get_test_dataloader(test_dataset)\n return self._prediction_loop(test_dataloader, description=\"Prediction\")", "def predict(self, model, x_test):\n pass", "def trainAndPredict(self):\r\n print(\"train\")\r\n filename= 'finalized_model.sav'\r\n # train the algorithm on training data and predict using the testing data\r\n model = self.svc_model.fit(self.X.T, self.Y)\r\n pickle.dump(model, open(filename, 'wb'))\r\n #model = pickle.load(open(filename, 'rb'))\r\n pred1 =model.predict(self.TestSet.T)\r\n # print the accuracy score of the model\r\n print(\"LinearSVC accuracy : \", accuracy_score(self.TestSetY, pred1, normalize=True))", "def predict(self, X_test):\n return self.model.predict(X_test)", "def _fit_predict(X_train, y_train, X_test):\n raise NotImplementedError()", "def predict(config: Config, device: torch.device, resume: Optional[ResumeInfo]) -> None:\n # pylint: disable=too-many-locals\n # Load datasets\n print(colored(\"loading training datasets:\", attrs=[\"bold\"]))\n dataset_factory = DatasetFactory()\n datasets, preprocessors = dataset_factory.create(config)\n print(f\"train: {len(datasets.train)}\")\n print(f\"val: {len(datasets.val)}\")\n print(f\"test: {len(datasets.test)}\")\n\n print(colored(\"saving question ids:\", attrs=[\"bold\"]))\n split_map = {\n \"train\": (config.training.data.train, datasets.train),\n \"val\": (config.training.data.val, datasets.val),\n \"test\": (config.training.data.test, datasets.test),\n }\n for split, (dataconfig, dataset) in split_map.items():\n root = Path(wandb.run.dir) / \"predictions\"\n if not root.exists():\n root.mkdir(parents=True)\n path = root / f\"{split}_ids.json\"\n start = int(dataconfig.subset[0] * len(dataset))\n end = int(dataconfig.subset[1] * len(dataset))\n subset = torch.utils.data.Subset(dataset, range(start, end))\n ids = [subset[i][\"question\"][\"questionId\"] for i in range(len(subset))]\n with open(path, \"w\") as file:\n json.dump(ids, file)\n\n # Create model runner\n print(colored(\"model:\", attrs=[\"bold\"]))\n runner_factory = RunnerFactory()\n runner = runner_factory.create(config, device, preprocessors, datasets, resume)\n print(f\"{runner.model=}\")\n\n print(colored(\"loading prediction datasets:\", attrs=[\"bold\"]))\n dataset_factory = DatasetFactory(training=False)\n datasets, pred_preprocessors = dataset_factory.create(config)\n print(f\"train: {len(datasets.train)}\")\n print(f\"val: {len(datasets.val)}\")\n print(f\"test: {len(datasets.test)}\")\n\n # Extend question embedding dictionary with pad vector for OOV.\n # The runner will check if a question token index is out of bounds and\n # set it to the padding index if so.\n runner.model.question_embeddings = torch.nn.Embedding.from_pretrained(\n torch.cat(\n (\n runner.model.question_embeddings.weight.data,\n torch.zeros(\n (\n len(pred_preprocessors.questions.index_to_word)\n - runner.model.question_embeddings.num_embeddings,\n runner.model.question_embeddings.embedding_dim,\n )\n ).to(device),\n ),\n dim=0,\n )\n )\n # Update datasets and preprocessors for prediction\n runner.datasets = datasets\n runner.preprocessors = pred_preprocessors\n\n print(colored(\"predicting:\", attrs=[\"bold\"]))\n runner.predict()", "def train_predict(descriptions_models,\n X_train, y_train,\n X_valid, y_valid,\n scoring=None):\n\n results = []\n for description, model in 
descriptions_models:\n\n scorer = check_scoring(model, scoring=scoring)\n result = {'description': description}\n\n # Train\n start = time.time()\n model.fit(X_train, y_train)\n result['time_train'] = time.time() - start\n\n # Predict train\n start = time.time()\n result['score_train'] = scorer(model, X_train, y_train)\n result['time_predict_train'] = time.time() - start\n\n # Predict validation\n start = time.time()\n result['score_valid'] = scorer(model, X_valid, y_valid)\n result['time_predict_valid'] = time.time() - start\n\n results.append(result)\n\n return pd.DataFrame(results)[[\n 'description', 'score_train', 'score_valid',\n 'time_train', 'time_predict_train', 'time_predict_valid']]", "def fit_and_predict(self, X_train, y_train, X_test, y_test):\n if self.feature_transform_func:\n X_train, X_test = self.feature_transform_func(X_train, X_test)\n\n self.fit(X_train, y_train)\n y_predict = self.predict(X_test)\n return self.Acu_eval(y_predict, y_test)", "def after_pred(self):\n # If training then skip\n if self.training:\n return\n\n # Get ground truths in epoch 0 i.e. start of training\n if self.epoch == 0:\n self.y_true.extend(self.y.cpu().flatten().numpy())\n\n # Get predictions from each batch and add them to prediction container\n y_pred = self.pred.detach().cpu()\n \n self.y_pred.extend(y_pred.flatten().numpy())", "def predict(self, test_data):\n return self.leader.predict(test_data)", "def make_predictions(self):\n if is_classification(self.model):\n if self.ct == None:\n prediction = self.model.predict(self.input_data.to_numpy())\n probabilities = self.model.predict_proba(self.input_data.to_numpy())\n return prediction, probabilities\n elif self.ct != None: \n prediction = self.model.predict(self.data_into_model())\n probabilities = self.model.predict_proba(self.data_into_model())\n return prediction, probabilities\n else:\n raise Exception((\"{} not supported. Please create an issue on Github\").format(self.model))\n \n else:\n if self.ct == None:\n prediction = self.model.predict(self.input_data)\n return prediction\n elif self.ct != None: \n prediction = self.model.predict(self.data_into_model())\n return prediction\n else:\n raise Exception((\"{} not supported. 
Please create an issue on Github\").format(self.self.model))", "def predict(self, test_data):\n random.seed(self.seed)\n preds = [{\"id\": instance['id'], \"prediction\": random.choice([0, 1])} for instance in test_data]\n return preds", "def make_predictions(self):\n \n self.Y = self.X.dot(self.w)", "def predict(self):\n path = self._artifact_repo.artifact_path(self._ARTIFACT_MODEL)\n model = tf.keras.models.load_model(path)\n\n _, _, x_test, y_test = self._load_data()\n x_test = tf.keras.utils.normalize(x_test, axis=1)\n\n preds = model.predict(x_test)\n self._show_cf_matrix(np.array([np.argmax(probas) for probas in preds]), y_test)", "def get_initial_predictions(tuner, input_data, output_path, model_save_name):\n\n best_model = tuner.best_estimator()\n batch_job = best_model.transformer(1, \"ml.m5.large\", output_path=output_path.as_uri(),\n model_name=model_save_name)\n batch_job.transform(input_data.as_uri())\n # TODO: Do an ls first so we can get any/all files\n output_file = output_path / 'validation.csv.out'\n with smart.open(output_file.as_uri(), 'r', transport_params={'session': boto_session}) as f:\n predictions = pd.read_csv(f, header=None)\n return predictions", "def predict(trained_model, X_test, y_test, image_name):\n if MODEL == 1:\n return predict_1(trained_model, X_test, y_test)\n elif MODEL == 3:\n if CROSS_VALIDATION:\n return cv_predict_3(trained_model, X_test, y_test)\n else:\n return predict_3(trained_model, X_test, y_test, image_name)\n elif MODEL == 2:\n return predict_2(trained_model, X_test, y_test)\n else:\n # For models 4, 5 and 6\n return predict_4(trained_model, X_test, y_test)", "def predict(self, test_data):\r\n return self.gs.predict(test_data)", "def fit_predict_model(self, X_train, y_train, X_test, pipeline):\n\n pipeline.fit(X_train, y_train)\n y_pred = pipeline.predict(X_test)\n return y_pred", "def test():\n # load dataset and model\n X, observed_y = load_data('../data/dev.txt')\n\n model = pickle.load(open('test.model', 'rb'))\n model.traverse()\n\n # predict labels for dataset\n preds = model.predict(X)\n\n # print(preds)\n # output model predictions\n np.savetxt('test.predictions', preds, fmt='%s')", "def _predict_preproc_model(self, model_cfg, model,):\n model = self._make_model(model_cfg['model_name'], databunch=self._data)\n model.model_param = model_cfg['model_param']\n model.wrapper_params = model_cfg['wrapper_params']\n return(model)", "def predict(self, scenes, tmp_dir):\n self.backend.load_model(tmp_dir)\n\n for scene in scenes:\n with scene.activate():\n labels = self.predict_scene(scene, tmp_dir)\n label_store = scene.prediction_label_store\n label_store.save(labels)\n\n if self.config.debug and self.config.predict_debug_uri:\n self.save_debug_predict_image(\n scene, self.config.predict_debug_uri)", "def predict_1(trained_model, X_test, y_test):\n # Predict with test data\n start_time = timeit.default_timer()\n test_prediction = trained_model.predict(X_test)\n end_time = timeit.default_timer()\n time = end_time - start_time\n speed = int(X_test.shape[0] / time)\n \n # Get loss and accuracy\n test_loss, test_accuracy = trained_model.evaluate(X_test, y_test)\n \n # Prepare results messages\n msg_time = \"prediction time: {:.3f}s ({}px/s)\\n\".format(time, speed)\n msg_test_loss = \"test_loss: {:.3f}\\n\".format(test_loss)\n msg_test_acc = \"test_accuracy: {:.3f}\\n\\n\".format(test_accuracy)\n \n # Write results messages\n with open(OUTPUT_FILE, 'a') as f:\n f.write(msg_time)\n f.write(msg_test_loss)\n f.write(msg_test_acc)", "def 
predict(self, test_inputs, batch_size=None):\n if batch_size is None:\n num_batches = 1\n else:\n num_batches = util.ceil_divide(test_inputs.shape[0], batch_size)\n\n test_inputs = np.array_split(test_inputs, num_batches)\n pred_means = util.init_list(0.0, [num_batches])\n pred_vars = util.init_list(0.0, [num_batches])\n for i in range(num_batches):\n pred_means[i], pred_vars[i] = self.session.run(\n self.predictions, feed_dict={self.test_inputs: test_inputs[i]})\n\n return np.concatenate(pred_means, axis=0), np.concatenate(pred_vars, axis=0)", "def _predict(self, dataset):\n binary_predictions = ProxyClassifier._predict(self, dataset)\n self.ca.estimates = binary_predictions\n predictions = [ {-1: self.__predictneg,\n +1: self.__predictpos}[x] for x in binary_predictions]\n self.ca.predictions = predictions\n return predictions", "def predict(self, epochs): # noqa\n\n # Check that classifier has predict_method (e.g. predict_proba is not\n # always available):\n if not hasattr(self.clf, self.predict_method):\n raise NotImplementedError('%s does not have \"%s\"' % (\n self.clf, self.predict_method))\n\n # Check that at least one classifier has been trained\n if not hasattr(self, 'estimators_'):\n raise RuntimeError('Please fit models before trying to predict')\n\n # Check predict mode\n if self.predict_mode not in ['cross-validation', 'mean-prediction']:\n raise ValueError('predict_mode must be a str, \"mean-prediction\" '\n 'or \"cross-validation\"')\n\n # Check that training cv and predicting cv match\n if self.predict_mode == 'cross-validation':\n n_est_cv = [len(estimator) for estimator in self.estimators_]\n heterogeneous_cv = len(set(n_est_cv)) != 1\n mismatch_cv = n_est_cv[0] != len(self._cv_splits)\n mismatch_y = len(self.y_train_) != len(epochs)\n if heterogeneous_cv or mismatch_cv or mismatch_y:\n raise ValueError(\n 'When predict_mode = \"cross-validation\", the training '\n 'and predicting cv schemes must be identical.')\n\n # Clean attributes\n for att in ['y_pred_', 'test_times_', 'scores_', 'scorer_', 'y_true_']:\n if hasattr(self, att):\n delattr(self, att)\n _warn_once.clear() # reset self-baked warning tracker\n\n X, y, _ = _check_epochs_input(epochs, None, self.picks_)\n\n if not np.all([len(test) for train, test in self._cv_splits]):\n warn('Some folds do not have any test epochs.')\n\n # Define testing sliding window\n if self.test_times == 'diagonal':\n test_times = _DecodingTime()\n test_times['slices'] = [[s] for s in self.train_times_['slices']]\n test_times['times'] = [[s] for s in self.train_times_['times']]\n elif isinstance(self.test_times, dict):\n test_times = copy.deepcopy(self.test_times)\n else:\n raise ValueError('test_times must be a dict or \"diagonal\"')\n\n if 'slices' not in test_times:\n if 'length' not in self.train_times_.keys():\n ValueError('Need test_times[\"slices\"] with adhoc train_times.')\n # Check that same number of time sample in testing than in training\n # (otherwise it won 't be the same number of features')\n test_times['length'] = test_times.get('length',\n self.train_times_['length'])\n # Make a sliding window for each training time.\n slices_list = list()\n for _ in range(len(self.train_times_['slices'])):\n test_times_ = _sliding_window(epochs.times, test_times,\n epochs.info['sfreq'])\n slices_list += [test_times_['slices']]\n test_times = test_times_\n test_times['slices'] = slices_list\n test_times['times'] = [_set_window_time(test, epochs.times)\n for test in test_times['slices']]\n\n for train, tests in 
zip(self.train_times_['slices'],\n test_times['slices']):\n # The user may define irregular timing. We thus need to ensure\n # that the dimensionality of each estimator (i.e. training\n # time) corresponds to the dimensionality of each testing time)\n if not np.all([len(test) == len(train) for test in tests]):\n raise ValueError('train_times and test_times must '\n 'have identical lengths')\n\n # Store all testing times parameters\n self.test_times_ = test_times\n\n n_orig_epochs, _, n_times = X.shape\n\n # Subselects the to-be-predicted epochs so as to manipulate a\n # contiguous array X by using slices rather than indices.\n test_epochs = []\n if self.predict_mode == 'cross-validation':\n test_idxs = [ii for train, test in self._cv_splits for ii in test]\n start = 0\n for _, test in self._cv_splits:\n n_test_epochs = len(test)\n stop = start + n_test_epochs\n test_epochs.append(slice(start, stop, 1))\n start += n_test_epochs\n X = X[test_idxs]\n\n # Prepare parallel predictions across testing time points\n # FIXME Note that this means that TimeDecoding.predict isn't parallel\n parallel, p_func, n_jobs = parallel_func(_predict_slices, self.n_jobs)\n n_test_slice = max(len(sl) for sl in self.test_times_['slices'])\n # Loop across estimators (i.e. training times)\n n_chunks = min(n_test_slice, n_jobs)\n chunks = [np.array_split(slices, n_chunks)\n for slices in self.test_times_['slices']]\n chunks = map(list, zip(*chunks))\n\n # To minimize memory during parallelization, we apply some chunking\n y_pred = parallel(p_func(\n estimators=self.estimators_, cv_splits=self._cv_splits,\n predict_mode=self.predict_mode, predict_method=self.predict_method,\n n_orig_epochs=n_orig_epochs, test_epochs=test_epochs,\n **dict(zip(['X', 'train_times'], _chunk_data(X, chunk))))\n for chunk in chunks)\n\n # Concatenate chunks across test time dimension.\n n_tests = [len(sl) for sl in self.test_times_['slices']]\n if len(set(n_tests)) == 1: # does GAT deal with a regular array/matrix\n self.y_pred_ = np.concatenate(y_pred, axis=1)\n else:\n # Non regular testing times, y_pred is an array of arrays with\n # different lengths.\n # FIXME: should do this with numpy operators only\n self.y_pred_ = [[test for chunk in train for test in chunk]\n for train in map(list, zip(*y_pred))]\n return self.y_pred_", "def prediction(input_path=INPUT_DIR,\n output_path=OUTPUT_DIR,\n model_path=MODEL_PATH,\n test=False):\n\n X = tf.placeholder(shape=[None, chunk_size, chunk_size], dtype=tf.float32, name='input_area')\n y_inter = deepcn.deepcn(X, chunk_size, False)\n y_pred = tf.cast(tf.argmax(tf.squeeze(y_inter), -1), tf.uint8)\n\n img_ids = []\n for name in os.listdir(input_path):\n if os.path.isdir(os.path.join(input_path, name)):\n img_ids.append(name)\n all_preds = np.zeros((len(img_ids), 256, 256))\n print('num of images: ', len(img_ids))\n\n loader = tf.train.Saver()\n\n with tf.Session() as sess:\n print(\"Import model from: %s\" %model_path)\n loader.restore(sess, model_path)\n # sess.run(tf.global_variables_initializer())\n\n batch_start_pos = 0\n while batch_start_pos < len(img_ids):\n batch_size = 100\n batch_end_pos = min(batch_start_pos + batch_size, len(img_ids))\n print('predict from %s, to %s' % (batch_start_pos, batch_end_pos))\n batch = img_ids[batch_start_pos:batch_end_pos]\n pw = predict_data_wrapper.PredictWrapper(path=input_path,\n resize_size=chunk_size,\n img_ids=batch)\n input_arr = pw.ResizedTestData()\n print(\"input_arr.shape: \", input_arr.shape)\n # input test_data_batch, output prediction of shape 
batch_size * 256 * 256\n pred_arr = sess.run(y_pred, feed_dict={X: input_arr})\n print(\"pred_arr.shape: \", pred_arr.shape)\n all_preds[batch_start_pos:batch_end_pos] = pred_arr\n pw.OutputPrediction(pred_arr*100, path=output_path)\n batch_start_pos = batch_end_pos\n\n # Use all img_ids and all_preds to generate single cell split csv file\n pw = predict_data_wrapper.PredictWrapper(path=input_path,\n resize_size=chunk_size,\n img_ids=img_ids)\n pw.GenerateSubmit(all_preds, output_path, cutoff=0.5)", "def predict(self, data, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. Run build method or load model before fitting')\n # print len(data[0])\n # print type(data[0])\n # print data.shape\n return self.model.predict(data, 1, verbose) # ,steps)", "def predict(self, test_set, test_labels):\n\n with tf.Session() as self.tf_session:\n self.tf_saver.restore(self.tf_session, self.models_dir + self.model_name)\n return self.accuracy.eval({self.input_data: test_set, self.input_labels: test_labels})", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def predict(self, dataset):\n # TODO: self.model(training=False)\n # logging.info('Predicting')\n # if self.verbosity > 1:\n # print('Predicting')\n dataset = rdata.data2dataset(dataset) # Convert to dataset\n assert dataset.get_dim_input() == self.n_inputs, \\\n 'Number of covariates does not match the model %d -> %d' % (dataset.get_dim_input(), self.n_inputs)\n n_data = dataset.get_n_data()\n\n pred = self._predict(dataset=dataset) # Predict\n\n if self.isprobabilistic():\n assert pred[0].shape == (n_data, self.n_outputs)\n assert pred[1].shape == (n_data, self.n_outputs)\n else:\n assert pred.shape == (n_data, self.n_outputs)\n return pred", "def get_model_predictions(\n self,\n model: Type[Model],\n start_task_index: int = 0,\n stop_task_index: Optional[int] = None,\n batched: bool = False,\n batch_size: int = _DEFAULT_BATCH_SIZE,\n skip_validation: bool = False,\n ) -> Dict[str, Dict[str, Union[str, float]]]:\n predictions = {}\n if not batched:\n batch_size = None\n n_tasks = (stop_task_index or self.n_tasks) - start_task_index\n with tqdm(total=n_tasks) as pbar:\n if not batched:\n for support_x, support_y, query_x, metadata in self.get_tasks(\n start_task_index=start_task_index,\n stop_task_index=stop_task_index,\n ):\n query_y, scores = _parse_fit_and_predict_result(\n model.fit_and_predict(\n support_x=support_x,\n support_y=support_y,\n target_x=query_x,\n metadata=metadata,\n )\n )\n if not skip_validation:\n validate(query_y, metadata['labels'])\n predictions.update(\n _convert_fit_and_predict_result_to_predictions(\n query_y=query_y,\n scores=scores,\n query_question_ids=metadata['query_question_ids']\n )\n )\n pbar.update(1)\n else:\n for batch in grouper(\n batch_size,\n self.get_tasks(\n start_task_index=start_task_index,\n stop_task_index=stop_task_index,\n )\n ):\n support_x, support_y, query_x, metadata = zip(*(b for b in batch if b is not None))\n n_tasks_in_batch = len(support_x)\n query_y, scores = _parse_fit_and_predict_result(\n model.fit_and_predict(\n support_x=support_x,\n support_y=support_y,\n target_x=query_x,\n metadata=metadata,\n )\n )\n try:\n query_y = flatten(query_y)\n scores = flatten(scores) if scores is not None else None\n except TypeError:\n # Already flattened\n pass\n query_question_ids_flat = flatten(m['query_question_ids'] for m in metadata)\n if not skip_validation:\n validate(query_y, metadata['labels'])\n predictions.update(\n 
_convert_fit_and_predict_result_to_predictions(\n query_y=query_y,\n scores=scores,\n query_question_ids=query_question_ids_flat,\n )\n )\n pbar.update(n_tasks_in_batch)\n return predictions", "def correct_age_predictions(train_preds, train_age, test_preds, test_age):\n lr = LinearRegression()\n\n train_resids = np.array(train_preds - train_age)\n test_resids = np.array(test_preds - test_age)\n\n # fit model\n lr.fit(train_age[:,np.newaxis], train_resids)\n\n # predict test residuals using age\n pred_resid = lr.predict(test_age[:,np.newaxis])\n\n # correct model predictions\n corrected_predictions = test_preds - pred_resid\n\n return corrected_predictions", "def test_fit_predict() -> None:\n mapie = MapieRegressor()\n mapie.fit(X_toy, y_toy)\n mapie.predict(X_toy)", "def fit_predict_single_fold(\n self, train: TabularDataset, valid: TabularDataset\n ) -> Tuple[TorchBasedLinearEstimator, np.ndarray]:\n if type(train) is PandasDataset:\n train = train.to_numpy()\n valid = valid.to_numpy()\n\n model = self._infer_params()\n\n model.fit(\n train.data,\n train.target,\n train.weights,\n valid.data,\n valid.target,\n valid.weights,\n )\n\n val_pred = model.predict(valid.data)\n\n return model, val_pred", "def run(self, orig_target_df):\n\n # For each fold\n for fold_idx, (fold_training_set_df, fold_testing_set_df, fold_target_df, fold_truth_df) in enumerate(self._generate_validation_fold()):\n train_test_date_split = fold_training_set_df[\"date\"].max()\n eval_start_date = train_test_date_split - timedelta(days = self.test_nb_days)\n date_to_predict = train_test_date_split + timedelta(days = 1)\n print(\"Warning: date_to_predict offset should be computed dynamically. Currently fixed to 1.\")\n\n # For each prediction method\n for process, process_name in zip(self.process_lst, self.process_names_lst):\n print(\"Running validation for process:\", process_name, \"on fold:\", fold_idx, \"...\")\n\n # Train the model\n with open(self.data_cache_path_str + \"data_bkp.pkl\", \"wb\") as f:\n pickle.dump((fold_training_set_df, fold_testing_set_df, fold_target_df, fold_truth_df), f)\n\n y_train = fold_target_df[\"demand\"].reset_index(drop = True)\n model = process(train_test_date_split, eval_start_date)\n model.fit(fold_training_set_df, y_train)\n\n # Generate predictions for validation set\n preds = model.predict(fold_testing_set_df, date_to_predict)\n preds[\"demand\"] = (orig_target_df[\"shifted_demand\"] + preds[\"demand\"]).apply(np.expm1)\n\n # Score the predictions\n preds2 = preds.copy()\n preds2.columns = [\"id\", \"date\", \"preds\"]\n preds_rmse_by_date_df = preds2.merge(fold_truth_df, how = \"left\", on = [\"id\", \"date\"])\n preds_rmse_by_date_df = preds_rmse_by_date_df[[\"date\", \"preds\", \"demand\"]].groupby(\"date\").apply(lambda x: self._rmse(x[\"demand\"], x[\"preds\"])).reset_index()\n preds_rmse_by_date_df.columns = [\"date\", \"preds_rmse\"]\n\n best_preds_piv = preds[[\"id\", \"date\", \"demand\"]].pivot(index = \"id\", columns = \"date\", values = \"demand\").reset_index()\n truth_piv = fold_truth_df[[\"id\", \"date\", \"demand\"]].pivot(index = \"id\", columns = \"date\", values = \"demand\").reset_index()\n truth_piv.set_index(\"id\", inplace = True)\n best_preds_piv.set_index(\"id\", inplace = True)\n best_preds_piv.columns = [\"F\" + str(i) for i in range(1, 29)]\n truth_piv.columns = [\"F\" + str(i) for i in range(1, 29)]\n validation_WRMSSE = round(model.evaluator.wrmsse(best_preds_piv, truth_piv, score_only = True), 6)\n\n # Save result for later use\n 
self.scores[process_name].append((fold_idx, preds_rmse_by_date_df, validation_WRMSSE))\n \n if self.verbose == True: \n print(process_name, \"had a score of\", validation_WRMSSE, \"on validation period\", fold_testing_set_df[\"date\"].min(), \"to\", fold_testing_set_df[\"date\"].max())\n\n metrics_lst = []\n for process_name, content in self.scores.items():\n for fold_idx, preds_rmse_by_date_df, validation_WRMSSE in content:\n preds_rmse_by_date_df[\"process_name\"] = process_name\n preds_rmse_by_date_df[\"fold_idx\"] = fold_idx\n preds_rmse_by_date_df[\"WRMSSE\"] = validation_WRMSSE\n metrics_lst.append(preds_rmse_by_date_df)\n\n metrics_df = pd.concat(metrics_lst, axis = 0)\n metrics_df.set_index(\"date\", inplace = True)\n\n return metrics_df", "def __train_and_predict(self, X_train, y, X_test):\n self.model.fit(X_train, y, eval_metric='auc')\n prediction_probs = self.model.predict_proba(X_train)[:, 1]\n print \"Training auc = %f\" % roc_auc_score(y, prediction_probs)\n self.__write_csv(prediction_probs,\n X_train.shape[0], self.train_out_file)\n\n prediction_probs = self.model.predict_proba(X_test)[:, 1]\n self.__write_csv(prediction_probs,\n X_test.shape[0], self.test_out_file)\n\n self.feature_imp()", "def train(\n self, training_data: Dataset, validation_data: Optional[Dataset] = None\n ) -> Predictor:\n raise NotImplementedError", "def predict_4(trained_model, X_test, y_test):\n # Predict with test data\n start_time = timeit.default_timer()\n trained_model.predict(X_test)\n end_time = timeit.default_timer()\n time = end_time - start_time\n speed = int(X_test.shape[0] / time)\n \n # Get loss and accuracy\n test_accuracy = trained_model.score(X_test, y_test)\n \n # Prepare results messages\n msg_time = \"prediction time: {:.3f}s ({}px/s)\\n\".format(time, speed)\n msg_test_acc = \"test_accuracy: {:.3f}\\n\\n\".format(test_accuracy)\n \n # Write results messages\n with open(OUTPUT_FILE, 'a') as f:\n f.write(msg_time)\n f.write(msg_test_acc)", "def train_predict(model_list,X_train, X_test, y_train, y_test):\n P = np.zeros((y_test.shape[0], len(model_list)))\n P = pd.DataFrame(P)\n\n print(\"Fitting models.\")\n cols = list()\n for i, (name, m) in enumerate(models.items()):\n print(\"%s...\" % name, end=\" \", flush=False)\n m.fit(X_train, y_train)\n P.iloc[:, i] = m.predict_proba(X_test)[:, 1]\n cols.append(name)\n print(\"done\")\n\n P.columns = cols\n print(\"Done.\\n\")\n return P", "def generate_predictions(inputs, model, tokenizer):\n # Generate model results\n outputs = model(**inputs)\n\n # Convert logit outputs into predictions for table cells and aggregation operators\n predicted_table_cell_coords, predicted_aggregation_operators = tokenizer.convert_logits_to_predictions(\n inputs,\n outputs.logits.detach(),\n outputs.logits_aggregation.detach()\n )\n\n print(predicted_table_cell_coords)\n print(predicted_aggregation_operators)\n\n # Return values\n return predicted_table_cell_coords, predicted_aggregation_operators", "def test_predict(self):\n # Check build does not raise errors\n dataset = KDDCupDataset()\n dataset.create_fixed_samples(\n *self.data, samples_num=1, partition_sizes=self.partition_sizes)\n dataset.set_current_sample(0)\n model = self.MODEL(dataset, **self.model_arguments)\n model.fit(training_epochs=50)\n true, predictions = model.predict('test')\n expected_size = ((dataset.num_examples('test') //\n model.batch_size) * model.batch_size)\n self.assertEqual(true.shape[0], expected_size)\n self.assertEqual(true.shape, predictions.shape)", "def 
predict_2(trained_model, X_test, y_test):\n # Predict with test data\n start_time = timeit.default_timer()\n test_prediction = trained_model.predict(X_test)\n end_time = timeit.default_timer()\n time = end_time - start_time\n speed = int(X_test.shape[0] / time)\n \n # Get loss and accuracy\n test_loss, test_accuracy = trained_model.evaluate(X_test, y_test)\n \n # Prepare results messages\n msg_time = \"prediction time: {:.3f}s ({}px/s)\\n\".format(time, speed)\n msg_test_loss = \"test_loss: {:.3f}\\n\".format(test_loss)\n msg_test_acc = \"test_accuracy: {:.3f}\\n\\n\".format(test_accuracy)\n \n # Write results messages\n with open(OUTPUT_FILE, 'a') as f:\n f.write(msg_time)\n f.write(msg_test_loss)\n f.write(msg_test_acc)", "def fit_predict(self, train_x: pd.DataFrame, train_y: pd.Series, test_x: pd.DataFrame, test_y: pd.Series) -> dict:\n self.evaluator.fit(train_x, train_y, test_x, test_y)\n predictions = self.evaluator.predict(test_x)\n print(predictions)\n metrics = metrics_stat(predictions, test_y)\n return metrics", "def predict(model, dataset_info, args):\n dataset_info, model_info = fill_info_dicts(dataset_info, args)\n\n fill_pred_op_info(dataset_info, model, args, model_info)\n # fill_topic_op(args, model_info)\n\n str_ = 'Predictions of the given text data of dataset %s using different ' \\\n 'saved models:' % args.predict_dataset\n labels = [str(i) for i in dataset_info[args.predict_dataset]['labels']]\n if len(labels) == 2 or args.task == 'regression':\n # TODO currently just hard code for binary\n header = 'id\\tlabel\\t' + str(1) + '\\n'\n else:\n header = 'id\\tlabel\\t' + '\\t'.join(labels) + '\\n'\n\n saver = tf.train.Saver(max_to_keep=100)\n\n model_names = args.datasets\n if len(args.datasets) > 1:\n model_names.append('MULT')\n\n for model_name in model_names:\n # load the saved best model\n str_ += '\\nUsing the model that performs the best on (%s)\\n' % model_name\n\n output = header\n str_ += header\n\n data = []\n\n with tf.Session() as sess:\n if model_name == 'MULT':\n checkpoint_path = os.path.join(args.checkpoint_dir, 'MULT',\n 'model')\n else:\n checkpoint_path = model_info[model_name]['checkpoint_path']\n\n saver.restore(sess, checkpoint_path)\n\n dataset_name = args.predict_dataset\n\n # import pdb\n # sess.run(model_info[dataset_name]['pred_iter'].initializer)\n # batch = model_info[dataset_name]['pred_batch']\n # text, weights = sess.run([batch['text'], batch['text_weights']])\n # pdb.set_trace()\n\n _pred_op = model_info[dataset_name]['pred_pred_op']\n _pred_iter = model_info[dataset_name]['pred_iter']\n _ids, _predictions, _scores = get_all_pred_res(sess, _pred_op,\n _pred_iter, args)\n\n for id, pred, score in zip(_ids, _predictions, _scores):\n record = {\n 'id': id,\n 'label': pred\n }\n if args.task == 'classification':\n for l, s in zip(labels, score):\n record[str(l)] = s\n else:\n record['score'] = score[0]\n data.append(record)\n\n # output positive score for binary classification\n\n if len(score) == 2:\n score = str(score[1])\n else:\n score = '\\t'.join([str(i) for i in score])\n str_ += id + '\\t' + str(int(pred)) + '\\t' + score + '\\n'\n output += id + '\\t' + str(int(pred)) + '\\t' + score + '\\n'\n\n make_dir(args.predict_output_folder)\n\n with open(\n os.path.join(args.predict_output_folder, model_name) + '.tsv',\n 'w') as file:\n # for i in _predictions:\n # file.write(str(i))\n file.write(output)\n\n with open(\n os.path.join(args.predict_output_folder, model_name) + '.json',\n 'wt') as file:\n json.dump(data, file, 
ensure_ascii=False)\n\n logging.info(str_)", "def evaluate(self):\n\n\t\t## We should be evaluating on dev dataset as well, so commenting x_test\n\t\t#self.model_score = self.model.evaluate(self.x_test, self.y_test_oh, batch_size=2048)\n\t\tself.model_score = self.model.evaluate(self.x_dev, self.y_dev_oh, batch_size=2048)\n\t\tprint(\"%s score = %f\\n\" %(self.modelName, self.model_score[1]))\n\n\t\t##Saving atucal vs predicted predictions\n\t\t##np.argmax returns the index where it see's 1 in the row\n\t\t#y_pred = np.argmax(self.model.predict(self.x_test, batch_size=2048), axis=1)\n\t\ty_pred = np.argmax(self.model.predict(self.x_dev, batch_size=2048), axis=1)\n\n\t\t## vstack will stack them in 2 rows, so we use Trasnpose to get them in column stack\n\t\t#output_predict = np.vstack((np.argmax(self.y_test_oh, axis=1), y_pred)).T\n\t\toutput_predict = np.vstack((np.argmax(self.y_dev_oh, axis=1), y_pred)).T\n\t\toutputFile = self.resultDir + \"/outputPredict.csv\" \n\t\tnp.savetxt(outputFile, output_predict, fmt=\"%5.0f\", delimiter=\",\")\n\n\t\t##Error Analysis of the prediction\n\t\terrorAnalysis(outputFile)\n\n\t\treturn self.model_score", "def postprocess_model_outputs(self, predictions, expected):\n\n predictions = {k: t.numpy() for k, t in predictions.items()}\n\n return predictions, expected", "def load_and_predict(self):\n if DataLoader.data is None:\n messagebox.showerror(\"Information\", \"Data file is empty, please load the data first.\")\n return\n\n path = filedialog.askopenfilename()\n with open(path, 'rb') as file:\n Trainer.model = pickle.load(file)\n\n scale = DataLoader.data['out'].max() - DataLoader.data['out'].min()\n scaler = MinMaxScaler(feature_range=(0, 1))\n scaler.fit(DataLoader.data)\n data_scaled = pd.DataFrame(scaler.transform(DataLoader.data), columns=DataLoader.data.columns)\n\n Trainer.y_pred = batch_predict(Trainer.model, data_scaled.drop(columns=['out']))\n Trainer.y_true = data_scaled['out']\n\n self.test_rmse = scale * math.sqrt(mean_squared_error(Trainer.y_pred, Trainer.y_true))\n print(self.test_rmse)\n self.r_squared = np.corrcoef(Trainer.y_pred * scale, data_scaled['out'] * scale)[0, 1] ** 2\n print(self.r_squared)\n\n models = Trainer.model.get_models()\n param_string = f'Component Function Trained Parameters:\\n'\n for i in range(len(models)):\n param_string += \"length scale: {:.4f}\".format(models[i].kernel_.k1.length_scale) + ' ' + \\\n \"noise level: {:.4e}\".format(models[i].kernel_.k2.noise_level) + '\\n'\n param_string += f'\\nRMSE on the test set: {self.test_rmse}\\n'\n param_string += f'R^2 value on the test set: {self.r_squared}'\n display_params = ttk.Label(self, text=param_string, width=40)\n display_params.grid(row=24 + 7, column=0, columnspan=2, sticky=tk.W + tk.E)", "def model_predict(classifier, X_test:list) -> list:\n y_predict = classifier.predict(X_test)\n return y_predict", "def predictions(self, model):\n return get_predictions_from_df(\n model=model, df=self.prediction_df,\n fixed_effects=self.fixed_effects,\n random_effect=self.random_effect,\n spline=self.spline,\n offset=self.offset,\n )", "def predict(self, inputs):\n return self.model.predict(inputs)", "def predict_only(self):", "def _predict(cls, model, is_log_transformed,\n raw_actual, interpolated_actual,\n training_end=None, seasonal_feature_scoring=None, pred_date=None, order_of_diff=None,\n training_tail=None, ext_training_features=None, pred_len=None, freq=None,\n include_holidays_exog=None):\n\n import numpy as np\n import pandas as pd\n import scipy.stats as 
st\n from numpy.linalg import LinAlgError\n import math\n\n alpha = cls._sig_level\n alpha_extreme = cls._sig_level_extreme\n\n include_holidays_exog = include_holidays_exog if ext_training_features else 0\n\n index = pd.date_range(start=training_end, end=pred_date, freq=freq)[1:] # Holidays are always daily.\n\n de_obj = DataExploration()\n pred_exog = de_obj._get_exog_data(pred_date, pred_date, index) if include_holidays_exog else None\n\n if pred_exog is not None and set(pred_exog.columns.values) != set(ext_training_features):\n missing_col_list = list(set(ext_training_features) - set(pred_exog.columns.values))\n common_cols = list(set(ext_training_features).intersection(set(pred_exog.columns.values)))\n temp_df = pred_exog[common_cols]\n missing_feat_df = pd.DataFrame(np.zeros([len(pred_exog), len(missing_col_list)]),\n columns=missing_col_list, index=pred_exog.index.values)\n pred_exog = pd.concat([temp_df, missing_feat_df], axis=1)\n pred_exog = pred_exog[ext_training_features]\n\n freq = \"1\" + freq if not any(char.isdigit() for char in freq) else freq\n\n forecast_ndays = int((pred_date - pd.Timestamp(training_end)) / pd.Timedelta(freq))\n model_freshness = forecast_ndays / float(pred_len)\n\n try:\n if forecast_ndays > pred_len:\n raise ValueError('Current trained model object expired')\n\n float_min = 1e-10\n\n # set exogenous (holiday) variables for input data\n if include_holidays_exog:\n pred_exog = pred_exog.loc[pd.Timestamp(training_end) + pd.Timedelta(freq): pred_date]\n else:\n pred_exog = None\n\n if seasonal_feature_scoring:\n if not include_holidays_exog:\n pred_exog = seasonal_feature_scoring[:forecast_ndays]\n else:\n pred_exog['fourier_feature'] = seasonal_feature_scoring[:forecast_ndays]\n\n forecast = list(model.forecast(steps=forecast_ndays, alpha=alpha, exog=pred_exog))\n interpolated_training_data = list(zip(*training_tail))[1]\n\n for order in list(reversed(range(order_of_diff))):\n training_data_diff = np.diff(interpolated_training_data,\n order) if order > 0 else interpolated_training_data\n\n forecast_diff_mean = [training_data_diff[-1]]\n forecast_diff_ci = []\n\n for i in range(forecast_ndays):\n forecast_diff_mean.append(forecast_diff_mean[-1] + forecast[0][i])\n forecast_diff_ci.append([forecast_diff_mean[-1] -\n (st.norm.ppf(1 - (alpha / 2.0)) * forecast[1][i]),\n forecast_diff_mean[-1] +\n (st.norm.ppf(1 - (alpha / 2.0)) * forecast[1][i])])\n forecast[0] = forecast_diff_mean[1:]\n forecast[2] = forecast_diff_ci\n\n if is_log_transformed:\n transformed_back_forecast = np.exp(forecast[0][-1] + ((forecast[1][-1] ** 2) / 2.0)) - 1\n transformed_back_std_err = np.sqrt((np.exp(forecast[1][-1] ** 2) - 1) * (np.exp((2 * forecast[0][-1]) +\n (forecast[1][\n -1] ** 2))))\n transformed_back_CILower = transformed_back_forecast - \\\n st.norm.ppf(1 - (alpha / 2.0), 0, transformed_back_std_err) \\\n if transformed_back_std_err != 0 else transformed_back_forecast\n transformed_back_CIUpper = transformed_back_forecast + \\\n st.norm.ppf(1 - (alpha / 2.0), 0, transformed_back_std_err) \\\n if transformed_back_std_err != 0 else transformed_back_forecast\n transformed_back_interpolated_actual = float(np.exp(interpolated_actual) - 1)\n if np.sum(np.isnan(forecast[0][-1])) or np.isnan(forecast[1][-1]):\n raise ValueError('Predicted null value')\n\n if is_log_transformed:\n zscore = (transformed_back_interpolated_actual -\n transformed_back_forecast) / max(float(transformed_back_std_err), float_min)\n\n anomaly_probability = (2 * st.norm(0, 1).cdf(abs(zscore))) - 1\n if 
math.isnan(anomaly_probability) or math.isnan(transformed_back_CILower) \\\n or math.isnan(transformed_back_CIUpper):\n raise ValueError('Either Anomaly probability or CILower or CIUpper is NaN under log transform')\n down_anomaly_probability = 1 - st.norm(0, 1).cdf(zscore)\n up_anomaly_probability = st.norm(0, 1).cdf(zscore)\n\n result = {'Success': True,\n 'IsLogTransformed': is_log_transformed,\n 'LogTransformedAdjustedActual': interpolated_actual,\n 'LogTransformedPrediction': float(forecast[0][-1]),\n 'LogTransformedStdErr': float(forecast[1][-1]),\n 'LogTransformedCILower': float(forecast[2][-1][0]),\n 'LogTransformedCIUpper': float(forecast[2][-1][1]),\n 'AdjustedActual': transformed_back_interpolated_actual,\n 'Prediction': float(transformed_back_forecast) if not float(\n transformed_back_forecast) == float('inf') else 0.0,\n 'StdErr': float(transformed_back_std_err) if not float(\n transformed_back_std_err) == float('inf') else 0.0,\n 'CILower': float(transformed_back_CILower) if not float(\n transformed_back_CILower) == float('-inf') else 0.0,\n 'CIUpper': float(transformed_back_CIUpper) if not float(\n transformed_back_CIUpper) == float('inf') else 0.0,\n 'ConfLevel': float(1.0 - alpha) * 100,\n 'ExogenousHolidays': include_holidays_exog,\n 'IsAnomaly': bool(anomaly_probability > 1 - alpha),\n 'IsAnomalyExtreme': bool(anomaly_probability > 1 - alpha_extreme),\n 'AnomalyProbability': 1 if raw_actual is None else float(anomaly_probability),\n 'DownAnomalyProbability': 1 if raw_actual is None else float(down_anomaly_probability),\n 'UpAnomalyProbability': 1 if raw_actual is None else float(up_anomaly_probability),\n 'ModelFreshness': model_freshness}\n\n else:\n zscore = (interpolated_actual - forecast[0][-1]) / max(float(forecast[1][-1]), float_min)\n\n anomaly_probability = (2 * st.norm(0, 1).cdf(abs(zscore))) - 1\n if math.isnan(anomaly_probability) or math.isnan(forecast[2][-1][0]) or math.isnan(forecast[2][-1][1]):\n raise ValueError('Either Anomaly probability or CILower or CIUpper is NaN')\n\n down_anomaly_probability = 1 - st.norm(0, 1).cdf(zscore)\n up_anomaly_probability = st.norm(0, 1).cdf(zscore)\n\n result = {'Success': True,\n 'IsLogTransformed': is_log_transformed,\n 'AdjustedActual': interpolated_actual,\n 'Prediction': float(forecast[0][-1]) if not float(\n forecast[0][-1]) == float('inf') else 0.0,\n 'StdErr': float(forecast[1][-1]) if not float(\n forecast[1][-1]) == float('inf') else 0.0,\n 'CILower': float(forecast[2][-1][0]) if not float(\n forecast[2][-1][0]) == float('-inf') else 0.0,\n 'CIUpper': float(forecast[2][-1][1]) if not float(\n forecast[2][-1][1]) == float('inf') else 0.0,\n 'ConfLevel': float(1.0 - alpha) * 100,\n 'ExogenousHolidays': include_holidays_exog,\n 'IsAnomaly': bool(anomaly_probability > 1 - alpha),\n 'IsAnomalyExtreme': bool(anomaly_probability > 1 - alpha_extreme),\n 'AnomalyProbability': 1 if raw_actual is None else float(anomaly_probability),\n 'DownAnomalyProbability': 1 if raw_actual is None else float(down_anomaly_probability),\n 'UpAnomalyProbability': 1 if raw_actual is None else float(up_anomaly_probability),\n 'ModelFreshness': model_freshness}\n\n except (LinAlgError, ValueError, LADStructuralError) as e:\n result = {'Success': False,\n 'AdjustedActual': interpolated_actual,\n 'ErrorMessage': str(e)}\n\n return result", "def prediction(self, test_path, dest_path):\n logger.info(f\"prediction on files from {test_path}\")\n\n if self.train_time is None:\n self.train_time = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n\n 
save_path = data_functions.create_path(dest_path, self.train_time)\n save_path = data_functions.create_path(save_path, 'raw_pred')\n logger.info(f\"saving predictions to {save_path}\")\n\n test_gen = self.test_generator(test_path)\n for img, img_entry, orig_shape in test_gen:\n logger.info(f\"getting prediction for {img_entry.name}\")\n pred_raw = self.model.predict(img, batch_size=1)[0]\n pred_raw_resized = cv2.resize(pred_raw, orig_shape)\n\n file_name = img_entry.name.rsplit('.', 1)[0] + '.npy'\n npy_file_save_path = os.path.join(save_path, file_name)\n np.save(npy_file_save_path, pred_raw_resized, allow_pickle=True)\n\n pred_image = (255 * pred_raw_resized).astype(np.uint8)\n cv2.imwrite(os.path.join(save_path, img_entry.name), pred_image)\n\n return save_path", "def predict(data, model_predict):\n # Execute any steps you need to do before scoring\n\n # This method makes predictions against the raw, deserialized model\n #predictions = model_predict(data)\n\n data.to_csv(\"/opt/code/chemprop_folder/for_scoring.csv\", index=False)\n\n args = PredictArgs().parse_args([\n '--test_path', '/opt/chemprop_folder/for_scoring.csv',\n '--checkpoint_path', '/opt/code/model.pth',\n '--preds_path', '/opt/chemprop_folder/preds.csv'\n ])\n\n make_predictions(args)\n\n preds_df = pds.read_csv(\"/opt/chemprop_folder/preds.csv\")\n sh = str(preds_df.shape)\n print(sh)\n\n preds_df = preds_df.rename(columns = {\"p_np\": \"positive_class_label\"})\n preds_df = preds_df.drop(columns=['smiles'])\n preds_df[\"negative_class_label\"] = 1 - preds_df[\"positive_class_label\"]\n\n print(preds_df.head())\n\n # Execute any steps you need to do after scoring\n # Note: To properly send predictions back to DataRobot, the returned DataFrame should contain a\n # column for each output label for classification or a single value column for regression\n return preds_df", "def fit_predict_score(self, train_reviews: List[ParsedText],\n test_reviews: List[ParsedText], test_reviews_pred: List[ParsedText],\n **kwargs) -> List[ParsedText]:\n\n self.fit(train_texts=train_reviews, val_texts=test_reviews, **kwargs)\n test_reviews_pred = self.predict(test_reviews_pred)\n logging.info(f'Score: {self.score(texts=test_reviews, texts_pred=test_reviews_pred)}')\n return test_reviews_pred", "def predict_single_fold(self, model: TorchBasedLinearEstimator, dataset: TabularDataset) -> np.ndarray:\n pred = model.predict(dataset.data)\n\n return pred", "def predict(self, testx=None):\n if self.best_model is None:\n raise Exception(\"Train a model first\")\n\n if testx is None:\n testx = self.test_X\n\n return self._predict(testx)", "def predict(self, data, version='default'):\n if self.transform_service:\n data = self.transform_service.predict(data, version)\n return self.model_service.predict(data, version)", "def predict(self, trained_model, prediction_datetime):\n return trained_model.predict()", "def _predict(self, testX):\n pass", "def eval_epoch(self, final=False, save_predictions=False):\n t1 = time()\n output = {'tp': [], 'fp': [], 'fn': [], 'tn': [], 'loss': [], 'preds': [],'truth': [], 'true': 0,'true_sep':np.zeros(self.rel_size)}\n test_info = []\n test_result = []\n self.model.eval()\n test_iter = self.iterator(self.data['test'], batch_size=self.params['batch'], shuffle_=False)\n # preds=[]\n # truths=[]\n for batch_idx, batch in enumerate(test_iter):\n batch = self.convert_batch(batch, istrain=False, save=True)\n\n with torch.no_grad():\n loss, stats, predictions, select, pred_pairs, multi_truths, mask, _ = self.model(\n batch) # 
pred_pairs <#pair, relations_num>\n pred_pairs = torch.sigmoid(pred_pairs)\n\n output['loss'] += [loss.item()]\n output['tp'] += [stats['tp'].to('cpu').data.numpy()]\n output['fp'] += [stats['fp'].to('cpu').data.numpy()]\n output['fn'] += [stats['fn'].to('cpu').data.numpy()]\n output['tn'] += [stats['tn'].to('cpu').data.numpy()]\n output['preds'] += [predictions.to('cpu').data.numpy()]\n # preds.extend(predictions.to('cpu').data.numpy())\n # truths.extend(truth.to('cpu').data.numpy())\n\n if True:\n test_infos = batch['info'][select[0].to('cpu').data.numpy(),\n select[1].to('cpu').data.numpy(),\n select[2].to('cpu').data.numpy()][mask.to('cpu').data.numpy()]\n test_info += [test_infos]\n\n pred_pairs = pred_pairs.data.cpu().numpy()\n multi_truths = multi_truths.data.cpu().numpy()\n output['true'] += multi_truths.sum() - multi_truths[:, self.loader.label2ignore].sum()\n output['true_sep'] = output['true_sep'] +multi_truths.sum(axis=0)\n if save_predictions:\n assert test_infos.shape[0] == len(pred_pairs), print(\n \"test info=%d, pred_pair=%d\" % (len(test_infos.shape[0]), len(pred_pairs)))\n for pair_id in range(len(pred_pairs)):\n multi_truth = multi_truths[pair_id] #第pair_id个实体对的true\n for r in range(0, self.rel_size):\n if r == self.loader.label2ignore:\n continue\n\n test_result.append((int(multi_truth[r]) == 1, float(pred_pairs[pair_id][r]),\n test_infos[pair_id]['intrain'],test_infos[pair_id]['cross'], self.loader.index2rel[r], r,\n len(test_info) - 1, pair_id))\n\n\n # estimate performance\n total_loss, scores = self.performance(output)\n # pairs*rel_size*batch\n test_result.sort(key=lambda x: x[1], reverse=True)\n\n input_theta, w, f1,p,r,scores_class = self.tune_f1_theta(test_result, output['true'],output['true_sep'], self.params['input_theta'], isTest=save_predictions)\n\n t2 = time()\n if not final:\n self.test_res['loss'] += [total_loss]\n # self.test_res['score'] += [scores[self.primary_metric]]\n self.test_res['score'] += [f1]\n self.test_res['p'] = p\n self.test_res['r'] = r\n print(' TEST | LOSS = {:.05f}, '.format(total_loss), end=\"\")\n print_results(scores, scores_class, self.show_class, t2 - t1)\n # print(\"不同类别:\")\n # t = classification_report(truths, preds,target_names=[\"NA\",\"父母子女\", \"祖孙\", \"兄弟姐妹\", \"叔伯姑舅姨\", \"夫妻\", \"其他亲戚\", \"好友\", \"上下级\", \"师生\", \"合作\", \"情侣\", \"对立\", \"共现\", \"同学\", \"同门\"])\n # print(t)\n\n if save_predictions:\n\n test_result = test_result[: w + 1]\n test_result_pred = []\n test_result_info = []\n for item in test_result:\n test_result_pred.append([(item[-3], item[1])]) #预测的关系是的概率\n test_result_info.append([test_info[item[-2]][item[-1]]])\n assert (item[-3] in test_info[item[-2]][item[-1]]['rel']) == item[0], print(\"item\\n\", item, \"\\n\",\n test_info[item[-2]][\n item[-1]])\n write_errors(test_result_pred, test_result_info, self.preds_file, map_=self.loader.index2rel, type=\"theta\")\n write_preds(test_result_pred, test_result_info, self.preds_file, map_=self.loader.index2rel)\n # f1_score_t=f1_score(truths, preds, average='micro')\n # print(f1, scores['micro_f'], f1_score_t)\n\n return f1, scores['micro_f'],input_theta,p,r", "def predict(self,Xpred, nsamples=2000, tune=100, progress=True, points2=[]):\n if self.type_y=='affine':\n return self.predict_affine(Xpred, nsamples, tune, progress, points2)\n elif self.type_y=='regression':\n return self.predict_regression(Xpred, nsamples, tune, progress, points2)\n elif self.type_y=='mixed':\n return self.predict_mixed(Xpred, nsamples, tune, progress, points2)", "def predict(self):\n 
for track in self.tracks:\n track.predict(self.kf)\n #track.du_doan(self.kf_test)", "def predict(self, batched_features, model_load_dir=None):\n\n previous_mode = self._mode\n self._mode = 'predict'\n\n if model_load_dir is None:\n model_load_dir = self._save_dir\n logger.info('Model is lodded from {}'.format(model_load_dir))\n\n if not self._is_graph_build:\n logger.info('Initializing the model for prediction...')\n self.compile()\n\n gpu_options = tf.GPUOptions(allow_growth=True)\n sess_config = tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True, log_device_placement=True)\n with tf.Session(config=sess_config) as sess:\n saver = tf.train.Saver()\n logger.info(\"Getting latest checkpoint in {}\".format(model_load_dir))\n last_checkpoint = tf.train.latest_checkpoint(model_load_dir)\n logger.info(\"Attempting to load checkpoint at {}\".format(last_checkpoint))\n saver.restore(sess, last_checkpoint)\n logger.info(\"Successfully loaded {}!\".format(last_checkpoint))\n\n feed_dict = self._get_test_feed_dict(batched_features)\n y_pred = sess.run(self.prediction, feed_dict=feed_dict)\n\n self._mode = previous_mode\n return y_pred", "def evaluate(model, test_files):\n print(\"Running predictions.\")\n models = load_model(model)\n predictions = predict(models, test_files)\n\n # # write predictions to file\n # write_predictions(\"evaluate_out.json\",predictions)\n evaluate_individual(predictions, test_files, models)\n evaluate_overall(predictions)", "def predict(self, data):\n return self.result.predict(data)", "def test_predict(self):\n self.regression_single.predict(self.X_test)\n self.assertTrue(len(self.regression_single.y_pred))\n self.regression_boston.predict(self.boston_x_test)\n self.assertTrue(len(self.regression_boston.y_pred))", "def predict(dataset):\n import capsnet\n\n # Load (standardized) input data and associated file names\n test_x, _, names = _load_data(dataset)\n\n # Predict class probabilities for each model (epoch)\n at_preds, sed_preds = [], []\n\n for epoch in _determine_epochs(cfg.prediction_epochs):\n model = _load_model(epoch)\n at_pred, sed_pred = utils.timeit(\n lambda: capsnet.gccaps_predict(test_x, model),\n '[Epoch %d] Predicted class probabilities' % epoch)\n\n at_preds.append(at_pred)\n sed_preds.append(sed_pred)\n\n # Average predictions to give an overall output\n total_at_pred = np.mean(at_preds, axis=0)\n total_sed_pred = np.mean(sed_preds, axis=0)\n\n # Ensure output directory exists and set file path format\n os.makedirs(os.path.dirname(cfg.predictions_path), exist_ok=True)\n predictions_path = cfg.predictions_path.format('%s', dataset.name)\n\n # Save free parameters to disk\n utils.log_parameters({'prediction_epochs': cfg.prediction_epochs},\n os.path.join(os.path.dirname(cfg.predictions_path),\n 'parameters.json'))\n\n # Write predictions to disk\n utils.write_predictions(names, total_at_pred, predictions_path % 'at')\n utils.write_predictions(names, total_sed_pred, predictions_path % 'sed')", "def predict(self, model, context, data):\n pass", "def test_predict(self):\n\n docs = self.docs\n for m in self.models:\n preds = m.predict(docs)\n self.assertTrue(isinstance(preds, turicreate.SArray))\n self.assertEqual(len(preds), len(docs))\n self.assertEqual(preds.dtype, int)\n\n preds = m.predict(docs, output_type=\"probability\")\n self.assertTrue(isinstance(preds, turicreate.SArray))\n self.assertTrue(len(preds) == len(docs))\n s = preds.apply(lambda x: sum(x))\n self.assertTrue((s.apply(lambda x: abs(x - 1)) < 0.000001).all())\n\n # Test 
predictions when docs have new words\n new_docs = turicreate.SArray([{\"-1,-1\": 3.0, \"0,4\": 5.0, \"0,3\": 2.0}])\n preds = m.predict(new_docs)\n self.assertEqual(len(preds), len(new_docs))\n\n # Test additional burnin. Ideally we could show that things\n # converge as you increase burnin.\n preds_no_burnin = m.predict(docs, output_type=\"probability\", num_burnin=0)\n self.assertEqual(len(preds_no_burnin), len(docs))", "def test_fit_predict() -> None:\n mapie = MapieClassifier()\n mapie.fit(X_toy, y_toy)\n mapie.predict(X_toy)", "def train_model():\n return model.fit(train_images, train_labels, epochs=10, validation_data=(test_images, test_labels), shuffle='True')", "def predict_model(args):\n print(args)\n\n if args.cuda:\n print(\"=====> use gpu id: '{}'\".format(args.gpus))\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpus\n if not torch.cuda.is_available():\n raise Exception(\"no GPU found or wrong gpu id, please run without --cuda\")\n\n # build the model\n model = build_model(args.model, num_classes=args.classes)\n\n if args.cuda:\n model = model.cuda() # using GPU for inference\n cudnn.benchmark = True\n\n if not os.path.exists(args.save_seg_dir):\n os.makedirs(args.save_seg_dir)\n\n # load the test set\n datas, testLoader = build_dataset_test(args.dataset, args.num_workers, none_gt=True)\n\n if args.checkpoint:\n if os.path.isfile(args.checkpoint):\n print(\"=====> loading checkpoint '{}'\".format(args.checkpoint))\n checkpoint = torch.load(args.checkpoint)\n model.load_state_dict(checkpoint['model'])\n # model.load_state_dict(convert_state_dict(checkpoint['model']))\n else:\n print(\"=====> no checkpoint found at '{}'\".format(args.checkpoint))\n raise FileNotFoundError(\"no checkpoint found at '{}'\".format(args.checkpoint))\n\n print(\"=====> beginning testing\")\n print(\"test set length: \", len(testLoader))\n predict(args, testLoader, model)", "def make_predict_step(self):\n return self.make_eval_step()", "def predict(self, X_test, **kwargs):\n\n # Normalize inputs\n if self.normalize_input:\n X_, _, _ = zero_mean_unit_var_normalization(X_test, self.X_mean, self.X_std)\n else:\n X_ = X_test\n\n # Sample a number of predictions for each given point\n # Generate mean and variance for each given point from sampled predictions\n\n X_ = torch.Tensor(X_)\n self.model.eval()\n Yt_hat = self.model(X_).data.cpu().numpy()\n\n if self.normalize_output:\n Yt_hat = zero_mean_unit_var_denormalization(Yt_hat, self.y_mean, self.y_std)\n\n logger.debug(f\"Generated final outputs array of shape {Yt_hat.shape}\")\n\n return Yt_hat", "def sequence_predict(self, load_script=False, variant=\"predict\"):\n\n if variant != 'internal':\n # Open an existing model and get the input dataset. 
\n # Target for historical data are expected if using previous targets as a feature.\n request_data = self._get_model_and_data(ordered_data=True) \n if type(request_data) == list:\n X, y = request_data\n else:\n X = request_data\n else:\n X = self.X_test.copy()\n y = self.y_test.copy()\n\n # Scale the targets and increase stationarity if required\n if variant != 'internal' and self.model.lag_target and (self.model.scale_target or self.model.make_stationary):\n # If using differencing, we retain original y values for inversing the transformation later\n y_orig = y.values.ravel() if self.model.make_stationary=='difference' else None\n # Apply the transformer to the targets\n y = self.model.target_transformer.transform(y)\n # Drop samples where y cannot be transformed due to insufficient lags\n X = X.iloc[len(X)-len(y):]\n\n # Set the number of periods to be predicted\n prediction_periods = self.model.prediction_periods\n # Set the number of rows required for one prediction\n self.rows_per_pred = 1\n self.diff_lags = max(self.model.stationarity_lags) if self.model.lag_target and self.model.make_stationary=='difference' else 0\n # Set property depending on whether the current sample will be included as an input, or if we only use lag observations for predictions\n self.first_pred_modifier = 1 if self.model.current_sample_as_input else 0 \n\n # Check that the input data includes history to meet any lag calculation requirements\n if self.model.lags:\n # An additional lag observation is needed if previous targets are being added to the features\n self.rows_per_pred = self.model.lags+self.first_pred_modifier+1 if self.model.lag_target else self.model.lags+self.first_pred_modifier\n # If the target is being lagged and made stationary through differencing additional lag periods are required\n if self.model.lag_target and self.model.make_stationary=='difference':\n extra_msg = \" plus an additional {} periods for making the target stationary using differencing\".format(self.diff_lags)\n # For multi-step predictions we only expect lag values, not the current period's values\n # self.rows_per_pred = self.rows_per_pred-1 if prediction_periods > 1 else self.rows_per_pred\n assert len(X) >= self.rows_per_pred + self.diff_lags, \"Insufficient input data as the model requires {} lag periods for each prediction\".format(self.rows_per_pred) + extra_msg\n\n if variant != 'internal':\n # Prepare the response DataFrame\n # Initially set up with the 'model_name' and 'key' columns and the same index as request_df\n self.response = self.request_df.drop(columns=['n_features'])\n \n # Set up a list to contain predictions and probabilities if required\n predictions = []\n get_proba = False\n if variant == 'predict_proba':\n get_proba = True\n probabilities = [] \n\n # Refresh the keras model to avoid tensorflow errors\n if self.model.using_keras:\n self._keras_refresh()\n\n if prediction_periods > 1:\n if not self.model.lag_target:\n y = None\n\n # Check that we can generate 1 or more predictions of prediction_periods each\n n_samples = len(X)\n assert (n_samples - self.rows_per_pred) >= prediction_periods, \\\n \"Cannot generate predictions for {} periods with {} rows, with {} rows required for lag observations. You may need to provide more historical data or sufficient placeholder rows for future periods.\"\\\n .format(prediction_periods, n_samples, self.rows_per_pred)\n \n # For multi-step predictions we can add lag observations up front as we only use actual values\n # i.e. 
We don't use predicted y values for further predictions \n if self.model.lags or self.model.lag_target:\n X = self._add_lags(X, y=y, extrapolate=self.first_pred_modifier) \n\n # We start generating predictions from the first row as lags will already have been added to each sample\n start = 0\n else:\n # We start generating predictions from the point where we will have sufficient lag observations\n start = self.rows_per_pred\n \n if self.model.lag_target or prediction_periods > 1:\n # Get the predictions by walking forward over the data\n for i in range(start, len(X) + self.first_pred_modifier, prediction_periods): \n # For multi-step predictions we take in self.rows_per_pred rows of X to generate predictions for prediction_periods\n if prediction_periods > 1:\n batch_X = X.iloc[[i]]\n \n if not get_proba:\n # Get the prediction. \n pred = self.model.pipe.predict(batch_X)\n # Flatten the predictions for multi-step outputs and add to the list\n pred = pred.ravel().tolist()\n predictions += pred\n else:\n # Get the predicted probability for each sample \n proba = self.model.pipe.predict_proba(batch_X)\n proba = proba.reshape(-1, len(self.model.pipe.named_steps['estimator'].classes_))\n probabilities += proba.tolist()\n # For walk forward predictions with lag targets we use each prediction as input to the next prediction, with X values avaialble for future periods.\n else:\n batch_X = X.iloc[i-self.rows_per_pred : i] \n # Add lag observations\n batch_y = y.iloc[i-self.rows_per_pred : i]\n batch_X = self._add_lags(batch_X, y=batch_y, extrapolate=self.first_pred_modifier)\n\n # Get the prediction. We only get a prediction for the last sample in the batch, the remaining samples only being used to add lags.\n pred = self.model.pipe.predict(batch_X.iloc[[-1],:])\n\n # Add the prediction to the list. 
\n predictions.append(pred)\n \n # Add the prediction to y to be used as a lag target for the next prediction\n y.iloc[i - self.first_pred_modifier, 0] = pred\n\n # If probabilities need to be returned\n if get_proba:\n # Get the predicted probability for each sample \n probabilities.append(self.model.pipe.predict_proba(batch_X.iloc[[-1],:]))\n else:\n # Add lag observations to the samples if required\n if self.model.lags:\n X = self._add_lags(X, extrapolate=self.first_pred_modifier)\n\n # Get prediction for X\n predictions = self.model.pipe.predict(X)\n\n # If probabilities need to be returned\n if get_proba:\n # Get the predicted probability for each sample \n probabilities = self.model.pipe.predict_proba(X)\n \n # Set the number of placeholders needed in the response\n # These are samples for which predictions were not generated due to insufficient lag periods or for meeting multi-step prediction period requirements\n self.placeholders = self.rows_per_pred + self.diff_lags - self.first_pred_modifier\n\n # Transform probabilities to a readable string\n if get_proba:\n # Add the required number of placeholders at the start of the response list\n y = [\"\\x00\"] * self.placeholders\n \n # Truncate multi-step predictions if the (number of samples - self.rows_per_pred) is not a multiple of prediction_periods\n if prediction_periods > 1 and ((n_samples-self.rows_per_pred) % prediction_periods) > 0: \n probabilities = probabilities[:-len(probabilities)+(n_samples-self.rows_per_pred)]\n \n for a in probabilities:\n s = \"\"\n i = 0\n for b in a:\n s = s + \", {0}: {1:.3f}\".format(self.model.pipe.named_steps['estimator'].classes_[i], b)\n i += 1\n y.append(s[2:])\n\n # Prepare predictions\n else:\n if prediction_periods > 1:\n # Set the value to use for nulls\n null = np.NaN if is_numeric_dtype(np.array(predictions)) else \"\\x00\"\n\n # Truncate multi-step predictions if the (number of samples - self.placeholders) is not a multiple of prediction_periods\n if (n_samples-self.rows_per_pred) % prediction_periods > 0:\n predictions = predictions[:-len(predictions)+(n_samples-self.rows_per_pred)]\n\n # Add null values at the start of the response list to match the cardinality of the input from Qlik\n y = np.array(([null] * (self.rows_per_pred - self.first_pred_modifier)) + predictions)\n elif self.model.lag_target: \n # Remove actual values for which we did not generate predictions due to insufficient lags\n if is_numeric_dtype(y.iloc[:, 0].dtype):\n y.iloc[:self.placeholders - self.first_pred_modifier, 0] = np.NaN\n else:\n y.iloc[:self.placeholders - self.first_pred_modifier, 0] = \"\\x00\"\n # Flatten y to the expected 1D shape\n y = y.values.ravel()\n else:\n y = np.array(predictions)\n \n # Inverse transformations on the targets if required \n if variant != 'internal' and (self.model.scale_target or self.model.make_stationary):\n # Take out placeholder values before inverse transform of targets\n null_values = y[:self.rows_per_pred - self.first_pred_modifier] if prediction_periods > 1 or self.model.lag_target else []\n # Add placeholders for samples removed during differencing\n if self.model.make_stationary=='difference':\n null_values = np.append(null_values, np.repeat(null_values[0], self.diff_lags))\n y = y if len(null_values) == 0 else y[-len(predictions):]\n # Add untransformed lag values for differencing if required\n end = self.placeholders\n start = end - self.diff_lags\n y = y if y_orig is None else np.append(y_orig[start : end], y)\n\n # Apply the transformer to the test targets\n y 
= self.model.target_transformer.inverse_transform(y) \n\n # Remove lags used for making the series stationary in case of differencing\n if self.model.make_stationary == 'difference':\n y = y[self.diff_lags:]\n\n # Replace lags used for making the series stationary with nulls in case of differencing\n # if self.model.make_stationary == 'difference':\n #null = np.NaN if is_numeric_dtype(np.array(predictions)) else \"\\x00\"\n # y = np.append(np.array([null]*self.diff_lags), y[self.diff_lags:])\n \n # Add back the placeholders for lag values\n if len(null_values) > 0:\n y = np.append(null_values, y)\n \n if variant == 'internal':\n return y\n\n # Add predictions / probabilities to the response\n self.response['result'] = y\n\n # Reindex the response to reset to the original sort order\n self.response = self.response.reindex(self.original_index)\n \n if load_script:\n # If the function was called through the load script we return a Data Frame\n self._send_table_description(\"predict\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n return self.response\n \n # If the function was called through a chart expression we return a Series\n else:\n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n return self.response.loc[:,'result']", "def predict(self, instances):\r\n raise NotImplementedError", "def predict(self, num_samples = BATCH_SIZE, display_predictions = True):\n if self.model is None:\n raise Exception(\"Model is empty, either build or load it\")\n\n print(\"==> Prediction on model from\", self.model_dir)\n file_names, file_labels = self.get_sample_files(num_samples)\n self.predict_dataset = tf.data.Dataset.from_tensor_slices((file_names, file_labels))\n self.predict_dataset = self.predict_dataset.map(self.map_fn, num_parallel_calls=AUTOTUNE)\n self.predict_dataset = self.predict_dataset.batch(BATCH_SIZE)\n\n self.predictions = self.model.predict(self.predict_dataset)\n\n if display_predictions:\n self.display_predictions()", "def save_predictions(model, dataset, output_dir):\n preds = model.predict(dataset, verbose=1)\n preds = scipy.special.softmax(preds, 1) # Apply softmax\n with tf.io.gfile.GFile(os.path.join(output_dir, 'test_preds.pkl'), 'wb') as f:\n pickle.dump(preds, f)", "def predict(self, inputs):\n\n return self.model.predict(inputs)", "def predict_and_evaluate(config, workdir, ckpt_path=None):\n logging.info('Starting testing at %s', workdir)\n tf.io.gfile.makedirs(workdir)\n\n rng = jax.random.PRNGKey(config.seed)\n # Build input pipeline.\n rng, data_rng = jax.random.split(rng)\n data_rng = jax.random.fold_in(data_rng, jax.process_index())\n test_ds = []\n for split in config.dataset.test_splits:\n ds = input_pipeline.create_val_dataset(\n config.dataset, split, config.dataset.test_per_device_batch_size,\n config.dataset.test_pad_last_batch)\n test_ds.append(ds)\n\n # Initialize model.\n inputs = train_utils.get_init_inputs(test_ds[0])\n rng, model_rng = jax.random.split(rng)\n predict_config = models.TransformerConfig(**config.model.to_dict())\n predict_config = predict_config.replace(decode=True)\n model = models.Model(predict_config)\n state = train_utils.create_train_state(\n model, config, model_rng, inputs=inputs)\n\n writer = metric_writers.create_default_writer(\n workdir, just_logging=jax.process_index() > 0)\n\n # Set up checkpointing of the model and the input pipeline.\n checkpoint_dir = 
os.path.join(workdir, 'checkpoints')\n ckpt = checkpoint.MultihostCheckpoint(checkpoint_dir, max_to_keep=3)\n\n logging.info('Testing and evaluating checkpoint %s', ckpt_path)\n try:\n state = ckpt.restore(state, ckpt_path)\n except FileNotFoundError:\n state = ckpt.restore_or_initialize(state)\n step = int(state.step)\n\n p_pred_step = jax.pmap(\n functools.partial(predict_step, config=predict_config),\n axis_name='batch',\n static_broadcasted_argnums=(3,))\n p_init_cache = jax.pmap(\n functools.partial(init_cache, config=predict_config), axis_name='batch')\n\n # Distribute testing.\n state = flax_utils.replicate(state)\n with metric_writers.ensure_flushes(writer):\n test_metrics = {}\n for ds, split in zip(test_ds, config.dataset.test_splits):\n ds_metrics = evaluate_sequence_accuracy(p_pred_step, p_init_cache, state,\n ds, config, split, workdir,\n config.num_test_steps)\n ds_metrics = {f'{k}_{split}': v for k, v in ds_metrics.items()}\n test_metrics.update(ds_metrics)\n writer.write_scalars(step, test_metrics)", "def postprocess_model_outputs(self, predictions, expected):\n\n for key, val in predictions.items():\n predictions[key] = val.numpy()\n\n for key, val in expected.items():\n expected[key] = val.numpy()\n\n return predictions, expected", "def predict(self, X_test):\n\n # Predict Label 0\n i = 0\n X = X_test\n\n # Retrieve trained classifier for label 0\n clf = self.trained[i]\n\n # Make prediction\n y = clf.predict(X)\n result = np.zeros((X_test.shape[0], self.label_dim))\n\n result[:, i] = y\n\n # Concatenate result to X\n # X = sp.hstack([X, sp.csr_matrix(y)], format=\"csr\")\n\n # iterator = tqdm.trange(1, self.label_dim)\n iterator = range(1, self.label_dim)\n for i in iterator:\n # Predict Label i\n\n # Retrieve trained classifier for label i\n clf = self.trained[i]\n\n # Make prediction\n y = clf.predict(X)\n\n result[:, i] = y\n\n # Concatenate result to X\n # X = sp.hstack([X, sp.csr_matrix(y)], format=\"csr\")\n\n return result", "def predict(self, testloader, field=None):\n model_name = str(field).lower()\n\n assert field == HOME or field == AWAY, 'ERROR - model predict: WRONG model name. Give \"home\" or \"away\"'\n\n preds = {}\n\n for i, model in enumerate(self.models):\n if (model_name == HOME):\n # logger.info('> Calling Home Network')\n field_net = model.model.home_network\n elif (model_name == AWAY):\n # logger.info('> Calling Away Network')\n field_net = model.model.away_network\n else:\n raise ValueError('Model - predict: Wrong model name')\n\n model_preds = []\n with torch.no_grad():\n\n for x in testloader:\n x = torch.Tensor(x).to(self.device)\n out = field_net(x)\n\n out = out.squeeze()\n\n model_preds.append(out.item())\n\n preds[i] = model_preds\n\n return preds[i]" ]
[ "0.7163424", "0.69594693", "0.6906497", "0.6859476", "0.67500716", "0.6679552", "0.6597463", "0.654829", "0.65226644", "0.6456303", "0.643875", "0.6437849", "0.6430144", "0.6410656", "0.6405512", "0.6405512", "0.6370887", "0.6341422", "0.63307375", "0.6271287", "0.6262299", "0.6240893", "0.62325644", "0.62218654", "0.62156755", "0.6215209", "0.62057686", "0.619975", "0.6193283", "0.6188175", "0.6182176", "0.6181253", "0.6163572", "0.6162134", "0.6156047", "0.614601", "0.61432296", "0.6138808", "0.6131204", "0.61294997", "0.61189586", "0.6114997", "0.6110694", "0.6075934", "0.6071032", "0.6064739", "0.6062961", "0.60594016", "0.60593134", "0.60550773", "0.6049336", "0.6027752", "0.6022168", "0.6022107", "0.60118306", "0.60088384", "0.6006876", "0.5999732", "0.599522", "0.59908664", "0.5987105", "0.59767514", "0.5975781", "0.5964823", "0.5959551", "0.5958922", "0.5958591", "0.59579647", "0.5948274", "0.59462225", "0.594403", "0.5940711", "0.5935464", "0.5932445", "0.59323466", "0.59292895", "0.5928166", "0.5926356", "0.5925943", "0.59232163", "0.5922889", "0.59179926", "0.59178865", "0.5908666", "0.5906437", "0.58996207", "0.589621", "0.58924586", "0.5890479", "0.5887225", "0.5883389", "0.588304", "0.58798987", "0.58772606", "0.5871457", "0.58656967", "0.5864669", "0.5863566", "0.5863164", "0.5858163" ]
0.6127523
40
Compose the largest number out of a set of integers.
def largest_number(digits): res = "" while digits: max_digit = None for digit in digits: if max_digit is None or \ is_greater_or_equal_than(digit, max_digit): max_digit = digit res += max_digit digits.remove(max_digit) return res
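The helper is_greater_or_equal_than used by this record's document is not defined in the record itself. A minimal, self-contained sketch of the same idea follows, assuming the helper compares two digit strings by concatenation order and using a sort in place of the repeated selection above; the function name and example values are illustrative, not taken from the record.

from functools import cmp_to_key

def is_greater_or_equal_than(digit, other):
    # Assumed comparator (not shown in the record): `digit` goes first when
    # concatenating it before `other` yields a string that is not smaller.
    return digit + other >= other + digit

def compose_largest_number(digits):
    # Order the digit strings so every pairwise concatenation is maximized,
    # then join them; equivalent to the greedy selection in the record above.
    ordered = sorted(digits, key=cmp_to_key(
        lambda a, b: -1 if is_greater_or_equal_than(a, b) else 1))
    return "".join(ordered)

print(compose_largest_number(["21", "2", "9"]))  # prints "9221"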
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def largest_int(numbers):\n\n if numbers == []:\n return \n max_int = numbers[0]\n for number in numbers:\n if number > max_int:\n max_int = number\n \n return max_int", "def max_(lst: Iterable[int]) -> int:\n return reduce(lambda x, y: x if x > y else y, lst)", "def max_num(num_list):\n\n return max(num_list)", "def find_greatest_number(incoming_list: list):\n return max(incoming_list)", "def max(input: list[int]) -> int:\n if len(input) == 0:\n raise ValueError(\"max() arg is an empty List\")\n else:\n input.sort()\n return input[-1]", "def find_greatest_number(incoming_list):\n #magiclownumber= none\n #retval= magiclownumber\n #for value in incoming_list:\n #if not retval:\n #retval = value\n # if value> retvale\n #retval= value\n #return retval\n greatest_number = max(incoming_list)\n return greatest_number", "def largestNumber(self, nums): \n def string_comp(item1, item2):\n return 1 if str(item1) + str(item2) < str(item2) + str(item1) else -1\n res_list = sorted(nums, key=cmp_to_key(string_comp))\n\n # Catch edge case where list of 0s will produce \"000..\" instead of a single \"0\"\n if set(res_list) == {0}:\n return \"0\"\n return \"\".join([str(i) for i in res_list])", "def question_24(list_num: int) -> int:\n return max(list_num, key=list_num.count)", "def find_greatest_number(incoming_list):\n retval = max(incoming_list)\n return retval", "def list_max(numbers):\n maxnum = 0\n \n for num in numbers[0:]:\n if num > maxnum:\n maxnum = num\n return maxnum", "def max(input: list[int]) -> int:\n if len(input) == 0:\n raise ValueError(\"max() arg is an empty List\")\n most: int = input[0]\n n: int = 1\n while n < len(input):\n if input[n] > most:\n most = input[n]\n n += 1 \n return most", "def find_largest_element(num_1, num_2, num_3):\n\n return max([num_1, num_2, num_3])", "def maxNumber(x):\n maxVal = x[0]\n for num in x:\n if maxVal <num:\n maxVal=num\n return maxVal", "def find_greatest_number(incoming_list):\n #return_value = max(incoming_list)\n #return return_value\n\n MAGIC_LOW_NUMBER = None\n retval = MAGIC_LOW_NUMBER\n\n # 1,2,3,4,5,1\n # MAGIC_LOW_NUMBER, 1 ->STORE 1\n #1 , 2 ->STORE 2\n #2, , 3 ->STORE 3\n #3, , 4 ->STORE 4 \n #4, , 5 ->STORE 5\n #5, , 1 ->??? 
nothing \n for value in incoming_list:\n if not retval:\n retval = value\n if value > retval:\n retval = value", "def max(input: list[int]) -> int:\n i = 0\n if len(input) == 0:\n raise ValueError(\"max() arg is an empty List\")\n\n else:\n while i < len(input):\n j = i + 1\n while j < len(input):\n if input[i] > input[j]:\n if j == len(input) - 1:\n if input[i] >= input[len(input) - 1]:\n return input[i]\n j += 1\n else:\n j += len(input)\n i += 1\n return input[len(input) - 1]", "def largest_sum(data: Iterator[str]) -> int:\n numbers = parse_input(data)\n return max(n.magnitude for n in possible_sums(numbers))", "def largest_product(digits, size):\n # Why does a blank set of digits have a maximum product of 1?\n slice_list = slices(digits, size)\n def mult_reduce(items):\n total = 1\n for i in items:\n total *= i\n return total\n slice_list = [mult_reduce(l) for l in slice_list]\n return max(slice_list)", "def largest_number(a):\n\n array = new_quick_sort(a)\n\n largest_number = \"\"\n\n for number in array:\n largest_number += str(number)\n\n return largest_number", "def max(self, num_list):\n try:\n max = int(num_list[0])\n\n for number in num_list:\n try:\n if number > max:\n max = number\n except Exception as e:\n print(\"Error\", e)\n\n except Exception as e:\n print(\"Error:\", e)\n\n return max", "def maxi(a, b):\n return max(a, b)", "def greatest_difference(num_list):", "def highest_product(list_of_ints):\n\t# Initialize lists of highest positive integers, # max len 3\n\t#\t\t\t\t lowest positive integers # max len 2\n\t# \t\t\t\t highest negative integers, # max len 2\n\t# lowest negative integers # max len 3\n\t# \t Boolean for zero exists in list\n\thighest_pos = []\n\thighest_neg = []\n\tlowest_neg = []\n\tlowest_pos = []\n\tzero_int_exists = False\n\n\t# Loop thru input list of integers:\n\t# \tBuild lists initialized above, and set zero_int_exists\n\n\tfor i in range(len(list_of_ints)):\n\t\t# Process zero\n\t\tif list_of_ints[i] == 0: \n\t\t\tzero_int_exists = True\n\n\t\t# Process positive integer\n\t\telif list_of_ints[i] > 0:\n\t\t\tif len(highest_pos) < 3:\n\t\t\t\thighest_pos.append(list_of_ints[i])\n\t\t\telif list_of_ints[i] > min(highest_pos):\n\t\t\t\thighest_pos.remove(min(highest_pos))\n\t\t\t\thighest_pos.append(list_of_ints[i])\n\n\t\t\tif len(lowest_pos) < 2:\n\t\t\t\tlowest_pos.append(list_of_ints[i])\n\t\t\telif list_of_ints[i] < max(lowest_pos):\n\t\t\t\tlowest_pos.remove(max(lowest_pos))\n\t\t\t\tlowest_pos.append(list_of_ints[i])\n\t\t# Process negative integer\n\t\telse:\n\t\t\tif len(lowest_neg) < 2:\n\t\t\t\tlowest_neg.append(list_of_ints[i])\n\t\t\telse:\n\t\t\t\tif list_of_ints[i] < max(lowest_neg):\n\t\t\t\t\tlowest_neg.remove(max(lowest_neg))\n\t\t\t\t\tlowest_neg.append(list_of_ints[i])\n\n\t\t\tif len(highest_neg) < 3:\n\t\t\t\thighest_neg.append(list_of_ints[i])\n\t\t\telse:\n\t\t\t\tif list_of_ints[i] > min(highest_neg):\n\t\t\t\t\thighest_neg.remove(min(highest_neg))\n\t\t\t\t\thighest_neg.append(list_of_ints[i]) \n\n\t# Sort lists\n\thighest_pos.sort()\n\tlowest_pos.sort()\n\tlowest_neg.sort()\n\thighest_neg.sort()\n\n\t# Print input list, sub-lists\n\tprint(\"\\n\", list_of_ints)\n\tprint(\"zero_int_exists, highest/lowest pos, highest/lowest neg: \", \n\t\t\"\\n\", zero_int_exists, highest_pos, lowest_pos, highest_neg, lowest_neg)\n\n\t# Build high product candidates list\n\tpossible_high_prods = []\n\n\t# Add positive products to high product candidates list\n\tif len(highest_pos) == 3:\n\t\tpossible_high_prods.append(highest_pos[0] * 
highest_pos[1] * highest_pos[2])\n\n\tif len(lowest_neg) == 2 and len(highest_pos) >= 1:\n\t\tpossible_high_prods.append(lowest_neg[0] * lowest_neg[1] * highest_pos[-1])\n\n\t# If no high product candidates, append zero and negative products to high product candidates\n\tif len(possible_high_prods) == 0:\n\t\tif zero_int_exists:\n\t\t\tpossible_high_prods.append(0)\n\t\telse:\n\t\t\tif len(lowest_pos) == 0:\n\t\t\t\tpossible_high_prods.append(highest_neg[0] * highest_neg[1] * highest_neg[2])\n\t\t\telse:\n\t\t\t\tpossible_high_prods.append(lowest_pos[0] * lowest_pos[1] * lowest_neg[-1])\n\t\n\tprint(possible_high_prods)\n\treturn max(possible_high_prods)", "def largest(n,xs):\n return sorted(xs, reverse = True)[:n][::-1]", "def find_largest_5_digit_number(digits):\r\n return max(int(digits[i:i + 5]) for i, v in enumerate(digits))", "def max_in_list(list):\n x=list[0] #set x be the first number in the list\n for i in range(0,len(list)):#go over the number in the list\n if x<=list[i]: #if the second one is bigger than the first\n x=list[i] #assign x to the bigger one\n else:\n continue#repeat until find the max number\n return x", "def max_val(t): \n \n result = []\n if isinstance(t, int):\n result.append(t)\n return result\n elif isinstance (t, (tuple,list)):\n def update_result(t):\n for item in t:\n if isinstance(item, (tuple,list)):\n update_result(item)\n else:\n result.append(item)\n update_result(t)\n return max(result)", "def find_largest_number_in_list(self, list_with_numbers):\n return 0", "def maximumProduct1(self, nums: List[int]) -> int:\n s_nums = sorted(nums, reverse=True)\n return max(s_nums[0] * s_nums[1] * s_nums[2], s_nums[0] * s_nums[-1] * s_nums[-2])", "def _max_factor(number, factors):\n return max(n for n in factors if n <= number)", "def maximum(self):\n return max(self.numbers)", "def max_num_in_list(a_list):\n max_number = max(a_list)\n return max_number", "def four():\r\n \r\n i = 999\r\n j = i\r\n largest = 0\r\n \r\n while i > 0:\r\n while j > 0:\r\n number = str(i * j)\r\n forward = str(number)\r\n reverse = \"\"\r\n for char in number:\r\n reverse = char + reverse\r\n if forward == reverse:\r\n if largest < i * j:\r\n largest = i * j\r\n break\r\n else:\r\n j = j - 1\r\n i = i - 1\r\n j = i\r\n return largest", "def r_max(nxs):\n largest = None\n for i,e in enumerate(nxs):\n if type(e) == type([]):\n val = r_max(e)\n else:\n val = e\n\n if i == 0 or val > largest:\n largest = val\n\n return largest", "def get_max(num_one, num_two):\n temp_a = int(str(num_one) + str(num_two))\n temp_b = int(str(num_two) + str(num_one))\n if temp_a >= temp_b:\n return num_one\n else:\n return num_two", "def largest(*args):\r\n if len(args) == 2:\r\n a, b = args\r\n return switch(a > b, a, b)\r\n else:\r\n return max(stack(*args), axis=0)", "def fuction_call(chest):\n\n for i in chest:\n max_i = maximum(chest,i)\n if max_i >= 2:\n print(\"The maximum size of a set Matyoshka Dolls with outermost doll\",i,\"is\",max_i)", "def maximum(iterable):\n max_item = 0\n if type(iterable) == list or type(iterable) == tuple:\n for i in iterable:\n if type(i)==int:\n if max_item<i:\n max_item = i\n else:\n max_item = -999\n break\n else:\n\n max_item=-999\n return max_item", "def findMaxProduct(n):\n large = 0\n for i in range(len(s)):\n p = 1\n number = s[i:i+n]\n for iteration in range(len(number)):\n h = number[iteration]\n p = p * int(h)\n if p > large:\n large = p\n\n \n return large", "def highest_product_2(arr):\n\n # make a list to store the highest three ints, initializing to first 
three\n maxes = [arr[0], arr[1], arr[2]]\n\n # find the lowest of the highest three ints\n lowest_max = min(maxes)\n\n # go through the rest of the list to check for higher values\n for num in arr[3:]:\n # if any value is higher than the lowest max, update maxes list\n if num > lowest_max:\n # remove the old maximum\n maxes.remove(lowest_max)\n # add the new one\n maxes.append(num)\n # recalculate the lowest max for continued comparison\n lowest_max = min(maxes)\n\n return maxes[0] * maxes[1] * maxes[2]", "def max(scores):\n return __builtin__.max(scores) if len(scores) else 0", "def my_max(in_list):\n biggest = in_list[0]\n for l in in_list:\n if l > biggest:\n biggest = l\n return biggest", "def largestPrimeFactor(number):\n factorlist = primeFactors(number)\n maximumfactor = max(factorlist)\n return maximumfactor", "def largest_non_adjacent_sum(ints):\n # Initialize the \"with\" and \"without\" last value\n w = 0\n wo = 0\n\n for val in ints:\n # Whatever is higher, that's your highest possible without `val`\n next_wo = max(w, wo)\n\n # \"Without last\" added to val is highest non-adjacent sum\n w = wo + val\n\n # Set the next \"Without last\"\n wo = next_wo\n\n # Return the max of thet two\n return max(w, wo)", "def max_list_iter(int_list): # must use iteration not recursion\n if int_list == []:\n return None\n elif int_list == None:\n raise ValueError\n max_int = int_list[0]\n \n for i in int_list:\n if i > max_int:\n max_int = i\n return max_int", "def max_pairwise_product_sort(numbers):\n sorted_list = sorted(numbers)\n ans = sorted_list[-1]*sorted_list[-2]\n return ans", "def highest_product(arr):\n\n product = 1\n\n for i in range(3):\n # find the max value in the list, get the index, pop it, and mulitply\n product *= arr.pop(arr.index(max(arr)))\n\n return product", "def max_list_iter(int_list): # must use iteration not recursion\n if int_list == None: # error handling\n raise ValueError\n elif len(int_list) == 0: # when the list is empty\n return None\n else:\n max_num = int_list[0]\n for num in int_list:\n if num > max_num:\n max_num = num\n return max_num", "def main():\n\n s = set()\n\n while True:\n n = input('Enter a number: ')\n if n == -99:\n break\n\n s.add(n)\n\n l = list(s)\n\n if len(l) < 2:\n print 'sorry but the list is too small'\n exit(1)\n\n l.sort()\n print 'The second largest number is', l[-2]", "def maximum(some_list):\n return max(some_list)", "def argmax(x):\n def op(a, b):\n comp = (a[1] > b[1])\n return comp.if_else(a[0], b[0]), comp.if_else(a[1], b[1])\n return tree_reduce(op, enumerate(x))[0]", "def argmax(x):\n def op(a, b):\n comp = (a[1] > b[1])\n return comp.if_else(a[0], b[0]), comp.if_else(a[1], b[1])\n return tree_reduce(op, enumerate(x))[0]", "def get_max_combination(total_cuts):\n max_pieces = 0\n for i in range(total_cuts):\n result = i * (total_cuts - i)\n if result > max_pieces:\n max_pieces = result\n print(max_pieces)", "def v6_multimax(iterable):\n maximums = []\n for item in iterable:\n if not maximums or maximums[0] == item:\n maximums.append(item)\n elif item > maximums[0]:\n maximums = [item]\n return maximums", "def get_big_joker_value(deck: List[int]) -> int:\n return max(deck)", "def v3_multimax(iterable):\n max_item = None\n for item in iterable:\n if max_item is None or item > max_item:\n max_item = item\n return [\n item\n for item in iterable\n if item == max_item\n ]", "def get_max_num_onsets():\r\n \"\"\" based on the numbers above, should equal to 932945... 
\"\"\"\r\n c1 = len(gen_onset_c1())\r\n c2 = len(gen_onset_c2())\r\n c3 = len(gen_onset_c3_c4())\r\n c4 = len(gen_onset_c3_c4())\r\n temp = c1\r\n temp = temp + ( c1 * c2 )\r\n temp = temp + ( c1 * c3 )\r\n temp = temp + ( c1 * c2 * c3 )\r\n temp = temp + ( c1 * c3 * c4 )\r\n temp = temp + ( c1 * c2 * c3 * c4 )\r\n return temp", "def get_min_max(ints):\n if len(ints) == 0:\n return None, None\n \n min_e = ints[0]\n max_e = ints[-1]\n for e in ints:\n if isinstance(e, int) == False: # if the list includes non-integer number, do not find min, max \n return None,None\n if e < min_e:\n min_e = e\n if e > max_e:\n max_e = e\n return min_e, max_e", "def max():\n return KeeperOfMinOrMax(int.__lt__)", "def return_max(lst, highest=None):\n if highest is None and len(lst) > 0:\n highest = lst[0]\n if len(lst) <= 1:\n return highest\n highest = max(highest, lst[0])\n return return_max(lst[1:], highest)", "def find_max_tidy_num(s_number):\n\n len_input = len(s_number) - 1\n\n if len_input == 0:\n return s_number\n\n for i in range(0, len_input):\n if int(s_number[i]) > int(s_number[i+1]):\n\n final_str = '9' * (len_input - i)\n s_number = s_number[:(i+1)]\n\n return ''.join([find_max_tidy_num(str(int(s_number)-1)), final_str])\n\n return s_number", "def findMaximal(freqSet):", "def get_min_max(nums):\n assert(type(nums) == list), \"nums has to be a list\"\n assert(len(nums) > 0), \"get_min_max() arg is an empty sequence\"\n min_ = nums[0]\n max_ = nums[0]\n for n in nums:\n assert(type(n) == int), \"numbers in the list have to be an integer\"\n if n < min_:\n min_ = n\n if n > max_:\n max_ = n\n return (min_, max_)", "def maxi(a,b):\n\tif a > b: \n\t\treturn a\n\treturn b", "def get_max_salary(n, numbers):\n temp_n = n\n result = \"\"\n while temp_n != 0:\n max_num = numbers[0]\n temp_index = 0\n for i in range(0, n):\n max_num = get_max(max_num, numbers[i])\n if(max_num == numbers[i]):\n temp_index = i\n result += str(max_num)\n numbers[temp_index] = 0\n temp_n -= 1\n print(result)", "def find_max_sum(triangle):\n while len(triangle) > 1:\n _reduce_triangle(triangle)\n return triangle[0][0]", "def get_max_sum4(a):\n return max(get_max_sum2(a), 0)", "def max_val(t): \n maxVal = False\n \n def helper(obj):\n nonlocal maxVal\n for el in obj:\n if isinstance(el, int):\n if maxVal == False or maxVal < el:\n maxVal = el\n else:\n helper(el)\n \n helper(t)\n return maxVal", "def get_min_max(ints):\n current_max = None\n current_min = None\n\n if (len(ints) == 0) or (ints is None):\n return tuple([current_min, current_max])\n\n for i, n in enumerate(ints):\n if i == 0:\n current_max = n\n current_min = n\n else:\n if n > current_max:\n current_max = n\n elif n < current_min:\n current_min = n\n\n return tuple([current_min, current_max])", "def max_product(s):\n # if s == []:\n # return 1\n # elif s[0] in s[2:]:\n # products = [s[0] ** s.count(s[0])]\n # else:\n # products = [s[0] * max(s[2:])]\n # return max(products)\n if s == []:\n return 1\n # elif len(s) == 1:\n # return s[0]\n else:\n return max(s[0] * max_product(s[2:]), max_product(s[1:]))", "def max_pairwise_product_fast(numbers):\n num_list = numbers.copy()\n max_num_1 = max(num_list)\n num_list.remove(max_num_1)\n max_num_2 = max(num_list)\n ans = max_num_1*max_num_2\n return ans", "def test_max_integer(self):\n self.assertEqual(max_integer([1, 2, 3, 4, 5]), 5)\n self.assertEqual(max_integer([4, 6, 2, 10, 1]), 10)", "def second_largest(values: List[int]) -> int:\n try:\n return sorted(set(values))[-2]\n except IndexError:\n raise 
ValueError(\"second_largest() needs at least two distinct values\")", "def maxn(a,b):\n\n if a>b:\n return a\n else:\n return b", "def my_max(*args):\n def sorter(sequence):\n \"\"\"\n This function find max in given sequence of simple numbers\n \"\"\"\n def bubble_sort(a):\n \"\"\"\n This function sort the list\n \"\"\"\n for i in reversed(range(len(a))):\n for j in range(1, i + 1):\n if a[j-1] > a[j]:\n a[j], a[j-1] = a[j-1], a[j]\n return a\n\n listed_seq = list(sequence)\n for number in listed_seq:\n if not isinstance(number, int):\n raise ValueError(\"Can't find max, wrong data format\")\n return bubble_sort(listed_seq)[-1]\n\n if not args:\n raise ValueError(\"Can't find max, no data given\")\n if len(args) == 1:\n thing = args[0]\n if isinstance(thing, (list, tuple)):\n return sorter(thing)\n if isinstance(thing, int):\n return thing\n raise ValueError(\"Can't find max, wrong data format\")\n return sorter(args)", "def max_list_iter(int_list: Optional[List]) -> Optional[int]:\r\n if int_list is None:\r\n raise ValueError\r\n elif len(int_list) == 0:\r\n return None\r\n elif len(int_list) == 1:\r\n return int_list[0]\r\n else:\r\n maxVal = int_list[0]\r\n for value in int_list:\r\n if value > maxVal:\r\n maxVal = value\r\n return value", "def problem41():\n for i in range(len(PANDIGITAL), 1, -1):\n cur_max = 0\n for p in itertools.permutations(PANDIGITAL[:i]):\n n = int(\"\".join(p))\n if pelib.is_prime(n) and n > cur_max:\n cur_max = n\n\n if cur_max > 0:\n return cur_max", "def v7_multimax(iterable):\n iterable = list(iterable)\n max_item = max(iterable, default=None)\n return [\n item\n for item in iterable\n if item == max_item\n ]", "def largest_item(list):\n pass", "def max_list_iter(int_list): # must use iteration not recursion\n if int_list == None:\n raise ValueError(\"Must be list\")\n else:\n if len(int_list) < 1:\n return None\n max = int_list[0]\n for val in int_list:\n if val > max:\n max = val\n return max", "def get_min_max(ints):\r\n if len(ints) == 0:\r\n return None\r\n max = ints[0]\r\n min = ints[0]\r\n\r\n for int in ints:\r\n if int < min:\r\n min = int\r\n if int > max:\r\n max = int\r\n \r\n return min, max", "def largest_element(a):\n\n return None", "def v0_multimax(iterable):\n max_item = None\n for item in iterable:\n if max_item is None or item > max_item:\n max_item = item\n items = []\n for item in iterable:\n if item == max_item:\n items.append(item)\n return items", "def solution(resources, args):\n largest_prime_factor = 1\n number = args.number\n prime_generator = primes.get_prime_generator()\n\n while number > 1:\n prime = next(prime_generator)\n if number % prime == 0:\n number /= prime\n largest_prime_factor = prime\n\n if largest_prime_factor == 1:\n largest_prime_factor = args.number\n\n return largest_prime_factor", "def max(x):\n pass", "def get_min_max(ints):\n if not ints:\n return\n max = ints[0]\n min = ints[0]\n\n\n for i in ints:\n if i > max:\n max = i\n if i < min:\n min = i\n return (min, max)", "def max_val(t):\n # Your code here\n\n def openItem(term):\n newList = []\n\n for item in term:\n if type(item) == int:\n newList.append(item)\n\n else:\n newList += openItem(item)\n\n return newList\n\n sortingList = openItem(t)\n\n maximum = sortingList[0]\n\n for item in sortingList:\n if maximum < item:\n maximum = item\n\n return maximum", "def maximumProduct2(self, nums: List[int]) -> int:\n big_1 = big_2 = big_3 = -float(\"inf\")\n small_1 = small_2 = float(\"inf\")\n for n in nums:\n if n >= big_1:\n big_1, big_2, big_3 = n, big_1, 
big_2\n elif n >= big_2:\n big_2, big_3 = n, big_2\n elif n >= big_3:\n big_3 = n\n \n if n <= small_1:\n small_1, small_2 = n, small_1\n elif n <= small_2:\n small_2 = n\n \n return max(big_1 * big_2 * big_3, big_1 * small_1 * small_2)", "def allmax(iterable, key=None):\n result, maxcal = [], None\n key = key or (lambda x: x)\n for x in iterable:\n xval = key(x)\n if not result or xval > maxval:\n result, maxval = [x], xval\n elif xval == maxval:\n result.append(x)\n if len(result) == 1:\n result = result[0]\n return result", "def get_highest_seat_id(seat_ids):\n\n return max(seat_ids)", "def max3(stdin):\n # return max(map(float, stdin.split()))\n return float(run(\"./max3\", [], stdin)[1])", "def r_max(nxs):\n largest = None\n first_time = True\n for e in nxs:\n if type(e) == type([]):\n val = r_max(e)\n else:\n val = e\n\n if first_time or val > largest:\n largest = val\n first_time = False\n\n return largest", "def get_max_run(run):\n max = 0\n max_i = 0\n for i in range(800, 900):\n if int(run[i]) > int(max):\n max = run[i]\n max_i = i\n return max, max_i", "def most_abundant(ids, seqs='ignored'):\r\n id_groups = [len(groups[seq_to_group[i]]) for i in ids]\r\n return ids[argmax(id_groups)]", "def max(self, fn=lambda x: x):\n return _(max(*self._, key=fn))", "def max_list_iter(int_list): # must use iteration not recursion\n max = \"blank\"\n if int_list is None:\n raise ValueError\n elif len(int_list) == 0:\n return None\n for i in int_list:\n if max == \"blank\":\n max = i\n elif i > max:\n max = i\n return max", "def nmax(num, T, nwords):\n top_n = T.argsort()[-num:][::-1]\n for n in top_n:\n nwords.append(data['all_words'][n])\n return nwords", "def nmax(num, T, nwords):\n top_n = T.argsort()[-num:][::-1]\n for n in top_n:\n nwords.append(data['all_words'][n])\n return nwords", "def nmax(num, T, nwords):\n values = []\n top_n = T.argsort()[-num:][::-1]\n for n in top_n:\n nwords.append(((data['all_words'][n])))\n values.append(round(T[n],3))\n return nwords", "def multimax(iterable, key=lambda x: x):\n\n maxes = []\n largest = None\n for element in iterable:\n if largest is None or key(element) > key(largest):\n maxes = [element]\n largest = element\n elif key(element) == key(largest):\n maxes.append(element)\n return maxes", "def argmax(sequence):\r\n\r\n import operator\r\n index, value = max(enumerate(sequence), key=operator.itemgetter(1))\r\n\r\n return index" ]
[ "0.7238424", "0.68917364", "0.6735405", "0.66264164", "0.66255736", "0.6614427", "0.66113424", "0.65993583", "0.6570257", "0.6552687", "0.65461093", "0.65238184", "0.64182717", "0.63805205", "0.63039565", "0.6295779", "0.62524813", "0.6228118", "0.6196727", "0.61448497", "0.6130623", "0.6117291", "0.60730374", "0.6063948", "0.6002691", "0.5999068", "0.5992838", "0.5965339", "0.5955259", "0.5923953", "0.5910738", "0.5883439", "0.5850895", "0.584759", "0.5834415", "0.58312804", "0.58267516", "0.5811956", "0.58103985", "0.58023214", "0.57993406", "0.57965", "0.5780224", "0.57713234", "0.57699716", "0.5766857", "0.57666296", "0.5753352", "0.57525694", "0.57521755", "0.57521755", "0.5751405", "0.57511014", "0.5743666", "0.5743331", "0.57398784", "0.5738172", "0.57294744", "0.5718056", "0.5716913", "0.57158893", "0.5708848", "0.5701768", "0.57012486", "0.5687569", "0.56838703", "0.5682839", "0.568168", "0.5664108", "0.5663767", "0.5661589", "0.5657534", "0.5651706", "0.5641533", "0.56404275", "0.563531", "0.5629351", "0.5615578", "0.56028336", "0.5592997", "0.5584714", "0.5572069", "0.55655944", "0.55635446", "0.55583596", "0.5555175", "0.55545044", "0.5552045", "0.55520016", "0.55491394", "0.55469674", "0.5544446", "0.5523525", "0.55170614", "0.5516624", "0.55043536", "0.55043536", "0.54919314", "0.54892516", "0.54879713" ]
0.6804858
2
A function to build the neural network of the required size using the weights and biases provided. Instead of doing this, can we use a simple constructor method and initialize them after construction? That would be more sensible and faster.
def neural_net(self, layers): model = nn.Sequential() for l in range(0, len(layers) - 1): model.add_module("layer_"+str(l), nn.Linear(layers[l],layers[l+1], bias=True)) if l != len(layers) - 2: model.add_module("tanh_"+str(l), nn.Tanh()) return model
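The query above asks whether the parameters can be assigned after the model is constructed. A minimal sketch of that pattern follows, assuming PyTorch; the layer sizes and the weight/bias tensors below are illustrative placeholders, not values from this record.

import torch
import torch.nn as nn

layers = [2, 16, 1]  # illustrative layer sizes
model = nn.Sequential()
for l in range(len(layers) - 1):
    model.add_module("layer_" + str(l), nn.Linear(layers[l], layers[l + 1], bias=True))
    if l != len(layers) - 2:
        model.add_module("tanh_" + str(l), nn.Tanh())

# Hypothetical pre-computed parameters; in practice they would be supplied by the caller.
weights = [torch.randn(16, 2), torch.randn(1, 16)]  # nn.Linear stores weight as (out, in)
biases = [torch.randn(16), torch.randn(1)]

# Copy the provided parameters into the already-constructed model.
with torch.no_grad():
    linears = [m for m in model if isinstance(m, nn.Linear)]
    for linear, w, b in zip(linears, weights, biases):
        linear.weight.copy_(w)
        linear.bias.copy_(b)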
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, sizes):\r\n self.num_layers = len(sizes)\r\n self.sizes = sizes\r\n self.biases = [np.random.randn(y, 1) for y in sizes[1:]]\r\n self.weights = [np.random.randn(y, x)\r\n for x, y in zip(sizes[:-1], sizes[1:])]", "def __init__(self, sizes):\n self.num_layers = len(sizes)\n self.sizes = sizes\n self.biases = [np.random.randn(y, 1) for y in sizes[1:]]\n self.weights = [np.random.randn(y, x) for x, y in zip(sizes[:-1], sizes[1:])]", "def __init__(self, sizes):\r\n self.num_layers = len(sizes)\r\n self.sizes = sizes\r\n\r\n self.biases = [np.random.randn(y, 1) for y in sizes[1:]]\r\n self.weights = [np.random.randn(y, x)\r\n for x, y in zip(sizes[:-1], sizes[1:])]", "def __init__(self, sizes):\n self.num_layers = len(sizes)\n self.sizes = sizes\n self.biases = [cp.array(cp.random.randn(y, 1)) for y in sizes[1:]]\n self.weights = [cp.array(cp.random.randn(y, x))\n for x, y in zip(sizes[:-1], sizes[1:])]", "def __init__(self, sizes: list):\n self.num_layers = len(sizes)\n self.sizes = sizes\n self.biases = [np.random.randn(y, 1) for y in sizes[1:]]\n self.weights = [np.random.randn(y, x) for x, y in zip(sizes[:-1], sizes[1:])]", "def __init__(self, netSize):\n\t\t\n\t\t# TRY THIS FOR RANDOM!\n\t\t#\n\t\t#\n\t\t#\n\t\t\n\t\tself.biases = [self.randomArray(i, 1) for i in netSize[1:]] # Biases do not exist for the first layer ! Those are inputs.\n\t\tself.netSize = netSize\n\t\t#Initialize Weights\n\t\t#This initializes the weights for each layer based on the size. The number of rows should be\n\t\t#the number of neurons for the current, and the number of columns should be the same as the number of neurons\n\t\t#in the next layer. There are no weights for the last layer. That's the output layer.\n\t\tself.weights \t\t = [self.randomArray(i, j) for i, j in zip(netSize[:-1], netSize[1:]) ]", "def __init__(self, input_size, neurons):\n super().__init__()\n self.input_size = input_size\n self.neurons = neurons\n self.params[\"w\"] = np.random.randn(input_size, neurons)\n self.params[\"b\"] = np.random.randn(1, neurons)\n self.grads = {}", "def __init__(self, layerNeurons, numberOfLayers, initialWeights = None, lowerBound = None, upperBound = None):\r\n \r\n # Ensure that there is at-least one input and one output layer in the network\r\n assert len(layerNeurons) > 1, \"At least one input layer and one output layer is needed\"\r\n \r\n # Get the total number of weights needed in the network\r\n totalWeightCount = NeuralNetwork.getSizeOfWeightVector(layerNeurons)*numberOfLayers\r\n \r\n # Initialise the weights with the initialiser or random values\r\n if initialWeights is None:\r\n if lowerBound is None:\r\n lowerBound=-1/np.sqrt(layerNeurons[0])\r\n if upperBound is None:\r\n upperBound=1/np.sqrt(layerNeurons[0])\r\n self.weights = np.random.uniform(lowerBound, upperBound, totalWeightCount)\r\n else:\r\n assert initialWeights.size == totalWeightCount, (\"Length of initial weight matrix incorrect. 
You need \"+str(totalWeightCount)+\" weights\")\r\n self.weights = initialWeights.view()\r\n \r\n self.weights.shape = (numberOfLayers, -1)\r\n # create an empty array of layers\r\n self.layers = []\r\n layerBlockStart = 0\r\n \r\n for layerInputDimention, layerOutputDimention in zip(layerNeurons, layerNeurons[1:]):\r\n # initialise each layer with its input and output dimentions and bi-directional pointers to the relivant weights\r\n layerBlockEnd = layerBlockStart+(layerInputDimention*layerOutputDimention)\r\n layerBiasEnd = layerBlockEnd+layerOutputDimention\r\n newLayer = batchNetworkLayer(layerInputDimention, layerOutputDimention, numberOfLayers, \r\n self.weights[..., :, layerBlockStart:layerBlockEnd], \r\n self.weights[..., :, layerBlockEnd:layerBiasEnd])\r\n self.layers.append(newLayer)\r\n \r\n layerBlockStart = layerBiasEnd", "def __init__(self, sizes, cost=CrossEntropyCost):\n self.num_layers = len(sizes)\n self.sizes = sizes\n self.default_weight_initializer()\n self.cost=cost", "def __init__(self, sizes):\n self.sizes = sizes\n self.biases = [random_matrix(1, y)[0] for y in sizes[1:]]\n self.weights = [random_matrix(y, x)\n for x, y in zip(sizes[:-1], sizes[1:])]\n\n self.weighted_layer = []\n self.activations = []", "def __init__(self,\n input_size,\n output_size,\n hidden_size=[50, 50],\n weights_initializer=tf.initializers.glorot_uniform(),\n bias_initializer=tf.initializers.zeros(),\n optimizer=tf.optimizers.Adam,\n **optimizer_kwargs):\n self.input_size = input_size\n self.output_size = output_size\n self.hidden_size = hidden_size\n\n np.random.seed(41)\n\n self.initialize_weights(weights_initializer, bias_initializer)\n self.optimizer = optimizer(**optimizer_kwargs)", "def __init__(self, input_size, hidden_sizes, output_size, std=1e-4, bstd=1e-4):\r\n num_hidden_layers = len(hidden_sizes)\r\n \r\n # initialize weight matrices\r\n self.weights = []\r\n if num_hidden_layers > 0:\r\n for i in xrange(num_hidden_layers):\r\n if i == 0:\r\n self.weights.append(std * np.random.randn(input_size, hidden_sizes[0]))\r\n else:\r\n self.weights.append(std * np.random.randn(hidden_sizes[i-1], hidden_sizes[i]))\r\n self.weights.append(std * np.random.randn(hidden_sizes[-1], output_size))\r\n else:\r\n self.weights.append(std * np.random.randn(input_size, output_size))\r\n \r\n # initialize bias vectors\r\n self.biases = []\r\n for i in xrange(num_hidden_layers):\r\n self.biases.append(bstd * np.random.randn(hidden_sizes[i]))\r\n self.biases.append(bstd * np.random.randn(output_size))", "def __init__(self, layerNeurons, initialWeights = None, layerTypes=None, **kwargs):\r\n \r\n # Ensure that there is at-least one input and one output layer in the network\r\n assert len(layerNeurons)>1, \"At least one input layer and one output layer is needed\"\r\n \r\n # Get the total number of weights needed in the network\r\n totalWeightCount = NeuralNetwork.getSizeOfWeightVector(layerNeurons)\r\n \r\n # Initialise the weights with the initializer or random values\r\n if initialWeights is None:\r\n self.weights = np.random.uniform(-1/np.sqrt(layerNeurons[0]), 1/np.sqrt(layerNeurons[0]), totalWeightCount)\r\n else:\r\n assert len(initialWeights) == totalWeightCount, (\"Length of initial weight matrix incorrect. 
You need \"+str(totalWeightCount)+\" weights\")\r\n self.weights = np.array(initialWeights, dtype = np.float64) \r\n \r\n # create an empty array of layers\r\n self.layers = []\r\n layerBlockStart = 0\r\n \r\n if layerTypes is None or len(layerTypes)<(len(layerNeurons)-1):\r\n layerTypes=[NetworkLayer]*(len(layerNeurons)-1)\r\n \r\n for layerInputDimention, layerOutputDimention, layerType in zip(layerNeurons, layerNeurons[1:], layerTypes):\r\n # initialise each layer with its input and output dimentions and bi-directional pointers to the relivant weights\r\n layerBlockEnd = layerBlockStart+(layerInputDimention*layerOutputDimention)\r\n layerBiasEnd = layerBlockEnd+layerOutputDimention\r\n newLayer = layerType(layerInputDimention, layerOutputDimention, \r\n self.weights[..., layerBlockStart:layerBlockEnd], \r\n self.weights[..., layerBlockEnd:layerBiasEnd], **kwargs)\r\n self.layers.append(newLayer)\r\n \r\n layerBlockStart = layerBiasEnd\r\n \r\n # Tell the output later to use a different function to calculate the delta \r\n newLayer.calcDelta = newLayer.calcDeltaOutputLayer", "def build(self,input_shape):\n\n self.w = self.add_weight(shape=(input_shape[-1],self.units),\n initializer='random_normal',\n trainable=True)\n self.b = self.add_weight(shape=(self.units,),\n initializer='random_normal',\n trainable=True)", "def build(self):\n\n self.W = self.init([self.n_atom_input_feat, self.n_output])\n self.b = model_ops.zeros(shape=[\n self.n_output,\n ])\n\n self.trainable_weights = self.W + self.b", "def __init__(self, *args, **kwargs):\n self.params = kwargs\n self.output_len = kwargs['num_neurons']\n self.input_len = kwargs['input_len']\n self.weights = Vector(data=np.random.randn(self.output_len, self.input_len))\n self.biases = Vector(data=np.zeros((self.output_len, 1)))\n self.input_activations = None\n self.output_activations = Vector()", "def __init__(self, input_size, output_size, hidden_sizes=[],\r\n learning_rate=2.0, weight_scale=0.1, converge=0.01,\r\n random_seed=None):\r\n if random_seed is not None:\r\n seed(random_seed)\r\n\r\n weight_func = partial(normal, 0, weight_scale)\r\n self.converge = converge\r\n self.layers = []\r\n self.layers.append([InputNode(\"in_\" + str(i)) for i in range(input_size)])\r\n for i,layer_size in enumerate(hidden_sizes):\r\n self.layers.append([SigmoidNode(\"hidden_\"+str(i)+\"-\"+str(j),\r\n learning_rate) for j in range(layer_size)])\r\n self.layers.append([SigmoidNode(\"out_\"+str(i), learning_rate) for i\r\n in range(output_size)])\r\n\r\n # densely connect consecutive layers\r\n for source_layer, dest_layer in zip(self.layers, self.layers[1:]):\r\n for source, dest in product(source_layer, dest_layer):\r\n edge = Edge(source, dest, weight_func)\r\n source.out_edges.append(edge)\r\n dest.in_edges.append(edge)\r\n\r\n # connect each node to bias\r\n self.bias = BiasNode()\r\n for layer in self.layers[1:]:\r\n for node in layer:\r\n e = Edge(self.bias, node, weight_func)\r\n node.in_edges.append(e)\r\n self.bias.out_edges.append(e)", "def __init__(self, layer_sizes, act_func=Sigmoid, cost_func=CrossEntropy, metric=AccuracyMetric):\n np.random.seed(1) # Used for constant weights and biases initialization. 
Fell free to change it.\n\n self.layers_num = len(layer_sizes)\n self.act_func = act_func\n self.cost_func = cost_func\n self.metric = metric\n self.biases = [np.random.random(i) for i in layer_sizes[1:]]\n self.weights = [np.random.normal(loc=0, scale=(1 / np.sqrt(layer_sizes[0])), size=(j, i))\n for j, i in zip(layer_sizes[1:], layer_sizes[:-1])]\n self.costs = []\n self.accuracies = []\n self.eta = 0\n self.lambda_r = 0", "def __init__(self, num_units, input_size=None,\n nonlinearity=tf.nn.tanh,\n W_init=tf.random_normal_initializer(stddev=0.15),\n b_init=tf.constant_initializer(0.0, dtype=tf.float32),\n weightnorm=False):\n self._num_units = num_units\n self._input_size = input_size or num_units\n self._W_init = W_init\n self._b_init = b_init\n self._weightnorm = weightnorm\n self._nonlin = nonlinearity", "def construct_network(self, n_units, n_samples=1, noise_dim=0,\n keep_p=1., nonlinearity=True, init_params=None, name=\"\"):\n print \"constructing network, n_units: \",n_units\n # TODO use kwargs for more elagant solutions to being called by this \n # base class\n assert keep_p ==1. and nonlinearity and noise_dim == 0\n\n assert init_params is None # this is implemented only in the Bayesian flow version of this function\n\n ### Define parameters of the network\n self.weights, self.biases, KL = {}, {}, 0.\n self.layers = []\n # Establish paramters of appromiate posterior over weights and\n # biases.\n for l in range(1, len(n_units)):\n with tf.variable_scope(name+'Layer_%d'%l):\n n_in, n_out = n_units[l-1], n_units[l]\n\n # use non neglidgible uncertainty if we are doing VI\n sigma_init = self.init_sigma_params\n\n w_prior_sigma, b_prior_sigma = self.w_prior_sigma, self.w_prior_sigma\n mu_init_sigma_w, mu_init_sigma_b = np.sqrt(1./(n_in)), 1.\n\n (w_mu, w_logstd), _, w_KL = utils.set_q(name+\"w_%d\"%l,\n sigma_prior=w_prior_sigma, mu_init_sigma=mu_init_sigma_w,\n sigma_init=sigma_init, n_samples=0,\n size=[n_in, n_out], save_summary=True)\n\n # We use same init_sigma for weights and biases.\n (b_mu, b_logstd), _, b_KL = utils.set_q(name+\"b_%d\"%l,\n sigma_prior=b_prior_sigma, mu_init_sigma=mu_init_sigma_b,\n sigma_init=sigma_init, n_samples=0,\n size=[n_out], save_summary=True)\n self.weights['w_%d_mu'%l], self.weights['w_%d_std'%l] = w_mu, tf.nn.softplus(w_logstd)\n self.biases['b_%d_mu'%l], self.biases['b_%d_std'%l] = b_mu, tf.nn.softplus(b_logstd)\n\n self.params += [w_mu, b_mu, w_logstd, b_logstd]\n KL += w_KL + b_KL\n\n # Add an extra dimension to correspond to samples.\n prev_layer = tf.stack([self.x]*n_samples)\n self.layers.append(prev_layer)\n # shape is [n_samples, ?, dim(x)]\n\n ### Define activations in each layer\n for l in range(1,len(n_units)):\n print \"defining activations in layer %d\"%l\n # Multiply with weight matrix and add bias\n prev_layer = tf.reshape(prev_layer, [-1, n_units[l-1]])\n layer_pre_bias = tf.matmul(prev_layer, self.weights['w_%d_mu'%l])\n layer_pre_bias = tf.reshape(layer_pre_bias, [n_samples, -1, n_units[l]])\n # Shape of layer_pre_bias is [n_samples, ?, n_units[l]]\n\n # add mean bias term\n layer = tf.add(layer_pre_bias, self.biases['b_%d_mu'%l][None, None, :])\n\n # Calculate the noise in each hidden unit.\n # must use absolute value of activation because final layer may\n # have negative values.\n layer_var = tf.matmul(tf.reshape(prev_layer**2,[-1,\n n_units[l-1]]), self.weights['w_%d_std'%l]**2)\n layer_var = tf.reshape(layer_var, [n_samples, -1, n_units[l]])\n layer_var += self.biases['b_%d_std'%l]**2\n\n # Now sample noise and add scaled 
noise.\n # This constitutes the local reparameterization trick.\n eps = tf.random_normal(name='eps_%d'%l, mean=0.,\n stddev=1.0, shape=[n_samples, 1, n_units[l]])\n layer_sigma = tf.sqrt(layer_var)\n layer += layer_sigma*eps\n with tf.name_scope(name+\"Neural_Network_Activations_%d\"%l):\n tf.summary.histogram(name+\"Layer_%d_sigmas\"%l, layer_sigma)\n tf.summary.histogram(name+\"Layer_%d_activations_pre_tanh\"%l, layer)\n\n # Add tanh nonlinearity\n if l != (len(n_units) - 1): layer = tf.nn.tanh(layer)\n\n with tf.name_scope(name+\"Neural_Network_Activations_%d\"%l):\n tf.summary.histogram(name+\"Layer_%d_activations_post_tanh\"%l,layer)\n\n prev_layer = layer\n self.layers.append(prev_layer)\n self.KL_BNN = KL\n return prev_layer", "def init_weights(self, load_weights=None):\n if load_weights:\n # TODO\n pass\n else:\n # x: lower layer nodes n\n # y: current layer nodes n\n x = self.weights_shape[1]\n y = self.weights_shape[0]\n self.weights = np.random.randn(y, x) / np.sqrt(x) # pylint: disable=no-member\n self.biases = np.random.randn(y, 1) # pylint: disable=no-member", "def __init__(\n self,\n input_size: int,\n hidden_sizes: Sequence[int],\n output_size: int,\n num_layers: int,\n optimizer: Type[Optimizer] = SGD(),\n norm_weights: bool = False\n ):\n self.input_size = input_size\n self.hidden_sizes = hidden_sizes\n self.output_size = output_size\n self.num_layers = num_layers\n self.norm_weights = norm_weights\n\n assert len(hidden_sizes) == (num_layers - 1)\n assert num_layers >= 1\n\n activated_layer_sizes = [input_size] + hidden_sizes\n activated_layers = [LinearLayer(n_in, n_out, activation_func=ReLU) for n_in, n_out in zip(activated_layer_sizes, activated_layer_sizes[1:])]\n final_layer = LinearLayer(activated_layer_sizes[-1], self.output_size, activation_func=Softmax)\n self.layers = activated_layers + [final_layer]\n\n self.optimizer = optimizer", "def initialize_weights(self, weights_initializer, bias_initializer):\n wshapes = [\n [self.input_size, self.hidden_size[0]],\n [self.hidden_size[0], self.hidden_size[1]],\n [self.hidden_size[1], self.output_size]\n ]\n\n bshapes = [\n [1, self.hidden_size[0]],\n [1, self.hidden_size[1]],\n [1, self.output_size]\n ]\n\n self.weights = [init_weights(s, weights_initializer) for s in wshapes]\n self.biases = [init_weights(s, bias_initializer) for s in bshapes]\n\n self.trainable_variables = self.weights + self.biases", "def __init__(self, weights, path, trained, asGraph):\n \n _weights = np.asarray(weights)\n\n numLayers = int(_weights.shape[0]/2)\n wghts = []\n biases = []\n\n for i in range(numLayers):\n j = 2*i\n# print(j,(_weights[j].T).shape)\n wghts.append(_weights[j])\n j = 2*i + 1\n# print(j,(_weights[j].T).shape)\n biases.append(_weights[j])\n #enddo\n\n self.numLayers = numLayers\n self.wghts = np.asarray(wghts)\n self.asGraph = asGraph\n self.wghts = wghts\n self.path = path\n self.trained = trained", "def __init__(self, inputSize, hiddenSize, outputSize, epochs = 100, debug = False):\n self.inputSize = inputSize\n self.hiddenSize = hiddenSize\n self.outputSize = outputSize\n self.epochs = epochs\n self.debug = debug\n\n #weights\n self.W1 = np.random.randn(self.inputSize, self.hiddenSize) \n self.W2 = np.random.randn(self.hiddenSize, self.outputSize)", "def __init__(self, inputSize, outputSize, hiddenSize): \n\n self.inputSize = inputSize\n self.outputSize = outputSize\n self.hiddenSize = hiddenSize \n \n # Initialize random weight with range [-0.5, 0.5]\n self.weight = np.matrix(np.random.uniform(-0.5, 0.5, 
(self.hiddenSize, self.inputSize)))\n\n # Initialize random bias with range [0, 1]\n self.bias = np.matrix(np.random.uniform(0, 1, (1, self.hiddenSize)))\n \n self.H = 0\n self.beta = 0", "def build_layer(self) :\n inputsWithBias = self.input_count + 1\n self.weights = np.random.rand(inputsWithBias, self.node_count)\n self.weights_and_activations = (self.weights, self.activations)", "def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):\n\n self.params = {}\n self.params['W1'] = weight_init_std * \\\n np.random.randn(input_size, hidden_size)\n self.params['b1'] = np.zeros(hidden_size)\n self.params['W2'] = weight_init_std * \\\n np.random.randn(hidden_size, output_size)\n self.params['b2'] = np.zeros(output_size)", "def create_neural_network(mode, layer_sizes, use_stored_weights, weights_path):\n\n\tif verbose: print('model_tensorflow.create_neural_network() called')\n\n\tnum_layers = len(layer_sizes) - 1\n\tweights = {}\n\tbiases = {}\n\n\tif verbose: print('creating a DNN with', str(num_layers-1), 'hidden layers of size', \n\t\t\t\t\t str(layer_sizes[1:len(layer_sizes)-1]))\n\n\t# Initialise the weights\n\t# (a) Create new weights and biases\n\tif not use_stored_weights:\n\t\tfor i in range(num_layers): \n\t\t\t# Layer l has dimensions (|l-1|, |l|) for weights and (|l|) for biases\n\t\t\tw_name = 'W' + str(i+1)\n\t\t\tweights[w_name] = tf.get_variable(w_name, [layer_sizes[i], layer_sizes[i+1]], \n\t\t\t\tinitializer = tf.contrib.layers.xavier_initializer(), dtype=tf.float32)\n\t\t\tb_name = 'b' + str(i+1)\n\t\t\tbiases[b_name] = tf.get_variable(b_name, [layer_sizes[i+1]], \n\t\t\t\tinitializer = tf.zeros_initializer(), dtype=tf.float32)\n\n\t\t# Initialise all existing global variables \n\t\tsess.run(tf.global_variables_initializer())\n\n\t\t# Save weights and biases\n\t\tsaver = tf.train.Saver()\n\t\tsave_path = saver.save(sess, weights_path + 'weights/' + 'init.ckpt') \n\t# (b) Restore existing weights and biases\n\telse:\n\t\tfor i in range(num_layers):\n\t\t\t# Prepare variable\n\t\t\tw_name = 'W' + str(i+1)\n\t\t\tb_name = 'b' + str(i+1)\n\t\t\tweights[w_name] = tf.get_variable(w_name, [layer_sizes[i], layer_sizes[i+1]], \n\t\t\t\tinitializer = tf.zeros_initializer(), dtype=tf.float32)\n\t\t\tbiases[b_name] = tf.get_variable(b_name, [layer_sizes[i+1]], \n\t\t\t\tinitializer = tf.zeros_initializer(), dtype=tf.float32)\n\n\t\t# Initialise all existing global variables \n\t\tsess.run(tf.global_variables_initializer())\n\n\t\t# Restore weights and biases\n\t\tsaver = tf.train.Saver()\n\t\tif mode == trn:\n\t\t\tsaver.restore(sess, weights_path + 'weights/' + 'init.ckpt') \n\t\telif mode == tst or mode == app:\n\t\t\tsaver.restore(sess, weights_path + 'weights/' + 'trained.ckpt')\n\n\twb = {'weights': weights, 'biases': biases}\n\treturn wb", "def construct(self, weights):\n in_weights = weights # first to append zero column as the last output (no out-degree)\n weights = np.zeros((weights.shape[0], weights.shape[1]+1))\n weights[:,:-1] = in_weights\n din, dout, dhid = self.dim_in, self.dim_out, self.dim_hid # the max dim\n hid = weights.shape[0] - dout # this hidden dim\n if not (weights.shape[1]-din-dout==hid and (0<hid<=dhid)):\n raise self.ANNException('weight matrix hidden nodes not matching')\n if not (dout < weights.shape[0] <= hid+dout):\n raise self.ANNException('weight matrix row shape not matching')\n if not (din+dout < weights.shape[1] <= din+hid+dout):\n raise self.ANNException('weight matrix column shape not matching')\n\n 
self.weight[din:din+hid,:din+hid] = weights[:hid,:din+hid]\n self.weight[din:din+hid,din+dhid:] = weights[:hid,din+hid:]\n self.weight[din+dhid:,:din+hid] = weights[hid:,:din+hid]\n self.weight[din+dhid:,din+dhid:] = weights[hid:,din+hid:]\n\n for i in range(hid):\n self.connectivity[din+i,:din+i] = True\n self.connectivity[din+dhid:,:din+hid] = True\n for i in range(dout):\n self.connectivity[din+dhid+i,din+dhid:din+dhid+i] = True\n\n self.hidden[:hid] = True", "def __init__(self, sizes, afunc): \n\t\tself.num_layers = len(sizes)\n\t\tself.sizes = sizes\n\t\tself.afunc = afunc;\n\t\tself.initialize_weights_uniform()\n\t\t#self.initialize_weights_gaussian(0.1)\n\t\t#self.initialize_weights_xavier()", "def __init__(self, inputLayerSize, outputLayerSize, \\\n hiddenLayerSize):\n #Network hyperparameters - neurons per layer - **not altered by training**\n self.inputLayerSize = inputLayerSize\n self.outputLayerSize = outputLayerSize\n self.hiddenLayerSize = hiddenLayerSize\n self.num_params = inputLayerSize * hiddenLayerSize + \\\n hiddenLayerSize * outputLayerSize + hiddenLayerSize \\\n + outputLayerSize\n #--Weights--\n #w_ih - weights of synapses linking input -> hidden\n self.w_ih = np.random.randn( self.inputLayerSize, \\\n self.hiddenLayerSize)\n #w_ho - weights of synapses linking hidden -> output\n self.w_ho = np.random.randn( self.hiddenLayerSize, \\\n self.outputLayerSize)\n \n #--Biases--\n #b_h - biases of hidden layer\n self.b_h = np.random.randn( self.hiddenLayerSize )\n #b_o - biases of output layer\n self.b_o = np.random.randn( self.outputLayerSize )", "def __init__(self):\r\n # A dummy layer does nothing\r\n self.weights = np.zeros(shape=(input.shape[1], 10))\r\n bias = np.zeros(shape=(10,))\r\n pass", "def construct(self):\n self.input_size = self.numplanes * self.boardsize**2\n \n if self.hidden:\n layers = [\n torch.nn.Linear(self.input_size, self.hidden), \n torch.nn.ReLU(),\n torch.nn.Linear(self.hidden, self.boardsize**2)\n ]\n else:\n layers = [torch.nn.Linear(self.input_size, self.boardsize**2)]\n\n self.layers = torch.nn.ModuleList(layers)\n self.optimizer = torch.optim.Adam(self.parameters(), lr=1e-5)\n logging.info(\"Model initialized: %s\", self)", "def __init__(self, weights, alphas):\n self._neurons = [Neuron(a, w) for w, a in zip(weights, alphas)]", "def initialize(self, input_size, n_classes):\n\n self.n_classes = n_classes\n self.input_size = input_size\n\n n_hidden_layers = len(self.sizes)\n #############################################################################\n # Allocate space for the hidden and output layers, as well as the gradients #\n #############################################################################\n self.hs = []\n self.grad_hs = []\n for h in range(n_hidden_layers):\n self.hs += [np.zeros((self.sizes[h],))] # hidden layer\n self.grad_hs += [np.zeros((self.sizes[h],))] # ... and gradient\n self.hs += [np.zeros((self.n_classes,))] # output layer\n self.grad_hs += [np.zeros((self.n_classes,))] # ... and gradient\n\n ##################################################################\n # Allocate space for the neural network parameters and gradients #\n ##################################################################\n self.weights = [np.zeros((self.input_size, self.sizes[0]))] # input.csv to 1st hidden layer weights\n self.grad_weights = [np.zeros((self.input_size, self.sizes[0]))] # ... and gradient\n\n self.biases = [np.zeros((self.sizes[0]))] # 1st hidden layer biases\n self.grad_biases = [np.zeros((self.sizes[0]))] # ... 
and gradient\n\n for h in range(1, n_hidden_layers):\n self.weights += [np.zeros((self.sizes[h - 1], self.sizes[h]))] # h-1 to h hidden layer weights\n self.grad_weights += [np.zeros((self.sizes[h - 1], self.sizes[h]))] # ... and gradient\n\n self.biases += [np.zeros((self.sizes[h]))] # hth hidden layer biases\n self.grad_biases += [np.zeros((self.sizes[h]))] # ... and gradient\n\n self.weights += [np.zeros((self.sizes[-1], self.n_classes))] # last hidden to output layer weights\n self.grad_weights += [np.zeros((self.sizes[-1], self.n_classes))] # ... and gradient\n\n self.biases += [np.zeros((self.n_classes))] # output layer biases\n self.grad_biases += [np.zeros((self.n_classes))] # ... and gradient\n\n #########################\n # Initialize parameters #\n #########################\n\n self.rng = np.random.mtrand.RandomState(self.seed) # create random number generator\n # biases are initialized to zero\n # ... and weights according to the slides\n for m in range(len(self.weights)):\n b = (6 ** 0.5) / ((self.weights[m].shape[0] + self.weights[m].shape[1]) ** 0.5)\n for ind, val in np.ndenumerate(self.weights[m]):\n self.weights[m][ind] = self.rng.uniform(-b, b, 1)\n\n\n self.n_updates = 0 # To keep track of the number of updates, to decrease the learning rate", "def create_network(layers):\r\n return NeuronNetwork(layers)", "def __init__(self, n_layers, layers_sizes, activation='sigmoid', learning_rate=0.1, weight_init='normal', batch_size=1000, num_epochs=100):\n self.layers_sizes=layers_sizes[1:]\n self.activation=activation\n self.learning_rate=learning_rate\n self.weight_init=weight_init\n self.batch_size=batch_size\n self.num_epochs=num_epochs\n self.weights={}\n self.n_layers=len(self.layers_sizes)\n self.num_samples=0\n self.training_loss_values=[]\n self.testing_loss_values=[]\n self.gg=0.01\n self.XTEST=None\n self.YTEST=None\n self.TTTT=None\n\n if activation not in self.acti_fns:\n raise Exception('Incorrect Activation Function')\n\n if weight_init not in self.weight_inits:\n raise Exception('Incorrect Weight Initialization Function')\n pass", "def instantiate_weights(self):\n self.product_embeddings = tf.get_variable(\n name='product_embeddings',\n shape=[50000, 300],\n dtype=tf.float32\n )\n self.aisle_embeddings = tf.get_variable(\n name='aisle_embeddings',\n shape=[250, 50],\n dtype=tf.float32\n )\n self.department_embeddings = tf.get_variable(\n name='department_embeddings',\n shape=[50, 10],\n dtype=tf.float32\n )\n self.W_relu = tf.get_variable(\"W_relu\",shape=[670, 30]) #这个参数后续需要自适应\n self.b_relu = tf.get_variable(\"bias_relu\",shape=[30]) \n self.W_projection = tf.get_variable(\"W_projection\",shape=[30, 1]) \n self.b_projection = tf.get_variable(\"bias_projection\",shape=[1])", "def __init__(self, N_sym, n_nodes, activations, N_element, bias = True, scaling = None):\n super(MultiLayerNet, self).__init__()\n N_layers = len(n_nodes)\n if N_layers == 0:\n self.net = torch.nn.Linear(N_sym, N_element, bias = bias)\n else:\n layers = []\n for n in range(N_layers):\n if n == 0:\n layers += [torch.nn.Linear(N_sym, n_nodes[n], bias = bias)]\n layers += [activations[n]]\n else:\n layers += [torch.nn.Linear(n_nodes[n-1], n_nodes[n], bias = bias)]\n layers += [activations[n]]\n layers += [torch.nn.Linear(n_nodes[-1], N_element, bias = bias)]\n self.net = torch.nn.Sequential(*layers)\n \n self.scaling = scaling", "def _build_network(self, h_size=16, l_rate=0.001):\n with tf.variable_scope(self.net_name):\n self._X = tf.placeholder(tf.float32, [None, self.input_size], 
name=\"input_x\")\n net = self._X\n\n net = tf.layers.dense(net, h_size, activation=tf.nn.relu)\n net = tf.layers.dense(net, self.output_size)\n self._Qpred = net\n\n self._Y = tf.placeholder(tf.float32, shape=[None, self.output_size])\n self._loss = tf.losses.mean_squared_error(self._Y, self._Qpred)\n\n optimizer = tf.train.AdamOptimizer(learning_rate=l_rate)\n self._train = optimizer.minimize(self._loss)", "def make_neural_net_challenging():\n i0 = Input('i0', -1.0) # this input is immutable\n i1 = Input('i1', 0.0)\n i2 = Input('i2', 0.0)\n seed_random()\n wt1 = random_weight()\n wt2 = random_weight()\n wt3 = random_weight()\n wt4 = random_weight()\n wt5 = random_weight()\n wt6 = random_weight()\n wt7 = random_weight()\n wt8 = random_weight()\n wt9 = random_weight()\n wt10 = random_weight()\n\t\n w1A = Weight('w1A', wt1)\n w2A = Weight('w2A', wt2)\n w1B = Weight('w1B', wt3)\n w2B = Weight('w2B', wt4)\n wA = Weight('wA', -1)\n wB = Weight('wB', -1)\n wAC = Weight('wAC', wt5)\n wBC = Weight('wBC', wt6)\n wC = Weight('wC', -1)\n wAD = Weight('wAD', wt7)\n wBD = Weight('wBD', wt8)\n wD = Weight('wD', -1)\n wCE = Weight('wCE', wt9)\n wDE = Weight('wDE', wt10)\n wE = Weight('wE', -1)\n\n # Inputs must be in the same order as their associated weights\n A = Neuron('A', [i1,i2,i0], [w1A,w2A,wA])\n B = Neuron('B', [i1,i2,i0], [w1B,w2B,wB])\n C = Neuron('C', [A,B,i0], [wAC,wBC,wC])\n D = Neuron('D', [A,B,i0], [wAD,wBD,wD])\n E = Neuron('D', [C,D,i0], [wCE,wDE,wE])\n P = PerformanceElem(E, 0.0)\n\n net = Network(P,[A, B, C, D, E])\n return net", "def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):\n \n super().__init__(inputnodes, hiddennodes, outputnodes, learningrate)\n\n # link weight matrices, wih and who\n # weights inside the arrays are w_i_j, where link is from node i to node i to j in the next layer\n # w11 w21\n # w12 w22 etc\n self.wih = numpy.random.normal(\n 0.0, pow(self.inodes, -0.5), (self.hnodes, self.inodes))\n self.who = numpy.random.normal(\n 0.0, pow(self.hnodes, -0.5), (self.onodes, self.hnodes))\n\n #activation function is the sigmoid function\n self.activation_function = lambda x: scipy.special.expit(x)\n\n pass", "def __initialize_weights__(num_neurons: int) -> np.ndarray:\n\n weights = np.zeros((num_neurons, num_neurons))\n\n return weights", "def __init__(self, num_units, input_size=None,\n nonlinearity=tf.nn.sigmoid,\n initializer=tf.random_normal_initializer(0.0, 0.1)):\n self._input_size = num_units if not input_size else input_size\n self._num_units = num_units\n self._nonlin = nonlinearity\n self._initializer = initializer", "def __init__(self, input_dimensions=2, number_of_nodes=4, transfer_function=\"Hard_limit\"):\r\n self.input_dimensions = input_dimensions\r\n self.number_of_nodes = number_of_nodes\r\n self.transfer_function = transfer_function\r\n self.initialize_weights()", "def _setup(self) -> None:\n #TODO: type\n self.activation = self.params['activation']\n\n self.batchsize: int = self.params['batchsize']\n\n self.input_shape: Tuple[int,int,int] = self.params['input_shape']\n\n self.d: int = self.input_shape[1]\n assert(not self.d == 0)\n\n self.n: int = int(sqrt(self.input_shape[2]))\n assert(not self.n == 0)\n\n self.dtype: type = self.params['dtype']\n\n # initialize weights\n self.W: List[tf.Tensor] = []\n \n for i in range(3):\n #TODO: type\n w_init = self.params['initializer_w']\n if self.params['initializer_w' + str(i)] is not None:\n w_init = self.params['initializer_w' + str(i)]\n\n w_stddev: float = self.params['stddev_w']\n if 
self.params['stddev_w' + str(i)] is not None:\n w_stddev = self.params['stddev_w' + str(i)]\n\n self.W.append(tf.get_variable(\"weights_\" + str(i),\n shape = (self.d, (self.d if i < 2 else 2 * self.d)),\n dtype = self.dtype,\n initializer = w_init(stddev=w_stddev)))\n\n #TODO: type\n b_init = self.params['initializer_b']\n b_stddev = self.params['stddev_b']\n self.B: tf.Tensor = tf.get_variable(\"biases\", shape = (1, self.d, 1),\n dtype = self.dtype,\n initializer = b_init(stddev=b_stddev))\n\n # create/load expand matrix\n with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):\n self.expand: tf.Tensor = tf.get_variable(\n \"expand\" + str(self.n),\n shape = (self.n, self.n * self.n),\n dtype = self.dtype,\n initializer = tf.constant_initializer(make_expand(self.n)))\n\n # create/load tile matrix\n tile: np.ndarray = np.array([([1] + [0]*(self.n-1))*self.n])\n for i in range(1, self.n):\n tile = np.append(tile, [([0]*i + [1] + [0]*(self.n-1-i))*self.n], 0)\n\n self.tile: tf.Tensor = tf.constant(tile, self.dtype)", "def _build_networks(self):\n # Calling online_convnet will generate a new graph as defined in\n # self._get_network_template using whatever input is passed, but will always\n # share the same weights.\n self.online_convnet = tf.make_template('Online', self._network_template)\n self.target_convnet = tf.make_template('Target', self._network_template)\n self._net_outputs = self.online_convnet(self.state_ph)\n\n self._replay_net_outputs = self.online_convnet(self._replay.states)\n self._replay_next_target_net_outputs = self.target_convnet(\n self._replay.next_states)\n\n if self.acting_policy == 'hyperbolic':\n self._q_argmax = tf.argmax(self._net_outputs.hyp_q_value, axis=1)[0]\n elif self.acting_policy == 'largest_gamma':\n self._q_argmax = tf.argmax(self._net_outputs.q_values[-1], axis=1)[0]\n else:\n raise NotImplementedError", "def __init__(self, input_dimensions=2, number_of_nodes=4, transfer_function=\"Hard_limit\"):\n self.input_dimensions = input_dimensions\n self.number_of_nodes = number_of_nodes\n self.transfer_function = transfer_function\n self.initialize_weights()", "def __init__(self, emsize):\n super(GatingNetwork, self).__init__()\n self.weight = nn.Linear(emsize, 1)", "def __init__(self, params = None, layer_sizes = None, scale = 0.1, rs=npr.RandomState(0)):\n self.layer_sizes = layer_sizes\n\n if params is not None:\n self.param = params\n else:\n if layer_sizes is None:\n raise Exception(\"Please provide the layer sizes\")\n\n self.params = [(scale * rs.randn(m, n), # weight matrix\n scale * rs.randn(n)) # bias vector\n for m, n in zip(layer_sizes[:-1], layer_sizes[1:])]", "def build_network(self, dimList, actType=\"Tanh\", verbose=True):\n self.Q_network = Model(dimList, actType, verbose=verbose)\n self.target_network = Model(dimList, actType)\n\n if self.device == torch.device(\"cuda\"):\n self.Q_network.cuda()\n self.target_network.cuda()\n\n self.build_optimizer()", "def __init__(self, weights, biases):\n super().__init__()\n\n self.weights = weights\n self.biases = biases", "def __init__(self, size: Tuple[int, int], is_output: bool) -> None:\n self.g = Math.sigmoid if is_output else Math.relu\n self.w: List[List[float]] = \\\n [[random.random() * 0.1 for _ in range(size[1])]\n for _ in range(size[0])]\n self.b: List[float] = [0.0] * size[0]\n\n # use of below attributes is optional but recommended\n self.z: List[float] = [0.0] * size[0]\n self.a: List[float] = [0.0] * size[0]\n self.dw: List[List[float]] = \\\n [[0.0 for _ in range(size[1])] 
for _ in range(size[0])]\n self.db: List[float] = [0.0] * size[0]", "def create_base_network(NumberOfFeatures, NumberOfClasses,init_mode='glorot_normal'):\n network = Sequential()\n network.add(Dense(44, activation='sigmoid', kernel_initializer=init_mode,input_dim=NumberOfFeatures))\n# network.add(Dense(22, activation='sigmoid',kernel_initializer=init_mode))\n network.add(Dense(NumberOfClasses, activation='softmax',kernel_initializer=init_mode))\n return network", "def __init__(self, num_units, input_size=None,\n nonlinearity=tf.nn.tanh,\n hh_init=tf.random_normal_initializer(stddev=0.15),\n xh_init=tf.random_normal_initializer(stddev=0.15),\n b_init=tf.constant_initializer(0.0, dtype=tf.float32),\n weight_noise=0.0,\n keep_prob=1.0,\n weightnorm=False,\n orthreg=False):\n self._num_units = num_units\n self._input_size = input_size or num_units\n self._nonlinearity = nonlinearity\n self._hh_init = hh_init\n self._xh_init = xh_init\n self._b_init = b_init\n self._weightnorm = weightnorm\n self._keep_prob = keep_prob\n self._weight_noise = weight_noise\n self._orthreg = orthreg", "def _generate_network_initialization(self, graph, memory_manager):\n\n # TODO: To be changed if we want to support multiple outputs\n output_buffer_name = graph.outputs[0].name\n\n ops_to_ignore = ['Reshape', 'Mul']\n\n buffers_allocated = []\n\n buffer_declaration = \"\"\n buffer_declaration += \" pico_cnn::naive::Tensor **kernels;\\n\"\n buffer_declaration += \" pico_cnn::naive::Tensor **biases;\\n\"\n\n constructor_code = \"\"\n #constructor_code += \"Network::Network() {\\n\\n\"\n\n num_layers = 0\n num_kernels = 0\n num_biases = 0\n\n for node in graph.nodes:\n \"\"\"Do not count the reshape layers as the input tensor will only define the dimensions\"\"\"\n if len(node.input_tensors) > 0 and node.op_type not in ops_to_ignore:\n num_layers += 1\n for num, input in enumerate(node.input_tensors):\n if input in buffers_allocated:\n continue\n else:\n tensor = node.input_tensors[input]\n buffers_allocated.append(input)\n if len(tensor.shape) == 1:\n num_biases += 1\n else:\n num_kernels += 1\n\n \"\"\"The arrays kernels and biases will be used to pass only two variables to read_binary_weights\"\"\"\n constructor_code += \" kernels = new pico_cnn::naive::Tensor*[{}]();\\n\".format(num_kernels)\n constructor_code += \" biases = new pico_cnn::naive::Tensor*[{}]();\\n\\n\".format(num_biases)\n\n pos = -1\n pos_kernel = -1\n pos_bias = -1\n\n buffers_allocated.clear()\n\n \"\"\"Iterate over all nodes in the graph and generate the corresponding allocation code.\"\"\"\n for node_id, node in enumerate(graph.nodes):\n\n if len(node.input_tensors) > 0 and node.op_type not in ops_to_ignore:\n pos += 1\n\n buffer_declaration += \" // Layer: \" + node.name + \", Operation: \" + node.op_type + \"\\n\"\n constructor_code += \" // Layer: \" + node.name + \", Operation: \" + node.op_type + \"\\n\"\n\n # Allocate memory for kernels and biases\n buffer_declaration += \" // Inputs\\n\"\n constructor_code += \" // Inputs\\n\"\n for num, input in enumerate(node.input_tensors):\n\n if node.op_type in ops_to_ignore:\n continue\n\n if input in buffers_allocated:\n continue\n else:\n buffers_allocated.append(input)\n\n tensor = node.input_tensors[input]\n if len(tensor.shape) == 1:\n pos_bias += 1\n else:\n pos_kernel += 1\n\n buffer = memory_manager.get_buffer(graph, input)\n\n buffer_declaration += \" // \" + str(buffer.shape) + \"\\n\"\n\n pico_cnn_tensor = \" pico_cnn::naive::Tensor *\"\n\n buffer_declaration += pico_cnn_tensor + 
buffer.name + \";\\n\"\n\n constructor_code += \" // \" + str(buffer.shape) + \"\" # TODO maybe we sometimes need \\n\n\n functionality = CodeRegistry.get_funct(\"KernelAllocation\")\n impl = functionality[0].create(buffer, pos, pos_kernel, pos_bias)\n\n if impl:\n constructor_code += impl.generate_code()\n constructor_code += \"\\n\"\n\n buffer_declaration += \" // Outputs\\n\"\n constructor_code += \" // Outputs\\n\"\n for num, output in enumerate(node.outputs):\n\n buffer = memory_manager.get_buffer(graph, output)\n\n if output == output_buffer_name:\n buffer_declaration += \" // Output tensor {} with shape {} of network provided as argument of Network::run()\".format(buffer.name, str(buffer.shape))\n constructor_code += \" // Output tensor {} with shape {} of network provided as argument of Network::run()\".format(buffer.name, str(buffer.shape))\n continue\n\n buffer_declaration += \" // \" + str(buffer.shape) + \"\\n\"\n\n pico_cnn_tensor = \" pico_cnn::naive::Tensor *\"\n\n buffer_declaration += pico_cnn_tensor + buffer.name + \";\\n\"\n\n constructor_code += \" // \" + str(buffer.shape) + \"\" # TODO maybe we sometimes need \\n\n\n functionality = CodeRegistry.get_funct(\"OutputAllocation\")\n impl = functionality[0].create(buffer)\n\n if impl:\n constructor_code += impl.generate_code()\n constructor_code += \"\\n\"\n\n buffer_declaration += \"\\n\\n\"\n constructor_code += \"\\n\\n\"\n\n #constructor_code += \"}\\n\"\n\n self.buffer_declaration = buffer_declaration\n self.constructor_code = constructor_code", "def default_weight_initializer(self):\n self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]\n self.weights = [np.random.randn(y, x)/np.sqrt(x)\n for x, y in zip(self.sizes[:-1], self.sizes[1:])]", "def init_learner(self,**kwargs):\r\n \r\n if self.learn_type == 'nn':\r\n #initialize neural network\r\n shape = kwargs[\"shape\"]\r\n #initialize input layer\r\n model = Sequential() \r\n #add hidden layers\r\n for i in range(len(shape)):\r\n if i == 0:\r\n nb_input = self.size\r\n else:\r\n nb_input = shape[i -1]\r\n nb_output = shape[i]\r\n model.add(Dense(nb_input,nb_output,init=\"he_normal\",\r\n activation = \"tanh\"))\r\n model.add(Dropout(.5))\r\n model.add(Dense(shape[-1],1,init = \"he_normal\",\r\n activation = \"linear\"))\r\n model.compile(loss = 'mean_squared_error',optimizer = 'rmsprop')\r\n self.learner = model\r\n \r\n elif self.learn_type == 'linear':\r\n #initialize parameter\r\n self.learner = Linear(self.size,**kwargs)", "def __init__(self, input_size, hidden_size, num_layers, nonlinearity=torch.tanh):\n super().__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n\n # input weights\n self.weight_ih_l0 = Parameter(torch.Tensor(3*hidden_size, input_size))\n\n # hidden weights\n self.weight_hh_l0 = Parameter(torch.Tensor(3*hidden_size, hidden_size))\n\n # bias\n self.bias_ih_l0 = Parameter(torch.Tensor(3*hidden_size)) # input\n self.bias_hh_l0 = Parameter(torch.Tensor(3*hidden_size)) # hidden\n\n self.f = nonlinearity\n\n self.init_weights()", "def __init__(self, neuron_count):\n # The current state of the thermal network.\n self.current_state = [0.0] * neuron_count\n\n # The weights.\n self.weights = np.zeros( [neuron_count*neuron_count] )\n\n # The neuron count.\n self.neuron_count = neuron_count", "def __init__(self, layer_size, activation_function=linear,\n derivative_function=dlinear,\n forward_function=propagate_forward,\n backward_function=propagate_backward_irpropm,\n init_weights_function=fully_connected_weights, 
bias=True):\n self.forward_propagation = forward_function\n self.back_propagation = backward_function\n self.activation_function = activation_function\n self.derivative_function = derivative_function\n self.bias = bias\n\n # the activations of these nodes\n bias_add = 0\n if self.bias:\n bias_add = 1\n self.visible = numpy.ones(layer_size + bias_add)\n self.init_weights_function = init_weights_function", "def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,\n reg=0.0, weight_scale=1e-2, dtype=np.float32):\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n\n ############################################################################\n # Initialize the network parameters with different weights and biases for #\n # network layers #\n ############################################################################\n \n key = ['W' + str(1), 'b' + str(1)]\n self.params[key[0]] = weight_scale * np.random.randn(input_dim, hidden_dims[0])\n self.params[key[1]] = np.zeros(hidden_dims[0])\n \n for i in range(1, len(hidden_dims)):\n key = ['W' + str(i+1), 'b' + str(i+1)]\n \n self.params[key[0]] = weight_scale * np.random.randn(hidden_dims[i-1], hidden_dims[i])\n self.params[key[1]] = np.zeros(hidden_dims[i])\n\n key = ['W' + str(self.num_layers), 'b' + str(self.num_layers)]\n self.params[key[0]] = weight_scale * np.random.randn(hidden_dims[len(hidden_dims)-1], num_classes)\n self.params[key[1]] = np.zeros(num_classes)\n\n\n # Cast all parameters to the correct datatype\n for k, v in self.params.items():\n self.params[k] = v.astype(dtype)", "def __init__(self, dimensions=2):\n assert dimensions > 0\n for d in range(0,dimensions+1):\n self.weight.append(0)", "def __init__(self, input_size, hidden_size, en_bias):\n self.en_bias = en_bias\n # weight matrix and bias vector\n self.u = self.random(-np.sqrt(1.0/input_size),\n np.sqrt(1.0/input_size), (hidden_size, input_size))\n self.w = self.random(-np.sqrt(1.0/hidden_size),\n np.sqrt(1.0/hidden_size), (hidden_size, hidden_size))\n self.v = self.random(-np.sqrt(1.0/hidden_size),\n np.sqrt(1.0/hidden_size), (hidden_size, hidden_size))\n if en_bias:\n self.b = self.random(-0.1, 0.1, (hidden_size,))\n else:\n self.b = np.zeros(hidden_size)\n # error gradient for weight matrix and bias vector\n self.dLdu = np.zeros(self.u.shape)\n self.dLdw = np.zeros(self.w.shape)\n self.dLdv = np.zeros(self.v.shape)\n self.dLdb = np.zeros(self.b.shape)", "def __init__(self, sizes, beta=1, momentum=0.9):\n self.beta = beta\n self.momentum = momentum\n\n self.nin = sizes[0] # number of features in each sample\n self.nhidden1 = sizes[1] # number of neurons in the first hidden layer\n self.nhidden2 = sizes[2] # number of neurons in the second hidden layer\n self.nout = sizes[3] # number of classes / the number of neurons in the output layer\n\n\n # Initialise the network of two hidden layers\n self.weights1 = (np.random.rand(self.nin+1,self.nhidden1)-0.5)*2/np.sqrt(self.nin) # hidden layer 1\n self.weights2 = (np.random.rand(self.nhidden1+1,self.nhidden2)-0.5)*2/np.sqrt(self.nhidden1) # hidden layer 2\n self.weights3 = (np.random.rand(self.nhidden2+1,self.nout)-0.5)*2/np.sqrt(self.nhidden2) # output layer", "def __init__(self, dims):\n\t\tself.layersNumber = len(dims) - 1\n\t\tself.weights = []\n\t\tself.biases = []\n\t\tnp.random.seed(42)\n\t\tfor d in range(self.layersNumber):\n\t\t\tself.weights.append(np.random.randn(dims[d+1], dims[d]))\n\t\t\tself.biases.append(np.random.randn(dims[d+1], 1))", "def __init__(self, 
InputDimentions, OutputDimentions, numberOfLayers, WeightArray, BiasArray):\r\n self.WeightMatrixT = WeightArray\r\n self.WeightMatrixT.shape = (numberOfLayers, OutputDimentions, InputDimentions)\r\n self.BiasVector = BiasArray\r\n self.BiasVector.shape = (numberOfLayers, 1, OutputDimentions)", "def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):\n super().__init__(inputnodes, hiddennodes, outputnodes, learningrate)\n\n # link weight matrices, wih and who\n # weights inside the arrays are w_i_j, where link is from node i to node i to j in the next layer\n # w11 w21\n # w12 w22 etc\n self.wih = cupy.random.normal(\n 0.0, pow(self.inodes, -0.5), (self.hnodes, self.inodes))\n self.who = cupy.random.normal(\n 0.0, pow(self.hnodes, -0.5), (self.onodes, self.hnodes))\n\n #activation function is the sigmoid function\n self.activation_function = lambda x: 1 / (1 + cupy.exp(x) ** (-1))", "def __init__(self, N):\n self.data = list(range(N))\n self.weights = [1]*N", "def init_three_layer_neuralnet(weight_scale=1, bias_scale=0, input_feat_dim=786,\n num_classes=10, num_neurons=(20, 30)):\n \n assert len(num_neurons) == 2, 'You must provide number of neurons for two layers...'\n\n model = {}\n #model['W1'] = np.random.randn((num_neurons[0],(input_feat_dim) * weight_scale) * math.sqrt(2.0/input_feat_dim)) # Initialize from a Gaussian With scaling of sqrt(2.0/fanin)\n \n model['W1'] = (np.random.rand(input_feat_dim,num_neurons[0])*weight_scale) * math.sqrt(2.0/input_feat_dim)\n model['b1'] = np.zeros(num_neurons[0])# Initialize with zeros\n \n #model['W2'] = (np.random.randn(input_feat_dim) * weight_scale) * math.sqrt(2.0/input_feat_dim)# Initialize from a Gaussian With scaling of sqrt(2.0/fanin)\n #print ((model['W1'])[0,:]).shape\n #numcols = len(input[0])\n t=len((model['W1'])[0])\n #print t\n model['W2'] = (np.random.rand(num_neurons[0],num_neurons[1])*weight_scale) * math.sqrt(2.0/t)\n model['b2'] = np.zeros(num_neurons[1])# Initialize with zeros\n\n t=len((model['W2'])[0])\n #model['W3'] = (np.random.randn(input_feat_dim) * weight_scale) * math.sqrt(2.0/input_feat_dim)# Initialize from a Gaussian With scaling of sqrt(2.0/fanin)\n model['W3'] = (np.random.rand(num_neurons[1],num_classes)*weight_scale) * math.sqrt(2.0/t)\n model['b3'] = np.zeros(num_classes)# Initialize with zeros\n\n return model", "def build_neural_net(net_def):\n\n populated_def = net_def.copy()\n\n for layer in populated_def['layers']:\n for n in range(0, layer['num_neurons']):\n weights = layer['weights'][n]\n bias = layer['bias'][n]\n\n neuron = Neuron(weights, bias, layer['activation'])\n layer['neurons'].append(neuron)\n\n\n return populated_def", "def __init__(self, n=0, w=0, weighting=1):\n self.n = n\n self.w = w\n self.weighting = weighting\n self.timestep = 0\n self.Xw = np.zeros([n,w])\n self.Yw = np.zeros([n,w])\n self.A = np.zeros([n,n])\n self.P = np.zeros([n,n])", "def build_neuron_network(nb_features_map: Union[Sequence[int], None] = None,\n size_linear_layers: Union[Sequence[int], None] = None,\n dropout_rate: Union[Tuple[float, float], float] = 0.3,\n conv_kernel_size: Union[Sequence[int], int] = 3,\n conv_stride: int = 1,\n conv_padding: int = 1,\n conv_activation: str = \"relu\",\n conv_architecture: str = \"CPD\",\n pool_kernel_size: int = 2,\n pool_stride: int = 2,\n dense_activation: str = \"relu\",\n pretrained: Union[str, None] = None,\n grayscale: bool = True,\n optimizer: str = \"Adam\",\n weight_decay: float = 0.,\n learning_rate: float = 0.001,\n ) -> Tuple[nn.Module, List, 
torch.optim.Optimizer]:\n # Initializations\n if pretrained is not None:\n grayscale = False\n if grayscale:\n channels = 1\n else:\n channels = 3\n if nb_features_map is None:\n nb_features_map = [8]\n if size_linear_layers is None:\n size_linear_layers = []\n height = 224\n width = 224\n module = nn.Module()\n shapes = [(\"input\", channels, height, width)]\n layers = {\"extractor\": [], \"regressor\": []}\n if not hasattr(dropout_rate, \"__len__\"):\n dropout_rate = (dropout_rate, 0.)\n next_dropout_rate = dropout_rate[0]\n # If a pretrained model is used:\n if pretrained is None:\n # Input checks\n if hasattr(conv_kernel_size, \"__len__\"):\n if len(conv_kernel_size) != len(nb_features_map):\n raise ValueError(\"The length of nb_features_map shall match the length of conv_kernel_size\")\n else:\n conv_kernel_size = [conv_kernel_size] * len(nb_features_map)\n # Feature extractor\n next_layer_type = itertools.cycle(conv_architecture)\n nb_feature_map = None\n i = 0\n while True:\n layer_type = next(next_layer_type)\n if layer_type == \"C\":\n # Convolutional layer\n try:\n nb_feature_map = nb_features_map[i]\n except IndexError:\n break\n name = \"conv2d-{:02d}\".format(i+1)\n conv = nn.Conv2d(shapes[-1][1], nb_feature_map, conv_kernel_size[i], stride=conv_stride,\n padding=conv_padding)\n layers[\"extractor\"].append((name, conv))\n h, w = output_shape_conv_and_pool_layer(rows=shapes[-1][2], columns=shapes[-1][3],\n kernel=conv_kernel_size[i], stride=conv_stride,\n padding=conv_padding)\n shapes.append((name, nb_feature_map, h, w))\n i += 1\n # Activation\n if conv_activation == \"relu\":\n activ = nn.ReLU()\n elif conv_activation == \"elu\":\n activ = nn.ELU(alpha=0.1)\n elif conv_activation == \"leaky\":\n activ = nn.LeakyReLU()\n else:\n activ = nn.ReLU()\n name = \"{}-{:02d}\".format(conv_activation, i)\n layers[\"extractor\"].append((name, activ))\n # activation does not change the size\n shapes.append((name, shapes[-1][1], shapes[-1][2], shapes[-1][3]))\n elif layer_type == \"P\":\n # Max-pooling\n name = \"maxpool2d-{:02d}\".format(i)\n pool = nn.MaxPool2d(pool_kernel_size, pool_stride)\n layers[\"extractor\"].append((name, pool))\n h, w = output_shape_conv_and_pool_layer(rows=shapes[-1][2], columns=shapes[-1][3],\n kernel=pool_kernel_size, stride=pool_stride)\n shapes.append((name, nb_feature_map, h, w))\n elif layer_type == \"D\":\n # Dropout\n if next_dropout_rate > 0.:\n name = \"dropout-{:02d}\".format(i)\n dropout = nn.Dropout(p=next_dropout_rate)\n layers[\"extractor\"].append((name, dropout))\n # Dropout does not change the size\n shapes.append((name, shapes[-1][1], shapes[-1][2], shapes[-1][3]))\n next_dropout_rate += dropout_rate[1]\n elif layer_type == \"B\":\n # Batch normalization\n name = \"batchnorm-{:02d}\".format(i)\n batch = nn.BatchNorm2d(shapes[-1][1])\n layers[\"extractor\"].append((name, batch))\n # Batch norm. 
does not change the size\n shapes.append((name, shapes[-1][1], shapes[-1][2], shapes[-1][3]))\n # Add a flatten layer\n name = \"flatten\"\n flatten = nn.Flatten(1)\n layers[\"extractor\"].append((name, flatten))\n shapes.append((name, shapes[-1][1] * shapes[-1][2] * shapes[-1][3]))\n # Create extractor module\n extractor = nn.Sequential(OrderedDict(layers[\"extractor\"]))\n module.add_module(\"extractor\", extractor)\n elif pretrained == \"VGG16\":\n pre_trained = models.vgg16(pretrained=True)\n modules = []\n for _name, _module in pre_trained.named_children():\n if _name != 'classifier':\n modules.append((_name, _module))\n modules.append((\"flatten\", nn.Flatten(1)))\n vgg16 = nn.Sequential(OrderedDict(modules))\n # Freeze all parameters in the pre-trained model\n # So we prevent gradients from being calculated, it will save computation time\n for param in vgg16.parameters():\n param.requires_grad = False\n module.add_module('extractor', vgg16)\n shapes.append((pretrained, 25088))\n else:\n raise ValueError(f\"Unknown pre-trained model '{pretrained}'.\")\n # Regressor\n for i, size_linear_layer in enumerate(size_linear_layers):\n # Add a linear layer\n name = \"linear-{:02d}\".format(i + 1)\n linear = nn.Linear(shapes[-1][1], size_linear_layer)\n layers[\"regressor\"].append((name, linear))\n shapes.append((name, size_linear_layer))\n # Activation\n if dense_activation == \"relu\":\n activ = nn.ReLU()\n elif dense_activation == \"elu\":\n activ = nn.ELU(alpha=0.1)\n elif dense_activation == \"leaky\":\n activ = nn.LeakyReLU()\n else:\n activ = nn.ReLU()\n name = \"{}-{:02d}\".format(dense_activation, i + 1)\n layers[\"regressor\"].append((name, activ))\n shapes.append((name, shapes[-1][1])) # activation does not change the size\n # Dropout\n if next_dropout_rate > 0.:\n name = \"dropout-{:02d}\".format(i + 1)\n dropout = nn.Dropout(p=next_dropout_rate)\n layers[\"regressor\"].append((name, dropout))\n shapes.append((name, shapes[-1][1])) # Dropout does not change the size of array\n next_dropout_rate += dropout_rate[1]\n # Add the final layer, the output size is fixed to 68 x 2 = 136\n name = \"output\"\n linear = nn.Linear(shapes[-1][1], 136)\n layers[\"regressor\"].append((name, linear))\n shapes.append((name, 136))\n # Create regressor module\n regressor = nn.Sequential(OrderedDict(layers[\"regressor\"]))\n module.add_module(\"regressor\", regressor)\n # Weight initialization\n module.apply(weight_initialization)\n # Optimizer\n if optimizer == \"Adam\":\n optim = torch.optim.Adam(module.parameters(), lr=learning_rate, weight_decay=weight_decay)\n elif optimizer == \"AdamW\":\n optim = torch.optim.AdamW(module.parameters(), lr=learning_rate, weight_decay=weight_decay)\n elif optimizer == \"SGD\":\n optim = torch.optim.SGD(module.parameters(), lr=learning_rate, weight_decay=weight_decay, momentum=0.9)\n else:\n raise ValueError(f\"Unknown optimizer {optimizer}.\")\n return module, shapes, optim", "def __init__(self, num_inputs=3, hidden_layers=[3, 3], num_outputs=2):\n\n self.num_inputs = num_inputs\n self.hidden_layers = hidden_layers\n self.num_outputs = num_outputs\n\n # create a generic representation of the layers\n layers = [num_inputs] + hidden_layers + [num_outputs]\n\n # create random connection weights for the layers\n weights = []\n for i in range(len(layers) - 1):\n w = np.random.rand(layers[i], layers[i + 1])\n weights.append(w)\n self.weights = weights\n\n activations = []\n\n for i in range(len(layers)):\n a = np.zeros(layers[i])\n activations.append(a)\n 
self.activations = activations\n\n derivatives = []\n\n for i in range(len(layers) - 1):\n d = np.zeros(layers[i])\n derivatives.append(d)\n self.derivatives = derivatives", "def create_params(self, layer_sizes):\n weights = [[None]]\n bias = []\n bias.append([1] * layer_sizes[0])\n\n for layer in range(1, len(layer_sizes)):\n weights.append(numpy.random.rand(layer_sizes[layer-1], layer_sizes[layer]))\n bias.append([0] * layer_sizes[layer])\n\n bias[len(layer_sizes)-1] = [0] * layer_sizes[-1]\n\n return weights, bias", "def build(self, weight = 0.5):\n \n self.weight = weight\n \n # Defining weighting matrixes\n self.processing_fw = torch.randn((self._hidden_size, self._expansion_size), requires_grad = True).to(self.device)\n self.processing_bw = torch.randn((self._hidden_size, self._expansion_size), requires_grad = True).to(self.device)\n self.processing_last_ht = torch.randn((self._hidden_size*2, self._hidden_size*2), requires_grad = True).to(self.device)\n \n # These will only be applied to the intermediate hidden states\n self.linear_fw = nn.Linear(self._seqlen - 1, 1).to(self.device)\n self.linear_bw = nn.Linear(self._seqlen - 1, 1).to(self.device)\n \n self.compression = torch.randn((self._expansion_size*2, self._hidden_size*2), requires_grad = True).to(self.device)\n \n if self._activation_fn == 'tanh' or isinstance(self._activation_fn, torch.nn.modules.activation.Tanh):\n self._activation_fn = nn.Tanh()\n elif self._activation_fn == 'sigmoid' or isinstance(self._activation_fn, torch.nn.modules.activation.Sigmoid):\n self._activation_fn = nn.Sigmoid()\n elif self._activation_fn == 'leaky ReLU' or isinstance(self._activation_fn, torch.nn.modules.activation.LeakyReLU):\n self._activation_fn = nn.LeakyReLU()\n else:\n raise ValueError('Non-linear activation function must be \"tanh\", \"sigmoid\" or \"leaky ReLU\"')\n \n # Passing it onto the relevant device\n self._activation_fn = self._activation_fn.to(self.device)", "def __init__(self, hidden_layers, input_size=784,\n num_classes=10, learning_rate=0.1):\n hidden_layers = [input_size] + hidden_layers + [num_classes]\n self.layers = []\n self.Layer = collections.namedtuple('Layer', ['weights', 'biases'])\n for i in range(len(hidden_layers) - 1):\n layer = self.Layer(np.random.randn(hidden_layers[i],\n hidden_layers[i + 1]).astype('float32'),\n np.random.randn(hidden_layers[i + 1]).astype('float32'))\n self.layers.append(layer)\n\n # normalization for stable training\n\n self.learning_rate = learning_rate", "def __init__(self, in_features, out_features):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n mean = 0\n std_dev = 0.0001\n #print(in_features)\n #print(out_features)\n # create weight matrices\n weight = np.random.normal(mean, std_dev, (out_features, in_features))\n #print(weight.shape)\n grad_weight = np.zeros((in_features, out_features))\n\n # create biases (in batches)\n bias = np.zeros(out_features)\n grad_bias = np.zeros(out_features)\n\n self.params = {'weight': weight, 'bias': bias}\n self.grads = {'weight': bias, 'bias': grad_bias}\n\n ########################\n # END OF YOUR CODE #\n #######################", "def create_neural_network():\n network_input = keras.layers.Input((NETWORK_INPUT_SIZE,))\n network_layer = keras.layers.Dense(100, kernel_initializer='random_uniform', activation='tanh')(network_input)\n network_layer = keras.layers.Dense(100, kernel_initializer='random_uniform', activation='tanh')(network_layer)\n network_output = keras.layers.Dense(NETWORK_OUTPUT_SIZE, 
kernel_initializer='random_uniform', activation='linear')(network_layer)\n network = keras.models.Model(inputs=network_input, outputs=network_output)\n network.compile(loss=\"mse\", optimizer=\"Adam\")\n return network", "def __init__(self, num_learners: int):\n self.num_learners = num_learners\n self.learners = []\n self.learner_weights = np.ones(num_learners)", "def build(self, input_shape):\n assert len(input_shape) == 4\n assert input_shape[1] == input_shape[2]\n self.out_channel = input_shape[3]\n # Create the weight vector\n self.W_shape = (input_shape[1], self.out_dim)\n if self.initial_weights is not None:\n self.set_weights(self.initial_weights)\n del self.initial_weights\n else:\n self.W = self.init(self.W_shape, name='{}_W'.format(self.name))\n self.trainable_weights = [self.W]\n self.built = True", "def make_neural_net_basic():\n i0 = Input('i0', -1.0) # this input is immutable\n i1 = Input('i1', 0.0)\n i2 = Input('i2', 0.0)\n w1A = Weight('w1A', 1)\n w2A = Weight('w2A', 1)\n wA = Weight('wA', 1)\n\n # Inputs must be in the same order as their associated weights\n A = Neuron('A', [i1,i2,i0], [w1A,w2A,wA])\n P = PerformanceElem(A, 0.0)\n\n net = Network(P,[A])\n return net", "def __init__(self, input_dim=(1, 28, 28), num_classes=10):\n self.params = {}\n\n #######################################################################\n # TODO: Initialize weights and biases for the convolutional neural #\n # network. Weights should be initialized from a Gaussian distribution;#\n # biases should be initialized to zero. All weights and biases should #\n # be stored in the dictionary self.params. #\n #######################################################################\n\n filter_size = 5\n weight_scale = 1e-2\n num_filters = 6\n hidden_dim = 784\n\n #****** THIS WAS TO TEST OUT FASTER NETWORKS *******\n\n self.params['W1'] = np.random.normal(scale=weight_scale, size=(num_filters, input_dim[0], filter_size, filter_size))\n # self.params['W2'] = np.random.normal(scale=weight_scale, size=(num_filters, 6, filter_size, filter_size))\n self.params['W3'] = np.random.normal(scale=weight_scale, size=(864, num_classes))\n\n # self.params['W3'] = np.random.normal(scale=weight_scale, size=(hidden_dim, num_classes))\n # self.params['W4'] = np.random.normal(scale=weight_scale, size=(hidden_dim, num_classes))\n\n self.params['b1'] = np.zeros(num_filters)\n # self.params['b2'] = np.zeros(num_filters)\n self.params['b3'] = np.zeros(num_classes)\n\n # self.params['b3'] = np.zeros(num_classes)\n # self.params['b4'] = np.zeros(num_classes)", "def __init__(self, num_parameters=1, init=0.25):\n super(PReLU, self).__init__()\n self.num_parameters = num_parameters\n self.weight = Parameter(Tensor(num_parameters).fill_(init))", "def init_weights(num_tilings, tiles_per_dim, num_dims, num_actions):\n weights = np.zeros((num_tilings*tiles_per_dim**num_dims*num_actions))\n return weights", "def init_weights_(self):\n raise NotImplementedError", "def __init__(self, structure, weights = [], activationType = \"sigmoid\"):\n self.weights = []\n self.activationType = activationType\n self.activations = []\n self.derivatives = []\n self.delta = []\n self.bias = []\n\n #Either randomises weights or uses starting weights that have been supplied. 
\n for i in range(len(structure)-1):\n if len(weights) != 0:\n w = np.zeros((structure[i], structure[i + 1]))\n for j in range(structure[i]):\n w[j, :] = weights[(j * structure[i + 1]) : (j * structure[i + 1]) + structure[i + 1]]\n else:\n w = np.random.rand(structure[i], structure[i + 1])\n self.weights.append(w)\n \n #Initialises activations array, delta array and bias array. \n for i in range(len(structure)):\n a = np.zeros(structure[i])\n self.activations.append(a)\n self.delta.append(a)\n self.bias.append(a)\n \n \n #Initialises derivatives array.\n for i in range(len(structure) - 1):\n d = np.zeros((structure[i], structure[i + 1]))\n self.derivatives.append(d)\n \n return", "def __init__(self, num_visible, num_hidden, act_func='logistic'):\n\n print('Initializing network... ', end='')\n sys.stdout.flush()\n\n self.num_visible = num_visible\n self.num_hidden = num_hidden\n \n #self.reconstructed = np.zeros((self.num_examples, self.num_visible))\n\n self.weights = 0.1 * np.random.randn(num_visible, num_hidden)\n self.v_bias = np.zeros((1, num_visible))\n self.h_bias = -4.0 * np.ones((1, num_hidden))\n\n self.w_inc = np.zeros((num_visible, num_hidden))\n self.v_inc = np.zeros((1, num_visible))\n self.h_inc = np.zeros((1, num_hidden))\n\n if act_func == 'chaotic':\n self.act_func = self.chaotic_logistic\n else:\n self.act_func = self.logistic\n\n print('Done!')\n return", "def __init__(self, input_nodes, hidden_nodes, hidden_layers, output_nodes):\n # Class members:\n # num_input_nodes\n # num_hidden_nodes\n # num_hidden_layers\n # num_output_nodes\n # weights = [[num_hidden_nodes, num_input_nodes],[num_hidden_nodes, num_hidden_nodes],[]<- for each hl,\n # [num_output_nodes, num_hidden_nodes]]\n # biases\n\n self.num_input_nodes = input_nodes\n self.num_hidden_nodes = hidden_nodes\n self.num_hidden_layers = hidden_layers\n self.num_output_nodes = output_nodes\n\n self.weights = []\n for i in range(self.num_hidden_layers + 1):\n if i is 0:\n # first weights array is input to hidden\n self.weights.append(.5 * np.random.rand(self.num_hidden_nodes, self.num_input_nodes) - .25)\n\n elif i < self.num_hidden_layers:\n # next weight array is hidden nodes to hidden nodes\n self.weights.append(.5 * np.random.rand(self.num_hidden_nodes, self.num_hidden_nodes) - .25)\n else:\n # last weight array is hidden nodes to output nodes\n self.weights.append(.5 * np.random.rand(self.num_output_nodes, self.num_hidden_nodes) - .25)\n\n self.biases = []\n for i in range(self.num_hidden_layers + 1):\n if i < self.num_hidden_layers:\n # for every hidden node there is a bias\n self.biases.append(0.5 * np.random.rand(self.num_hidden_nodes) - .25)\n else:\n # for the output node there is a bias as well\n self.biases.append(0.5 * np.random.rand(self.num_output_nodes) - .25)\n\n self.activation = np.vectorize(self.tanh, otypes=[float])", "def __init__(self, input_size, hidden_sizes, output_size=1,\n batchnorm_bool=False,\n dropout_bool=False):\n super(NeuralNet, self).__init__()\n self.input_size = input_size\n sizes = [input_size] + hidden_sizes + [output_size]\n self.layers = nn.ModuleList(\n [nn.Linear(in_f, out_f) for in_f, out_f in zip(sizes, sizes[1:])])\n self.bns = nn.ModuleList(\n [nn.BatchNorm1d(out_f) for in_f, out_f in zip(sizes, sizes[1:])])\n self.dps = nn.ModuleList(\n [nn.Dropout(p=0.5) for _ in range(len(self.layers))])\n self.relus = nn.ModuleList(\n [nn.ReLU() for _ in range(len(self.layers))])\n self.sigmoid = nn.Sigmoid()\n\n self.batchnorm_bool = batchnorm_bool\n self.dropout_bool = 
dropout_bool", "def _build_networks(self):\n self.online_convnet = self._create_network(name='Online')\n self.target_convnet = self._create_network(name='Target')\n self._net_outputs = self.online_convnet(self.state_ph, training=True)\n self._q_argmax = tf.argmax(self._net_outputs.q_values, axis=1)[0]\n self._replay_net_outputs = self.online_convnet(self._replay.states,\n training=True)\n self._replay_next_target_net_outputs = self.target_convnet(\n self._replay.next_states)", "def __init__(self, nInputs, nOutputs, hiddenLayersDims, outputActivationFunctions = None, outputActivationDerivatives = None, hiddenActivationFunctions = None,\\\n\t\t\t\t hiddenActivationDerivatives = None): \n\n\t\tself._nInputs = nInputs\n\t\tself._nOutputs = nOutputs\n\n\t\tself._nHiddenLayers, self._nUnitsPerLayer = hiddenLayersDims\n\n\t\tself._outputActivationFunctions = outputActivationFunctions\n\t\tself._outputActivationDerivatives = outputActivationDerivatives\n\n\t\tself._hiddenActivationFunctions = hiddenActivationFunctions\n\t\tself._hiddenActivationDerivatives = hiddenActivationDerivatives\n\n\t\tself.initialiseActivationFunctions()\n\n\t\tself.initialiseNetwork()\n\n\t\tself._nBranches = len(self.collectAllBranches())", "def __init__(self, batch_size=1, epochs=None, learning_rate=None, momentum=None, weights_name=''):\n self.batch_size = batch_size\n self.epochs = epochs\n self.model = None\n self.optimizer = None\n self.cb = None\n self.lr = learning_rate\n self.momentum = momentum\n self.weights_name = weights_name", "def init_weights(n_layers, layer_sizes):\n\n params = {}\n\n for i in range(n_layers):\n wn = 'W{}'.format(i)\n bn = 'b{}'.format(i)\n\n params[wn] = tf.get_variable(\n name=wn,\n shape=layer_sizes[i * 2],\n initializer=tf.contrib.layers.xavier_initializer(seed=42)\n )\n\n params[bn] = tf.get_variable(\n name=bn,\n shape=layer_sizes[(i * 2) + 1],\n initializer=tf.zeros_initializer()\n )\n\n return params", "def __init__(self, nx, nodes):\n if type(nx) is not int:\n raise TypeError(\"nx must be an integer\")\n if nx < 1:\n raise ValueError(\"nx must be a positive integer\")\n if type(nodes) is not int:\n raise TypeError(\"nodes must be an integer\")\n if nodes < 1:\n raise ValueError(\"nodes must be a positive integer\")\n # weights vector for the hidden layer\n # default mean is 0\n # default stddev is 1\n self.__W1 = np.random.normal(size=(nodes, nx))\n # The bias for the hidden layer. Upon instantiation,\n # it should be initialized with 0’s.\n self.__b1 = np.zeros((nodes, 1))\n # The activated output for the hidden layer. 
Upon instantiation,\n # it should be initialized to 0\n self.__A1 = 0\n # weights vector for the output neuron\n # default mean is 0\n # default stddev is 1\n self.__W2 = np.random.normal(size=(1, nodes))\n # bias for the output neuron\n self.__b2 = 0\n # activated output for the output neuron (prediction)\n self.__A2 = 0", "def __init__(self, input_size, output_size, activation=torch.nn.functional.relu, left_to_right=True):\n super(GraphConvolutionalLayer, self).__init__()\n self.w = torch.nn.Parameter(torch.rand([input_size, output_size]))\n self.activation = activation\n self.left_to_right = left_to_right", "def __init__(self, lengths):\n self.lengths = lengths\n\n # weights: index 1 = layer index, index 2 = feature index\n # last index in each layer is bias weight\n self.weights = np.array()\n self.activations = np.array()\n for i,length in enumerate(self.lengths):\n self.weights[i] = np.random.rand(1,length+1)\n self.activations[i] = np.random.rand(1,length+1)", "def __init__(self, size, parameters):\n\n self.weights = self.init_weights(size)\n self.alpha = parameters['alpha']\n self.epsilon = parameters['epsilon']\n self.gamma = parameters['gamma']\n self.value = 0.0 #np.random.random()", "def __init__(self, input_dims, output_dims, extra_dims):\n super().__init__()\n self.input_dims = input_dims\n self.output_dims = output_dims\n self.extra_dims = extra_dims\n\n self.weight = nn.Parameter(torch.zeros(*extra_dims, output_dims, input_dims))\n self.bias = nn.Parameter(torch.zeros(*extra_dims, output_dims))\n\n nn.init.kaiming_uniform_(self.weight, nonlinearity='relu')", "def __init__(self, output_dim, adjecancies,\n init='glorot_uniform',\n weights=None, W_regularizer=None,\n b_regularizer=None, bias=False, **kwargs):\n\n self.init = initializers.get(init)\n self.output_dim = output_dim # number of features per node\n\n self.adjecancies = adjecancies\n\n allDst = set()\n allSrc = set()\n for rel in adjecancies:\n for (src, dest) in rel:\n allSrc.add(src)\n allDst.add(dest)\n allIndices = allSrc.union(allDst)\n\n self.allSrc = allSrc\n self.allDst = allDst\n\n if len(allIndices) > 0 and min(allIndices) < 0:\n raise Exception(\"Index lower than 0 in adjecancies\")\n self.maxIndexInAdjecencies = - \\\n 1 if len(allIndices) == 0 else max(allIndices)\n\n self.W_regularizer = regularizers.get(W_regularizer)\n self.b_regularizer = regularizers.get(b_regularizer)\n\n self.bias = bias\n self.initial_weights = weights\n\n # these will be defined during build()\n self.input_dim = None\n self.W = None\n self.W_comp = None\n self.b = None\n self.num_nodes = None\n\n super(GraphConvolution, self).__init__(**kwargs)" ]
[ "0.7550142", "0.7535491", "0.75056463", "0.74940723", "0.7493714", "0.7344416", "0.7279779", "0.7265629", "0.7218069", "0.72046214", "0.71778685", "0.71470296", "0.7120793", "0.70849985", "0.7019694", "0.70176697", "0.69794846", "0.69722265", "0.6920543", "0.6879415", "0.6876073", "0.68726945", "0.6866499", "0.6864688", "0.68437016", "0.68381244", "0.67863643", "0.6778829", "0.67667735", "0.6764995", "0.6751022", "0.67010856", "0.6698876", "0.6696529", "0.6691007", "0.66866696", "0.66846126", "0.6679412", "0.667502", "0.666627", "0.66609716", "0.6656913", "0.6649386", "0.66405845", "0.6633995", "0.66323686", "0.661759", "0.66086", "0.66070163", "0.66042805", "0.65966916", "0.6593298", "0.65889704", "0.6568752", "0.656513", "0.6563131", "0.6562176", "0.6561839", "0.65594405", "0.65392673", "0.6529379", "0.6525524", "0.65232414", "0.6521053", "0.65173566", "0.65153545", "0.65150005", "0.65120596", "0.651083", "0.65053177", "0.6492661", "0.647939", "0.64676726", "0.64634955", "0.64606917", "0.6452544", "0.64515257", "0.64460826", "0.6441553", "0.643813", "0.6414887", "0.6401515", "0.63858724", "0.6370019", "0.63662416", "0.6364602", "0.63625604", "0.63608056", "0.6358678", "0.6351422", "0.63466585", "0.6341455", "0.63344973", "0.633385", "0.6324847", "0.63139725", "0.6313668", "0.63021725", "0.6301569", "0.62915355", "0.6289022" ]
0.0
-1
Initialize the neural network with the required layers, the weights and the biases. The input "layers" is an array that contains the number of nodes (neurons) in each layer.
def initialize_NN(self, m):
    if type(m) == nn.Linear:
        nn.init.xavier_uniform_(m.weight)
        # print(m.weight)
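A minimal sketch of the full construction the query describes, assuming PyTorch (the snippet above only defines the per-module initialization rule). The class name FeedForwardNN, the use of nn.ModuleList, the example layer sizes, and the zero-bias initialization are illustrative assumptions not present in the original snippet.

import torch.nn as nn

class FeedForwardNN(nn.Module):  # hypothetical name, not from the original
    def __init__(self, layers):
        # "layers" holds the number of nodes (neurons) per layer,
        # e.g. [3, 16, 16, 1] -> 3 inputs, two hidden layers of 16 nodes, 1 output.
        super().__init__()
        self.linears = nn.ModuleList(
            [nn.Linear(layers[i], layers[i + 1]) for i in range(len(layers) - 1)]
        )
        # Recursively apply the same per-module rule as initialize_NN above,
        # so every nn.Linear gets Xavier-uniform weights.
        self.apply(self.initialize_NN)

    def initialize_NN(self, m):
        if type(m) == nn.Linear:
            nn.init.xavier_uniform_(m.weight)  # Xavier-uniform weight init
            nn.init.zeros_(m.bias)             # assumption: biases start at zero

Usage would look like net = FeedForwardNN([3, 16, 16, 1]), which creates one nn.Linear per consecutive pair of entries in the layers array and initializes its weights and biases on construction.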
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, layerNeurons, initialWeights = None, layerTypes=None, **kwargs):\r\n \r\n # Ensure that there is at-least one input and one output layer in the network\r\n assert len(layerNeurons)>1, \"At least one input layer and one output layer is needed\"\r\n \r\n # Get the total number of weights needed in the network\r\n totalWeightCount = NeuralNetwork.getSizeOfWeightVector(layerNeurons)\r\n \r\n # Initialise the weights with the initializer or random values\r\n if initialWeights is None:\r\n self.weights = np.random.uniform(-1/np.sqrt(layerNeurons[0]), 1/np.sqrt(layerNeurons[0]), totalWeightCount)\r\n else:\r\n assert len(initialWeights) == totalWeightCount, (\"Length of initial weight matrix incorrect. You need \"+str(totalWeightCount)+\" weights\")\r\n self.weights = np.array(initialWeights, dtype = np.float64) \r\n \r\n # create an empty array of layers\r\n self.layers = []\r\n layerBlockStart = 0\r\n \r\n if layerTypes is None or len(layerTypes)<(len(layerNeurons)-1):\r\n layerTypes=[NetworkLayer]*(len(layerNeurons)-1)\r\n \r\n for layerInputDimention, layerOutputDimention, layerType in zip(layerNeurons, layerNeurons[1:], layerTypes):\r\n # initialise each layer with its input and output dimentions and bi-directional pointers to the relivant weights\r\n layerBlockEnd = layerBlockStart+(layerInputDimention*layerOutputDimention)\r\n layerBiasEnd = layerBlockEnd+layerOutputDimention\r\n newLayer = layerType(layerInputDimention, layerOutputDimention, \r\n self.weights[..., layerBlockStart:layerBlockEnd], \r\n self.weights[..., layerBlockEnd:layerBiasEnd], **kwargs)\r\n self.layers.append(newLayer)\r\n \r\n layerBlockStart = layerBiasEnd\r\n \r\n # Tell the output later to use a different function to calculate the delta \r\n newLayer.calcDelta = newLayer.calcDeltaOutputLayer", "def _init_layers(self) -> None:\n weight_nums, bias_nums = [], []\n for i in range(self.num_layers):\n if i == 0:\n weight_nums.append((self.in_channels + 2) * self.feat_channels)\n bias_nums.append(self.feat_channels)\n elif i == self.num_layers - 1:\n weight_nums.append(self.feat_channels * 1)\n bias_nums.append(1)\n else:\n weight_nums.append(self.feat_channels * self.feat_channels)\n bias_nums.append(self.feat_channels)\n\n self.weight_nums = weight_nums\n self.bias_nums = bias_nums\n self.num_params = sum(weight_nums) + sum(bias_nums)", "def __init__(self, layerNeurons, numberOfLayers, initialWeights = None, lowerBound = None, upperBound = None):\r\n \r\n # Ensure that there is at-least one input and one output layer in the network\r\n assert len(layerNeurons) > 1, \"At least one input layer and one output layer is needed\"\r\n \r\n # Get the total number of weights needed in the network\r\n totalWeightCount = NeuralNetwork.getSizeOfWeightVector(layerNeurons)*numberOfLayers\r\n \r\n # Initialise the weights with the initialiser or random values\r\n if initialWeights is None:\r\n if lowerBound is None:\r\n lowerBound=-1/np.sqrt(layerNeurons[0])\r\n if upperBound is None:\r\n upperBound=1/np.sqrt(layerNeurons[0])\r\n self.weights = np.random.uniform(lowerBound, upperBound, totalWeightCount)\r\n else:\r\n assert initialWeights.size == totalWeightCount, (\"Length of initial weight matrix incorrect. 
You need \"+str(totalWeightCount)+\" weights\")\r\n self.weights = initialWeights.view()\r\n \r\n self.weights.shape = (numberOfLayers, -1)\r\n # create an empty array of layers\r\n self.layers = []\r\n layerBlockStart = 0\r\n \r\n for layerInputDimention, layerOutputDimention in zip(layerNeurons, layerNeurons[1:]):\r\n # initialise each layer with its input and output dimentions and bi-directional pointers to the relivant weights\r\n layerBlockEnd = layerBlockStart+(layerInputDimention*layerOutputDimention)\r\n layerBiasEnd = layerBlockEnd+layerOutputDimention\r\n newLayer = batchNetworkLayer(layerInputDimention, layerOutputDimention, numberOfLayers, \r\n self.weights[..., :, layerBlockStart:layerBlockEnd], \r\n self.weights[..., :, layerBlockEnd:layerBiasEnd])\r\n self.layers.append(newLayer)\r\n \r\n layerBlockStart = layerBiasEnd", "def create_network(layers):\r\n return NeuronNetwork(layers)", "def __init__(self, weights, alphas):\n self._neurons = [Neuron(a, w) for w, a in zip(weights, alphas)]", "def __init__(self, num_inputs=3, hidden_layers=[3, 3], num_outputs=2):\n\n self.num_inputs = num_inputs\n self.hidden_layers = hidden_layers\n self.num_outputs = num_outputs\n\n # create a generic representation of the layers\n layers = [num_inputs] + hidden_layers + [num_outputs]\n\n # create random connection weights for the layers\n weights = []\n for i in range(len(layers) - 1):\n w = np.random.rand(layers[i], layers[i + 1])\n weights.append(w)\n self.weights = weights\n\n activations = []\n\n for i in range(len(layers)):\n a = np.zeros(layers[i])\n activations.append(a)\n self.activations = activations\n\n derivatives = []\n\n for i in range(len(layers) - 1):\n d = np.zeros(layers[i])\n derivatives.append(d)\n self.derivatives = derivatives", "def __init__(self, n_layers, layers_sizes, activation='sigmoid', learning_rate=0.1, weight_init='normal', batch_size=1000, num_epochs=100):\n self.layers_sizes=layers_sizes[1:]\n self.activation=activation\n self.learning_rate=learning_rate\n self.weight_init=weight_init\n self.batch_size=batch_size\n self.num_epochs=num_epochs\n self.weights={}\n self.n_layers=len(self.layers_sizes)\n self.num_samples=0\n self.training_loss_values=[]\n self.testing_loss_values=[]\n self.gg=0.01\n self.XTEST=None\n self.YTEST=None\n self.TTTT=None\n\n if activation not in self.acti_fns:\n raise Exception('Incorrect Activation Function')\n\n if weight_init not in self.weight_inits:\n raise Exception('Incorrect Weight Initialization Function')\n pass", "def __init__(self, weights=[], alphas=[]):\n self._layers = [Layer(w, a) for w, a in zip(weights, alphas)]", "def __init__(self, N_sym, n_nodes, activations, N_element, bias = True, scaling = None):\n super(MultiLayerNet, self).__init__()\n N_layers = len(n_nodes)\n if N_layers == 0:\n self.net = torch.nn.Linear(N_sym, N_element, bias = bias)\n else:\n layers = []\n for n in range(N_layers):\n if n == 0:\n layers += [torch.nn.Linear(N_sym, n_nodes[n], bias = bias)]\n layers += [activations[n]]\n else:\n layers += [torch.nn.Linear(n_nodes[n-1], n_nodes[n], bias = bias)]\n layers += [activations[n]]\n layers += [torch.nn.Linear(n_nodes[-1], N_element, bias = bias)]\n self.net = torch.nn.Sequential(*layers)\n \n self.scaling = scaling", "def __init__(self, layers=[2, 2, 1], activation_function=\"bentidentity\"):\n self.layers = layers\n self.activation_function = th.activation_functions[activation_function]\n self.activation_derivative = th.activation_derivatives[\n activation_function]\n self.weights = 
self._generate_weights()", "def __init__(self, netSize):\n\t\t\n\t\t# TRY THIS FOR RANDOM!\n\t\t#\n\t\t#\n\t\t#\n\t\t\n\t\tself.biases = [self.randomArray(i, 1) for i in netSize[1:]] # Biases do not exist for the first layer ! Those are inputs.\n\t\tself.netSize = netSize\n\t\t#Initialize Weights\n\t\t#This initializes the weights for each layer based on the size. The number of rows should be\n\t\t#the number of neurons for the current, and the number of columns should be the same as the number of neurons\n\t\t#in the next layer. There are no weights for the last layer. That's the output layer.\n\t\tself.weights \t\t = [self.randomArray(i, j) for i, j in zip(netSize[:-1], netSize[1:]) ]", "def __init__(self, nInputs, nOutputs, hiddenLayersDims, outputActivationFunctions = None, outputActivationDerivatives = None, hiddenActivationFunctions = None,\\\n\t\t\t\t hiddenActivationDerivatives = None): \n\n\t\tself._nInputs = nInputs\n\t\tself._nOutputs = nOutputs\n\n\t\tself._nHiddenLayers, self._nUnitsPerLayer = hiddenLayersDims\n\n\t\tself._outputActivationFunctions = outputActivationFunctions\n\t\tself._outputActivationDerivatives = outputActivationDerivatives\n\n\t\tself._hiddenActivationFunctions = hiddenActivationFunctions\n\t\tself._hiddenActivationDerivatives = hiddenActivationDerivatives\n\n\t\tself.initialiseActivationFunctions()\n\n\t\tself.initialiseNetwork()\n\n\t\tself._nBranches = len(self.collectAllBranches())", "def __init__(self, hidden_layers, input_size=784,\n num_classes=10, learning_rate=0.1):\n hidden_layers = [input_size] + hidden_layers + [num_classes]\n self.layers = []\n self.Layer = collections.namedtuple('Layer', ['weights', 'biases'])\n for i in range(len(hidden_layers) - 1):\n layer = self.Layer(np.random.randn(hidden_layers[i],\n hidden_layers[i + 1]).astype('float32'),\n np.random.randn(hidden_layers[i + 1]).astype('float32'))\n self.layers.append(layer)\n\n # normalization for stable training\n\n self.learning_rate = learning_rate", "def __init__(self, layer_neuron):\n\n self.num_layers = len(layer_neuron)\n self.layer_neuron = layer_neuron\n #a list of numpy ndarrays\n self.weights = []\n self.input_len = 0\n self.target_vals = []\n self.current_guess = 0\n\n self.layer_inputs = [[]]*(len(layer_neuron))\n self.layer_outputs = [[]]*(len(layer_neuron))\n #deltas: don't include input layer but still put it in for spacing ie self.deltas[0] should always be empty\n self.deltas = [[]]*((len(layer_neuron)))\n\n #make the weight matrices, each matrix nXm matrix with m = # nodes in the ith layer (incl bias) and n = # nodes in the (i+1)th layer\n #each row represents the set of weights from all m neurons in the ith layer to a single neuron in the (i+1)th layer\n #conversely, each column is all the output weights from a node in the ith layer, to each n nodes in the (i+1)th layer\n #the right-most column represents output weights from the bias node\n for i in range(len(self.layer_neuron)-1 ):\n np.random.seed(0)\n self.weights.append(np.random.normal( scale = 0.2, size = (self.layer_neuron[i+1], self.layer_neuron[i] + 1)))", "def initialize_weights(self, weights_initializer, bias_initializer):\n wshapes = [\n [self.input_size, self.hidden_size[0]],\n [self.hidden_size[0], self.hidden_size[1]],\n [self.hidden_size[1], self.output_size]\n ]\n\n bshapes = [\n [1, self.hidden_size[0]],\n [1, self.hidden_size[1]],\n [1, self.output_size]\n ]\n\n self.weights = [init_weights(s, weights_initializer) for s in wshapes]\n self.biases = [init_weights(s, bias_initializer) for s in 
bshapes]\n\n self.trainable_variables = self.weights + self.biases", "def __init__(self, layers, r_min, r_max, learn_rate):\n\n if not isinstance(layers, list) or len(layers) < 3:\n raise ValueError('invalid layer parammeter')\n self.layers = layers\n n_layer = len(layers)\n self.n_layer = n_layer\n self.r_min = r_min\n self.r_max = r_max\n self.learn_rate = learn_rate\n\n # initialize ws\n self.ws = []\n for layer_idx in range(n_layer - 1):\n layer_size = (layers[layer_idx] + 1, layers[layer_idx + 1])\n w = self.init_w(layer_size)\n self.ws.append(w)", "def initialize_weights(self):\n for layer in self._cnn_layers:\n weights_initializer.WeightsInitializer.initialize_layer_or_model(layer)", "def connect_layers(self):\n if not self.check():\n msg = \"Failed to check neural network.\"\n print(msg)\n logging.error(msg)\n return\n\n # 1. set input layer\n pre_layer = self.input_layer\n for layer in self.hidden_layers:\n layer.set_input_layer(pre_layer)\n pre_layer = layer\n self.output_layer.set_input_layer(pre_layer)\n\n # 2. set output layer\n next_layer = self.output_layer\n for layer in reversed(self.hidden_layers):\n layer.set_next_layer(next_layer)\n next_layer = layer\n self.input_layer.set_next_layer(next_layer)\n\n # 3. call layer init\n self.input_layer.init()\n for layer in self.hidden_layers:\n layer.init()\n self.output_layer.init()\n\n return", "def __init__(self):\r\n # A dummy layer does nothing\r\n self.weights = np.zeros(shape=(input.shape[1], 10))\r\n bias = np.zeros(shape=(10,))\r\n pass", "def init_three_layer_neuralnet(weight_scale=1, bias_scale=0, input_feat_dim=786,\n num_classes=10, num_neurons=(20, 30)):\n \n assert len(num_neurons) == 2, 'You must provide number of neurons for two layers...'\n\n model = {}\n #model['W1'] = np.random.randn((num_neurons[0],(input_feat_dim) * weight_scale) * math.sqrt(2.0/input_feat_dim)) # Initialize from a Gaussian With scaling of sqrt(2.0/fanin)\n \n model['W1'] = (np.random.rand(input_feat_dim,num_neurons[0])*weight_scale) * math.sqrt(2.0/input_feat_dim)\n model['b1'] = np.zeros(num_neurons[0])# Initialize with zeros\n \n #model['W2'] = (np.random.randn(input_feat_dim) * weight_scale) * math.sqrt(2.0/input_feat_dim)# Initialize from a Gaussian With scaling of sqrt(2.0/fanin)\n #print ((model['W1'])[0,:]).shape\n #numcols = len(input[0])\n t=len((model['W1'])[0])\n #print t\n model['W2'] = (np.random.rand(num_neurons[0],num_neurons[1])*weight_scale) * math.sqrt(2.0/t)\n model['b2'] = np.zeros(num_neurons[1])# Initialize with zeros\n\n t=len((model['W2'])[0])\n #model['W3'] = (np.random.randn(input_feat_dim) * weight_scale) * math.sqrt(2.0/input_feat_dim)# Initialize from a Gaussian With scaling of sqrt(2.0/fanin)\n model['W3'] = (np.random.rand(num_neurons[1],num_classes)*weight_scale) * math.sqrt(2.0/t)\n model['b3'] = np.zeros(num_classes)# Initialize with zeros\n\n return model", "def initialize_network(self, model, num_init=None, **net_args):\n\n self.net_args = net_args\n\n if num_init is None:\n self.num_init = 1\n else:\n self.num_init = num_init\n\n nets = []\n for i in range(self.num_init):\n nets.append( model(dim_inp=self.dim_inp, \n dim_out=self.dim_out, **net_args) )\n\n return nets", "def init_weights(self, load_weights=None):\n if load_weights:\n # TODO\n pass\n else:\n # x: lower layer nodes n\n # y: current layer nodes n\n x = self.weights_shape[1]\n y = self.weights_shape[0]\n self.weights = np.random.randn(y, x) / np.sqrt(x) # pylint: disable=no-member\n self.biases = np.random.randn(y, 1) # pylint: 
disable=no-member", "def create_base_network(NumberOfFeatures, NumberOfClasses,init_mode='glorot_normal'):\n network = Sequential()\n network.add(Dense(44, activation='sigmoid', kernel_initializer=init_mode,input_dim=NumberOfFeatures))\n# network.add(Dense(22, activation='sigmoid',kernel_initializer=init_mode))\n network.add(Dense(NumberOfClasses, activation='softmax',kernel_initializer=init_mode))\n return network", "def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):\n # set number of nodes in each input, hidden, output layer\n self.inodes = inputnodes\n self.hnodes = hiddennodes\n self.onodes = outputnodes\n\n #learning rate\n self.lr = learningrate", "def neural_net(self, layers):\n model = nn.Sequential()\n for l in range(0, len(layers) - 1):\n model.add_module(\"layer_\"+str(l), nn.Linear(layers[l],layers[l+1], bias=True))\n if l != len(layers) - 2:\n model.add_module(\"tanh_\"+str(l), nn.Tanh())\n\n return model", "def __init__(self, n_classes, num_nodes=None, dropouts=None, activation=\"relu\"):\n # parameters initialization\n self.n_classes = n_classes\n if self.n_classes == 1:\n self.output_act = \"sigmoid\"\n else:\n self.output_act = \"softmax\"\n self.num_nodes = num_nodes if num_nodes != None else [1024, 512]\n self.dropouts = dropouts if dropouts != None else [0.5, 0.5]\n self.activation = activation\n\n # Check if number of layers and number of dropouts have same dimension\n if not len(self.num_nodes) == len(self.dropouts):\n raise AssertionError()", "def init_layer(layer):\n \n if layer.weight.ndimension() == 4:\n (n_out, n_in, height, width) = layer.weight.size()\n n = n_in * height * width\n \n elif layer.weight.ndimension() == 2:\n (n_out, n) = layer.weight.size()\n\n std = math.sqrt(2. / n)\n scale = std * math.sqrt(3.)\n layer.weight.data.uniform_(-scale, scale)\n\n if layer.bias is not None:\n layer.bias.data.fill_(0.)", "def build_cnn(input_var=None, w_init=None, n_layers=(4, 2, 1), n_filters_first=32, imsize=32, n_colors=3):\n weights = [] # Keeps the weights for all layers\n count = 0\n # If no initial weight is given, initialize with GlorotUniform\n if w_init is None:\n w_init = [lasagne.init.GlorotUniform()] * sum(n_layers)\n # Input layer\n network = InputLayer(shape=(None, n_colors, imsize, imsize),\n input_var=input_var)\n for i, s in enumerate(n_layers):\n for l in range(s):\n network = Conv2DLayer(network, num_filters=n_filters_first * (2 ** i), filter_size=(3, 3),\n W=w_init[count], pad='same')\n count += 1\n weights.append(network.W)\n network = MaxPool2DLayer(network, pool_size=(2, 2))\n return network, weights", "def __init__(self, sizes: list):\n self.num_layers = len(sizes)\n self.sizes = sizes\n self.biases = [np.random.randn(y, 1) for y in sizes[1:]]\n self.weights = [np.random.randn(y, x) for x, y in zip(sizes[:-1], sizes[1:])]", "def __init__(self, input_nodes, hidden_nodes, hidden_layers, output_nodes):\n # Class members:\n # num_input_nodes\n # num_hidden_nodes\n # num_hidden_layers\n # num_output_nodes\n # weights = [[num_hidden_nodes, num_input_nodes],[num_hidden_nodes, num_hidden_nodes],[]<- for each hl,\n # [num_output_nodes, num_hidden_nodes]]\n # biases\n\n self.num_input_nodes = input_nodes\n self.num_hidden_nodes = hidden_nodes\n self.num_hidden_layers = hidden_layers\n self.num_output_nodes = output_nodes\n\n self.weights = []\n for i in range(self.num_hidden_layers + 1):\n if i is 0:\n # first weights array is input to hidden\n self.weights.append(.5 * np.random.rand(self.num_hidden_nodes, self.num_input_nodes) - 
.25)\n\n elif i < self.num_hidden_layers:\n # next weight array is hidden nodes to hidden nodes\n self.weights.append(.5 * np.random.rand(self.num_hidden_nodes, self.num_hidden_nodes) - .25)\n else:\n # last weight array is hidden nodes to output nodes\n self.weights.append(.5 * np.random.rand(self.num_output_nodes, self.num_hidden_nodes) - .25)\n\n self.biases = []\n for i in range(self.num_hidden_layers + 1):\n if i < self.num_hidden_layers:\n # for every hidden node there is a bias\n self.biases.append(0.5 * np.random.rand(self.num_hidden_nodes) - .25)\n else:\n # for the output node there is a bias as well\n self.biases.append(0.5 * np.random.rand(self.num_output_nodes) - .25)\n\n self.activation = np.vectorize(self.tanh, otypes=[float])", "def __initialize_weights__(num_neurons: int) -> np.ndarray:\n\n weights = np.zeros((num_neurons, num_neurons))\n\n return weights", "def __init__(self, layer_size, activation_function=linear,\n derivative_function=dlinear,\n forward_function=propagate_forward,\n backward_function=propagate_backward_irpropm,\n init_weights_function=fully_connected_weights, bias=True):\n self.forward_propagation = forward_function\n self.back_propagation = backward_function\n self.activation_function = activation_function\n self.derivative_function = derivative_function\n self.bias = bias\n\n # the activations of these nodes\n bias_add = 0\n if self.bias:\n bias_add = 1\n self.visible = numpy.ones(layer_size + bias_add)\n self.init_weights_function = init_weights_function", "def __init__(self, n_input, n_output, hidden_layer_size, reg):\n self.reg = reg\n self.input_layer = FullyConnectedLayer(n_input, hidden_layer_size)\n self.relu = ReLULayer()\n self.output_layer = FullyConnectedLayer(hidden_layer_size, n_output)\n self.W_in = None\n self.W_out = None\n self.B_in = None\n self.B_out = None\n # TODO Create necessary layers", "def test_init_net_simple(self):\n net = ecn.NeuralNet(2, (2,), 1)\n self.assertEqual(2, len(net.weights.keys()))\n self.assertEqual((2, 3), np.shape(net.weights['h0']))\n self.assertEqual((1, 3), np.shape(net.weights['y']))\n print('Finished testing simple neural net init\\n')", "def init_weights(net, init_gain=0.02):\n def init_func(m): # define the initialization function\n classname = m.__class__.__name__\n if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):\n init.normal_(m.weight.data, 0.0, init_gain)\n if hasattr(m, 'bias') and m.bias is not None:\n init.constant_(m.bias.data, 0.0)\n\n print('initialize network')\n net.apply(init_func) # apply the initialization function <init_func>", "def __init__(self, nx, nodes):\n if type(nx) is not int:\n raise TypeError(\"nx must be an integer\")\n if nx < 1:\n raise ValueError(\"nx must be a positive integer\")\n if type(nodes) is not int:\n raise TypeError(\"nodes must be an integer\")\n if nodes < 1:\n raise ValueError(\"nodes must be a positive integer\")\n # weights vector for the hidden layer\n # default mean is 0\n # default stddev is 1\n self.__W1 = np.random.normal(size=(nodes, nx))\n # The bias for the hidden layer. Upon instantiation,\n # it should be initialized with 0’s.\n self.__b1 = np.zeros((nodes, 1))\n # The activated output for the hidden layer. 
Upon instantiation,\n # it should be initialized to 0\n self.__A1 = 0\n # weights vector for the output neuron\n # default mean is 0\n # default stddev is 1\n self.__W2 = np.random.normal(size=(1, nodes))\n # bias for the output neuron\n self.__b2 = 0\n # activated output for the output neuron (prediction)\n self.__A2 = 0", "def init_learner(self,**kwargs):\r\n \r\n if self.learn_type == 'nn':\r\n #initialize neural network\r\n shape = kwargs[\"shape\"]\r\n #initialize input layer\r\n model = Sequential() \r\n #add hidden layers\r\n for i in range(len(shape)):\r\n if i == 0:\r\n nb_input = self.size\r\n else:\r\n nb_input = shape[i -1]\r\n nb_output = shape[i]\r\n model.add(Dense(nb_input,nb_output,init=\"he_normal\",\r\n activation = \"tanh\"))\r\n model.add(Dropout(.5))\r\n model.add(Dense(shape[-1],1,init = \"he_normal\",\r\n activation = \"linear\"))\r\n model.compile(loss = 'mean_squared_error',optimizer = 'rmsprop')\r\n self.learner = model\r\n \r\n elif self.learn_type == 'linear':\r\n #initialize parameter\r\n self.learner = Linear(self.size,**kwargs)", "def construct_network(self, n_units, n_samples=1, noise_dim=0,\n keep_p=1., nonlinearity=True, init_params=None, name=\"\"):\n print \"constructing network, n_units: \",n_units\n # TODO use kwargs for more elagant solutions to being called by this \n # base class\n assert keep_p ==1. and nonlinearity and noise_dim == 0\n\n assert init_params is None # this is implemented only in the Bayesian flow version of this function\n\n ### Define parameters of the network\n self.weights, self.biases, KL = {}, {}, 0.\n self.layers = []\n # Establish paramters of appromiate posterior over weights and\n # biases.\n for l in range(1, len(n_units)):\n with tf.variable_scope(name+'Layer_%d'%l):\n n_in, n_out = n_units[l-1], n_units[l]\n\n # use non neglidgible uncertainty if we are doing VI\n sigma_init = self.init_sigma_params\n\n w_prior_sigma, b_prior_sigma = self.w_prior_sigma, self.w_prior_sigma\n mu_init_sigma_w, mu_init_sigma_b = np.sqrt(1./(n_in)), 1.\n\n (w_mu, w_logstd), _, w_KL = utils.set_q(name+\"w_%d\"%l,\n sigma_prior=w_prior_sigma, mu_init_sigma=mu_init_sigma_w,\n sigma_init=sigma_init, n_samples=0,\n size=[n_in, n_out], save_summary=True)\n\n # We use same init_sigma for weights and biases.\n (b_mu, b_logstd), _, b_KL = utils.set_q(name+\"b_%d\"%l,\n sigma_prior=b_prior_sigma, mu_init_sigma=mu_init_sigma_b,\n sigma_init=sigma_init, n_samples=0,\n size=[n_out], save_summary=True)\n self.weights['w_%d_mu'%l], self.weights['w_%d_std'%l] = w_mu, tf.nn.softplus(w_logstd)\n self.biases['b_%d_mu'%l], self.biases['b_%d_std'%l] = b_mu, tf.nn.softplus(b_logstd)\n\n self.params += [w_mu, b_mu, w_logstd, b_logstd]\n KL += w_KL + b_KL\n\n # Add an extra dimension to correspond to samples.\n prev_layer = tf.stack([self.x]*n_samples)\n self.layers.append(prev_layer)\n # shape is [n_samples, ?, dim(x)]\n\n ### Define activations in each layer\n for l in range(1,len(n_units)):\n print \"defining activations in layer %d\"%l\n # Multiply with weight matrix and add bias\n prev_layer = tf.reshape(prev_layer, [-1, n_units[l-1]])\n layer_pre_bias = tf.matmul(prev_layer, self.weights['w_%d_mu'%l])\n layer_pre_bias = tf.reshape(layer_pre_bias, [n_samples, -1, n_units[l]])\n # Shape of layer_pre_bias is [n_samples, ?, n_units[l]]\n\n # add mean bias term\n layer = tf.add(layer_pre_bias, self.biases['b_%d_mu'%l][None, None, :])\n\n # Calculate the noise in each hidden unit.\n # must use absolute value of activation because final layer may\n # have negative 
values.\n layer_var = tf.matmul(tf.reshape(prev_layer**2,[-1,\n n_units[l-1]]), self.weights['w_%d_std'%l]**2)\n layer_var = tf.reshape(layer_var, [n_samples, -1, n_units[l]])\n layer_var += self.biases['b_%d_std'%l]**2\n\n # Now sample noise and add scaled noise.\n # This constitutes the local reparameterization trick.\n eps = tf.random_normal(name='eps_%d'%l, mean=0.,\n stddev=1.0, shape=[n_samples, 1, n_units[l]])\n layer_sigma = tf.sqrt(layer_var)\n layer += layer_sigma*eps\n with tf.name_scope(name+\"Neural_Network_Activations_%d\"%l):\n tf.summary.histogram(name+\"Layer_%d_sigmas\"%l, layer_sigma)\n tf.summary.histogram(name+\"Layer_%d_activations_pre_tanh\"%l, layer)\n\n # Add tanh nonlinearity\n if l != (len(n_units) - 1): layer = tf.nn.tanh(layer)\n\n with tf.name_scope(name+\"Neural_Network_Activations_%d\"%l):\n tf.summary.histogram(name+\"Layer_%d_activations_post_tanh\"%l,layer)\n\n prev_layer = layer\n self.layers.append(prev_layer)\n self.KL_BNN = KL\n return prev_layer", "def __init__(self, sizes):\n self.num_layers = len(sizes)\n self.sizes = sizes\n self.biases = [np.random.randn(y, 1) for y in sizes[1:]]\n self.weights = [np.random.randn(y, x) for x, y in zip(sizes[:-1], sizes[1:])]", "def __init__(self, sizes):\r\n self.num_layers = len(sizes)\r\n self.sizes = sizes\r\n self.biases = [np.random.randn(y, 1) for y in sizes[1:]]\r\n self.weights = [np.random.randn(y, x)\r\n for x, y in zip(sizes[:-1], sizes[1:])]", "def __init__(self, num_units=512, num_layers=4):\n\n self.num_units = num_units\n self.num_layers = num_layers\n \n self.layers = []\n for idx in range(self.num_layers):\n lstm_layer = tf.contrib.cudnn_rnn.CudnnLSTM(num_units=self.num_units,\n direction='unidirectional',\n num_layers=1)\n self.layers.append(lstm_layer)", "def init(self,topology,learningRate=0.01,momentum=0.1,name='Network',add_bias=True):\n self.topology = topology\n self.learningRate = learningRate\n self.momentum = momentum\n self.name = name\n self.size = len(self.topology)-1 #The size of the network will be the number of weeight matrices between layers, instead of the number of layers itself\n #self._hiddenActiv_fun_key = 'tanh'\n #self._outActiv_fun_key = 'tanh'\n #self.output_activation = self.set_outActivation_fun(func=self._outActiv_fun_key)\n #self.hidden_activation = self.set_hiddenactivation_fun(func=self._hiddenActiv_fun_key)\n\n # Initialize random weights, and create empty matrices to store the previous changes in weight (for momentum):\n if add_bias:\n #self.weights = [np.random.normal(loc=0,scale=0.6,size=(topology[i]+1, topology[i+1])) for i in range(self.size)]\n self.weights = [np.random.normal(loc=0,\n scale=0.6,\n size=(topology[i]+1, topology[i+1]+1)) for i in range(self.size-1)] #we are only generating matrices for inital and hidden layers\n #Create matrix for output layer\n f_idx = self.size-1 #use this index for the final layer matrix below\n self.weights.append(np.random.normal(loc=0,\n scale=0.6,\n size=(topology[f_idx]+1,topology[f_idx+1])))\n else:\n raise NotImplemented(\"Currently the network only works when bias nodes are used\")\n self.weights = [np.random.normal(loc=0,scale=0.6,size=(topology[i], topology[i])) for i in range(self.size)]\n\n self.Gradients = [None] * self.size", "def __init__(self, input_size, hidden_size, num_layers, nonlinearity=torch.tanh):\n super().__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n\n # input weights\n self.weight_ih_l0 = Parameter(torch.Tensor(3*hidden_size, input_size))\n\n # hidden weights\n 
self.weight_hh_l0 = Parameter(torch.Tensor(3*hidden_size, hidden_size))\n\n # bias\n self.bias_ih_l0 = Parameter(torch.Tensor(3*hidden_size)) # input\n self.bias_hh_l0 = Parameter(torch.Tensor(3*hidden_size)) # hidden\n\n self.f = nonlinearity\n\n self.init_weights()", "def __init__(self, *args, **kwargs):\n self.params = kwargs\n self.output_len = kwargs['num_neurons']\n self.input_len = kwargs['input_len']\n self.weights = Vector(data=np.random.randn(self.output_len, self.input_len))\n self.biases = Vector(data=np.zeros((self.output_len, 1)))\n self.input_activations = None\n self.output_activations = Vector()", "def initialize(self, input_size, n_classes):\n\n self.n_classes = n_classes\n self.input_size = input_size\n\n n_hidden_layers = len(self.sizes)\n #############################################################################\n # Allocate space for the hidden and output layers, as well as the gradients #\n #############################################################################\n self.hs = []\n self.grad_hs = []\n for h in range(n_hidden_layers):\n self.hs += [np.zeros((self.sizes[h],))] # hidden layer\n self.grad_hs += [np.zeros((self.sizes[h],))] # ... and gradient\n self.hs += [np.zeros((self.n_classes,))] # output layer\n self.grad_hs += [np.zeros((self.n_classes,))] # ... and gradient\n\n ##################################################################\n # Allocate space for the neural network parameters and gradients #\n ##################################################################\n self.weights = [np.zeros((self.input_size, self.sizes[0]))] # input.csv to 1st hidden layer weights\n self.grad_weights = [np.zeros((self.input_size, self.sizes[0]))] # ... and gradient\n\n self.biases = [np.zeros((self.sizes[0]))] # 1st hidden layer biases\n self.grad_biases = [np.zeros((self.sizes[0]))] # ... and gradient\n\n for h in range(1, n_hidden_layers):\n self.weights += [np.zeros((self.sizes[h - 1], self.sizes[h]))] # h-1 to h hidden layer weights\n self.grad_weights += [np.zeros((self.sizes[h - 1], self.sizes[h]))] # ... and gradient\n\n self.biases += [np.zeros((self.sizes[h]))] # hth hidden layer biases\n self.grad_biases += [np.zeros((self.sizes[h]))] # ... and gradient\n\n self.weights += [np.zeros((self.sizes[-1], self.n_classes))] # last hidden to output layer weights\n self.grad_weights += [np.zeros((self.sizes[-1], self.n_classes))] # ... and gradient\n\n self.biases += [np.zeros((self.n_classes))] # output layer biases\n self.grad_biases += [np.zeros((self.n_classes))] # ... and gradient\n\n #########################\n # Initialize parameters #\n #########################\n\n self.rng = np.random.mtrand.RandomState(self.seed) # create random number generator\n # biases are initialized to zero\n # ... 
and weights according to the slides\n for m in range(len(self.weights)):\n b = (6 ** 0.5) / ((self.weights[m].shape[0] + self.weights[m].shape[1]) ** 0.5)\n for ind, val in np.ndenumerate(self.weights[m]):\n self.weights[m][ind] = self.rng.uniform(-b, b, 1)\n\n\n self.n_updates = 0 # To keep track of the number of updates, to decrease the learning rate", "def __init__(self, sizes):\n self.num_layers = len(sizes)\n self.sizes = sizes\n self.biases = [cp.array(cp.random.randn(y, 1)) for y in sizes[1:]]\n self.weights = [cp.array(cp.random.randn(y, x))\n for x, y in zip(sizes[:-1], sizes[1:])]", "def __init__(self, sizes):\r\n self.num_layers = len(sizes)\r\n self.sizes = sizes\r\n\r\n self.biases = [np.random.randn(y, 1) for y in sizes[1:]]\r\n self.weights = [np.random.randn(y, x)\r\n for x, y in zip(sizes[:-1], sizes[1:])]", "def init_weight(self):\n init_layer(self.conv1)\n init_layer(self.conv2)\n init_bn(self.norm1)\n init_bn(self.norm2)", "def initialize_network(self):\n # intermediate layer size\n ils = int((self.specbinnum + self.numfilters) / 2)\n\n network = lasagne.layers.InputLayer((None, 1, self.specbinnum, self.numtimebins), self.input_var)\n\n network = NormalisationLayer(network, self.specbinnum)\n self.normlayer = network\n\n network, _ = custom_convlayer_2(network, in_num_chans=self.specbinnum, out_num_chans=ils)\n network = batch_norm(network)\n network, _ = custom_convlayer_2(network, in_num_chans=ils, out_num_chans=self.numfilters)\n network = batch_norm(network)\n\n network = lasagne.layers.NonlinearityLayer(network, nonlinearity=elu)\n self.latents = network\n network = ZeroOutBackgroundLatentsLayer(self.latents,\n mp_down_factor=self.mp_down_factor,\n numfilters=self.numfilters,\n numtimebins=self.numtimebins,\n background_latents_factor=self.background_latents_factor,\n use_maxpool=self.use_maxpool)\n network, _ = custom_convlayer_2(network, in_num_chans=self.numfilters, out_num_chans=ils)\n network = batch_norm(network)\n network, _ = custom_convlayer_2(network, in_num_chans=ils, out_num_chans=self.specbinnum)\n network = batch_norm(network)\n\n # output_size\n num_time_samples = int(audioframe_len/2 * (self.numtimebins + 1))\n # network = batch_norm(DenseLayer(network, num_time_samples)) # MemoryError\n network, _ = custom_convlayer_2(network, in_num_chans=self.specbinnum, out_num_chans=num_time_samples)\n network, _ = batch_norm(network)\n network, _ = custom_convlayer_2(network, in_num_chans=num_time_samples, out_num_chans=1)\n network, _ = batch_norm(network)\n\n self.network = network", "def __init__(self,layers,activations):\n model = utils.buildMLP(layers, activations)\n super().__init__(torch.nn.Sequential(model), nnType='dnn')", "def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):\n \n super().__init__(inputnodes, hiddennodes, outputnodes, learningrate)\n\n # link weight matrices, wih and who\n # weights inside the arrays are w_i_j, where link is from node i to node i to j in the next layer\n # w11 w21\n # w12 w22 etc\n self.wih = numpy.random.normal(\n 0.0, pow(self.inodes, -0.5), (self.hnodes, self.inodes))\n self.who = numpy.random.normal(\n 0.0, pow(self.hnodes, -0.5), (self.onodes, self.hnodes))\n\n #activation function is the sigmoid function\n self.activation_function = lambda x: scipy.special.expit(x)\n\n pass", "def _generate_network_initialization(self, graph, memory_manager):\n\n # TODO: To be changed if we want to support multiple outputs\n output_buffer_name = graph.outputs[0].name\n\n ops_to_ignore = ['Reshape', 'Mul']\n\n 
buffers_allocated = []\n\n buffer_declaration = \"\"\n buffer_declaration += \" pico_cnn::naive::Tensor **kernels;\\n\"\n buffer_declaration += \" pico_cnn::naive::Tensor **biases;\\n\"\n\n constructor_code = \"\"\n #constructor_code += \"Network::Network() {\\n\\n\"\n\n num_layers = 0\n num_kernels = 0\n num_biases = 0\n\n for node in graph.nodes:\n \"\"\"Do not count the reshape layers as the input tensor will only define the dimensions\"\"\"\n if len(node.input_tensors) > 0 and node.op_type not in ops_to_ignore:\n num_layers += 1\n for num, input in enumerate(node.input_tensors):\n if input in buffers_allocated:\n continue\n else:\n tensor = node.input_tensors[input]\n buffers_allocated.append(input)\n if len(tensor.shape) == 1:\n num_biases += 1\n else:\n num_kernels += 1\n\n \"\"\"The arrays kernels and biases will be used to pass only two variables to read_binary_weights\"\"\"\n constructor_code += \" kernels = new pico_cnn::naive::Tensor*[{}]();\\n\".format(num_kernels)\n constructor_code += \" biases = new pico_cnn::naive::Tensor*[{}]();\\n\\n\".format(num_biases)\n\n pos = -1\n pos_kernel = -1\n pos_bias = -1\n\n buffers_allocated.clear()\n\n \"\"\"Iterate over all nodes in the graph and generate the corresponding allocation code.\"\"\"\n for node_id, node in enumerate(graph.nodes):\n\n if len(node.input_tensors) > 0 and node.op_type not in ops_to_ignore:\n pos += 1\n\n buffer_declaration += \" // Layer: \" + node.name + \", Operation: \" + node.op_type + \"\\n\"\n constructor_code += \" // Layer: \" + node.name + \", Operation: \" + node.op_type + \"\\n\"\n\n # Allocate memory for kernels and biases\n buffer_declaration += \" // Inputs\\n\"\n constructor_code += \" // Inputs\\n\"\n for num, input in enumerate(node.input_tensors):\n\n if node.op_type in ops_to_ignore:\n continue\n\n if input in buffers_allocated:\n continue\n else:\n buffers_allocated.append(input)\n\n tensor = node.input_tensors[input]\n if len(tensor.shape) == 1:\n pos_bias += 1\n else:\n pos_kernel += 1\n\n buffer = memory_manager.get_buffer(graph, input)\n\n buffer_declaration += \" // \" + str(buffer.shape) + \"\\n\"\n\n pico_cnn_tensor = \" pico_cnn::naive::Tensor *\"\n\n buffer_declaration += pico_cnn_tensor + buffer.name + \";\\n\"\n\n constructor_code += \" // \" + str(buffer.shape) + \"\" # TODO maybe we sometimes need \\n\n\n functionality = CodeRegistry.get_funct(\"KernelAllocation\")\n impl = functionality[0].create(buffer, pos, pos_kernel, pos_bias)\n\n if impl:\n constructor_code += impl.generate_code()\n constructor_code += \"\\n\"\n\n buffer_declaration += \" // Outputs\\n\"\n constructor_code += \" // Outputs\\n\"\n for num, output in enumerate(node.outputs):\n\n buffer = memory_manager.get_buffer(graph, output)\n\n if output == output_buffer_name:\n buffer_declaration += \" // Output tensor {} with shape {} of network provided as argument of Network::run()\".format(buffer.name, str(buffer.shape))\n constructor_code += \" // Output tensor {} with shape {} of network provided as argument of Network::run()\".format(buffer.name, str(buffer.shape))\n continue\n\n buffer_declaration += \" // \" + str(buffer.shape) + \"\\n\"\n\n pico_cnn_tensor = \" pico_cnn::naive::Tensor *\"\n\n buffer_declaration += pico_cnn_tensor + buffer.name + \";\\n\"\n\n constructor_code += \" // \" + str(buffer.shape) + \"\" # TODO maybe we sometimes need \\n\n\n functionality = CodeRegistry.get_funct(\"OutputAllocation\")\n impl = functionality[0].create(buffer)\n\n if impl:\n constructor_code += impl.generate_code()\n 
constructor_code += \"\\n\"\n\n buffer_declaration += \"\\n\\n\"\n constructor_code += \"\\n\\n\"\n\n #constructor_code += \"}\\n\"\n\n self.buffer_declaration = buffer_declaration\n self.constructor_code = constructor_code", "def test_init(self):\n network = PerceptronNetwork(\n [\n PerceptronLayer.blank(4, 2, 'layer1', ['a', 'b', 'c', 'd']),\n PerceptronLayer.blank(2, 2, 'layer2', ['a', 'b', 'c', 'd'])\n ]\n )\n self.assertIsNotNone(network)", "def construct(self):\n self.input_size = self.numplanes * self.boardsize**2\n \n if self.hidden:\n layers = [\n torch.nn.Linear(self.input_size, self.hidden), \n torch.nn.ReLU(),\n torch.nn.Linear(self.hidden, self.boardsize**2)\n ]\n else:\n layers = [torch.nn.Linear(self.input_size, self.boardsize**2)]\n\n self.layers = torch.nn.ModuleList(layers)\n self.optimizer = torch.optim.Adam(self.parameters(), lr=1e-5)\n logging.info(\"Model initialized: %s\", self)", "def lenet_network(name, input_shape, output_count):\n network = NeuralNetwork(name, input_shape, output_count)\n\n # normal distribution parameters for random weights\n mean = 0.0\n stddev = 0.1\n\n # General convolution shapes and parameters common to all convolutional layers\n conv_stride_shape = (1, 1)\n conv_pad_shape = (0, 0)\n conv_pad_type = 'VALID'\n\n pool_stride_shape = (2, 2)\n pool_shape = (2, 2)\n pool_pad_type = 'VALID'\n\n activation = 'relu'\n\n # Kernel depths and sizes for each convolution layer\n depths = [6, 16]\n kernel_shapes = [(5, 5, depths[0]), (5, 5, depths[1])]\n conv_layer_count = len(depths)\n\n # Create convolutional layers\n conv = None\n for i in range(conv_layer_count):\n name = 'l{:d}'.format(i)\n if i > 0:\n input_shape = conv.output_shape\n conv = ConvolutionalLayer(name, input_shape, kernel_shapes[i], conv_stride_shape, \\\n conv_pad_shape, conv_pad_type, activation)\n conv.add_pooling('max', pool_shape, pool_stride_shape, pool_pad_type)\n network.add_layer(conv, mean, stddev)\n\n # Linear layer dimensions\n linear_input_sizes = [400, 120, 84]\n linear_output_sizes = [120, 84, 10]\n linear_activations = ['relu', 'relu', None]\n\n # Create linear layers\n for i, input_size in enumerate(linear_input_sizes):\n layer_index = i + conv_layer_count\n name = 'l{:d}'.format(layer_index)\n linear = LinearLayer(name, input_size, linear_output_sizes[i], linear_activations[i])\n linear.init_weights_and_biases(mean, stddev)\n network.add_layer(linear, mean, stddev)\n\n network.define_network()\n\n learning_rate = 0.001\n network.define_operations(learning_rate, 'adam')\n\n return network", "def _build_network(self,\n input_dim,\n dense_layers,\n nodes_per_layer=None,\n hidden_act='relu',\n output_act='sigmoid',\n dropout_layers=None):\n\n if nodes_per_layer is None:\n nodes = [10] * dense_layers\n else:\n nodes = nodes_per_layer\n\n if dropout_layers is None:\n do_layers = [0] * dense_layers\n else:\n do_layers = dropout_layers\n\n self.model.add(Dense(nodes[0], input_dim=input_dim,\n activation=hidden_act))\n\n if dense_layers > 1:\n for l in range(1, dense_layers - 1):\n if do_layers[l - 1] != 0:\n self.model.add(Dropout(do_layers[l - 1]))\n\n self.model.add(Dense(nodes[l], activation=hidden_act))\n\n self.model.add(Dense(1, activation=output_act))", "def test_init() -> None:\n neural_net = NeuralNetwork()\n assert neural_net.model.get_layer('output_layer').output_shape, (None, 4)", "def __init__(self, inputLayerSize, outputLayerSize, \\\n hiddenLayerSize):\n #Network hyperparameters - neurons per layer - **not altered by training**\n self.inputLayerSize = 
inputLayerSize\n self.outputLayerSize = outputLayerSize\n self.hiddenLayerSize = hiddenLayerSize\n self.num_params = inputLayerSize * hiddenLayerSize + \\\n hiddenLayerSize * outputLayerSize + hiddenLayerSize \\\n + outputLayerSize\n #--Weights--\n #w_ih - weights of synapses linking input -> hidden\n self.w_ih = np.random.randn( self.inputLayerSize, \\\n self.hiddenLayerSize)\n #w_ho - weights of synapses linking hidden -> output\n self.w_ho = np.random.randn( self.hiddenLayerSize, \\\n self.outputLayerSize)\n \n #--Biases--\n #b_h - biases of hidden layer\n self.b_h = np.random.randn( self.hiddenLayerSize )\n #b_o - biases of output layer\n self.b_o = np.random.randn( self.outputLayerSize )", "def __init__(self, number_of_neurons: int, activation_function, inputs=None):\n self.uid = None\n self.number_of_neurons = number_of_neurons\n self.activation_function = activation_function\n self.inputs = inputs\n self.shape = None\n self.data_shape = None\n self.weights = None\n self.__set_weights()", "def setupNetwork(self):\n\t\tin_layer = Input(shape=(28, ))\n\t\td1 = Dense(40, activation='relu')(in_layer)\n\t\td2 = Dense(10, activation='relu')(d1)\n\t\tout = Dense(1, activation='sigmoid')(d2)\n\n\t\tself.model = tf.keras.Model(inputs=in_layer, outputs=out)", "def build_neural_net(net_def):\n\n populated_def = net_def.copy()\n\n for layer in populated_def['layers']:\n for n in range(0, layer['num_neurons']):\n weights = layer['weights'][n]\n bias = layer['bias'][n]\n\n neuron = Neuron(weights, bias, layer['activation'])\n layer['neurons'].append(neuron)\n\n\n return populated_def", "def set_layers(self, sizes, init='random'):\n\t\tself.init_weights(sizes, init, None, None)", "def __init__(self, rate, sigmoid, hidden, examples, variables, layers, rule, dropout):\n self.rate = rate\n self.sigmoid = sigmoid\n self.inputs = variables \n self.vis_layer = []\n self.hidden_layers = []\n self.hidden = hidden\n self.variables = variables\n self.data = BOOLEAN(examples, self.variables)\n self.layers = layers-1\n self.rule = rule\n self.dropout = dropout\n self.length = int(math.pow(2, self.variables))\n for _ in xrange(self.hidden):\n self.vis_layer.append(Neuron(self.rate, self.sigmoid, self.inputs+1, dropout))\n for layer in xrange(self.layers):\n self.hidden_layers.append([])\n for _ in xrange(self.hidden):\n self.hidden_layers[layer].append(Neuron(self.rate, self.sigmoid, self.hidden+1, dropout))\n if self.hidden > 0:\n self.output_neuron = Neuron(self.rate, self.sigmoid, self.hidden+1, dropout)\n else:\n self.output_neuron = Neuron(self.rate, self.sigmoid, self.inputs+1, dropout)", "def _init_layers(self):\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.retina_cls = nn.Conv2d(\n self.feat_channels,\n self.num_anchors * self.cls_out_channels,\n 3,\n padding=1)\n self.retina_reg = nn.Conv2d(\n self.feat_channels, self.num_anchors * 4, 3, padding=1)", "def __init__(self, z_dim, initailize_weights=True):\n super().__init__()\n self.z_dim = z_dim\n\n self.img_conv1 = conv2d(3, 16, kernel_size=7, stride=2)\n self.img_conv2 = conv2d(16, 32, 
kernel_size=5, stride=2)\n self.img_conv3 = conv2d(32, 64, kernel_size=5, stride=2)\n self.img_conv4 = conv2d(64, 64, stride=2)\n self.img_conv5 = conv2d(64, 128, stride=2)\n self.img_conv6 = conv2d(128, self.z_dim, stride=2)\n self.img_encoder = nn.Linear(4 * self.z_dim, 2 * self.z_dim)\n self.flatten = Flatten()\n\n if initailize_weights:\n init_weights(self.modules())", "def __init__(self,\n neurons_list: list,\n hidden_activation: str = None,\n out_activation: str = None,\n error_func: str = None):\n self.__layers = []\n for i in range(1, len(neurons_list)):\n prev_layer = neurons_list[i - 1]\n cur_layer = neurons_list[i]\n\n out_activ = lin\n out_activ_deriv = lin_deriv\n\n if out_activation == \"sigmoid\":\n out_activ = sigmoid\n out_activ_deriv = sigmoid_deriv\n elif out_activation == \"relu\":\n out_activ = relu\n out_activ_deriv = relu_deriv\n elif out_activation == \"l_relu\":\n out_activ = l_relu\n out_activ_deriv = l_relu_deriv\n\n hidden_activ = lin\n hidden_activ_deriv = lin_deriv\n\n if hidden_activation == \"sigmoid\":\n hidden_activ = sigmoid\n hidden_activ_deriv = sigmoid_deriv\n elif hidden_activation == \"relu\":\n hidden_activ = relu\n hidden_activ_deriv = relu_deriv\n elif hidden_activation == \"l_relu\":\n hidden_activ = l_relu\n hidden_activ_deriv = l_relu_deriv\n\n if i == len(neurons_list) - 1:\n layer = NeuralNetLayer(cur_layer, prev_layer, out_activ, out_activ_deriv)\n else:\n layer = NeuralNetLayer(cur_layer, prev_layer, hidden_activ, hidden_activ_deriv)\n self.__layers.append(layer)\n\n error_func_f = squared_error\n error_deriv = squared_error_der\n\n if error_func == \"mse\":\n error_func_f = squared_error\n error_deriv = squared_error_der\n elif error_func == \"kl\":\n error_func_f = kl_divergence\n error_deriv = kl_divergence_der\n\n self.__error_func = error_func_f\n self.__error_deriv = error_deriv", "def __init__(self, z_dim, initailize_weights=True):\n super().__init__()\n self.z_dim = z_dim\n\n self.depth_conv1 = conv2d(1, 32, kernel_size=3, stride=2)\n self.depth_conv2 = conv2d(32, 64, kernel_size=3, stride=2)\n self.depth_conv3 = conv2d(64, 64, kernel_size=4, stride=2)\n self.depth_conv4 = conv2d(64, 64, stride=2)\n self.depth_conv5 = conv2d(64, 128, stride=2)\n self.depth_conv6 = conv2d(128, self.z_dim, stride=2)\n\n self.depth_encoder = nn.Linear(4 * self.z_dim, 2 * self.z_dim)\n self.flatten = Flatten()\n\n if initailize_weights:\n init_weights(self.modules())", "def _nn_initial_values(structure):\n\n # Use Xavier uniform initializer\n initializer=tf.glorot_uniform_initializer()\n\n output=[]\n last_width=None\n\n # Add biases & weights per layer\n for l in structure:\n output.append(tf.zeros(shape=[l])) # layer l biases\n if last_width is not None: # Exclude weights from layer 0\n output.append(initializer(shape=[last_width, l])) # layer l weights\n last_width=l\n\n return output", "def initialize_parameters_for_layers(layer_list, training_examples):\n W = [None] * len(layer_list)\n B = [None] * len(layer_list)\n X = np.random.randn(layer_list[0], training_examples)\n for j in range(1, len(layer_list)):\n W[j] = np.random.randn(layer_list[j], layer_list[j-1])\n B[j] = np.random.randn(layer_list[j], 1)\n return X, W, B", "def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):\n super().__init__(inputnodes, hiddennodes, outputnodes, learningrate)\n\n # link weight matrices, wih and who\n # weights inside the arrays are w_i_j, where link is from node i to node i to j in the next layer\n # w11 w21\n # w12 w22 etc\n self.wih = 
cupy.random.normal(\n 0.0, pow(self.inodes, -0.5), (self.hnodes, self.inodes))\n self.who = cupy.random.normal(\n 0.0, pow(self.hnodes, -0.5), (self.onodes, self.hnodes))\n\n #activation function is the sigmoid function\n self.activation_function = lambda x: 1 / (1 + cupy.exp(x) ** (-1))", "def __init__(self, num_units, input_size=None,\n nonlinearity=tf.nn.tanh,\n W_init=tf.random_normal_initializer(stddev=0.15),\n b_init=tf.constant_initializer(0.0, dtype=tf.float32),\n weightnorm=False):\n self._num_units = num_units\n self._input_size = input_size or num_units\n self._W_init = W_init\n self._b_init = b_init\n self._weightnorm = weightnorm\n self._nonlin = nonlinearity", "def build(self):\n\n self.W = self.init([self.n_atom_input_feat, self.n_output])\n self.b = model_ops.zeros(shape=[\n self.n_output,\n ])\n\n self.trainable_weights = self.W + self.b", "def build_layer(self) :\n inputsWithBias = self.input_count + 1\n self.weights = np.random.rand(inputsWithBias, self.node_count)\n self.weights_and_activations = (self.weights, self.activations)", "def initialize_layers(self, layers_config: dict, inputs=None):\n layers_config = layers_config.copy()\n input_lyrs = []\n initiated_layers = OrderedDict()\n wrp_layer = None # indicator for wrapper layers\n first_layer = True\n\n for lyr, lyr_args in layers_config.items():\n\n lyr_config, lyr_inputs, named_outs, call_args = self.deconstruct_lyr_args(lyr, lyr_args)\n\n lyr_name, args, lyr_config, activation = self.check_lyr_config(lyr, lyr_config)\n\n if K.BACKEND == 'pytorch':\n\n if first_layer:\n first_layer = False\n\n if callable(lyr_config):\n lyr_initiated = lyr_config\n else:\n lyr_initiated = TORCH_LAYERS[lyr_name](**lyr_config)\n setattr(self, lyr, lyr_initiated)\n initiated_layers[lyr] = {\"layer\": lyr_initiated, \"named_outs\": named_outs, 'call_args': call_args,\n 'inputs': lyr_inputs}\n\n else:\n # may be user has defined layers without input layer, in this case add Input layer as first layer\n if first_layer:\n if inputs is not None: # This method was called by providing it inputs.\n assert isinstance(inputs, tf.Tensor)\n # since inputs have been defined, all the layers that will be added will be next to first layer\n first_layer = False\n layer_outputs = inputs\n initiated_layers[layer_outputs.name] = {'layer': layer_outputs, 'tf_name': lyr_name}\n\n elif lyr_name != \"Input\":\n if 'input_shape' in lyr_config: # input_shape is given in the first layer so make input layer\n initialized_layer = LAYERS[\"Input\"](shape=lyr_config['input_shape'])\n else:\n # for simple dense layer based models, lookback will not be used\n def_shape = (self.num_ins,) if self.lookback == 1 else (self.lookback, self.num_ins)\n initialized_layer = LAYERS[\"Input\"](shape=def_shape)\n\n # first layer is built so next iterations will not be for first layer\n first_layer = False\n # put the first layer in memory to be used for model compilation\n # add th layer which the user had specified as first layer\n initiated_layers[initialized_layer.name] = {'layer': initialized_layer,\n 'tf_name': lyr_name}\n input_lyrs.append(initialized_layer)\n\n # The inputs to the layer have not been specified, so either it is an Input layer\n if lyr_inputs is None:\n # or it uses the previous outputs as inputs\n if lyr_name == \"Input\":\n # it is an Input layer, hence should not be called\n initialized_layer = LAYERS[lyr_name](*args, **lyr_config)\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'tf_name': lyr_name}\n 
input_lyrs.append(initialized_layer)\n else:\n # it is executable and uses previous outputs as inputs\n if lyr_name in ACTIVATION_LAYERS:\n layer_outputs = ACTIVATION_LAYERS[lyr_name](name=lyr_config['name'])\n initiated_layers[lyr_config['name']] = {'layer': layer_outputs,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n elif lyr_name in ['TimeDistributed', 'Bidirectional']:\n wrp_layer = LAYERS[lyr_name]\n # because wrapper layer name is property\n initiated_layers[lyr_config['name']] = {'layer': wrp_layer,\n 'tf_name': lyr_name}\n continue\n elif \"LAMBDA\" in lyr_name.upper():\n # lyr_config is serialized lambda layer, which needs to be deserialized\n initialized_layer = tf.keras.layers.deserialize(lyr_config)\n # layers_config['lambda']['config'] still contails lambda, so we need to replace the python\n # object (lambda) with the serialized version (lyr_config) so that it can be saved as json file.\n layers_config[lyr]['config'] = lyr_config\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n else:\n if wrp_layer is not None:\n initialized_layer = wrp_layer(LAYERS[lyr_name](*args, **lyr_config))\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n wrp_layer = None\n else:\n if lyr_name == \"TemporalFusionTransformer\":\n lyr_config['return_attention_components'] = True\n initialized_layer = LAYERS[lyr_name](*args, **lyr_config)\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n\n else: # The inputs to this layer have been specified so they must exist in lyr_cache.\n # it is an executable\n if lyr_name in ACTIVATION_LAYERS:\n\n layer_outputs = ACTIVATION_LAYERS[lyr_name](name=lyr_config['name'])\n initiated_layers[lyr_config['name']] = {'layer': layer_outputs,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n elif lyr_name in ['TimeDistributed', 'Bidirectional']:\n wrp_layer = LAYERS[lyr_name]\n # because wrapper layer name is property\n initiated_layers[lyr_config['name']] = {'layer': wrp_layer,\n 'tf_name': lyr_name}\n continue\n elif \"LAMBDA\" in lyr_name.upper():\n initialized_layer = tf.keras.layers.deserialize(lyr_config)\n layers_config[lyr]['config'] = lyr_config\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n else:\n if wrp_layer is not None:\n initialized_layer = wrp_layer(LAYERS[lyr_name](*args, **lyr_config))\n initiated_layers[lyr_config['name']] = {'layer': initialized_layer,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n wrp_layer = None\n else:\n layer_initialized = LAYERS[lyr_name](*args, **lyr_config)\n initiated_layers[lyr_config['name']] = {'layer': layer_initialized,\n 'named_outs': named_outs,\n 'call_args': call_args,\n 'inputs': lyr_inputs,\n 'tf_name': lyr_name}\n\n if activation is not None: # put the string back to dictionary to be saved in config file\n lyr_config['activation'] = activation\n\n first_layer = False\n\n self.jsonize_lyr_config(lyr_config)\n\n # inputs = [] todo, indentify input layers\n # for k,v in 
lyr_cache.items():\n # since the model is not build yet and we have access to only output tensors of each list, this is probably\n # # the only way to know that how many `Input` layers were encountered during the run of this method. Each\n # tensor (except TimeDistributed) has .op.inputs attribute, which is empty if a tensor represents output of Input layer.\n # if int(''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')) < 240:\n # if k.upper() != \"TIMEDISTRIBUTED\" and hasattr(v, 'op'):\n # if hasattr(v.op, 'inputs'):\n # _ins = v.op.inputs\n # if len(_ins) == 0:\n # inputs.append(v)\n # else: # not sure if this is the proper way of checking if a layer receives an input or not!\n # if hasattr(v, '_keras_mask'):\n # inputs.append(v)\n\n setattr(self, 'initiated_layers', initiated_layers)\n setattr(self, 'input_lyrs', input_lyrs)\n\n\n # todo,\n # # for case when {Input -> Dense, Input_1}, this method wrongly makes Input_1 as output so in such case use\n # # {Input_1, Input -> Dense }, thus it makes Dense as output and first 2 as inputs, so throwing warning\n # if int(''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')) < 240:\n # if len(layer_outputs.op.inputs) < 1:\n # print(\"Warning: the output is of Input tensor class type\")\n # else:\n # if 'op' not in dir(layer_outputs): # layer_outputs does not have `op`, which means it has no incoming node\n # print(\"Warning: the output is of Input tensor class type\")\n\n # outs = None\n #if BACKEND == 'tensorflow':\n # outs = self.call(input_lyrs)\n # setattr(self, 'output_lyrs', outs)\n # if BACKEND == 'tensorflow':\n # ## Reinitial\n # super(Model, self).__init__(\n # inputs=input_lyrs,\n # outputs=outs)\n #MODEL.__init__(self, inputs=inputs, outputs=outs)\n\n return input_lyrs # , outs", "def _init_layers(self) -> None:\n self.self_attn = MultiheadAttention(**self.self_attn_cfg)\n self.cross_attn = MultiheadAttention(**self.cross_attn_cfg)\n self.embed_dims = self.self_attn.embed_dims\n self.ffn = FFN(**self.ffn_cfg)\n norms_list = [\n build_norm_layer(self.norm_cfg, self.embed_dims)[1]\n for _ in range(3)\n ]\n self.norms = ModuleList(norms_list)", "def bnn_serial(*layers):\n nlayers = len(layers)\n init_funs, apply_funs = zip(*layers)\n\n def init_fun(rng, input_shape):\n params = []\n for init_fun in init_funs:\n rng, layer_rng = jax.random.split(rng)\n input_shape, param = init_fun(layer_rng, input_shape)\n params.append(param)\n return input_shape, params\n\n def apply_fun(params, inputs, **kwargs):\n rng = kwargs.pop('rng', None)\n rngs = jax.random.split(rng, nlayers) if rng is not None else (None,) * nlayers\n total_kl = 0\n infodict = {}\n for fun, param, rng in zip(apply_funs, params, rngs):\n output = fun(param, inputs, rng=rng, **kwargs)\n if len(output) == 2:\n inputs, layer_kl = output\n elif len(output) == 3:\n inputs, layer_kl, info = output\n infodict.update(info)\n else:\n raise RuntimeError(f\"Expected 2 or 3 outputs but got {len(output)}.\")\n total_kl = total_kl + layer_kl\n return inputs, total_kl, infodict\n\n return Layer(init_fun, apply_fun)", "def __init__(self, n_input, n_output, hidden_layer_size, reg):\n self.reg = reg\n self.fulllayer1 = FullyConnectedLayer(n_input, hidden_layer_size)\n self.reglayer1 = ReLULayer()\n self.fulllayer2 = FullyConnectedLayer(hidden_layer_size, n_output)", "def initialize_network(self):\n self.sess = tf.InteractiveSession()\n sys.stderr.write(\"------\\n\")\n self.model.create_model()\n self._initialize_trainer()\n self.sess.run(tf.initialize_all_variables())\n self.saver = 
tf.train.Saver()", "def init(InputUnits, OutputUnits, numHiddenLayer, HiddenUnits=None):\n global HiddenUnit\n all_weights = []\n if HiddenUnits is None:\n HiddenUnits = []\n elif isinstance(HiddenUnits, int):\n HiddenUnits = [HiddenUnits]\n\n # for InputLayer\n\n parameters = generate(HiddenUnits[0], InputUnits)\n allWeights = mat.r_[parameters.flatten()]\n\n if numHiddenLayer > 1:\n for i in range(numHiddenLayer):\n if i < numHiddenLayer-1:\n parameters = generate(HiddenUnits[i+1], HiddenUnits[i])\n allWeights = mat.r_[allWeights, parameters.flatten()]\n else:\n parameters = generate(OutputUnits, HiddenUnits[i])\n allWeights = mat.r_[allWeights, parameters.flatten()]\n\n else:\n # for output layer\n parameters = generate( OutputUnits, HiddenUnits[0])\n allWeights = mat.r_[allWeights, parameters.flatten()]\n\n return allWeights", "def __init__(self, layers = (1, 1), speed = 0.5):\n \n self.structure = layers # Structure of each layer\n self.activationFunc = np.vectorize(lambda x: 1.0 / (1 + np.exp(-x))); # Activatin function\n self.derivativeFunc = lambda x: x * (1 - x) # Derivative from activation function\n self.theta = [None] # Matrix of weights for each layer\n self.a = None # Activation level from each layer\n self.delta = None # Error rate for each level\n self.speed = speed # Speed of learning\n \n # for each layer exclude input layer\n for i in xrange(1, len(layers)):\n # create n * (M+1) matrix (+1 = adding bias) with random floats in range [-1; 1]\n self.theta.append(np.mat(np.random.uniform(-1, 1, (layers[i], layers[i - 1] + 1))))\n #self.theta.append(np.matrix(np.ones((layers[i], layers[i - 1] + 1))))\n \n return", "def __init__(self, neuron_count):\n # The current state of the thermal network.\n self.current_state = [0.0] * neuron_count\n\n # The weights.\n self.weights = np.zeros( [neuron_count*neuron_count] )\n\n # The neuron count.\n self.neuron_count = neuron_count", "def init():\n global neural_network\n global labels\n\n # load objects required by run() for inferencing\n model_dir = Model.get_model_path(\"mnist-fashion\")\n # neural model\n neural_network = keras.models.load_model(f\"{model_dir}/neural-network.h5\")\n # labels\n with open(f\"{model_dir}/labels.jsonpickle\", \"r\") as labels_file:\n labels = jsonpickle.decode(labels_file.read())", "def __init__(self, dims):\n\t\tself.layersNumber = len(dims) - 1\n\t\tself.weights = []\n\t\tself.biases = []\n\t\tnp.random.seed(42)\n\t\tfor d in range(self.layersNumber):\n\t\t\tself.weights.append(np.random.randn(dims[d+1], dims[d]))\n\t\t\tself.biases.append(np.random.randn(dims[d+1], 1))", "def init_bias(n_hidden_layer, n_hidden_layer_2, n_output_layer):\n bias_W1, bias_W2, bias_W3 = None, None, None\n\n # Create empty arrays of the desired size given by the number of neurons per layer\n # Arrays are populated with 0's\n if n_hidden_layer > 0:\n bias_W1 = np.zeros((n_hidden_layer,1))\n\n if n_hidden_layer_2 > 0:\n bias_W2=np.zeros((n_hidden_layer_2,1)) \n bias_W3=np.zeros((n_output_layer,1))\n\n else:\n bias_W2 = np.zeros((n_output_layer,1))\n\n else:\n bias_W1 = np.zeros((n_output_layer,1))\n\n return bias_W1, bias_W2, bias_W3", "def __init__(self, input_size, neurons):\n super().__init__()\n self.input_size = input_size\n self.neurons = neurons\n self.params[\"w\"] = np.random.randn(input_size, neurons)\n self.params[\"b\"] = np.random.randn(1, neurons)\n self.grads = {}", "def __init__(self, layer_dims=(5, 1), learning_rate=0.005, max_iter=500, activate_fn='relu', init_weights_coef=0.01,\n regularization=\"l2\", 
lambd=0.5, keep_prob=0.9, random_state=None, verbose=True):\n self.layer_dims = layer_dims\n self.learning_rate = learning_rate\n self.max_iter = max_iter\n self.activate_fn = activate_fn\n self.activate_fns = {\n \"sigmoid\": self.__sigmoid,\n \"tanh\": self.__tanh,\n \"relu\": self.__relu,\n \"lrelu\": self.__leaky_relu\n }\n self.derivative_activate_fns = {\n \"sigmoid\": self.__derivative_sigmoid,\n \"tanh\": self.__derivative_tanh,\n \"relu\": self.__derivative_relu,\n \"lrelu\": self.__derivative_leaky_relu\n }\n self.init_weights_coef = init_weights_coef\n self.regularization = regularization\n self.lambd = lambd\n self.keep_prob = keep_prob\n self.random_state = random_state\n self.verbose = verbose\n self.weights = defaultdict()\n\n self.costs = [] # 记录训练过程的损失", "def __init__(self, layer_sizes, act_func=Sigmoid, cost_func=CrossEntropy, metric=AccuracyMetric):\n np.random.seed(1) # Used for constant weights and biases initialization. Fell free to change it.\n\n self.layers_num = len(layer_sizes)\n self.act_func = act_func\n self.cost_func = cost_func\n self.metric = metric\n self.biases = [np.random.random(i) for i in layer_sizes[1:]]\n self.weights = [np.random.normal(loc=0, scale=(1 / np.sqrt(layer_sizes[0])), size=(j, i))\n for j, i in zip(layer_sizes[1:], layer_sizes[:-1])]\n self.costs = []\n self.accuracies = []\n self.eta = 0\n self.lambda_r = 0", "def __init__(\n self,\n input_size: int,\n hidden_sizes: Sequence[int],\n output_size: int,\n num_layers: int,\n optimizer: Type[Optimizer] = SGD(),\n norm_weights: bool = False\n ):\n self.input_size = input_size\n self.hidden_sizes = hidden_sizes\n self.output_size = output_size\n self.num_layers = num_layers\n self.norm_weights = norm_weights\n\n assert len(hidden_sizes) == (num_layers - 1)\n assert num_layers >= 1\n\n activated_layer_sizes = [input_size] + hidden_sizes\n activated_layers = [LinearLayer(n_in, n_out, activation_func=ReLU) for n_in, n_out in zip(activated_layer_sizes, activated_layer_sizes[1:])]\n final_layer = LinearLayer(activated_layer_sizes[-1], self.output_size, activation_func=Softmax)\n self.layers = activated_layers + [final_layer]\n\n self.optimizer = optimizer", "def new_initial_layer(self, nodes, inputs, alpha=0.1):\n weights = [[1 for _ in range(inputs)] for i in range(nodes)]\n alphas = [alpha for _ in range(nodes)]\n self._layers.insert(0, Layer(weights, alphas))", "def initialize_weights(self):\n tf.nest.map_structure(\n weights_initializer.WeightsInitializer.initialize_layer_or_model,\n self._layer_nest)", "def __init__(\n self,\n numpy_rng,\n train_set_x,\n train_set_y,\n hidden_layers_sizes,\n n_ins=784,\n n_outs=10\n ):\n\n self.sigmoid_layers = []\n self.AE_layers = []\n self.params = []\n self.n_layers = len(hidden_layers_sizes)\n self.train_set_x = train_set_x\n self.train_set_y = train_set_y\n\n assert self.n_layers > 0\n\n self.x = T.matrix('x') # the data is presented as rasterized images\n self.y = T.ivector('y') # the labels are presented as 1D vector of\n\n for i in xrange(self.n_layers): # used to be n layers\n\n # construct the sigmoid layer = encoder stack\n if i == 0:\n layer_input = self.x\n else:\n layer_input = self.sigmoid_layers[-1].output\n\n sigmoid_layer = HiddenLayer(rng=numpy_rng,\n input=layer_input,\n n_in=(n_ins if i == 0 else\n hidden_layers_sizes[i-1]),\n n_out=hidden_layers_sizes[i],\n activation=T.nnet.sigmoid)\n\n # add the layer to our list of layers\n self.sigmoid_layers.append(sigmoid_layer)\n self.params.extend(sigmoid_layer.params)\n\n # init the DA_layer, 
takes weights from sigmoid layer\n AE_layer = AutoEncoder(\n numpy_rng=numpy_rng,\n input=layer_input,\n n_visible=(n_ins if i == 0 else hidden_layers_sizes[i-1]),\n n_hidden=hidden_layers_sizes[i],\n W=sigmoid_layer.W,\n bhid=sigmoid_layer.b)\n\n self.AE_layers.append(AE_layer)\n\n # on top of the layers\n # log layer for fine-tuning\n self.logLayer = LogisticRegression(\n input=self.sigmoid_layers[-1].output,\n n_in=hidden_layers_sizes[-1],\n n_out=n_outs\n )\n self.params.extend(self.logLayer.params)\n self.finetune_cost = self.logLayer.negative_log_likelihood(self.y)\n self.errors = self.logLayer.errors(self.y)", "def __init__(self, hidden_layer_sizes, activation='relu', reg=0.001, k_fold=5, random_state=0):\n print(\"Initialize model Multi-layer Perceptron\")\n self.hidden_layer_sizes = hidden_layer_sizes\n self.activation = activation\n self.reg = reg\n self.k_fold = k_fold\n self.random_state = random_state\n self.model = sklearn.neural_network.MLPClassifier(self.hidden_layer_sizes,\n activation=self.activation,\n alpha=self.reg, max_iter=1000, \n random_state=self.random_state)", "def __init__(self, num_learners: int):\n self.num_learners = num_learners\n self.learners = []\n self.learner_weights = np.ones(num_learners)", "def _initialize_cnn(image_size, action_size, learning_rate=None):\n\n # All layers are randomly initialized using Glorot initializer\n\n # Create the Inputs of the Neural Network\n image_input = Input(shape=(image_size, image_size, 1))\n scalar_input = Input(shape=(2,))\n\n # Create the first layer of convolution\n conv1 = Conv2D(filters=16,\n kernel_size=5,\n activation=\"relu\")(image_input)\n\n pool1 = MaxPooling2D(pool_size=3)(conv1)\n\n # Create the second layer of convolution\n conv2 = Conv2D(filters=16,\n kernel_size=3,\n activation=\"relu\")(pool1)\n pool2 = MaxPooling2D(pool_size=3)(conv2)\n\n # Create the third layer of convolution\n conv3 = Conv2D(filters=16,\n kernel_size=3,\n activation=\"relu\")(pool2)\n pool3 = MaxPooling2D(pool_size=2)(conv3)\n\n # Flatten the input, so it can be used with dense layers\n flatten = Flatten()(pool3)\n\n # Merge the results of the convolutional layers with the scalar input\n merge = concatenate([flatten, scalar_input])\n\n # Create the dense layers\n # (256 neurons, ReLU)\n dense1 = Dense(256,\n activation=\"relu\",\n kernel_initializer=\"glorot_uniform\")(merge)\n dense2 = Dense(256,\n activation=\"relu\",\n kernel_initializer=\"glorot_uniform\")(dense1)\n\n # Create the output layer (action_size outputs, Lineal)\n # Note that the output MUST be lineal (instead of the typical sigmoid function)\n # for Deep Reinforcement Learning\n output = Dense(action_size,\n activation=\"linear\",\n kernel_initializer=\"glorot_uniform\")(dense2)\n\n # Create and compile the model of the full CNN (Adam optimizer, MSE)\n # Mean Square Error is used (instead of more typical cross-entropy values) due to Deep Reinforcement Learning\n # (since MSE is the value trying to be minimized)\n model = Model(inputs=[image_input, scalar_input],\n outputs=output)\n\n # Note that beta_1 is set to 0.99 to improve the GPU performance while training\n model.compile(optimizer=Adam(beta_1=0.99,\n learning_rate=learning_rate) if learning_rate\n else Adam(beta_1=0.99),\n loss=\"mse\",\n run_eagerly=True)\n\n return model", "def __init__(self, input_dimensions=2, number_of_nodes=4, transfer_function=\"Hard_limit\"):\n self.input_dimensions = input_dimensions\n self.number_of_nodes = number_of_nodes\n self.transfer_function = transfer_function\n 
self.initialize_weights()", "def __init__(self, number_hidden_layers=1, n_inputs=0, input_dim=1, output_dim=1, init_fs=None, act_fs=None, dropout=(), dropout_probs=(), batch_norm=()):\n self.number_hidden_layers = number_hidden_layers\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.init_functions = init_fs\n self.act_functions = act_fs\n self.batch_norm = batch_norm\n self.dropout = dropout\n self.dropout_probs = dropout_probs\n self.n_inputs = n_inputs", "def make_neural_net_basic():\n i0 = Input('i0', -1.0) # this input is immutable\n i1 = Input('i1', 0.0)\n i2 = Input('i2', 0.0)\n w1A = Weight('w1A', 1)\n w2A = Weight('w2A', 1)\n wA = Weight('wA', 1)\n\n # Inputs must be in the same order as their associated weights\n A = Neuron('A', [i1,i2,i0], [w1A,w2A,wA])\n P = PerformanceElem(A, 0.0)\n\n net = Network(P,[A])\n return net", "def build_neuron_network(nb_features_map: Union[Sequence[int], None] = None,\n size_linear_layers: Union[Sequence[int], None] = None,\n dropout_rate: Union[Tuple[float, float], float] = 0.3,\n conv_kernel_size: Union[Sequence[int], int] = 3,\n conv_stride: int = 1,\n conv_padding: int = 1,\n conv_activation: str = \"relu\",\n conv_architecture: str = \"CPD\",\n pool_kernel_size: int = 2,\n pool_stride: int = 2,\n dense_activation: str = \"relu\",\n pretrained: Union[str, None] = None,\n grayscale: bool = True,\n optimizer: str = \"Adam\",\n weight_decay: float = 0.,\n learning_rate: float = 0.001,\n ) -> Tuple[nn.Module, List, torch.optim.Optimizer]:\n # Initializations\n if pretrained is not None:\n grayscale = False\n if grayscale:\n channels = 1\n else:\n channels = 3\n if nb_features_map is None:\n nb_features_map = [8]\n if size_linear_layers is None:\n size_linear_layers = []\n height = 224\n width = 224\n module = nn.Module()\n shapes = [(\"input\", channels, height, width)]\n layers = {\"extractor\": [], \"regressor\": []}\n if not hasattr(dropout_rate, \"__len__\"):\n dropout_rate = (dropout_rate, 0.)\n next_dropout_rate = dropout_rate[0]\n # If a pretrained model is used:\n if pretrained is None:\n # Input checks\n if hasattr(conv_kernel_size, \"__len__\"):\n if len(conv_kernel_size) != len(nb_features_map):\n raise ValueError(\"The length of nb_features_map shall match the length of conv_kernel_size\")\n else:\n conv_kernel_size = [conv_kernel_size] * len(nb_features_map)\n # Feature extractor\n next_layer_type = itertools.cycle(conv_architecture)\n nb_feature_map = None\n i = 0\n while True:\n layer_type = next(next_layer_type)\n if layer_type == \"C\":\n # Convolutional layer\n try:\n nb_feature_map = nb_features_map[i]\n except IndexError:\n break\n name = \"conv2d-{:02d}\".format(i+1)\n conv = nn.Conv2d(shapes[-1][1], nb_feature_map, conv_kernel_size[i], stride=conv_stride,\n padding=conv_padding)\n layers[\"extractor\"].append((name, conv))\n h, w = output_shape_conv_and_pool_layer(rows=shapes[-1][2], columns=shapes[-1][3],\n kernel=conv_kernel_size[i], stride=conv_stride,\n padding=conv_padding)\n shapes.append((name, nb_feature_map, h, w))\n i += 1\n # Activation\n if conv_activation == \"relu\":\n activ = nn.ReLU()\n elif conv_activation == \"elu\":\n activ = nn.ELU(alpha=0.1)\n elif conv_activation == \"leaky\":\n activ = nn.LeakyReLU()\n else:\n activ = nn.ReLU()\n name = \"{}-{:02d}\".format(conv_activation, i)\n layers[\"extractor\"].append((name, activ))\n # activation does not change the size\n shapes.append((name, shapes[-1][1], shapes[-1][2], shapes[-1][3]))\n elif layer_type == \"P\":\n # Max-pooling\n name = 
\"maxpool2d-{:02d}\".format(i)\n pool = nn.MaxPool2d(pool_kernel_size, pool_stride)\n layers[\"extractor\"].append((name, pool))\n h, w = output_shape_conv_and_pool_layer(rows=shapes[-1][2], columns=shapes[-1][3],\n kernel=pool_kernel_size, stride=pool_stride)\n shapes.append((name, nb_feature_map, h, w))\n elif layer_type == \"D\":\n # Dropout\n if next_dropout_rate > 0.:\n name = \"dropout-{:02d}\".format(i)\n dropout = nn.Dropout(p=next_dropout_rate)\n layers[\"extractor\"].append((name, dropout))\n # Dropout does not change the size\n shapes.append((name, shapes[-1][1], shapes[-1][2], shapes[-1][3]))\n next_dropout_rate += dropout_rate[1]\n elif layer_type == \"B\":\n # Batch normalization\n name = \"batchnorm-{:02d}\".format(i)\n batch = nn.BatchNorm2d(shapes[-1][1])\n layers[\"extractor\"].append((name, batch))\n # Batch norm. does not change the size\n shapes.append((name, shapes[-1][1], shapes[-1][2], shapes[-1][3]))\n # Add a flatten layer\n name = \"flatten\"\n flatten = nn.Flatten(1)\n layers[\"extractor\"].append((name, flatten))\n shapes.append((name, shapes[-1][1] * shapes[-1][2] * shapes[-1][3]))\n # Create extractor module\n extractor = nn.Sequential(OrderedDict(layers[\"extractor\"]))\n module.add_module(\"extractor\", extractor)\n elif pretrained == \"VGG16\":\n pre_trained = models.vgg16(pretrained=True)\n modules = []\n for _name, _module in pre_trained.named_children():\n if _name != 'classifier':\n modules.append((_name, _module))\n modules.append((\"flatten\", nn.Flatten(1)))\n vgg16 = nn.Sequential(OrderedDict(modules))\n # Freeze all parameters in the pre-trained model\n # So we prevent gradients from being calculated, it will save computation time\n for param in vgg16.parameters():\n param.requires_grad = False\n module.add_module('extractor', vgg16)\n shapes.append((pretrained, 25088))\n else:\n raise ValueError(f\"Unknown pre-trained model '{pretrained}'.\")\n # Regressor\n for i, size_linear_layer in enumerate(size_linear_layers):\n # Add a linear layer\n name = \"linear-{:02d}\".format(i + 1)\n linear = nn.Linear(shapes[-1][1], size_linear_layer)\n layers[\"regressor\"].append((name, linear))\n shapes.append((name, size_linear_layer))\n # Activation\n if dense_activation == \"relu\":\n activ = nn.ReLU()\n elif dense_activation == \"elu\":\n activ = nn.ELU(alpha=0.1)\n elif dense_activation == \"leaky\":\n activ = nn.LeakyReLU()\n else:\n activ = nn.ReLU()\n name = \"{}-{:02d}\".format(dense_activation, i + 1)\n layers[\"regressor\"].append((name, activ))\n shapes.append((name, shapes[-1][1])) # activation does not change the size\n # Dropout\n if next_dropout_rate > 0.:\n name = \"dropout-{:02d}\".format(i + 1)\n dropout = nn.Dropout(p=next_dropout_rate)\n layers[\"regressor\"].append((name, dropout))\n shapes.append((name, shapes[-1][1])) # Dropout does not change the size of array\n next_dropout_rate += dropout_rate[1]\n # Add the final layer, the output size is fixed to 68 x 2 = 136\n name = \"output\"\n linear = nn.Linear(shapes[-1][1], 136)\n layers[\"regressor\"].append((name, linear))\n shapes.append((name, 136))\n # Create regressor module\n regressor = nn.Sequential(OrderedDict(layers[\"regressor\"]))\n module.add_module(\"regressor\", regressor)\n # Weight initialization\n module.apply(weight_initialization)\n # Optimizer\n if optimizer == \"Adam\":\n optim = torch.optim.Adam(module.parameters(), lr=learning_rate, weight_decay=weight_decay)\n elif optimizer == \"AdamW\":\n optim = torch.optim.AdamW(module.parameters(), lr=learning_rate, 
weight_decay=weight_decay)\n elif optimizer == \"SGD\":\n optim = torch.optim.SGD(module.parameters(), lr=learning_rate, weight_decay=weight_decay, momentum=0.9)\n else:\n raise ValueError(f\"Unknown optimizer {optimizer}.\")\n return module, shapes, optim", "def _init_layers(self) -> None:\n self.self_attn = MultiheadAttention(**self.self_attn_cfg)\n self.embed_dims = self.self_attn.embed_dims\n self.ffn = FFN(**self.ffn_cfg)\n norms_list = [\n build_norm_layer(self.norm_cfg, self.embed_dims)[1]\n for _ in range(2)\n ]\n self.norms = ModuleList(norms_list)", "def __init__(self, layers):\n\n\t\tself.layers = layers", "def __init__(self, weights, path, trained, asGraph):\n \n _weights = np.asarray(weights)\n\n numLayers = int(_weights.shape[0]/2)\n wghts = []\n biases = []\n\n for i in range(numLayers):\n j = 2*i\n# print(j,(_weights[j].T).shape)\n wghts.append(_weights[j])\n j = 2*i + 1\n# print(j,(_weights[j].T).shape)\n biases.append(_weights[j])\n #enddo\n\n self.numLayers = numLayers\n self.wghts = np.asarray(wghts)\n self.asGraph = asGraph\n self.wghts = wghts\n self.path = path\n self.trained = trained" ]
[ "0.7665592", "0.75891453", "0.7549877", "0.7288455", "0.7238649", "0.72074854", "0.7202781", "0.71520114", "0.7109726", "0.70575273", "0.6969205", "0.69633704", "0.68633807", "0.6848898", "0.6821339", "0.68000925", "0.677125", "0.67679054", "0.6746093", "0.6742651", "0.6729055", "0.6666361", "0.6662787", "0.66476506", "0.6646941", "0.6638234", "0.6634028", "0.6603814", "0.65982187", "0.6594358", "0.6585001", "0.6581009", "0.6575865", "0.65739566", "0.6569843", "0.6567062", "0.6548683", "0.65439975", "0.6538684", "0.6535428", "0.6529185", "0.65281767", "0.6527593", "0.6512578", "0.65079015", "0.6499464", "0.6498534", "0.6498499", "0.6478818", "0.6478449", "0.64643586", "0.6464072", "0.64588547", "0.64576095", "0.64449865", "0.64446276", "0.6437191", "0.64316356", "0.6431304", "0.6428258", "0.64184", "0.64147735", "0.641303", "0.64026666", "0.63971806", "0.6390459", "0.6385555", "0.63847315", "0.63833493", "0.63811773", "0.63803136", "0.6377859", "0.6372071", "0.63591164", "0.6355802", "0.6354959", "0.6353455", "0.6343021", "0.6341547", "0.6337963", "0.633549", "0.6327923", "0.6319723", "0.63107425", "0.6297358", "0.62895703", "0.62871516", "0.62858903", "0.6284418", "0.62623817", "0.626232", "0.6259826", "0.6259492", "0.6257664", "0.6246019", "0.62457883", "0.62441903", "0.6243202", "0.62394774", "0.6237711", "0.6236991" ]
0.0
-1
Forward pass through the network to obtain the U field.
def net_u(self, x, t):\n u = self.model(torch.cat((x,t),1))\n return u
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def U(self):\n return self._U", "def forward(self, u):\n self.pop(u)\n return self.read(1.)", "def get_U(self):\n if self.U is not None:\n return self.U\n return self.calc_Uiso()", "def forward(self, inputs):\n\n down0 = self.layer_0(inputs=inputs)\n down1 = self.layer_1(inputs=down0)\n down2 = self.layer_2(inputs=down1)\n down3 = self.layer_3(inputs=down2)\n down4 = self.layer_4(inputs=down3)\n\n up1 = self.layer_7(down4, down3)\n\n up2 = self.layer_8(up1, down2)\n\n up3 = self.layer_9(up2, down1)\n\n up4 = self.layer_10(up3, down0)\n\n up5 = self.layer_11(up4)\n return up5", "def forward(self, x):\n return self.net(x)", "def u(self):\n return self.__u", "def forward(self, x):\n x, self.hidden = self.gru(x, self.hidden)\n self.detach_hidden()\n x = self.dropout(x)\n x = self.out(x)\n return x", "def u2fkn( self , u ):", "def UFFind(UFP, u):\n if not (UFP[u] == u):\n UFP[u] = UFFind(UFP, UFP[u])\n return UFP[u]", "def forward(self, input_):\n out = self.fc(input_)\n out = self.bn(out)\n out = self.relu(out)\n return torch.cat([out, input_], dim=1)", "def forward(self, input):\n\n return self.network(input)", "def forward(self, input):\n\n return self.network(input)", "def forward(self, input):\n\n return self.network(input)", "def relu_forward(x):\n ############################################################################\n # TODO: Implement the ReLU forward pass. #\n ############################################################################\n ############################################################################\n # START OF YOUR CODE #\n ############################################################################\n out = x\n out[out<0] = 0\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n return out", "def eval_forward(self, u):\n\n if self.eval_forward_f is None:\n\n # masked random numbers\n tt_u = tt.matrix('u')\n mu = self.mask * tt_u\n\n # scale net\n s_net = nn.FeedforwardNet(self.n_inputs, mu)\n for h in self.s_hiddens:\n s_net.addLayer(h, self.s_act)\n s_net.addLayer(self.n_inputs, 'linear')\n util.copy_model_parms(self.s_net, s_net)\n s = s_net.output\n\n # translate net\n t_net = nn.FeedforwardNet(self.n_inputs, mu)\n for h in self.t_hiddens:\n t_net.addLayer(h, self.t_act)\n t_net.addLayer(self.n_inputs, 'linear')\n util.copy_model_parms(self.t_net, t_net)\n t = t_net.output\n\n # transform u -> x\n x = mu + (1.0 - self.mask) * (tt_u * tt.exp(s) + t)\n\n # compile theano function\n self.eval_forward_f = theano.function(\n inputs=[tt_u],\n outputs=x\n )\n\n return self.eval_forward_f(u.astype(dtype))", "def forward_once(self, x):\n\t\t#x = F.normalize(self.network(x), p=2)\n\t\tx = self.network(x)\n\t\treturn x", "def forward(self, inp):\n return inp.dot(self.W) + self.b", "def forward(self, state):\n\n # connect layers to each other and put relu activations between them\n for layer in self.hidden_layers:\n state = layer(state)\n state = F.relu(state)\n value = self.value_layer(state)\n return value", "def relu_forward(self, x):\n #out = None\n #############################################################################\n # TODO: Implement the ReLU forward pass. 
#\n #############################################################################\n out = np.array(x, copy=True)\n out[out <= 0] = 0\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n cache = x\n return out, cache", "def z2u(self, z):\n raise NotImplementedError", "def forward(self, x):\n assert not torch.isnan(x).any(), f\"NaN in input {x}\"\n x = F.relu(self.l1(x))\n x = F.relu(self.l2(x))\n x = self.l3(x)\n return x", "def U(self, p):\n re = self._re(p)\n Le = self._Le(p)\n wf = self._wf(p)\n rf = self._rf(p)\n A = row_wise_dot(re, np.einsum('...ij,...j', self.Ee, re)) @ Le\n B = row_wise_dot(rf, np.einsum('...ij,...j', self.Ff, rf)) @ wf\n return (B - A) * G * self.d * 0.5", "def forward(self, t_local, z, backwards = False):\n self.nfe += 1\n\n\n node_attributes = z[:self.K_N,:]\n edge_attributes = z[self.K_N:,:]\n assert (not torch.isnan(node_attributes).any())\n assert (not torch.isnan(edge_attributes).any())\n\n #grad_edge, edge_value = self.edge_ode_func_net(node_attributes,self.num_atom) # [K*N*N,D],[K,N*N], edge value are non-negative by using relu.\n grad_edge, edge_value = self.edge_ode_func_net(node_attributes,edge_attributes,self.num_atom) # [K*N*N,D],[K,N*N], edge value are non-negative by using relu.todo:with self-evolution\n edge_value = self.normalize_graph(edge_value,self.K_N)\n assert (not torch.isnan(edge_value).any())\n grad_node = self.node_ode_func_net(node_attributes,edge_value,self.node_z0) # [K*N,D]\n assert (not torch.isnan(grad_node).any())\n assert (not torch.isinf(grad_edge).any())\n\n assert (not torch.isnan(grad_node).any())\n assert (not torch.isinf(grad_edge).any())\n\n # Concat two grad\n grad = self.dropout(torch.cat([grad_node,grad_edge],0)) # [K*N + K*N*N, D]\n\n\n return grad", "def u(self, point = -1):\n return self.solution('u', point)", "def forward(self, state):\n x = self.forward_to_var(state)\n return x.data[0]", "def forward(self, src=None, rec=None, u=None, vp=None, save=None, **kwargs):\n # Source term is read-only, so re-use the default\n src = src or self.geometry.src\n # Create a new receiver object to store the result\n rec = rec or self.geometry.rec\n\n # Create the forward wavefield if not provided\n u = u or TimeFunction(name='u', grid=self.model.grid,\n save=self.geometry.nt if save else None,\n time_order=2, space_order=self.space_order)\n\n # Pick vp from model unless explicitly provided\n vp = vp or self.model.vp\n\n print(\"====Forward norm(u)\", norm(u))\n # Execute operator and return wavefield and receiver data\n # summary = self.op_fwd(save).apply(src=src, rec=rec, u=u, vp=vp,\n summary = self.op_fwd(save).apply(src=src, u=u, vp=vp,\n dt=kwargs.pop('dt', self.dt), **kwargs)\n print(\"====Forward norm(u)\", norm(u))\n \n\n regnormu = norm(u)\n if 0:\n cmap = plt.cm.get_cmap(\"viridis\")\n values = u.data[0, :, :, :]\n vistagrid = pv.UniformGrid()\n vistagrid.dimensions = np.array(values.shape) + 1\n vistagrid.spacing = (1, 1, 1)\n vistagrid.origin = (0, 0, 0) # The bottom left corner of the data set\n vistagrid.cell_arrays[\"values\"] = values.flatten(order=\"F\")\n vistaslices = vistagrid.slice_orthogonal()\n vistagrid.plot(show_edges=True)\n vistaslices.plot(cmap=cmap)\n\n print(\"Norm u:\", regnormu)\n\n s_u = TimeFunction(name='s_u', grid=self.model.grid, space_order=self.space_order, time_order=2)\n src_u = src.inject(field=s_u.forward, expr=src * self.model.grid.time_dim.spacing**2 / 
self.model.m)\n\n\n op_f = Operator([src_u])\n op_f.apply(src=src, dt=kwargs.pop('dt', self.dt))\n\n # import pdb;pdb.set_trace()\n print(\"Norm s_u\", norm(s_u))\n\n # Get the nonzero indices\n nzinds = np.nonzero(s_u.data[0]) # nzinds is a tuple\n assert len(nzinds) == len(self.model.grid.shape)\n shape = self.model.grid.shape\n x, y, z = self.model.grid.dimensions\n time = self.model.grid.time_dim\n t = self.model.grid.stepping_dim\n\n source_mask = Function(name='source_mask', shape=self.model.grid.shape, dimensions=(x, y, z), space_order=0, dtype=np.int32)\n\n source_id = Function(name='source_id', shape=shape, dimensions=(x, y, z), space_order=0, dtype=np.int32)\n print(\"source_id data indexes start from 0 now !!!\")\n\n # source_id.data[nzinds[0], nzinds[1], nzinds[2]] = tuple(np.arange(1, len(nzinds[0])+1))\n source_id.data[nzinds[0], nzinds[1], nzinds[2]] = tuple(np.arange(len(nzinds[0])))\n\n source_mask.data[nzinds[0], nzinds[1], nzinds[2]] = 1\n\n print(\"Number of unique affected points is:\", len(nzinds[0]))\n\n # Assert that first and last index are as expected\n assert(source_id.data[nzinds[0][0], nzinds[1][0], nzinds[2][0]] == 0)\n assert(source_id.data[nzinds[0][-1], nzinds[1][-1], nzinds[2][-1]] == len(nzinds[0])-1)\n assert(source_id.data[nzinds[0][len(nzinds[0])-1], nzinds[1][len(nzinds[0])-1], nzinds[2][len(nzinds[0])-1]] == len(nzinds[0])-1)\n\n assert(np.all(np.nonzero(source_id.data)) == np.all(np.nonzero(source_mask.data)))\n assert(np.all(np.nonzero(source_id.data)) == np.all(np.nonzero(s_u.data[0])))\n\n print(\"-At this point source_mask and source_id have been populated correctly-\")\n\n nnz_shape = (self.model.grid.shape[0], self.model.grid.shape[1])\n\n nnz_sp_source_mask = Function(name='nnz_sp_source_mask', shape=(list(nnz_shape)), dimensions=(x,y ), space_order=0, dtype=np.int32)\n\n nnz_sp_source_mask.data[:, :] = source_mask.data[:, :, :].sum(2)\n inds = np.where(source_mask.data == 1.)\n print(\"Grid - source positions:\", inds)\n maxz = len(np.unique(inds[-1]))\n # Change only 3rd dim\n sparse_shape = (self.model.grid.shape[0], self.model.grid.shape[1], maxz)\n\n assert(len(nnz_sp_source_mask.dimensions) == (len(source_mask.dimensions)-1))\n\n # Note : sparse_source_id is not needed as long as sparse info is kept in mask\n # sp_source_id.data[inds[0],inds[1],:] = inds[2][:maxz]\n\n id_dim = Dimension(name='id_dim')\n b_dim = Dimension(name='b_dim')\n\n save_src_u = TimeFunction(name='save_src_u', shape=(src.shape[0],\n nzinds[1].shape[0]), dimensions=(src.dimensions[0],\n id_dim))\n\n save_src_u_term = src.inject(field=save_src_u[src.dimensions[0], source_id], expr=src * self.model.grid.time_dim.spacing**2 / self.model.m)\n\n print(\"Injecting to empty grids\")\n op1 = Operator([save_src_u_term])\n op1.apply(src=src, dt=kwargs.pop('dt', self.dt))\n print(\"Injecting to empty grids finished\")\n sp_zi = Dimension(name='sp_zi')\n\n\n sp_source_id = Function(name='sp_source_id', shape=(list(sparse_shape)),\n dimensions=(x, y, sp_zi), space_order=0, dtype=np.int32)\n\n # Now holds IDs\n sp_source_id.data[inds[0], inds[1], :] = tuple(inds[-1][:len(np.unique(inds[-1]))])\n\n assert(np.count_nonzero(sp_source_id.data) == len(nzinds[0]))\n assert(len(sp_source_id.dimensions) == 3)\n\n # import pdb;pdb.set_trace()\n\n zind = Scalar(name='zind', dtype=np.int32)\n xb_size = Scalar(name='xb_size', dtype=np.int32)\n yb_size = Scalar(name='yb_size', dtype=np.int32)\n x0_blk0_size = Scalar(name='x0_blk0_size', dtype=np.int32)\n y0_blk0_size = 
Scalar(name='y0_blk0_size', dtype=np.int32)\n\n block_sizes = Function(name='block_sizes', shape=(4, ), dimensions=(b_dim,),\n space_order=0, dtype=np.int32)\n\n bsizes = (8, 8, 32, 32)\n block_sizes.data[:] = bsizes\n\n # eqxb = Eq(xb_size, block_sizes[0])\n # eqyb = Eq(yb_size, block_sizes[1])\n # eqxb2 = Eq(x0_blk0_size, block_sizes[2])\n # eqyb2 = Eq(y0_blk0_size, block_sizes[3])\n\n eq0 = Eq(sp_zi.symbolic_max, nnz_sp_source_mask[x, y] - 1,\n implicit_dims=(time, x, y))\n\n eq1 = Eq(zind, sp_source_id[x, y, sp_zi], implicit_dims=(time, x, y, sp_zi))\n\n # inj_u = source_mask[x, y, zind] * save_src_u[time, source_id[x, y, zind]]\n # Is source_mask needed /\n inj_u = save_src_u[time, source_id[x, y, zind]]\n\n eq_u = Inc(u.forward[t+1, x, y, zind], inj_u, implicit_dims=(time, x, y, sp_zi))\n\n # The additional time-tiling equations\n # tteqs = (eqxb, eqyb, eqxb2, eqyb2, eq0, eq1, eq_u, eq_v)\n\n performance_map = np.array([[0, 0, 0, 0, 0]])\n\n bxstart = 4\n bxend = 9\n bystart = 4\n byend = 9\n bstep = 4\n\n txstart = 32\n txend = 65\n tystart = 32\n tyend = 65\n\n tstep = 32\n # Temporal autotuning\n for tx in range(txstart, txend, tstep):\n # import pdb; pdb.set_trace()\n for ty in range(tystart, tyend, tstep):\n for bx in range(bxstart, bxend, bstep):\n for by in range(bystart, byend, bstep):\n\n block_sizes.data[:] = [tx, ty, bx, by]\n\n eqxb = Eq(xb_size, block_sizes[0])\n eqyb = Eq(yb_size, block_sizes[1])\n eqxb2 = Eq(x0_blk0_size, block_sizes[2])\n eqyb2 = Eq(y0_blk0_size, block_sizes[3])\n\n u.data[:] = 0\n print(\"-----\")\n tteqs = (eqxb, eqyb, eqxb2, eqyb2, eq0, eq1, eq_u)\n\n # import pdb; pdb.set_trace()\n\n # Execute operator and return wavefield and receiver data\n print(\"TT====Forward norm(u)\", norm(u))\n summary_tt = self.op_fwd(save, tteqs).apply(u=u, vp=vp,\n dt=kwargs.pop('dt', self.dt), **kwargs)\n print(\"TT====Forward norm(u)\", norm(u))\n # op_tt = self.op_fwd(save, tteqs)\n\n # Execute operator and return wavefield and receiver data\n #summary_tt = self.op_fwd(save).apply(src=src, rec=rec, u=u, vp=vp,\n # dt=kwargs.pop('dt', self.dt), **kwargs)\n\n # op_tt = self.op_fwd(kernel, save, tteqs)\n # summary_tt = op_tt.apply(u=u, dt=kwargs.pop('dt', self.dt), **kwargs)\n configuration['jit-backdoor'] = False\n norm_tt_u = norm(u)\n print(\"Norm u:\", regnormu)\n print(\"Norm(tt_u):\", norm_tt_u)\n configuration['jit-backdoor'] = True\n\n print(\"===Temporal blocking======================================\")\n\n performance_map = np.append(performance_map, [[tx, ty, bx, by, summary_tt.globals['fdlike'].gpointss]], 0)\n \n print(performance_map)\n # tids = np.unique(performance_map[:, 0])\n\n #for tid in tids:\n bids = np.where((performance_map[:, 0] == tx) & (performance_map[:, 1] == ty))\n bx_data = np.unique(performance_map[bids, 2])\n by_data = np.unique(performance_map[bids, 3])\n gptss_data = performance_map[bids, 4]\n gptss_data = gptss_data.reshape(len(bx_data), len(by_data))\n\n fig, ax = plt.subplots()\n im = ax.imshow(gptss_data); #pause(2)\n # We want to show all ticks...\n ax.set_xticks(np.arange(len(bx_data)))\n ax.set_yticks(np.arange(len(by_data)))\n # ... and label them with the respective list entries\n ax.set_xticklabels(bx_data)\n ax.set_yticklabels(by_data)\n\n ax.set_title(\"Gpts/s for fixed tile size. 
(Sweeping block sizes)\")\n fig.tight_layout()\n\n fig.colorbar(im, ax=ax)\n # ax = sns.heatmap(gptss_data, linewidth=0.5)\n plt.savefig(str(shape[0]) + str(np.int32(tx)) + str(np.int32(ty)) + \".pdf\")\n\n\n if 1:\n cmap = plt.cm.get_cmap(\"viridis\")\n values = u.data[0, :, :, :]\n vistagrid = pv.UniformGrid()\n vistagrid.dimensions = np.array(values.shape) + 1\n vistagrid.spacing = (1, 1, 1)\n vistagrid.origin = (0, 0, 0) # The bottom left corner of the data set\n vistagrid.cell_arrays[\"values\"] = values.flatten(order=\"F\")\n vistaslices = vistagrid.slice_orthogonal()\n vistagrid.plot(show_edges=True)\n vistaslices.plot(cmap=cmap)\n\n # import pdb;pdb.set_trace()\n return rec, u, summary", "def forward(self, x):\n self.save_net()\n self.perturb_tensors()\n out = self.net.forward(x)\n return out", "def forward(self, x):\n x = self.efficient_net(x)\n return x", "def forward(self, x):\n x = x.float()\n n, c, t, v, m = x.size()\n x = x.permute(0, 4, 3, 1, 2).contiguous()\n x = x.view(n * m, v * c, t)\n x = self.data_bn(x)\n x = x.view(n, m, v, c, t)\n x = x.permute(0, 1, 3, 4, 2).contiguous()\n x = x.view(n * m, c, t, v)\n for gcn in self.agcn_networks:\n x = gcn(x)\n return x", "def relu_forward(x):\n out = None\n ###########################################################################\n # TODO: Implement the ReLU forward pass. #\n ###########################################################################\n out = x.copy()\n out[x<=0] = 0\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = x\n return out, cache", "def forward(self, x):\n if len(self.convs) == 0:\n return x\n x = x.contiguous()\n for c, n in zip(self.convs, self.norms):\n x = c(x.permute(0, 2, 1)) # (B, C, T)\n x = n(x.permute(0, 2, 1)) # (B, T, C)\n d = torch.nn.functional.dropout(x, p=self.dropout, training=self.training)\n x = torch.relu(d)\n return d", "def euler_forward(self, u, v, I):\n # The constants 0.04, 5, 140 are the one fits all parameters from Simple Model of Spiking Neurons E. 
Izhikevich.\n # These constants are justified when simulating large networks of neurons.\n # TODO: Parameterise the four constants 0.04, 5, 140.\n _v = v + self.dt * (0.04 * v ** 2 + 5 * v + 140 - u + I)\n _u = u + self.dt * (self.a * (self.b * v - u))\n return _v, _u", "def forward(self, input=None):\n if (input is not None) and (self.result is None):\n self.result = self.drop(self.node(input))\n\n # Pull the input from previous network layers\n elif self.result is None:\n in_result = []\n for n in self.input:\n in_result.append( n() )\n\n # Concatenate input along the last dim\n self.result = self.drop(self.node(torch.cat(in_result, in_result[0].dim()-1).type(_tensor(\"LongTensor\"))))\n\n return self.result.view(*self.G.d_out)", "def forward(self, x):\r\n x = self.conv1(x)\r\n x = self.conv1_BN(x)\r\n x = F.relu(x)\r\n x = self.conv1_dp(x)\r\n x = self.Block2_1(x)\r\n x = self.Block2_2(x)\r\n x = self.Block3_1(x)\r\n x = self.Block3_2(x)\r\n x = self.Block3_3(x)\r\n x = self.Block3_4(x)\r\n x = self.Block4_1(x)\r\n x = self.Block4_2(x)\r\n x = self.Block4_3(x)\r\n x = self.Block4_4(x)\r\n x = self.Block5_1(x)\r\n x = self.Block5_2(x)\r\n x = self.MP(x)\r\n x = x.view(x.size(0),-1)\r\n x = self.fc(x)\r\n \r\n return x", "def _forward(self, x, X, upto=None):\n if upto is not None: # cannot use 'if upto' here since it is 0-indexed\n # and layer0 is the first layer\n assert 0<=upto<=self._layer_counter\n counter = upto + 1\n else: counter = self._layer_counter\n\n y_previous, Y_previous = x, X\n # TODO: because we always need to compute F_i(X) at each layer i, this\n # is a huge overhead\n # feedforward\n for i in range(counter):\n layer = getattr(self, 'layer'+str(i))\n y, Y = layer(y_previous, Y_previous), layer(Y_previous, Y_previous)\n y_previous, Y_previous = y, Y\n\n return y", "def forward(self, x):\r\n x = x.reshape(x.shape[0], x.shape[1], 1 , 1)\r\n x = self.input(x)\r\n x = self.bn(x)\r\n x = F.relu(x)\r\n for i in range(len(self.DV)-1, -1, -1):\r\n x = self.DV[i](x)\r\n if i != 0:\r\n x = self.BN[i](x)\r\n x = F.relu(x)\r\n for col, t in enumerate(self.col_type):\r\n i = int(col/self.shape)\r\n j = col % self.shape\r\n if t == \"binary\":\r\n x[:,:,i,j] = torch.sigmoid(x[:,:,i,j])\r\n elif t == \"normalize\":\r\n x[:,:,i,j] = torch.tanh(x[:,:,i,j])\r\n else:\r\n x[:,:,i,j] = torch.relu(x[:,:,i,j])\r\n return x", "def forward(self, inputs):\n\n down1, indices_1, unpool_shape1 = self.layer_1(inputs=inputs,\n layer_size=2)\n down2, indices_2, unpool_shape2 = self.layer_2(inputs=down1,\n layer_size=2)\n down3, indices_3, unpool_shape3 = self.layer_3(inputs=down2,\n layer_size=3)\n down4, indices_4, unpool_shape4 = self.layer_4(inputs=down3,\n layer_size=3)\n down5, indices_5, unpool_shape5 = self.layer_6(inputs=down4,\n layer_size=3)\n\n inter = self.layer_inter(down5)\n\n up1 = self.layer_7(inputs=inter, indices=indices_5, layer_size=3)\n\n up2 = self.layer_8(inputs=up1, indices=indices_4, layer_size=3)\n\n up3 = self.layer_9(inputs=up2, indices=indices_3, layer_size=3)\n\n up4 = self.layer_10(inputs=up3, indices=indices_2, layer_size=2)\n\n up5 = self.layer_11(inputs=up4, indices=indices_1, layer_size=2)\n return up5", "def _layer_forward(self, z_prev, layer, use_relu=True):\n\n self.__dict__['z_prev_'+layer] = z_prev\n b = self.__getattribute__('b_'+layer)\n w = self.__getattribute__('w_'+layer)\n\n dim_out = w.shape[0]\n\n # simplification due to np broadcasting\n a = z_prev@w.T + b\n\n z = relu(a) if use_relu else a\n\n return (a, z)", "def forward(self, x):\n return x", "def 
t2u( self , t ):\n \n # Default is a constant signal\n u = self.ubar\n \n return u", "def relu(input, inplace=False):\n return FunctionLib.apply(\n 'Relu', input.device, [input],\n outputs=[input if inplace else None], alpha=0.)", "def forward(self, input):\n if isinstance(input.data, torch.cuda.FloatTensor) and \\\n self.num_gpu > 1:\n out = nn.parallel.data_parallel(\n self.layer, input, range(self.num_gpu))\n else:\n out = self.layer(input)\n\n return out.view(-1)", "def forward(self, x):\n # x = state\n \n x = F.relu(self.input(x))\n x = self.output(x)\n \n return x", "def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x", "def u2z(self, u):\n raise NotImplementedError", "def u(self):\n return (self.u_edges[1:] + self.u_edges[:-1]) / 2", "def forward(self, **kwargs):\n return self.a", "def forward(self, x):\n assert not torch.isnan(x).any(), f\"NaN in input {x}\"\n x = F.relu(self.l1(x))\n x = F.relu(self.l2(x))\n x = self.l3(x)\n return torch.clamp(x, -1, 1)", "def postprocess(self, U):\n # de-normalize so to say\n U = self.scalarU.inverse_transform(U.reshape(1, -1))\n U = U.ravel()\n return np.array(U)", "def _forward(self, z):\n raise NotImplementedError(\"Forward shouldn't be called!\")", "def eval_forward(self, x, u):\n\n if self.eval_forward_f is None:\n\n # conditional input\n tt_x = tt.matrix('x')\n\n # masked random numbers\n tt_u = tt.matrix('u')\n mu = self.mask * tt_u\n\n # scale net\n s_net = nn.FeedforwardNet(self.n_inputs + self.n_outputs, tt.concatenate([tt_x, mu], axis=1))\n for h in self.s_hiddens:\n s_net.addLayer(h, self.s_act)\n s_net.addLayer(self.n_outputs, 'linear')\n util.copy_model_parms(self.s_net, s_net)\n s = s_net.output\n\n # translate net\n t_net = nn.FeedforwardNet(self.n_inputs + self.n_outputs, tt.concatenate([tt_x, mu], axis=1))\n for h in self.t_hiddens:\n t_net.addLayer(h, self.t_act)\n t_net.addLayer(self.n_outputs, 'linear')\n util.copy_model_parms(self.t_net, t_net)\n t = t_net.output\n\n # transform (x,u) -> y\n y = mu + (1.0 - self.mask) * (tt_u * tt.exp(s) + t)\n\n # compile theano function\n self.eval_forward_f = theano.function(\n inputs=[tt_x, tt_u],\n outputs=y\n )\n\n return self.eval_forward_f(x.astype(dtype), u.astype(dtype))", "def forward(self, input):\n if isinstance(input.data, torch.cuda.FloatTensor) and \\\n self.num_gpu > 1:\n out = nn.parallel.data_parallel(\n self.layer, input, range(self.num_gpu))\n else:\n out = self.layer(input)\n return out.view(-1, 1).squeeze(1)", "def get_u(self, e):\n e_p = e - self.old_e\n\n self.old_e = self.e\n self.e = e\n\n self.sum_e += e\n\n # PID controller.\n u = - self.k_p * e - self.k_d * e_p - self.k_i * self.sum_e\n\n return u", "def forward(self, input_x):\n return self.net(input_x.float())", "def sum_u(self):\r\n try:\r\n # add the velocity to the sum\r\n self.usum.vector()[:] += self.u.vector()[:]\r\n except AttributeError:\r\n # initialize the sum\r\n self.usum = self.u.copy(deepcopy=True)", "def calc_adv_U(self):\n num_U = 0\n adv_U = numpy.zeros((3,3), float)\n\n for atm in self:\n ## use the atom's U matrix if it exists, otherwise use the\n ## temperature factor\n\n if atm.U is not None:\n adv_U += atm.U\n num_U += 1\n\n return adv_U / num_U", "def forward(self, input=None):\n if (input is not None) and (self.result is None):\n self.result = self.act(self.drop(self.node(input.view(*self.G.d_in))))\n\n # Pull the input from previous network layers\n elif self.result is None:\n in_result = []\n for n in self.input:\n in_result.append( 
n() )\n\n # Concatenate input along the last dim\n self.result = self.act(self.drop(self.node(torch.cat(in_result, in_result[0].dim() - 1))))\n\n return self.result.view(*self.G.d_out)", "def forward(self,x):\n x = x.transpose(1,2).contiguous()\n x = F.leaky_relu(self.fc1(x), 0.2)\n x = F.leaky_relu(self.bn2(self.fc2(x)), 0.2)\n x = F.leaky_relu(self.bn3(self.fc3(x)), 0.2)\n x = torch.sigmoid(self.fc4(x))\n return x.transpose(1,2)", "def forward(self,z):\n z = z.float().transpose(1,2).contiguous()\n x = F.relu(self.bn1(self.fc1(z)))\n x = F.relu(self.bn1(x))\n x = F.relu(self.bn2(self.fc2(x)))\n x = F.relu(self.bn3(self.fc3(x)))\n vocal = torch.exp(self.fc4_1(x))\n noise = torch.exp(self.fc4_2(x))\n return vocal.transpose(1,2), noise.transpose(1,2)", "def forward(self, x):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x.squeeze(0)", "def order_u(self):\n return self._degree_u + 1", "def x(self):\r\n return self.unif[0]", "def forward(self, x):\n res = self.residual(x)\n x = self.gcn(x)\n x = self.tcn(x) + res\n return self.relu(x)", "def update_relus(self):\n\n def relu_backward_hook_function(module, grad_in, grad_out):\n \"\"\"\n If there is a negative gradient, change it to zero\n \"\"\"\n # Get last forward output\n corresponding_forward_output = self.forward_relu_outputs[-1]\n corresponding_forward_output[corresponding_forward_output > 0] = 1\n modified_grad_out = corresponding_forward_output * torch.clamp(grad_in[0], min=0.0)\n del self.forward_relu_outputs[-1] # Remove last forward output\n return (modified_grad_out,)\n\n def relu_forward_hook_function(module, ten_in, ten_out):\n \"\"\"\n Store results of forward pass\n \"\"\"\n self.forward_relu_outputs.append(ten_out)\n\n # Loop through layers, hook up ReLUs", "def forward(self, x):\n x=T.div(x,255.0)\n \n #print(state[20:,20:,0])\n #print(state[:,0,:,:])\n conv1 = F.relu(self.conv1(x))\n conv2 = F.relu(self.conv2(conv1))\n conv3 = F.relu(self.conv3(conv2))\n ###\n conv_state = conv3.view(conv3.size()[0], -1)\n flat1 = F.relu(self.fc1(conv_state))\n flat2 = F.relu(self.fc2(flat1))\n\n V = self.V(flat2)\n A = self.A(flat2)\n\n return V, A\n return x", "def relu6(input, inplace=False):\n return FunctionLib.apply(\n 'Relu', input.device, [input],\n outputs=[input if inplace else None], alpha=0., max_value=6.)", "def forward(self):\n self.value = np.dot(self.x_node.value, self.w_node.value) + self.b_node.value", "def forward(self, x):\n pass", "def forward(self)->None:", "def forward(self, x):\n return self.l1(x)", "def forward(self, x):\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.maxpool(out)\n out = self.avgpool(out)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = self.avgpool(out)\n out = torch.flatten(out, 1)\n out = self.fc(out)\n return out", "def forward(self, x):\n # Pass the input through all the layers apllying ReLU activation, but the last\n for layer in self.fc_layers[:-1]:\n x = F.relu(layer(x))\n # Pass the result through the output layer apllying hyperbolic tangent function\n x = torch.tanh(self.fc_layers[-1](x))\n # Return the better action for the input state\n return x", "def forward(self, X):\n X = self._normalize(X)\n h_relu1 = self.slice1(X)\n h_relu2 = self.slice2(h_relu1)\n h_relu3 = self.slice3(h_relu2)\n h_relu4 = self.slice4(h_relu3)\n h_relu5 = self.slice5(h_relu4)\n out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]\n return out", "def 
forward(self):\n R = self.LP.cost.R\n A = self.LP.dyn.A\n B = self.LP.dyn.B\n\n x = self.LP.x0\n self.x[0] = x\n for i in range(self.LP.N):\n u = - np.linalg.inv(R+B.T.dot(self.V[i+1]).dot(B)).dot(.5*B.T.dot(self.W[i+1]) \\\n + B.T.dot(self.V[i+1]).dot(A).dot(x))\n if self.LP.dyn.u_dim == 1:\n self.u[i] = float(u)\n else:\n self.u[i] = u\n self.J_star[i] = float(x.T.dot(self.V[i]).dot(x) + self.W[i].T.dot(x)) #up to constant\n\n if i == 0:\n self.J[i] = self.LP.cost.loss(x, u, i)\n else:\n self.J[i] = self.J[i-1] + self.LP.cost.loss(x, u, i)\n x = self.LP.dyn.next_state(x, u)\n self.x[i+1] = x\n\n self.J[self.LP.N] = self.J[self.LP.N-1] + self.LP.cost.loss(x, 0, self.LP.N)\n\n self.J_star[self.LP.N] = float(x.T.dot(self.V[self.LP.N]).dot(x) \\\n + self.W[self.LP.N].T.dot(x)) #up to constant", "def forward(self, x):\n h = self.relu(self.en_x_1(self.dropout(x)))\n h = self.relu(self.en_x_2(self.dropout(h)))\n h = self.relu(self.en_x_3(self.dropout(h)))\n return self.en_x_4_mu(self.dropout(h)), self.en_x_4_sigma(self.dropout(h))", "def relu_forward(x):\n out = None\n ###########################################################################\n # TODO: Implement the ReLU forward pass. #\n ###########################################################################\n #out = np.zeros(x.shape)\n #np.clip(x, 0, None, out)\n out = np.empty_like(x) #faster than zeros\n np.clip(x, 0, None, out)\n #out = x\n #out [out < 0] = 0\n #print(x)\n #print(out)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = x\n return out, cache", "def relu_forward(x):\n out = None\n ###########################################################################\n # TODO: Implement the ReLU forward pass. #\n ###########################################################################\n out = x * (x > 0)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = x\n return out, cache", "def degree_u(self):\n return self._degree_u", "def _nn_forward(self, x_joined):\n out = self.nn(x_joined).squeeze(0).detach().numpy()\n output = out[:-self.update_size()]\n update_vector = out[-self.update_size():]\n return output, update_vector", "def forward(self, input=None):\n if (input is not None) and (self.result is None):\n self.result = self.act(input)\n\n # Pull the input from previous network layers\n elif self.result is None:\n in_result = []\n\n # Apply a separate activation to each resulting input if applicable\n if self.G.in_activation:\n for i, n in enumerate(self.input):\n in_result.append( self.G.in_activation[i](n()).type(_tensor(\"FloatTensor\")) )\n\n else:\n for n in self.input:\n in_result.append( n() )\n\n # Concatenate input along the lat dim\n self.result = self.act(torch.cat(in_result, in_result[0].dim() - 1))\n\n return self.result.view(*self.G.d_out)", "def forward(self, state):\n x = state\n for layer in self.linear_layers[:-1]:\n x = F.relu(layer(x))\n x = self.linear_layers[-1](x)\n return x", "def relu_forward(x):\n out = None\n ###########################################################################\n # TODO: Implement the ReLU forward pass. 
#\n ###########################################################################\n out = np.maximum(0, x)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = x\n return out, cache", "def forward(self, input):\n input, _ = input\n bs = input.shape[0]\n d1 = self.relu1(self.fc1(input))\n d2 = self.relu2(self.fc2(d1))\n d3 = self.fc3(d2)\n out = self.sigmoid(d3)\n\n out = out.view(bs, 17, 3)\n return out", "def forward(self,i,direction):\n \"\"\"the direction argument is used to dertermine the direcrtion of the forward function, designed for the equilibrium of the two classes of the datasets\"\"\"\n if(direction):\n self.mask_A = self.netG_Amask[self.orders[i]](self.real_A)\n self.A = self.netG_A[self.orders[i]](self.real_A)\n self.fake_B = self.A.mul(self.mask_A\n )+(1-self.mask_A).mul(self.real_A) # G_A(A)\n self.mask_B = self.netG_Bmask[self.orders[i]](self.fake_B)\n self.B = self.netG_B[self.orders[i]](self.fake_B)\n self.rec_A = self.B.mul(self.mask_B)+(1-self.mask_B).mul(self.fake_B) # G_B(G_A(A))\n else:\n self.mask_A = self.netG_Bmask[self.orders_rev[i]](self.real_A)\n self.A = self.netG_B[self.orders_rev[i]](self.real_A)\n self.fake_B = self.A.mul(self.mask_A\n )+(1-self.mask_A).mul(self.real_A) # G_A(A)\n self.mask_B = self.netG_Amask[self.orders_rev[i]](self.fake_B)\n self.B = self.netG_A[self.orders_rev[i]](self.fake_B)\n self.rec_A = self.B.mul(\n self.mask_B)+(self.mask_B).mul(1-self.fake_B) # G_B(G_A(A))", "def forward(self, x):\n # Get results of encoder network\n h1 = self.encode_nn(x)\n\n # latent space\n mu = self.encode_mu(h1)\n log_var = self.encode_log_var(h1)\n\n # Reparameterize\n z = self.reparameterize(mu, log_var)\n return z, mu, log_var", "def forward(self, x):\n # Get results of encoder network\n h1 = self.encode_nn(x)\n\n # latent space\n mu = self.encode_mu(h1)\n log_var = self.encode_log_var(h1)\n\n # Reparameterize\n z = self.reparameterize(mu, log_var)\n return z, mu, log_var", "def forward(self, x):\n x = self.conv_layer(x)\n\n self.gru.flatten_parameters()\n memory, out = self.gru(x)\n\n if self.phoneme_level:\n pv_forward = memory[:, :, :self.E//2]\n pv_backward = memory[:, :, self.E//2:]\n prosody_vector = torch.cat((pv_forward, pv_backward), dim=-1)\n else:\n out = out.transpose(0, 1)\n prosody_vector = torch.cat((out[:, 0], out[:, 1]), dim=-1).unsqueeze(1)\n prosody_vector = self.predictor_bottleneck(prosody_vector)\n\n return prosody_vector", "def forward(self, U, V):\n raise NotImplementedError(\"Please do not use the Kernel class directly\")", "def forward(self, x):\n residues = []\n # Downward Pass\n x = self.layers[0](x.unsqueeze(1))\n for layer in self.layers[1:self.half]:\n x = layer(x)\n residues.insert(0, x)\n\n # Upward Pass\n for idx, layer in enumerate(self.layers[self.half:(len(self.layers)-1)]):\n x = layer(x, residues[idx])\n x = self.layers[-1](x)\n\n return(x)", "def forward(self, state):\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n return self.fc4(x)", "def forward(self, state, action): #concatenate the action -value\n\n xu = torch.cat([state, action], 1)\n\n x1 = F.relu(self.fc1(xu))\n x1 = F.relu(self.fc2(x1))\n x1 = self.fc3(x1)\n\n x2 = F.relu(self.fc4(xu))\n x2 = F.relu(self.fc5(x2))\n x2 = self.fc6(x2)\n \n return x1, x2", "def lu(self):\n if not self.domain.is_Field:\n raise DMNotAField('Not a field')\n L, U, swaps = self.rep.lu()\n return self.from_rep(L), 
self.from_rep(U), swaps", "def uf(self):\n return self._uf", "def get_u(r_div_R, z_div_L, k, Bp, Bm, gp, gm, lam1, int_Y):\n \n # uR = (lam1/2.)*(1. + r_div_R)*int_Y\n # uR = (1. + r_div_R)*int_Y\n uR = (1. + r_div_R)*int_Y\n \n uZ_out = -k*(exp(k*z_div_L)*(Bp + gm) \\\n - exp(-k*z_div_L)*(Bm + gp))\n\n return uZ_out * uR", "def relu_forward(x):\n out = None\n ###########################################################################\n # TODO: Implement the ReLU forward pass. #\n ###########################################################################\n out = np.maximum(x,0)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n cache = x\n return out, cache", "def forward(self, x):\n\n out = torch.relu(self.conv1(x))\n out = torch.relu(self.conv2(out))\n\n out = torch.relu(self.resnet_block(out))\n\n out = torch.relu(self.deconv1(out))\n out = torch.tanh(self.deconv2(out))\n\n return out", "def forward(self, x):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\n\t\toutput = self._layers[0].forward(x)\n\t\tfor i in range(1, len(self._layers)):\n\t\t\toutput = self._layers[i].forward(output)\n\t\treturn output\n\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################", "def forward(self, state):#forward pass\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n return torch.tanh(self.fc3(x))", "def forward(self, state):\n\n x = state # Careful: deepcopy bug?\n # Intermediate Layers\n for layer in self.layers[:-1]:\n\n x = nn.ReLU()(layer(x))\n\n x = nn.Tanh()(self.layers[-1](x))\n return x", "def forward(self, x):\n return self.relu(self.conv(x))" ]
[ "0.6244465", "0.6143601", "0.61348015", "0.60307336", "0.5914714", "0.586357", "0.5847307", "0.5737416", "0.57315356", "0.5697165", "0.5688825", "0.5688825", "0.5688825", "0.56794524", "0.56679034", "0.5638842", "0.5594738", "0.55896443", "0.5576749", "0.5571661", "0.5566135", "0.55470383", "0.55427605", "0.55363", "0.5533603", "0.5511647", "0.5506512", "0.5505972", "0.5497721", "0.5486407", "0.548037", "0.5476035", "0.54685867", "0.5468541", "0.5467939", "0.54578525", "0.5452075", "0.54466486", "0.5445254", "0.54385006", "0.542862", "0.54209685", "0.5416206", "0.54059947", "0.5404547", "0.539713", "0.53939366", "0.539324", "0.53845215", "0.5381983", "0.53802574", "0.5377551", "0.537401", "0.5359855", "0.53598505", "0.5346049", "0.5337342", "0.5337006", "0.53349125", "0.5331081", "0.53269154", "0.53198904", "0.5314291", "0.5304881", "0.5301916", "0.5288876", "0.52844673", "0.527676", "0.5275084", "0.5266523", "0.5246314", "0.52454597", "0.52436614", "0.52405983", "0.5240039", "0.52320546", "0.522516", "0.5224297", "0.52206016", "0.5217984", "0.5204504", "0.52011865", "0.5196571", "0.5195059", "0.5186499", "0.5186499", "0.51855266", "0.51777923", "0.5175602", "0.5172361", "0.517132", "0.51706004", "0.51705337", "0.5169497", "0.51622057", "0.5158712", "0.51585853", "0.51584387", "0.51542884", "0.5151343" ]
0.58103776
7
The provider-assigned unique ID for this managed resource.
def id(self) -> str: return pulumi.get(self, "id")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def provider_id(self):\n return self.get('_id')", "def provider_id(self):\n raise NotImplementedError", "def id(self):\n return self.raw_resource.uuid", "def healthcare_provider_id(self):\n return self._healthcare_provider_id", "def unique_identifier(self) -> str:\n return pulumi.get(self, \"unique_identifier\")", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def unique_id(self) -> str:\n return pulumi.get(self, \"unique_id\")", "def unique_id(self):\r\n return f\"{DOMAIN}_{self.charge_point_id}_{self.connector_id}\"", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self):\n return self._uuid", "def unique_id(self):\n return self._uuid", "def unique_id(self) -> str:\n return self._uid", "def unique_id(self):\n return f\"{self.device.id}-{self.key}\"", "def unique_id(self):\n return self.properties.get(\"UniqueId\", None)", "def internal_id(self) -> str:\n return pulumi.get(self, \"internal_id\")", "def unique_id(self) -> str:\n return f\"{self._host}_{self._name}_{self._unique_id}\"", "def custom_id(self) -> str:\n return self._underlying.custom_id", "def unique_id(self):\n return self._id", "def unique_id(self):\n return self._id", "def unique_id(self):\n return (\n \"a80f3d5b-df3d-4e38-bbb7-1025276830cd\"\n )", "def get_objectID(self):\n return self.resource.uuid", "def unique_id(self):\n return self._uid", "def unique_id(self):\n return self._uid", "def unique_id(self):\n return self._uid", "def unique_id(self):\n id = \"{}{}{}\".format(\n DOMAIN, self._account, self.sensorName.lower().replace(\" \", \"\")\n )\n return id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self) -> str:\n return self.get_unique_id(wallet=self.wallet_id, nonce=self.nonce)", "def unique_id(self):\n return self.device_id", "def get_id(self):\n 
\"\"\"Requires use of Python 3\"\"\"\n return str(self.id)", "def resourceid(self):", "def unique_id(self) -> str:\n return '{0}_{1}'.format(self._mac.replace(':', ''), self.entity_id)", "def unique_id(self):\n return self._device_id", "def getid(self):\n if self.Id is None:\n return self.internalid\n else:\n return self.Id", "def getid(self):\n if self.Id is None:\n return self.internalid\n else:\n return self.Id", "def getid(self):\n if self.Id is None:\n return self.internalid\n else:\n return self.Id", "def getid(self):\n if self.Id is None:\n return self.internalid\n else:\n return self.Id", "def getid(self):\n if self.Id is None:\n return self.internalid\n else:\n return self.Id", "def getid(self):\n if self.Id is None:\n return self.internalid\n else:\n return self.Id", "def get_id(self):\n return str(self._id)", "def get_id(self):\n return str(self._id)", "def identity(self) -> str:\n return self.requester.uuid", "def get_id(self) -> str:\n return self._register_id", "def get_unique_id(self):\n if not self.unique_id:\n self.unique_id = uuid.uuid4().hex\n return self.unique_id", "def unique_id(self):\n return f\"{self.config_entry.entry_id}_{self.hub_name}_{self.sensor_name}\"", "def getID(self):\n return str(self._storage_id)", "def id(self):\n return self.raw_resource[\"id\"]", "def get_id(self): # real signature unknown; restored from __doc__\n return \"\"", "def id(self) -> str:\r\n return self._id", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def identifier(self):\n return self.__id", "def get_id(self):\n return self.uid", "def resource_id(self) -> Optional[str]:\n return pulumi.get(self, \"resource_id\")", "def unique_id(self):\n return self._device.serial", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id" ]
[ "0.8193033", "0.78504187", "0.77109545", "0.7604915", "0.74777704", "0.74738747", "0.74738747", "0.74738747", "0.7426179", "0.7379609", "0.73721254", "0.73721254", "0.73721254", "0.73721254", "0.73721254", "0.73721254", "0.73721254", "0.73721254", "0.7357793", "0.7357793", "0.73480314", "0.72914726", "0.72816133", "0.72517055", "0.72515404", "0.7217732", "0.72115767", "0.72115767", "0.72016704", "0.71804124", "0.71662354", "0.71662354", "0.71662354", "0.71388066", "0.7137693", "0.7137693", "0.7137693", "0.7137693", "0.7137693", "0.7137693", "0.7137693", "0.7137693", "0.7137693", "0.7137693", "0.7137693", "0.7137693", "0.7137693", "0.7137693", "0.7137693", "0.7137693", "0.7137693", "0.7137693", "0.7137693", "0.7137693", "0.7137693", "0.7137693", "0.7137693", "0.7137693", "0.7137693", "0.7133868", "0.71262634", "0.71189636", "0.71131366", "0.70899886", "0.70684016", "0.7059034", "0.7059034", "0.7059034", "0.7059034", "0.7059034", "0.7059034", "0.7057848", "0.7057848", "0.70524055", "0.70343935", "0.70216423", "0.7020117", "0.70147526", "0.7013113", "0.7012862", "0.7006446", "0.6989674", "0.6989674", "0.6989674", "0.6989674", "0.698813", "0.6972098", "0.6954006", "0.69364727", "0.69364226", "0.69364226", "0.69364226", "0.69364226", "0.69364226", "0.69364226", "0.69364226", "0.69364226", "0.69364226", "0.69364226", "0.69364226", "0.69364226" ]
0.0
-1
The managed object reference ID of the root resource pool for the cluster.
def resource_pool_id(self) -> str: return pulumi.get(self, "resource_pool_id")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pool_id ( self ):\n return self._pool_id", "def managed_object_id(self):\n o = self._data[\"managed_object\"]\n if type(o) in (int, long):\n return o\n return o.id", "def identity_pool_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"identity_pool_id\")", "def elastic_pool_id(self) -> Optional[str]:\n return pulumi.get(self, \"elastic_pool_id\")", "def parent_cluster_resource_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"parent_cluster_resource_id\")", "def _pool_id(self, queue, project=None):\n return self._catalogue_ctrl.get(project, queue)['pool']", "def root_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"root_id\")", "def get_objectID(self):\n return self.resource.uuid", "def reference_id(self) -> str:\n return pulumi.get(self, \"reference_id\")", "def get_parentID(self):\n parent = Collection.find(self.resource.container)\n return parent.uuid", "def getId(self):\n return _libsbml.CompartmentReference_getId(self)", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def parent_cluster_resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"parent_cluster_resource_id\")", "def cluster_id(self):\n return self._cluster_id", "def obj_id(self) -> int:\n return int(self.index.split(\"/\")[-1]) if self.index else None", "def identity_pool_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"identity_pool_id\")", "def get_objectID(self):\n return self.collection.uuid", "def identity_pool_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"identity_pool_id\")", "def rootid(self):\n candidates = [nid for nid, attrs\n in self.graph.nodes.items()\n if attrs['type'] == 'root']\n \n if len(candidates) > 1:\n errmsg = self.name + ' has more than one root'\n raise ValueError(errmsg)\n\n if len(candidates) == 0:\n errmsg = self.name + ' has no root'\n raise ValueError(errmsg) \n \n return candidates[0]", "def central_node_id(self):\n if self._central_node_id is None:\n return self.nodes[0]\n else:\n return self._central_node_id", "def owner_id(self) -> int:\n return self.proto.owner", "def get_parentID(self):\n parent_path = self.collection.container\n if self.collection.is_root:\n parent_path = \"/\"\n parent = Collection.find(parent_path)\n return parent.uuid", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def master_id(self):\r\n return self._arm.master_id", "def object_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"object_id\")", "def object_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"object_id\")", "def get_resource_id(self, obj):\n return obj.id", "def owner_id(self) -> str:\n return self.__owner_id", "def object_id(self) -> Optional[str]:\n return pulumi.get(self, \"object_id\")", "def object_id(self) -> Optional[str]:\n return pulumi.get(self, \"object_id\")", "def 
object_id(self) -> Optional[str]:\n return pulumi.get(self, \"object_id\")", "def owner_id(self) -> str:\n return pulumi.get(self, \"owner_id\")", "def owner_id(self):\n return self._owner_id", "def reference_id(self) -> Optional[str]:\n return pulumi.get(self, \"reference_id\")", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def customer_owned_ipv4_pool(self) -> str:\n return pulumi.get(self, \"customer_owned_ipv4_pool\")", "def getId(self):\n return _libsbml.Compartment_getId(self)", "def tree_id(self):\n if self.is_root:\n return 0\n elif self._link is not None:\n return self._link.tree_id\n else:\n return self._tree_id", "def pool(self):\n return self._properties.get('pool')", "def rootkey(self):\n return self._follow(self._tree_ref).key", "def id(self):\n return self.raw_resource.uuid", "def getObjectID(self):\n\n return self.objectID", "def id(self): \n if self.cloudnet:\n return self.cloudnet.id\n else:\n return None", "def resourceid(self):", "def resource_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_id\")", "def get_id(self):\n if not self.nccl_id:\n logger.warning(\"The NCCL ID has not been \"\n \"set yet for store {}.\".format(self.name))\n return self.nccl_id", "def node_id(self) -> int:\r\n return self._node_id", "def label_id(self):\n return int(self.instance_id // 1000)", "def orig_cluster_id(self):\n if self.old_cluster_name is None:\n raise RuntimeError('old_cluster_name is not set')\n return self.fuel_web.client.get_cluster_id(self.old_cluster_name)", "def _get_msti_root_id(self):\n return self.__msti_root_id", "def cont_to_id(self):\n return self.client.containers.get(self.container).id", "def _get_id(self) -> int:\n if len(self._id_pool) == 0:\n raise ArchonError(\"No ids reamining in the pool!\")\n return self._id_pool.pop()", "def get_managed_object(self):\n return self.key", "def get_pool_id(pool_name, host=None):\n cmd = utils.XMS_CLI_HEADER + \"-f json pool list\"\n print cmd\n ret = utils.execute_cmd_in_host(cmd, host)\n if ret[2] != 0 or isinstance(ret[0], dict):\n print \"[Error] Failed to get pool info. 
Error message: [{err}]\".format(err=ret[1])\n return -1\n try:\n pool_info = json.loads(ret[0])\n pools = pool_info[\"pools\"]\n for p in pools:\n if pool_name == p[\"name\"]:\n return p[\"id\"]\n except Exception as e:\n print \"[Error] error message is: \" + e.message\n return -1", "def data_center_id(self) -> str:\n return pulumi.get(self, \"data_center_id\")", "def data_center_id(self) -> str:\n return pulumi.get(self, \"data_center_id\")", "def compartment_id(self):\n return self._compartment_id", "def compartment_id(self):\n return self._compartment_id", "def getIdRef(self):\n return _libsbml.SBaseRef_getIdRef(self)", "def get_cluster_id(options):\n cluster = options.cluster\n datacenter = get_datacenter(options)\n for item in datacenter.hostFolder.childEntity:\n if (item.name == cluster):\n return item._GetMoId()", "def containerID(self):\n return self._container", "def getOid(self):\n if self.__state & self.stClean:\n return self.__oid\n else:\n raise SmiError('%s object not fully initialized' % self.__class__.__name__)", "def get_id(self):\n return self.__id", "def get_id(self):\n return self.__id", "def nodebalancer_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"nodebalancer_id\")", "def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")", "def global_replication_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"global_replication_group_id\")", "def get_catalog_hierarchy_id(self):\n # Implemented from kitosid template for -\n # osid.resource.BinHierarchySession.get_bin_hierarchy_id\n return self._get_provider_session('catalog_hierarchy_session').get_catalog_hierarchy_id()", "def persistent_id(self):\n return '{0}/{1}'.format(self.model_label(), self.id)", "def get_id(self):\n\n\t\treturn self.__id", "def resource_group_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"resource_group_id\")", "def control_node_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"control_node_id\")", "def getRootName(self):\n return self.__rootName", "def context_parent_id(self) -> str | None:\n return bytes_to_ulid_or_none(self.context_parent_id_bin)", "def get_custom_object_id():\n worker = ray.worker.global_worker\n object_id = ray._raylet.compute_put_id(worker.current_task_id,\n worker.task_context.put_index)\n worker.task_context.put_index += 1\n return object_id", "def get_host_master_id(self):\r\n return self._handler.get_host_master_id()", "def resource_group_id(self) -> Optional[str]:\n return pulumi.get(self, \"resource_group_id\")", "def get_id(self):\n return self.name", "def base_image_id(self):\n return self._base_image_id", "def core_id(self):\n return self._dll.JLINKARM_GetId()", "def _newClusterId(self):\n return self.guidGenerator.new_id()", "def getid(self):\n return self.__id", "def getLastObjectId(self):\n return self.objId", "def internal_id(self) -> str:\n return pulumi.get(self, \"internal_id\")", "def managed_rule_identifier(self) -> str:\n return pulumi.get(self, \"managed_rule_identifier\")" ]
[ "0.71896243", "0.66001445", "0.65141094", "0.6471181", "0.6368429", "0.6354283", "0.634698", "0.6305942", "0.62388986", "0.62217504", "0.6184611", "0.61742735", "0.61742735", "0.61742735", "0.61742735", "0.61742735", "0.61529875", "0.6127469", "0.6123672", "0.61147434", "0.6111746", "0.6098793", "0.6090876", "0.60850084", "0.6024355", "0.60161644", "0.59966296", "0.59966296", "0.59966296", "0.59966296", "0.59966296", "0.599174", "0.59531385", "0.59531385", "0.59526616", "0.5931739", "0.5921793", "0.5921793", "0.5921793", "0.5910044", "0.5891344", "0.58674043", "0.5865836", "0.5865836", "0.5865836", "0.5846603", "0.58360183", "0.58343476", "0.58328295", "0.58216864", "0.58187777", "0.5809769", "0.5797921", "0.579548", "0.57866424", "0.57866424", "0.57866424", "0.57799953", "0.5772858", "0.57616293", "0.5755846", "0.57490695", "0.57463205", "0.5745851", "0.5741512", "0.5731438", "0.5717889", "0.5717889", "0.57112294", "0.57112294", "0.57018363", "0.5693533", "0.5685139", "0.5679186", "0.5664299", "0.5664299", "0.566111", "0.5660358", "0.5660358", "0.5660358", "0.5660358", "0.56501937", "0.56490844", "0.56462514", "0.56418395", "0.5636773", "0.5626972", "0.56261516", "0.5621096", "0.56149316", "0.5613158", "0.55974424", "0.55966043", "0.5591686", "0.5586472", "0.5585928", "0.5584781", "0.5584699", "0.55723506", "0.5566295" ]
0.74713767
0
The `ComputeCluster` data source can be used to discover the ID of a cluster in vSphere. This is useful to fetch the ID of a cluster that you want to use for virtual machine placement via the `VirtualMachine` resource, allowing you to specify the cluster's root resource pool directly versus using the alias available through the `ResourcePool` data source.

> You may also wish to see the `ComputeCluster` resource for more information about clusters and how to manage the resource in this provider.

Example Usage

```python
import pulumi
import pulumi_vsphere as vsphere

datacenter = vsphere.get_datacenter(name="dc01")
compute_cluster = vsphere.get_compute_cluster(name="cluster01",
    datacenter_id=datacenter.id)
```
def get_compute_cluster(datacenter_id: Optional[str] = None,
                        name: Optional[str] = None,
                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetComputeClusterResult:
    __args__ = dict()
    __args__['datacenterId'] = datacenter_id
    __args__['name'] = name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('vsphere:index/getComputeCluster:getComputeCluster', __args__, opts=opts, typ=GetComputeClusterResult).value
    return AwaitableGetComputeClusterResult(
        datacenter_id=pulumi.get(__ret__, 'datacenter_id'),
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        resource_pool_id=pulumi.get(__ret__, 'resource_pool_id'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cluster_id(options):\n cluster = options.cluster\n datacenter = get_datacenter(options)\n for item in datacenter.hostFolder.childEntity:\n if (item.name == cluster):\n return item._GetMoId()", "def get_compute_cluster_output(datacenter_id: Optional[pulumi.Input[Optional[str]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetComputeClusterResult]:\n ...", "def get_cluster(self,cluster_name,project_id=''):\n print( f'>>>>>>{self.project_id}')\n if project_id == '':\n project_id = self.project_id\n return self.get('{}/groups/{}/clusters/{}'.format(ApiVersion.A1.value,project_id,cluster_name))", "def get_cluster_id(self):\n cmd = \"svcinfo lscluster -delim :\"\n\n output = self._svc_command(cmd)[0]\n\n if len(output) != 2:\n return None\n\n header = output[0].split(':')\n values = output[1].split(':')\n index = header.index(SVC_CLUSTER_ID)\n cluster_id = values[index]\n return cluster_id", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")", "def find_cluster(self, id: str) -> dto.Cluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def cluster_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_id\")", "def clusters(self,project_id=os.environ.get(\"ATLAS_PROJECT\")):\n project_id = project_id if project_id != '' else self.__project_id\n return self.get('{}/groups/{}/clusters'.format(ApiVersion.A1.value,project_id))", "def cluster_id(self):\n return self._cluster_id", "def find_cluster(self, id):\n raise NotImplementedError", "def cluster_identifier(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_identifier\")", "def find_kubernetes_cluster(self, id: str) -> dto.KubernetesCluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def show_cluster(name: str) -> Cluster:\n environment = EnvironmentProvider().environment\n return environment.clusters[name]", "def get_coe_cluster(self, name_or_id, filters=None):\n return _utils._get_entity(self, 'coe_cluster', name_or_id, filters)", "def lookup_cluster_by_name(cluster_name):\n cluster_root = get_cluster_root()\n if not cluster_root:\n print('Cannot get the root of the linked list of clusters')\n return\n cluster = None\n\n # lookup for the task associated with the id\n if cluster_root['cluster_']['name'].string() == cluster_name:\n cluster = 
cluster_root['cluster_'].address\n else:\n curr = cluster_root\n while True:\n curr = curr['next'].cast(uClusterDL_ptr_type)\n\n if curr['cluster_']['name'].string() == cluster_name:\n cluster = curr['cluster_'].address\n break\n\n if curr == cluster_root:\n break\n\n if not cluster:\n print(\n (\"Cannot find a cluster with the name: {}.\".format(cluster_name))\n )\n return cluster", "def cluster_myid(self, target_node: \"TargetNodesT\") -> ResponseT:\n return self.execute_command(\"CLUSTER MYID\", target_nodes=target_node)", "def show_vsan_cluster(self, cluster_id):\n url = \"clusters/%s\" % str(cluster_id)\n resp, body = self.get(url)\n body = json.loads(body)\n self.expected_success(200, resp.status)\n return service_client.ResponseBody(resp, body['cluster'])", "def data_center_id(self) -> str:\n return pulumi.get(self, \"data_center_id\")", "def data_center_id(self) -> str:\n return pulumi.get(self, \"data_center_id\")", "def get_one_cluster_by_name(ctx, cluster_name, project_name):\n project = ctx.obj.groups.byName[project_name].get().data\n cluster = ctx.obj.groups[project.id].clusters[cluster_name].get()\n pprint(cluster.data)", "def get_datacenter(conn):\n datacenter_id = get_datacenter_id()\n\n for item in conn.list_datacenters()[\"items\"]:\n if item[\"id\"] == datacenter_id:\n return item\n\n raise SaltCloudNotFound(\n \"The specified datacenter '{}' could not be found.\".format(datacenter_id)\n )", "def get_cluster(cluster_id: Optional[str] = None,\n location: Optional[str] = None,\n project: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetClusterResult:\n __args__ = dict()\n __args__['clusterId'] = cluster_id\n __args__['location'] = location\n __args__['project'] = project\n if opts is None:\n opts = pulumi.InvokeOptions()\n if opts.version is None:\n opts.version = _utilities.get_version()\n __ret__ = pulumi.runtime.invoke('google-native:container/v1:getCluster', __args__, opts=opts, typ=GetClusterResult).value\n\n return AwaitableGetClusterResult(\n addons_config=__ret__.addons_config,\n authenticator_groups_config=__ret__.authenticator_groups_config,\n autopilot=__ret__.autopilot,\n autoscaling=__ret__.autoscaling,\n binary_authorization=__ret__.binary_authorization,\n cluster_ipv4_cidr=__ret__.cluster_ipv4_cidr,\n conditions=__ret__.conditions,\n confidential_nodes=__ret__.confidential_nodes,\n create_time=__ret__.create_time,\n current_master_version=__ret__.current_master_version,\n current_node_version=__ret__.current_node_version,\n database_encryption=__ret__.database_encryption,\n default_max_pods_constraint=__ret__.default_max_pods_constraint,\n description=__ret__.description,\n enable_kubernetes_alpha=__ret__.enable_kubernetes_alpha,\n enable_tpu=__ret__.enable_tpu,\n endpoint=__ret__.endpoint,\n expire_time=__ret__.expire_time,\n initial_cluster_version=__ret__.initial_cluster_version,\n ip_allocation_policy=__ret__.ip_allocation_policy,\n label_fingerprint=__ret__.label_fingerprint,\n legacy_abac=__ret__.legacy_abac,\n location=__ret__.location,\n locations=__ret__.locations,\n logging_config=__ret__.logging_config,\n logging_service=__ret__.logging_service,\n maintenance_policy=__ret__.maintenance_policy,\n master_auth=__ret__.master_auth,\n master_authorized_networks_config=__ret__.master_authorized_networks_config,\n mesh_certificates=__ret__.mesh_certificates,\n monitoring_config=__ret__.monitoring_config,\n monitoring_service=__ret__.monitoring_service,\n name=__ret__.name,\n network=__ret__.network,\n 
network_config=__ret__.network_config,\n network_policy=__ret__.network_policy,\n node_ipv4_cidr_size=__ret__.node_ipv4_cidr_size,\n node_pools=__ret__.node_pools,\n notification_config=__ret__.notification_config,\n private_cluster_config=__ret__.private_cluster_config,\n release_channel=__ret__.release_channel,\n resource_labels=__ret__.resource_labels,\n resource_usage_export_config=__ret__.resource_usage_export_config,\n self_link=__ret__.self_link,\n services_ipv4_cidr=__ret__.services_ipv4_cidr,\n shielded_nodes=__ret__.shielded_nodes,\n status=__ret__.status,\n subnetwork=__ret__.subnetwork,\n tpu_ipv4_cidr_block=__ret__.tpu_ipv4_cidr_block,\n vertical_pod_autoscaling=__ret__.vertical_pod_autoscaling,\n workload_identity_config=__ret__.workload_identity_config)", "def cluster_name(self) -> str:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> str:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cluster_name\")", "def resource_type(self):\n return 'cluster'", "def pc_cluster(data, clusters):\n dist = MorningstarPCA.pc_distance(data, clusters)\n return MorningstarPCA.get_column_with_min_value(dist)", "def cluster_name(self):\n return self._data['cluster_name']", "def get_cluster(self, label):\n try:\n return self._clusters[label]\n except KeyError:\n return None", "def find_kubernetes_cluster_template(self, id: str) -> dto.KubernetesClusterTemplate:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def find_cluster_sample(self, sample):\n for cluster in self.cluster_lst:\n if sample in cluster.get_samples():\n return cluster.get_c_id()", "def cluster_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"cluster_name\")", "def get_coe_cluster_certificate(self, cluster_id):\n return (\n self.container_infrastructure_management.get_cluster_certificate(\n cluster_id\n )\n )", "def cluster_query(cluster_id):\n request_debug(r, logger)\n # cluster_id = request_get(r, \"cluster_id\")\n\n result = cluster_handler.get_by_id(cluster_id)\n logger.info(result)\n if result:\n response_ok['data'] = result\n return jsonify(response_ok), CODE_OK\n else:\n logger.warning(\"cluster not found with id=\" + cluster_id)\n response_fail[\"data\"] = r.form\n response_fail[\"code\"] = CODE_NOT_FOUND\n return jsonify(response_fail), CODE_NOT_FOUND", "def get_datacenter_id():\n datacenter_id = config.get_cloud_config_value(\n \"datacenter_id\", get_configured_provider(), __opts__, search_global=False\n )\n\n conn = get_conn()\n\n try:\n conn.get_datacenter(datacenter_id=datacenter_id)\n except PBNotFoundError:\n log.error(\"Failed to get datacenter: %s\", datacenter_id)\n raise\n\n return datacenter_id", "def get_cluster_output(cluster_id: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetClusterResult]:\n ...", "def get_cluster_name(cls):\n\n mid = Machineid()\n if mid.is_sps_cluster:\n return cls.SPS\n if mid.is_spts_cluster:\n return cls.SPTS\n if mid.is_mdfl_cluster:\n return cls.MDFL\n\n return cls.LOCAL", "def get_cluster_idx(_cluster):\n\n return _cluster.cluster_idx", "def cluster(self):\n return self._cluster", "def cluster(self):\n return self._cluster", "def 
cluster_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_name\")", "def get_cluster(t2_url, t2_token, id):\n response = requests.get(f\"{t2_url}/api/clusters/{id}\", headers={ \"t2-token\": t2_token })\n if(response.status_code != 200):\n log(f\"API call to get cluster returned error code {response.status_code}\")\n return None\n return response.json()", "def get_cluster_by_id(self, c_id: str) -> List[str]:\n return [k for k, v in self._clusters.items() if v == c_id]", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n cluster_id: Optional[pulumi.Input[str]] = None,\n identity: Optional[pulumi.Input[pulumi.InputType['ClusterIdentityArgs']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n size_gb: Optional[pulumi.Input[int]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Cluster':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ClusterState.__new__(_ClusterState)\n\n __props__.__dict__[\"cluster_id\"] = cluster_id\n __props__.__dict__[\"identity\"] = identity\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"size_gb\"] = size_gb\n __props__.__dict__[\"tags\"] = tags\n return Cluster(resource_name, opts=opts, __props__=__props__)", "def getClusterData(clusterName,data):\n clusters = rhevGet(\"/api/clusters\")\n doc = libxml2.parseDoc(clusters)\n ctxt = doc.xpathNewContext()\n res = ctxt.xpathEval(\"/clusters/cluster[name [position()=1]= '\"+ clusterName + \"']\")\n return res[0].prop(data)", "def cluster_identity_get(self, desired_attributes=None):\n return self.request( \"cluster-identity-get\", {\n 'desired_attributes': [ desired_attributes, 'desired-attributes', [ ClusterIdentityInfo, 'None' ], False ],\n }, {\n 'attributes': [ ClusterIdentityInfo, False ],\n } )", "def clustering(self) -> 'outputs.ClusteringResponse':\n return pulumi.get(self, \"clustering\")", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n auto_renew: Optional[pulumi.Input[bool]] = None,\n auto_renew_period: Optional[pulumi.Input[int]] = None,\n cluster_name: Optional[pulumi.Input[str]] = None,\n data_center_name: Optional[pulumi.Input[str]] = None,\n disk_size: Optional[pulumi.Input[int]] = None,\n disk_type: Optional[pulumi.Input[str]] = None,\n enable_public: Optional[pulumi.Input[bool]] = None,\n instance_type: Optional[pulumi.Input[str]] = None,\n ip_white: Optional[pulumi.Input[str]] = None,\n maintain_end_time: Optional[pulumi.Input[str]] = None,\n maintain_start_time: Optional[pulumi.Input[str]] = None,\n major_version: Optional[pulumi.Input[str]] = None,\n node_count: Optional[pulumi.Input[int]] = None,\n password: Optional[pulumi.Input[str]] = None,\n pay_type: Optional[pulumi.Input[str]] = None,\n period: Optional[pulumi.Input[int]] = None,\n period_unit: 
Optional[pulumi.Input[str]] = None,\n public_points: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n status: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None) -> 'Cluster':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ClusterState.__new__(_ClusterState)\n\n __props__.__dict__[\"auto_renew\"] = auto_renew\n __props__.__dict__[\"auto_renew_period\"] = auto_renew_period\n __props__.__dict__[\"cluster_name\"] = cluster_name\n __props__.__dict__[\"data_center_name\"] = data_center_name\n __props__.__dict__[\"disk_size\"] = disk_size\n __props__.__dict__[\"disk_type\"] = disk_type\n __props__.__dict__[\"enable_public\"] = enable_public\n __props__.__dict__[\"instance_type\"] = instance_type\n __props__.__dict__[\"ip_white\"] = ip_white\n __props__.__dict__[\"maintain_end_time\"] = maintain_end_time\n __props__.__dict__[\"maintain_start_time\"] = maintain_start_time\n __props__.__dict__[\"major_version\"] = major_version\n __props__.__dict__[\"node_count\"] = node_count\n __props__.__dict__[\"password\"] = password\n __props__.__dict__[\"pay_type\"] = pay_type\n __props__.__dict__[\"period\"] = period\n __props__.__dict__[\"period_unit\"] = period_unit\n __props__.__dict__[\"public_points\"] = public_points\n __props__.__dict__[\"security_groups\"] = security_groups\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"vswitch_id\"] = vswitch_id\n __props__.__dict__[\"zone_id\"] = zone_id\n return Cluster(resource_name, opts=opts, __props__=__props__)", "def get_clusters(self):\n fields = ['name', ]\n return self.get_data(\"clusters\", fields)", "def cluster_name(self):\n return self.name", "async def get(id):\n cluster = clusters.get_by_id(id)\n\n if cluster is None:\n raise HTTPException(status_code=404, detail=\"Cluster not found for ID: {0}\".format(id))\n\n return cluster.export()", "def orig_cluster_id(self):\n if self.old_cluster_name is None:\n raise RuntimeError('old_cluster_name is not set')\n return self.fuel_web.client.get_cluster_id(self.old_cluster_name)", "def get_cluster_config(cohesity_client):\n config = cohesity_client.cluster.get_cluster()\n return config", "def _find_cluster(clusters, label):\n for clst in clusters:\n if clst.label == label: return clst\n return None", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Cluster':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = ClusterArgs.__new__(ClusterArgs)\n\n __props__.__dict__[\"allocation_state\"] = None\n __props__.__dict__[\"allocation_state_transition_time\"] = None\n __props__.__dict__[\"creation_time\"] = None\n __props__.__dict__[\"current_node_count\"] = None\n __props__.__dict__[\"errors\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"node_setup\"] = None\n __props__.__dict__[\"node_state_counts\"] = None\n __props__.__dict__[\"provisioning_state\"] = None\n __props__.__dict__[\"provisioning_state_transition_time\"] = None\n __props__.__dict__[\"scale_settings\"] = None\n __props__.__dict__[\"subnet\"] = None\n __props__.__dict__[\"type\"] = None\n __props__.__dict__[\"user_account_settings\"] = None\n __props__.__dict__[\"virtual_machine_configuration\"] = None\n 
__props__.__dict__[\"vm_priority\"] = None\n __props__.__dict__[\"vm_size\"] = None\n return Cluster(resource_name, opts=opts, __props__=__props__)", "def _get_center(data, node_id, feature_columns):\n if node_id in data.id.values:\n return data[data.id == node_id][feature_columns].values\n else:\n return _get_center(data, node_id[:-1], feature_columns)", "def cluster_type(self) -> str:\n return pulumi.get(self, \"cluster_type\")", "def get_cluster(self, profile):\n if self._value.has_option(profile, 'cluster'):\n if self._value.has_option(profile, 'cluster'):\n cluster = self._value.get(profile, 'cluster')\n self.logger.info(\"Connecting to: %s cluster\" % cluster)\n else:\n self.logger.error(\n \"No cluster parameter found\"\n )\n exit(1)\n else:\n self.logger.error(\n \"No profile found. Please define a default profile, \\\n or specify a named profile using `--profile`\"\n )\n exit(1)\n return cluster", "def get_cluster_entry(self):\n\n cert_data = self.cluster_description.get(\"certificateAuthority\", {}).get(\"data\", \"\")\n endpoint = self.cluster_description.get(\"endpoint\")\n arn = self.cluster_description.get(\"arn\")\n\n return OrderedDict([\n (\"cluster\", OrderedDict([\n (\"certificate-authority-data\", cert_data),\n (\"server\", endpoint)\n ])),\n (\"name\", arn)\n ])", "def cluster_description(self):\n if self._cluster_description is None:\n if self._parsed_globals is None:\n client = self._session.create_client(\"eks\")\n else:\n client = self._session.create_client(\n \"eks\",\n region_name=self._parsed_globals.region,\n endpoint_url=self._parsed_globals.endpoint_url,\n verify=self._parsed_globals.verify_ssl\n )\n full_description = client.describe_cluster(name=self._cluster_name)\n self._cluster_description = full_description[\"cluster\"]\n\n if \"status\" not in self._cluster_description:\n raise EKSClusterError(\"Cluster not found\")\n if self._cluster_description[\"status\"] not in [\"ACTIVE\", \"UPDATING\"]:\n raise EKSClusterError(\"Cluster status is {0}\".format(\n self._cluster_description[\"status\"]\n ))\n\n return self._cluster_description", "def gke_cluster(self) -> Optional['outputs.MembershipEndpointGkeCluster']:\n return pulumi.get(self, \"gke_cluster\")", "def get_cluster_template(self, name_or_id, filters=None, detail=False):\n return _utils._get_entity(\n self,\n 'cluster_template',\n name_or_id,\n filters=filters,\n detail=detail,\n )", "def launch_cluster(self):\n version = self.get_latest_spark_version()\n import os\n real_path = os.path.dirname(os.path.realpath(__file__))\n if self.is_aws():\n with open(real_path+'/../data/aws_cluster.json', 'r') as fp:\n cluster_json = json.loads(fp.read())\n else:\n with open(real_path+'/../data/azure_cluster.json', 'r') as fp:\n cluster_json = json.loads(fp.read())\n # set the latest spark release regardless of defined cluster json\n cluster_json['spark_version'] = version['key']\n c_info = self.post('/clusters/create', cluster_json)\n self.wait_for_cluster(c_info['cluster_id'])\n return c_info['cluster_id']", "def ecs_getClusterArn(region, cluster):\n client = boto3.client('ecs', region_name=region)\n response = client.describe_clusters(clusters=[cluster])\n\n logging.debug(\"ECS Cluster Details: %s\", response)\n if len(response['clusters']) == 1:\n return (response['clusters'][0]['clusterArn'])\n else:\n return ''", "def cluster_solution_id(dataset_name, cluster_solution_name):\n dataset_id = get_dataset(name=dataset_name).id\n cs_id = db.session.query(ClusterSolution)\\\n .filter(\n and_(\n 
ClusterSolution.dataset_id == dataset_id,\n ClusterSolution.name == cluster_solution_name\n )\n )[0].id\n return cs_id", "def cluster_info(self, target_nodes: Optional[\"TargetNodesT\"] = None) -> ResponseT:\n return self.execute_command(\"CLUSTER INFO\", target_nodes=target_nodes)", "def DescribeCluster(self, ResourceId):\n\n Client = boto3.client('emr')\n \n response = Client.describe_cluster (\n ClusterId = ResourceId\n\t)\n\n return response", "def search_cluster_by_node(self, target):\n for i in range(len(self.result)):\n cluster = self.result[i]\n for node in cluster.get_nodes():\n if target == node:\n return i\n return None", "def getClusterVmNextId(self):\n data = self.connect('get','cluster/nextid',None)\n return data", "def create_coe_cluster(\n self,\n name,\n cluster_template_id,\n **kwargs,\n ):\n cluster = self.container_infrastructure_management.create_cluster(\n name=name,\n cluster_template_id=cluster_template_id,\n **kwargs,\n )\n\n self.list_coe_clusters.invalidate(self)\n return cluster", "def get_datacenter_id(options):\n datacenter = get_datacenter(options)\n return datacenter._GetMoId()", "def get_ceph_clusters_by_pcc(conn: dict, id: str) -> dict:\n return get(conn, f\"{S3PCCS}/{id}/storage/clusters\")", "def get_cluster_info(self) -> Dict[str, Any]:\n pass", "def get_cluster_pool(cluster_pool_name: Optional[str] = None,\n resource_group_name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetClusterPoolResult:\n __args__ = dict()\n __args__['clusterPoolName'] = cluster_pool_name\n __args__['resourceGroupName'] = resource_group_name\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('azure-native:hdinsight/v20230601preview:getClusterPool', __args__, opts=opts, typ=GetClusterPoolResult).value\n\n return AwaitableGetClusterPoolResult(\n aks_cluster_profile=pulumi.get(__ret__, 'aks_cluster_profile'),\n aks_managed_resource_group_name=pulumi.get(__ret__, 'aks_managed_resource_group_name'),\n cluster_pool_profile=pulumi.get(__ret__, 'cluster_pool_profile'),\n compute_profile=pulumi.get(__ret__, 'compute_profile'),\n deployment_id=pulumi.get(__ret__, 'deployment_id'),\n id=pulumi.get(__ret__, 'id'),\n location=pulumi.get(__ret__, 'location'),\n log_analytics_profile=pulumi.get(__ret__, 'log_analytics_profile'),\n managed_resource_group_name=pulumi.get(__ret__, 'managed_resource_group_name'),\n name=pulumi.get(__ret__, 'name'),\n network_profile=pulumi.get(__ret__, 'network_profile'),\n provisioning_state=pulumi.get(__ret__, 'provisioning_state'),\n status=pulumi.get(__ret__, 'status'),\n system_data=pulumi.get(__ret__, 'system_data'),\n tags=pulumi.get(__ret__, 'tags'),\n type=pulumi.get(__ret__, 'type'))", "def data_center_name(self) -> str:\n return pulumi.get(self, \"data_center_name\")", "def show_cluster(self):\n if self.controller.cluster:\n self.print_object(\n 'cluster', ('id', 'name', 'status'), self.controller.cluster\n )\n else:\n print(\"There is no cluster.\")", "def get_cluster_properties(redshift_client):\n cluster_properties = redshift_client.describe_clusters(\n ClusterIdentifier=IDENTIFIER\n )['Clusters'][0]\n return cluster_properties", "def list_cluster_response():\n return {\n \"clusters\": [\n EXAMPLE_NAME\n ]\n }", "def get_cluster_def():\n if settings.NO_OP:\n return None\n\n ensure_in_custer()\n\n cluster = os.getenv('POLYAXON_CLUSTER', None)\n try:\n return json.loads(cluster) if cluster else None\n except (ValueError, TypeError):\n print('Could get 
cluster definition, '\n 'please make sure this is running inside a polyaxon job.')\n return None", "def cluster_kmeans(self, data, n_clusters):\n km = cl.KMeans(n_clusters)\n kmf = km.fit(data)\n\n labels = kmf.labels_\n\n return labels, [np.nan]", "def __str__(self):\n return \"Cluster\"", "def get_datacenter(options):\n content = get_vc_content(options)\n rootFolder = content.rootFolder\n for item in rootFolder.childEntity:\n if (options.datacenter == item.name):\n return item\n return None", "def list_clusters(self, **kwargs):\n return self._get_names('SCVMHostCluster')" ]
[ "0.70518404", "0.70052683", "0.67735595", "0.6768537", "0.6736824", "0.6736824", "0.6736824", "0.6736824", "0.6736824", "0.66624004", "0.66624004", "0.66624004", "0.66624004", "0.6613303", "0.660601", "0.660601", "0.6474703", "0.6474703", "0.6474703", "0.63924193", "0.63861185", "0.63270766", "0.6276351", "0.6269334", "0.62308496", "0.6221327", "0.62061673", "0.61876196", "0.6160735", "0.61367285", "0.61367285", "0.6121547", "0.6103939", "0.606107", "0.6054131", "0.6054131", "0.6031278", "0.6031278", "0.60063285", "0.5993482", "0.5970224", "0.5945154", "0.5939524", "0.593516", "0.5921245", "0.58835465", "0.5882657", "0.5878035", "0.58768255", "0.5869813", "0.5865723", "0.5855551", "0.5855551", "0.58481365", "0.58481365", "0.58481365", "0.58481365", "0.58481365", "0.5829678", "0.58203787", "0.5819459", "0.57718134", "0.5758876", "0.57381326", "0.5734544", "0.57087946", "0.56975925", "0.56702477", "0.5658413", "0.56578165", "0.56324726", "0.5627083", "0.56178856", "0.5616781", "0.56036544", "0.5601759", "0.5601101", "0.5597886", "0.5590274", "0.55648077", "0.5554557", "0.5547291", "0.5545548", "0.55238587", "0.5521631", "0.55111134", "0.5500737", "0.5494587", "0.54801834", "0.5477136", "0.54766876", "0.546071", "0.5446311", "0.5440939", "0.5437183", "0.54263484", "0.54172444", "0.54061675", "0.5401415", "0.5395855" ]
0.72311264
0