#!/usr/bin/env python3

'''
    匹配paper所涉及的所有领域的original领域
'''

import json
import random
import time

import pymongo

from config import *
from get_micro_search_data import *
# from selenium import webdriver
# from selenium.webdriver.common.by import By
# from selenium.webdriver.support.ui import WebDriverWait
# from selenium.webdriver.support import expected_conditions as EC
# from selenium.common.exceptions import TimeoutException


#连接mongodb
client = pymongo.MongoClient(MONGO_URL)
#连接field_name库
db_field_name_info = client[MONGO_FIELD_NAME_DB]
#连接paper_program_data_info库
db_paper_data_info = client[MONGO_PAPER_FIELD_NAME_DB]

#启动sellinium
# browser = webdriver.Chrome()
# wait = WebDriverWait(browser,10)
# browser.get("https://academic.microsoft.com/search?q=composite%20material&f=&orderBy=0&skip=0&take=10")
# browser.maximize_window()
# input = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR,'#search-input')))
# submit = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR,'#SearchBoxSubmit')))



'''
    匹配field_name的最开始的领域
    param:  field_name_data_info
    return: (1)匹配失败的添加到failed_match_field_name表中
            (2)匹配成功的将每个匹配成功的原始领域的数据添加到dumplication_field_info表中
'''
def match_field_name(field_name_data_info):
    matched_list = []
    # print(11111111111)
    if len(list(db_field_name_info['field_name_five'].find({"name":field_name_data_info.get("FN").capitalize()}))):
        # print(1)
        match_field_name_data = db_field_name_info['field_name_five'].find({"name":field_name_data_info.get("FN").capitalize()})
        for match_data in match_field_name_data:
            print(match_data)
            matched_list.append(match_data.get('ori_name'))
        matched_list = list(set(matched_list))

        #将匹配完成后的信息保存到matched_field_name_info中
        for data in matched_list:
            db_paper_data_info['matched_field_name_info'].save({"name":field_name_data_info.get("FN"),"ori_name":data})

    elif len(list(db_field_name_info['field_name_five'].find({"four_name":field_name_data_info.get("FN").capitalize()}))):
        print(2)
        match_field_name_data = db_field_name_info['field_name_five'].find({"four_name":field_name_data_info.get("FN").capitalize()})
        for match_data in match_field_name_data:
            matched_list.append(match_data.get('ori_name'))
        matched_list = list(set(matched_list))

        #将匹配完成后的信息保存到matched_field_name_info中
        for data in matched_list:
            db_paper_data_info['matched_field_name_info'].save({"name":field_name_data_info.get("FN"),"ori_name":data})

    elif len(list(db_field_name_info['field_name_five'].find({"thr_name":field_name_data_info.get("FN").capitalize()}))):
        print(3)
        match_field_name_data = db_field_name_info['field_name_five'].find({"thr_name":field_name_data_info.get("FN").capitalize()})
        for match_data in match_field_name_data:
            matched_list.append(match_data.get('ori_name'))
        matched_list = list(set(matched_list))

        #将匹配完成后的信息保存到matched_field_name_info中
        for data in matched_list:
            db_paper_data_info['matched_field_name_info'].save({"name":field_name_data_info.get("FN"),"ori_name":data})

    elif len(list(db_field_name_info['field_name_five'].find({"sec_name":field_name_data_info.get("FN").capitalize()}))):
        print(4)
        match_field_name_data = db_field_name_info['field_name_five'].find({"sec_name":field_name_data_info.get("FN").capitalize()})
        for match_data in match_field_name_data:
            matched_list.append(match_data.get('ori_name'))
        matched_list = list(set(matched_list))

        #将匹配完成后的信息保存到matched_field_name_info中
        for data in matched_list:
            db_paper_data_info['matched_field_name_info'].save({"name":field_name_data_info.get("FN"),"ori_name":data})

    elif len(list(db_field_name_info['field_name_five'].find({"ori_name":field_name_data_info.get("FN").capitalize()}))):
        print(5)
        match_field_name_data = db_field_name_info['field_name_five'].find({"ori_name":field_name_data_info.get("FN").capitalize()})
        for match_data in match_field_name_data:
            matched_list.append(match_data.get('ori_name'))
        matched_list = list(set(matched_list))

        #将匹配完成后的信息保存到matched_field_name_info中
        for data in matched_list:
            db_paper_data_info['matched_field_name_info'].save({"name":field_name_data_info.get("FN"),"ori_name":data})

    else:
        print(22222222)
        db_paper_data_info['failed_match_field_name'].save(field_name_data_info)


'''
    将failed_match_field_name表中的数据重新在微软学术搜索中搜索匹配到对应的ori_name
    param:  failed_field_name(匹配失败的数据)
    return: 从微软学术搜索中匹配到的数据，搜索成功后保存到matched_field_name_info中
'''
def match_failed_match_field_name(failed_field_name):

    data = get_micro_search_data(failed_field_name)
    ori_field_list,result_status = data.use_micro_search_interface()
    print("failed_field_name----------------",failed_field_name)
    fail_list = []
    #如果返回搜索状态为200，则将数据保存到matched_field_name_info表中
    if len(ori_field_list):
        for ori_item in ori_field_list:
            db_paper_data_info['matched_field_name_info_two'].save({"name":failed_field_name,"ori_name":ori_item})
        return None
    else:
        return failed_field_name

def micro_search(name):
    try:
        time.sleep(random.randint(5,7))
        input.clear()
        input.send_keys(name)
        submit.click()
        return None
    except:
        time.sleep(3)
        return micro_search(name)

    # browser.find_element_by_xpath('//*[@id="search-input"]').send_keys('composite material')
    # browser.find_element_by_xpath('//*[@id="SearchBoxSubmit"]').click()

def main():
    '''
        下面是匹配field_name_five中的的步骤
    '''
    # dumplication_field_info = db_paper_data_info['dumplication_field_info'].find({}).batch_size(1)
    # dumplication_field_info = list(dumplication_field_info)
    #
    # pool = Pool(processes = 6)
    # match_result = pool.map(match_field_name,dumplication_field_info)

    '''
        下面是匹配failed_match_field_name的步骤
    '''
    #获取到失败的数据
    # failed_match_list = db_paper_data_info['dumplication_field_info'].find({}).skip(60000).limit(15000)
    #
    # failed_list = []
    # for failed_match_item in failed_match_list:
    #     failed_list.append(failed_match_item.get("FN"))
    #
    # #开启多线程
    # pool = Pool(processes = 20)
    # failed_list = pool.map(match_failed_match_field_name,failed_list)
    # failed_list = list(filter(None, failed_list))
    #
    # for failed_item in failed_list:
    #     db_paper_data_info['failed_field_name_info_two'].save({"name":failed_item})
    # print("结束了233333333")

    '''
        下面是模拟浏览器，使用sellinium匹配failed_field_name_info_two中的领域
    '''
    # browser = webdriver.chrome()
    failed_match_list = db_paper_data_info['failed_field_name_info_two'].find({}).skip(0).limit(10)

    # list = []

    for failed_match_item in failed_match_list:
        data = get_micro_search_data(failed_match_item['name'])
        ori_field_list,result_status = data.use_micro_search_interface()
        print(result_status)






if __name__ == '__main__':
    main()
