import os
import re
from tqdm import tqdm
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
import time
from read_keyword import get_data
from judge import Judge
import filter

# Module-level variable holding the keyword currently being scraped
# (updated by spider() on every iteration).
# NOTE: a ``global`` statement at module scope is a no-op, so the original
# line never created the variable; initialize it explicitly so that
# ``module.search`` is always readable, even before spider() runs.
search = None


# 爬取数据
def _return_to_main(web, judge):
    """Close the child (detail) window if one is open and focus the main
    results window again.

    Safe to call whether the driver is currently focused on the child
    window or on the main one.
    """
    if judge.is_child_page(web):
        # Make sure the child window is the active one before closing it
        # (switching to a window that is already active is a no-op).
        web.switch_to.window(web.window_handles[1])
        web.close()
    web.switch_to.window(web.window_handles[0])


def _extract_title_and_abstract(web, judge):
    """Scrape ``(title, abstract)`` from the currently focused detail page.

    Handles both the Chinese and the English journal page layouts; falls
    back to placeholder strings when neither layout is recognized.
    """
    if judge.Chinese(web):
        # Chinese journal layout: title lives deep in the "doc" container.
        title = web.find_element(By.XPATH, '/html/body/div[@class="wrapper"]/div[@class="main"]'
                                           '/div[@class="container"]/div[@class="doc"]/div[@class='
                                           '"doc-top"]/div[@class="brief"]/div[@class="wx-tit"]'
                                           '/h1').text
        abstract = web.find_element(By.XPATH, '//*[@id="ChDivSummary"]').text
    elif judge.English(web):
        print("进到英文标题页面了")
        title = web.find_element(By.XPATH, '//*[@id="doc-title"]').text
        abstract = web.find_element(By.XPATH, '//*[@id="doc-summary-content-text"]').text
    else:
        title = "标题不存在！"
        abstract = '摘要不存在！'
    return title, abstract


def spider(web):
    """Crawl paper titles and abstracts for every keyword returned by
    ``get_data()`` and save each paper as ``./Abstract/<keyword>/<title>.txt``.

    :param web: a Selenium WebDriver already positioned on the search page.
    """
    # Page-layout / window-state oracle used throughout the crawl.
    j = Judge()
    # All ingredient keywords to search for.
    keywords_list = get_data()
    # Keywords already processed; a set gives O(1) membership tests
    # (the original list made this O(n) per keyword).
    have_searched = set()
    # Punctuation that is illegal or awkward in file names; compiled once
    # instead of being recompiled for every scraped paper.
    invalid_name_chars = re.compile(
        r"[.!+-=——,$%^，,。？?、~@#￥%……&*《》<>「」{}【】()/\\\[\]'\"]")
    # The very first search uses a different input box than later searches.
    first_search = True
    for key_search in keywords_list:
        # Expose the keyword currently being scraped at module level.
        global search
        search = key_search
        # Skip keywords we have already crawled.
        if key_search in have_searched:
            continue
        have_searched.add(key_search)
        # One folder per ingredient.  makedirs also creates ./Abstract when
        # missing (os.mkdir would raise FileNotFoundError) and exist_ok
        # avoids a race with a pre-existing folder.
        file_path = './Abstract/' + key_search
        os.makedirs(file_path, exist_ok=True)

        if first_search:
            web.find_element(By.XPATH, '//*[@id="txt_SearchText"]').send_keys(key_search, Keys.ENTER)
            first_search = False
        else:
            search_box = web.find_element(By.XPATH, '//*[@id="txt_search"]')
            search_box.clear()
            search_box.send_keys(key_search, Keys.ENTER)
        # Let the result list load, then apply the year filter.
        time.sleep(3)
        filter.Choose_year(web)
        time.sleep(8)
        # Paginate through the result list until there is no next page.
        page_count = 0
        while True:
            try:
                time.sleep(2)
                page_count += 1
                # One <tr> per paper in the current result page.
                tr_list = web.find_elements(By.XPATH, '//*[@id="gridTable"]/table/tbody/tr')
                print(f'{key_search} : 正在爬取第{page_count}页!')
                for tr in tqdm(tr_list):
                    time.sleep(1)
                    # Open the paper's detail page in a child window.
                    tr.find_element(By.XPATH, './td[@class="name"]/a').click()
                    if j.is_child_page(web):
                        web.switch_to.window(web.window_handles[1])
                    else:
                        print('*' * 100)
                        print('未打开新窗口')
                        time.sleep(10)
                        continue
                    time.sleep(4)
                    title, abstract = _extract_title_and_abstract(web, j)
                    # Strip characters that cannot appear in file names.
                    title_copy = invalid_name_chars.sub(' ', title)
                    # Write "<title>\n<abstract>"; an unwritable file name
                    # is reported and skipped, and the child window is
                    # closed in every case (finally replaces the two
                    # duplicated cleanup branches of the original).
                    try:
                        with open(file_path + '/{0}.txt'.format(title_copy), 'w', encoding='utf-8') as f:
                            f.write(title)
                            f.write('\n')
                            f.write(abstract)
                    except Exception:
                        print('文件名称Invalid !')
                    finally:
                        _return_to_main(web, j)
                if j.Next_page(web):
                    web.find_element(By.XPATH, '//*[@id="PageNext"]').click()  # advance by clicking "next"
                else:
                    break
                time.sleep(3)
            except Exception:
                # Narrowed from a bare ``except:`` so KeyboardInterrupt /
                # SystemExit still propagate and the crawl can be aborted.
                print('定位失败！当前在第{}页！'.format(page_count))
                # Recover: drop any stray child window, go back to the
                # main window, and try to move on to the next page.
                _return_to_main(web, j)
                if j.Next_page(web):
                    web.find_element(By.XPATH, '//*[@id="PageNext"]').click()
                    time.sleep(5)
                else:
                    break


