# -*- coding: utf-8 -*-
# @Time : 2020/7/17 14:04
# @Author : haos
# @File : spider.py
# @Software : PyCharm
import os
import re
import shutil
import time

import pymongo
import xlwt
from selenium import webdriver

from settings import Settings


def main():
    """Entry point: scrape links, persist new ones, then download the files."""
    # Collect every month link from the site (1 = USD tab).
    all_links = get_links(1)
    # Insert only links that are not yet stored.
    increment_insert(all_links)
    # Scrape page tables for links not yet fetched.
    # get_info_by_link()
    # Pages before 2020 have no download link; record Excel links for 2020+.
    save_download_link_info()

    download_file_and_rename({'isRename': 0})


# Download the files matching `select` from the downloadLinkInfo collection and
# move/rename each one into download/<currency>/<year>/<month>/.
# Typical filters: {'isRename': 0} or {'year': '2020', 'isDownload': 0}
def download_file_and_rename(select):
    """Download every pending Excel file and file it under its year/month folder.

    :param select: MongoDB filter dict for the downloadLinkInfo collection.
    """
    driver = get_driver()
    client2 = get_client()['downloadLinkInfo']
    links = get_need_links(client2, select)
    try:
        for link in links:
            # Navigating to the URL triggers Chrome's download into 'download/'.
            driver.get(link['downloadLink'])
            time.sleep(2)  # crude wait for the download — TODO: poll for the file instead
            # Strip characters that are awkward/illegal in file names.
            title = re.sub(r'(^\(\d*\)|:|：|\s*|/)', '', link['title'])
            path = 'download/'+link['currency']+'/' + link['year'] + '/' + link['month']
            # fix: replaces the exists/else/makedirs dance
            os.makedirs(path, exist_ok=True)
            # Chrome saved the file under its URL basename; move and rename it.
            shutil.move('download/' + str(link['downloadLink']).split('/')[-1],
                        path + '/' + title + '.xls')
            client2.find_one_and_update(link, {'$set': {'isDownload': 1, 'isRename': 1}})
    finally:
        driver.quit()  # fix: the browser was previously never closed (resource leak)


# For each linkInf document (year >= 2020, valid, download link not yet fetched),
# open the page, store its Excel download link in downloadLinkInfo, and mark the
# linkInf document as processed (isGetDownload=1).
def save_download_link_info():
    """Collect Excel download links for 2020+ pages and persist them."""
    db = get_client()  # fix: reuse one database handle instead of opening two clients
    link_col = db["linkInf"]
    download_col = db["downloadLinkInfo"]
    for link in link_col.find({'year': {'$gte': '2020'}, 'valid': 1, 'isGetDownload': 0}).batch_size(20):
        # Open the page and extract its download link.
        info = get_download_link_info(link)
        # Insert only if this (year, currency, month, title) combo is new.
        count = download_col.count_documents({'year': info['year'],
                                              'currency': info['currency'],
                                              'month': info['month'],
                                              'title': info['title']})
        if count == 0:
            download_col.insert_one(info)
        # Mark the source link as processed either way.
        link_col.find_one_and_update(link, {'$set': {'isGetDownload': 1}})


# Scrape the page data of every not-yet-fetched link, export each page to an
# Excel file, then flag the link as fetched (exist=1).
def get_info_by_link(select, batch):
    """Export page tables to Excel for every link matching `select`.

    :param select: MongoDB filter dict for the linkInf collection.
    :param batch: cursor batch size.
    """
    collection = get_client()["linkInf"]
    cursor = collection.find(select).batch_size(batch)
    for link in cursor:
        # Dump the page's table into an .xls file.
        export_excel(link)
        # Mark this link as scraped.
        collection.find_one_and_update(link, {'$set': {'exist': 1}})


# Incrementally insert links into the linkInf collection: only links not
# already stored are inserted, initialized with their status flags.
def increment_insert(links):
    """Insert the not-yet-stored links into linkInf with default status flags.

    :param links: list of link dicts (year/currency/title/month/url) as
                  produced by get_links().

    fix: the original removed items from `links` while iterating it, which
    skips the element after each removal — some existing links were re-checked
    never, then re-inserted without their status fields. Build a fresh list
    of genuinely-new links instead.
    """
    client = get_client()['linkInf']
    new_links = []
    for link in links:
        if client.count_documents(link) == 0:
            # Unknown link: initialize its status flags.
            # valid: 1 = the link's data should be crawled, 0 = ignore
            link['valid'] = 1
            # exist: 1 = already crawled, 0 = not yet crawled
            link['exist'] = 0
            link['isGetDownload'] = 0
            new_links.append(link)
    if new_links:
        client.insert_many(new_links)
        print('所有链接插入完成共%d条' % len(new_links))
    else:
        print('无新增链接需要插入')


# Fetch the documents that still need processing.
def get_need_links(client, select):
    """Return a cursor over the documents matching `select`.

    The batch size is pinned to 5 to work around
    "CursorNotFound: cursor id ... not found" errors seen on long-running
    iterations with larger batches.

    :param client: pymongo collection to query.
    :param select: MongoDB filter dict.
    :return: pymongo cursor with batch_size(5) applied.
    """
    return client.find(select).batch_size(5)


# Scrape every month link on the statistics page.
# index 0 selects the RMB (人民币) tab, 1 the USD (美元) tab.
def get_links(index):
    """Collect all month links for every year under the chosen currency tab.

    :param index: 0 for RMB, 1 for USD; values outside [0, 1] are clamped.
    :return: list of dicts with keys 'year', 'currency', 'title', 'month', 'url'.
    """
    # Clamp to the two available currency tabs.
    if index > 0:
        index = 1
    elif index < 0:
        index = 0
    url = Settings().url
    driver = get_driver()
    link_infs = []
    try:
        driver.get(url)
        name = driver.find_element_by_class_name('tbtjyear_span')
        spans = name.find_elements_by_tag_name("span")
        # One span per year tab: 2020, 2019, ...
        for span in spans:
            year = span.text[0:4]
            span.click()
            time.sleep(3)  # let the year tab render
            currency = driver.find_element_by_class_name('tbtj'+year+'_span').find_elements_by_tag_name('span')[index]
            currency_text = currency.text
            currency.click()
            time.sleep(1)  # let the currency tab render
            # All table names and month links under this year.
            tbody = driver.find_elements_by_class_name('tbtj' + year + '_tab')[index].find_element_by_tag_name('tbody')
            trs = tbody.find_elements_by_tag_name("tr")
            # Skip the header row.
            for tr in trs[1:]:
                tds = tr.find_elements_by_tag_name("td")
                if len(tds) == 0:
                    continue
                # First cell: table name; second cell: month links.
                tab_name = tds[0].text
                links = tds[1].find_elements_by_class_name('blue')
                for link in links:
                    link_inf = {}
                    url = link.get_attribute('href')
                    # Month text doubles as the Excel sheet name later.
                    month = link.text
                    link_inf['year'] = year
                    link_inf['currency'] = str(currency_text).replace(' ', '')
                    link_inf['title'] = tab_name
                    link_inf['month'] = month
                    link_inf['url'] = url
                    link_infs.append(link_inf)
    finally:
        # fix: the browser leaked whenever a selenium call raised; quit() also
        # ends the whole session rather than closing only the current window.
        driver.quit()
    print('获取页面链接完成，共得到%d条链接' % len(link_infs))
    return link_infs


# Lazily-created, process-wide database handle.
# fix: the original built a brand-new MongoClient (with its own connection
# pool) on every call; pymongo recommends one client per process.
_links_db = None


# Get the MongoDB 'links' database.
def get_client():
    """Return the 'links' database, creating the MongoClient only once."""
    global _links_db
    if _links_db is None:
        # e.g. 'mongodb://localhost:27017/'
        _links_db = pymongo.MongoClient(Settings().mongo_driver)["links"]
    return _links_db


# Build a configured Chrome browser driver.
def get_driver():
    """Create and return a Chrome WebDriver configured from Settings.

    Applies download prefs, disables the automation infobar, injects the
    anti-detection script, and maximizes the window.
    """
    opts = webdriver.ChromeOptions()
    opts.add_experimental_option('prefs', Settings().prefs)
    opts.add_argument("disable-infobars")
    # Headless mode (currently disabled):
    # opts.add_argument("headless")
    opts.add_argument("no-sandbox")
    browser = webdriver.Chrome(executable_path=Settings().chrome_driver, options=opts)
    # Run the configured script before every page load (anti-bot evasion).
    browser.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", Settings().addScript)
    browser.maximize_window()
    return browser


# Export one page's table to an Excel file.
def export_excel(link_inf):
    """Scrape the table at link_inf['url'] and save it as an .xls file.

    The file is written to <currency>/<year>/<month>/<title>.xls.

    :param link_inf: dict with keys 'url', 'title', 'currency', 'year', 'month'.
    """
    driver2 = get_driver()
    workbook = xlwt.Workbook(encoding='utf-8')
    url = link_inf['url']
    # Strip characters that are awkward/illegal in sheet and file names.
    title = re.sub(r'(^\(\d*\)|:|：|\s*|/)', '', link_inf['title'])
    worksheet = workbook.add_sheet(title, cell_overwrite_ok=True)
    try:
        analysis_month(driver2, url, worksheet)
    finally:
        driver2.close()  # fix: close the browser even if scraping raises

    path = link_inf['currency'] + '/' + link_inf['year'] + '/' + link_inf['month']
    # fix: replaces the exists/else/makedirs dance
    os.makedirs(path, exist_ok=True)

    name = title + '.xls'
    workbook.save(path+'/'+name)
    print('文件%s,已保存在%s路径：' % (name, path))


# Parse one month page's data table into the given worksheet.
def analysis_month(driver, url, websheet):
    """Scrape the HTML table at `url` and copy it cell-by-cell into `websheet`,
    reproducing rowspan/colspan merges via xlwt's write_merge.

    :param driver: selenium WebDriver used to load the page
    :param url: month-page URL; the table lives in the #easysiteText element
    :param websheet: xlwt worksheet written into (output starts at row 1)
    """
    driver.get(url)
    trs = driver.find_element_by_id('easysiteText').find_elements_by_tag_name('tr')
    i = 1  # output row index; row 0 is intentionally left empty
    style = xlwt.XFStyle()  # style object used for merged cells
    al = xlwt.Alignment()
    al.horz = 0x02  # horizontal center
    al.vert = 0x01  # vertical center
    style.alignment = al
    ro = 0  # rows remaining under an active rowspan; shifts later cells right
    co = 0  # NOTE(review): assigned below but never read — leftover state?
    for tr in trs:
        tds = tr.find_elements_by_tag_name('td')
        j = 0  # output column index within this row
        for td in tds:
            row = td.get_attribute('rowspan')
            col = td.get_attribute('colspan')
            # Strip thousands separators and dashes from the cell text.
            text = re.sub(',|-', '', str(td.text))
            # Merge cases:
            # 1. rowspan only
            # 2. colspan only
            # 3. both rowspan and colspan
            if row and col:  # merged rows AND columns (marked "to do" by author)
                websheet.write_merge(i, i + int(row) - 1, j, j + int(col) - 1, text, style)
                j = j + int(col)
                co = int(col) - 1

                # i =
            elif row:  # merged rows (marked "to do" by author)
                # print('row=%d' % int(row))
                websheet.write_merge(i, i+int(row)-1, j, j, text, style)
                j = j + 1
                ro = int(row)
                # i =
            elif col:  # merged columns (handled)
                # print('col=%d' % int(col))
                websheet.write_merge(i, i, j, j + int(col) - 1, text, style)
                j = j + int(col)
            else:
                if ro == 0:
                    websheet.write(i, j, text)
                    j = j + 1
                else:
                    # A rowspan from a previous row occupies column j; write
                    # one column to the right.
                    websheet.write(i, j+1, text)
                    j = j + 1
            # print(td.text, end="\t")
        if ro > 0:
            ro = ro - 1  # one more row of the active rowspan consumed
        i = i + 1
        # print()


# Open the page and extract its Excel download link plus metadata.
def get_download_link_info(link_inf):
    """Visit link_inf['url'] and build a downloadLinkInfo document.

    :param link_inf: linkInf document with 'url', 'title', 'currency',
                     'year' and 'month' keys.
    :return: dict ready for insertion into the downloadLinkInfo collection.
    """
    driver2 = get_driver()
    try:
        driver2.get(link_inf['url'])
        # The download anchor's text starts with a '【' bracket.
        download_link = driver2.find_element_by_partial_link_text('【')
        url = download_link.get_attribute('href')
    finally:
        # fix: the browser leaked whenever the anchor was missing and
        # find_element raised.
        driver2.close()

    download_link_info = {
        'title': link_inf['title'],
        'currency': link_inf['currency'],
        'year': link_inf['year'],
        'month': link_inf['month'],
        # the actual Excel download URL
        'downloadLink': url,
        # status flags: 0 = pending
        'isDownload': 0,
        'isRename': 0,
    }
    print('页面对应的下载链接信息已经获取')

    return download_link_info


# Run the scraper when executed as a script.
if __name__ == '__main__':
    main()
