# coding: utf-8
from selenium import webdriver
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from selenium.webdriver.chrome.options import Options
# from selenium.webdriver.firefox.options import Options
import os
import json
import time


class PbcSpider:
    """Headless-browser spider for PBC (People's Bank of China) open
    market operation announcements.

    Drives a headless Chrome via Selenium to fetch list pages and parse
    per-announcement operation tables (reverse repo / TMLF / MLF).
    """

    def __init__(self):
        options = Options()
        options.headless = True
        options.add_argument('--no-sandbox')
        # Works around "DevToolsActivePort file doesn't exist" when
        # running inside containers with a small /dev/shm.
        options.add_argument('--disable-dev-shm-usage')
        # NOTE(review): `chrome_options` is the Selenium 3 keyword
        # (deprecated in Selenium 4 in favour of `options=`); kept
        # because this class also uses the Selenium 3 `find_*_by_*` API.
        self.browser = webdriver.Chrome(chrome_options=options)   # headless Chrome
        # self.browser = webdriver.Firefox(firefox_options=options)  # headless Firefox
        print("激活爬虫浏览器>>>")

    def __del__(self):
        """Shut the browser down when the spider is garbage-collected."""
        browser = getattr(self, 'browser', None)  # __init__ may have failed
        if browser is not None:
            # BUG FIX: close() only closes the current window and leaks
            # the chromedriver process; quit() terminates the driver too.
            browser.quit()
        print("<<<关闭爬虫浏览器")

    def get_one_page(self, url):
        """Navigate the browser to *url*, logging (not raising) timeouts."""
        try:
            self.browser.get(url)
        except TimeoutException:
            print('TimeOut: {}'.format(url))

    def get_url_list(self, url):
        """Return [{'title': ..., 'href': ...}] for every anchor on the
        news list page at *url*."""
        self.get_one_page(url)
        items = self.browser.find_elements_by_css_selector('.newslist_style a')
        # items = self.browser.find_elements_by_xpath('//*[@opentype="page"]//a')
        return [{"title": item.text, "href": item.get_attribute('href')}
                for item in items]

    def get_news_detail(self, url):
        """Parse one announcement page into a dict of operation tables.

        Returns a dict with 'DateTime', 'ZoomP' and lists of
        {'maturity', 'volume', 'rate'} rows under 'ReverseRepo', 'TMLF'
        and 'MLF'. Rows from unrecognised tables are dropped, so 'Other'
        always stays empty.
        """
        print('detail_url:', url)
        self.get_one_page(url)
        shijian = self.browser.find_element_by_id("shijian").text
        zoom_p = self.browser.find_element_by_css_selector('.zoom1 p').text

        table_infos = {'DateTime': shijian, 'ZoomP': zoom_p,
                       'ReverseRepo': [], 'TMLF': [], 'MLF': [], 'Other': []}
        try:
            tables = self.browser.find_elements_by_xpath('//div[@class="zoom1"]/table/tbody')
            for table_index, table in enumerate(tables):
                table_title = self.get_table_title(table_index)
                table_trs = table.find_elements_by_xpath('./tr')
                # The inner counter used to reuse the name `index`,
                # shadowing the outer table counter; renamed for clarity.
                for row_index, table_tr in enumerate(table_trs):
                    if row_index == 0:   # skip the header row
                        continue
                    table_tds = table_tr.find_elements_by_xpath('./td')

                    # Each cell's text is split across two nested spans.
                    td_maturity = table_tds[0].find_element_by_xpath(
                        './p/span[1]/span/span').text + table_tds[0].find_element_by_xpath('./p/span[2]/span').text
                    td_volume = table_tds[1].find_element_by_xpath(
                        './p/span[1]/span/span').text + table_tds[1].find_element_by_xpath('./p/span[2]/span').text
                    td_rate = table_tds[2].find_element_by_xpath('./p/span[1]/span/span').text

                    row = {'maturity': td_maturity, 'volume': td_volume, 'rate': td_rate}
                    if "逆回购" in table_title:
                        table_infos['ReverseRepo'].append(row)
                    elif "TMLF" == table_title:
                        table_infos['TMLF'].append(row)
                    elif "MLF" == table_title:
                        table_infos['MLF'].append(row)
        except NoSuchElementException:
            print('NoSuchElement: {}'.format(url))
            # PbcSpider.json_to_file('NoSuchElement: {}'.format(url), file_prefix='except')

        return table_infos

    def get_table_title(self, index):
        """Return the title of the *index*-th operation table on the
        current page, or '' when neither known layout matches."""
        try:
            # MLF / TMLF layout: title inside an aligned <p> per table.
            return self.browser.find_element_by_xpath(
                '//div[@id="zoom"]/p[@align="center"][{}]/span/strong/span/span'.format(index + 1)).text
        except NoSuchElementException:
            try:
                # Reverse-repo layout (single table, different nesting).
                # The original called .format(index+1) on this xpath even
                # though it has no placeholder; the no-op was removed.
                return self.browser.find_element_by_xpath(
                    '//div[@id="zoom"]/p[@align="center"]/strong/span/span/span').text
            except NoSuchElementException:
                return ""

    @staticmethod
    def test_list_html():
        """Read the saved list-page fixture next to this script."""
        # BUG FIX: os.path.basename() takes a required argument; the
        # original call raised TypeError. Resolve relative to this file.
        base_dir = os.path.dirname(os.path.abspath(__file__))
        with open(os.path.join(base_dir, 'backup_data', 'pbc-list.html'), 'r', encoding='utf-8') as f:
            return f.read()

    @staticmethod
    def test_detail_html():
        """Read the saved detail-page fixture next to this script."""
        # BUG FIX: same os.path.basename() TypeError as test_list_html.
        base_dir = os.path.dirname(os.path.abspath(__file__))
        with open(os.path.join(base_dir, 'backup_data', 'pbc-detail.html'), 'r', encoding='utf-8') as f:
            return f.read()

    @staticmethod
    def json_to_file(obj, file_prefix='demo'):
        """Append *obj* to backup_data/<prefix>.<YYYYMMDDHH>.json.

        The file holds one JSON array: lists are extended into it, any
        other JSON-serialisable value is appended as a single element.
        """
        dt = time.strftime('%Y%m%d%H', time.localtime())
        filename = 'backup_data/{0}.{1}.json'.format(file_prefix, dt)
        if not os.path.exists(filename):
            # Touch the file so the r+ open below cannot fail.
            with open(filename, 'w', encoding='utf-8'):
                pass

        with open(filename, 'r+', encoding='utf-8') as f:
            content = f.read()
            f.seek(0)
            f.truncate()
            content = json.loads(content) if content else []
            if isinstance(obj, list):
                content.extend(obj)
            else:
                # Previously only str payloads were appended and every
                # other type was silently dropped; keep any JSON value.
                content.append(obj)
            f.write(json.dumps(content, indent=2, ensure_ascii=False))


def main():
    """Crawl the PBC operation list pages and archive URLs and details."""
    spider = PbcSpider()

    base_url = "http://www.pbc.gov.cn/zhengcehuobisi/125207/125213/125431/125475/17081/index{page}.html"
    for page_no in range(34, 35):
        page_url = base_url.format(page=page_no)
        entries = spider.get_url_list(page_url)
        spider.json_to_file(entries, file_prefix='urls')

        details = []
        for entry in entries:
            details.append(spider.get_news_detail(entry.get('href')))
            time.sleep(2)  # throttle requests between detail pages

        spider.json_to_file(details, file_prefix='news')


def test():
    """Debug helper: parse a single known detail page and dump the result."""
    spider = PbcSpider()
    # url = 'http://www.pbc.gov.cn/zhengcehuobisi/125207/125213/125431/125475/4072750/index.html'
    # url = 'http://www.pbc.gov.cn/zhengcehuobisi/125207/125213/125431/125475/4075144/index.html'
    # url = 'file:///E:/Program_Code/python_spider/mystock/public/backup_data/pbc-detail.html'
    # url = 'http://www.pbc.gov.cn/zhengcehuobisi/125207/125213/125431/125475/4067627/index.html'  # no table
    # url = 'http://www.pbc.gov.cn/zhengcehuobisi/125207/125213/125431/125475/3659278/index.html'   # CMU
    url = 'http://www.pbc.gov.cn/zhengcehuobisi/125207/125213/125431/125475/3863899/index.html'   # TMLF + MLF
    parsed = spider.get_news_detail(url)
    serialized = json.dumps(parsed, indent=2, ensure_ascii=False)
    with open('backup_data/new_infos.json', 'w', encoding='utf-8') as out:
        out.write(serialized)
    print(parsed)


# Entry point: runs the full crawl by default; swap the comment to run
# the single-page debug helper instead.
if __name__ == '__main__':
    main()
    # test()
