# -*- coding:utf-8 -*-

# @Time : 2022/11/22 15:11
# @Author : 快乐的小猴子
# @Version : 
# @Function :

import conf
from HomePageDatas import HomePageDatas
from lxml import etree


class GujinHistory(HomePageDatas):
    """Crawler for the "gujin" history site.

    Walks the topic map scraped from the home page (see ``HomePageDatas``),
    collects every article URL under each sub-topic (following pagination),
    downloads each article and saves it as a UTF-8 ``.txt`` file.
    """

    def __init__(self, datas_list):
        # datas_list: list of {topic_name: [topic_url, [(son_name, son_href), ...]]}
        # mappings produced by the home-page scrape — TODO confirm against
        # HomePageDatas, which is defined outside this file.
        self.datas_list = datas_list

    def get_article_detail_datas(self, resp):
        """Extract (title, body) from an article page's HTML.

        :param resp: raw HTML of one article page.
        :return: tuple of (title string, body text). A newline is inserted
                 after each Chinese full stop so paragraphs stay readable
                 in the saved file.
        """
        et = etree.HTML(resp)
        article_title = et.xpath('//div[@class="pageleft fl"]/div[1]/h1/text()')[0]
        article_conts = et.xpath('//div[@class="pageleft fl"]/div[3]/div/p/text()')
        article_conts_new = ''.join(article_conts).replace('。', '。\n')
        return article_title, article_conts_new

    def save_file(self, file_path, article_title, article_conts):
        """Write one article to ``<file_path><sanitized title>.txt``.

        Best-effort: a failed save is reported (with its cause) but never
        aborts the crawl.
        """
        try:
            # Drop characters that are awkward or illegal in file names.
            safe_title = article_title.replace('、', '').replace('?', '')
            file_name = file_path + '{}.txt'.format(safe_title)
            with open(file_name, 'w', encoding='utf-8') as fs:
                fs.write(article_conts)
            print('{} save success!!!'.format(article_title))
        except Exception as e:
            # Include the exception so the failure cause is not silently lost.
            print('{} 保存失败！！！{}'.format(article_title, e))

    def get_one_article_datas(self, url):
        """Fetch one article URL and return its (title, body) pair."""
        resp = self.send(url)
        return self.get_article_detail_datas(resp)

    def get_son_all_article_urls(self, son_url):
        """Collect absolute URLs of all articles under one sub-topic.

        Reads the first listing page for its article links and the total
        page count, then walks pages 2..total via the pagination link.
        """
        base = conf.url.strip().rsplit('/', 1)[0]
        son_all_article_urls_list = []
        et = etree.HTML(self.send(son_url))
        # Page 1 lists articles directly under <li><a>.
        ydqw_href = et.xpath('//div[@class="pageleft fl"]/ul/li/a/@href')
        total_page = et.xpath('//div[@class="text-center"]/ul/strong[1]/text()')[0]
        # Pagination link for the last page; its "_<n>" suffix is swapped
        # below to reach every other page.
        page_href = et.xpath('//div[@class="text-center"]/ul/li[7]/a/@href')[0]
        son_all_article_urls_list.extend(base + href for href in ydqw_href)
        for page in range(2, int(total_page) + 1):
            page_url = base + page_href.replace('_{}'.format(total_page),
                                               '_{}'.format(page))
            page_et = etree.HTML(self.send(page_url))
            # Pages 2+ nest the article link one level deeper (<li><h3><a>).
            cont_href = page_et.xpath('//div[@class="pageleft fl"]/ul/li/h3/a/@href')
            son_all_article_urls_list.extend(base + href for href in cont_href)
        print('son_all_article_urls_list: ', len(son_all_article_urls_list), son_all_article_urls_list)
        return son_all_article_urls_list

    def get_son_all_article_data(self, fu_name, son_name, son_url, limit=33):
        """Download and save the articles of one sub-topic (e.g. Asian
        history, European history, ...).

        :param limit: maximum number of articles to fetch (default 33,
                      matching the original cap); pass ``None`` for all.
                      Clamped to the number of URLs actually found, so a
                      short listing no longer raises ``IndexError``.
        """
        # Ensure the destination directory exists (created if missing).
        file_path = self.verf_file_path(fu_name, son_name)
        urls = self.get_son_all_article_urls(son_url)
        count = len(urls) if limit is None else min(limit, len(urls))
        for url in urls[:count]:
            article_title, article_conts = self.get_one_article_datas(url)
            self.save_file(file_path, article_title, article_conts)

    def get_world_all_datas(self, num, son_limit=2):
        """Crawl one home-page topic and its first ``son_limit`` sub-topics.

        :param num: index into ``self.datas_list`` selecting the topic.
        :param son_limit: how many sub-topics to crawl (default 2, matching
                          the original restriction); ``None`` for all.
        """
        # Topic name and url from the home-page nav, e.g. 世界历史 /world/.
        fu_name = list(self.datas_list[num].keys())[0]
        fu_url = list(self.datas_list[num].values())[0][0]
        print('name, url: ', fu_name, fu_url)
        son_data_list = list(self.datas_list[num].values())[0][1]
        base = conf.url.strip().rsplit('/', 1)[0]
        count = (len(son_data_list) if son_limit is None
                 else min(son_limit, len(son_data_list)))
        for entry in son_data_list[:count]:
            son_name = entry[0]
            son_url = base + entry[1]
            print('son_name, son_url: ', son_name, son_url)
            # Fetch everything under this sub-topic and save it.
            self.get_son_all_article_data(fu_name, son_name, son_url)

    def gj_main(self):
        """Entry point: crawl every topic index listed in ``conf.map_ref``."""
        for key in conf.map_ref:
            self.get_world_all_datas(int(key))

