# -*- coding:utf-8 -*-

# @Time : 2023/5/15 10:05
# @Author : 快乐的小猴子
# @Version : 
# @Function : 政策

import os
from zhengwuinfos.conf import settings
from zhengwuinfos.BaseInfos import BaseInfos


class Policy(BaseInfos):
    """Scraper for the "policy" (政策) category of government information.

    Uses the request helpers inherited from ``BaseInfos`` (``get_url`` /
    ``send_reqs``) to page through the remote API, flattens every entry
    into a plain dict, and writes one UTF-8 ``.doc`` file per entry.
    """

    # Channel id of the policy category in the remote API.
    # Previously duplicated as a literal in get_page_cnt and handle_all_datas.
    CHANNEL_ID = 'd5c69b3f67334ed984a43e864f42f1ab'

    def verify_path(self, addr):
        """Ensure the output directory for *addr* exists and return its path.

        The returned path ends with the platform separator so callers can
        concatenate a file name directly.
        """
        # os.path.join is portable; the original hard-coded Windows '\\'
        # separators, which on POSIX produced one oddly-named directory
        # instead of a nested tree. Joining with '' appends a trailing sep.
        path = os.path.join(os.getcwd(), 'zhengwuinfos', 'files', addr, '')
        # exist_ok avoids the check-then-create race of the original
        # os.path.exists() guard.
        os.makedirs(path, exist_ok=True)
        return path

    def get_page_cnt(self):
        """Return the total number of result pages reported by the API."""
        url = self.get_url(1, self.CHANNEL_ID)
        resp = self.send_reqs(url)
        total = resp['data']['total']
        rows = resp['data']['rows']
        # Integer ceiling division: one extra page for a partial last page.
        return -(-total // rows)

    def handle_datas(self, resp):
        """Extract all entries from a single response page.

        Returns a list of dicts with keys ``title``, ``pubTime``,
        ``down_url``, ``subTitle`` and ``content``.
        """
        one_page_data_list = []
        for item in resp['data']['results']:
            res_list = item['resList']
            if res_list:
                # The API returns a site-relative file path; prefix it with
                # the scheme+host portion of the configured base URL.
                down_url = settings.url.rsplit('/', 2)[0] + res_list[0]['filePath']
            else:
                down_url = ''
            one_page_data_list.append({
                'title': item['title'],
                'pubTime': item['publishedTimeStr'],
                'down_url': down_url,
                'subTitle': item['subTitle'],
                'content': item['content'],
            })
        return one_page_data_list

    def handle_all_datas(self, page_cnts):
        """Fetch pages 1..page_cnts and return every entry as one flat list."""
        all_datas_list = []
        for page in range(1, page_cnts + 1):
            # self.get_url / self.send_reqs for consistency with get_page_cnt
            # (the original mixed super() and self; Policy overrides neither).
            url = self.get_url(page, self.CHANNEL_ID)
            resp = self.send_reqs(url)
            for idx, row in enumerate(self.handle_datas(resp), start=1):
                all_datas_list.append(row)
                print('第 {} 页 第 {} 条数据爬取成功 ^_^'.format(page, idx))
        return all_datas_list

    def save_policy_files(self, all_datas_list):
        """Write one UTF-8 ``.doc`` file per entry into the '政策' directory.

        NOTE(review): titles are used verbatim as file names; a title
        containing characters illegal on the target filesystem (e.g. '/')
        will raise OSError — confirm whether titles are pre-sanitized.
        """
        out_dir = self.verify_path('政策')
        for row in all_datas_list:
            file_name = row['title']
            with open(out_dir + file_name + '.doc', 'w', encoding='UTF-8') as fp:
                fp.write('\t\t\t\t' + file_name + '\t' + row['pubTime'] + '\t' + row['down_url'] + '\n')
                fp.write('\t\t' + row['subTitle'] + '\n')
                fp.write('\t\t' + row['content'] + '\n')
            print('Success {} 文件已保存在 {} 路径下！！！'.format(file_name + '.doc', out_dir))

    def policy_main(self):
        """Entry point: scrape every policy page and persist all entries."""
        print('政策类数据正在爬取中：')
        page_cnts = self.get_page_cnt()
        all_datas_list = self.handle_all_datas(page_cnts)
        self.save_policy_files(all_datas_list)

# policy = Policy()
# policy.policy_main()