# Data-fetching class for the Anhui Province Public Resource Trading Information Network
# by jjf 2023-11-6

import json
import html
from bs4 import BeautifulSoup
from models.projectindex import ProjectIndex
from models.project import Project
from public.http_method.http_method_form import http_post
from public.string_utils import *
from public.content_indentify import *
from downloaders.downloader import Downloader



class Downloader_PR_AH(Downloader):
    """Downloader for the Anhui Province Public Resource Trading Network.

    Queries the site's search API (POST https://ggzy.ah.gov.cn/souSuo) and
    converts each returned record into a ProjectIndex entry.
    """

    def __init__(self):
        """Initialize with the site's search endpoint and default query params."""
        # API endpoint of the search interface:
        url = 'https://ggzy.ah.gov.cn/souSuo'
        # Default POST body parameters for the search request:
        params = {
            "currentPage": "1",  # current result page (string, starts at "1")
            "region": "",
            "type": "",
            "contenttype": "",
            "param": "遥感+中标公告",  # search keyword(s)
            "timeParam": "all",  # time-filter mode
            "startTime": "2022-1-1",  # query window start
            "endTime": "2022-12-31",  # query window end
            "order": "score"  # sort order
        }
        super().__init__("https://ggzy.ah.gov.cn", "安徽省公共资源交易信息网", "安徽省",
                         url, "POST")
        self.set_params(params)

    def set_limit_time(self, starttime, endtime):
        """Restrict the query time window.

        Args:
            starttime: window start (same string format the API expects).
            endtime: window end.

        Returns:
            dict: the updated request params.
        """
        self.params['startTime'] = self.limit_time['starttime'] = starttime
        self.params['endTime'] = self.limit_time['endtime'] = endtime
        return self.params

    def set_keyword(self, keyword):
        """Set the search keyword and return the updated request params."""
        self.params['param'] = keyword
        return self.params

    def try_get_datas(self):
        """Debug helper: fetch page 1 and dump title/paragraphs/links to a file.

        NOTE(review): writes to a hard-coded local path; intended only for
        manual smoke-testing of the endpoint.
        """
        # Work on a copy so probing does not disturb the live paging state.
        params = self.params.copy()
        params['currentPage'] = 1
        print(self.url)
        # BUG FIX: the original posted self.params, silently ignoring the
        # copy whose page had just been forced to 1 (and left an unused
        # leftover variable `rn`).
        response = http_post(self.url, data=params)
        # Parse the HTML response and extract a small sample of its content.
        soup = BeautifulSoup(response, 'html.parser')
        data = {
            'title': soup.title.string,
            'paragraphs': [p.string for p in soup.find_all('p')],
            'links': [link.get('href') for link in soup.find_all('a', href=True)]
        }
        json_data = json.dumps(data)
        # Explicit encoding so the dump is not locale-dependent.
        with open('G:/scrapy-stream/test2.txt', 'w', encoding='utf-8') as file:
            file.write(json_data)

    def get_datas(self) -> [ProjectIndex]:
        """Fetch the current page of project-index records.

        Returns:
            list[ProjectIndex]: one index entry per search hit on this page.
        """
        # Assumes http_post returns the parsed JSON response — confirm
        # against public.http_method.http_method_form.
        r = http_post(self.url, data=self.params)
        # Keep the total record count in sync with the server-reported value.
        self.total_record = r['result']['totalcount']
        results = r['result']['records']
        return [self.build_project_index(data=result) for result in results]

    def build_project_index(self, data) -> ProjectIndex:
        """Build one ProjectIndex from a raw search-result record.

        Args:
            data: one record dict from the API response.

        Returns:
            ProjectIndex: the populated index entry.
        """
        d = ProjectIndex()
        # Fields supplied by the task/framework context:
        d.datasourceId = 0  # data-source (site) identifier
        d.tid = 0  # task identifier
        # Fields taken directly from the record:
        d.linkurl = data['linkurl']
        d.region = "安徽省"
        d.publishTime = data['webdate']
        return d

    def to_next_page(self) -> bool:
        """Advance to the next result page.

        Returns:
            bool: True if the downloader moved to a next page.
        """
        if super().to_next_page():
            # BUG FIX: this API pages via 'currentPage' (the key used in
            # __init__), not the 'pn' offset scheme the original set —
            # that key was never part of this site's params and looks
            # copy-pasted from another downloader. Assumes self.curpage is
            # the 1-based page number after advancing — confirm against the
            # Downloader base class.
            self.params['currentPage'] = str(self.curpage)
            return True
        else:
            return False


if __name__ == '__main__':
    # Manual smoke test: probe the endpoint and dump a parsed sample to disk.
    downloader = Downloader_PR_AH()
    print(downloader.try_get_datas())
