# -*- coding:utf-8 -*-

import json
import os
from datetime import datetime
from http import cookiejar

import requests
from fake_useragent import UserAgent
from lxml import etree

from utils.HttpUtils import HttpUtils

requests.packages.urllib3.disable_warnings()  # 忽略HTTPS安全警告

"""
巨潮资讯-搜索关键字
http://www.cninfo.com.cn/new/fulltextSearch?notautosubmit=&keyWord=%E8%B4%A2%E5%8A%A1%E6%8A%A5%E8%A1%A8
下载PDF文件
"""
class ZiXun():
    """Crawler for the cninfo.com.cn full-text announcement search.

    Queries http://www.cninfo.com.cn/new/fulltextSearch/full for a keyword
    and downloads every PDF attachment found on a result page into ``PDF/``.
    """

    def __init__(self):
        # CookieJar instance to hold session cookies.
        # NOTE(review): it is never attached to a request session in this
        # file -- kept for parity with the original setup.
        self.cookie = cookiejar.CookieJar()
        # ua = UserAgent(use_cache_server=False)  # disable the UA server cache
        self.headers = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Connection": "keep-alive",
            "Cookie": "JSESSIONID=7B3CB47AF4C7522B082379E3000D6E8E; _sp_ses.2141=*; insert_cookie=45380249; UC-JSESSIONID=2FE3C5317FF8871A2E22A5A924CF21C0; _sp_id.2141=93e11e77-a196-4535-bd6e-b6b008a5c388.1576411729.1.1576411789.1576411729.be9e7245-6b4f-4721-84e6-38182bda20be",
            "DNT": "1",
            "Host": "www.cninfo.com.cn",
            "Referer": "http://www.cninfo.com.cn/new/fulltextSearch?notautosubmit=&keyWord=%E8%B4%A2%E5%8A%A1%E6%8A%A5%E8%A1%A8",
            # "User-Agent":ua.random
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36'
        }

    def get_contents(self, pageNum, searchkey="财务报表"):
        """Fetch one page of search results and download each PDF announcement.

        :param pageNum: 1-based result page number.
        :param searchkey: full-text search keyword; defaults to the original
            hard-coded value "财务报表" ("financial statements") so existing
            callers are unaffected.
        """
        postData = {
            "searchkey": searchkey,  # search keyword
            "sdate": "",
            "edate": "",
            "isfulltext": "false",
            "sortName": "pubdate",
            "sortType": "desc",
            "pageNum": pageNum,  # page number
        }
        url = "http://www.cninfo.com.cn/new/fulltextSearch/full"
        html = HttpUtils.do_request("GET", url, self.headers, postData)
        data_json = json.loads(html.text)
        # Ensure the output directory exists; the original crashed with
        # FileNotFoundError on a fresh checkout.
        os.makedirs("PDF", exist_ok=True)
        for d in data_json['announcements']:
            dict_data = dict()
            dict_data['secCode'] = d['secCode']  # security code
            dict_data['secName'] = d['secName']  # security name
            # Strip the <em> highlight markup injected by the search backend.
            dict_data['announcementTitle'] = d['announcementTitle'].replace("<em>", "").replace("</em>", "")
            # announcementTime arrives as epoch milliseconds.
            dict_data['announcementTime'] = datetime.fromtimestamp(int(d['announcementTime']) / 1000).strftime("%Y-%m-%d")
            dict_data['announcementId'] = d['announcementId']  # PDF file name/id
            dict_data['adjunctUrl'] = d['adjunctUrl']  # PDF download path
            dict_data['orgId'] = d['orgId']  # orgId
            dict_data['adjunctType'] = d['adjunctType']  # attachment type, e.g. PDF
            # Only download actual PDF attachments.
            if dict_data['adjunctType'] == "PDF":
                url = "http://static.cninfo.com.cn/" + d['adjunctUrl']
                filename = d['secCode'] + "-" + d['secName']
                # Fetch first, then open the file for writing: a failed
                # request no longer leaves an empty/truncated PDF behind.
                res = HttpUtils.do_request("GET", url, "", "")
                with open('PDF/' + filename + ".pdf", 'wb') as f:
                    f.write(res.content)  # 'with' closes f; explicit close removed


if __name__ == '__main__':
    # Crawl result pages 1..9 and download the PDF attachments on each page.
    crawler = ZiXun()
    for page_no in range(1, 10):
        crawler.get_contents(page_no)