import requests
import time
import numpy as np
import pandas as pd
import os
def define_listParams(pageNum, stock, seDate):
    """
    Build the form payload and request headers for the announcement-list query.

    :param pageNum: page number of the announcement list to fetch
    :param stock: stock code and cninfo org id joined by ','
    :param seDate: announcement date range string, format yyyy-mm-dd
    :return: tuple of (payload dict, headers dict)
    """
    payload = dict(
        pageNum=pageNum,
        pageSize='30',
        column='szse',
        tabName='fulltext',
        plate='',
        stock=stock,
        searchkey='',
        secid='',
        category='',
        trade='',
        seDate=seDate,
        sortName='',
        sortType='',
        isHLtitle='true',
    )
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36"
    }
    return payload, headers
def define_detailPayload(announcementId):
    """
    Build the form payload and request headers for one announcement's detail page.

    :param announcementId: cninfo announcement id
    :return: tuple of (payload dict, headers dict)
    """
    # the endpoint expects today's date as announceTime
    today = time.strftime("%Y-%m-%d", time.localtime())
    payload = {
        'announceId': announcementId,
        'flag': 'true',
        'announceTime': today,
    }
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36"
    }
    return payload, headers
def cal_pageNum(stock, seDate, url):
    """
    Query page 1 of the announcement list and compute the total page count.

    Bug fix: the original returned a float (``total/30``) when the total was
    an exact multiple of 30; the page count is now always an int, which is
    what callers feed into ``range(...)``.

    :param stock: stock code and org id joined by ','
    :param seDate: announcement date range string
    :param url: list-query endpoint URL
    :return: number of result pages (0 when there are no announcements)
    """
    param, headers = define_listParams(pageNum=1, stock=stock, seDate=seDate)
    response = requests.post(url=url, headers=headers, data=param)
    total_announcement = response.json()['totalAnnouncement']
    print(total_announcement)
    # ceil(total / 30) in pure integer arithmetic; 0 announcements -> 0 pages
    return (total_announcement + 29) // 30
def save_announcementId(stock, seDate, ids: list, titles: list,
                        finalurl: list, shortTitle: list,
                        fileType: list, annType: list,
                        path=".\\公告文件\\基本信息"):
    """
    Save one stock/year batch of announcement metadata to <path>/<stock>/<year>.csv.

    Bug fix: paths are now built with ``os.path.join`` instead of hard-coded
    ``"\\\\"`` concatenation, so the output lands in real subdirectories on
    non-Windows systems too; ``makedirs(..., exist_ok=True)`` avoids the
    exists-check race.

    :param stock: stock code (zero-padded to 6 digits for the directory name)
    :param seDate: date range string; the leading 4 chars are used as the year
    :param ids/titles/finalurl/shortTitle/fileType/annType: parallel lists,
        one entry per announcement
    :param path: root output directory
    """
    num = len(ids)
    year = seDate[0:4]
    data = pd.DataFrame({
        'stocks': [stock] * num,
        'years': [year] * num,
        'ids': ids,
        'titles': titles,
        'finalurl': finalurl,
        'shortTitle': shortTitle,
        'fileType': fileType,
        'annType': annType,
    })
    savePath = os.path.join(path, str(stock).zfill(6))
    os.makedirs(savePath, exist_ok=True)
    data.to_csv(os.path.join(savePath, str(year) + '.csv'))
def get_idlist(total_pages, stock, seDate, url):
    """
    Walk every list page for one stock/year, collect announcement metadata,
    and persist it via save_announcementId().

    Cleanup: removed the unused local ``year`` (save_announcementId derives
    the year from ``seDate`` itself).

    :param total_pages: number of pages (from cal_pageNum)
    :param stock: stock code and org id joined by ','
    :param seDate: announcement date range string
    :param url: list-query endpoint URL
    """
    id_list = []
    title_list = []
    finalurl = []
    shortTitle = []
    filetype = []
    announcementType = []
    print('总页数：', total_pages)
    for pageNum in range(1, int(total_pages + 1)):
        param, headers = define_listParams(pageNum=pageNum, stock=stock, seDate=seDate)
        response = requests.post(url=url, headers=headers, data=param)
        data_list = response.json()
        for dic in data_list['announcements']:
            id_list.append(dic['announcementId'])
            title_list.append(dic['announcementTitle'])
            finalurl.append(dic['adjunctUrl'])
            shortTitle.append(dic['shortTitle'])
            filetype.append(dic['adjunctType'])
            announcementType.append(dic['announcementType'])
    save_announcementId(stock, seDate, ids=id_list, titles=title_list, finalurl=finalurl,
                        shortTitle=shortTitle, fileType=filetype, annType=announcementType)
def m_get_idlist(total_pages, stock, seDate, url):
    """
    Multiprocessing variant of get_idlist(): walk every list page for one
    stock/year, collect announcement metadata, and persist it via
    save_announcementId().

    Bug fixes: ``announcementType.appen(...)`` (AttributeError) is now
    ``.append(...)``, and the misspelled key ``'annoncementType'`` (KeyError)
    is now ``'announcementType'`` — both matching the working get_idlist().

    :param total_pages: number of pages (from cal_pageNum)
    :param stock: stock code and org id joined by ','
    :param seDate: announcement date range string
    :param url: list-query endpoint URL
    """
    id_list = []
    title_list = []
    finalurl = []
    shortTitle = []
    filetype = []
    announcementType = []
    print(total_pages)
    for pageNum in range(1, int(total_pages + 1)):
        param, headers = define_listParams(pageNum=pageNum, stock=stock, seDate=seDate)
        response = requests.post(url=url, headers=headers, data=param)
        data_list = response.json()
        for dic in data_list['announcements']:
            print(dic)
            id_list.append(dic['announcementId'])
            title_list.append(dic['announcementTitle'])
            finalurl.append(dic['adjunctUrl'])
            shortTitle.append(dic['shortTitle'])
            filetype.append(dic['adjunctType'])
            announcementType.append(dic['announcementType'])
    save_announcementId(stock, seDate, ids=id_list, titles=title_list, finalurl=finalurl,
                        shortTitle=shortTitle, fileType=filetype, annType=announcementType)
def m_crawl_pdf(announcementID, stock, title, url, count, lock, path=".\\公告文件\\pdf全文"):
    """
    Worker-safe download of one announcement PDF into <path>/<stock>/.

    Bug fix: the lock is now held via ``with lock:`` so it is released even
    if a statement outside the original try block raised — the manual
    acquire/release pair could deadlock every other worker.

    :param announcementID: cninfo announcement id
    :param stock: stock code (zero-padded to 6 digits for the directory name)
    :param title: announcement title, appended to the saved filename
    :param url: detail endpoint URL
    :param count: suffix for the failure-log filename
    :param lock: shared lock serialising requests across workers
    :param path: root output directory
    """
    with lock:
        data, headers = define_detailPayload(announcementID)
        print(time.strftime("%Y-%m-%d", time.localtime()))
        try:
            last_list = requests.post(url=url, headers=headers, params=data, timeout=15).json()
            print(last_list['fileUrl'])
            print(last_list['announcement']['announcementTitle'])
            savePath = path + "\\" + str(stock).zfill(6)
            if not os.path.exists(savePath):
                os.makedirs(savePath)
            responsepdf = requests.get(last_list['fileUrl'], timeout=15)

            if responsepdf.status_code == 200:
                with open(savePath + '\\' + str(announcementID) + str(title) + '.pdf', "wb") as code:
                    code.write(responsepdf.content)
                print(f"{title}已爬取")
            else:
                # non-200: record the failure so it can be retried later
                print(responsepdf.status_code)
                with open(f"未成功爬取id{str(count)}.txt", 'a', encoding='utf-8') as f:
                    f.write(str(announcementID) + ' ' + str(stock) + ' ' + str(title) + '\n')
        except Exception as e:
            # anti-crawler block / timeout / bad JSON — log and move on
            print(e)
            print("遭遇反爬")
            with open(f"未成功爬取id{str(count)}.txt", 'a', encoding='utf-8') as f:
                f.write(str(announcementID) + ' ' + str(stock) + ' ' + str(title) + '\n')
    time.sleep(0.1)


def m_download_pdf(url):
    """
    Fetch *url* with a 15-second timeout.

    :param url: file URL to download
    :return: the response object, or the int sentinel 1 on any failure
        (the callback produced by m_outer() checks for exactly this value)
    """
    try:
        return requests.get(url, timeout=15)
    except Exception as err:
        print(err)
        with open("未成功爬取id.txt", 'a', encoding='utf-8') as log:
            log.write(str(str(url) + '\n'))
        return 1
def m_outer(filename):
    """
    Build a Future done-callback that saves a downloaded PDF to *filename*.

    Bug fix: the success message was an f-string with no placeholder
    (it printed a literal instead of the file name); it now interpolates
    *filename*.

    :param filename: target path for the PDF
    :return: callback taking a Future whose result is either a response
        object or the int sentinel 1 from m_download_pdf() on failure
    """
    def m_save_pdf(responsepdf):
        response = responsepdf.result()
        if response != 1:
            if response.status_code == 200:
                with open(str(filename), "wb") as code:
                    code.write(response.content)
                print(f"{filename}已爬取")
            else:
                # non-200: record the failure so it can be retried later
                with open("未成功爬取id.txt", 'a', encoding='utf-8') as f:
                    f.write(str(filename) + '\n')
    return m_save_pdf


def crawl_pdf(announcementID, stock, title, url, count, path=".\\公告文件\\pdf全文"):
    """
    Single-threaded download of one announcement PDF into <path>/<stock>/.

    Failures (non-200 status or any exception) are appended to a per-count
    retry log file instead of raising.

    :param announcementID: cninfo announcement id (used as the saved filename)
    :param stock: stock code (zero-padded to 6 digits for the directory name)
    :param title: announcement title, used in logs only
    :param url: detail endpoint URL
    :param count: suffix for the failure-log filename
    :param path: root output directory
    """
    data, headers = define_detailPayload(announcementID)
    print(time.strftime("%Y-%m-%d", time.localtime()))
    failure_line = str(announcementID) + ' ' + str(stock) + ' ' + str(title) + '\n'
    try:
        detail = requests.post(url=url, headers=headers, params=data, timeout=15).json()
        print(detail['fileUrl'])
        print(detail['announcement']['announcementTitle'])
        savePath = path + "\\" + str(stock).zfill(6)
        if not os.path.exists(savePath):
            os.makedirs(savePath)
        responsepdf = requests.get(detail['fileUrl'], timeout=15)
        print(responsepdf)
        if responsepdf.status_code == 200:
            with open(savePath + '\\' + str(announcementID) + '.pdf', "wb") as code:
                code.write(responsepdf.content)
            print(f"{title}已爬取")
        else:
            print(responsepdf.status_code)
            with open(f"未成功爬取id{str(count)}.txt", 'a', encoding='utf-8') as f:
                f.write(failure_line)
    except Exception as e:
        print(str(e))
        print("遭遇反爬")
        with open(f"未成功爬取id{str(count)}.txt", 'a', encoding='utf-8') as f:
            f.write(failure_line)
    time.sleep(0.3)

if __name__ == '__main__':
    # Entry point: for every stock code >= 488 in the list file, crawl the
    # announcement metadata for each year 2007-2022 and save it to CSV.
    #
    # Bug fixes vs. the original:
    #  - the first handler was a bare `except:` that then referenced an
    #    undefined name `e` -> now `except Exception as e`;
    #  - the error-log `with open(...)` was nested inside
    #    `if not os.path.exists(...)`, so errors were only logged the very
    #    first time the log directory was created -> logging is unconditional;
    #  - on a failed cal_pageNum, `pageNum` was undefined (or stale from the
    #    previous year) yet still used by get_idlist -> `continue` skips the year.
    profile_url = 'http://www.cninfo.com.cn/new/hisAnnouncement/query'
    detail_url = 'http://www.cninfo.com.cn/new/announcement/bulletin_detail'
    stocklist = pd.read_csv(r'.\公告文件\股票代码.csv')
    for line in range(len(stocklist)):
        code = str(stocklist.loc[line, 'code']).zfill(6)
        if int(code) >= 488:
            orgid = str(stocklist.loc[line, 'orgId'])
            stock = code + ',' + orgid
            for year in range(2007, 2023, 1):
                seDate = str(year) + '-01-01~' + str(year) + '-12-31'
                try:
                    pageNum = cal_pageNum(stock, seDate, url=profile_url)
                    print(f'{stock[0:6]}在{seDate[0:4]}年有{pageNum}页公告')
                except Exception as e:
                    if not os.path.exists(r'.\日志'):
                        os.makedirs(r'.\日志')
                    with open(r'.\日志\错误日志.txt', 'a', encoding='utf-8') as log:
                        log.write(str(stock) + str(year) + str(e) + '\n')
                    continue  # no valid page count for this year
                try:
                    get_idlist(total_pages=pageNum, stock=stock, seDate=seDate, url=profile_url)
                    time.sleep(0.1)
                except Exception as e:
                    if not os.path.exists(r'.\日志'):
                        os.makedirs(r'.\日志')
                    with open(r'.\日志\错误日志.txt', 'a', encoding='utf-8') as log:
                        log.write(str(stock) + str(year) + str(e) + '\n')
