import requests
import re
import openpyxl
import time
import pandas as pd
import random

# Pool of desktop/mobile browser User-Agent strings; get_report() picks one
# at random per request (see its headers dict) so outgoing traffic is less
# uniform and less likely to be flagged as a bot.
user_agents = [
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:89.0) Gecko/20100101 Firefox/89.0',
            'Mozilla/5.0 (iPhone; CPU iPhone OS 14_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Mobile/15E148 Safari/604.1',
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"
        ]

def gen_request_data(stock_code, page_num, stock_name, start_date, end_date):
    """Build the form payload for cninfo's announcement-search endpoint.

    Args:
        stock_code: value for the "stock" field (may be an empty string).
        page_num: 1-based result page; coerced through int, sent as a string.
        stock_name: search keyword placed in "searchkey".
        start_date: inclusive start of the "seDate" range, e.g. "2010-01-01".
        end_date: inclusive end of the "seDate" range.

    Returns:
        dict[str, str] of form fields ready for requests.post(data=...).
    """
    return {
        "pageNum": str(int(page_num)),
        "pageSize": "30",
        "column": "szse",
        "tabName": "fulltext",
        "plate": "",
        "stock": "{}".format(stock_code),
        "searchkey": f"{stock_name}",
        "secid": "",
        # annual reports of SZSE/SSE listed companies
        "category": "category_ndbg_szsh",
        "trade": "",
        "seDate": f"{start_date}~{end_date}",
        "sortName": "",
        "sortType": "",
        "isHLtitle": "true",
    }

def get_report(data):
    """POST the search form to cninfo's announcement-query endpoint.

    Args:
        data: form-field dict as produced by gen_request_data().

    Returns:
        requests.Response for the query (caller is expected to .json() it).

    Raises:
        requests.exceptions.RequestException: on connection errors/timeouts.
    """
    url = "http://www.cninfo.com.cn/new/hisAnnouncement/query"
    headers = {
        "Accept": "*/*",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Connection": "keep-alive",
        # Content-Length is intentionally NOT set here: requests computes it
        # from the encoded body. The previous hard-coded "181" overrode that
        # and sent a wrong length for most payloads.
        "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
        "Host": "www.cninfo.com.cn",
        "Origin": "http://www.cninfo.com.cn",
        "Referer": "http://www.cninfo.com.cn/new/commonUrl/pageOfSearch?url=disclosure/list/search",
        # rotate the UA per request to look less like a bot
        "User-Agent": random.choice(user_agents),
        "X-Requested-With": "XMLHttpRequest",
    }
    # timeout so a stalled connection cannot hang the crawl forever;
    # the retry loop in download_report() catches the resulting exception
    response = requests.post(url, data=data, headers=headers, timeout=30)
    return response

def download_report(data):
    """Fetch one page of announcements for the given search form.

    Retries the HTTP request up to max_retries times with a 5-second pause.
    (The original version called get_report(page_num, date) with undefined
    names, re-parsed the same in-memory JSON in its "retry" loop — which can
    never produce a different result — and never returned all_results.)

    Args:
        data: form-field dict from gen_request_data().

    Returns:
        list of announcement dicts for this page; empty list when every
        HTTP attempt failed or the body was not valid JSON.

    Raises:
        Exception: when the response parses but carries no announcements.
    """
    all_results = []
    max_retries = 3   # max retry attempts for the HTTP request
    retry_count = 0   # current attempt counter

    response = None
    # retry loop: only the network call can fail transiently
    while retry_count <= max_retries:
        try:
            response = get_report(data)
            response.raise_for_status()
            break
        except requests.exceptions.RequestException as e:
            print(f"出现错误！: {e}")
            print("5秒后重试...")
            time.sleep(5)
            retry_count += 1
    else:
        # all attempts failed; nothing to parse
        return all_results

    try:
        payload = response.json()
    except ValueError as e:
        # body was not JSON; surface the error but keep the crawl alive
        print(f"解析响应数据失败: {e}")
        return all_results

    announcements = payload.get("announcements")
    if announcements is None:
        raise Exception("公告数据为空")
    all_results.extend(announcements)
    return all_results
                

if __name__ == "__main__":
    # Input sheet: one row per stock, keyed by "stknickname" like "000001.SZ".
    df = pd.read_excel("/home/ubuntu/code/git/subject-word-extraction/data/in/stock.xlsx")

    res_list = []  # one dict per kept annual-report announcement
    i = 0          # number of stocks processed so far
    for (stknickname,), val in df.groupby(by=["stknickname"]):
        start_date = "2010-01-01"
        end_date = "2025-01-01"
        stk_code, stk_flag = stknickname.split(".")
        # NOTE(review): the parsed code is immediately blanked, so the query
        # effectively searches by name only — confirm this is intended.
        stk_code = ""
        stk_name = val["nickname"].values[0]

        # Paginate through every result page. The original bumped page_num
        # but never looped, so only the first 30 records were ever fetched.
        page_num = 1
        total_record_num = None
        while True:
            request_data = gen_request_data(stk_code, page_num, stk_name, start_date, end_date)
            response = get_report(request_data)
            response_content = response.json()
            if total_record_num is None:
                total_record_num = response_content["totalAnnouncement"]
            if not total_record_num:
                # None or 0: nothing for this stock
                break
            results = response_content["announcements"]
            if results:
                for content in results:
                    title = content['announcementTitle']
                    # skip abstracts (摘要) and plain notices (公告); keep full reports
                    if re.search("摘要", title) or re.search("公告", title):
                        continue
                    tmp_dict = {}
                    tmp_dict["sec_name"] = str(content['secName'])
                    tmp_dict["sec_code"] = str(content['secCode'])
                    # adjunctUrl starts with a "YYYY-MM-DD" path segment; take the year
                    tmp_dict["year"] = str(content["adjunctUrl"].split("/")[1].split("-")[0])
                    tmp_dict["title"] = title
                    tmp_dict["adjunct_url"] = content['adjunctUrl']
                    tmp_dict["down_url"] = 'http://static.cninfo.com.cn/' + tmp_dict["adjunct_url"]
                    tmp_dict["file_name"] = f'{tmp_dict["sec_code"]}-{tmp_dict["sec_name"]}-{tmp_dict["title"]}.pdf'
                    print("\n")
                    print(tmp_dict)
                    res_list.append(tmp_dict)
            # 30 records per page (pageSize in gen_request_data); stop after last page
            if page_num * 30 >= total_record_num:
                break
            page_num += 1
            # polite delay between page requests
            time.sleep(random.uniform(2, 4))

        i += 1

        # periodic checkpoint so a crash doesn't lose everything collected so far
        if i % 50 == 0:
            print("================="*20)
            print(f"\n爬取{i}家")
            pd.DataFrame(res_list).to_csv(f"/home/ubuntu/code/git/subject-word-extraction/data/output/stock_url-2_{time.time()}.csv",index=False)
            print("保存成功！\n")

        # polite delay between stocks
        time.sleep(random.uniform(2,4))

    # final dump of everything collected
    pd.DataFrame(res_list).to_csv(f"/home/ubuntu/code/git/subject-word-extraction/data/output/stock_url-2_{time.time()}.csv",index=False)
        
        



