# 下载http://www.szse.cn/搜索内容的pdf,并将信息存入pg
import requests
import json
import os
from datetime import datetime, timedelta
import time 

# 调试路径
import sys
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../'))
sys.path.append(project_root)
from postgresql.postgresql import Postgresql

# PostgreSQL connection settings: environment variables take precedence,
# with development defaults as fallbacks.
# NOTE(review): credentials were previously hard-coded *after* the env reads,
# silently overriding any environment configuration — kept only as fallbacks
# now; move real credentials out of source control.
Postgresqlhost = os.environ.get('Postgresqlhost', '')
Postgresqldatabase = os.environ.get('Postgresqldatabase', 'esg')
Postgresqluser = os.environ.get('Postgresqluser', 'sumz')
Postgresqlpassword = os.environ.get('Postgresqlpassword', 'Sumz')

# Earliest publish date to crawl (fixed for backfill runs).
# StartTime = os.environ.get('StartTime')
StartTime = "2024-01-01"

# Crawl window is [startday_str, yesterday_str]; when StartTime is unset the
# window collapses to yesterday only (incremental daily run).
today = datetime.now()
yesterday = today - timedelta(days=1)
yesterday_str = yesterday.strftime("%Y-%m-%d")
startday_str = StartTime if StartTime else yesterday_str

# Keywords searched in announcement titles ("annual report").
keywords_list = ["年度报告"]

# Site root (used as the Origin header) and the per-announcement PDF
# download endpoint; the announcement id is appended to down_url.
URL = 'http://www.szse.cn/'

down_url = "https://www.szse.cn/api/disc/info/download?id="

headers = {
    'Origin': URL,
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0',
    'Content-Type': 'application/json'
}

def get_data_json(searchKey, page_num=1):
    """Query the SZSE announcement-list API for one page of results.

    Args:
        searchKey: keyword to match against announcement titles.
        page_num: 1-based page number (50 items per page).

    Returns:
        dict: parsed JSON response from the annList endpoint
        (includes 'announceCount' and 'data').

    Raises:
        requests.HTTPError: on a non-2xx response.
        requests.Timeout: if the server does not answer within 30s.
    """
    url = "http://www.szse.cn/api/disc/announcement/annList"

    payload = {
        "bigCategoryId": ["010301"],
        # Publish-date window computed at module load time.
        "seDate": [startday_str, yesterday_str],
        "searchKey": [searchKey],
        "channelCode": ["listedNotice_disc"],
        "pageSize": 50,
        "pageNum": page_num,
    }

    # timeout so a stalled server cannot hang the crawler forever;
    # raise_for_status surfaces HTTP errors immediately instead of
    # failing later with a confusing JSON decode error.
    response = requests.post(url, data=json.dumps(payload), headers=headers,
                             timeout=30)
    response.raise_for_status()
    return response.json()

def _collect_rows(items, keyword, seen_urls):
    """Convert one page of announcement entries into DB rows.

    Skips English-version reports and download URLs already in seen_urls
    (which is updated in place so dedup works across pages).
    """
    rows = []
    for item in items:
        # Skip English translations of the same report.
        if '英文版' in item['title']:
            continue

        pdf_url = down_url + item['id']
        if pdf_url in seen_urls:
            continue
        # BUGFIX: the original checked membership but never recorded the
        # URL, so deduplication was a no-op.
        seen_urls.add(pdf_url)

        rows.append({
            "stock_code": item['secCode'][0] + '.SZ',
            "company_report_type": "szse",
            "company_report_path": pdf_url,
            "file_time": item['publishTime'].split()[0],
            "keyword": keyword,
            "file_name": item['title'],
            "company_name": item['secName'][0],
            })
    return rows


for keywords_list_one in keywords_list:
    print(f"【爬取开始】【关键字】【{keywords_list_one}】")

    # First request: learn the total announcement count (and get page 1).
    init_data_info_json = get_data_json(keywords_list_one)
    total_size = init_data_info_json['announceCount']

    if total_size == 0:
        continue

    seen_urls = set()
    lines_list = []

    if total_size > 50:
        # Page through all results, 50 per page (ceil division avoids the
        # extra empty request the old int(total/50)+2 bound issued when
        # total_size was an exact multiple of 50).
        page_count = (total_size + 49) // 50
        for page_num in range(1, page_count + 1):
            json_data = get_data_json(keywords_list_one, page_num)
            lines_list.extend(
                _collect_rows(json_data['data'], keywords_list_one, seen_urls))
    else:
        # Single page: reuse the data already fetched above.
        lines_list.extend(
            _collect_rows(init_data_info_json['data'], keywords_list_one, seen_urls))

    print(f"【数据组合完成准备入库】【关键字】【{keywords_list_one}】")

    # Insert rows one by one, committing after each insert; the connection
    # is always closed, even if an insert raises.
    postgresql = Postgresql(host=Postgresqlhost, database=Postgresqldatabase,
                            user=Postgresqluser, password=Postgresqlpassword)
    postgresql.create_connection()
    try:
        for lines_list_one in lines_list:
            postgresql.execute_insert("company_report_swap2", lines_list_one)
            postgresql.execute_commit()
    finally:
        postgresql.quit_connection()
    print(f"【数据入库完成】【关键字】【{keywords_list_one}】")
