# Download annual-report announcement PDF links from https://www.bse.cn and store them in PostgreSQL
import requests
import json
import os
from datetime import datetime, timedelta
from datetime import datetime
import time

# Add the project root to sys.path so the local `postgresql` package resolves when run directly
import sys
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../'))
sys.path.append(project_root)
from postgresql.postgresql import Postgresql
# PostgreSQL connection settings.
# Read from the environment first, falling back to local-development
# defaults. (Previously the env vars were read and then unconditionally
# overwritten by the hardcoded values below, so the environment
# configuration was dead code.)
Postgresqlhost = os.environ.get('Postgresqlhost') or ""
Postgresqldatabase = os.environ.get('Postgresqldatabase') or "esg"
Postgresqluser = os.environ.get('Postgresqluser') or "sumz"
Postgresqlpassword = os.environ.get('Postgresqlpassword') or "Sumz"

# StartTime = os.environ.get('StartTime')
# Hardcoded crawl start date; switch back to the env var above to make it configurable.
StartTime = "2024-01-01"


# Crawl window end: yesterday, formatted as "YYYY-MM-DD".
today = datetime.now()
yesterday = today - timedelta(days=1)
yesterday_str = yesterday.strftime("%Y-%m-%d")

# Use the configured start date when present; otherwise crawl just yesterday.
startday_str = StartTime if StartTime else yesterday_str

# keywords_list = ["社会责任", "可持续发展", "ESG", "环境、社会及治理", "环境、社会及管治", "年报", "年度报告"]
# keywords_list = ["可持续发展", "ESG", "环境、社会及治理", "环境、社会及管治"]
URL = 'https://www.bse.cn'

# Common headers for every request to the BSE announcement endpoint.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0',
    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
}

def _strip_jsonp(text):
    """Remove the JSONP wrapper ``null( ... )`` around a JSON payload.

    Only the leading ``null(`` and the single trailing ``)`` are removed,
    so parentheses inside the payload (titles, paths) survive intact.
    """
    text = text.strip()
    if text.startswith('null('):
        text = text[len('null('):]
    if text.endswith(')'):
        text = text[:-1]
    return text


def get_data_json(searchKey="", page_num=None):
    """Fetch one page of BSE annual-report announcements.

    Parameters:
        searchKey: kept for interface compatibility; the request always
            sends ``keyword=None``, so this value is currently unused.
        page_num: page index; the endpoint expects "" (empty) for the
            first page — callers pass "" or an int.

    Returns:
        dict: the first element of the JSONP-wrapped response payload
        (contains the ``listInfo`` paging structure).
    """
    url = 'https://www.bse.cn/disclosureInfoController/companyAnnouncement.do'

    data = {
        "disclosureSubtype[]": ["9503-1001", "9503-1005"],
        "page": page_num,
        "companyCd": "",
        "isNewThree": "1",
        "startTime": startday_str,
        "endTime": yesterday_str,
        "keyword": None,
        "xxfcbj[]": "2",
        "sortfield": "xxssdq",
        "sorttype": "asc"
    }

    # Timeout so a hung connection cannot stall the whole crawl;
    # raise_for_status surfaces HTTP errors instead of feeding an error
    # page to the JSON parser.
    response = requests.post(url, data=data, headers=headers, timeout=30)
    response.raise_for_status()

    # BUG FIX: the old code did .replace(')', '') on the whole body,
    # which deleted every ')' inside the payload, not just the JSONP
    # wrapper — corrupting any title or path containing a paren.
    json_data = json.loads(_strip_jsonp(response.text))
    return json_data[0]

# Rows collected for DB insertion; one dict per unique announcement.
lines_list = []

print(f"【下载开始】【】【全部年报】")
# First request: discover how many pages of results exist.
init_data_info_json = get_data_json()

# Despite the name, this is the page count ("totalPages"), not a row count.
total_size = init_data_info_json["listInfo"]["totalPages"]
if total_size == 0:
    print(f"【结果】【无】")
else:
    # De-duplicate announcements that appear on more than one page.
    # (A set gives O(1) membership; the old parallel lists
    # all_down_url / all_down_pdf_title / all_down_pdf_updatetime /
    # all_company_cd were never read anywhere else.)
    seen_urls = set()
    for page_num in range(total_size):
        # The endpoint expects an empty string (not 0) for the first page.
        json_data = get_data_json("", "" if page_num == 0 else page_num)

        for knowledgeList_one in json_data['listInfo']['content']:
            down_pdf_url = URL + knowledgeList_one['destFilePath']
            # Skip entries already seen on an earlier page.
            if down_pdf_url in seen_urls:
                continue
            seen_urls.add(down_pdf_url)

            lines_list.append({
                "stock_code": knowledgeList_one['companyCd'] + '.BJ',
                "company_report_type": "bse",
                "company_report_path": down_pdf_url,
                "file_time": knowledgeList_one['publishDate'],
                "keyword": "年报",
                "file_name": knowledgeList_one['disclosureTitle'],
                "company_name": knowledgeList_one['companyName'],
                })

print(f"【数据组合完成准备入库】【关键字】【】")

# Open a connection to the target PostgreSQL database.
postgresql = Postgresql(host=Postgresqlhost, database=Postgresqldatabase, user=Postgresqluser, password=Postgresqlpassword)
postgresql.create_connection()

# Insert each collected announcement, committing row by row.
for record in lines_list:
    postgresql.execute_insert("company_report_swap2", record)
    postgresql.execute_commit()

# Release the connection before exiting.
postgresql.quit_connection()
print(f"【数据入库完成】【关键字】【】")