# Download announcement PDFs found via https://www.bse.cn search and record them in PostgreSQL.
# (Earlier comment referenced szse.cn and Confluence, which no longer matches this code.)
import requests
import json
import os
from datetime import datetime, timedelta
from datetime import datetime
import time

# # 调试路径
# import sys
# project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../'))
# sys.path.append(project_root)
from postgresql.postgresql import Postgresql
# --- Configuration pulled from the environment --------------------------
Postgresqlhost = os.environ.get('Postgresqlhost')
Postgresqldatabase = os.environ.get('Postgresqldatabase')
Postgresqluser = os.environ.get('Postgresqluser')
Postgresqlpassword = os.environ.get('Postgresqlpassword')
StartTime = os.environ.get('StartTime')


# The crawl window always ends at yesterday's date.
today = datetime.now()
yesterday = today - timedelta(days=1)
yesterday_str = yesterday.strftime("%Y-%m-%d")

# The window starts at StartTime when one is provided; otherwise it also
# starts at yesterday, i.e. a one-day window.
startday_str = StartTime if StartTime else yesterday_str

# Search terms: CSR / sustainability / ESG / annual-report related keywords.
keywords_list = ["社会责任", "可持续发展", "ESG", "环境、社会及治理", "环境、社会及管治", "年报", "年度报告"]
URL = 'https://www.bse.cn'

# Headers shared by every request sent to the exchange.
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0',
    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
}

def get_data_json(searchKey, page_num=""):
    """Query the BSE company-announcement search API for one result page.

    Args:
        searchKey: Keyword to search announcements for.
        page_num: Zero-based page index; the empty string (the default)
            requests the first page, matching the API's expectations.

    Returns:
        The first element of the decoded JSON payload — a dict containing
        the ``listInfo`` paging structure consumed by the main loop.

    Raises:
        requests.HTTPError: If the server answers with an error status.
        json.JSONDecodeError: If the payload is not valid JSON.
    """
    url = 'https://www.bse.cn/disclosureInfoController/companyAnnouncement.do'

    data = {
        "page": page_num,
        "companyCd": "",
        "isNewThree": 1,
        "startTime": startday_str,
        "endTime": yesterday_str,
        "keyword": searchKey,
        "sortfield": "xxssdq",
        "sorttype": "asc",
        "xxfcbj[]": 2,
        "disclosureType[]": 5
    }

    # Timeout added: the original call had none and could hang forever on
    # a stalled connection.  Fail early on HTTP error pages instead of
    # feeding them to the JSON parser.
    response = requests.post(url, data=data, headers=headers, timeout=30)
    response.raise_for_status()

    # The endpoint returns a JSONP-style body of the form ``null({...})``
    # (presumably — inferred from the original wrapper stripping; confirm
    # against a live response).  Strip ONLY the wrapper: the previous
    # ``.replace(')', '')`` deleted EVERY right parenthesis, corrupting any
    # payload whose titles or paths contained one.
    json_text = response.text.strip()
    if json_text.startswith('null('):
        json_text = json_text[len('null('):]
    if json_text.endswith(')'):
        json_text = json_text[:-1]

    json_data = json.loads(json_text)
    return json_data[0]

# For every keyword: page through the search results, download each unique
# announcement PDF to a dated directory, then record the files in PostgreSQL.
for keywords_list_one in keywords_list:

    print(f"【下载开始】【关键字】【{keywords_list_one}】")
    # First request only discovers how many result pages exist.
    init_data_info_json = get_data_json(keywords_list_one)

    # "totalPages" is the page count for this keyword; nothing to do when 0.
    total_size = init_data_info_json["listInfo"]["totalPages"]
    if total_size == 0:
        continue

    # Parallel lists describing every unique PDF found for this keyword.
    all_down_url = []
    all_down_pdf_title = []
    all_down_pdf_updatetime = []
    all_company_cd = []
    for page_num in range(total_size):
        # The API expects "" (not 0) for the first page; use a separate
        # variable instead of clobbering the loop index as before.
        page_param = "" if page_num == 0 else page_num
        json_data = get_data_json(keywords_list_one, page_param)

        for knowledgeList_one in json_data['listInfo']['content']:
            down_pdf_url = URL + knowledgeList_one['destFilePath']
            # Skip entries already collected (the same file can show up on
            # more than one result page).
            if down_pdf_url in all_down_url:
                continue
            all_down_url.append(down_pdf_url)
            all_down_pdf_title.append(knowledgeList_one['disclosureTitle'])
            all_down_pdf_updatetime.append(knowledgeList_one['publishDate'])
            all_company_cd.append(knowledgeList_one['companyCd'] + '.BJ')

    # Download each PDF into /datapdf/<year>/<month>/<day>/.
    lines_list = []
    for index, value in enumerate(all_down_url):

        url = value

        # Titles usually look like "[code][type]Name" — use the part after
        # the second "]" as the file name, falling back to the whole title.
        try:
            file_name = all_down_pdf_title[index].split("]")[2] + ".pdf"
        except IndexError:  # narrowed from a bare except: only the split can fail here
            file_name = all_down_pdf_title[index] + ".pdf"

        # Keep only filesystem-safe characters (CJK characters pass isalnum()).
        file_name = ''.join(c for c in file_name if c.isalnum() or c in ['.', '_', '-'])
        file_time = all_down_pdf_updatetime[index]

        # Build the dated target directory for today's crawl run.
        now = datetime.now()
        base_directory = '/datapdf'
        day_directory = os.path.join(base_directory, str(now.year), str(now.month), str(now.day))
        os.makedirs(day_directory, exist_ok=True)

        # Timeout added so a single dead link cannot stall the whole crawl.
        response = requests.get(url, timeout=60)
        file_path = os.path.join(day_directory, file_name)
        with open(file_path, 'wb') as file:
            file.write(response.content)
        print(f'File {url} downloaded successfully: {file_path}')

        lines_list.append([all_company_cd[index], "bse", file_path, file_time])
        time.sleep(5)  # rate-limit to be polite to the exchange's servers

    # Persist this keyword's download records.
    postgresql = Postgresql(host=Postgresqlhost, database=Postgresqldatabase, user=Postgresqluser, password=Postgresqlpassword)
    postgresql.create_connection()

    print(f"【下载完成】【关键字】【{keywords_list_one}】")

    # Insert one row per downloaded file, committing after each insert.
    for lines_list_one in lines_list:
        data = {
            "stock_code": lines_list_one[0],
            "company_report_type": lines_list_one[1],
            "company_report_path": lines_list_one[2],
            "file_time": lines_list_one[3],
            "keyword": keywords_list_one
                }
        result = postgresql.execute_insert("company_report", data)
        postgresql.execute_commit()

    # Close the connection before moving on to the next keyword.
    postgresql.quit_connection()
    print(f"【数据入库完成】【关键字】【{keywords_list_one}】")