# Download PDFs matching search keywords from https://www1.hkexnews.hk/ and record them in PostgreSQL
import requests
import json
import os
from datetime import datetime, timedelta
import time


# 调试路径
import sys
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../'))
sys.path.append(project_root)
from postgresql.postgresql import Postgresql

# --- Runtime configuration read from the environment ---
Postgresqlhost = os.environ.get('Postgresqlhost')
Postgresqldatabase = os.environ.get('Postgresqldatabase')
Postgresqluser = os.environ.get('Postgresqluser')
Postgresqlpassword = os.environ.get('Postgresqlpassword')
StartTime = os.environ.get('StartTime')


# --- Search date window [startday_str, yesterday_str], format YYYYMMDD ---
today = datetime.now()
yesterday = today - timedelta(days=1)
yesterday_str = yesterday.strftime("%Y%m%d")

# An explicit StartTime overrides the default one-day window ending yesterday.
startday_str = StartTime if StartTime else yesterday_str

# Traditional-Chinese keywords searched in announcement titles on HKEX news.
keywords_list = ["社會責任", "可持續發展", "ESG", "環境、社會及治理", "環境、社會及管治", "年報", "年度報告"]
URL = 'https://www1.hkexnews.hk/'

# Browser-like request headers for the HKEX endpoints.
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0',
    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
}

def get_data_json(searchKey, total_records=100):
    """Query the HKEX title-search servlet and return the parsed JSON payload.

    Args:
        searchKey: keyword to match against announcement titles.
        total_records: number of rows to request ('rowRange'). The caller
            first probes with the default, then re-queries with the real
            'recordCnt' from the probe response.

    Returns:
        dict parsed from the JSON response on HTTP 200, otherwise False.
    """
    url = "https://www1.hkexnews.hk/search/titleSearchServlet.do"
    params = {
        'sortDir': 0,
        'sortByOptions': 'DateTime',
        'category': 0,
        'market': 'SEHK',
        'stockId': -1,
        'documentType': -1,
        # Module-level date window, already YYYYMMDD strings.
        'fromDate': startday_str,
        'toDate': yesterday_str,
        'title': searchKey,
        'searchType': 0,
        't1code': -2,
        't2Gcode': -2,
        't2code': -2,
        'rowRange': total_records,
        'lang': 'zh'
    }
    # Send the browser-like headers defined at module level and bound the wait
    # so a stalled connection cannot hang the whole crawl.
    response = requests.get(url, params=params, headers=headers, timeout=30)

    if response.status_code == 200:
        # Equivalent to json.loads(response.text).
        return response.json()
    else:
        print("Failed to retrieve data. Status code:", response.status_code)
        return False

for keywords_list_one in keywords_list:

    print(f"【下载开始】【关键字】【{keywords_list_one}】")

    # First query: only used to learn how many records match this keyword.
    total_records = get_data_json(keywords_list_one)

    # get_data_json returns False on an HTTP error — guard before subscripting
    # (the original crashed with TypeError on a failed request).
    if not total_records or total_records["recordCnt"] == 0:
        continue

    all_down_url = []
    all_down_pdf_title = []
    all_down_pdf_text = []
    down_pdf_updatetimes = []
    all_label = []

    # Second query: fetch every matching row in a single page.
    json_data = get_data_json(keywords_list_one, total_records["recordCnt"])
    if not json_data:
        continue

    # 'result' is itself a JSON-encoded string inside the outer payload.
    result_data_list = json.loads(json_data['result'])

    for knowledgeList_one in result_data_list:
        down_pdf_url = URL + knowledgeList_one['FILE_LINK']
        # Only PDF attachments are wanted; skip spreadsheets.
        if down_pdf_url.endswith('.xlsx'):
            continue
        # Skip duplicate links before doing any further work on the row.
        if down_pdf_url in all_down_url:
            continue

        # Convert '31/12/2023 18:00'-style timestamps to '2023-12-31'.
        date_obj = datetime.strptime(knowledgeList_one['DATE_TIME'], '%d/%m/%Y %H:%M')

        all_down_url.append(down_pdf_url)
        all_down_pdf_title.append(knowledgeList_one['TITLE'])
        all_down_pdf_text.append(knowledgeList_one['LONG_TEXT'])
        down_pdf_updatetimes.append(date_obj.strftime('%Y-%m-%d'))
        all_label.append(knowledgeList_one['STOCK_CODE'] + '.HK')

    # Download each collected file into /datapdf/<year>/<month>/<day>/.
    # The target directory depends only on today's date, so build it once.
    now = datetime.now()
    day_directory = os.path.join('/datapdf', str(now.year), str(now.month), str(now.day))
    os.makedirs(day_directory, exist_ok=True)

    lines_list = []
    for index, url in enumerate(all_down_url):

        # Titles usually look like '[code][name]title'; keep the text after
        # the second ']' when that structure is present.
        try:
            file_name = all_down_pdf_title[index].split("]")[2] + ".pdf"
        except IndexError:
            file_name = all_down_pdf_title[index] + ".pdf"

        # Keep only filesystem-safe characters.
        file_name = ''.join(c for c in file_name if c.isalnum() or c in ['.', '_', '-'])

        response = requests.get(url, headers=headers, timeout=60)
        # Don't save an error page under a .pdf name.
        if response.status_code != 200:
            print(f'Failed to download {url}. Status code: {response.status_code}')
            continue

        file_path = os.path.join(day_directory, file_name)
        with open(file_path, 'wb') as file:
            file.write(response.content)
        print(f'File {url} downloaded successfully: {file_path}')

        lines_list.append([all_label[index], 'hkexnews', file_path, down_pdf_updatetimes[index]])
        # Be polite to the server between downloads.
        time.sleep(5)

    print(f"【下载完成】【关键字】【{keywords_list_one}】")

    if not lines_list:
        continue

    # Persist one row per downloaded report.
    postgresql = Postgresql(host=Postgresqlhost, database=Postgresqldatabase, user=Postgresqluser, password=Postgresqlpassword)
    postgresql.create_connection()

    for lines_list_one in lines_list:
        data = {
            "stock_code": lines_list_one[0],
            "company_report_type": lines_list_one[1],
            "company_report_path": lines_list_one[2],
            "file_time": lines_list_one[3],
            "keyword": keywords_list_one
        }
        postgresql.execute_insert("company_report", data)
        postgresql.execute_commit()

    # Close the connection for this keyword batch.
    postgresql.quit_connection()
    print(f"【数据入库完成】【关键字】【{keywords_list_one}】")