# 下载http://www.sse.com.cn/搜索内容的pdf,并信息存pg库
import json
import os
import re
import sys
import time
import urllib.parse
from datetime import datetime, timedelta

import requests

# Put the project root on sys.path so the local postgresql package resolves.
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../'))
sys.path.append(project_root)
from postgresql.postgresql import Postgresql

# Database credentials and the optional crawl-window start come from the
# environment so the script can run unmodified in different deployments.
Postgresqlhost = os.environ.get('Postgresqlhost')
Postgresqldatabase = os.environ.get('Postgresqldatabase')
Postgresqluser = os.environ.get('Postgresqluser')
Postgresqlpassword = os.environ.get('Postgresqlpassword')
StartTime = os.environ.get('StartTime')

# Yesterday (as "YYYY-MM-DD") is both the window end and the default start.
today = datetime.now()
yesterday = today - timedelta(days=1)
yesterday_str = yesterday.strftime("%Y-%m-%d")

# An explicit StartTime overrides the default one-day window.
startday_str = StartTime if StartTime else yesterday_str

# Search keywords: ESG / CSR / sustainability / annual-report variants.
keywords_list = ["ESG", "社会责任", "可持续发展", "环境、社会及治理", "环境、社会及管治", "年报", "年度报告"]

URL = 'http://www.sse.com.cn'

# Sent with every request; the Referer is required by the SSE endpoints.
headers = {
    'Referer': URL,
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0'
}

def downlourl(keywords_list_one, limit=10, start=None, end=None):
    """Build the SSE full-text search API URL for one keyword.

    Args:
        keywords_list_one: search keyword (may contain CJK characters).
        limit: page size; pass the reported totalSize to fetch every hit.
        start: publish-time window start, "YYYY-MM-DD"; defaults to the
            module-level ``startday_str``.
        end: publish-time window end, "YYYY-MM-DD"; defaults to the
            module-level ``yesterday_str``.

    Returns:
        The JSONP search endpoint URL, time-bounded to
        [start 00:00:00, end 23:59:59].
    """
    if start is None:
        start = startday_str
    if end is None:
        end = yesterday_str
    # Percent-encode the keyword explicitly so the URL is valid RFC 3986,
    # instead of relying on the HTTP client to encode raw CJK characters.
    keyword = urllib.parse.quote(keywords_list_one)
    url = f"http://query.sse.com.cn/search/getESSearchDoc.do?jsonCallBack=jsonpCallback36522469&searchword=&page=0&limit={limit}&spaceId=3&orderByDirection=DESC&orderByKey=create_time&searchMode=precise&keyword={keyword}&siteName=sse&keywordPosition=title%2Cpaper_content&publishTimeStart={start}+00%3A00%3A00&publishTimeEnd={end}+23%3A59%3A59&channelId=10001&trackId=40093479452572385582371833570647&_=1711439876521"
    return url

def get_data_json(url):
    """Fetch a JSONP search response and return the decoded JSON payload.

    Args:
        url: full search-API URL (as built by ``downlourl``).

    Returns:
        The parsed payload (dict) with the JSONP callback wrapper removed.

    Raises:
        requests.HTTPError: on a non-2xx response.
        json.JSONDecodeError: if the unwrapped payload is not valid JSON.
    """
    # Bound the wait so a stalled server cannot hang the crawl forever.
    response = requests.get(url, headers=headers, timeout=30)
    response.raise_for_status()
    response.encoding = 'utf-8'
    text = response.text
    # Strip the "jsonpCallbackNNN(...)" wrapper. re.S lets '.' span
    # newlines; without it a multi-line payload was silently left wrapped
    # and json.loads then failed.
    json_text = re.sub(r'^jsonpCallback\d+\((.*)\)$', r'\1', text, flags=re.S)
    json_data = json.loads(json_text)
    return json_data

# For each keyword: search, collect PDF hits, download them to a dated
# directory, then record one row per file in the company_report table.
for keywords_list_one in keywords_list:
    print(f"【下载开始】【关键字】【{keywords_list_one}】")
    # First request: only to learn the total number of hits.
    init_data_info_json = get_data_json(downlourl(keywords_list_one))

    # Second request: fetch every hit in a single page of size totalSize.
    total_size = init_data_info_json['data']['totalSize']
    json_data = get_data_json(downlourl(keywords_list_one, limit=total_size))

    # Parallel lists; entries at the same index describe the same document.
    all_down_url = []
    all_down_pdf_title = []
    all_down_pdf_updatetime = []
    all_label = []
    if json_data['data']['knowledgeList'] is None:
        continue
    for knowledgeList_one in json_data['data']['knowledgeList']:
        # extend[4] holds the document path, extend[8] the stock code
        # (observed API layout — confirm if the endpoint changes).
        down_pdf_url = URL + knowledgeList_one['extend'][4]['value']

        # Skip duplicate URLs.
        if down_pdf_url in all_down_url:
            continue
        # Skip records without a stock code (non-filing information).
        stock_code = knowledgeList_one['extend'][8]['value']
        if not stock_code:
            continue
        # Only keep actual PDF documents. Append to ALL parallel lists
        # together: the original appended the label before the .pdf check,
        # so a non-PDF hit with a stock code desynchronized the indices.
        if down_pdf_url.endswith(".pdf"):
            all_label.append(stock_code + '.SH')
            all_down_url.append(down_pdf_url)
            all_down_pdf_title.append(knowledgeList_one['title'])
            all_down_pdf_updatetime.append(knowledgeList_one['updateTime'].split()[0])

    # Download each file into /datapdf/<year>/<month>/<day>/.
    lines_list = []
    for index, value in enumerate(all_down_url):
        url = value
        try:
            # Titles look like "[a][b]name" — keep the part after the
            # second ']' when present, otherwise use the whole title.
            file_name = all_down_pdf_title[index].split("]")[2] + ".pdf"
        except IndexError:
            file_name = all_down_pdf_title[index] + ".pdf"

        # Keep only filesystem-safe characters in the file name.
        file_name = ''.join(c for c in file_name if c.isalnum() or c in ['.', '_', '-'])
        file_time = all_down_pdf_updatetime[index]

        # Use the same headers as the search calls and bound the wait.
        response = requests.get(url, headers=headers, timeout=60)

        base_directory = '/datapdf'
        now = datetime.now()
        day_directory = os.path.join(base_directory, str(now.year), str(now.month), str(now.day))
        os.makedirs(day_directory, exist_ok=True)

        try:
            file_path = os.path.join(day_directory, file_name)
            with open(file_path, 'wb') as file:
                file.write(response.content)
            print(f'File {url} downloaded successfully: {file_path}')
        except OSError as e:
            print(f"An error occurred: {e}")
            # Skip this document and keep crawling the rest.
            continue

        lines_list.append([all_label[index], 'sse', file_path, file_time])
        time.sleep(5)  # throttle so we do not hammer the server

    # One PostgreSQL connection per keyword batch.
    postgresql = Postgresql(host=Postgresqlhost, database=Postgresqldatabase, user=Postgresqluser, password=Postgresqlpassword)
    postgresql.create_connection()

    print(f"【下载完成】【关键字】【{keywords_list_one}】")

    # Insert one row per downloaded report, committing after each insert.
    for lines_list_one in lines_list:
        data = {
            "stock_code": lines_list_one[0],
            "company_report_type": lines_list_one[1],
            "company_report_path": lines_list_one[2],
            "file_time": lines_list_one[3],
            "keyword": keywords_list_one
        }
        postgresql.execute_insert("company_report", data)
        postgresql.execute_commit()

    # Release the connection before moving to the next keyword.
    postgresql.quit_connection()
    print(f"【数据入库完成】【关键字】【{keywords_list_one}】")
