# Download PDFs found via search on http://www.szse.cn/ and upload them to Confluence
import requests
import json
import os
import re
from datetime import datetime, timedelta
from atlassian import Confluence
from datetime import datetime
import shutil

# --- Crawl window: the script always targets yesterday's announcements. ---
yesterday_str = (datetime.now() - timedelta(days=1)).strftime("%Y-%m-%d")

# Keywords searched on the SZSE disclosure channel (CSR / ESG / annual reports).
keywords_list = ["社会责任", "可持续发展", "ESG", "环境、社会及治理", "环境、社会及管治", "年报"]

URL = 'http://www.szse.cn/'

# Base URL for fetching a single announcement PDF by its id.
down_url = "https://www.szse.cn/api/disc/info/download?id="

headers = {
    'Origin': URL,
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0',
    'Content-Type': 'application/json'
}

def recreate_directory(directory):
    """Reset *directory* to an empty state: remove it if present, then create it fresh."""
    try:
        shutil.rmtree(directory)
    except FileNotFoundError:
        # Nothing to remove on first use -- just create the directory below.
        pass
    os.makedirs(directory)

def get_data_json(searchKey, page_num=1):
    """Query the SZSE announcement-list API for *searchKey* on yesterday's date.

    Parameters
    ----------
    searchKey : str
        Keyword to match in announcement titles.
    page_num : int
        1-based page index; the API serves at most 50 records per page.

    Returns
    -------
    dict
        Parsed JSON payload; 'announceCount' holds the total hit count and
        'data' the records for this page.
    """
    url = "http://www.szse.cn/api/disc/announcement/annList"

    data = {
        # Single-day window: yesterday to yesterday.
        "seDate": [yesterday_str, yesterday_str],
        "searchKey": [searchKey],
        "channelCode": ["listedNotice_disc"],
        "pageSize": 50,
        "pageNum": page_num,
    }

    # Timeout keeps the crawler from hanging forever on a stalled connection.
    response = requests.post(url, data=json.dumps(data), headers=headers, timeout=60)
    return response.json()

# Confluence client.
# NOTE(review): credentials are hard-coded -- consider moving them to
# environment variables or a config file.
confluence = Confluence(
    url='http://doc.wangwuli.com:99/',
    username='admin',
    password='wangwuli')


space = "SPC"
# NOTE(review): result unused; presumably kept as a connectivity smoke test.
status = confluence.get_all_spaces()

# Build (or reuse) the page hierarchy for today's run: YYYY -> YYYYMM -> YYYYMMDD.
now = datetime.now()
current_year = now.strftime('%Y')
current_month = now.strftime('%Y%m')
current_day = now.strftime('%Y%m%d')

if not confluence.page_exists(space, current_year):
    confluence.create_page(space, current_year, current_year, parent_id=None, type='page', representation='storage', editor='v2', full_width=False)
if not confluence.page_exists(space, current_month):
    # BUGFIX: get_page_by_title returns a page dict, not an id; get_page_id
    # returns the numeric id that create_page expects as parent_id.
    current_year_id = confluence.get_page_id(space, current_year)
    confluence.create_page(space, current_month, current_month, parent_id=current_year_id, type='page', representation='storage', editor='v2', full_width=False)
if not confluence.page_exists(space, current_day):
    current_month_id = confluence.get_page_id(space, current_month)
    confluence.create_page(space, current_day, current_day, parent_id=current_month_id, type='page', representation='storage', editor='v2', full_width=False)

# Parent page id under which every downloaded PDF page is created.
current_day_id = confluence.get_page_id(space, current_day)

def _collect_announcements(keyword):
    """Return a list of (download_url, title, publish_date, labels) for *keyword*.

    Pages through the announcement API (50 records per page) and flattens
    every record into one tuple; returns [] when there are no hits.
    """
    first_page = get_data_json(keyword)
    total_size = first_page['announceCount']
    if total_size == 0:
        return []

    # Exact page count: ceil(total / 50) without importing math.
    page_count = (total_size + 49) // 50
    if page_count == 1:
        pages = [first_page]
    else:
        # Re-fetch from page 1 so every record comes from the same query shape.
        pages = (get_data_json(keyword, n) for n in range(1, page_count + 1))

    records = []
    for page in pages:
        for ann in page['data']:
            publish_date = ann['publishTime'].split()[0]
            records.append((
                down_url + ann['id'],
                ann['title'],
                publish_date,
                # secCode / secName are lists; the first entry becomes a label.
                [publish_date, ann['secCode'][0], ann['secName'][0]],
            ))
    return records


def _safe_pdf_name(title):
    """Derive a filesystem-safe '.pdf' file name from an announcement title."""
    try:
        # Titles usually look like "[code][name]Actual title" -- keep the tail.
        name = title.split("]")[2] + ".pdf"
    except IndexError:
        name = title + ".pdf"
    # Keep only alphanumerics (includes CJK characters), '.', '_' and '-'.
    return ''.join(c for c in name if c.isalnum() or c in ('.', '_', '-'))


# BUGFIX: make sure the download directory exists before the first write;
# previously it was only (re)created by the cleanup step at the end, so a
# first run on a clean machine crashed on open().
os.makedirs('./tmp', exist_ok=True)

# Download each matching PDF locally, then mirror it to Confluence.
for keyword in keywords_list:
    for pdf_url, title, publish_date, labels in _collect_announcements(keyword):

        file_name = _safe_pdf_name(title)

        response = requests.get(pdf_url, timeout=60)
        file_path = os.path.join('./tmp', file_name)
        with open(file_path, 'wb') as fh:
            fh.write(response.content)
        print(f'File {pdf_url} downloaded successfully: {file_path}')

        # Page body only records the publish date; the PDF goes in as attachment.
        page_title = file_name
        page_content = f'''
        <h2>{publish_date}</h2>
        '''

        if not confluence.page_exists(space, page_title):
            page = confluence.create_page(space=space, title=page_title, body=page_content, parent_id=current_day_id)
            now_page_id = page['id']
            # Attach the downloaded PDF to the freshly created page.
            confluence.attach_file(file_path, page_id=now_page_id)
            # Labels: publish date, security code, security name ('*' is not
            # accepted in Confluence labels, so strip it).
            for label in labels:
                confluence.set_page_label(now_page_id, label.replace("*", ""))
        else:
            now_page_id = confluence.get_page_by_title(space, page_title)

        print(f"confluence page successfully:{page_title}")

# Clean up: delete and recreate the temporary download directory ./tmp.
recreate_directory("./tmp")
