# 下载http://www.sse.com.cn/搜索内容的pdf,并上传至confluence
import requests
import json
import os
import re
from datetime import datetime, timedelta
from atlassian import Confluence
from datetime import datetime
import shutil

# Date window: the SSE search below is restricted to yesterday's publications.
today = datetime.now()
yesterday = today - timedelta(days=1)
# "YYYY-MM-DD" form expected by the query endpoint's publishTime parameters.
yesterday_str = yesterday.strftime("%Y-%m-%d")

# Search terms sent one at a time against the SSE disclosure search endpoint.
keywords_list = ["社会责任", "可持续发展", "ESG", "环境、社会及治理", "环境、社会及管治", "年报"]
URL = 'http://www.sse.com.cn'

# Browser-like request headers so the query endpoint accepts our traffic.
headers = {
    'Referer': URL,
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0',
}

# Reset a directory to a pristine empty state.
def recreate_directory(directory):
    """Delete *directory* recursively if it exists, then recreate it empty.

    Uses EAFP (try the removal, ignore FileNotFoundError) instead of the
    original exists()-then-rmtree check, which had a check-then-act race.
    Any other OSError (e.g. permission denied) still propagates.
    """
    try:
        shutil.rmtree(directory)
    except FileNotFoundError:
        pass  # nothing to remove — first run, or already cleaned up
    os.makedirs(directory)

def downlourl(keywords_list_one, limit=10, date_str=None):
    """Build the SSE ES-search URL for one keyword.

    Args:
        keywords_list_one: keyword searched in title and paper content.
        limit: maximum number of results returned (page is pinned to 0).
        date_str: publication date as "YYYY-MM-DD"; defaults to yesterday,
            matching the value the module previously read from the global
            ``yesterday_str`` (generalized so the function is self-contained
            and testable).

    Returns:
        The fully assembled JSONP query URL.
    """
    if date_str is None:
        date_str = (datetime.now() - timedelta(days=1)).strftime("%Y-%m-%d")
    url = (
        "http://query.sse.com.cn/search/getESSearchDoc.do?"
        "jsonCallBack=jsonpCallback36522469&searchword=&page=0"
        f"&limit={limit}&spaceId=3&orderByDirection=DESC&orderByKey=create_time"
        f"&searchMode=precise&keyword={keywords_list_one}&siteName=sse"
        "&keywordPosition=title%2Cpaper_content"
        f"&publishTimeStart={date_str}+00%3A00%3A00"
        f"&publishTimeEnd={date_str}+23%3A59%3A59"
        "&channelId=10001&trackId=40093479452572385582371833570647&_=1711439876521"
    )
    return url

def get_data_json(url):
    """Fetch a JSONP response from *url* and return the decoded JSON payload.

    The endpoint wraps its JSON in a ``jsonpCallbackNNN(...)`` envelope;
    the wrapper is stripped with a regex before parsing.

    Raises:
        requests.HTTPError: on a non-2xx response (bug fix: previously an
            error page would fall through to json.loads and fail cryptically).
        json.JSONDecodeError: if the stripped text is not valid JSON.
    """
    # robustness fix: never hang forever on a dead connection
    response = requests.get(url, headers=headers, timeout=30)
    response.raise_for_status()
    response.encoding = 'utf-8'
    text = response.text.strip()
    # bug fix: re.DOTALL so the anchored wrapper regex still matches when the
    # JSON body contains newlines ('.' does not cross lines by default).
    json_text = re.sub(r'^jsonpCallback\d+\((.*)\)$', r'\1', text, flags=re.DOTALL)
    return json.loads(json_text)

# Confluence client used for every page/attachment operation below.
# NOTE(review): credentials are hard-coded in plain text — move them to
# environment variables or a secrets store.
confluence = Confluence(
    url='http://doc.wangwuli.com:99/',
    username='admin',
    password='wangwuli')


# Space key under which the year/month/day page hierarchy is created.
space = "SPC"
# NOTE(review): `status` is never read again — presumably this call only acts
# as a connectivity/credentials check at startup; confirm before removing.
status = confluence.get_all_spaces()

# Build (if missing) a Year / YearMonth / YearMonthDay page hierarchy in the
# space, and resolve the id of today's day page — it becomes the parent of
# every per-PDF page created by the main loop below.
now = datetime.now()
current_year = now.strftime('%Y')
current_month = now.strftime('%Y%m')
current_day = now.strftime('%Y%m%d')

if not confluence.page_exists(space, current_year):
    confluence.create_page(space, current_year, current_year, parent_id=None, type='page', representation='storage', editor='v2', full_width=False)
if not confluence.page_exists(space, current_month):
    # Bug fix: get_page_by_title() returns the whole page dict, but
    # create_page() expects a page id for parent_id — use get_page_id().
    current_year_id = confluence.get_page_id(space, current_year)
    confluence.create_page(space, current_month, current_month, parent_id=current_year_id, type='page', representation='storage', editor='v2', full_width=False)
if not confluence.page_exists(space, current_day):
    current_month_id = confluence.get_page_id(space, current_month)
    confluence.create_page(space, current_day, current_day, parent_id=current_month_id, type='page', representation='storage', editor='v2', full_width=False)

# Parent id for all PDF pages created in the main loop.
current_day_id = confluence.get_page_id(space, current_day)

# Download every matching PDF into ./tmp and mirror each one as a Confluence
# page (with the PDF attached) under today's day page.
#
# Bug fix: ./tmp must exist before the first download is written; previously
# it was only (re)created by the cleanup call at the very end of the script,
# so a fresh run crashed on the first open().
recreate_directory("./tmp")

for keyword in keywords_list:

    # First query: learn the total number of hits for this keyword.
    init_info = get_data_json(downlourl(keyword))
    total_size = init_info['data']['totalSize']

    # Second query: fetch every hit in a single page.
    json_data = get_data_json(downlourl(keyword, limit=total_size))

    knowledge_list = json_data['data']['knowledgeList']
    if knowledge_list is None:  # idiom fix: was `== None`
        continue

    all_down_url = []
    all_down_pdf_title = []
    all_down_pdf_updatetime = []
    all_label = []
    for entry in knowledge_list:
        down_pdf_url = URL + entry['extend'][4]['value']
        down_pdf_title = entry['title']
        down_pdf_updatetime = entry['updateTime'].split()[0]
        # Keep only real PDF links; other entries are non-file results.
        if down_pdf_url.endswith(".pdf"):
            all_down_url.append(down_pdf_url)
            all_down_pdf_title.append(down_pdf_title)
            all_down_pdf_updatetime.append(down_pdf_updatetime)
            # Labels: extend[8] and extend[5] (presumably category/company
            # fields — confirm against the API) plus the publish date.
            all_label.append([entry['extend'][8]['value'],
                              entry['extend'][5]['value'],
                              down_pdf_updatetime])

    # Download each PDF locally, then create/attach its Confluence page.
    for index, url in enumerate(all_down_url):
        # Titles usually look like "[code][company]name"; strip the two
        # bracketed prefixes, falling back to the whole title.
        try:
            file_name = all_down_pdf_title[index].split("]")[2] + ".pdf"
        except IndexError:  # bug fix: was a bare except hiding real errors
            file_name = all_down_pdf_title[index] + ".pdf"

        # Keep only filesystem-safe characters in the file name.
        file_name = ''.join(c for c in file_name if c.isalnum() or c in ['.', '_', '-'])
        file_time = all_down_pdf_updatetime[index]

        # robustness fix: send the same browser headers as the search calls
        # and never hang forever on a dead connection.
        response = requests.get(url, headers=headers, timeout=60)
        file_path = os.path.join("./tmp", file_name)
        with open(file_path, 'wb') as file:
            file.write(response.content)
        print(f'File {url} downloaded successfully: {file_path}')

        # Page body is just the publication-date heading.
        page_title = file_name
        page_content = f'''
    <h2>{file_time}</h2>
    '''

        # Create the page only if it does not already exist.
        if not confluence.page_exists(space, page_title):
            created = confluence.create_page(space=space, title=page_title, body=page_content, parent_id=current_day_id)
            now_page_id = created['id']
            # Attach the PDF, then tag the page with its labels.
            confluence.attach_file(file_path, page_id=now_page_id)
            for label in all_label[index]:
                if label is not None:  # idiom fix: was `!= None`
                    confluence.set_page_label(now_page_id, label)
        else:
            # bug fix: get_page_by_title() returns a page dict, not an id.
            now_page_id = confluence.get_page_id(space, page_title)

        print(f"confluence page successfully:{page_title}")

# Leave an empty ./tmp behind so the next run starts clean.
recreate_directory("./tmp")
