# Download the PDFs found by a title search on https://www1.hkexnews.hk/ (HKEX news) and upload them to Confluence.
import requests
import json
import os
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
from atlassian import Confluence
import shutil

# The HKEX search window is pinned to yesterday, formatted as YYYYMMDD.
today = datetime.now()
yesterday = today - timedelta(days=1)
yesterday_str = yesterday.strftime("%Y%m%d")

# Search keywords in traditional Chinese (the site indexes titles in traditional script).
# Simplified-script variant kept for reference:
# keywords_list = ["社会责任", "可持续发展", "ESG", "环境、社会及治理", "环境、社会及管治"]
keywords_list = ["社會責任", "可持續發展", "ESG", "環境、社會及治理", "環境、社會及管治", "年報"]
URL = 'https://www1.hkexnews.hk/'

# Browser-like headers so the site does not reject the requests.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0',
    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
}

# 删除再创建目录
def recreate_directory(directory):
    """Reset *directory* to an empty state.

    Removes the directory tree if it already exists, then creates a fresh,
    empty directory at the same path.
    """
    already_there = os.path.exists(directory)
    if already_there:
        shutil.rmtree(directory)
    os.makedirs(directory)

# --- Legacy implementation (HTML scraping of titlesearch.xhtml), superseded by
# --- the JSON-servlet version of get_data_json below. Kept for reference only.
# def get_data_json(searchKey, total_records=""):

#   url = "https://www1.hkexnews.hk/search/titlesearch.xhtml"

#   data = {
#     "lang": "ZH",
#     "category": 0,
#     "market": "SEHK",
#     "searchType": 0,
#     "documentType": -1,
#     "from": "20240323",
#     "to": "%s" %yesterday_str,
#     "MB-Daterange": 0,
#     # "title": searchKey,
#     "title": "財務",
#     "t1code": -2,
#     "t2Gcode": -2,
#     "t2code": -2,
#     "stockId": -1,
#     "rowRange": total_records
#   }
  
#   response = requests.post(url, data=data, headers=headers)

#   json_text = response.text


#   soup = BeautifulSoup(json_text, 'html.parser')

#   record_count = soup.find('div', class_='total-records').text.split(' ')[1]
#   if total_records == "":
#      return int(record_count)

#   tbody_content = soup.find('tbody')

#   documents = []

#   for tr in tbody_content.find_all('tr'):
#       release_time = tr.find('td', class_='release-time').text.strip().split(': ')[1]
#       stock_code = tr.find('td', class_='stock-short-code').text.strip().split(': ')[1]
#       stock_name = tr.find('td', class_='stock-short-name').text.strip().split(': ')[1]
#       document_type = tr.find('div', class_='headline').text.strip()
#       document_name = tr.find('div', class_='doc-link').text.strip().split('\n')[0]
#       document_link = tr.find('a')['href']
#       file_size = tr.find('span', class_='attachment_filesize').text

#       document = {
#           "release_time": release_time,
#           "stock_code": stock_code,
#           "stock_name": stock_name,
#           "document_type": document_type,
#           "document_name": document_name,
#           "document_link": document_link,
#           "file_size": file_size
#       }

#       documents.append(document)  

#   return documents
    
def get_data_json(searchKey, total_records=100):
  """Query the HKEX title-search servlet for yesterday's announcements.

  Args:
    searchKey: Title keyword to search for.
    total_records: Value for the servlet's ``rowRange`` parameter, i.e. the
      maximum number of rows to return (default 100).

  Returns:
    The parsed JSON response — a dict that includes ``recordCnt`` and a
    ``result`` field holding a JSON-encoded list of records — or ``False``
    when the HTTP request does not return status 200.  Callers must check
    for the ``False`` case before subscripting the result.
  """
  url = "https://www1.hkexnews.hk/search/titleSearchServlet.do"
  params = {
    'sortDir': 0,
    'sortByOptions': 'DateTime',
    'category': 0,
    'market': 'SEHK',
    'stockId': -1,
    'documentType': -1,
    # Restrict the search window to a single day: yesterday.
    'fromDate': yesterday_str,
    'toDate': yesterday_str,
    'title': searchKey,
    'searchType': 0,
    't1code': -2,
    't2Gcode': -2,
    't2code': -2,
    'rowRange': total_records,
    'lang': 'zh'
  }
  # Pass the module-level browser headers and a timeout so a stalled
  # connection cannot hang the whole job indefinitely.
  response = requests.get(url, params=params, headers=headers, timeout=30)

  if response.status_code == 200:
      # The servlet answers with a JSON document; let requests decode it.
      return response.json()
  else:
      print("Failed to retrieve data. Status code:", response.status_code)
      return False

# Connection to the target Confluence instance.
confluence = Confluence(
    url='http://doc.wangwuli.com:99/',
    username='admin',
    password='wangwuli')


space = "SPC"
# Cheap connectivity/credentials check: fails fast if the wiki is unreachable.
status = confluence.get_all_spaces()

now = datetime.now()
# Year / month / day strings used as the page hierarchy: YYYY -> YYYYMM -> YYYYMMDD.
current_year = now.strftime('%Y')
current_month = now.strftime('%Y%m')
current_day = now.strftime('%Y%m%d')


def _page_id(title):
    # get_page_by_title returns a page dict (or None); callers need the bare id.
    page = confluence.get_page_by_title(space, title)
    return page['id'] if page else None


# Ensure the YYYY / YYYYMM / YYYYMMDD page hierarchy exists.
if not confluence.page_exists(space, current_year):
    confluence.create_page(space, current_year, current_year, parent_id=None, type='page', representation='storage', editor='v2', full_width=False)
if not confluence.page_exists(space, current_month):
    # BUG FIX: pass the parent page *id*, not the whole page dict returned
    # by get_page_by_title, as parent_id.
    confluence.create_page(space, current_month, current_month, parent_id=_page_id(current_year), type='page', representation='storage', editor='v2', full_width=False)
if not confluence.page_exists(space, current_day):
    confluence.create_page(space, current_day, current_day, parent_id=_page_id(current_month), type='page', representation='storage', editor='v2', full_width=False)

# Bare id of today's page: used downstream as parent_id for document pages.
current_day_id = _page_id(current_day)

# BUG FIX: make sure the scratch download directory exists *before* any file
# is written (the original only recreated ./tmp after the run, so the first
# run crashed with FileNotFoundError).
recreate_directory("./tmp")

for keywords_list_one in keywords_list:

  # First request: learn how many records match this keyword.
  total_records = get_data_json(keywords_list_one)

  # get_data_json returns False on an HTTP failure; also skip empty results.
  if not total_records or total_records["recordCnt"] == 0:
     continue

  all_down_url = []
  all_down_pdf_title = []
  all_down_pdf_text = []
  all_label = []

  # Second request: fetch every matching record in one page.
  json_data = get_data_json(keywords_list_one, total_records["recordCnt"])
  if not json_data:
      continue

  # 'result' is itself a JSON-encoded list of record dicts.
  result_data_list = json.loads(json_data['result'])

  for knowledgeList_one in result_data_list:
      all_down_url.append(URL + knowledgeList_one['FILE_LINK'])
      all_down_pdf_title.append(knowledgeList_one['TITLE'])
      all_down_pdf_text.append(knowledgeList_one['LONG_TEXT'])

      # Normalise the release timestamp: dd/mm/YYYY HH:MM -> YYYY-mm-dd.
      date_obj = datetime.strptime(knowledgeList_one['DATE_TIME'], '%d/%m/%Y %H:%M')
      down_pdf_updatetime = date_obj.strftime('%Y-%m-%d')

      # Labels applied to the Confluence page: date, stock name, stock code.
      all_label.append([down_pdf_updatetime, knowledgeList_one['STOCK_NAME'], knowledgeList_one['STOCK_CODE']])

  # Download each file locally, then upload it to Confluence.
  for index, value in enumerate(all_down_url):

    url = value

    # Titles usually look like "[code][type]Name"; fall back to the raw
    # title when there are fewer than two bracketed prefixes.
    try:
      file_name = all_down_pdf_title[index].split("]")[2] + ".pdf"
    except IndexError:
       file_name = all_down_pdf_title[index] + ".pdf"

    # Strip characters that are unsafe in file names.
    file_name = ''.join(c for c in file_name if c.isalnum() or c in ['.', '_', '-'])
    pdf_text = all_down_pdf_text[index]

    # Timeout + status check so one bad link cannot hang or corrupt the run.
    response = requests.get(url, headers=headers, timeout=60)
    if response.status_code != 200:
        print(f'Failed to download {url}: status {response.status_code}')
        continue
    file_path = os.path.join("./tmp", file_name)
    with open(file_path, 'wb') as file:
        file.write(response.content)
    print(f'File {url} downloaded successfully: {file_path}')

    # Page body: the announcement's long text as a heading.
    page_title = file_name
    page_content = f'''
    <h2>{pdf_text}</h2>
    '''

    # Create the page only when no page with this title exists yet.
    if not confluence.page_exists(space, page_title):
        page_id_obj = confluence.create_page(space=space, title=page_title, body=page_content, parent_id=current_day_id)
        now_page_id = page_id_obj['id']

        # Attach the downloaded PDF to the page.
        confluence.attach_file(file_path, page_id=now_page_id)

        # Label with date / stock name / stock code.
        for all_label_one in all_label[index]:
          confluence.set_page_label(now_page_id, all_label_one)
    else:
        # BUG FIX: get_page_by_title returns a page dict; keep the bare id.
        existing_page = confluence.get_page_by_title(space, page_title)
        now_page_id = existing_page['id'] if existing_page else None

    print(f"confluence page successfully:{page_title}")

# Clean out the temporary download directory.
recreate_directory("./tmp")
