#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Create by zhang
# Create on 2022/6/29 21:12
import os
import re
import math
import json
from time import sleep
from typing import List

import requests
from copy import deepcopy

from config.config import config
from domain.report.common.period_enum import PeriodEnum
from domain.report.common.period_mapper import PeriodMapper
from infrastructure.util.dateutils import get_current_year, get_dateStr_from_dateTime, today, DateFormat

# Base directory under which downloaded report PDFs are stored (one
# sub-directory per stock code), taken from the application config.
report_base_path = config.get("data", "report_pdf")


# SZSE (Shenzhen Stock Exchange) announcement-list API endpoint.
URL = 'http://www.szse.cn/api/disc/announcement/annList'

# Browser-like request headers; the SZSE endpoint expects an AJAX-style
# JSON POST originating from its own disclosure page.
HEADER = {
    'Host': 'www.szse.cn',
    'Origin': 'http://www.szse.cn',
    'Referer': 'http://www.szse.cn/disclosure/listed/fixed/index.html',
    'User-Agent': "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 "
                  "(KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36",
    'Content-Type': 'application/json',
    'Connection': 'keep-alive',
    'X-Request-Type': 'ajax',
    'X-Requested-With': 'XMLHttpRequest',
}

# Number of announcements requested per API page; also used to compute
# the total page count from the response's `announceCount`.
PAGE_SIZE = 30

# Template for the API request body; deep-copied per request and the
# `stock` / `seDate` / `pageNum` fields are filled in before POSTing.
# "fixed_disc" is the channel for periodic (定期) disclosures.
PAYLOAD = {
    'channelCode': ["fixed_disc"],
    'pageNum': 1,
    'pageSize': PAGE_SIZE,
    'seDate': ["", ""],
    'stock': ["000001"],
}

# Attachment paths in the API response are relative; prefix with this
# static-content host to obtain the full download URL.
PDF_URL_PREFIX = 'http://disc.static.szse.cn/download'

# Saved-file naming scheme: {code}_{period}_{year}_{publish_date}.
file_name_pattern = "{}_{}_{}_{}"


# Report titles look like "平安银行：2021年半年度报告" or
# "平安银行：2021年第一季度报告摘要". The half-year pattern is tried first
# because "半年度报告" would otherwise also be matched by the "年度" branch.
_HALF_YEAR_RE = re.compile(r'.*(\d{4})年.*(半年度)报告(摘要)*')
_OTHER_PERIOD_RE = re.compile(r'.*(\d{4})年.*(一季度|三季度|年度)报告(摘要)*')


def get_year_period(title: str):
    """Extract (year, period, summary marker) from a report title.

    Args:
        title: announcement title, e.g. "XX银行：2021年年度报告摘要".

    Returns:
        A 3-tuple ``(year, period_zh, summary)`` where ``year`` is the
        4-digit year string, ``period_zh`` is one of 半年度/一季度/三季度/年度,
        and ``summary`` is "摘要" for summary announcements or None for
        full reports.

    Raises:
        ValueError: if the title is not a recognizable periodic-report
            title. (Previously an unmatched title crashed with an opaque
            AttributeError on ``None.groups()``.)
    """
    matcher = _HALF_YEAR_RE.match(title) or _OTHER_PERIOD_RE.match(title)
    if matcher is None:
        raise ValueError(f"unrecognized report title: {title!r}")
    return matcher.groups()


def _collect_report_urls(code, items, period, pdf_urls):
    """Parse one API page of announcements, appending (file_name, pdf_url)
    tuples for full (non-summary) reports that match the requested period."""
    for item in items:
        year, period_str, summary = get_year_period(item['title'])
        if summary is not None:
            continue  # skip "...报告摘要" summary announcements
        if period_str in PeriodMapper.get_zh_period(period):
            # publish date as YYYYMMDD, e.g. "2022-03-10 00:00:00" -> "20220310"
            publish_date = item['publishTime'].split(" ")[0].replace("-", "")
            file_name = file_name_pattern.format(code, period_str, year, publish_date)
            pdf_urls.append((file_name, PDF_URL_PREFIX + item['attachPath']))


def _get_pdf_url(code, begin_date, end_date, period: PeriodEnum = PeriodEnum.all):
    """Return (file_name, pdf_url) pairs for `code`'s periodic reports
    published within [begin_date, end_date], paging through all results.

    BUG FIX: the pagination loop previously never re-parsed each item's
    title — it reused the year/period from the LAST item of page 1 for
    every item on pages 2+ (and never skipped summaries there, and raised
    NameError if page 1 was empty). Every item is now parsed individually
    via the shared helper.
    """
    pdf_urls = []
    payload = deepcopy(PAYLOAD)
    payload['stock'] = [code]
    payload['seDate'] = [begin_date, end_date]
    # timeout added so a stalled connection cannot hang the caller forever
    res = requests.post(URL, data=json.dumps(payload), headers=HEADER, timeout=30).json()
    _collect_report_urls(code, res['data'], period, pdf_urls)
    page_count = math.ceil(res['announceCount'] / PAGE_SIZE)
    for page_num in range(2, page_count + 1):
        sleep(1)  # throttle requests to the exchange's API
        payload['pageNum'] = page_num
        res = requests.post(URL, data=json.dumps(payload), headers=HEADER, timeout=30).json()
        _collect_report_urls(code, res['data'], period, pdf_urls)
    return pdf_urls


def _save_pdf(code, begin_date='', end_date='', period: PeriodEnum = PeriodEnum.all):
    """Download the matching report PDFs for `code` into
    `{report_base_path}/{code}/` and return the list of newly saved base
    file names (already-existing files are skipped).

    BUG FIXES vs. the previous version:
      * the function never returned `saved_names`, so callers always got
        None instead of the promised List[str];
      * the download counter used for inter-download pacing was clobbered
        by an unrelated `count = 0` inside the write loop;
      * `retry_num` was never incremented, so the "retry" was a single
        hard-coded second attempt — replaced with a bounded retry loop.
    """
    pdf_urls = _get_pdf_url(code, begin_date, end_date, period)
    file_path = os.path.join(report_base_path, code)
    saved_names = []
    os.makedirs(file_path, exist_ok=True)
    downloaded = 0
    for file_name, url in pdf_urls:
        extension = url.split('.')[-1].lower()
        # '*' is illegal in Windows file names; strip it defensively
        file_full_name = os.path.join(file_path, '.'.join([file_name, extension])).replace('*', '')
        if os.path.exists(file_full_name):
            continue
        if downloaded > 0:
            sleep(3)  # throttle between successive downloads
        rs = None
        for attempt in range(3):  # 1 try + up to 2 retries
            if attempt > 0:
                sleep(3)
            rs = requests.get(url, stream=True, timeout=30)
            if rs is not None and rs.status_code == 200:
                break
        if rs is None or rs.status_code != 200:
            continue  # best-effort: skip this file, keep downloading the rest
        with open(file_full_name, "wb") as fp:
            for chunk in rs.iter_content(chunk_size=10240):
                if chunk:
                    fp.write(chunk)
        saved_names.append(file_name)
        downloaded += 1
    return saved_names


def download_report(stock_id: str, year: int, period: PeriodEnum = PeriodEnum.year) -> List[str]:
    """Download `stock_id`'s periodic reports published from `year`-01-01
    through today, returning the list of newly saved file names.

    Args:
        stock_id: stock code, optionally suffixed with an exchange
            qualifier (e.g. "000001.SZ" — the suffix is stripped).
        year: earliest publication year; must be within [1990, current year].
        period: which report periods to fetch; None is treated as all.

    Returns:
        List of saved base file names; empty list for invalid input.

    BUG FIX: `period` was validated/normalized but never forwarded to
    `_save_pdf`, so callers always received the default period filter.
    """
    if stock_id is None or len(stock_id) == 0 or year < 1990 or year > get_current_year():
        return list()
    if period is None:
        period = PeriodEnum.all
    stock_id = stock_id.split(".")[0]  # drop exchange suffix, e.g. "000001.SZ" -> "000001"
    begin_date = f"{year}-01-01"
    end_date = get_dateStr_from_dateTime(today(), dateFormat=DateFormat.Y_m_d)
    return _save_pdf(stock_id, begin_date=begin_date, end_date=end_date, period=period)
