import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
import requests
from kj_d1.base.session import get_session
from datetime import datetime, timedelta
import json
import random
from urllib.parse import urlencode, unquote, parse_qs
from kj_d1.base.path import ReportInfo
import pandas as pd
import re
import time


# Query window for announcement searches: the past 366 days up to today,
# both bounds formatted as 'YYYY-MM-DD'.
now_de = datetime.now()
start_dt = (now_de - timedelta(days=366)).strftime('%Y-%m-%d')
end_dt = now_de.strftime('%Y-%m-%d')


# stock_code = '600732'


def sh_file(stock_code):
    """Download the latest annual/semi-annual report PDF for an SSE-listed stock.

    Queries the Shanghai Stock Exchange bulletin API over the module-level
    [start_dt, end_dt] window, keeps bulletins typed '年报'/'半年报' whose
    title contains '年度报告' but not '摘要', and downloads the newest one.

    Args:
        stock_code: 6-digit SSE security code, e.g. '600732'.

    Returns:
        ReportInfo for the downloaded report; the PDF bytes are written to
        ``file_info.file_path.file_path``.

    Raises:
        requests.HTTPError: if either HTTP request returns an error status.
        FileNotFoundError: if no matching report exists in the window.
    """
    url = 'https://query.sse.com.cn/security/stock/queryCompanyBulletinNew.do'

    param = {
        'jsonCallBack': 'jsonpCallback89435024',
        'isPagination': 'true',
        'pageHelp.pageSize': '25',
        'pageHelp.cacheSize': '1',
        'START_DATE': start_dt,
        'END_DATE': end_dt,
        'SECURITY_CODE': stock_code,
        'TITLE': '',
        'BULLETIN_TYPE': '00,0101,0102,0104,0103',
        'stockType': '',
        'pageHelp.pageNo': '1',
        'pageHelp.beginPage': '1',
        'pageHelp.endPage': '1',
        '_': '1741570657954'
    }

    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
        'referer': 'https://www.sse.com.cn/'
    }

    # timeout + raise_for_status: the original could hang forever on a
    # stalled connection and silently parse an HTML error page as JSONP.
    resp = requests.get(url, headers=headers, params=param, timeout=30)
    resp.raise_for_status()
    # Response is JSONP: 'jsonpCallback89435024({...})'. Strip the callback
    # name, then the surrounding parentheses, before JSON-parsing.
    body = resp.text.replace('jsonpCallback89435024', '')[1:-1]
    pages = json.loads(body)['pageHelp']['data']

    candidates = []
    for page in pages:
        for item in page:
            if (item['BULLETIN_TYPE_DESC'] in ('年报', '半年报')
                    and '年度报告' in item['TITLE'] and '摘要' not in item['TITLE']):
                candidates.append(item)
    if not candidates:
        # Explicit error instead of the original bare IndexError on [0].
        raise FileNotFoundError('没有找到对应的文件')

    # Newest report first (SSEDATE is 'YYYY-MM-DD', so string sort works).
    candidates.sort(key=lambda x: x['SSEDATE'], reverse=True)
    latest = candidates[0]

    file_info = ReportInfo(latest['SECURITY_CODE'], latest['SECURITY_NAME'],
                           latest['TITLE'], latest['BULLETIN_TYPE_DESC'])
    # Report URLs are relative; the referer host is the document host too.
    pdf_resp = requests.get(f'{headers["referer"]}{latest["URL"]}',
                            headers=headers, timeout=60)
    pdf_resp.raise_for_status()
    with open(file_info.file_path.file_path, 'bw') as f:
        f.write(pdf_resp.content)
    return file_info


def sz_file(stock_code):
    """Download the latest annual report PDF for an SZSE-listed stock.

    Posts to the Shenzhen Stock Exchange announcement API over the
    module-level [start_dt, end_dt] window, keeps announcements whose title
    contains '年度报告' but not '摘要', and downloads the newest one.

    Args:
        stock_code: 6-digit SZSE security code.

    Returns:
        ReportInfo for the downloaded report; the PDF bytes are written to
        ``file_info.file_path.file_path``.

    Raises:
        requests.HTTPError: if either HTTP request returns an error status.
        FileNotFoundError: if no matching report exists in the window.
    """
    # random= mirrors the site's own cache-busting query parameter.
    url = f'http://www.szse.cn/api/disc/announcement/annList?random={random.random()}'

    payload = {"seDate": [start_dt, end_dt], "stock": [stock_code],
               "channelCode": ["listedNotice_disc"],
               "bigCategoryId": ["010303", "010301"],
               "pageSize": 50, "pageNum": 1}

    headers = {
        'Content-Type': 'application/json',
        'Referer': 'https://www.szse.cn/disclosure/listed/notice/index.html',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
    }

    # timeout + raise_for_status: avoid indefinite hangs and parsing error
    # pages as JSON (both missing in the original).
    resp = requests.post(url, headers=headers, data=json.dumps(payload), timeout=30)
    resp.raise_for_status()
    announcements = resp.json()['data']

    candidates = [x for x in announcements
                  if '年度报告' in x['title'] and '摘要' not in x['title']]
    if not candidates:
        # Explicit error instead of the original bare IndexError on [0].
        raise FileNotFoundError('没有找到对应的文件')
    candidates.sort(key=lambda x: x['publishTime'], reverse=True)
    latest = candidates[0]

    # secCode/secName come back as single-element lists — take element 0.
    file_info = ReportInfo(latest['secCode'][0], latest['secName'][0], latest['title'])
    pdf_resp = requests.get(f'http://disc.static.szse.cn/download{latest["attachPath"]}',
                            headers=headers, timeout=60)
    pdf_resp.raise_for_status()
    with open(file_info.file_path.file_path, 'bw') as f:
        f.write(pdf_resp.content)
    return file_info


def bj_file(stock_code):
    """Download the latest annual report PDF for a BSE-listed stock.

    Posts to the Beijing Stock Exchange announcement API over the
    module-level [start_dt, end_dt] window, keeps announcements whose title
    contains '年度报告' but not '摘要', and downloads the newest one.

    Args:
        stock_code: 6-digit BSE security code (typically starts with 4 or 8).

    Returns:
        ReportInfo for the downloaded report; the PDF bytes are written to
        ``file_info.file_path.file_path``.

    Raises:
        requests.HTTPError: if either HTTP request returns an error status.
        FileNotFoundError: if no matching report exists in the window.
    """
    url = "https://www.bse.cn/disclosureInfoController/companyAnnouncement.do?callback=jQuery331_1741596466173"

    # Form body captured verbatim from the BSE site; parse_qs turns the
    # URL-encoded string into a dict of lists that requests re-encodes.
    data = parse_qs(f'disclosureSubtype%5B%5D=9503-1001&disclosureSubtype%5B%5D=9503-1005&disclosureSubtype%5B%5D=9503-1002&disclosureSubtype%5B%5D=9503-1006&page=&companyCd={stock_code}&isNewThree=1&startTime={start_dt}&endTime={end_dt}&keyword=&xxfcbj%5B%5D=2&needFields%5B%5D=companyCd&needFields%5B%5D=companyName&needFields%5B%5D=disclosureTitle&needFields%5B%5D=disclosurePostTitle&needFields%5B%5D=destFilePath&needFields%5B%5D=publishDate&needFields%5B%5D=xxfcbj&needFields%5B%5D=destFilePath&needFields%5B%5D=fileExt&needFields%5B%5D=xxzrlx&sortfield=xxssdq&sorttype=asc')

    headers = {
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'Referer': 'https://www.bse.cn/disclosure/announcement.html',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
    }

    # timeout + raise_for_status: avoid indefinite hangs and parsing error
    # pages as JSONP (both missing in the original).
    resp = requests.post(url, headers=headers, data=data, timeout=30)
    resp.raise_for_status()
    # Response is JSONP: 'jQuery331_1741596466173([...])'. Strip the callback
    # name, then the surrounding parentheses, before JSON-parsing.
    body = resp.text.replace('jQuery331_1741596466173', '')[1:-1]
    content = json.loads(body)[0]['listInfo']['content']

    candidates = [x for x in content
                  if '年度报告' in x['disclosureTitle'] and '摘要' not in x['disclosureTitle']]
    if not candidates:
        # Explicit error instead of the original bare IndexError on [0].
        raise FileNotFoundError('没有找到对应的文件')
    candidates.sort(key=lambda x: x['publishDate'], reverse=True)
    latest = candidates[0]

    file_info = ReportInfo(latest['companyCd'], latest['companyName'], latest['disclosureTitle'])
    pdf_resp = requests.get(f'https://www.bse.cn{latest["destFilePath"]}',
                            headers=headers, timeout=60)
    pdf_resp.raise_for_status()
    with open(file_info.file_path.file_path, 'bw') as f:
        f.write(pdf_resp.content)
    return file_info


def clean_title(title):
    """Normalize a report title for use as a file name.

    Removes letters and special characters — keeping only digits,
    whitespace, CJK characters and basic punctuation — then collapses runs
    of whitespace into single spaces. Non-string inputs (e.g. NaN from
    pandas) are returned unchanged.
    """
    if not isinstance(title, str):
        return title
    kept = re.sub(r'[^\d\s\u4e00-\u9fff.,，。、:：()（）\-]', '', title)
    return re.sub(r'\s+', ' ', kept).strip()

def hk_file(stock_code, type='annual'):
    """Download the most recent HK report PDF for *stock_code*.

    Report URLs come from pre-crawled CSV files under ``kj_d1/Crawl/hk_urls/``
    whose filename contains *type* (default 'annual'). The newest row (by
    发布日期) for the stock is downloaded.

    Args:
        stock_code: 5-digit HKEX code, e.g. '02175'.
        type: substring selecting which CSV index to use. (Name shadows the
            builtin but is kept for backward compatibility with callers.)

    Returns:
        ReportInfo for the selected report. NOTE: a failed download is only
        printed (original best-effort behavior); callers should verify the
        file exists before use.

    Raises:
        FileNotFoundError: if no matching CSV exists, or the stock code has
            no rows in it.
    """
    file_list = [i for i in os.listdir('kj_d1/Crawl/hk_urls/')
                 if i.endswith('.csv') and type in i]
    if len(file_list) == 0:
        # Bug fix: the original `raise('...')` raised a TypeError ("exceptions
        # must derive from BaseException"), not the intended error.
        raise FileNotFoundError('没有找到对应的文件')

    df = pd.read_csv(f'kj_d1/Crawl/hk_urls/{file_list[0]}')
    df["报告标题"] = df["报告标题"].apply(clean_title)
    df['发布日期'] = pd.to_datetime(df['发布日期'])

    focus_df = df[df['股票代码'] == stock_code].sort_values(by='发布日期', ascending=False)
    if focus_df.empty:
        # Same TypeError bug fix as above.
        raise FileNotFoundError('没有找到对应的文件')

    latest = focus_df.iloc[0]
    url = latest['报告链接']
    company_name = latest['股票名称']
    report_type = latest['报告标题']
    file_info = ReportInfo(stock_code, company_name, report_type)
    file_nm = file_info.file_path.file_path
    try:
        # Stream the PDF to disk in chunks; timeout prevents a hung request.
        response = requests.get(url, stream=True, timeout=60)
        response.raise_for_status()
        with open(file_nm, "wb") as f:
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)
        print(f"已下载: {file_nm}")
        time.sleep(3)  # throttle so we don't hammer the server
    except Exception as e:
        # Best-effort (original behavior): log the failure and still return
        # file_info rather than aborting a batch run.
        print(f"下载失败 {stock_code}_{company_name}: {str(e)}")
    return file_info


def crawl_file(stock_code):
    """Download the latest annual report for *stock_code*, routing to the
    right exchange crawler based on the code's length and leading digit.

    Args:
        stock_code: bare numeric code — 6 digits for A-shares (4/8 → BSE,
            0/3 → SZSE, 6 → SSE), 5 digits for HKEX — with no '.SH'-style
            suffix.

    Returns:
        The ReportInfo returned by the exchange-specific downloader.

    Raises:
        ValueError: if the code carries a suffix or cannot be mapped to an
            exchange.
    """
    if '.' in stock_code:
        # Bug fix: the original `raise('...')` raised a TypeError ("exceptions
        # must derive from BaseException") and was followed by an unreachable
        # `return`; raise a real exception instead.
        raise ValueError('非法输入！包含后缀.SZ|SH|BJ|HK')

    if len(stock_code) == 6:
        if stock_code.startswith(('4', '8')):
            exchange = 'bj'
        elif stock_code.startswith(('0', '3')):
            exchange = 'sz'
        elif stock_code.startswith('6'):
            exchange = 'sh'
        else:
            raise ValueError('请核对交易代码')
    elif len(stock_code) == 5:
        exchange = 'hk'
    else:
        raise ValueError('请核对交易代码')

    # exchange is guaranteed to be one of these keys by the logic above.
    dispatch = {'sh': sh_file, 'sz': sz_file, 'bj': bj_file, 'hk': hk_file}
    return dispatch[exchange](stock_code)


if __name__ == '__main__':
    # Manual smoke test: fetch the latest report for an HK-listed code.
    stock_code = '02175'
    result = crawl_file(stock_code)
    print(result)