# _*_ coding:utf-8 _*_
# @Time  : 2022-02-15
# @Author: zizle


# 获取期货日报的文章数据和PDF数据
import random
import time
import json
import datetime
import pathlib
import requests
import urllib3
from loguru import logger
urllib3.disable_warnings()

logger.add('error.log')

# Fetch the article listing

# Session cookie captured from a logged-in browser session; only sent when
# USE_COOKIE is enabled.
LOGIN_COOKIE = "ASP.NET_SessionId=jrmfo121jsnmcfghb00xzfue; FuturesDaily_Mac=2E28C9424E003194DB382B68C66CF0F6; FuturesDaily_Licence=3DBFA84844D0A476D9417D980C66ADA4; FuturesDaily_2018/*=7E00C1AEB6152D864DEA6E66A29BE9DB2701FE629066F9A5335018BF0A0098536F80CEDE8FC17647A2FDFD4599C1F86B1B20CBC5BC4C36BE8794CAD1A4BA60031D3F11C6909E290086DB99895BB432BC7D957524C59C72ACADF2472AC1857B2FDCB6F27476E13A09DA9FA1C56932728A3FADEAAAB8A8A856E33CE9565A2AD70F5F056003C17483F7F9358DA5B22CF1B144F75BB4D05502843802B2318A203B8C54C178F5B6B1385B6997F5086A93944194AAE7349A8065093B8318DED6BA60C0E89C5195B37901F1707D52F0D69C41E349A731ADF2C72BBE19CBCBAA61D7133E7494A12DC074FBA1"
# When True, the LOGIN_COOKIE header is attached to every request.
USE_COOKIE = False
# Proxy is only used together with the cookie; otherwise falsy (no proxy).
USE_PROXIES = {'https': '221.4.241.198:9091'} if USE_COOKIE else False


def get_grid_json(today, page):
    """Fetch one page of the article grid listing for *today* and dump the raw
    JSON response to '<today>_<page>.json'; recurses until the server-reported
    total page count is reached.

    :param today: date string 'YYYY-MM-DD' used both in the query and file name
    :param page: 1-based page number to request
    """
    grid_url = 'http://paper.7h365.com/Members/GetGridJson'
    headers = {
        'Host': 'paper.7h365.com',
        'Connection': 'keep-alive',
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36',
        'X-Requested-With': 'XMLHttpRequest',
        'Referer': 'http://paper.7h365.com/Members/MemberIndex',
        'Accept-Encoding': 'gzip,deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8'
    }
    if USE_COOKIE:
        headers['Cookie'] = LOGIN_COOKIE
    # millisecond timestamp, mimics the site's cache-busting 'nd' parameter
    now_ts = int(datetime.datetime.now().timestamp() * 1000)
    # NB: the server expects the key 'v_parms', NOT 'v_params'.
    s = '{"v_num":"","v_author":"","v_date":"' + today + '","v_keyword":"","v_parms":"searchclick"}'
    params = {
        '_search': 'false',
        'nd': now_ts,
        'rows': 50,
        'page': page,
        'sidx': 'Num desc,Layout asc',
        'sord': 'asc',
        'queryJson': s
    }
    try:
        # timeout added so a stalled connection cannot hang the scraper
        # forever; proxies=None is equivalent to not passing proxies at all.
        r = requests.get(url=grid_url, headers=headers, params=params,
                         proxies=USE_PROXIES or None, timeout=30)
        resp = r.json()
    except Exception as e:
        print(f'发起请求数据失败{e}')
        logger.error(f'发起请求数据失败{e}')
    else:
        # cache the listing locally; later steps read these files
        with open(f'{today}_{page}.json', 'w', encoding='utf8') as fp:
            json.dump(resp, fp, indent=4)
        print(f'保存{today}_{page}的数据列表成功!')
        if page < resp['total']:
            get_grid_json(today, page + 1)


def get_article_content(today, page):
    """Read the cached listing file for *today*/*page* and download every
    article body it references into DailyArticle/<year>/<date>/; recurses
    through all pages.

    :param today: date string 'YYYY-MM-DD'
    :param page: 1-based page number of the cached listing file
    """
    # explicit encoding: the listing was written as UTF-8 by get_grid_json,
    # reading it with the platform default could fail on non-UTF-8 locales
    with open(f'{today}_{page}.json', 'r', encoding='utf8') as fp:
        content = json.load(fp)
    total_page = content['total']
    print(f'{today} 第{page}页数据: 文章{len(content["rows"])}条。')
    datetime_obj = datetime.datetime.strptime(today, '%Y-%m-%d')
    folder = 'DailyArticle/%04d/%s' % (datetime_obj.year, datetime_obj.strftime('%Y-%m-%d'))
    save_folder = pathlib.Path(folder)
    save_folder.mkdir(parents=True, exist_ok=True)
    for row in content['rows']:
        file_savepath = save_folder.joinpath(f'{today}_{row["Id"]}.json')
        request_article_content(row['Id'], file_savepath)
        # be polite to the server: randomized pause when logged in, fixed otherwise
        time.sleep(random.randint(1, 3) if USE_COOKIE else 2)

    if total_page > page:
        get_article_content(today, page + 1)


def request_article_content(article_id, save_path):
    """Fetch a single article body by id and dump the JSON payload to
    *save_path*; errors are logged, not raised.

    :param article_id: 'Id' value from a listing row
    :param save_path: destination path for the pretty-printed JSON
    """
    article_url = 'http://paper.7h365.com/Members/Article_Show'
    params = {
        'keyValue': article_id
    }
    headers = {
        'Host': 'paper.7h365.com',
        'Connection': 'keep-alive',
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36',
        'X-Requested-With': 'XMLHttpRequest',
        'Referer': 'http://paper.7h365.com/Members/MemberIndex',
        'Accept-Encoding': 'gzip,deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
    }
    if USE_COOKIE:
        headers['Cookie'] = LOGIN_COOKIE
    try:
        # timeout prevents a dead connection from blocking the whole crawl loop
        r = requests.get(article_url, params=params, headers=headers,
                         proxies=USE_PROXIES or None, timeout=30)
        # strict=False: the payload may embed raw control characters that the
        # default strict JSON parser would reject
        content = json.loads(r.text, strict=False)
        with open(save_path, 'w', encoding='utf8') as fp:
            json.dump(content, fp, indent=4)
    except Exception as e:
        print(f'获取{save_path}失败：{e}')
        logger.error(f'获取{save_path}失败：{e}')
    else:
        print(f'保存{save_path}成功！')


def get_pdf_file(today, page):   # download the PDF layout files
    """Download every PDF layout referenced by the cached listing file for
    *today*/*page* into DailyPdf/<year>/<date>/; recurses through all pages.

    :param today: date string 'YYYY-MM-DD'
    :param page: 1-based page number of the cached listing file
    """
    base_url = 'http://paper.7h365.com/Content'
    # explicit encoding: the listing was written as UTF-8 by get_grid_json
    with open(f'{today}_{page}.json', 'r', encoding='utf8') as fp:
        content = json.load(fp)
    total_page = content['total']

    # several articles share one layout PDF -> de-duplicate the addresses
    pdf_files = list(set(item['PdfAdress'] for item in content['rows']))
    print(f'{today} 第{page}页PDF版面{len(pdf_files)}个。')
    # the paper normally has an even number of layouts; log odd counts for review
    if len(pdf_files) % 2 == 1:
        logger.info(f'{today} PDF 版面 {len(pdf_files)} 个!')
    datetime_obj = datetime.datetime.strptime(today, '%Y-%m-%d')
    folder = 'DailyPdf/%04d/%s' % (datetime_obj.year, datetime_obj.strftime('%Y-%m-%d'))
    save_folder = pathlib.Path(folder)
    save_folder.mkdir(parents=True, exist_ok=True)
    for f_url in pdf_files:
        remote_url = base_url + f_url
        file_savepath = save_folder.joinpath(f'{today}-{pathlib.Path(f_url).name}')
        request_pdf_file(remote_url, save_path=str(file_savepath))
        # be polite to the server: randomized pause when logged in, fixed otherwise
        time.sleep(random.randint(1, 3) if USE_COOKIE else 2)

    if total_page > page:
        get_pdf_file(today, page + 1)


def request_pdf_file(remote_url, save_path):
    """Download one PDF from *remote_url* and write it to *save_path* in
    binary mode; errors are logged, not raised.

    :param remote_url: absolute URL of the PDF layout file
    :param save_path: local destination path
    """
    headers = {
        'Host': 'paper.7h365.com',
        'Connection': 'keep-alive',
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36',
        'X-Requested-With': 'XMLHttpRequest',
        'Referer': 'http://paper.7h365.com/Members/MemberIndex',
        'Accept-Encoding': 'gzip,deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
    }
    if USE_COOKIE:
        headers["Cookie"] = LOGIN_COOKIE
    try:
        # timeout prevents a dead connection from blocking the whole crawl loop
        r = requests.get(url=remote_url, headers=headers,
                         proxies=USE_PROXIES or None, timeout=60)
        with open(save_path, 'wb') as fp:
            fp.write(r.content)
    except Exception as e:
        print(f'获取{save_path}失败：{e}')
        logger.error(f'获取{save_path}失败：{e}')
    else:
        print(f'保存{save_path}成功！')


def parse_article_to_server(today, page):
    """Combine each cached listing row with its downloaded article file and
    either push the article to the server (module global SERVER_SAVE truthy)
    or render it to a local HTML file; recurses through all pages.

    :param today: date string 'YYYY-MM-DD'
    :param page: 1-based page number of the cached listing file
    """
    # explicit encoding: both files were written as UTF-8 by earlier steps
    with open(f'{today}_{page}.json', 'r', encoding='utf8') as fp:
        content = json.load(fp)
    total_page = content['total']
    datetime_obj = datetime.datetime.strptime(today, '%Y-%m-%d')
    folder = 'DailyArticle/%04d/%s' % (datetime_obj.year, datetime_obj.strftime('%Y-%m-%d'))
    save_folder = pathlib.Path(folder)
    for row in content['rows']:
        filepath = save_folder.joinpath(f'{today}_{row["Id"]}.json')
        with open(filepath, 'r', encoding='utf8') as f:
            # the article endpoint returns a one-element list
            file_content = json.load(f)[0]
        # merge listing metadata with the article body and creation time
        article_data = {
            'create_time': row["CreateTime"],
            # normalise 'yyyy/mm/dd' style dates to 'yyyy-mm-dd'
            'raw_time': file_content["createtime"].replace('/', '-'),
            'title': file_content['title'],
            'authors': file_content['author'],
            'words': file_content['wordnum'],
            'issue_num': row["Num"],
            'layout': row["Layout"],
            'is_special': row['IsSpecial'],
            'content': file_content['content']
        }

        if SERVER_SAVE:
            save_article_to_serve(article_data)  # push to the server
            time.sleep(1)
        else:
            parser_article_to_file(article_data, today)  # render local HTML
            time.sleep(0.5)
    if total_page > page:
        parse_article_to_server(today, page + 1)


def save_article_to_serve(article_data):
    """POST one article dict to the remote article endpoint and report the
    outcome; errors are printed, not raised.

    :param article_data: dict built by parse_article_to_server
    """
    # base_url = 'http://127.0.0.1:8000/admin/qhrb/article/'  # local dev endpoint
    base_url = 'https://210.13.218.130:9000/v1/qhrb/article/'
    r = None  # keep a handle so the except branch can inspect a received response
    try:
        r = requests.post(base_url, json=article_data, verify=False, timeout=30)
        resp = r.json()
    except Exception as e:
        print(f'发起保存{article_data["create_time"]}-{article_data["title"]}失败...{e}')
        # r is unbound when requests.post itself raised — the old code hit a
        # NameError here; only dump the body if a response actually arrived
        if r is not None:
            print(r.text)
    else:
        if resp.get('code', 0) == 201:
            print(f'服务器保存{article_data["create_time"]}-{article_data["title"]}  成功!')
        else:
            print(f'服务器保存{article_data["create_time"]}-{article_data["title"]}  失败...')


def parser_article_to_file(article_data, today):
    """Render one article dict as a human-readable HTML file under
    DailyArticle/readable/<year>/<date>/.

    :param article_data: dict built by parse_article_to_server
    :param today: date string 'YYYY-MM-DD' (first four chars are the year)
    """
    target_dir = pathlib.Path(f'DailyArticle/readable/{today[:4]}/{today}/')
    if not target_dir.exists():
        target_dir.mkdir(parents=True)
    # '/' and '?' are not valid in file names — strip them from the title part
    filename = f'{article_data["layout"]}_{article_data["title"].replace("/", "")}_{article_data["authors"]}.html'
    filename = filename.replace('?', '')
    # assemble the page: wrapper div, centred title, right-aligned byline, body
    pieces = [
        '<div style=margin-left:30px;margin-right:30px>',
        f'<p style=font-size:24px;text-align:center>{article_data["title"]}</p>',
        f'<div style="text-align: right;margin-right: 10%;font-size:14px; color:#363662; margin-left:10px; margin-top:20px;"><span style="margin-right:20px;" v-show="dialogArticle.authors">作者：{ article_data["authors"] }</span><span>发布时间：{ article_data["raw_time"] }</span></div>',
        article_data["content"] + '</div>',
    ]
    with open(target_dir.joinpath(filename), 'w', encoding='utf8') as html:
        html.write(''.join(pieces))
    print(f'本地保存 {article_data["title"]} HTML可读文件成功!')


def save_layout_to_server(today):
    """Upload every downloaded PDF layout file for *today* to the remote
    layout endpoint, pausing 3 s between uploads; errors are printed.

    :param today: date string 'YYYY-MM-DD'
    """
    base_url = 'https://210.13.218.130:9000/v1/qhrb/layout/'
    # base_url = 'http://127.0.0.1:8000/v1/qhrb/layout/'  # local dev endpoint
    datetime_obj = datetime.datetime.strptime(today, '%Y-%m-%d')
    folder = 'DailyPdf/%04d/%s' % (datetime_obj.year, datetime_obj.strftime('%Y-%m-%d'))
    save_folder = pathlib.Path(folder)
    for file in save_folder.glob('*.pdf'):
        try:
            # context manager guarantees the handle is closed even when the
            # request raises — the old explicit close() leaked it on failure
            with open(file, 'rb') as layout_file:
                r = requests.post(base_url, data={'date': today},
                                  files={'layout_file': layout_file},
                                  verify=False, timeout=60)
            if r.status_code != 200:
                raise ValueError(f'状态码 - {r.status_code}')
            if r.json()['code'] != 201:
                raise ValueError(f'服务返回状态码 - {r.json()["code"]}')
        except Exception as e:
            print(f'上传版面文件{file} 出错了：{e}')
        else:
            print(f'上传版面文件{file} 成功!')
        time.sleep(3)


def main(day=None, is_request=True):
    """Run one full cycle for *day* (defaults to today): either upload the
    layout PDFs (SERVER_SAVE truthy) or scrape listing/articles/PDFs, then
    process the cached articles.

    :param day: date string 'YYYY-MM-DD', or falsy for today
    :param is_request: when saving locally, whether to hit the website first
    """
    if not day:
        day = datetime.datetime.today().strftime('%Y-%m-%d')
    if SERVER_SAVE:
        # push the layout PDFs to the server
        save_layout_to_server(today=day)
    elif is_request:
        get_grid_json(today=day, page=1)         # listing pages
        get_article_content(today=day, page=1)   # article bodies
        get_pdf_file(today=day, page=1)          # layout PDFs

    # save article content locally or to the server
    parse_article_to_server(today=day, page=1)


def generate_date(start, end):
    """Return every weekday (Mon-Fri) between *start* and *end* inclusive as
    a list of 'YYYY-MM-DD' strings; empty when end precedes start.

    :param start: first date, 'YYYY-MM-DD'
    :param end: last date, 'YYYY-MM-DD'
    """
    begin = datetime.datetime.strptime(start, '%Y-%m-%d')
    finish = datetime.datetime.strptime(end, '%Y-%m-%d')
    span = (finish - begin).days + 1  # inclusive range; <= 0 yields nothing
    candidates = (begin + datetime.timedelta(days=offset) for offset in range(span))
    return [day.strftime('%Y-%m-%d') for day in candidates if day.weekday() < 5]


# Apparently unused placeholders — no reference elsewhere in this file.
step_days = []
other_days = []

if __name__ == '__main__':
    # Data for 2023-01-30, 2023-01-31, 2023-02-24, 2023-06-08, 2023-06-15,
    # 2023-06-16, 2023-08-07, 2023-08-15 is lost and can no longer be fetched.

    # SERVER_SAVE = 1  # whether to save to the server
    # REQUEST = 0
    # main(is_request=False)

    # Poor-man's scheduler: fire on weekdays when the clock reads 07:07.
    # NOTE(review): with a 20 s sleep the '07:07' comparison can match on
    # several consecutive iterations, so main() may run more than once in the
    # same minute unless a run itself takes over a minute — consider tracking
    # the last processed date. Confirm before changing.
    while 1:
        if datetime.datetime.now().strftime('%H:%M') == '07:07' and datetime.datetime.today().weekday() <= 4:
            print(datetime.datetime.now())
            # SERVER_SAVE is a module global also read by main() and
            # parse_article_to_server(): first pass scrapes + saves locally...
            SERVER_SAVE = 0  # whether to save to the server
            REQUEST = not SERVER_SAVE
            main(day=None, is_request=REQUEST)
            # ...second pass uploads the same day's data to the server.
            SERVER_SAVE = 1  # whether to save to the server
            REQUEST = not SERVER_SAVE
            main(day=None, is_request=REQUEST)
            print('{}处理完成!'.format(datetime.datetime.now()))
        time.sleep(20)
