import hashlib
import json
import os
import random
import threading
import time
from datetime import datetime, timedelta

import feedparser
import requests
from bs4 import BeautifulSoup
from elasticsearch import Elasticsearch, helpers
from flask import Flask, request, jsonify

from utils.check_valid import check_valid
from utils.constant import DATA_FOLDER

# --- External service configuration --------------------------------------
# SECURITY NOTE: the literals below are credentials committed to source
# control. They are kept only as fallbacks; prefer supplying them via the
# environment variables read here.
es_host = os.environ.get(
    "ES_HOST",
    "http://es-cn-27a3j1clw000734tp.public.elasticsearch.aliyuncs.com:9200",
)
es_username = os.environ.get("ES_USERNAME", "elastic")
es_password = os.environ.get("ES_PASSWORD", "12qwaszxcv@")

# Connect to Elasticsearch (long timeout: bulk indexing can be slow).
es = Elasticsearch(
    hosts=[es_host],
    basic_auth=(es_username, es_password),
    request_timeout=3600,
)


# Baidu Translate API endpoint and application credentials.
api_url = "http://api.fanyi.baidu.com/api/trans/vip/translate"
app_id = os.environ.get("BAIDU_APP_ID", "20231012001844862")
app_key = os.environ.get("BAIDU_APP_KEY", "y8xDHW_3h3sWzE6zypIq")


# Category label -> Yahoo! Japan News RSS feed URL.
# BUG FIX: "国际" (international) previously pointed at domestic.xml and
# "国内" (domestic) at world.xml — the two URLs were swapped.
rssList = {
    "生活": 'https://news.yahoo.co.jp/rss/categories/life.xml',
    "国际": 'https://news.yahoo.co.jp/rss/categories/world.xml',
    "国内": 'https://news.yahoo.co.jp/rss/categories/domestic.xml',
    "经济": 'https://news.yahoo.co.jp/rss/categories/business.xml',
    "娱乐": 'https://news.yahoo.co.jp/rss/categories/entertainment.xml',
    "运动": 'https://news.yahoo.co.jp/rss/categories/sports.xml',
    "IT": 'https://news.yahoo.co.jp/rss/categories/it.xml',
    "科学": 'https://news.yahoo.co.jp/rss/categories/science.xml',
}

# --- Shared mutable state ------------------------------------------------
# These globals are read/written by the Flask handlers and the two worker
# threads. NOTE(review): `lock` is created but never acquired anywhere in
# this file, so access relies only on the GIL — consider actually locking.
linkList = []
titleListJP = []
titleListZH = []
contentListJP = []
hasDone = False  # whether the worker threads still need to be started (unused in visible code)
translateFlag = False  # signals translateTitle() to (re)translate the current titles
updateFlag = False  # signals update() to sync new articles into Elasticsearch
isSearch = False # whether the current title list came from an ES search (vs. RSS)
lock = threading.Lock()
app = Flask(__name__)


def getMsg(type):
    """Fetch the RSS feed for category *type* and refresh the global
    Japanese title list and article link list."""
    global linkList, titleListJP
    # Parse the category's RSS feed.
    feed = feedparser.parse(rssList[type])
    entries = feed.entries
    titleListJP = [entry.title for entry in entries]
    linkList = [entry.link for entry in entries]


def translate(text, source, target):
    """Translate *text* from language *source* to *target* via the Baidu
    Translate API and return the concatenated translation."""
    salt = random.randint(32768, 65536)
    # Baidu signature: MD5 over appid + query + salt + key.
    digest = hashlib.md5((app_id + text + str(salt) + app_key).encode()).hexdigest()
    payload = {
        "q": text,
        "from": source,   # source language
        "to": target,     # target language
        "appid": app_id,
        "salt": salt,     # random nonce
        "sign": digest,   # request signature
    }
    response = requests.post(api_url, data=payload)
    # Parse the API response and join all translated segments.
    result = json.loads(response.text)
    return "".join(segment['dst'] for segment in result['trans_result'])


def translateTitle(source, target):
    """Daemon worker: translate titleListJP into titleListZH when signalled.

    Waits for `translateFlag`, then translates each Japanese title and
    appends the result to `titleListZH`.  If the flag is raised again while
    a pass is in progress (a new title list arrived), the current pass is
    abandoned so the fresh request is picked up.

    Fixes vs. original: sleeps while idle instead of busy-waiting a full
    CPU core, and delegates to translate() instead of duplicating the
    Baidu API call inline.
    """
    global titleListZH, translateFlag
    while True:
        if not translateFlag:
            time.sleep(0.05)  # idle: avoid pegging a CPU core
            continue
        translateFlag = False
        titleListZH = []
        for text in titleListJP:
            if translateFlag:  # a newer request superseded this pass
                break
            titleListZH.append(translate(text, source, target))


def update():
    """Daemon worker: index newly-seen articles into Elasticsearch.

    Waits for `updateFlag`, then walks the current titles/links and bulk
    indexes any article whose title is not yet in the "news_" index.

    Fixes vs. original: sleeps while idle instead of busy-waiting, and
    iterates titles and links together instead of indexing by position.
    """
    global updateFlag, contentListJP
    while True:
        if not updateFlag:
            time.sleep(0.05)  # idle: avoid pegging a CPU core
            continue
        updateFlag = False
        for title, link in zip(titleListJP, linkList):
            # Check whether this title is already indexed.
            query = {
                "query": {
                    "match": {
                        "title": title
                    }
                }
            }
            response = es.search(index="news_", body=query, size=1)
            if response['hits']['total']['value'] != 0:
                # Feed appears to be newest-first, so the first known title
                # implies the rest are old too — TODO confirm this ordering.
                break
            action = {
                "_index": "news_",  # target index
                "_source": {
                    "title": title,
                    "content": getContent(link)
                }
            }
            helpers.bulk(es, [action])

def acquireContent(ID):
    """Return the article body for index *ID*: the cached ES content when the
    current list came from a search, otherwise fetched live from its link."""
    return contentListJP[ID] if isSearch else getContent(linkList[ID])


def getTitle(ID):
    """Block until the translator thread has produced title *ID*, then return it.

    Fix vs. original: sleeps between checks instead of busy-waiting.
    """
    while len(titleListZH) <= ID:
        time.sleep(0.05)  # translation happens in a background thread
    return titleListZH[ID]


def getContent(link):
    """Fetch a Yahoo! News article body, following its extra pages.

    Returns the concatenated text of all pages, or "" when the request
    fails or the article-body element is absent.

    Fixes vs. original: guards against soup.find() returning None (which
    raised AttributeError), and drops the dead `pageNums != None` check —
    len() never returns None.
    """
    content = ""
    response = requests.get(link)
    if response.status_code != 200:
        print("请求失败，错误编码：", response.status_code)
        return content

    soup = BeautifulSoup(response.text, 'html.parser')
    body = soup.find(class_='article_body highLightSearchTarget')
    if body is None:  # layout changed or article removed
        return content
    content += body.get_text(separator="\n")

    # Each 'sc-cxxZvF dmcxKh' element appears to mark one page of the
    # article — TODO confirm against the live page markup.
    pageNums = len(soup.findAll(class_='sc-cxxZvF dmcxKh'))
    for num in range(2, pageNums + 1):
        response = requests.get(link + f"&page={num}")
        if response.status_code != 200:
            print("请求失败，错误编码：", response.status_code)
            continue
        soup = BeautifulSoup(response.text, 'html.parser')
        body = soup.find(class_='article_body highLightSearchTarget')
        if body is not None:
            content += body.get_text(separator="\n")
    return content


def get_count_by_hour(info_path):
    """Read an hourly info.json file and return its "count" field as an int."""
    with open(info_path, 'r', encoding='utf-8') as fp:
        return int(json.load(fp)["count"])

def process_date_folder(date_folder):
    """Sum the hourly counts recorded for one day.

    Hour data lives in sibling folders named "<date_folder>-HH" each holding
    an info.json; hours without one contribute nothing.

    Returns a (day as datetime, total count) tuple.
    """
    day = datetime.strptime(os.path.basename(date_folder), "%Y-%m-%d")
    total = 0
    for hour in range(24):
        info_path = os.path.join(f"{date_folder}-{hour:02d}", "info.json")
        if os.path.exists(info_path):
            total += get_count_by_hour(info_path)
    return day, total


def _update_or_insert_by(data, counts, **keys):
    """Add *counts* to the entry of *data* matching all **keys**, or append a
    new {'count': counts, **keys} entry when none matches.

    Shared implementation for the monthly and weekly accumulators, which
    previously duplicated this loop verbatim.
    """
    for item in data:
        if all(item[key] == value for key, value in keys.items()):
            item['count'] += counts
            return
    data.append({'count': counts, **keys})


def update_or_insert(data, iso_year, iso_month, counts):
    """Accumulate *counts* into the (iso_year, iso_month) bucket of *data*."""
    _update_or_insert_by(data, counts, iso_year=iso_year, iso_month=iso_month)


def update_or_insert_weekly(data, iso_year, iso_week, counts):
    """Accumulate *counts* into the (iso_year, iso_week) bucket of *data*."""
    _update_or_insert_by(data, counts, iso_year=iso_year, iso_week=iso_week)


@app.route('/api/getNewsList', methods=['GET'])
def getNewsList():
    """Load the requested RSS category, signal the translation/indexing
    workers, and return [{id, title}, ...] with the Japanese titles."""
    global translateFlag, updateFlag, isSearch
    getMsg(request.args.get('newsCategory'))
    translateFlag = True
    updateFlag = True
    isSearch = False
    return [{"id": idx, "title": title} for idx, title in enumerate(titleListJP)]


@app.route('/api/getNewsData', methods=['GET'])
def getNewsData():
    """Return one article: translated title plus Chinese and Japanese bodies."""
    article_id = int(request.args.get('id'))
    title = getTitle(article_id)          # blocks until the title is translated
    content = acquireContent(article_id)  # Japanese body
    return jsonify({
        "title": title,
        "cn": translate(content, "jp", "zh"),
        "jp": content,
    })


@app.route('/api/searchNews', methods=['GET'])
def searchNews():
    """Search indexed articles by keyword (translated zh -> jp first) and
    return up to 50 matches as [{id, title}, ...]."""
    global titleListJP, contentListJP, translateFlag, isSearch
    keyword = translate(request.args.get('keyword'), "zh", "jp")
    query = {
        "size": 50,
        "query": {
            "match": {
                "title": keyword
            }
        }
    }
    hits = es.search(index="news_", body=query)['hits']['hits']
    titleListJP = [hit["_source"]["title"] for hit in hits]
    contentListJP = [hit["_source"]["content"] for hit in hits]
    translateFlag = True
    isSearch = True
    return jsonify([{"id": idx, "title": title}
                    for idx, title in enumerate(titleListJP)])


@app.route('/api/translate', methods=['POST'])
def getTitleZH():
    """Return translated titles for ids [idStart, idEnd] inclusive.

    The translator thread fills titleListZH asynchronously, so this waits
    until enough titles are available.

    Fix vs. original: sleeps between checks instead of busy-waiting, and no
    longer shadows the request payload variable with the response list.
    """
    payload = request.get_json()
    start = payload["idStart"]
    end = payload["idEnd"] + 1
    while len(titleListZH) < end:
        time.sleep(0.05)  # wait for the background translator
    return [{"id": idx, "title": title}
            for idx, title in enumerate(titleListZH[start:end], start)]

@app.route('/api/daily', methods=['GET'])
def daily():
    """Per-day counts between `from` and `to` (inclusive), sorted by date."""
    from_date_str = request.args.get('from', None)
    to_date_str = request.args.get('to', None)
    if check_valid(from_date_str, to_date_str) == 2:
        return jsonify({'error': "index out of range"})

    # Strip the trailing 3 characters — assumes "YYYY-MM-DD HH" input,
    # leaving "YYYY-MM-DD" — TODO confirm against callers.
    from_date_str = from_date_str[:-3]
    to_date_str = to_date_str[:-3]

    from_date = datetime.strptime(from_date_str, "%Y-%m-%d")
    to_date = datetime.strptime(to_date_str, "%Y-%m-%d")

    # Accumulate one entry per day in the range.
    daily_counts = []
    current = from_date
    while current <= to_date:
        folder = os.path.join(DATA_FOLDER, current.strftime("%Y-%m-%d"))
        day, count = process_date_folder(folder)
        daily_counts.append({'date': day.strftime('%Y-%m-%d'), 'count': count})
        current += timedelta(days=1)

    # Ascending by date (lexicographic == chronological for ISO dates).
    sorted_data = sorted(daily_counts, key=lambda entry: entry["date"])

    return jsonify({
        'result': "success",
        'from': from_date_str,
        'to': to_date_str,
        'data': sorted_data
    }), 200


@app.route('/api/weekly', methods=['GET'])
def weekly():
    """Counts aggregated per ISO week between `from` and `to` (inclusive)."""
    from_date_str = request.args.get('from', None)
    to_date_str = request.args.get('to', None)

    valid = check_valid(from_date_str, to_date_str)
    if valid == 2:
        return jsonify({'error': "index out of range"})

    # Strip the trailing 3 characters — assumes "YYYY-MM-DD HH" input,
    # leaving "YYYY-MM-DD" — TODO confirm against callers.
    from_date_str = from_date_str[:-3]
    to_date_str = to_date_str[:-3]

    from_date = datetime.strptime(from_date_str, "%Y-%m-%d")
    to_date = datetime.strptime(to_date_str, "%Y-%m-%d")

    data = []
    current_date = from_date
    while current_date <= to_date:
        date_folder = os.path.join(DATA_FOLDER, current_date.strftime("%Y-%m-%d"))
        date_dt, daily_counts = process_date_folder(date_folder)
        iso_year, iso_week, _ = date_dt.isocalendar()
        update_or_insert_weekly(data, iso_year, iso_week, daily_counts)
        current_date += timedelta(days=1)

    # BUG FIX: sort numerically BEFORE stringifying; the original sorted the
    # stringified values, placing week "10" before week "2".
    data.sort(key=lambda item: (item['iso_year'], item['iso_week']))
    sorted_data = [{key: str(value) for key, value in d.items()} for d in data]

    return jsonify({
        'result': "success",
        'from': from_date_str,
        'to': to_date_str,
        'data': sorted_data
    })


@app.route('/api/monthly', methods=['GET'])
def monthly():
    """Counts aggregated per calendar month between `from` and `to` (inclusive)."""
    from_date_str = request.args.get('from', None)
    to_date_str = request.args.get('to', None)

    valid = check_valid(from_date_str, to_date_str)
    if valid == 2:
        return jsonify({'error': "index out of range"})

    # Strip the trailing 3 characters — assumes "YYYY-MM-DD HH" input,
    # leaving "YYYY-MM-DD" — TODO confirm against callers.
    from_date_str = from_date_str[:-3]
    to_date_str = to_date_str[:-3]

    from_date = datetime.strptime(from_date_str, "%Y-%m-%d")
    to_date = datetime.strptime(to_date_str, "%Y-%m-%d")

    data = []
    current_date = from_date
    while current_date <= to_date:
        date_folder = os.path.join(DATA_FOLDER, current_date.strftime("%Y-%m-%d"))
        date_dt, daily_counts = process_date_folder(date_folder)
        # BUG FIX: bucket by the calendar year, not isocalendar()'s ISO year —
        # around New Year the ISO year differs from the calendar year (e.g.
        # 2021-01-01 has ISO year 2020), which mis-bucketed January counts.
        # The output key is still named 'iso_year' to keep the API schema.
        update_or_insert(data, date_dt.year, date_dt.month, daily_counts)
        current_date += timedelta(days=1)

    # BUG FIX: sort numerically BEFORE stringifying; the original sorted the
    # stringified values, placing month "10" before month "2".
    data.sort(key=lambda item: (item['iso_year'], item['iso_month']))
    sorted_data = [{key: str(value) for key, value in d.items()} for d in data]

    return jsonify({
        'result': "success",
        'from': from_date_str,
        'to': to_date_str,
        'data': sorted_data
    }), 200
    

@app.route('/api/subscribe/', methods=['POST'])
def subscribe():
    """Subscribe a phone number to a news category, persisted in records.json.

    Body: {"phone": "...", "category": "..."}.  A bare 11-digit number is
    normalised to "+86..." form; an existing subscription is updated in
    place rather than duplicated.

    Fixes vs. original: a missing records file is seeded with a JSON list
    (it was seeded with "{}", so the first read always hit the "not a list"
    fallback), validation happens before the file is created, and the two
    identical return branches are collapsed.
    """
    data = request.get_json()
    phone = data.get("phone")
    category = data.get("category")

    filename = "records.json"  # subscription store, relative to the CWD

    if not phone or not category:
        return jsonify({'result': 'fail', 'msg': '缺少必要的参数'}), 400

    if category not in rssList:
        return jsonify({'result': "fail", 'msg': "栏目不存在"}), 400

    # Normalise a bare mainland-China number, then validate the final length.
    if len(phone) == 11:
        phone = "+86" + phone
    if len(phone) != 14:
        return jsonify({'result': "fail", 'msg': "手机号不合法"}), 400

    # BUG FIX: seed with a list — the records file holds a JSON array.
    if not os.path.exists(filename):
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump([], f)

    with open(filename, 'r', encoding='utf-8') as f:
        loaded = json.load(f)
    if isinstance(loaded, list):
        records = loaded
    else:
        print("Warning: File content is not a list.")
        records = []

    # Update an existing subscription in place.
    for record in records:
        if record.get('phone') == phone:
            if record['category'] != category:
                record['category'] = category
                with open(filename, 'w', encoding='utf-8') as f:
                    json.dump(records, f, ensure_ascii=False, indent=4)
            return jsonify({'result': "success"}), 200

    records.append({'phone': phone, 'category': category})
    with open(filename, 'w', encoding='utf-8') as f:
        json.dump(records, f, ensure_ascii=False, indent=4)

    return jsonify({'result': "success"}), 200



def main():
    """Start the two background worker threads and run the Flask dev server.

    NOTE(review): debug=True enables Flask's auto-reloader, which re-imports
    the module in a child process and can start duplicate worker threads;
    disable it outside development.
    """
    thread1 = threading.Thread(target=translateTitle, args=("jp", "zh"))
    thread1.daemon = True  # daemon thread: exits with the main process
    thread1.start()
    thread2 = threading.Thread(target=update)
    thread2.daemon = True  # daemon thread: exits with the main process
    thread2.start()
    app.run(host='0.0.0.0', port=8088, debug=True)


# NOTE(review): called unconditionally at import time; wrapping this in
# `if __name__ == "__main__":` would make the module importable without
# starting the server, but would change current import-time behaviour.
main()

