import json
from elasticsearch import Elasticsearch
from urllib.parse import urlsplit
from elasticsearch.helpers import bulk
from args import *

import time
from datetime import datetime
from langdetect import detect
# from pyhanlp import *  # keyword extraction (disabled)
# NOTE(review): these module-level globals appear unused anywhere in this
# file — confirm no external module imports them before removing.
content = ""
return_json = []
def mapping_and_setting():
    """Create INDEX_NAME with the module-level mapping/settings unless it already exists."""
    print(client.ping())
    if client.indices.exists(index=INDEX_NAME):
        print("索引无需重复创建！")
        return
    body = {**mapping, **settings}
    result = client.indices.create(index=INDEX_NAME, body=body, ignore=400)
    if result.get("acknowledged"):
        print("索引创建成功")
    else:
        print("索引创建失败")
        print(result)

def read_all_jsons_path(root_dir):
    """Recursively collect every file path under *root_dir*.

    Despite the name, no extension filtering is done: ALL files are
    returned, not just ``.json`` ones (matches the original behavior).

    :param root_dir: directory to walk.
    :return: list of file paths (traversal order unspecified).
    """
    import os
    # os.walk replaces the hand-rolled recursion over os.listdir.
    return [
        os.path.join(dirpath, filename)
        for dirpath, _dirnames, filenames in os.walk(root_dir)
        for filename in filenames
    ]

def transdate(epoch: str):
    """Normalize a timestamp string.

    A value already containing exactly two '-' (a dashed date such as
    ``2023-01-01`` or ``2023-01-01 12:00:00``) is returned unchanged;
    anything else is treated as epoch milliseconds and rendered as
    ``YYYY-MM-DD HH:MM:SS`` in local time.
    """
    if epoch.count("-") == 2:
        return epoch
    seconds = float(epoch) / 1000
    return datetime.fromtimestamp(seconds).strftime("%Y-%m-%d %H:%M:%S")


# Sample media URLs on an internal file server.
# NOTE(review): neither name is referenced in this file — confirm no other
# module uses them before removing.
video = "http://10.26.32.92:8000/ReadAssist/video/SampleVideo_1280x720_30mb.mp4"
image = "http://10.26.32.92:8000/ReadAssist/image/test12138.jpg"

def preprocess_data(json_data):
    """Normalize raw crawled records in place so they match the ES mapping.

    For each record: tag the source, skip records with empty or non-Chinese
    content, rename/derive fields (website, publish time/date, crawl time,
    URL domain, zh title/content copies), and drop the raw helper keys.

    :param json_data: list of dicts; mutated in place, nothing returned.
    """
    for data in json_data:
        data[NEWS_SOURCE] = "1"
        # Skip records with missing/None/empty content.
        if not data.get("news_content"):
            continue
        # Skip articles whose text does not detect as Chinese.
        if is_foreign_news(data["news_content"]):
            continue

        if "news_sourcesite" in data:
            data[NEWS_WEBSITE] = data["news_sourcesite"]
        if NEWS_PUBLICTIME in data:
            data[NEWS_PUBLICTIME] = transdate(data[NEWS_PUBLICTIME])
        # Publish date: prefer the explicit date field, else the date part
        # of the (already normalized) publish time.
        if "news_publictime_date" in data:
            data[NEWS_PUBLICDATE] = transdate(data["news_publictime_date"])
        elif NEWS_PUBLICTIME in data:
            data[NEWS_PUBLICDATE] = data[NEWS_PUBLICTIME].split()[0]

        if "indextime" in data:
            data[CRAWL_TIME] = transdate(data["indextime"])

        if NEWS_URL in data:
            data[DOMAIN] = urlsplit(data[NEWS_URL]).hostname

        if NEWS_TITLE in data:
            data[NEWS_TITLE_ZH] = data[NEWS_TITLE]
        if NEWS_CONTENT in data:
            data[NEWS_CONTENT_ZH] = data[NEWS_CONTENT]

        # BUG FIX: these raw keys are only conditionally present (see the
        # `in data` guards above), but the original used bare `del`, which
        # raised KeyError for records missing either key.
        data.pop("news_sourcesite", None)
        data.pop("indextime", None)


def is_foreign_news(text, domestic_language='zh-cn'):
    """
    Decide whether a news text is foreign-language.

    Only the first 100 characters are fed to langdetect, to keep
    detection cheap on long articles.

    :param text: the news text.
    :param domestic_language: language code of the home country,
        defaults to simplified Chinese ('zh-cn').
    :return: True if the detected language differs from
        ``domestic_language``; False otherwise — including when detection
        itself fails (errors are printed and treated as "not foreign").
    """
    try:
        # Detect the language of the text (first 100 chars only).
        language = detect(text[:100])
        return language != domestic_language
    except Exception as e:
        print(f"Error in language detection: {e}")
        return False

def read_jsons(root_dir):
    """Load every JSON-lines file under *root_dir*, clean it, preprocess
    it, and upload it to Elasticsearch one file at a time.

    Each file is expected to contain one JSON object per line
    (UTF-8 with optional BOM). Keys outside ``news_property`` plus the
    two raw helper keys are discarded before preprocessing.

    :param root_dir: directory containing the crawled JSON-lines files.
    """
    all_jsons_path = read_all_jsons_path(root_dir)
    print(f"文件总个数 {len(all_jsons_path)}")
    # Build the key whitelist once; membership tests against a set are
    # O(1) versus O(n) on the original list on every key of every record.
    allowed_keys = set(news_property) | {"news_sourcesite", "indextime"}
    for count, file in enumerate(all_jsons_path, start=1):
        with open(file, "r", encoding="utf-8-sig") as f:
            lines = [line.strip() for line in f]
        # One JSON object per non-empty line; keep only whitelisted keys
        # (replaces the delete-while-iterating loop over range(len(...))).
        json_data: list[dict] = [
            {k: v for k, v in json.loads(line).items() if k in allowed_keys}
            for line in lines
            if line
        ]
        # generate_vector(json_data, file)
        preprocess_data(json_data)
        print(f"第{count}个文档处理完毕")
        print("正在上传。。。")
        index(json_data)
        print(f"第{count}个文档上传完毕")

# Split a list into batches.
def split_batch(init_list, batch_size):
    """Split *init_list* into consecutive chunks of *batch_size*.

    The final chunk holds the remainder and may be shorter.

    :param init_list: list to split.
    :param batch_size: positive chunk size.
    :return: list of list chunks.
    """
    # Plain slicing replaces the opaque zip(*(iter(...),)*n) trick and the
    # expression-statement `append(...) if ... else` hack; the remainder
    # chunk falls out of the slicing automatically.
    return [init_list[i:i + batch_size] for i in range(0, len(init_list), batch_size)]

# Index documents into Elasticsearch.
def index(json_data):
    """Upload records one at a time via the bulk helper (best-effort).

    Each document is sent in its own bulk call so that one malformed
    record cannot sink a whole batch; only the first failure is printed
    to avoid flooding the log.

    :param json_data: list of dicts ready for indexing.
    """
    reported = False
    for doc in json_data:
        try:
            bulk(client, [doc], index=INDEX_NAME, chunk_size=BATCH_SIZE_ES, timeout="100s")
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; deliberately best-effort otherwise.
        except Exception:
            if not reported:
                print(f"wrong data: {doc}")
                reported = True

def index2(json_data):
    """Index records one by one with client.index, printing any failures."""
    for record in json_data:
        try:
            client.index(index=INDEX_NAME, document=record)
        except Exception as err:
            print(err)
            print(f"wrong data\n {record} \n\n")

if __name__ == "__main__":

        # Entry point: connect to Elasticsearch, ensure the index exists,
        # then ingest every JSON-lines file under DATA_FILE_PATH.
        # NOTE(review): `timeout=` is the legacy client kwarg — newer
        # elasticsearch-py (8.x) renamed it to `request_timeout`; confirm
        # the installed client version.
        client = Elasticsearch(
            hosts=ELASTIC_ADDRESS, basic_auth=(ELASTIC_USERNAME, ELASTIC_PASSWORD),
            timeout=10000)
        mapping_and_setting()
        read_jsons(DATA_FILE_PATH)



