import json
from elasticsearch import Elasticsearch
from urllib.parse import urlsplit
from elasticsearch.helpers import bulk
from args import *

import time
from datetime import datetime
# from pyhanlp import *# 关键词提取
# NOTE(review): module-level accumulators; neither name is referenced
# anywhere else in this file — presumably leftovers. Confirm before removing.
content = ""
return_json = []
def mapping_and_setting():
    """Create INDEX_NAME with the configured mapping/settings if it is absent.

    Relies on module-level `client`, `INDEX_NAME`, `mapping` and `settings`
    (brought in via `from args import *` / the __main__ block).
    """
    print(client.ping())  # connectivity probe, printed for the operator
    if client.indices.exists(index=INDEX_NAME):
        print("索引无需重复创建！")
        return
    result = client.indices.create(index=INDEX_NAME, body={**mapping, **settings}, ignore=400)
    if not result.get("acknowledged"):
        print("索引创建失败")
        print(result)
    else:
        print("索引创建成功")

def read_all_jsons_path(root_dir):
    """Recursively collect the path of every file under *root_dir*.

    NOTE: despite the name, no extension filtering happens — every file
    (JSON or not) is returned, matching the original behavior.

    :param root_dir: directory to scan.
    :return: list of absolute/relative file paths (joined onto root_dir).
    """
    import os
    # os.walk replaces the hand-rolled recursion: same set of files,
    # iterative, and immune to Python's recursion limit on deep trees.
    return [
        os.path.join(dirpath, filename)
        for dirpath, _dirnames, filenames in os.walk(root_dir)
        for filename in filenames
    ]

def transdate(epoch: str):
    """Normalize a timestamp string.

    Strings already containing two dashes (e.g. "2024-01-01 ...") pass
    through unchanged; anything else is treated as epoch *milliseconds*
    and rendered as "YYYY-MM-DD HH:MM:SS" in the local timezone.
    """
    already_formatted = epoch.count("-") == 2
    if already_formatted:
        return epoch
    seconds = float(epoch) / 1000  # input is in milliseconds
    return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(seconds))


# NOTE(review): hard-coded sample media URLs on an internal host; neither
# name is referenced elsewhere in this file — presumably consumed by another
# module or left over from testing. Confirm before removing.
video = "http://10.26.32.92:8000/ReadAssist/video/SampleVideo_1280x720_30mb.mp4"
image = "http://10.26.32.92:8000/ReadAssist/image/test12138.jpg"
# 保持原始的日期
def preprocess_data(json_data):
    """Normalize crawled news records in place, keeping their original dates.

    For each record: tags the source, renames `news_sourcesite` to the
    website field, normalizes publish/crawl timestamps via transdate(),
    derives the domain from the URL, mirrors title/content into the
    Chinese-specific fields, and finally drops the consumed raw keys.

    :param json_data: iterable of dicts, mutated in place.
    """
    for data in json_data:
        data[NEWS_SOURCE] = "1"
        if "news_sourcesite" in data:
            data[NEWS_WEBSITE] = data["news_sourcesite"]
        if NEWS_PUBLICTIME in data:
            data[NEWS_PUBLICTIME] = transdate(data[NEWS_PUBLICTIME])
        # derive the date either from the normalized publish time or from
        # an explicit raw date field, preferring the explicit one
        if "news_publictime_date" not in data and NEWS_PUBLICTIME in data:
            data[NEWS_PUBLICDATE] = data[NEWS_PUBLICTIME].split()[0]
        elif "news_publictime_date" in data:
            data[NEWS_PUBLICDATE] = transdate(data["news_publictime_date"])

        if "indextime" in data:
            data[CRAWL_TIME] = transdate(data["indextime"])

        if NEWS_URL in data:
            data[DOMAIN] = urlsplit(data[NEWS_URL]).hostname

        if NEWS_TITLE in data:
            data[NEWS_TITLE_ZH] = data[NEWS_TITLE]
        if NEWS_CONTENT in data:
            data[NEWS_CONTENT_ZH] = data[NEWS_CONTENT]

        # BUG FIX: the original unconditional `del` raised KeyError for
        # records missing these keys (everything above guards with `in`,
        # the deletes did not); pop() with a default is tolerant.
        data.pop("news_sourcesite", None)
        data.pop("indextime", None)

# 修改了原始日期
def preprocess_data2(json_data):
    """Normalize crawled news records in place, REPLACING their dates.

    Same shaping as preprocess_data(), except the publish date/time are
    overwritten with random values from generate_date()/generate_time(),
    and the crawl time mirrors that synthetic publish time.

    :param json_data: iterable of dicts, mutated in place.
    """
    for data in json_data:
        data[NEWS_SOURCE] = "1"
        if "news_sourcesite" in data:
            data[NEWS_WEBSITE] = data["news_sourcesite"]
        # synthetic timestamps replace whatever the crawl recorded
        data[NEWS_PUBLICDATE] = generate_date()
        data[NEWS_PUBLICTIME] = data[NEWS_PUBLICDATE] + " " + generate_time()

        if "indextime" in data:
            data[CRAWL_TIME] = data[NEWS_PUBLICTIME]

        if NEWS_URL in data:
            data[DOMAIN] = urlsplit(data[NEWS_URL]).hostname

        if NEWS_TITLE in data:
            data[NEWS_TITLE_ZH] = data[NEWS_TITLE]
        if NEWS_CONTENT in data:
            data[NEWS_CONTENT_ZH] = data[NEWS_CONTENT]

        # BUG FIX: the original unconditional `del` raised KeyError for
        # records missing these keys; pop() with a default is tolerant.
        data.pop("news_sourcesite", None)
        data.pop("indextime", None)


from datetime import datetime, timedelta
import random

def generate_date(start_date=datetime(1960, 1, 1), end_date=datetime(2024, 10, 20)):
    """Return a uniformly random date in [start_date, end_date] as "YYYY-MM-DD".

    Generalized from the original hard-coded 1960-01-01..2024-10-20 range;
    the defaults keep existing zero-argument callers unchanged.
    (datetime defaults are immutable, so sharing them across calls is safe.)

    :param start_date: inclusive lower bound of the range.
    :param end_date: inclusive upper bound of the range.
    :return: formatted date string.
    """
    total_days = (end_date - start_date).days

    # randint is inclusive on both ends, so end_date itself can be drawn
    random_days = random.randint(0, total_days)
    random_date = start_date + timedelta(days=random_days)
    return random_date.strftime("%Y-%m-%d")

def generate_time():
    """Return a random wall-clock time formatted "hh:mm:ss"."""
    # One randint per field, drawn in hour/minute/second order so the
    # consumption of the random stream matches the previous implementation.
    fields = (
        random.randint(0, 23),
        random.randint(0, 59),
        random.randint(0, 59),
    )
    return ":".join(f"{value:02}" for value in fields)


def read_jsons(root_dir):
    """Load every JSON-lines file under *root_dir*, strip unknown keys,
    preprocess the records and index them into Elasticsearch.

    Each file is expected to hold one JSON object per line (BOM tolerated
    via utf-8-sig). Keys outside `news_property` are dropped, except the
    two raw keys the preprocessing step consumes and removes itself.
    """
    all_jsons_path = read_all_jsons_path(root_dir)
    print(f"文件总个数 {len(all_jsons_path)}")
    # raw keys kept past filtering because preprocess_data2 reads then drops them
    extra_keys = ("news_sourcesite", "indextime")
    count = 0
    for file in all_jsons_path:
        with open(file, "r", encoding="utf-8-sig") as f:
            lines = [line.strip() for line in f]
        json_data: list[dict] = [json.loads(line) for line in lines if line != '']
        # rebuild each record with only the allowed keys (idiomatic
        # replacement for deleting keys while iterating a key snapshot)
        for i, record in enumerate(json_data):
            json_data[i] = {
                k: v for k, v in record.items()
                if k in news_property or k in extra_keys
            }

        # generate_vector(json_data, file)
        preprocess_data2(json_data)
        count += 1
        print(f"第{count}个文档处理完毕")
        print(f"正在上传。。。")
        index2(json_data)
        print(f"第{count}个文档上传完毕")

# 分成一个个batch
def split_batch(init_list, batch_size):
    """Split *init_list* into consecutive sublists of *batch_size* items;
    the final sublist holds any remainder.

    Replaces the opaque zip(*(iter(...),) * n) trick plus bolt-on
    remainder handling with the standard slicing comprehension — same
    output for every valid input.

    :param init_list: list to partition.
    :param batch_size: positive chunk size.
    :return: list of sublists; [] for an empty input.
    :raises ValueError: if batch_size < 1 (the old code failed with an
        obscure ZeroDivisionError or looped wrongly here).
    """
    if batch_size < 1:
        raise ValueError("batch_size must be >= 1")
    return [init_list[i:i + batch_size] for i in range(0, len(init_list), batch_size)]

# 索引入库
# def index(json_data):
#     flag = False
#     for j in json_data:
#         temp = []
#         temp.append(j)
#         try:
#             bulk(client, temp, index=INDEX_NAME, chunk_size=BATCH_SIZE_ES, timeout="100s")
#         except:
#             if not flag:
#                 print(f"wrong data: {j}")
#             flag = True
    # success, _ = bulk(client, json_data, index=INDEX_NAME, chunk_size=BATCH_SIZE_ES, timeout="100s")
    # print(f'{success}条记录成功插入')

def index2(json_data):
    """Index records one by one; a record that fails is logged and skipped
    so the rest of the batch still uploads (deliberate best-effort)."""
    for record in json_data:
        try:
            client.index(index=INDEX_NAME, document=record)
        except Exception as err:
            print(err)
            print(f"wrong data\n {record} \n\n")

if __name__ == "__main__":
    # Connect once, ensure the index exists, then ingest the data
    # directory in 20 passes. NOTE(review): each pass re-reads and
    # re-indexes the same files, so every record ends up 20 times in the
    # index — this appears intentional (bulk test data); confirm.
    client = Elasticsearch(
        hosts=ELASTIC_ADDRESS,
        basic_auth=(ELASTIC_USERNAME, ELASTIC_PASSWORD),
        timeout=10000,
    )
    mapping_and_setting()
    for _ in range(20):
        read_jsons(DATA_FILE_PATH)




