
import json
from elasticsearch import Elasticsearch
from urllib.parse import urlsplit
from elasticsearch.helpers import bulk
from args import *

import time
from datetime import datetime
# from pyhanlp import *  # keyword extraction (currently disabled)
content = ""  # NOTE(review): appears unused anywhere in this file — candidate for removal
return_json = []  # NOTE(review): appears unused anywhere in this file — candidate for removal
def mapping_and_setting():
    """Ensure the target index exists, creating it with the module-level
    ``mapping`` and ``settings`` bodies when it does not.

    Relies on module globals: ``client``, ``INDEX_NAME``, ``mapping``,
    ``settings`` (the latter two come from ``args`` via star-import).
    """
    # Connectivity probe: prints True/False from the cluster ping.
    print(client.ping())
    if client.indices.exists(index=INDEX_NAME):
        print("索引无需重复创建！")
        return
    # ignore=400 suppresses "already exists" races between exists() and create().
    result = client.indices.create(index=INDEX_NAME, body={**mapping, **settings}, ignore=400)
    if result.get("acknowledged"):
        print("索引创建成功")
    else:
        print("索引创建失败")
        print(result)

def read_all_jsons_path(root_dir):
    """Collect the paths of all regular files under *root_dir*, recursively.

    Despite the name, no ``.json`` filtering is applied — every file found
    anywhere in the tree is returned.
    """
    import os
    collected = []
    for dirpath, _dirnames, filenames in os.walk(root_dir):
        collected.extend(os.path.join(dirpath, name) for name in filenames)
    return collected

def transdate(epoch: str):
    """Normalize a timestamp string to ``YYYY-MM-DD HH:MM:SS``.

    A value containing exactly two dashes is treated as already formatted
    and returned untouched; anything else is parsed as an epoch value in
    milliseconds and rendered in local time.
    """
    if epoch.count("-") == 2:
        return epoch
    seconds = float(epoch) / 1000
    return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(seconds))


# Fixture constants used to fabricate document fields during preprocessing.
video = "http://10.26.32.92:8000/ReadAssist/video/SampleVideo_1280x720_30mb.mp4"  # sample video URL (appears unused in this file)
image = "http://10.26.32.92:8000/ReadAssist/image/test12138.jpg"  # sample image URL (appears unused in this file)
category = ["中国", "美国", "英国", "日本", "俄罗斯", "乌克兰", "泰国"]  # pool for random material_category
source = ["安保", "会计", "后勤", "财务", "法务", "战略"]  # pool for random material_source
uids = ['1', '2', '3', '4', '5', '6', '7', '8']  # candidate user ids for available_uid
# NOTE(review): exact duplicate of transdate() defined earlier in this file;
# this later definition silently replaces the first at import time — deduplicate.
def transdate(epoch: str):
    """Normalize a timestamp string to ``YYYY-MM-DD HH:MM:SS``.

    Values with two dashes are returned unchanged; otherwise the value is
    parsed as an epoch in milliseconds and rendered in local time.
    """
    if epoch.count("-") == 2:
        return epoch
    else:
        timestamp = float(epoch) / 1000
        time_array = time.localtime(timestamp)
        return time.strftime("%Y-%m-%d %H:%M:%S", time_array)
# Keep the original publication date when one is present.
def preprocess_data(json_data):
    """Rewrite each raw news dict in *json_data*, in place, into the
    ``material_*`` schema expected by the ES index.

    Missing source fields fall back to empty strings / ``None``; the
    category, source and available_uid fields are randomized fixture data.
    Uses module globals ``category``, ``source``, ``uids`` and ``transdate``.
    """
    for data in json_data:
        # `doc_id` instead of `id` — avoids shadowing the builtin.
        doc_id = data.get('id', '')
        material_title = data.get('news_title', "")
        material_content = data.get('news_content', '')
        material_time = transdate(data['news_publictime']) if 'news_publictime' in data else '2020-01-01 00:00:00'
        material_date = material_time.split()[0]
        material_summary = data.get('news_summary', '')
        material_category = random.choice(category)
        material_source = random.choice(source)
        # Every uid except one randomly dropped entry may access this doc.
        available_uid = list(uids)
        available_uid.pop(random.randrange(len(available_uid)))
        material_keywords = data.get('news_keywords')
        # Rebuild the dict so only the material_* schema remains.
        data.clear()
        data['id'] = doc_id
        data['material_title'] = material_title
        data['material_content'] = material_content
        data['material_time'] = material_time
        data['material_date'] = material_date
        data['material_summary'] = material_summary
        data['material_category'] = material_category
        data['available_uid'] = available_uid
        data['material_keywords'] = material_keywords
        data['material_source'] = material_source


from datetime import datetime, timedelta
import random

def generate_date():
    """Return a random date string ``YYYY-MM-DD`` between 1960-01-01 and
    2024-10-20 (inclusive), uniformly distributed by day."""
    first = datetime(1960, 1, 1)
    last = datetime(2024, 10, 20)
    span_days = (last - first).days
    # Single randint draw keeps RNG consumption identical to callers' expectations.
    offset = timedelta(days=random.randint(0, span_days))
    return (first + offset).strftime("%Y-%m-%d")

def generate_time():
    """Return a random clock time formatted ``hh:mm:ss``."""
    # Draw in hour/minute/second order so seeded sequences stay stable.
    hour = random.randint(0, 23)
    minute = random.randint(0, 59)
    second = random.randint(0, 59)
    return ":".join(f"{part:02}" for part in (hour, minute, second))


def read_jsons(root_dir):
    """Load every JSON-lines file under *root_dir*, preprocess each record
    into the material_* schema, and index it into Elasticsearch.

    Each file is expected to hold one JSON object per line; blank lines
    are skipped. Progress is printed per file.
    """
    all_jsons_path = read_all_jsons_path(root_dir)
    print(f"文件总个数 {len(all_jsons_path)}")
    count = 0
    for file in all_jsons_path:
        # utf-8-sig strips a BOM if the exporter wrote one.
        with open(file, "r", encoding="utf-8-sig") as f:
            lines = [line.strip() for line in f]
        json_data: list[dict] = [json.loads(line) for line in lines if line != '']
        # (Removed a dead loop that computed each record's key list and
        # discarded it — it had no effect.)
        preprocess_data(json_data)
        count += 1
        print(f"第{count}个文档处理完毕")
        print(f"正在上传。。。")
        index2(json_data)
        print(f"第{count}个文档上传完毕")

# Split a list into fixed-size batches.
def split_batch(init_list, batch_size):
    """Split *init_list* into consecutive chunks of *batch_size* elements.

    The final chunk holds the remainder and may be shorter. An empty list
    yields an empty result.

    Raises:
        ValueError: if *batch_size* is not a positive integer (the old
            zip-based implementation raised ZeroDivisionError for 0 and
            produced garbage for negative sizes).
    """
    if batch_size <= 0:
        raise ValueError("batch_size must be positive")
    return [init_list[i:i + batch_size] for i in range(0, len(init_list), batch_size)]

def index2(json_data):
    # Index documents one request per document into the module-level
    # client/INDEX_NAME; a failed document is logged and skipped so one
    # bad record does not abort the rest of the batch.
    # NOTE(review): `bulk` from elasticsearch.helpers is imported at the
    # top of the file but never used — bulk indexing would likely be much
    # faster; confirm before switching, as per-document error reporting
    # would change shape.
    for data in json_data:
        try:
            client.index(index=INDEX_NAME, document=data)
        except Exception as e:
            print(e)
            print(f"wrong data\n {data} \n\n")

if __name__ == "__main__":
        # NOTE(review): module functions (mapping_and_setting, index2)
        # reference `client` as a global, so it must be constructed here
        # before any of them run.
        client = Elasticsearch(
            hosts=ELASTIC_ADDRESS, basic_auth=(ELASTIC_USERNAME, ELASTIC_PASSWORD),
            timeout=10000)  # presumably seconds, set very high for long uploads — confirm
        mapping_and_setting()
        DATA_FILE_PATH = r"/mnt/sdn/zjh/es/test1"
        # Ingests the same directory 20 times, producing duplicate documents —
        # presumably intentional volume/load testing; confirm before reuse.
        for i in range(20):
            read_jsons(DATA_FILE_PATH)




