from elasticsearch import Elasticsearch
from urllib.parse import urlsplit
from elasticsearch.helpers import bulk
from args import *
from logger import logger
import requests
import math
import random

from tqdm import tqdm
# Shared Elasticsearch client used for both reading and writing.
# Credentials/host come from the star import of `args` above.
# NOTE(review): `timeout=10000` looks like seconds (~2.7 h) — presumably
# meant to be generous for bulk work; confirm the intended unit.
client = Elasticsearch(
    hosts=ELASTIC_ADDRESS, basic_auth=(ELASTIC_USERNAME, ELASTIC_PASSWORD),
    timeout=10000)
# Module-level date-range filter reused by get_count() and read_batch():
# matches documents whose `fssj` field lies in [start_date, end_date].
# (start_date/end_date come from `args`.)
query = {
            "query": {
                "range": {
                    "fssj": {
                        "gte": start_date,
                        "lte": end_date
                    }
                }
            }
        }
def mapping_and_setting(index_name=WRITE_INDEX_NAME):
    """Ensure `index_name` exists, creating it with the module-level
    `mapping` and `settings` (from `args`) when absent.

    Prints the client ping result and a status message; an already
    existing index is left untouched.
    """
    print(client.ping())
    # Guard clause: nothing to do when the index is already there.
    if client.indices.exists(index=index_name):
        print("索引无需重复创建！")
        return
    result = client.indices.create(
        index=index_name,
        body={**mapping, **settings},
        ignore=400,  # tolerate "already exists"-style 400s from a race
    )
    if result.get("acknowledged"):
        print("索引创建成功")
    else:
        print("索引创建失败")
        print(result)


def get_count(start_date: str = start_date, end_date: str = end_date):
    """Return the number of documents in READ_INDEX_NAME whose `fssj`
    falls within [start_date, end_date].

    Bug fix: the parameters were previously ignored — the module-level
    `query` (bound to the default dates) was always sent, so passing a
    different range had no effect. The query body is now built from the
    arguments; the defaults preserve the old behavior for existing callers.
    """
    range_query = {
        "query": {
            "range": {
                "fssj": {
                    "gte": start_date,
                    "lte": end_date
                }
            }
        }
    }
    response = client.count(
        index=READ_INDEX_NAME, body=range_query
    )
    return response.body['count']


def wrap_response(data):
    """POST `data` to the area service and return the `data` field of
    its JSON reply (the predicted area for the given text)."""
    payload = {"content": data}
    reply = requests.post(area_service_url, json=payload)
    return reply.json()['data']

def wrap_type(data):
    """POST `data` to the type service and return the `data` field of
    its JSON reply (the predicted category for the given text)."""
    payload = {"content": data}
    reply = requests.post(type_service_url, json=payload)
    return reply.json()['data']


def get_random_type():
    """Return one entry picked uniformly at random from the module-level
    `area` list (imported from `args`).

    NOTE(review): despite the name, this samples from `area`, not a list
    of types — confirm whether the name or the source list is the mistake.
    """
    return random.choice(area)

def wrap_data(news_list: list[dict]):
    """Enrich every news dict in `news_list` in place.

    For each item, joins its title (`sjmc`) and description (`sjms`) into
    one text and calls the area and type services, storing the results in
    the `area` and `ssly` keys. Progress is shown via tqdm.
    """
    for item in tqdm(news_list):
        title = item.get('sjmc', "")
        content = item.get('sjms', "")
        text = f'{title} {content}'
        item["area"] = wrap_response(text)
        item['ssly'] = wrap_type(text)


def process_keywords(news_list: list[dict]):
    """Flatten each item's `keywords` in place: a list of
    `{'name': ...}` dicts becomes a plain list of name strings.

    Items without a `keywords` key end up with an empty list.
    """
    for item in news_list:
        item['keywords'] = [entry['name'] for entry in item.get("keywords", [])]

def read_batch():
    """Page through READ_INDEX_NAME with from/size pagination, enrich each
    page via wrap_data(), and return the combined list of `_source` dicts.

    The total is capped at MAX_COUNT. Fixes: the final log line was an
    f-string with no placeholders.

    NOTE(review): from/size pagination cannot reach past the index's
    `max_result_window` (10 000 by default) — confirm MAX_COUNT stays
    within that limit, or switch to `search_after` for deep paging.
    """
    count = min(get_count(), MAX_COUNT)
    logger.info(f"start_date: {start_date}-end_date: {end_date} count: {count}")
    logger.info(f"read_batch_size: {READ_BATCH_SIZE}")
    pages = int(math.ceil(count / READ_BATCH_SIZE))
    total_news = []
    for page in range(pages):
        offset = page * READ_BATCH_SIZE
        # Reuse the module-level date-range filter; only paging fields vary.
        page_query = {
            'query': query['query'],
            'from': offset,
            'size': READ_BATCH_SIZE,
        }
        logger.info(f"processing from: {offset}, to: {min(offset + READ_BATCH_SIZE, count)}")
        search = client.search(index=READ_INDEX_NAME, body=page_query)
        hits = search.body['hits']['hits']
        batch = [hit['_source'] for hit in hits]
        wrap_data(batch)  # mutates each dict in place, adding area/ssly
        total_news += batch
    logger.info("read data successfully!")
    return total_news

def index(json_data):
    """Bulk-insert `json_data` into WRITE_INDEX_NAME and log the number
    of successfully indexed documents.

    NOTE(review): the name shadows nothing critical here but reads like
    the builtin-style `index`; kept unchanged for caller compatibility.
    """
    success, _ = bulk(
        client,
        json_data,
        index=WRITE_INDEX_NAME,
        chunk_size=READ_BATCH_SIZE,
        timeout="100s",
    )
    logger.info(f'{success}条记录成功插入')
def read_all_data(index_name=READ_INDEX_NAME):
    """Return all hits in [start_date, end_date] from `index_name` in a
    single search request.

    Bug fix: the previous version sent no `size`, so Elasticsearch
    applied its default of 10 hits — far from "all data". The body now
    requests up to MAX_COUNT hits (still bounded by the index's
    `max_result_window`, 10 000 by default).
    """
    body = {
        "size": MAX_COUNT,  # without this, ES returns only 10 hits
        "query": {
            "range": {
                "fssj": {
                    "gte": start_date,
                    "lte": end_date,
                    "format": "yyyy-MM-dd"
                }
            }
        }
    }
    return client.search(index=index_name, body=body)['hits']['hits']




if __name__ == "__main__":
    # ETL entry point: ensure the target index exists, read and enrich the
    # configured date-range batch from the source index, then bulk-insert
    # the enriched documents into the write index.
    mapping_and_setting()
    total_news = read_batch()
    index(total_news)