import os
from datetime import datetime

import pandas as pd
from elasticsearch import Elasticsearch, helpers, exceptions

from utils.es_upload_utils.new_case import main_run

# Connect to Elasticsearch.
# NOTE(review): hard-coded host — consider moving to config/env before deploying.
es = Elasticsearch("http://10.206.60.14:9200")

# Target index for the court-case documents.
index_name = "case_index_1"


# Alternative hosts used during development:
# es = Elasticsearch("http://143.114.0.42:9200")
# es = Elasticsearch("http://127.0.0.1:9200")


# Earlier single-CSV ingestion path (superseded by the per-directory flow in __main__):
# csv_path = '/Users/chongwen/Downloads/裁判文书全量数据（已完成）/2021年裁判文书数据_马克数据网/2021年08月裁判文书数据.csv'
# df = pd.read_csv(csv_path)


# Function to create an index with the specified settings and mappings
def create_index(index_name):
    """Create *index_name* with IK-analyzed text fields for case documents.

    Uses the ``ik_max_word`` tokenizer as the default analyzer (requires the
    IK analysis plugin on the cluster). If the index already exists, nothing
    is created and a message is printed instead.

    Args:
        index_name: name of the Elasticsearch index to create.
    """
    # Full-text searchable fields vs. exact-match identifier fields.
    text_fields = (
        "case_name", "case_number", "case_reason", "case_type", "court",
        "full_text", "judgment_date", "legal_basis", "parties",
        "publication_date", "region", "trial_procedure",
    )
    keyword_fields = ("case_type_code", "original_link", "source")

    properties = {field: {"type": "text"} for field in text_fields}
    properties.update({field: {"type": "keyword"} for field in keyword_fields})

    index_config = {
        "settings": {
            "index": {
                "number_of_shards": 1,
                "number_of_replicas": 1,
                "analysis": {
                    "analyzer": {
                        # Chinese word segmentation for all text fields.
                        "default": {
                            "tokenizer": "ik_max_word"
                        }
                    }
                }
            }
        },
        "mappings": {"properties": properties},
    }
    try:
        if es.indices.exists(index=index_name):
            print(f"Index {index_name} already exists.")
            return
        es.indices.create(index=index_name, body=index_config)
        print(f"Index {index_name} created successfully.")
    except exceptions.RequestError as e:
        # e.g. plugin missing, bad mapping; reported but not re-raised.
        print(f"An error occurred: {e}")


# Prepare data for bulk import into Elasticsearch
def generate_data(df):
    for index, row in df.iterrows():
        yield {
            "_index": index_name,
            "_source": {
                "original_link": None if pd.isnull(row['原始链接']) else row['原始链接'],
                "case_number": None if pd.isnull(row['案号']) else row['案号'],
                "case_name": None if pd.isnull(row['案件名称']) else row['案件名称'],
                "court": None if pd.isnull(row['法院']) else row['法院'],
                "region": None if pd.isnull(row['所属地区']) else row['所属地区'],
                "case_type": None if pd.isnull(row['案件类型']) else row['案件类型'],
                "case_type_code": None if pd.isnull(row['案件类型编码']) else row['案件类型编码'],
                "source": None if pd.isnull(row['来源']) else row['来源'],
                "trial_procedure": None if pd.isnull(row['审理程序']) else row['审理程序'],
                "judgment_date": None if pd.isnull(row['裁判日期']) else row['裁判日期'],
                "publication_date": None if pd.isnull(row['公开日期']) else row['公开日期'],
                "parties": None if pd.isnull(row['当事人']) else row['当事人'],
                "case_reason": None if pd.isnull(row['案由']) else row['案由'],
                "legal_basis": None if pd.isnull(row['法律依据']) else row['法律依据'],
                "full_text": None if pd.isnull(row['全文']) else row['全文'],
            }
        }


def delete_index(index):
    """Delete *index* if it exists, printing the outcome either way."""
    if not es.indices.exists(index=index):
        print("Index does not exist.")
        return
    response = es.indices.delete(index=index)
    print("Index deleted:", response)


def convert_date(date_str):
    """Reformat a compact ``YYYYMMDD`` string as ``YYYY-MM-DD``.

    Pure slicing, no validation: short or malformed input produces a
    correspondingly truncated result rather than raising.
    """
    return f"{date_str[:4]}-{date_str[4:6]}-{date_str[6:8]}"


def list_subdirectories(directory_path):
    """Return the names of the immediate subdirectories of *directory_path*.

    Returns an empty list when the directory does not exist, so callers can
    iterate the result unconditionally. (The previous version returned the
    string "Directory not found.", which the ``__main__`` loop would then
    iterate character by character.)

    Args:
        directory_path: path of the directory to scan.

    Returns:
        list[str]: subdirectory names (files are excluded); ``[]`` if the
        directory is missing.
    """
    try:
        entries = os.listdir(directory_path)
    except FileNotFoundError:
        return []
    return [name for name in entries
            if os.path.isdir(os.path.join(directory_path, name))]


if __name__ == '__main__':
    # Rebuild the index from scratch before importing the data batches.
    delete_index(index_name)
    create_index(index_name)

    directory_path = "/Users/chongwen/Desktop/工作项目/AI/济南AI项目/部署/案例数据/zip_bak"
    # Each subdirectory is named with a compact date (YYYYMMDD...) and holds
    # one batch of case files.
    for batch_dir in list_subdirectories(directory_path):
        batch_path = directory_path + "/" + batch_dir
        batch_date = convert_date(batch_dir)
        print(batch_date)
        helpers.bulk(es, main_run.run(batch_path, batch_date))
        print("Data import completed: " + batch_dir)

    #
    # # Bulk import data into Elasticsearch
    # # helpers.bulk(es, generate_data(df))
    #
