import pandas
import pymysql
from datetime import datetime
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
from tqdm import tqdm

from conf import es_url
from conf import mysql_host, mysql_user, mysql_password, mysql_db, mysql_port


def get_data():
    """Fetch every row of the t_conversation table from MySQL.

    Returns:
        tuple[tuple, ...]: all rows as returned by cursor.fetchall().
    """
    conn = pymysql.connect(host=mysql_host, port=mysql_port, user=mysql_user, password=mysql_password, database=mysql_db)
    try:
        # Cursor context manager closes the cursor even if the query fails;
        # the finally clause guarantees the connection is released too
        # (the original leaked both on any exception before conn.close()).
        with conn.cursor() as cursor:
            cursor.execute("select * from t_conversation")
            return cursor.fetchall()
    finally:
        conn.close()


def create_es_data():
    """Create the 'conversation' index and bulk-load all MySQL rows into it.

    Creates the index with an explicit mapping, fetches every t_conversation
    row via get_data(), converts each row to a bulk action, and indexes them
    in one bulk call. Any failure (index already exists, connection error,
    malformed row) is caught and printed — this is a best-effort script.
    """
    es = Elasticsearch(hosts=es_url)
    index_name = 'conversation'

    mapping = {
        "mappings": {
            "properties": {
                "chat_id": {"type": "integer"},
                # fielddata allows sorting/aggregating on this text field;
                # a real boolean is clearer than the string "true" (ES
                # coerces the string, so this is behavior-compatible).
                "content": {"analyzer": "standard", "type": "text", "fielddata": True},
                "end_time": {"type": "date", "format": "HH:mm:ss"},
                "id": {"type": "integer"},
                "start_time": {"type": "date", "format": "HH:mm:ss"},
                "talker_name": {"type": "keyword"},
                "teacher_info": {"type": "object"},
                "pause_time": {"type": "integer"},
            }
        }
    }

    try:
        es.indices.create(index=index_name, body=mapping)
        print("Index created successfully.")

        # Fetch all rows from MySQL.
        results = get_data()
        print("get data successfully")

        # Convert each row into a bulk action document.
        processed_data = [
            _row_to_action(index_name, row)
            for row in tqdm(results, desc="Processing data", unit="document")
        ]

        # Index everything in a single bulk request.
        success, _ = bulk(es, processed_data, raise_on_error=True)
        print(f"Bulk insert completed successfully! Inserted {success} documents.")

    except Exception as e:
        # Best-effort: report the failure rather than crashing the script.
        print("Error:", str(e))


def _row_to_action(index_name, row):
    """Map one t_conversation row (tuple) to an Elasticsearch bulk action.

    Column order assumed from the original code: 0=id, 1=chat_id,
    2=start_time, 3=end_time, 4=talker_name, 5=teacher code, 6=pause_time,
    7=content — TODO confirm against the actual t_conversation schema.
    """
    def _hms(value):
        # Round-trip through strptime both to validate the value and to
        # zero-pad it to HH:MM:SS (e.g. '9:30:00' -> '09:30:00').
        return datetime.strptime(str(value), '%H:%M:%S').strftime('%H:%M:%S')

    return {
        "_index": index_name,
        "_source": {
            "id": row[0],
            "chat_id": row[1],
            "content": row[7],
            "end_time": _hms(row[3]),
            "start_time": _hms(row[2]),
            "talker_name": row[4],
            "teacher_info": get_teacher_info(row[5]),
            "pause_time": row[6],
        },
    }


def get_teacher_info(talker_name):
    """Look up a teacher's record by code in teacher.csv.

    Args:
        talker_name: value matched against the CSV 'code' column.

    Returns:
        dict of the first matching row, or None when no row matches.
    """
    df = _load_teacher_df()
    teacher = df[df['code'] == talker_name]
    if teacher.empty:
        return None
    return teacher.iloc[0].to_dict()


def _load_teacher_df():
    """Read teacher.csv once and cache it.

    get_teacher_info() is called once per MySQL row during the bulk load;
    the original re-read and re-parsed the CSV on every call.
    """
    if _load_teacher_df._cache is None:
        _load_teacher_df._cache = pandas.read_csv('teacher.csv')
    return _load_teacher_df._cache


_load_teacher_df._cache = None


def delete_index():
    """Drop the 'conversation' index and report whether Elasticsearch
    acknowledged the deletion."""
    index_name = 'conversation'
    client = Elasticsearch(hosts=es_url)

    # Issue the delete and inspect the acknowledgement flag in the response.
    response = client.indices.delete(index=index_name)
    acknowledged = bool(response) and response.get('acknowledged')

    if acknowledged:
        print(f"Index '{index_name}' deleted successfully.")
    else:
        print(f"Failed to delete index '{index_name}'.")


if __name__ == "__main__":
    # Script entry point: create the ES index and bulk-load the MySQL rows.
    create_es_data()