from elasticsearch import Elasticsearch
import re

# Elasticsearch client for this script; the commented hosts are alternative
# deployments (kept for quick switching between environments).
# es = Elasticsearch("http://10.206.60.14:9200")
# es = Elasticsearch("http://143.114.0.42:9200")
es = Elasticsearch("http://127.0.0.1:9200")


def read_law_data(file_path):
    """Read the statute text file and return its lines (trailing newlines kept)."""
    with open(file_path, 'r', encoding='utf-8') as handle:
        return list(handle)


def clean_space(text):
    """Normalize a field value: convert ideographic spaces (U+3000) to ASCII
    spaces and strip surrounding whitespace; ``None`` passes through unchanged."""
    return None if text is None else text.replace('\u3000', ' ').strip()


# Statute currently being ingested; uncomment the matching pair to switch laws.
# law_name = "刑法"
law_name = "民法典"
# file_path = '/Users/chongwen/Desktop/工作项目/AI/济南AI项目/部署/法律法规/刑法.txt'
file_path = '/Users/chongwen/Desktop/工作项目/AI/济南AI项目/部署/法律法规/民法典.txt'


def parse_law_data(lines, law_title=None):
    """Parse the flat text of a Chinese statute into structured article records.

    Heading lines update the running hierarchy context (编 volume / 分编
    subvolume / 章 chapter / 节 section); each 条 (article) line starts a new
    record that absorbs following body lines until the next heading or article.

    Args:
        lines: iterable of raw text lines (e.g. from ``read_law_data``).
        law_title: statute name stamped on every record; defaults to the
            module-level ``law_name`` so existing callers are unaffected.

    Returns:
        list of dicts with keys: law_name, volume, subvolume, chapter,
        section, article_number, article_text.
    """
    if law_title is None:
        law_title = law_name  # backward-compatible default (module global)

    pattern_map = {
        "volume": re.compile(r"^\s*第[\d一二三四五六七八九十百零]+编\s+.+"),
        "subvolume": re.compile(r"^第[\d一二三四五六七八九十百零]+分编"),
        "chapter": re.compile(r"^第[\d一二三四五六七八九十百零]+章"),
        "section": re.compile(r"^第[\d一二三四五六七八九十百零]+节"),
        "article": re.compile(r"^第[\d一二三四五六七八九十百千零]+条")
    }

    def _make_record(context, number, text):
        # Single place that normalizes and assembles a record (the original
        # duplicated this dict literal for the in-loop and final flushes).
        return {
            "law_name": clean_space(law_title),
            "volume": clean_space(context["volume"]),
            "subvolume": clean_space(context["subvolume"]),
            "chapter": clean_space(context["chapter"]),
            "section": clean_space(context["section"]),
            "article_number": clean_space(number),
            "article_text": clean_space(text),
        }

    articles = []
    context = {"volume": None, "subvolume": None, "chapter": None, "section": None}
    collecting = False
    article_number = ""
    article_text = ""

    for raw in lines:
        line = raw.strip()
        if not line:
            continue  # blank lines would otherwise inject stray spaces into article_text
        if pattern_map["volume"].match(line):
            # A new 编 resets every level below it.
            context = {"volume": line, "subvolume": None, "chapter": None, "section": None}
        elif pattern_map["subvolume"].match(line):
            # A new 分编 invalidates the previous chapter/section as well
            # (the original left them stale).
            context["subvolume"] = line
            context["chapter"] = None
            context["section"] = None
        elif pattern_map["chapter"].match(line):
            # A new 章 invalidates the previous 节, so section-less chapters
            # no longer inherit the prior chapter's section.
            context["chapter"] = line
            context["section"] = None
        elif pattern_map["section"].match(line):
            context["section"] = line
        elif pattern_map["article"].match(line):
            if collecting:  # flush the previous article before starting a new one
                articles.append(_make_record(context, article_number, article_text))
            # Split "第N条" from the body on any whitespace; unlike the original
            # hard-coded U+3000 split, this also handles ASCII spaces.
            parts = line.split(maxsplit=1)
            article_number = parts[0]
            article_text = parts[1] if len(parts) > 1 else ""
            collecting = True
        elif collecting:
            article_text += " " + line  # continuation line of the current article body

    if collecting:  # flush the final article
        articles.append(_make_record(context, article_number, article_text))

    return articles


# Target Elasticsearch index that all parsed articles are uploaded into.
index_name = "laws_index"


def upload_to_es(articles):
    """Index each parsed article document into Elasticsearch, echoing the result."""
    for doc in articles:
        res = es.index(index=index_name, body=doc)
        print(f"Uploaded: {res['result']}")


# NOTE: the index name constant is defined above (index_name = "laws_index").


# Drop an index (maintenance helper).
def delete_index(index):
    """Delete the given index if it exists; otherwise just report that fact."""
    if not es.indices.exists(index=index):
        print("Index does not exist.")
        return
    response = es.indices.delete(index=index)
    print("Index deleted:", response)


# Index settings and mappings. Uses the IK Chinese tokenizers:
# ik_max_word (fine-grained, for indexing article text) and
# ik_smart (coarser, for headings and query-time analysis).
index_body = {
    "settings": {
        "analysis": {
            "analyzer": {
                "ik_max_analyzer": {
                    "type": "custom",
                    "tokenizer": "ik_max_word"
                },
                "ik_smart_analyzer": {
                    "type": "custom",
                    "tokenizer": "ik_smart"
                }
            }
        }
    },
    "mappings": {
        "properties": {
            "law_name": {
                "type": "text",
                "analyzer": "ik_smart_analyzer"
            },
            "volume": {
                "type": "text",
                "analyzer": "ik_smart_analyzer"
            },
            "subvolume": {
                "type": "text",
                "analyzer": "ik_smart_analyzer"
            },
            "chapter": {
                "type": "text",
                "analyzer": "ik_smart_analyzer"
            },
            "section": {
                "type": "text",
                "analyzer": "ik_smart_analyzer"
            },
            # Exact-match field (e.g. "第一条"), so no analyzer.
            "article_number": {
                "type": "keyword"
            },
            # Fine-grained at index time, coarser at search time.
            "article_text": {
                "type": "text",
                "analyzer": "ik_max_analyzer",
                "search_analyzer": "ik_smart_analyzer"
            }
        }
    }
}
# Alternative index configuration using jieba analyzers (kept for reference):
# index_body = {
#     "settings": {
#         "analysis": {
#             "analyzer": {
#                 "jieba_index": {
#                     "type": "custom",
#                     "tokenizer": "jieba_index"
#                 },
#                 "jieba_search": {
#                     "type": "custom",
#                     "tokenizer": "jieba_search"
#                 }
#             }
#         }
#     },
#     "mappings": {
#         "properties": {
#             "law_name": {
#                 "type": "text",
#                 "analyzer": "jieba_search"
#             },
#             "volume": {
#                 "type": "text",
#                 "analyzer": "jieba_search"
#             },
#             "subvolume": {
#                 "type": "text",
#                 "analyzer": "jieba_search"
#             },
#             "chapter": {
#                 "type": "text",
#                 "analyzer": "jieba_search"
#             },
#             "section": {
#                 "type": "text",
#                 "analyzer": "jieba_search"
#             },
#             "article_number": {
#                 "type": "keyword"
#             },
#             "article_text": {
#                 "type": "text",
#                 "analyzer": "jieba_index",
#                 "search_analyzer": "jieba_search"
#             }
#         }
#     }
# }

# Create the index (no-op if it already exists).
def create_index(index_name):
    """Create ``index_name`` with the module-level ``index_body`` settings/mappings.

    Skips creation when the index already exists. Fixes the original's silent
    path: a response whose ``acknowledged`` flag is present but falsy printed
    nothing at all — it is now reported as an error.
    """
    if es.indices.exists(index=index_name):
        print(f"Index {index_name} already exists.")
        return

    response = es.indices.create(index=index_name, body=index_body)
    if 'acknowledged' in response and response['acknowledged']:
        print(f"Index {index_name} created successfully.")
    else:
        print("Error creating index:", response)


# Script entry point: parse the configured statute file and upload it.
if __name__ == "__main__":
    # One-off maintenance helpers (uncomment to rebuild the index first):
    # delete_index("laws_test")
    # create_index(index_name)

    law_lines = read_law_data(file_path)
    parsed_data = parse_law_data(law_lines)
    print(len(parsed_data))  # number of articles parsed, as a sanity check
    # print(parsed_data)
    upload_to_es(parsed_data)
