from elasticsearch import Elasticsearch
import json
import requests

class ElasticSearchHelper:
    """Utility wrapper around an Elasticsearch cluster.

    Supports exporting an index to newline-delimited JSON, importing such a
    file back, copying an index between two clusters, and reading index size.
    """

    def __init__(self, es_host, es_port):
        """Connect to the cluster at http://<es_host>:<es_port>."""
        self.es_url = f"http://{es_host}:{es_port}"
        self.es = Elasticsearch(self.es_url)

    def export_data(self, index_name, export_path):
        """Export every document of *index_name* to ``<export_path>data.json``.

        One JSON object (the ``_source`` of each hit) per line (NDJSON).
        *export_path* is used as a string prefix, so it should end with a
        path separator, e.g. ``'./'``.

        Returns True on completion.
        """
        query = {"query": {"match_all": {}}}
        scroll_size = 1000

        # Use the scroll API so indices larger than one page are fully exported.
        res = self.es.search(index=index_name, body=query, size=scroll_size, scroll='1m')
        scroll_id = res['_scroll_id']

        try:
            with open(f'{export_path}data.json', 'w', encoding='utf-8') as f:
                while res['hits']['hits']:
                    for hit in res['hits']['hits']:
                        json.dump(hit['_source'], f)
                        f.write('\n')
                    # The scroll id may change between pages; always use the latest.
                    scroll_id = res['_scroll_id']
                    res = self.es.scroll(scroll_id=scroll_id, scroll='1m')
        finally:
            # Release the server-side scroll context instead of waiting for
            # the '1m' timeout to expire.
            try:
                self.es.clear_scroll(scroll_id=scroll_id)
            except Exception:
                pass  # best-effort cleanup; export already succeeded/failed

        return True

    def create_index(self, index_name):
        """Create *index_name* if it does not exist.

        Returns False when the index was just created, True when it already
        existed (i.e. the return value answers "did it exist beforehand?").
        """
        if not self.es.indices.exists(index=index_name):
            # Index missing: create it with default settings/mappings.
            self.es.indices.create(index=index_name)
            print(f"索引 '{index_name}' 已创建")
            return False
        else:
            print(f"索引 '{index_name}' 已存在")
            return True

    def import_data(self, index_name, import_path):
        """Import an NDJSON file (one document per line) into *index_name*.

        The index is created first if needed. Returns True on completion.
        """
        self.create_index(index_name)

        with open(import_path, 'r', encoding='utf-8') as file:
            for line in file:
                # Each line is one JSON document.
                data = json.loads(line)
                # Index the document (server assigns the _id).
                self.es.index(index=index_name, body=data)

        return True

    def upload_data(self, es_source_url, es_destination_url, source_index, destination_index=None):
        """Copy all documents of *source_index* from the source cluster to
        *destination_index* on the destination cluster.

        *destination_index* defaults to *source_index*. Returns True on
        success, False if any exception occurred during the copy.
        """
        es_source = Elasticsearch(es_source_url)
        es_destination = Elasticsearch(es_destination_url)

        if not destination_index:
            destination_index = source_index

        # Bug fix: the index must be created on the *destination* cluster.
        # The previous code called self.create_index(), which targets self.es
        # and therefore created the index on the wrong cluster.
        if not es_destination.indices.exists(index=destination_index):
            es_destination.indices.create(index=destination_index)
            print(f"索引 '{destination_index}' 已创建")
        else:
            print(f"索引 '{destination_index}' 已存在")

        query = {"query": {"match_all": {}}}
        scroll_size = 1000

        res = es_source.search(index=source_index, body=query, size=scroll_size, scroll='1m')
        scroll_id = res['_scroll_id']

        try:
            while res['hits']['hits']:
                for hit in res['hits']['hits']:
                    doc = hit['_source']
                    index_info = es_destination.index(index=destination_index, body=doc)
                    if 'result' in index_info and index_info['result'] in ['created', 'updated']:
                        print("文档成功插入或更新")
                    else:
                        print("文档插入或更新失败")

                scroll_id = res['_scroll_id']
                res = es_source.scroll(scroll_id=scroll_id, scroll='1m')
        except Exception as e:
            # Best-effort copy: report the failure and signal it to the caller.
            print("发生异常:", e)
            return False
        finally:
            # Release the server-side scroll context on the source cluster.
            try:
                es_source.clear_scroll(scroll_id=scroll_id)
            except Exception:
                pass

        return True

    def get_index_size(self, index_name):
        """Return the on-disk size of *index_name* in gigabytes (float)."""
        index_stats = self.es.indices.stats(index=index_name)
        index_size = index_stats['_all']['total']['store']['size_in_bytes']
        # Convert bytes -> GiB.
        index_size_gb = index_size / 1024 / 1024 / 1024
        return index_size_gb

# Usage example: guard the driver so importing this module performs no
# network I/O; the copy only runs when executed as a script.
if __name__ == "__main__":
    # es_host = '192.103.4.165'
    es_host = '192.11.59.183'
    es_port = 9200
    index_name = '&haikou.air_vocs_hour_stats'
    es_helper = ElasticSearchHelper(es_host, es_port)

    # Get index size
    # index_size_gb = es_helper.get_index_size(index_name)
    # print(f"The size of index {index_name} is {index_size_gb:.2f} GB")

    # Export data
    # export_path = './'
    # es_helper.export_data(index_name, export_path)
    # print(f"out data ok!")

    # Import data
    # import_path = './data.json'
    # es_helper.import_data(index_name, import_path)

    # Sync index between clusters
    es_source_url = "http://192.103.4.165:9200/"
    es_destination_url = "http://192.11.59.183:9200/"
    es_helper.upload_data(es_source_url, es_destination_url, index_name)