#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File  : 批量写入数据(多线程原始bulk写入).py.py
# @Author: dongguangwen
# @Date  : 2025-05-31 18:33
from elasticsearch import Elasticsearch
import json
import time
from concurrent.futures import ThreadPoolExecutor, as_completed


# Initialize the Elasticsearch client (single-node HTTP endpoint).
es = Elasticsearch(hosts='http://192.168.1.119:9200')


# Maximum number of documents per bulk request (each batch is one task).
BATCH_SIZE = 500
# Number of worker threads submitting bulk requests concurrently.
THREAD_COUNT = 4


# Build one bulk-API entry: an action/metadata line followed by the document
# source line, each newline-terminated (Elasticsearch NDJSON bulk format).
def make_bulk_data(line):
    """Convert one parsed JSON record into a two-line bulk-API string.

    line: dict parsed from one line of the input file.
    Returns a string of two newline-terminated JSON lines targeting the
    "persons" index: the index action (carrying the record's id) and the
    restructured document source.
    """
    _id = line.get('id')
    # Hoist the nested dict lookups once; fall back to {} so a record with a
    # missing section yields None-valued fields instead of raising
    # AttributeError on None.get(...).
    simple = line.get('simple_profile') or {}
    profile = line.get('profile') or {}
    doc = {
        "id": _id,
        "name": line.get('name'),
        "sex": line.get('sex'),
        "age": line.get('age'),
        "email": line.get('email'),
        "simple_profile": {
            key: simple.get(key)
            for key in ("username", "name", "sex", "address", "mail",
                        "birthdate")
        },
        "profile": {
            key: profile.get(key)
            for key in ("job", "company", "ssn", "residence", "blood_group",
                        "website", "username", "name", "sex", "address",
                        "mail", "birthdate")
        },
        # Flat top-level fields are regrouped under nested objects.
        "location": {
            key: line.get(key)
            for key in ("country", "province", "city", "street_name",
                        "street_address", "address", "postcode", "gps")
        },
        "net_info": {
            key: line.get(key)
            for key in ("ipv4", "ipv6", "uri", "url", "img_url",
                        "domain", "user_agent")
        },
        "character": line.get('character'),
        "long_text": line.get('long_text'),
        "time": line.get('time')
    }
    # BUG FIX: the original f-string produced '{"index": {...}' with a missing
    # closing brace, so every action line was invalid JSON and Elasticsearch
    # rejected the bulk request. Building the action with json.dumps keeps the
    # braces balanced and the id safely quoted/escaped (str(_id) preserves the
    # original's quoted-id behavior).
    action = json.dumps({"index": {"_index": "persons", "_id": str(_id)}})
    return f'{action}\n{json.dumps(doc)}\n'


# Worker task executed by each pool thread.
def bulk_insert(data_batch):
    """Concatenate one batch of pre-built NDJSON lines and send it to ES."""
    es.bulk(body=''.join(data_batch))


# Entry point: read the file, pre-build bulk payloads, write concurrently.
def main():
    """Stream the JSON-lines file into fixed-size batches and index them
    with a thread pool, reporting errors and total elapsed time."""
    start_time = time.time()

    # Pre-build every batch of NDJSON lines before any network work starts.
    data_batches = []
    current = []
    with open('./person_info_100w.json', 'r', encoding='utf-8') as f:
        for raw in f:
            current.append(make_bulk_data(json.loads(raw.strip())))
            if len(current) >= BATCH_SIZE:
                data_batches.append(current)
                current = []
    # Flush the final partial batch, if any.
    if current:
        data_batches.append(current)

    print(f'总共分成了 {len(data_batches)} 个批次')

    # Fan the batches out across the worker pool; surface per-batch failures
    # without aborting the remaining work.
    with ThreadPoolExecutor(max_workers=THREAD_COUNT) as executor:
        pending = [executor.submit(bulk_insert, b) for b in data_batches]
        for fut in as_completed(pending):
            try:
                fut.result()
            except Exception as e:
                print("Error during bulk insert:", e)

    end_time = time.time()
    print("总耗时：", end_time - start_time, "秒")


# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()

"""
总耗时： 667.6843385696411 秒
"""