"""
@author: wy
@file: consumer.py
@time: 2025/1/7 13:47
"""
import json
import time
from datetime import datetime
from multiprocessing import Process, current_process

import pymysql
import requests
from kafka import KafkaConsumer, TopicPartition

from config import *   # project config: kafka hosts, provinces, operators lists


def consume_kafka_partition(topic, group_id, partition):
    """
    Consume one specific partition of a Kafka topic forever, parsing each
    nginx-access-log message and persisting it to MySQL via write2db().

    Intended to run as the target of one worker process per partition.

    :param topic: Kafka topic name to read from.
    :param group_id: consumer group id (offsets are committed per group, so a
        new group name replays the topic from the beginning).
    :param partition: partition number this process is responsible for.
    """
    consumer = KafkaConsumer(
        group_id=group_id,
        bootstrap_servers=[kafka_host_1, kafka_host_2, kafka_host_3],
        auto_offset_reset='earliest',  # start from the beginning for new groups
        enable_auto_commit=True,       # auto-commit offsets
        auto_commit_interval_ms=5000,  # commit every 5 seconds
        value_deserializer=lambda x: x.decode('utf-8'),  # bytes -> UTF-8 str
    )
    # BUG FIX: the original called consumer.subscribe(topic), which lets the
    # broker balance partitions across the group and ignores `partition`
    # entirely. Manually assign the requested partition so each worker
    # process really owns exactly one partition, as the caller intends.
    consumer.assign([TopicPartition(topic, partition)])

    for message in consumer:
        print(f"进程 {current_process().name} 消费到来自分区 {partition} 的消息: {message.value}")
        record = _parse_log_message(message.value)
        if record is None:
            # BUG FIX: a single malformed message used to raise and kill the
            # whole worker process; skip it instead.
            continue
        print(record)
        write2db(record)


def _parse_log_message(raw_value):
    """
    Parse one JSON-wrapped nginx access-log line into a row for write2db().

    :param raw_value: UTF-8 JSON string with a 'message' key holding the raw
        log line (space-separated nginx combined-log fields — assumed; TODO
        confirm against the producer).
    :return: [ip_address, pro, Operator, used_date, flow_rate] where pro and
        Operator are replaced by their index in the config lists when found,
        or None if the message cannot be parsed.
    """
    try:
        fields = json.loads(raw_value).get('message', '').split()
        ip_address = fields[0]                 # client IP address
        raw_time = fields[3][1:]               # strip leading '[' from the timestamp
        flow_rate = float(fields[9])           # bytes sent
        # normalize "07/Jan/2025:13:47:00" -> "2025-01-07 13:47:00"
        used_date = datetime.strptime(raw_time, '%d/%b/%Y:%H:%M:%S') \
                            .strftime('%Y-%m-%d %H:%M:%S')
    except (ValueError, IndexError, KeyError, AttributeError, TypeError):
        return None

    ip_result = get_ip_info(ip_address)
    # .get() keeps this robust when the lookup failed and returned {}
    pro = ip_result.get('pro', "")
    addr_parts = ip_result.get('addr', "").split()
    # BUG FIX: the original did .split()[-1] unconditionally, which raises
    # IndexError whenever 'addr' is missing/empty (e.g. request failure).
    operator = addr_parts[-1] if addr_parts else ""

    # Map province / operator names to their index in the config lists.
    for index, province in enumerate(provinces):
        if province == pro:
            pro = index
            break
    for index, op in enumerate(operators):
        if op == operator:
            operator = index
            break  # BUG FIX: original loop was missing this break

    return [ip_address, pro, operator, used_date, flow_rate]

# 解析IP
# Resolve an IP address to geo/ISP info
def get_ip_info(ip_address, timeout=5):
    """
    Look up province / ISP information for an IP via the pconline JSON API.

    :param ip_address: IP address string to query.
    :param timeout: request timeout in seconds. BUG FIX: the original call had
        no timeout, so a stalled server would hang the consumer forever.
    :return: the parsed JSON dict on success; an empty dict on any request or
        decoding error, so callers can safely use .get().
    """
    url = 'https://whois.pconline.com.cn/ipJson.jsp'
    params = {
        'ip': ip_address,
        'json': 'true'
    }
    try:
        response = requests.get(url, params=params, timeout=timeout)
        response.raise_for_status()  # raise for non-2xx status codes
        return response.json()
    except (requests.RequestException, ValueError) as e:
        # ValueError covers a non-JSON body, honoring the "empty dict on
        # error" contract instead of leaking an exception to the caller.
        print(f"请求出错: {e}")
        return {}


def write2db(data):
    """
    Insert one parsed log record into the ip_info table, creating the table
    on first use.

    :param data: sequence of 5 values in order:
        (ip_address, pro, Operator, used_date, flow_rate).
    :raises pymysql.MySQLError: on connection or statement failure (the
        connection is always closed, even on error).
    """
    # NOTE(review): host/credentials are hard-coded; consider moving them to
    # config.py alongside the kafka settings.
    conn = pymysql.connect(host='192.168.10.137',
                           user="root",
                           passwd="123456",
                           port=3306,
                           db="test1"
                           )
    # BUG FIX: the original never closed the cursor and leaked the connection
    # whenever a statement raised; try/finally + context-managed cursor fix it.
    try:
        with conn.cursor() as cur:
            # Idempotent DDL: only creates the table if it does not exist yet.
            cur.execute("""
            CREATE TABLE IF NOT EXISTS ip_info(
                ip_address VARCHAR(255),
                pro VARCHAR(255),
                Operator VARCHAR(255),
                used_date VARCHAR(255),
                flow_rate VARCHAR(255)
            )
            """)
            # Parameterized insert — values are bound server-side, not
            # string-formatted, so no SQL injection risk.
            cur.execute(
                "INSERT INTO ip_info(ip_address,pro,Operator,used_date,flow_rate) VALUES(%s, %s, %s, %s, %s)",
                data,
            )
        conn.commit()
    finally:
        # Always release the connection, success or failure.
        conn.close()

#consume_kafka_partition("my_topic", "test_group", 0)

if __name__ == "__main__":
    topic = "nginxlog"  # topic to consume; adjust to the deployment
    group_id = "message_group3"  # consumer group; a fresh name replays all retained messages
    partitions = [0, 1, 2]  # partitions to consume, one worker process each

    def start_process(target, args):
        """Spawn, name, and start one consumer worker; return the Process."""
        worker = Process(
            target=target,
            args=args,
            name=f"Consumer-Partition-{args[2]}",  # name after its partition
        )
        worker.start()
        return worker

    # Fan out: one worker per partition.
    processes = [
        start_process(consume_kafka_partition, (topic, group_id, partition))
        for partition in partitions
    ]

    # Wait for every worker to finish before reporting completion.
    for worker in processes:
        worker.join()

    print("所有进程已结束，指定分区消费完成")
