import json
from kafka import KafkaProducer
from datetime import datetime
import pytz

# All log timestamps are emitted in China Standard Time.
tz = pytz.timezone('Asia/Shanghai')

# Create the shared KafkaProducer instance. Because payloads may embed image
# data, the max message size is raised to 64MB. NOTE(review): this is
# discouraged — a message queue should ideally carry text/metadata only.
producer = KafkaProducer(
    bootstrap_servers=['172.16.1.38:9092'],  # Aliyun kafka: 172.16.1.184:9092; Xingtu kafka: 10.1.109.157:9092
    value_serializer=lambda x: json.dumps(x).encode('utf-8'),   # serialize Python objects to UTF-8 JSON bytes
    max_request_size=67108864,  # 64MB (default is 1MB)
    buffer_memory=67108864,    # 64MB
    compression_type='gzip',   # enable compression to offset the large payloads
    # request_timeout_ms=50000
)

def send_data(topic, message):
    """Publish a single payload to *topic* and block until it is delivered.

    topic: Kafka topic name (e.g. "aili_test").
    message: JSON-serializable object; the module-level producer's
        value_serializer performs the json.dumps/encode step.
    """
    producer.send(topic, value=message)  # enqueue on the producer's internal buffer
    producer.flush()  # block until every buffered record has been sent


# Maps each known INFO-level pipeline stage (node1) to its result-message
# template; {link} is substituted with the URL being crawled.
_INFO_TEMPLATES = {
    "Crawling start": "Start crawl {link}",
    "Simulate browser scraping": "Successfully fetched the webpage {link}",
    "Analyze webpage": "Successfully parsed the webpage {link}",
    "Image download": "Download images {link}",
    "Create HTML": "HTML saved successfully {link}",
    "Create JSON": "JSON saved successfully {link}",
    "Crawling finished": "Finish crawl {link}",
}


def send_log(topic, workid, exeid, link, node1, abnormal):
    """Build a structured crawl-log record and publish it to Kafka.

    topic: Kafka topic to publish to (e.g. "spider_log").
    workid: workflow identifier copied into the record.
    exeid: execution identifier copied into the record.
    link: URL being crawled, embedded in the log message.
    node1: pipeline stage name; one of the keys of _INFO_TEMPLATES logs at
        INFO, "Error" logs at ERROR with *abnormal*, anything else is
        reported as a parameter error at ERROR.
    abnormal: exception/description used only when node1 == "Error".
    """
    if node1 in _INFO_TEMPLATES:
        loglev = "INFO"
        msg = str({'result': _INFO_TEMPLATES[node1].format(link=link)})
    elif node1 == "Error":
        loglev = "ERROR"
        msg = str({'result': f'Crawl error：{str(abnormal)} {link}'})
    else:
        # Unrecognized node1 value — report the bad call itself.
        loglev = "ERROR"
        msg = str({'result': f'Kafka param error：{link}'})
    # NOTE(review): str({...}) yields a Python-repr string (single quotes),
    # not JSON; preserved because downstream consumers may rely on it.

    # Millisecond-precision timestamp in Asia/Shanghai local time.
    times = datetime.now(tz=tz).strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
    data_dict = {
        "time": times,
        "logLevel": loglev,
        "workflowId": workid,
        "executionId": exeid,
        "node": "The current node is crawling",
        "node1": node1,
        "node2": "",
        "msg": msg
    }
    producer.send(topic, value=data_dict)
    producer.flush()  # deliver immediately so log order is preserved


  
