from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import os
import time
from PIL import Image
import socket

# Restrict the process to GPU 0. This must be set before any CUDA context
# is created (i.e. before the model is loaded) for torch to honor it.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
torch.manual_seed(1234)

# Load the tokenizer from the locally cached Qwen-VL-Chat checkpoint.
# trust_remote_code=True is required because Qwen-VL ships custom code
# with the checkpoint.
tokenizer = AutoTokenizer.from_pretrained("/root/.cache/modelscope/hub/qwen/Qwen-VL-Chat", trust_remote_code=True)

# Load the model for inference only; device_map="auto" lets accelerate
# place the weights on the visible GPU(s) automatically.
model = AutoModelForCausalLM.from_pretrained(
    "/root/.cache/modelscope/hub/qwen/Qwen-VL-Chat",
    device_map="auto",
    trust_remote_code=True
).eval()

# TCP server: deliver one response to one client, then return.
def tcp_server(response):
    """Serve *response* to a single TCP client, then return.

    Fixes vs. the original implementation:
    - The original looped forever accepting clients, so the caller's
      re-query/sleep cycle after ``tcp_server()`` could never run; now
      exactly one client is served per call.
    - ``SO_REUSEADDR`` is set and the listening socket is closed before
      returning, so re-binding port 8089 on the next call does not fail
      with "Address already in use".
    - The client socket is closed in ``finally`` (the original leaked
      one file descriptor per client).

    Args:
        response: Text to send to the client, encoded as UTF-8.
    """
    # NOTE(review): bind address is hard-coded to a container IP —
    # consider making it configurable. TODO confirm 172.17.0.2 is stable.
    server_address = ('172.17.0.2', 8089)

    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # Allow immediate re-bind after the previous call released the port.
        server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server_socket.bind(server_address)

        # Listen with a backlog of one pending connection.
        server_socket.listen(1)
        print(f"服务器正在监听 {server_address}...")

        # Block until one client connects; answer it, then return.
        client_socket, client_address = server_socket.accept()
        try:
            print(f"连接来自 {client_address}")

            # Read the client's request (at most 1024 bytes).
            data = client_socket.recv(1024)
            if data:
                print("收到的数据:", data.decode('utf-8'))

                # Reply with the prepared response.
                client_socket.sendall(response.encode('utf-8'))

        except Exception as e:
            print(f"传输过程中发生错误: {e}")

        finally:
            client_socket.close()
            print("关闭连接")
    finally:
        server_socket.close()

# Main service loop: caption the current frame with Qwen-VL, expose the
# answer over TCP, then refresh every five seconds.
while True:
    # Build a multimodal prompt: the captured frame plus the question.
    prompt = tokenizer.from_list_format([
        {'image': '/root/qwen2/frame.jpg'},
        {'text': '你看到了什么'},
    ])

    # Run inference and measure wall-clock latency.
    t0 = time.time()
    response, history = model.chat(tokenizer, prompt, history=None)
    elapsed_time = time.time() - t0

    # Prefix the answer with a human-readable local timestamp.
    now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    formatted_response = f"[{now}] 回答如下: {response}"
    print(f"运行时间: {elapsed_time:.2f} 秒")
    print(formatted_response)

    # Hand the formatted answer to the TCP server for delivery.
    tcp_server(formatted_response)
    # Pause before captioning the next frame.
    time.sleep(5)