import time
import requests
import json
from tqdm import tqdm
import threading
from zhipuai import ZhipuAI
import random

# api key

key1 = "2b3d832c9ee84e38a3ad94586d6e4d01.aUEHaqeRkinBFz2H"
key2 = "d241b0eef9e2474d81a2937cb242a572.grD24JlLUwkkXd7I"
key3 = "86e99e4867bb45e1bd7d2abd87549bf9.VJCZ4YqHYQz3a6wD"
key4 = "d59b2e6255994d199f6d251a65056147.UAuIN71r0O3w31GG"



class APIModel:
    """Thin wrapper around the ZhipuAI chat-completions API.

    Supports single-prompt chat with retry, and a threaded batch mode that
    caps the number of concurrent in-flight requests.  Every request picks a
    random API key from ``self.keys`` to spread load across accounts.
    """

    def __init__(self, model="glm-4-plus", api_key=key1) -> None:
        """
        Args:
            model: ZhipuAI model code passed to the completions endpoint.
            api_key: retained for backward compatibility; requests actually
                rotate randomly over ``self.keys`` and ignore this value.
        """
        # NOTE(review): hard-coded secrets — move to env/config and rotate.
        self.keys = [
            "2b3d832c9ee84e38a3ad94586d6e4d01.aUEHaqeRkinBFz2H",
            "d241b0eef9e2474d81a2937cb242a572.grD24JlLUwkkXd7I",
            "86e99e4867bb45e1bd7d2abd87549bf9.VJCZ4YqHYQz3a6wD",
            "d59b2e6255994d199f6d251a65056147.UAuIN71r0O3w31GG",
        ]
        self.model = model
        # Client is (re)built per request in __req with a freshly chosen key.
        self.client = None

    def __req(self, text, temperature, max_try=5):
        """Send one chat request, retrying up to ``max_try`` times.

        Returns:
            The assistant's reply text, or "" if every attempt failed.

        NOTE(review): ``temperature`` is accepted but NOT forwarded to the
        API (the original code dropped it as well).  Confirm intent before
        passing it through — ZhipuAI restricts the accepted value range, and
        the defaults used by callers here (0 and 1) sit on its boundaries.
        """
        # Rotate keys per request to spread rate limits across accounts.
        self.client = ZhipuAI(api_key=random.choice(self.keys))
        for _attempt in range(max_try):
            try:
                completion = self.client.chat.completions.create(
                    model=self.model,
                    messages=[
                        {"role": "system", "content": "You are a helpful assistant."},
                        {"role": "user", "content": text},
                    ],
                )
                # Return the text directly so a failure while extracting it
                # can never leak the raw SDK response object to the caller.
                return completion.choices[0].message.content
            except Exception as e:
                print(f"error: {e}")
        return ""

    def chat(self, text, temperature=1):
        """Return the model's reply to a single prompt (blocking)."""
        return self.__req(text, temperature=temperature, max_try=5)

    def __chat(self, text, temperature, res_l, idx):
        """Thread target: store the reply for prompt ``idx`` into ``res_l``."""
        response = self.__req(text, temperature=temperature, max_try=5)
        res_l[idx] = response
        return response

    def batch_chat(self, text_batch, temperature=0):
        """Send a batch of prompts concurrently via worker threads.

        Returns:
            A list of replies aligned with ``text_batch``; entries whose
            request never produced a reply remain 'No response'.
        """
        max_threads = 15  # limit max concurrent threads using model API
        res_l = ['No response'] * len(text_batch)
        thread_l = []
        for i, text in enumerate(text_batch):
            thread = threading.Thread(target=self.__chat, args=(text, temperature, res_l, i))
            thread_l.append(thread)
            thread.start()
            # Throttle: wait until a slot frees up before launching the next.
            while len(thread_l) >= max_threads:
                # Rebuild the list instead of remove()-while-iterating, which
                # skips the element after each removed one.
                thread_l = [t for t in thread_l if t.is_alive()]
                time.sleep(0.3)  # short delay to avoid busy-waiting

        for thread in tqdm(thread_l):
            thread.join()
        return res_l
