import time
import requests
import json
from tqdm import tqdm
import threading

class APIModel:
    """Thin client for an OpenAI-compatible chat-completions HTTP API.

    Supports single prompts (``chat``) and thread-based batched prompts
    (``batch_chat``), with optional retry-until-valid answer validation.
    """

    def __init__(self, model, api_key, api_url) -> None:
        # Credentials/endpoint are name-mangled to discourage external access.
        self.__api_key = api_key
        self.__api_url = api_url
        self.model = model

    def _build_payload(self, text, temperature):
        """Return the JSON-encoded request body for a single user message.

        NOTE: ``temperature`` is a top-level sampling parameter of the
        chat-completions API, not a per-message field — the original code
        nested it inside the message, where the API ignores it.
        """
        return json.dumps({
            "model": f"{self.model}",
            "temperature": temperature,
            "messages": [{"role": "user", "content": f"{text}"}],
        })

    def __req(self, text, temperature, max_try=5):
        """POST one chat request; return the reply text, or None after ``max_try`` failures.

        Retries on non-200 status, network errors, malformed JSON, and
        unexpected response shapes, sleeping briefly between attempts.
        """
        url = f"{self.__api_url}"
        payload = self._build_payload(text, temperature)
        headers = {
            'Accept': 'application/json',
            'Authorization': f'Bearer {self.__api_key}',
            'User-Agent': 'Apifox/1.0.0 (https://apifox.com)',
            'Content-Type': 'application/json',
        }
        for attempt in range(max_try):
            try:
                response = requests.post(url, headers=headers, data=payload, timeout=(5, 90))

                # Only a 200 carries a usable completion body.
                if response.status_code != 200:
                    print(f"Request failed with status {response.status_code}: {response.text}")
                    time.sleep(0.1)
                    continue

                response_json = response.json()

                # A well-formed reply carries a non-empty 'choices' list.
                if 'choices' in response_json and response_json['choices']:
                    return response_json['choices'][0]['message']['content']

                print(f"Unexpected API response format: {response_json}")

            except requests.exceptions.RequestException as e:
                print(f"Request failed: {e}")
            except json.JSONDecodeError:
                print(f"Failed to parse JSON response: {response.text}")
            time.sleep(0.1)
        return None  # explicit: every attempt was exhausted

    def chat(self, text, temperature=0.3):
        """Send one prompt and return the model's reply (or None on failure)."""
        return self.__req(text, temperature=temperature, max_try=5)

    def __chat(self, text, temperature, res_l, idx, pbar=None):
        """Worker: store the reply for ``text`` into ``res_l[idx]``.

        Exceptions are recorded in-place as "ERROR: ..." strings so one
        failed request never kills the whole batch; the progress bar is
        always advanced.
        """
        try:
            res_l[idx] = self.__req(text, temperature=temperature, max_try=5)
        except Exception as e:
            res_l[idx] = f"ERROR: {str(e)}"  # record the error in the result slot
            print(f"Error processing index {idx}: {str(e)}")
        finally:
            if pbar:
                pbar.update(1)

    def __chat_with_validation(self, text, temperature, res_l, index, pbar, max_retries):
        """Worker: like __chat, but only accept single-digit answers '0'-'5'.

        Retries up to ``max_retries`` times; falls back to '0' when no
        valid answer is obtained.
        """
        retries = 0
        valid_responses = {'0', '1', '2', '3', '4', '5'}

        while retries < max_retries:
            response = self.chat(text, temperature=temperature)

            if response in valid_responses:
                res_l[index] = response
                pbar.update(1)  # tqdm guards update() with an internal lock
                break
            retries += 1
            print(f"Invalid response. Retrying... ({retries}/{max_retries})")

        if retries == max_retries:
            print(f"Max retries reached. Setting to '0'.")
            res_l[index] = '0'
            pbar.update(1)

    def batch_chat(self, text_batch, temperature=0, validate_response=False, max_threads=10, timeout=180):
        """Answer every prompt in ``text_batch`` concurrently.

        Args:
            text_batch: list of prompt strings.
            temperature: sampling temperature forwarded to the API.
            validate_response: if True, only accept answers in '0'..'5'
                (with 3 retries and a '0' fallback).
            max_threads: cap on concurrently running worker threads.
            timeout: unused; kept for backward compatibility with callers.

        Returns:
            List of replies aligned with ``text_batch``; slots start as '0'.
        """
        res_l = ['0'] * len(text_batch)
        active_threads = []
        # Initialize the progress bar before starting any threads.
        with tqdm(total=len(text_batch), desc="Processing") as pbar:
            for i, text in enumerate(text_batch):
                if validate_response:
                    thread = threading.Thread(target=self.__chat_with_validation,
                                              args=(text, temperature, res_l, i, pbar, 3))
                else:
                    thread = threading.Thread(target=self.__chat,
                                              args=(text, temperature, res_l, i, pbar))
                thread.start()
                active_threads.append(thread)

                # Throttle: wait until a slot frees up before spawning more.
                while len(active_threads) >= max_threads:
                    active_threads = [t for t in active_threads if t.is_alive()]
                    time.sleep(0.3)  # short delay to avoid busy-waiting

            # Wait for all remaining threads to complete.
            for t in active_threads:
                t.join()

        return res_l
