from prompt import *


class GenericLLM:
    """Base class for LLM backends; stores credentials and a model identifier."""

    def __init__(self, api_key, model_name):
        # Shared configuration for all concrete backends.
        self.api_key = api_key
        self.model_name = model_name
        # Subclasses may add further backend-specific setup here.

    def predict(self, prompt):
        """Generate output for *prompt*; concrete backends must override this."""
        raise NotImplementedError("This function is not yet implemented")

    def get_similarity(self, prompt):
        """Run a matching/similarity analysis by delegating to predict()."""
        return self.predict(prompt)


class DummyLLM(GenericLLM):
    """Inert stand-in backend that always generates an empty string.

    Fix: the original ``__init__`` unconditionally overwrote the
    ``api_key`` / ``model_name`` arguments with the hard-coded defaults,
    so caller-supplied values were silently discarded. The parameters are
    now honored; the defaults keep the previous no-argument behavior.
    """

    def __init__(self, api_key='dummy-key', model_name='dummy-llm'):
        super().__init__(api_key, model_name)

    def predict(self, prompt):
        """Return an empty string regardless of *prompt* (no model is called)."""
        return ""

    def get_similarity(self, prompt):
        """Run the matching analysis by delegating to the (dummy) predict()."""
        return self.predict(prompt)


class GPTModel(GenericLLM):
    """Backend that calls the OpenAI Completions API.

    Fix: the original did ``import openai`` in the class body, which binds
    ``openai`` as a class attribute. Method bodies cannot see class scope,
    so ``predict()`` would raise ``NameError`` unless a module-level
    ``openai`` happened to exist. The import now lives inside ``predict``.

    NOTE(review): ``self.api_key`` is stored but never handed to the openai
    client — presumably configured elsewhere; verify before relying on it.
    """

    def __init__(self, api_key, model_name):
        super().__init__(api_key, model_name)

    def predict(self, prompt):
        """Send *prompt* to the Completions endpoint and return a crude
        similarity score: the length of the generated text."""
        import openai  # function-scope import so the name resolves in this method

        response = openai.Completion.create(
            engine=self.model_name,
            prompt=prompt,
            max_tokens=150
        )
        # The generated text lives in the first completion choice.
        generated_text = response.choices[0].text
        # Placeholder metric; a more sophisticated similarity extraction
        # could replace this length-based stand-in.
        similarity = len(generated_text)
        return similarity
