from flask import Flask, request, Response, stream_with_context
import requests, json, os
from datetime import datetime, timezone
from sseclient import SSEClient

app = Flask(__name__)

# Base URL of the upstream SiliconFlow API server
SILICONFLOW_TARGET_URL = "https://api.siliconflow.cn"


def get_siliconflow_embedding(model: str, input: str) -> list[float]:
    """Fetch an embedding vector for *input* from SiliconFlow /v1/embeddings.

    Args:
        model: SiliconFlow embedding model identifier.
        input: Text to embed. (Parameter name kept for caller compatibility
            even though it shadows the builtin ``input``.)

    Returns:
        The embedding of the single input as a list of floats.

    Note:
        Must run inside a Flask request context — the incoming request's
        headers and cookies are forwarded upstream.
    """
    path = "/v1/embeddings"
    payload = {
        "model": model,
        "input": input,
        "encoding_format": "float",
    }

    # Forward the client's headers, but drop Host (wrong for the upstream
    # server) and Content-Length (stale — we send a different body).
    api_key = os.environ.get("SILICONFLOW_API_KEY", "")
    headers = {
        key: value
        for key, value in request.headers
        if key.lower() not in ("host", "content-length")
    }
    headers["Authorization"] = f"Bearer {api_key}"

    resp = requests.request(
        method="POST",
        # path already starts with "/" — previously f"...URL}/{path}"
        # produced a double slash in the URL.
        url=f"{SILICONFLOW_TARGET_URL}{path}",
        headers=headers,
        json=payload,
        cookies=request.cookies,
        allow_redirects=False,
    )

    # Response shape: {"data": [{"embedding": [...]}, ...]}
    vectors = resp.json()["data"][0]["embedding"]
    return vectors


# embedding: Ollama API <=> SiliconFlow
@app.route("/api/embed", methods=["POST"])
def api_embed():
    """Translate an Ollama /api/embed request into SiliconFlow embedding calls.

    Ollama request body:  {"model": str, "input": [str, ...]}
    Ollama response body: {"model": str, "embeddings": [[float, ...], ...]}
    """
    raw_data = json.loads(request.data)
    model_id = raw_data["model"]

    # CORS / method headers.  Built once up front: previously this dict was
    # (re)built inside the loop, so an empty "input" list left it undefined
    # and Response(...) raised NameError.
    resp_headers = {
        "Access-Control-Allow-Origin": "*",
        "Access-Control-Allow-Methods": "DELETE, GET, HEAD, OPTIONS, PATCH, POST, PUT",
        "Allow": "OPTIONS, POST, HEAD, DELETE, GET, PUT",
        "vary": "Origin",
    }

    # One upstream call per input text; preserves input order.
    vectors_list = [
        get_siliconflow_embedding(model_id, content)
        for content in raw_data["input"]
    ]

    resp = {"model": model_id, "embeddings": vectors_list}
    return Response(json.dumps(resp), 200, resp_headers)


def get_reranker_from_siliconflow(
    query: str,
    documents: list[str],
    model: str = "BAAI/bge-reranker-v2-m3",
) -> list:
    """Rerank *documents* against *query* via SiliconFlow /v1/rerank.

    Args:
        query: The search query.
        documents: Candidate documents to score.
        model: Reranker model id. New optional parameter; the default keeps
            the previously hard-coded behavior.

    Returns:
        The upstream "results" list (one relevance entry per document).

    Note:
        Must run inside a Flask request context — the incoming request's
        headers and cookies are forwarded upstream.
    """
    path = "/v1/rerank"
    payload = {
        "model": model,
        "query": query,
        "documents": documents,
        "encoding_format": "float",
        "top_n": len(documents),
        "return_documents": False,
        "max_chunks_per_doc": 1024,
        "overlap_tokens": 80,
    }

    # Forward client headers minus Host (wrong for upstream) and
    # Content-Length (stale — a different body is sent).
    api_key = os.environ.get("SILICONFLOW_API_KEY", "")
    headers = {
        key: value
        for key, value in request.headers
        if key.lower() not in ("host", "content-length")
    }
    headers["Authorization"] = f"Bearer {api_key}"

    resp = requests.request(
        method="POST",
        # path already starts with "/" — avoids the previous double slash.
        url=f"{SILICONFLOW_TARGET_URL}{path}",
        headers=headers,
        json=payload,
        cookies=request.cookies,
        allow_redirects=False,
    )

    return resp.json()["results"]


# rerank: llama.cpp API <=> SiliconFlow
@app.route("/rerank", methods=["POST"])
def rerank():
    """Bridge a llama.cpp-style /rerank request to SiliconFlow.

    llama.cpp request body: {"query": str, "documents": [str, ...]}
    Responds with a SiliconFlow-shaped payload whose "results" carry the
    relevance scores.
    """
    body = json.loads(request.data)
    print(json.dumps(body, ensure_ascii=False))

    # Delegate the actual scoring to the upstream helper.
    results = get_reranker_from_siliconflow(body["query"], body["documents"])

    cors_headers = {
        "Access-Control-Allow-Origin": "*",
        "Access-Control-Allow-Methods": "DELETE, GET, HEAD, OPTIONS, PATCH, POST, PUT",
        "Allow": "OPTIONS, POST, HEAD, DELETE, GET, PUT",
        "vary": "Origin",
    }
    payload = {
        "model": "BAAI/bge-reranker-v2-m3",
        "object": "",
        "usage": {"prompt_tokens": 0, "total_tokens": 0},
        "results": results,
    }
    return Response(json.dumps(payload), 200, cors_headers)


def code_completion_from_siliconflow(
    model_id: str, prefix: str, suffix: str, max_tokens: int = 256
) -> str:
    """Request a non-streaming FIM code completion from SiliconFlow.

    Args:
        model_id: Completion model identifier.
        prefix: Code before the cursor (sent as the prompt).
        suffix: Code after the cursor.
        max_tokens: Completion length cap. New optional parameter; the
            default keeps the previously hard-coded 256.

    Returns:
        The generated completion text.  (The old ``-> list`` annotation was
        wrong — the function returns a string.)

    Note:
        Must run inside a Flask request context — the incoming request's
        headers and cookies are forwarded upstream.
    """
    path = "/v1/completions"
    payload = {
        "model": model_id,
        "prompt": prefix,
        "suffix": suffix,
        "stream": False,
        "max_tokens": max_tokens,
    }

    # Forward client headers minus Host and the stale Content-Length.
    api_key = os.environ.get("SILICONFLOW_API_KEY", "")
    headers = {
        key: value
        for key, value in request.headers
        if key.lower() not in ("host", "content-length")
    }
    headers["Authorization"] = f"Bearer {api_key}"

    resp = requests.request(
        method="POST",
        # path already starts with "/" — avoids the previous double slash.
        url=f"{SILICONFLOW_TARGET_URL}{path}",
        headers=headers,
        json=payload,
        cookies=request.cookies,
        allow_redirects=False,
    )

    # Response shape: {"choices": [{"text": "..."}], ...}
    return resp.json()["choices"][0]["text"]


def code_completion_from_siliconflow_sse(
    model_id: str, prefix: str, suffix: str, max_tokens: int = 256
) -> requests.Response:
    """Start a streaming (SSE) FIM code completion on SiliconFlow.

    Args:
        model_id: Completion model identifier.
        prefix: Code before the cursor (sent as the prompt).
        suffix: Code after the cursor.
        max_tokens: Completion length cap. New optional parameter; the
            default keeps the previously hard-coded 256.

    Returns:
        The open streaming ``requests.Response``, for the caller to wrap in
        ``SSEClient``.  (The old ``-> list`` annotation was wrong.)

    Note:
        Must run inside a Flask request context — the incoming request's
        headers and cookies are forwarded upstream.
    """
    path = "/v1/completions"
    payload = {
        "model": model_id,
        "prompt": prefix,
        "suffix": suffix,
        "stream": True,
        "max_tokens": max_tokens,
    }

    # Forward client headers minus Host and the stale Content-Length.
    api_key = os.environ.get("SILICONFLOW_API_KEY", "")
    headers = {
        key: value
        for key, value in request.headers
        if key.lower() not in ("host", "content-length")
    }
    headers["Authorization"] = f"Bearer {api_key}"

    resp = requests.request(
        method="POST",
        # path already starts with "/" — avoids the previous double slash.
        url=f"{SILICONFLOW_TARGET_URL}{path}",
        headers=headers,
        json=payload,
        cookies=request.cookies,
        allow_redirects=False,
        stream=True,  # keep the connection open for SSE consumption
    )

    return resp


# code completion: Ollama /api/generate <=> SiliconFlow /v1/completions
@app.route("/api/generate", methods=["POST"])
def api_generate_from_siliconflow():
    """Translate an Ollama /api/generate request into a SiliconFlow completion.

    Streams newline-delimited Ollama-style JSON objects when "stream" is
    requested (the default), otherwise returns a single JSON object.
    """
    raw_data = json.loads(request.data)
    print("<<<", json.dumps(raw_data))
    prompt = raw_data["prompt"]
    model_id = raw_data["model"]
    # BUG FIX: `raw_data["stream"] or True` was always truthy and raised
    # KeyError when "stream" was absent.  Default to streaming.
    is_stream = raw_data.get("stream", True)

    resp_headers = {
        "Access-Control-Allow-Origin": "*",
        "Access-Control-Allow-Methods": "DELETE, GET, HEAD, OPTIONS, PATCH, POST, PUT",
        "Allow": "OPTIONS, POST, HEAD, DELETE, GET, PUT",
        "vary": "Origin",
    }

    def _utc_now() -> str:
        # Ollama-style UTC timestamp, e.g. "2025-01-01T00:00:00.000000Z".
        return datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ")

    if is_stream:
        print(f"use stream, model_id:{model_id} , {prompt}")
        resp = code_completion_from_siliconflow_sse(model_id, prompt, "")
        client = SSEClient(resp)

        # Relay each upstream SSE chunk as one Ollama-style JSON line.
        def generate():
            for event in client.events():
                # Upstream events are OpenAI-style completion chunks, e.g.
                # {"model": ..., "choices": [{"text": ..., ...}], ...},
                # terminated by a literal "[DONE]" event.
                print(">>>", event.data)
                if event.data != '[DONE]':
                    msg = json.loads(event.data)
                    for choice in msg["choices"]:
                        chunk = {
                            "model": msg["model"],
                            "created_at": _utc_now(),
                            "response": choice["text"],
                            "done": False,
                        }
                        yield json.dumps(chunk, ensure_ascii=True) + "\n"
                else:
                    # BUG FIX: the final chunk previously reused `now_str`
                    # from the loop body, which was unbound when "[DONE]"
                    # arrived first; stamp the time here instead.
                    final = {
                        "model": model_id,
                        "created_at": _utc_now(),
                        "response": "",
                        "done": True,
                    }
                    yield json.dumps(final, ensure_ascii=True)

        return Response(stream_with_context(generate()), 200, resp_headers)

    # Non-streaming path: single upstream call, single JSON response.
    complete_content = code_completion_from_siliconflow(model_id, prompt, "")
    print(">>>", complete_content)
    resp_content = json.dumps(
        {
            "model": model_id,
            "created_at": _utc_now(),
            "response": complete_content,
            "done": True,
        }
    )
    return Response(resp_content, 200, resp_headers)


# model metadata: Ollama /api/show (NOTE: the original comment said
# "code completion", which was inaccurate for this endpoint)
@app.route("/api/show", methods=["POST"])
def api_show():
    """Answer Ollama /api/show probes with a canned llama3.2 model description.

    Returns a fixed, hard-coded metadata payload regardless of the request
    body — presumably just enough for Ollama-compatible clients to accept
    this proxy as a running Ollama server (no upstream call is made).
    """
    # Static payload mirroring a real `ollama show` response for
    # llama3.2:latest, including the full Modelfile template text.
    resp = {
        "license": "",
        "modelfile": '# Modelfile generated by "ollama show"\n# To build a new Modelfile based on this, replace FROM with:\n# FROM llama3.2:latest\n\nFROM /var/lib/ollama/blobs/sha256-dde5aa3fc5ffc17176b5e8bdc82f587b24b2678c6c66101bf7da77af9f7ccdff\nTEMPLATE """<|start_header_id|>system<|end_header_id|>\n\nCutting Knowledge Date: December 2023\n\n{{ if .System }}{{ .System }}\n{{- end }}\n{{- if .Tools }}When you receive a tool call response, use the output to format an answer to the orginal user question.\n\nYou are a helpful assistant with tool calling capabilities.\n{{- end }}<|eot_id|>\n{{- range $i, $_ := .Messages }}\n{{- $last := eq (len (slice $.Messages $i)) 1 }}\n{{- if eq .Role "user" }}<|start_header_id|>user<|end_header_id|>\n{{- if and $.Tools $last }}\n\nGiven the following functions, please respond with a JSON for a function call with its proper arguments that best answers the given prompt.\n\nRespond in the format {"name": function name, "parameters": dictionary of argument name and its value}. Do not use variables.\n\n{{ range $.Tools }}\n{{- . }}\n{{ end }}\n{{ .Content }}<|eot_id|>\n{{- else }}\n\n{{ .Content }}<|eot_id|>\n{{- end }}{{ if $last }}<|start_header_id|>assistant<|end_header_id|>\n\n{{ end }}\n{{- else if eq .Role "assistant" }}<|start_header_id|>assistant<|end_header_id|>\n{{- if .ToolCalls }}\n{{ range .ToolCalls }}\n{"name": "{{ .Function.Name }}", "parameters": {{ .Function.Arguments }}}{{ end }}\n{{- else }}\n\n{{ .Content }}\n{{- end }}{{ if not $last }}<|eot_id|>{{ end }}\n{{- else if eq .Role "tool" }}<|start_header_id|>ipython<|end_header_id|>\n\n{{ .Content }}<|eot_id|>{{ if $last }}<|start_header_id|>assistant<|end_header_id|>\n\n{{ end }}\n{{- end }}\n{{- end }}"""\nPARAMETER stop <|start_header_id|>\nPARAMETER stop <|end_header_id|>\nPARAMETER stop <|eot_id|>',
        "details": {
            "parent_model": "",
            "format": "gguf",
            "family": "llama",
            "families": ["llama"],
            "parameter_size": "3.2B",
            "quantization_level": "Q4_K_M",
        },
        "model_info": {
            "general.architecture": "llama",
            "general.basename": "Llama-3.2",
            "general.file_type": 15,
            "general.finetune": "Instruct",
            "general.languages": ["en", "de", "fr", "it", "pt", "hi", "es", "th"],
            "general.parameter_count": 3212749888,
            "general.quantization_version": 2,
            "general.size_label": "3B",
            "general.tags": [
                "facebook",
                "meta",
                "pytorch",
                "llama",
                "llama-3",
                "text-generation",
            ],
            "general.type": "model",
            "llama.attention.head_count": 24,
            "llama.attention.head_count_kv": 8,
            "llama.attention.key_length": 128,
            "llama.attention.layer_norm_rms_epsilon": 0.00001,
            "llama.attention.value_length": 128,
            "llama.block_count": 28,
            "llama.context_length": 131072,
            "llama.embedding_length": 3072,
            "llama.feed_forward_length": 8192,
            "llama.rope.dimension_count": 128,
            "llama.rope.freq_base": 500000,
            "llama.vocab_size": 128256,
            "tokenizer.ggml.bos_token_id": 128000,
            "tokenizer.ggml.eos_token_id": 128009,
            # None values serialize to JSON null — the token tables are
            # omitted rather than shipped.
            "tokenizer.ggml.merges": None,
            "tokenizer.ggml.model": "gpt2",
            "tokenizer.ggml.pre": "llama-bpe",
            "tokenizer.ggml.token_type": None,
            "tokenizer.ggml.tokens": None,
        },
        "capabilities": ["completion", "tools"],
        "modified_at": "2025-03-25T00:33:16.62503115+08:00",
    }
    # CORS / method headers, matching the other handlers in this file.
    resp_headers = {
        "Access-Control-Allow-Origin": "*",
        "Access-Control-Allow-Methods": "DELETE, GET, HEAD, OPTIONS, PATCH, POST, PUT",
        "Allow": "OPTIONS, POST, HEAD, DELETE, GET, PUT",
        "vary": "Origin",
    }
    response = Response(json.dumps(resp), 200, resp_headers)
    return response


if __name__ == "__main__":
    # Serve on all interfaces, port 11435 — presumably chosen to sit next
    # to a real Ollama install (default port 11434); confirm with callers.
    app.run(host="0.0.0.0", port=11435)