import * as dotenv from 'dotenv';
import 'isomorphic-fetch';
import type { ChatGPTAPIOptions, ChatMessage, SendMessageOptions } from 'chatgpt';
import { ChatGPTAPI, ChatGPTUnofficialProxyAPI } from 'chatgpt';
import { SocksProxyAgent } from 'socks-proxy-agent';
import fetch from 'node-fetch';
import { sendResponse } from '../utils';
import type { ApiModel, ChatContext, ChatGPTUnofficialProxyAPIOptions, ModelConfig } from '../types';

// Upstream HTTP status codes mapped to bilingual (zh-CN / en) user-facing error text.
const ErrorCodeMessage: Record<string, string> = {
	401: '[OpenAI] 提供错误的API密钥 | Incorrect API key provided',
	403: '[OpenAI] 服务器拒绝访问，请稍后再试 | Server refused to access, please try again later',
	500: '[OpenAI] 服务器繁忙，请稍后再试 | Internal Server Error',
	502: '[OpenAI] 错误的网关 | Bad Gateway',
	503: '[OpenAI] 服务器繁忙，请稍后再试 | Server is busy, please try again later',
	504: '[OpenAI] 网关超时 | Gateway Time-out',
};

// Populate process.env from a local .env file before any configuration is read below.
dotenv.config();

// Per-request timeout in milliseconds; defaults to 30s when TIMEOUT_MS is unset
// or not numeric (+undefined yields NaN and is rejected by the isNaN check).
const timeoutMs: number = !isNaN(+process.env.TIMEOUT_MS) ? +process.env.TIMEOUT_MS : 30 * 1000;

// Which backend is active ('ChatGPTAPI' vs 'ChatGPTUnofficialProxyAPI');
// assigned exactly once by the bootstrap IIFE below.
let apiModel: ApiModel;
// Shared client instance, created at module load by the bootstrap IIFE below.
let api: ChatGPTAPI | ChatGPTUnofficialProxyAPI;

// Fail fast at startup: at least one credential source is mandatory.
if (!process.env.OPENAI_API_KEY && !process.env.OPENAI_ACCESS_TOKEN) {
	throw new Error('Missing OPENAI_API_KEY or OPENAI_ACCESS_TOKEN environment variable');
}

// Bootstrap the shared `api` client once at module load.
// Prefers the official OpenAI API when OPENAI_API_KEY is set; otherwise falls
// back to the unofficial proxy driven by OPENAI_ACCESS_TOKEN (presence of at
// least one credential was validated above).
(async () => {
	// When a SOCKS proxy is configured, route outgoing requests through it by
	// overriding the client's fetch implementation. Shared by both branches
	// (this wiring was previously duplicated verbatim). The callback parameter
	// is named `fetchOptions` so it no longer shadows the client `options`.
	const applySocksProxy = (options: ChatGPTAPIOptions | ChatGPTUnofficialProxyAPIOptions) => {
		if (process.env.SOCKS_PROXY_HOST && process.env.SOCKS_PROXY_PORT) {
			const agent = new SocksProxyAgent({
				hostname: process.env.SOCKS_PROXY_HOST,
				port: process.env.SOCKS_PROXY_PORT,
			});
			options.fetch = (url, fetchOptions) => {
				return fetch(url, { agent, ...fetchOptions });
			};
		}
	};

	if (process.env.OPENAI_API_KEY) {
		// Model is configurable; default mirrors the public ChatGPT model.
		const OPENAI_API_MODEL = process.env.OPENAI_API_MODEL || 'gpt-3.5-turbo';

		const options: ChatGPTAPIOptions = {
			apiKey: process.env.OPENAI_API_KEY,
			completionParams: { model: OPENAI_API_MODEL },
			debug: false,
		};

		// Optional override for self-hosted / OpenAI-compatible endpoints.
		if (process.env.OPENAI_API_BASE_URL) {
			options.apiBaseUrl = process.env.OPENAI_API_BASE_URL.trim();
		}

		applySocksProxy(options);

		api = new ChatGPTAPI(options);
		apiModel = 'ChatGPTAPI';
	} else {
		const options: ChatGPTUnofficialProxyAPIOptions = {
			accessToken: process.env.OPENAI_ACCESS_TOKEN,
			debug: false,
		};

		applySocksProxy(options);

		// Optional reverse proxy in front of the unofficial ChatGPT backend.
		if (process.env.API_REVERSE_PROXY) {
			options.apiReverseProxyUrl = process.env.API_REVERSE_PROXY;
		}

		api = new ChatGPTUnofficialProxyAPI(options);
		apiModel = 'ChatGPTUnofficialProxyAPI';
	}
})();

/**
 * Send `message` to the configured ChatGPT backend and stream partial replies.
 *
 * @param message     The user's prompt text.
 * @param lastContext Optional prior conversation pointers used to continue a thread.
 * @param process     Optional callback invoked with each partial/streamed ChatMessage.
 * @returns The `sendResponse` envelope: Success with the final response, or Fail
 *          with a mapped upstream error message.
 */
async function chatReplyProcess(
	message: string,
	lastContext?: { conversationId?: string; parentMessageId?: string },
	process?: (chat: ChatMessage) => void,
) {
	try {
		let options: SendMessageOptions = { timeoutMs };

		if (lastContext) {
			// Merge the conversation context on top of the base options instead of
			// replacing them — the previous code dropped timeoutMs here, so requests
			// with context ran without any timeout. The official API only accepts
			// parentMessageId; the unofficial proxy needs the full context.
			options = apiModel === 'ChatGPTAPI'
				? { ...options, parentMessageId: lastContext.parentMessageId }
				: { ...options, ...lastContext };
		}

		const response = await api.sendMessage(message, {
			...options,
			onProgress: (partialResponse) => {
				process?.(partialResponse);
			},
		});

		return sendResponse({ type: 'Success', data: response });
	} catch (error: any) {
		// Map known upstream HTTP status codes to friendly bilingual messages;
		// anything else falls through to the raw error message.
		const code = error.statusCode;
		console.log(error);
		if (ErrorCodeMessage[code]) {
			return sendResponse({ type: 'Fail', message: ErrorCodeMessage[code] });
		}
		return sendResponse({ type: 'Fail', message: error.message ?? 'Please check the back-end console' });
	}
}

/**
 * Report the effective backend configuration (active API model, reverse proxy,
 * timeout, and SOCKS proxy) wrapped in a Success response envelope.
 */
async function chatConfig() {
	const { SOCKS_PROXY_HOST, SOCKS_PROXY_PORT, API_REVERSE_PROXY } = process.env;

	// Render the SOCKS proxy as "host:port", or '-' when not configured.
	const socksProxy = (SOCKS_PROXY_HOST && SOCKS_PROXY_PORT)
		? `${SOCKS_PROXY_HOST}:${SOCKS_PROXY_PORT}`
		: '-';

	return sendResponse({
		type: 'Success',
		data: {
			apiModel,
			reverseProxy: API_REVERSE_PROXY,
			timeoutMs,
			socksProxy,
		} as ModelConfig,
	});
}

// Public surface of this module: types re-exported for consumers, plus the
// two service entry points used by the HTTP layer.
export type { ChatContext, ChatMessage };

export { chatReplyProcess, chatConfig };
