ngoctuanai committed on
Commit
076ace8
1 Parent(s): 39d0384

Upload 135 files

This view is limited to 50 files because it contains too many changes. See the raw diff for the rest.
.commitlintrc.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "extends": ["@commitlint/config-conventional"]
+ }
.dockerignore ADDED
@@ -0,0 +1,7 @@
+ **/node_modules
+ */node_modules
+ node_modules
+ Dockerfile
+ .*
+ */.*
+ !.env
.editorconfig ADDED
@@ -0,0 +1,11 @@
+ # Editor configuration, see http://editorconfig.org
+
+ root = true
+
+ [*]
+ charset = utf-8
+ indent_style = tab
+ indent_size = 2
+ end_of_line = lf
+ trim_trailing_whitespace = true
+ insert_final_newline = true
.env ADDED
@@ -0,0 +1,10 @@
+ # Glob API URL
+ VITE_GLOB_API_URL=/api
+
+ VITE_APP_API_BASE_URL=http://127.0.0.1:3002/
+
+ # Whether long replies are supported, which may result in higher API fees
+ VITE_GLOB_OPEN_LONG_REPLY=false
+
+ # When you want to use PWA
+ VITE_GLOB_APP_PWA=false
.eslintignore ADDED
@@ -0,0 +1,2 @@
+ docker-compose
+ kubernetes
.eslintrc.cjs ADDED
@@ -0,0 +1,4 @@
+ module.exports = {
+   root: true,
+   extends: ['@antfu'],
+ }
.gitattributes ADDED
@@ -0,0 +1,17 @@
+ "*.vue" eol=lf
+ "*.js" eol=lf
+ "*.ts" eol=lf
+ "*.jsx" eol=lf
+ "*.tsx" eol=lf
+ "*.cjs" eol=lf
+ "*.cts" eol=lf
+ "*.mjs" eol=lf
+ "*.mts" eol=lf
+ "*.json" eol=lf
+ "*.html" eol=lf
+ "*.css" eol=lf
+ "*.less" eol=lf
+ "*.scss" eol=lf
+ "*.sass" eol=lf
+ "*.styl" eol=lf
+ "*.md" eol=lf
.gitignore ADDED
@@ -0,0 +1,32 @@
+ # Logs
+ logs
+ *.log
+ npm-debug.log*
+ yarn-debug.log*
+ yarn-error.log*
+ pnpm-debug.log*
+ lerna-debug.log*
+
+ node_modules
+ .DS_Store
+ dist
+ dist-ssr
+ coverage
+ *.local
+
+ /cypress/videos/
+ /cypress/screenshots/
+
+ # Editor directories and files
+ .vscode/*
+ !.vscode/settings.json
+ !.vscode/extensions.json
+ .idea
+ *.suo
+ *.ntvs*
+ *.njsproj
+ *.sln
+ *.sw?
+
+ # Environment variables files
+ /service/.env
.npmrc ADDED
@@ -0,0 +1 @@
+ strict-peer-dependencies=false
Dockerfile ADDED
@@ -0,0 +1,56 @@
+ # build front-end
+ FROM node:lts-alpine AS frontend
+
+ RUN npm install pnpm -g
+
+ WORKDIR /app
+
+ COPY ./package.json /app
+
+ COPY ./pnpm-lock.yaml /app
+
+ RUN pnpm install
+
+ COPY . /app
+
+ RUN pnpm run build
+
+ # build backend
+ FROM node:lts-alpine AS backend
+
+ RUN npm install pnpm -g
+
+ WORKDIR /app
+
+ COPY /service/package.json /app
+
+ COPY /service/pnpm-lock.yaml /app
+
+ RUN pnpm install
+
+ COPY /service /app
+
+ RUN pnpm build
+
+ # service
+ FROM node:lts-alpine
+
+ RUN npm install pnpm -g
+
+ WORKDIR /app
+
+ COPY /service/package.json /app
+
+ COPY /service/pnpm-lock.yaml /app
+
+ RUN pnpm install --production && rm -rf /root/.npm /root/.pnpm-store /usr/local/share/.cache /tmp/*
+
+ COPY /service /app
+
+ COPY --from=frontend /app/dist /app/public
+
+ COPY --from=backend /app/build /app/build
+
+ EXPOSE 3002
+
+ CMD ["pnpm", "run", "prod"]
config/index.ts ADDED
@@ -0,0 +1 @@
+ export * from './proxy'
config/proxy.ts ADDED
@@ -0,0 +1,16 @@
+ import type { ProxyOptions } from 'vite'
+
+ export function createViteProxy(isOpenProxy: boolean, viteEnv: ImportMetaEnv) {
+   if (!isOpenProxy)
+     return
+
+   const proxy: Record<string, string | ProxyOptions> = {
+     '/api': {
+       target: viteEnv.VITE_APP_API_BASE_URL,
+       changeOrigin: true,
+       rewrite: path => path.replace('/api/', '/'),
+     },
+   }
+
+   return proxy
+ }
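For context, a minimal sketch of how `createViteProxy` could be wired into a Vite config. The file below is not part of the files shown in this commit; the dev port and env-loading details are assumptions, not the project's actual settings.

```ts
// vite.config.ts — hypothetical usage sketch, not taken from this commit.
import { defineConfig, loadEnv } from 'vite'
import { createViteProxy } from './config'

export default defineConfig((env) => {
  // loadEnv reads the .env files (e.g. VITE_APP_API_BASE_URL above); the cast is an assumption.
  const viteEnv = loadEnv(env.mode, process.cwd()) as unknown as ImportMetaEnv

  return {
    server: {
      port: 1002, // assumed dev port
      // only proxy /api to the backend while running the dev server
      proxy: createViteProxy(env.command === 'serve', viteEnv),
    },
  }
})
```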
docker-compose/docker-compose.yml ADDED
@@ -0,0 +1,47 @@
+ version: '3'
+
+ services:
+   app:
+     container_name: chatgpt-web
+     image: chenzhaoyu94/chatgpt-web # always uses latest; re-pull this tag to update
+     ports:
+       - 3002:3002
+     environment:
+       # choose one of the two
+       OPENAI_API_KEY:
+       # choose one of the two
+       OPENAI_ACCESS_TOKEN:
+       # API base URL, optional, available when OPENAI_API_KEY is set
+       OPENAI_API_BASE_URL:
+       # API model, optional, available when OPENAI_API_KEY is set
+       OPENAI_API_MODEL:
+       # reverse proxy, optional
+       API_REVERSE_PROXY:
+       # maximum requests per hour, optional, unlimited by default
+       AUTH_SECRET_KEY:
+       # access secret key, optional
+       MAX_REQUEST_PER_HOUR: 0
+       # timeout in milliseconds, optional
+       TIMEOUT_MS: 60000
+       # SOCKS proxy, optional, takes effect together with SOCKS_PROXY_PORT
+       SOCKS_PROXY_HOST:
+       # SOCKS proxy port, optional, takes effect together with SOCKS_PROXY_HOST
+       SOCKS_PROXY_PORT:
+       # SOCKS proxy username, optional, takes effect together with SOCKS_PROXY_HOST & SOCKS_PROXY_PORT
+       SOCKS_PROXY_USERNAME:
+       # SOCKS proxy password, optional, takes effect together with SOCKS_PROXY_HOST & SOCKS_PROXY_PORT
+       SOCKS_PROXY_PASSWORD:
+       # HTTPS_PROXY proxy, optional
+       HTTPS_PROXY:
+   nginx:
+     container_name: nginx
+     image: nginx:alpine
+     ports:
+       - '80:80'
+     expose:
+       - '80'
+     volumes:
+       - ./nginx/html:/usr/share/nginx/html
+       - ./nginx/nginx.conf:/etc/nginx/conf.d/default.conf
+     links:
+       - app
docker-compose/nginx/nginx.conf ADDED
@@ -0,0 +1,27 @@
+ server {
+   listen 80;
+   server_name localhost;
+   charset utf-8;
+   error_page 500 502 503 504 /50x.html;
+
+   # block crawlers
+   if ($http_user_agent ~* "360Spider|JikeSpider|Spider|spider|bot|Bot|2345Explorer|curl|wget|webZIP|qihoobot|Baiduspider|Googlebot|Googlebot-Mobile|Googlebot-Image|Mediapartners-Google|Adsbot-Google|Feedfetcher-Google|Yahoo! Slurp|Yahoo! Slurp China|YoudaoBot|Sosospider|Sogou spider|Sogou web spider|MSNBot|ia_archiver|Tomato Bot|NSPlayer|bingbot")
+   {
+     return 403;
+   }
+
+   location / {
+     root /usr/share/nginx/html;
+     try_files $uri /index.html;
+   }
+
+   location /api {
+     proxy_set_header X-Real-IP $remote_addr; # forward the client IP
+     proxy_pass http://app:3002;
+   }
+
+   proxy_set_header Host $host;
+   proxy_set_header X-Real-IP $remote_addr;
+   proxy_set_header REMOTE-HOST $remote_addr;
+   proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ }
docker-compose/readme.md ADDED
@@ -0,0 +1,14 @@
+ ### docker-compose deployment guide
+ - Put the built front-end files into the `nginx/html` directory
+ - ```shell
+   # start
+   docker-compose up -d
+   ```
+ - ```shell
+   # check running status
+   docker ps
+   ```
+ - ```shell
+   # stop
+   docker-compose down
+   ```
index.html ADDED
@@ -0,0 +1,83 @@
+ <!DOCTYPE html>
+ <html>
+   <head>
+     <meta charset="UTF-8">
+     <link rel="icon" type="image/svg+xml" href="https://pnghive.com/core/images/full/chat-gpt-logo-png-1680406057.png">
+     <meta content="yes" name="apple-mobile-web-app-capable"/>
+     <link rel="apple-touch-icon" href="https://pnghive.com/core/images/full/chat-gpt-logo-png-1680406057.png">
+     <meta name="viewport"
+       content="width=device-width, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0, viewport-fit=cover" />
+     <title>ChatGPT</title>
+   </head>
+
+   <body class="dark:bg-black">
+     <div id="app">
+       <style>
+         .loading-wrap {
+           display: flex;
+           justify-content: center;
+           align-items: center;
+           height: 100vh;
+         }
+
+         .balls {
+           width: 4em;
+           display: flex;
+           flex-flow: row nowrap;
+           align-items: center;
+           justify-content: space-between;
+         }
+
+         .balls div {
+           width: 0.8em;
+           height: 0.8em;
+           border-radius: 50%;
+           background-color: #4b9e5f;
+         }
+
+         .balls div:nth-of-type(1) {
+           transform: translateX(-100%);
+           animation: left-swing 0.5s ease-in alternate infinite;
+         }
+
+         .balls div:nth-of-type(3) {
+           transform: translateX(-95%);
+           animation: right-swing 0.5s ease-out alternate infinite;
+         }
+
+         @keyframes left-swing {
+
+           50%,
+           100% {
+             transform: translateX(95%);
+           }
+         }
+
+         @keyframes right-swing {
+           50% {
+             transform: translateX(-95%);
+           }
+
+           100% {
+             transform: translateX(100%);
+           }
+         }
+
+         @media (prefers-color-scheme: dark) {
+           body {
+             background: #121212;
+           }
+         }
+       </style>
+       <div class="loading-wrap">
+         <div class="balls">
+           <div></div>
+           <div></div>
+           <div></div>
+         </div>
+       </div>
+     </div>
+     <script type="module" src="/src/main.ts"></script>
+   </body>
+
+ </html>
kubernetes/README.md ADDED
@@ -0,0 +1,9 @@
+ ## A Kubernetes deployment option
+ ```
+ kubectl apply -f deploy.yaml
+ ```
+
+ ### If you need Ingress domain access
+ ```
+ kubectl apply -f ingress.yaml
+ ```
kubernetes/deploy.yaml ADDED
@@ -0,0 +1,66 @@
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+   name: chatgpt-web
+   labels:
+     app: chatgpt-web
+ spec:
+   replicas: 1
+   selector:
+     matchLabels:
+       app: chatgpt-web
+   strategy:
+     type: RollingUpdate
+   template:
+     metadata:
+       labels:
+         app: chatgpt-web
+     spec:
+       containers:
+         - image: chenzhaoyu94/chatgpt-web
+           name: chatgpt-web
+           imagePullPolicy: Always
+           ports:
+             - containerPort: 3002
+           env:
+             - name: OPENAI_API_KEY
+               value: sk-xxx
+             - name: OPENAI_API_BASE_URL
+               value: 'https://api.openai.com'
+             - name: OPENAI_API_MODEL
+               value: gpt-3.5-turbo
+             - name: API_REVERSE_PROXY
+               value: https://ai.fakeopen.com/api/conversation
+             - name: AUTH_SECRET_KEY
+               value: '123456'
+             - name: TIMEOUT_MS
+               value: '60000'
+             - name: SOCKS_PROXY_HOST
+               value: ''
+             - name: SOCKS_PROXY_PORT
+               value: ''
+             - name: HTTPS_PROXY
+               value: ''
+           resources:
+             limits:
+               cpu: 500m
+               memory: 500Mi
+             requests:
+               cpu: 300m
+               memory: 300Mi
+ ---
+ apiVersion: v1
+ kind: Service
+ metadata:
+   labels:
+     app: chatgpt-web
+   name: chatgpt-web
+ spec:
+   ports:
+     - name: chatgpt-web
+       port: 3002
+       protocol: TCP
+       targetPort: 3002
+   selector:
+     app: chatgpt-web
+   type: ClusterIP
kubernetes/ingress.yaml ADDED
@@ -0,0 +1,21 @@
+ apiVersion: networking.k8s.io/v1
+ kind: Ingress
+ metadata:
+   annotations:
+     kubernetes.io/ingress.class: nginx
+     nginx.ingress.kubernetes.io/proxy-connect-timeout: '5'
+   name: chatgpt-web
+ spec:
+   rules:
+     - host: chatgpt.example.com
+       http:
+         paths:
+           - backend:
+               service:
+                 name: chatgpt-web
+                 port:
+                   number: 3002
+             path: /
+             pathType: ImplementationSpecific
+   tls:
+     - secretName: chatgpt-web-tls
license ADDED
@@ -0,0 +1,25 @@
+ # MIT License
+
+ Copyright (c) 2023 chokiproai
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ of the Software, and to permit persons to whom the Software is furnished to do
+ so, subject to the following conditions:
+
+ ## Conditions
+
+ The above copyright notice and this permission notice (including the next
+ paragraph) shall be included in all copies or substantial portions of the
+ Software.
+
+ ## Disclaimer
+
+ **THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.**
package-lock.json ADDED
The diff for this file is too large to render. See raw diff
 
package.json ADDED
@@ -0,0 +1,71 @@
+ {
+   "name": "chatgpt-web",
+   "version": "2.11.1",
+   "private": false,
+   "description": "ChatGPT Web",
+   "author": "ChenZhaoYu <chenzhaoyu1994@gmail.com>",
+   "keywords": [
+     "chatgpt-web",
+     "chatgpt",
+     "chatbot",
+     "vue"
+   ],
+   "scripts": {
+     "dev": "vite",
+     "build": "run-p type-check build-only",
+     "preview": "vite preview",
+     "build-only": "vite build",
+     "type-check": "vue-tsc --noEmit",
+     "lint": "eslint .",
+     "lint:fix": "eslint . --fix",
+     "bootstrap": "pnpm install && pnpm run common:prepare",
+     "common:cleanup": "rimraf node_modules && rimraf pnpm-lock.yaml",
+     "common:prepare": "husky install"
+   },
+   "dependencies": {
+     "@traptitech/markdown-it-katex": "^3.6.0",
+     "@vueuse/core": "^9.13.0",
+     "highlight.js": "^11.7.0",
+     "html2canvas": "^1.4.1",
+     "katex": "^0.16.4",
+     "markdown-it": "^13.0.1",
+     "naive-ui": "^2.34.3",
+     "pinia": "^2.0.33",
+     "vue": "^3.2.47",
+     "vue-i18n": "^9.2.2",
+     "vue-router": "^4.1.6"
+   },
+   "devDependencies": {
+     "@antfu/eslint-config": "^0.35.3",
+     "@commitlint/cli": "^17.4.4",
+     "@commitlint/config-conventional": "^17.4.4",
+     "@iconify/vue": "^4.1.0",
+     "@types/crypto-js": "^4.1.1",
+     "@types/katex": "^0.16.0",
+     "@types/markdown-it": "^12.2.3",
+     "@types/markdown-it-link-attributes": "^3.0.1",
+     "@types/node": "^18.14.6",
+     "@vitejs/plugin-vue": "^4.0.0",
+     "autoprefixer": "^10.4.13",
+     "axios": "^1.3.4",
+     "crypto-js": "^4.1.1",
+     "eslint": "^8.35.0",
+     "husky": "^8.0.3",
+     "less": "^4.1.3",
+     "lint-staged": "^13.1.2",
+     "markdown-it-link-attributes": "^4.0.1",
+     "npm-run-all": "^4.1.5",
+     "postcss": "^8.4.21",
+     "rimraf": "^4.2.0",
+     "tailwindcss": "^3.2.7",
+     "typescript": "~4.9.5",
+     "vite": "^4.2.0",
+     "vite-plugin-pwa": "^0.14.4",
+     "vue-tsc": "^1.2.0"
+   },
+   "lint-staged": {
+     "*.{ts,tsx,vue}": [
+       "pnpm lint:fix"
+     ]
+   }
+ }
pnpm-lock.yaml ADDED
The diff for this file is too large to render. See raw diff
 
postcss.config.js ADDED
@@ -0,0 +1,6 @@
+ module.exports = {
+   plugins: {
+     tailwindcss: {},
+     autoprefixer: {},
+   },
+ }
public/favicon.ico ADDED
public/favicon.svg ADDED
public/pwa-192x192.png ADDED
public/pwa-512x512.png ADDED
service/.env.example ADDED
@@ -0,0 +1,44 @@
+ # OpenAI API Key - https://platform.openai.com/overview
+ OPENAI_API_KEY=
+
+ # change this to an `accessToken` extracted from the ChatGPT site's `https://chat.openai.com/api/auth/session` response
+ OPENAI_ACCESS_TOKEN=
+
+ # OpenAI API Base URL - https://api.openai.com
+ OPENAI_API_BASE_URL=
+
+ # OpenAI API Model - https://platform.openai.com/docs/models
+ OPENAI_API_MODEL=
+
+ # set `true` to disable OpenAI API debug log
+ OPENAI_API_DISABLE_DEBUG=
+
+ # Reverse Proxy - Available on accessToken
+ # Default: https://ai.fakeopen.com/api/conversation
+ # More: https://github.com/transitive-bullshit/chatgpt-api#reverse-proxy
+ API_REVERSE_PROXY=
+
+ # timeout
+ TIMEOUT_MS=100000
+
+ # Rate Limit
+ MAX_REQUEST_PER_HOUR=
+
+ # Secret key
+ AUTH_SECRET_KEY=
+
+ # Socks Proxy Host
+ SOCKS_PROXY_HOST=
+
+ # Socks Proxy Port
+ SOCKS_PROXY_PORT=
+
+ # Socks Proxy Username
+ SOCKS_PROXY_USERNAME=
+
+ # Socks Proxy Password
+ SOCKS_PROXY_PASSWORD=
+
+ # HTTPS PROXY
+ HTTPS_PROXY=
+
service/.eslintrc.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "root": true,
+   "ignorePatterns": ["build"],
+   "extends": ["@antfu"]
+ }
service/.gitignore ADDED
@@ -0,0 +1,31 @@
+ # Logs
+ logs
+ *.log
+ npm-debug.log*
+ yarn-debug.log*
+ yarn-error.log*
+ pnpm-debug.log*
+ lerna-debug.log*
+
+ node_modules
+ .DS_Store
+ dist
+ dist-ssr
+ coverage
+ *.local
+
+ /cypress/videos/
+ /cypress/screenshots/
+
+ # Editor directories and files
+ .vscode/*
+ !.vscode/settings.json
+ !.vscode/extensions.json
+ .idea
+ *.suo
+ *.ntvs*
+ *.njsproj
+ *.sln
+ *.sw?
+
+ build
service/.npmrc ADDED
@@ -0,0 +1 @@
+ enable-pre-post-scripts=true
service/.vscode/extensions.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "recommendations": ["dbaeumer.vscode-eslint"]
+ }
service/.vscode/settings.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "prettier.enable": false,
+   "editor.formatOnSave": false,
+   "editor.codeActionsOnSave": {
+     "source.fixAll.eslint": true
+   },
+   "eslint.validate": [
+     "javascript",
+     "typescript",
+     "json",
+     "jsonc",
+     "json5",
+     "yaml"
+   ],
+   "cSpell.words": [
+     "antfu",
+     "chatgpt",
+     "esno",
+     "GPTAPI",
+     "OPENAI"
+   ]
+ }
service/package.json ADDED
@@ -0,0 +1,47 @@
+ {
+   "name": "chatgpt-web-service",
+   "version": "1.0.0",
+   "private": false,
+   "description": "ChatGPT Web Service",
+   "author": "ChenZhaoYu <chenzhaoyu1994@gmail.com>",
+   "keywords": [
+     "chatgpt-web",
+     "chatgpt",
+     "chatbot",
+     "express"
+   ],
+   "engines": {
+     "node": "^16 || ^18 || ^19"
+   },
+   "scripts": {
+     "start": "esno ./src/index.ts",
+     "dev": "esno watch ./src/index.ts",
+     "prod": "node ./build/index.mjs",
+     "build": "pnpm clean && tsup",
+     "clean": "rimraf build",
+     "lint": "eslint .",
+     "lint:fix": "eslint . --fix",
+     "common:cleanup": "rimraf node_modules && rimraf pnpm-lock.yaml"
+   },
+   "dependencies": {
+     "axios": "^1.3.4",
+     "chatgpt": "^5.1.2",
+     "dotenv": "^16.0.3",
+     "esno": "^0.16.3",
+     "express": "^4.18.2",
+     "express-rate-limit": "^6.7.0",
+     "https-proxy-agent": "^5.0.1",
+     "isomorphic-fetch": "^3.0.0",
+     "node-fetch": "^3.3.0",
+     "socks-proxy-agent": "^7.0.0"
+   },
+   "devDependencies": {
+     "@antfu/eslint-config": "^0.35.3",
+     "@types/express": "^4.17.17",
+     "@types/node": "^18.14.6",
+     "eslint": "^8.35.0",
+     "rimraf": "^4.3.0",
+     "tsup": "^6.6.3",
+     "typescript": "^4.9.5"
+   }
+ }
service/pnpm-lock.yaml ADDED
The diff for this file is too large to render. See raw diff
 
service/src/chatgpt/index.ts ADDED
@@ -0,0 +1,234 @@
+ import * as dotenv from 'dotenv'
+ import 'isomorphic-fetch'
+ import type { ChatGPTAPIOptions, ChatMessage, SendMessageOptions } from 'chatgpt'
+ import { ChatGPTAPI, ChatGPTUnofficialProxyAPI } from 'chatgpt'
+ import { SocksProxyAgent } from 'socks-proxy-agent'
+ import httpsProxyAgent from 'https-proxy-agent'
+ import fetch from 'node-fetch'
+ import { sendResponse } from '../utils'
+ import { isNotEmptyString } from '../utils/is'
+ import type { ApiModel, ChatContext, ChatGPTUnofficialProxyAPIOptions, ModelConfig } from '../types'
+ import type { RequestOptions, SetProxyOptions, UsageResponse } from './types'
+
+ const { HttpsProxyAgent } = httpsProxyAgent
+
+ dotenv.config()
+
+ const ErrorCodeMessage: Record<string, string> = {
+   401: '[OpenAI] Đã cung cấp khóa API không chính xác',
+   403: '[OpenAI] Máy chủ từ chối truy cập, vui lòng thử lại sau',
+   502: '[OpenAI] Cổng xấu',
+   503: '[OpenAI] Máy chủ đang bận, vui lòng thử lại sau',
+   504: '[OpenAI] Hết thời gian yêu cầu',
+   500: '[OpenAI] Lỗi máy chủ nội bộ',
+   429: '[OpenAI] Máy chủ quá tải',
+ }
+
+ const timeoutMs: number = !isNaN(+process.env.TIMEOUT_MS) ? +process.env.TIMEOUT_MS : 100 * 1000
+ const disableDebug: boolean = process.env.OPENAI_API_DISABLE_DEBUG === 'true'
+
+ let apiModel: ApiModel
+ const model = isNotEmptyString(process.env.OPENAI_API_MODEL) ? process.env.OPENAI_API_MODEL : 'gpt-3.5-turbo'
+
+ if (!isNotEmptyString(process.env.OPENAI_API_KEY) && !isNotEmptyString(process.env.OPENAI_ACCESS_TOKEN))
+   throw new Error('Missing OPENAI_API_KEY or OPENAI_ACCESS_TOKEN environment variable')
+
+ let api: ChatGPTAPI | ChatGPTUnofficialProxyAPI
+
+ (async () => {
+   // More Info: https://github.com/transitive-bullshit/chatgpt-api
+
+   if (isNotEmptyString(process.env.OPENAI_API_KEY)) {
+     const OPENAI_API_BASE_URL = process.env.OPENAI_API_BASE_URL
+
+     let randomApiKey = process.env.OPENAI_API_KEY;
+
+     if (isNotEmptyString(process.env.OPENAI_API_KEY_ARR)){
+       const OPENAI_API_KEY_ARR = JSON.parse(process.env.OPENAI_API_KEY_ARR);
+       const randomIndex = Math.floor(Math.random() * OPENAI_API_KEY_ARR.length);
+       randomApiKey = OPENAI_API_KEY_ARR[randomIndex];
+     }
+
+     const options: ChatGPTAPIOptions = {
+       apiKey: randomApiKey,
+       completionParams: { model },
+       debug: !disableDebug,
+     }
+
+     // increase max token limit if use gpt-4
+     if (model.toLowerCase().includes('gpt-4')) {
+       // if use 32k model
+       if (model.toLowerCase().includes('32k')) {
+         options.maxModelTokens = 32768
+         options.maxResponseTokens = 8192
+       }
+       else {
+         options.maxModelTokens = 8192
+         options.maxResponseTokens = 2048
+       }
+     }
+
+     if (isNotEmptyString(OPENAI_API_BASE_URL))
+       options.apiBaseUrl = `${OPENAI_API_BASE_URL}/v1`
+
+     setupProxy(options)
+
+     api = new ChatGPTAPI({ ...options })
+     apiModel = 'ChatGPTAPI'
+   }
+   else {
+     console.log('OPENAI_ACCESS_TOKEN', process.env.OPENAI_ACCESS_TOKEN);
+     const options: ChatGPTUnofficialProxyAPIOptions = {
+       accessToken: process.env.OPENAI_ACCESS_TOKEN,
+       apiReverseProxyUrl: isNotEmptyString(process.env.API_REVERSE_PROXY) ? process.env.API_REVERSE_PROXY : 'https://ai.fakeopen.com/api/conversation',
+       model,
+       debug: !disableDebug,
+     }
+
+     setupProxy(options)
+
+     api = new ChatGPTUnofficialProxyAPI({ ...options })
+     apiModel = 'ChatGPTUnofficialProxyAPI'
+   }
+ })()
+
+ async function chatReplyProcess(options: RequestOptions) {
+   const { message, lastContext, process, systemMessage, temperature, top_p } = options
+   try {
+     let options: SendMessageOptions = { timeoutMs }
+
+     if (apiModel === 'ChatGPTAPI') {
+       if (isNotEmptyString(systemMessage))
+         options.systemMessage = systemMessage
+       options.completionParams = { model, temperature, top_p }
+     }
+
+     if (lastContext != null) {
+       if (apiModel === 'ChatGPTAPI')
+         options.parentMessageId = lastContext.parentMessageId
+       else
+         options = { ...lastContext }
+     }
+
+     const response = await api.sendMessage(message, {
+       ...options,
+       onProgress: (partialResponse) => {
+         process?.(partialResponse)
+       },
+     })
+
+     return sendResponse({ type: 'Success', data: response })
+   }
+   catch (error: any) {
+     const code = error.statusCode
+     global.console.log(error)
+     if (Reflect.has(ErrorCodeMessage, code))
+       return sendResponse({ type: 'Fail', message: ErrorCodeMessage[code] })
+     return sendResponse({ type: 'Fail', message: error.message ?? 'Please check the back-end console' })
+   }
+ }
+
+ async function fetchUsage() {
+   let OPENAI_API_KEY = process.env.OPENAI_API_KEY
+   const OPENAI_API_BASE_URL = process.env.OPENAI_API_BASE_URL
+
+   if (isNotEmptyString(process.env.OPENAI_API_KEY_ARR)){
+     const OPENAI_API_KEY_ARR = JSON.parse(process.env.OPENAI_API_KEY_ARR);
+     const randomIndex = Math.floor(Math.random() * OPENAI_API_KEY_ARR.length);
+     OPENAI_API_KEY = OPENAI_API_KEY_ARR[randomIndex];
+   }
+
+   if (!isNotEmptyString(OPENAI_API_KEY))
+     return Promise.resolve('-')
+
+   const API_BASE_URL = isNotEmptyString(OPENAI_API_BASE_URL)
+     ? OPENAI_API_BASE_URL
+     : 'https://api.openai.com'
+
+   const [startDate, endDate] = formatDate()
+
+   // monthly usage
+   const urlUsage = `${API_BASE_URL}/v1/dashboard/billing/usage?start_date=${startDate}&end_date=${endDate}`
+
+   const headers = {
+     'Authorization': `Bearer ${OPENAI_API_KEY}`,
+     'Content-Type': 'application/json',
+   }
+
+   const options = {} as SetProxyOptions
+
+   setupProxy(options)
+
+   try {
+     // fetch the usage so far
+     const useResponse = await options.fetch(urlUsage, { headers })
+     if (!useResponse.ok)
+       throw new Error('获取使用量失败')
+     const usageData = await useResponse.json() as UsageResponse
+     const usage = Math.round(usageData.total_usage) / 100
+     return Promise.resolve(usage ? `$${usage}` : '-')
+   }
+   catch (error) {
+     global.console.log(error)
+     return Promise.resolve('-')
+   }
+ }
+
+ function formatDate(): string[] {
+   const today = new Date()
+   const year = today.getFullYear()
+   const month = today.getMonth() + 1
+   const lastDay = new Date(year, month, 0)
+   const formattedFirstDay = `${year}-${month.toString().padStart(2, '0')}-01`
+   const formattedLastDay = `${year}-${month.toString().padStart(2, '0')}-${lastDay.getDate().toString().padStart(2, '0')}`
+   return [formattedFirstDay, formattedLastDay]
+ }
+
+ async function chatConfig() {
+   const usage = await fetchUsage()
+   const reverseProxy = process.env.API_REVERSE_PROXY ?? '-'
+   const httpsProxy = (process.env.HTTPS_PROXY || process.env.ALL_PROXY) ?? '-'
+   const socksProxy = (process.env.SOCKS_PROXY_HOST && process.env.SOCKS_PROXY_PORT)
+     ? (`${process.env.SOCKS_PROXY_HOST}:${process.env.SOCKS_PROXY_PORT}`)
+     : '-'
+   return sendResponse<ModelConfig>({
+     type: 'Success',
+     data: { apiModel, reverseProxy, timeoutMs, socksProxy, httpsProxy, usage },
+   })
+ }
+
+ function setupProxy(options: SetProxyOptions) {
+   if (isNotEmptyString(process.env.SOCKS_PROXY_HOST) && isNotEmptyString(process.env.SOCKS_PROXY_PORT)) {
+     const agent = new SocksProxyAgent({
+       hostname: process.env.SOCKS_PROXY_HOST,
+       port: process.env.SOCKS_PROXY_PORT,
+       userId: isNotEmptyString(process.env.SOCKS_PROXY_USERNAME) ? process.env.SOCKS_PROXY_USERNAME : undefined,
+       password: isNotEmptyString(process.env.SOCKS_PROXY_PASSWORD) ? process.env.SOCKS_PROXY_PASSWORD : undefined,
+     })
+     options.fetch = (url, options) => {
+       return fetch(url, { agent, ...options })
+     }
+   }
+   else if (isNotEmptyString(process.env.HTTPS_PROXY) || isNotEmptyString(process.env.ALL_PROXY)) {
+     const httpsProxy = process.env.HTTPS_PROXY || process.env.ALL_PROXY
+     if (httpsProxy) {
+       const agent = new HttpsProxyAgent(httpsProxy)
+       options.fetch = (url, options) => {
+         return fetch(url, { agent, ...options })
+       }
+     }
+   }
+   else {
+     options.fetch = (url, options) => {
+       return fetch(url, { ...options })
+     }
+   }
+ }
+
+ function currentModel(): ApiModel {
+   return apiModel
+ }
+
+ export type { ChatContext, ChatMessage }
+
+ export { chatReplyProcess, chatConfig, currentModel }
service/src/chatgpt/types.ts ADDED
@@ -0,0 +1,19 @@
+ import type { ChatMessage } from 'chatgpt'
+ import type fetch from 'node-fetch'
+
+ export interface RequestOptions {
+   message: string
+   lastContext?: { conversationId?: string; parentMessageId?: string }
+   process?: (chat: ChatMessage) => void
+   systemMessage?: string
+   temperature?: number
+   top_p?: number
+ }
+
+ export interface SetProxyOptions {
+   fetch?: typeof fetch
+ }
+
+ export interface UsageResponse {
+   total_usage: number
+ }
service/src/index.ts ADDED
@@ -0,0 +1,89 @@
+ import express from 'express'
+ import type { RequestProps } from './types'
+ import type { ChatMessage } from './chatgpt'
+ import { chatConfig, chatReplyProcess, currentModel } from './chatgpt'
+ import { auth } from './middleware/auth'
+ import { limiter } from './middleware/limiter'
+ import { isNotEmptyString } from './utils/is'
+
+ const app = express()
+ const router = express.Router()
+
+ app.use(express.static('public'))
+ app.use(express.json())
+
+ app.all('*', (_, res, next) => {
+   res.header('Access-Control-Allow-Origin', '*')
+   res.header('Access-Control-Allow-Headers', 'authorization, Content-Type')
+   res.header('Access-Control-Allow-Methods', '*')
+   next()
+ })
+
+ router.post('/chat-process', [auth, limiter], async (req, res) => {
+   res.setHeader('Content-type', 'application/octet-stream')
+
+   try {
+     const { prompt, options = {}, systemMessage, temperature, top_p } = req.body as RequestProps
+     let firstChunk = true
+     await chatReplyProcess({
+       message: prompt,
+       lastContext: options,
+       process: (chat: ChatMessage) => {
+         res.write(firstChunk ? JSON.stringify(chat) : `\n${JSON.stringify(chat)}`)
+         firstChunk = false
+       },
+       systemMessage,
+       temperature,
+       top_p,
+     })
+   }
+   catch (error) {
+     res.write(JSON.stringify(error))
+   }
+   finally {
+     res.end()
+   }
+ })
+
+ router.post('/config', auth, async (req, res) => {
+   try {
+     const response = await chatConfig()
+     res.send(response)
+   }
+   catch (error) {
+     res.send(error)
+   }
+ })
+
+ router.post('/session', async (req, res) => {
+   try {
+     const AUTH_SECRET_KEY = process.env.AUTH_SECRET_KEY
+     const hasAuth = isNotEmptyString(AUTH_SECRET_KEY)
+     res.send({ status: 'Success', message: '', data: { auth: hasAuth, model: currentModel() } })
+   }
+   catch (error) {
+     res.send({ status: 'Fail', message: error.message, data: null })
+   }
+ })
+
+ router.post('/verify', async (req, res) => {
+   try {
+     const { token } = req.body as { token: string }
+     if (!token)
+       throw new Error('Secret key is empty')
+
+     if (process.env.AUTH_SECRET_KEY !== token)
+       throw new Error('密钥无效 | Secret key is invalid')
+
+     res.send({ status: 'Success', message: 'Verify successfully', data: null })
+   }
+   catch (error) {
+     res.send({ status: 'Fail', message: error.message, data: null })
+   }
+ })
+
+ app.use('', router)
+ app.use('/api', router)
+ app.set('trust proxy', 1)
+
+ app.listen(3002, () => globalThis.console.log('Server is running on port 3002'))
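The `/chat-process` route above streams the answer as newline-delimited JSON: the first chunk is written bare, every later chunk is prefixed with `\n`, and each chunk is the full `ChatMessage` accumulated so far. A minimal client sketch for reading that stream follows; the URL and the absence of an `Authorization` header are assumptions, not part of this commit.

```ts
// Hypothetical client for the /chat-process stream.
async function streamChat(prompt: string): Promise<void> {
  const res = await fetch('http://127.0.0.1:3002/api/chat-process', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ prompt, options: {} }),
  })

  const reader = res.body!.getReader()
  const decoder = new TextDecoder()
  let buffer = ''

  while (true) {
    const { value, done } = await reader.read()
    if (done)
      break
    buffer += decoder.decode(value, { stream: true })
    // The last complete line holds the latest cumulative ChatMessage.
    const lastLine = buffer.split('\n').filter(Boolean).pop()
    try {
      if (lastLine)
        console.log(JSON.parse(lastLine).text)
    }
    catch {
      // A chunk may end mid-object; wait for more data before parsing.
    }
  }
}
```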
service/src/middleware/auth.ts ADDED
@@ -0,0 +1,21 @@
+ import { isNotEmptyString } from '../utils/is'
+
+ const auth = async (req, res, next) => {
+   const AUTH_SECRET_KEY = process.env.AUTH_SECRET_KEY
+   if (isNotEmptyString(AUTH_SECRET_KEY)) {
+     try {
+       const Authorization = req.header('Authorization')
+       if (!Authorization || Authorization.replace('Bearer ', '').trim() !== AUTH_SECRET_KEY.trim())
+         throw new Error('Error: 无访问权限 | No access rights')
+       next()
+     }
+     catch (error) {
+       res.send({ status: 'Unauthorized', message: error.message ?? 'Please authenticate.', data: null })
+     }
+   }
+   else {
+     next()
+   }
+ }
+
+ export { auth }
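When `AUTH_SECRET_KEY` is set, every route wrapped by this middleware expects the same value as a Bearer token. A hedged request sketch (the endpoint, host, and token value are placeholders, not taken from this commit):

```ts
// Hypothetical request against a protected route; AUTH_TOKEN must equal AUTH_SECRET_KEY on the server.
const AUTH_TOKEN = 'your-secret-key'

async function fetchConfig() {
  const res = await fetch('http://127.0.0.1:3002/api/config', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${AUTH_TOKEN}`,
    },
  })
  return res.json()
}
```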
service/src/middleware/limiter.ts ADDED
@@ -0,0 +1,19 @@
+ import { rateLimit } from 'express-rate-limit'
+ import { isNotEmptyString } from '../utils/is'
+
+ const MAX_REQUEST_PER_HOUR = process.env.MAX_REQUEST_PER_HOUR
+
+ const maxCount = (isNotEmptyString(MAX_REQUEST_PER_HOUR) && !isNaN(Number(MAX_REQUEST_PER_HOUR)))
+   ? parseInt(MAX_REQUEST_PER_HOUR)
+   : 0 // 0 means unlimited
+
+ const limiter = rateLimit({
+   // windowMs: 60 * 60 * 1000, // Maximum number of accesses within an hour
+   max: maxCount,
+   statusCode: 200, // 200 means success, but the message is 'Too many requests from this IP in 1 hour'
+   message: async (req, res) => {
+     res.send({ status: 'Fail', message: 'Giới hạn trò chuyện trong 1h.', data: null })
+   },
+ })
+
+ export { limiter }
service/src/types.ts ADDED
@@ -0,0 +1,34 @@
+ import type { FetchFn } from 'chatgpt'
+
+ export interface RequestProps {
+   prompt: string
+   options?: ChatContext
+   systemMessage: string
+   temperature?: number
+   top_p?: number
+ }
+
+ export interface ChatContext {
+   conversationId?: string
+   parentMessageId?: string
+ }
+
+ export interface ChatGPTUnofficialProxyAPIOptions {
+   accessToken: string
+   apiReverseProxyUrl?: string
+   model?: string
+   debug?: boolean
+   headers?: Record<string, string>
+   fetch?: FetchFn
+ }
+
+ export interface ModelConfig {
+   apiModel?: ApiModel
+   reverseProxy?: string
+   timeoutMs?: number
+   socksProxy?: string
+   httpsProxy?: string
+   usage?: string
+ }
+
+ export type ApiModel = 'ChatGPTAPI' | 'ChatGPTUnofficialProxyAPI' | undefined
service/src/utils/index.ts ADDED
@@ -0,0 +1,22 @@
+ interface SendResponseOptions<T = any> {
+   type: 'Success' | 'Fail'
+   message?: string
+   data?: T
+ }
+
+ export function sendResponse<T>(options: SendResponseOptions<T>) {
+   if (options.type === 'Success') {
+     return Promise.resolve({
+       message: options.message ?? null,
+       data: options.data ?? null,
+       status: options.type,
+     })
+   }
+
+   // eslint-disable-next-line prefer-promise-reject-errors
+   return Promise.reject({
+     message: options.message ?? 'Failed',
+     data: options.data ?? null,
+     status: options.type,
+   })
+ }
service/src/utils/is.ts ADDED
@@ -0,0 +1,19 @@
+ export function isNumber<T extends number>(value: T | unknown): value is number {
+   return Object.prototype.toString.call(value) === '[object Number]'
+ }
+
+ export function isString<T extends string>(value: T | unknown): value is string {
+   return Object.prototype.toString.call(value) === '[object String]'
+ }
+
+ export function isNotEmptyString(value: any): boolean {
+   return typeof value === 'string' && value.length > 0
+ }
+
+ export function isBoolean<T extends boolean>(value: T | unknown): value is boolean {
+   return Object.prototype.toString.call(value) === '[object Boolean]'
+ }
+
+ export function isFunction<T extends (...args: any[]) => any | void | never>(value: T | unknown): value is T {
+   return Object.prototype.toString.call(value) === '[object Function]'
+ }
service/tsconfig.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "compilerOptions": {
+     "target": "es2020",
+     "lib": [
+       "esnext"
+     ],
+     "allowJs": true,
+     "skipLibCheck": true,
+     "strict": false,
+     "forceConsistentCasingInFileNames": true,
+     "esModuleInterop": true,
+     "module": "esnext",
+     "moduleResolution": "node",
+     "resolveJsonModule": true,
+     "isolatedModules": true,
+     "baseUrl": ".",
+     "outDir": "build",
+     "noEmit": true
+   },
+   "exclude": [
+     "node_modules",
+     "build"
+   ],
+   "include": [
+     "**/*.ts"
+   ]
+ }
service/tsup.config.ts ADDED
@@ -0,0 +1,13 @@
+ import { defineConfig } from 'tsup'
+
+ export default defineConfig({
+   entry: ['src/index.ts'],
+   outDir: 'build',
+   target: 'es2020',
+   format: ['esm'],
+   splitting: false,
+   sourcemap: true,
+   minify: false,
+   shims: true,
+   dts: false,
+ })
src/App.vue ADDED
@@ -0,0 +1,22 @@
+ <script setup lang="ts">
+ import { NConfigProvider } from 'naive-ui'
+ import { NaiveProvider } from '@/components/common'
+ import { useTheme } from '@/hooks/useTheme'
+ import { useLanguage } from '@/hooks/useLanguage'
+
+ const { theme, themeOverrides } = useTheme()
+ const { language } = useLanguage()
+ </script>
+
+ <template>
+   <NConfigProvider
+     class="h-full"
+     :theme="theme"
+     :theme-overrides="themeOverrides"
+     :locale="language"
+   >
+     <NaiveProvider>
+       <RouterView />
+     </NaiveProvider>
+   </NConfigProvider>
+ </template>
src/api/index.ts ADDED
@@ -0,0 +1,66 @@
+ import type { AxiosProgressEvent, GenericAbortSignal } from 'axios'
+ import { post } from '@/utils/request'
+ import { useAuthStore, useSettingStore } from '@/store'
+
+ export function fetchChatAPI<T = any>(
+   prompt: string,
+   options?: { conversationId?: string; parentMessageId?: string },
+   signal?: GenericAbortSignal,
+ ) {
+   return post<T>({
+     url: '/chat',
+     data: { prompt, options },
+     signal,
+   })
+ }
+
+ export function fetchChatConfig<T = any>() {
+   return post<T>({
+     url: '/config',
+   })
+ }
+
+ export function fetchChatAPIProcess<T = any>(
+   params: {
+     prompt: string
+     options?: { conversationId?: string; parentMessageId?: string }
+     signal?: GenericAbortSignal
+     onDownloadProgress?: (progressEvent: AxiosProgressEvent) => void },
+ ) {
+   const settingStore = useSettingStore()
+   const authStore = useAuthStore()
+
+   let data: Record<string, any> = {
+     prompt: params.prompt,
+     options: params.options,
+   }
+
+   if (authStore.isChatGPTAPI) {
+     data = {
+       ...data,
+       systemMessage: settingStore.systemMessage,
+       temperature: settingStore.temperature,
+       top_p: settingStore.top_p,
+     }
+   }
+
+   return post<T>({
+     url: '/chat-process',
+     data,
+     signal: params.signal,
+     onDownloadProgress: params.onDownloadProgress,
+   })
+ }
+
+ export function fetchSession<T>() {
+   return post<T>({
+     url: '/session',
+   })
+ }
+
+ export function fetchVerify<T>(token: string) {
+   return post<T>({
+     url: '/verify',
+     data: { token },
+   })
+ }
src/assets/avatar.jpg ADDED
src/assets/recommend.json ADDED
@@ -0,0 +1,14 @@
+ [
+   {
+     "key": "chatgpt-Prompt-vn",
+     "desc": "Prompt được thu thập bởi chokipro ai (Ngoctuanai)",
+     "downloadUrl": "https://raw.githubusercontent.com/chokiproai/prompt/main/vi.json",
+     "url": "https://github.com/chokiproai/prompt/blob/main/vi.json"
+   },
+   {
+     "key": "chatgpt-Prompt-en",
+     "desc": "Prompt collected by chokipro ai (Ngoctuanai)",
+     "downloadUrl": "https://raw.githubusercontent.com/chokiproai/prompt/main/en.json",
+     "url": "https://github.com/chokiproai/prompt/blob/main/en.json"
+   }
+ ]