File size: 1,723 Bytes
b585c7f |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 |
{{- if .Values.h2ogpt.enabled }}
{{- /*
Web UI Service: exposes the h2oGPT Gradio frontend (container port 7860)
on the configurable .Values.h2ogpt.service.webPort.
Optional per-service annotations come from webServiceAnnotations.
*/}}
apiVersion: v1
kind: Service
metadata:
  name: {{ include "h2ogpt.fullname" . }}-web
  namespace: {{ .Release.Namespace }}
  {{- with .Values.h2ogpt.service.webServiceAnnotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  selector:
    app: {{ include "h2ogpt.fullname" . }}
  ports:
    - protocol: TCP
      port: {{ .Values.h2ogpt.service.webPort }}
      targetPort: 7860
  type: {{ .Values.h2ogpt.service.type }}
{{- end }}
---
{{- if .Values.h2ogpt.enabled }}
{{- /*
API Service: exposes the h2oGPT backend (container port 8888)
on the configurable .Values.h2ogpt.service.gptPort.
Shares its pod selector with the web Service above.
*/}}
apiVersion: v1
kind: Service
metadata:
  name: {{ include "h2ogpt.fullname" . }}
  namespace: {{ .Release.Namespace }}
spec:
  selector:
    app: {{ include "h2ogpt.fullname" . }}
  ports:
    - protocol: TCP
      port: {{ .Values.h2ogpt.service.gptPort }}
      targetPort: 8888
  type: {{ .Values.h2ogpt.service.type }}
{{- end }}
---
{{- /*
TGI inference Service: fronts the text-generation-inference pods
(container port 80). Only rendered when TGI is enabled as a standalone
deployment, i.e. not when the bundled h2ogpt stack manages it.
*/}}
{{- if and .Values.tgi.enabled (not .Values.h2ogpt.stack.enabled) }}
apiVersion: v1
kind: Service
metadata:
  name: {{ include "h2ogpt.fullname" . }}-tgi-inference
  namespace: {{ .Release.Namespace }}
spec:
  selector:
    app: {{ include "h2ogpt.fullname" . }}-tgi-inference
  ports:
    - protocol: TCP
      port: {{ .Values.tgi.service.port }}
      targetPort: 80
  type: {{ .Values.tgi.service.type }}
{{- end }}
---
{{- /*
vLLM inference Service: fronts the vLLM pods (container port 5000).
Only rendered when vLLM is enabled as a standalone deployment,
i.e. not when the bundled h2ogpt stack manages it.
*/}}
{{- if and .Values.vllm.enabled (not .Values.h2ogpt.stack.enabled) }}
apiVersion: v1
kind: Service
metadata:
  name: {{ include "h2ogpt.fullname" . }}-vllm-inference
  namespace: {{ .Release.Namespace }}
spec:
  selector:
    app: {{ include "h2ogpt.fullname" . }}-vllm-inference
  ports:
    - protocol: TCP
      port: {{ .Values.vllm.service.port }}
      targetPort: 5000
  type: {{ .Values.vllm.service.type }}
{{- end }}
|