mabunday committed
Commit 822f2f3 · Parent: 0d15f3e

Update quantization methods for various models

Files changed (1): modal.json (+36, -4)
modal.json CHANGED
@@ -1,6 +1,8 @@
 {
-  "inference": {
-  "model": [
+  "inference":
+  {
+  "model":
+  [
   {
     "id": "amazon/FalconLite",
     "instanceType": "ml.g5.12xlarge",
@@ -67,6 +69,13 @@
     "numGpu": 4,
     "containerStartupHealthCheckTimeout": 300
   },
+  {
+    "id": "TheBloke/WizardLM-7B-uncensored-GPTQ",
+    "instanceType": "ml.g5.2xlarge",
+    "numGpu": 1,
+    "containerStartupHealthCheckTimeout": 300,
+    "quantizationMethod": "gptq"
+  },
   {
     "id": "ehartford/WizardLM-Uncensored-Falcon-40b",
     "instanceType": "ml.g5.12xlarge",
@@ -983,7 +992,29 @@
     "id": "amazon/FalconLite2",
     "instanceType": "ml.p5.48xlarge",
     "numGpu": 8,
-    "containerStartupHealthCheckTimeout": 2100
+    "containerStartupHealthCheckTimeout": 2100,
+    "quantizationMethod": "gptq"
+  },
+  {
+    "id": "TheBloke/Llama-2-7B-GPTQ",
+    "instanceType": "ml.g5.2xlarge",
+    "numGpu": 1,
+    "containerStartupHealthCheckTimeout": 300,
+    "quantizationMethod": "gptq"
+  },
+  {
+    "id": "TheBloke/Wizard-Vicuna-13B-Uncensored-SuperHOT-8K-GPTQ",
+    "instanceType": "ml.g5.2xlarge",
+    "numGpu": 1,
+    "containerStartupHealthCheckTimeout": 300,
+    "quantizationMethod": "gptq"
+  },
+  {
+    "id": "TheBloke/Llama-2-13B-GPTQ",
+    "instanceType": "ml.g5.2xlarge",
+    "numGpu": 1,
+    "containerStartupHealthCheckTimeout": 300,
+    "quantizationMethod": "gptq"
   },
   {
     "id": "llm-agents/tora-70b-v1.0",
@@ -1986,5 +2017,6 @@
   }
   ]
   },
-  "training": {}
+  "training":
+  {}
 }
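
Each new entry follows the same schema as the existing ones, adding a "quantizationMethod" field alongside "id", "instanceType", "numGpu", and "containerStartupHealthCheckTimeout". As a rough illustration (not part of this commit: only the file name and field names come from the diff, the loader itself is an assumption), a deployment script could pick out the GPTQ-quantized entries like this:

    import json

    # Hypothetical consumer of modal.json (not in this repo): list the inference
    # entries that declare GPTQ quantization. Field names match the diff above;
    # everything else is illustrative.
    with open("modal.json") as f:
        config = json.load(f)

    for model in config["inference"]["model"]:
        if model.get("quantizationMethod") == "gptq":
            print(
                f'{model["id"]}: {model["instanceType"]}, '
                f'{model["numGpu"]} GPU(s), '
                f'startup health-check timeout {model["containerStartupHealthCheckTimeout"]}s'
            )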