File size: 1,082 Bytes
a8f706c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
# Configuration for downloading, converting, and quantizing the model.
# Simple (:=) assignment is used throughout: values are expanded once at
# parse time, which is predictable and matches how they are used here.
MODEL_NAME :=	open-llama-3b
PYTHON ?=	python

# Pinned llama.cpp release providing convert.py and the quantize tool.
LLAMA_TAG :=	5c64a09
LLAMA_TAR :=	master-$(LLAMA_TAG).tar.gz

# Hugging Face repository holding the original PyTorch checkpoint.
HF_REPO :=	openlm-research/open_llama_3b
HF_REF :=	main
HF_FILES :=	pytorch_model.bin tokenizer.model

# Target-specific download base URLs, consumed by the generic
# $(FILES) download rule below.
$(HF_FILES):	SITE = https://huggingface.co/$(HF_REPO)/resolve/$(HF_REF)
$(LLAMA_TAR):	SITE = https://github.com/ggerganov/llama.cpp/archive/refs/tags

FILES :=	$(HF_FILES) $(LLAMA_TAR)

# Quantization formats to produce and the resulting model filenames
# ($(MODEL_NAME)-<quant>.bin for each entry in QUANTS).
QUANTS :=	f16 q4_0 q4_1 q5_0 q5_1 q8_0
MODEL_FILES :=	$(addsuffix .bin,$(addprefix $(MODEL_NAME)-,$(QUANTS)))

# Remove a partially-written target when its recipe fails, so a broken
# or truncated file never masquerades as up to date on the next run.
.DELETE_ON_ERROR:

# Default goal: build every quantized model plus the checksum manifest.
.PHONY: all
all: $(MODEL_FILES) SHA256SUMS

# Generic download rule; SITE is a target-specific variable set above.
# -f makes curl exit non-zero on HTTP errors — without it a 404/error
# page would be saved as the artifact and treated as a good download.
# Fetch into a temp file and rename so an interrupted transfer never
# leaves a truncated $@ that looks newer than its prerequisites.
$(FILES):
	curl -f -L -o $@.tmp --url $(SITE)/$@ && mv -f $@.tmp $@

# Unpack the pinned llama.cpp source tarball into ./llama.cpp,
# stripping the top-level "llama.cpp-<tag>/" directory from the archive.
# NOTE(review): the target is a directory used as a normal prerequisite
# below; a directory's mtime changes whenever its contents change, which
# can trigger spurious rebuilds of dependents — confirm acceptable here.
llama.cpp: $(LLAMA_TAR)
	mkdir -p $@
	tar -xf $< --strip-components=1 -C $@

# Build only the quantize tool inside the unpacked llama.cpp tree.
# $(MAKE) (not bare `make`) propagates -j/jobserver and -n correctly.
llama.cpp/quantize: llama.cpp
	$(MAKE) -C llama.cpp quantize

# Produce a local convert.py by applying our diff to llama.cpp's copy.
# llama.cpp is order-only (after |): the tree must exist, but a change
# in the directory's mtime alone should not force a re-patch.
convert.py: convert.py.diff | llama.cpp
	patch -ru llama.cpp/convert.py -i $< -o $@

# Convert the downloaded PyTorch checkpoint (read from the current
# directory, ".") into a single f16 model file.  convert.py is
# order-only: regenerating the script need not force a re-conversion.
$(MODEL_NAME)-f16.bin: $(HF_FILES) | convert.py
	$(PYTHON) convert.py --outtype f16 --outfile $@ .

# Quantize the f16 model; the pattern stem ($*) supplies the format
# suffix, e.g. $(MODEL_NAME)-q4_0.bin runs `quantize ... q4_0`.
# The quantize binary is order-only so a rebuild of the tool alone
# does not invalidate already-quantized models.
$(MODEL_NAME)-q%.bin: $(MODEL_NAME)-f16.bin | llama.cpp/quantize
	llama.cpp/quantize $< $@ q$*

# One .sha file per artifact, so checksums are computed incrementally
# and only for files that changed.
%.sha: %
	sha256sum $< > $@

# Aggregate the per-file checksums into a single manifest.
SHA256SUMS: $(addsuffix .sha,$(MODEL_FILES))
	cat $^ > $@