anemll committed on
Commit 1d031b5 · verified · 1 Parent(s): eaf8763

Upload folder using huggingface_hub
DeepSeek_FFN_PF_lut6_chunk_01of08.mlmodelc.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7cba1e56ae4985f5dac6544f3b359828094014a1f42fab7ea10298631deee90
+ size 669239587
DeepSeek_FFN_PF_lut6_chunk_02of08.mlmodelc.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:594f51db0d54a80babf8d7a973383321d10d5620e87f7c72d5b948713e16dca1
+ size 670031183
DeepSeek_FFN_PF_lut6_chunk_03of08.mlmodelc.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:80d2099f3b33176e5a9553dd6381ddb8df82c39ba4c777acc39da63fa72e752a
+ size 669823421
DeepSeek_FFN_PF_lut6_chunk_04of08.mlmodelc.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:85e6ed34944904856082a0fcc4e9f7525f7e43e1ff56f5489d36359a47715cc8
+ size 669617459
DeepSeek_FFN_PF_lut6_chunk_05of08.mlmodelc.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7050812c0d94bc9f4d3f2089b963fc2e1b5a7c3842cf500d2f8405ae817aca7e
+ size 670113729
DeepSeek_FFN_PF_lut6_chunk_06of08.mlmodelc.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:799756ba028c2317cadaa49b46465d95fcdb40a80b1a63aa4e4c3b8b3e7eb2fc
+ size 670565625
DeepSeek_FFN_PF_lut6_chunk_07of08.mlmodelc.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0121da9932daa48bfc759c2703896d494d734b267ac896cd7c94f0fba63a09e4
+ size 670528924
DeepSeek_FFN_PF_lut6_chunk_08of08.mlmodelc.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af18eccfae6b3f10b70d2e1025e7e1eeabd4ed560b505d0ec86807a98cc87be8
+ size 669521711
DeepSeek_embeddings.mlmodelc.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3b597fd3bec7e16d70a01b11aa003d3a05d27c25f9ec573f6e95b9fc759cb617
+ size 807369928
DeepSeek_lm_head_lut6.mlmodelc.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:062dd5854b297e3996aa7117808ca1093283e4ec1e22bbd29f0bdd3761a61fb9
+ size 807760513
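Each pointer above records only the archive's sha256 digest and byte size; the real payloads come down with `git lfs pull` (see the README below). A small check like the following is one way to confirm that the downloads match the published digests. It is not part of this commit; the two entries shown are copied from the pointers above, and the remaining archives would be added to the table the same way.

```python
# Sketch: verify downloaded .zip archives against the sha256/size values
# published in the LFS pointer files above. Not shipped with this repo.
import hashlib
from pathlib import Path

EXPECTED = {
    # filename: (sha256, size in bytes) -- copied from the pointers above
    "DeepSeek_embeddings.mlmodelc.zip":
        ("3b597fd3bec7e16d70a01b11aa003d3a05d27c25f9ec573f6e95b9fc759cb617", 807369928),
    "DeepSeek_lm_head_lut6.mlmodelc.zip":
        ("062dd5854b297e3996aa7117808ca1093283e4ec1e22bbd29f0bdd3761a61fb9", 807760513),
}

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Stream the file so the multi-hundred-MB archives are not loaded into RAM."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        while block := f.read(chunk_size):
            digest.update(block)
    return digest.hexdigest()

for name, (expected_sha, expected_size) in EXPECTED.items():
    p = Path(name)
    ok = p.exists() and p.stat().st_size == expected_size and sha256_of(p) == expected_sha
    print(f"{name}: {'OK' if ok else 'missing or mismatched'}")
```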
README.md ADDED
@@ -0,0 +1,103 @@
+ ---
+ license: mit
+ tags:
+ - coreml
+ - ANE
+ - DeepSeek
+ - Apple
+ - Apple Neural Engine
+ ---
+ # ANEMLL
+
+ **ANEMLL** (pronounced like "animal") is an open-source project focused on accelerating the porting of Large Language Models (LLMs) to tensor processors, starting with the Apple Neural Engine (ANE).
+
+ The goal is to provide a fully open-source pipeline from model conversion to inference for common LLM architectures running on ANE.
+
+ This enables seamless integration and on-device inference for low-power applications on edge devices, ensuring maximum privacy and security.
+
+ This is critical for autonomous applications, where models run directly on the device without requiring an internet connection.
+
+ ---
+
+ ## License
+
+ ANEMLL is licensed under the [MIT License](https://opensource.org/license/mit).
+ The model is based on Meta's LLaMA 3.2 and may require a separate license.
+
+ This test model is Meta's LLaMA 3.2 1B (1024-token context) converted to CoreML. It was released before the official launch of the ANEMLL repository, with minimal documentation, and is intended only for early adopters who requested an early release.
+
+ ---
+
+ ## Requirements
+
+ - **macOS Sequoia** with Apple Neural Engine and 16GB RAM
+ - **CoreML Tools** and **HuggingFace Transformers** libraries
+ - **Python 3.9**
+
+ `chat.py` provides a sample inference script.
+ `chat_full.py` provides a sample inference script with history and conversation management.
+
+ **Installation**
+
+ 1. Download the model from Hugging Face:
+ ```bash
+ # Install required tools
+ pip install huggingface_hub
+
+ # Install Git LFS (Large File Support)
+ # macOS with Homebrew:
+ brew install git-lfs
+ # Or Ubuntu/Debian:
+ # sudo apt-get install git-lfs
+
+ # Initialize Git LFS
+ git lfs install
+
+ # Clone the repository with model files
+ git clone https://huggingface.co/anemll/anemll-DeepSeekR1-8B-ctx1024_0.1.1
+ ```
+
+ 2. Extract model files:
+ ```bash
+ # Navigate to cloned directory
+ cd anemll-DeepSeekR1-8B-ctx1024_0.1.1
+
+ # Pull LFS files (model weights)
+ git lfs pull
+
+ # Extract CoreML model files
+ find . -type f -name "*.zip" -exec unzip {} \;
+ ```
+
+ 3. Install dependencies:
+ ```bash
+ pip install coremltools transformers
+ ```
+
+ **Coremltools:**
+
+ See the coremltools installation guide at https://coremltools.readme.io/v4.0/docs/installation
+
+ **How to Run**
+
+ 1. Basic chat interface:
+ ```bash
+ python chat.py --meta ./meta.yaml
+ ```
+
+ 2. Full conversation mode with history:
+ ```bash
+ python chat_full.py --meta ./meta.yaml
+ ```
+
+ > Note: The first time the model loads, macOS will take some time to place it on the device.
+ > Subsequent loads are nearly instantaneous.
+ > Use Ctrl-D to exit and Ctrl-C to interrupt inference.
+
+ **More Info**
+ Please check the following links for updates:
+
+ • https://huggingface.co/anemll
+ • https://x.com/anemll
+ • https://github.com/anemll
+ • https://anemll.com
+
+ realanemll@gmail.com
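The `--meta ./meta.yaml` flag used above is parsed by `chat.py` (below) with `yaml.safe_load`, which then reads `model_info.parameters`. The snippet that follows only illustrates that expected layout: the key names come from `parse_args()` in `chat.py`, and the values are inferred from the archive names in this repository (prefix `DeepSeek`, 8 chunks, LUT6, 1024-token context), not copied from the shipped `meta.yaml`.

```python
# Illustration of the structure chat.py expects from meta.yaml after
# yaml.safe_load(). Values are plausible for this repo, not authoritative.
import yaml

example_meta = """
model_info:
  parameters:
    model_prefix: DeepSeek
    context_length: 1024
    batch_size: 64
    num_chunks: 8
    lut_ffn: 6
    lut_lmhead: 6
"""

params = yaml.safe_load(example_meta)["model_info"]["parameters"]

# chat.py assembles model file names from these fields, e.g. the first FFN chunk:
first_chunk = (f"{params['model_prefix']}_FFN_PF_lut{params['lut_ffn']}"
               f"_chunk_01of{int(params['num_chunks']):02d}")
print(first_chunk)  # DeepSeek_FFN_PF_lut6_chunk_01of08, matching the archives above
```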
chat.py ADDED
@@ -0,0 +1,819 @@
1
+ #!/usr/bin/env python3
+ # chat.py
4
+ # Copyright (c) 2025 Anemll
5
+ # Licensed under the MIT License
6
+
7
+ import argparse
8
+ import os
9
+ import re
10
+ import glob
11
+ from pathlib import Path
12
+ import coremltools as ct
13
+ from transformers import LlamaTokenizer, AutoTokenizer
14
+ import torch
15
+ import torch.nn.functional as F
16
+ import numpy as np
17
+ import queue
18
+ import threading
19
+ import time
20
+ import yaml
21
+ import sys
22
+
23
+ # ANSI color codes
24
+ LIGHT_BLUE = "\033[94m"
25
+ DARK_BLUE = "\033[34m"
26
+ LIGHT_GREEN = "\033[92m"
27
+ RESET_COLOR = "\033[0m"
28
+
29
+ # Add at top with other constants
30
+ WARMUP_TOKEN_LIMIT = 10 # Maximum tokens to generate during warmup
31
+
32
+ class TokenPrinter:
33
+ """Handles background printing of generated tokens."""
34
+ def __init__(self, tokenizer):
35
+ self.tokenizer = tokenizer
36
+ self.token_queue = queue.Queue()
37
+ self.stop_event = threading.Event()
38
+ self.thread = None
39
+ self.buffer = ""
40
+ self.lock = threading.Lock()
41
+ self.thinking = True # Track if we're still in thinking mode
42
+ self.decoding_buffer = [] # Buffer for token IDs
43
+ # Add token counting and timing
44
+ self.start_time = time.time()
45
+ self.token_count = 0
46
+ self.start()
47
+
48
+ def start(self):
49
+ """Start the printer thread."""
50
+ if self.thread is None:
51
+ self.thread = threading.Thread(target=self._print_worker)
52
+ self.thread.daemon = True
53
+ self.thread.start()
54
+
55
+ def add_token(self, token_id):
56
+ """Add a token to the print queue."""
57
+ if not self.stop_event.is_set():
58
+ self.token_queue.put(token_id)
59
+ self.token_count += 1
60
+
61
+ def drain_buffer(self):
62
+ """Decode token IDs from decoding_buffer in the main thread."""
63
+ if not self.decoding_buffer:
64
+ return
65
+
66
+ # Decode all tokens at once in the main thread
67
+ token_str = self.tokenizer.decode(self.decoding_buffer)
68
+ self.decoding_buffer.clear()
69
+
70
+ # Color-handling logic
71
+ if self.thinking and "</think>" in token_str:
72
+ self.thinking = False
73
+ parts = token_str.split("</think>")
74
+ if len(parts) > 0:
75
+ print(parts[0] + "</think>", end='', flush=True)
76
+ if len(parts) > 1:
77
+ print(LIGHT_BLUE + parts[1], end='', flush=True)
78
+ else:
79
+ if not self.thinking:
80
+ print(LIGHT_BLUE + token_str, end='', flush=True)
81
+ else:
82
+ print(token_str, end='', flush=True)
83
+
84
+ def _print_worker(self):
85
+ """Worker thread that takes token_ids from the queue."""
86
+ while not self.stop_event.is_set():
87
+ try:
88
+ token_id = self.token_queue.get(timeout=0.01)
89
+ with self.lock:
90
+ self.decoding_buffer.append(token_id)
91
+ self.token_queue.task_done()
92
+ except queue.Empty:
93
+ continue
94
+ except Exception as e:
95
+ print(f"\nError: Token printer error: {str(e)}")
96
+ break
97
+
98
+ def stop(self):
99
+ """Stop the printer thread."""
100
+ if self.thread and self.thread.is_alive():
101
+ self.stop_event.set()
102
+ try:
103
+ self.thread.join(timeout=1.0)
104
+ except Exception:
105
+ pass
106
+ # Calculate and print tokens/s with shorter format in blue
107
+ elapsed = time.time() - self.start_time
108
+ if elapsed > 0 and self.token_count > 0:
109
+ tokens_per_sec = self.token_count / elapsed
110
+ print(f"\n{DARK_BLUE}{tokens_per_sec:.1f} t/s{RESET_COLOR}")
111
+ else:
112
+ print(RESET_COLOR) # Reset color at the end
113
+ return self.buffer
114
+
115
+ def parse_model_path(path):
116
+ """Parse model path and return full path with .mlmodelc or .mlpackage extension."""
117
+ path = Path(path)
118
+
119
+ # If path exists exactly as specified, return it
120
+ if path.exists():
121
+ return str(path)
122
+
123
+ # Try with both extensions
124
+ candidates = [
125
+ path, # Original path
126
+ path.with_suffix('.mlmodelc'), # With .mlmodelc
127
+ path.with_suffix('.mlpackage'), # With .mlpackage
128
+ Path(str(path) + '.mlmodelc'), # Handle case where extension is included
129
+ Path(str(path) + '.mlpackage')
130
+ ]
131
+
132
+ # Try all possible paths
133
+ for candidate in candidates:
134
+ if candidate.exists():
135
+ print(f"Found model at: {candidate}")
136
+ return str(candidate)
137
+
138
+ # If we get here, no valid path was found
139
+ print("\nError: Model not found. Tried the following paths:")
140
+ for candidate in candidates:
141
+ print(f" {candidate}")
142
+ raise FileNotFoundError(f"Model not found: {path}")
143
+
144
+ def parse_ffn_filename(path):
145
+ """Parse FFN model filename to extract chunk information."""
146
+ path = Path(path)
147
+ pattern = r'FFN_PF.*_chunk_(\d+)of(\d+)'
148
+ match = re.search(pattern, path.name)
149
+
150
+ if match:
151
+ current_chunk = int(match.group(1))
152
+ total_chunks = int(match.group(2))
153
+ return current_chunk, total_chunks
154
+ return None, None
155
+
156
+ def find_all_chunks(base_path):
157
+ """Find all chunk files matching the base FFN path pattern."""
158
+ path = Path(base_path)
159
+ pattern = re.sub(r'_chunk_\d+of\d+', '_chunk_*', str(path))
160
+ return sorted(glob.glob(pattern))
161
+
162
+ def load_model(path, function_name=None):
163
+ """Load a CoreML model, handling both .mlmodelc and .mlpackage formats."""
164
+ path = Path(path)
165
+ compute_unit = ct.ComputeUnit.CPU_AND_NE
166
+
167
+ try:
168
+ if path.suffix == '.mlmodelc':
169
+ # For compiled models (.mlmodelc), use CompiledMLModel
170
+ if function_name:
171
+ return ct.models.CompiledMLModel(str(path), compute_unit, function_name=function_name)
172
+ else:
173
+ return ct.models.CompiledMLModel(str(path), compute_unit)
174
+ else:
175
+ # For packages (.mlpackage)
176
+ if function_name:
177
+ return ct.models.MLModel(str(path), function_name=function_name)
178
+ else:
179
+ return ct.models.MLModel(str(path))
180
+
181
+ except RuntimeError as e:
182
+ if "valid manifest does not exist" in str(e):
183
+ print(f"\nError: Could not load compiled model at {path}")
184
+ print("This might be because:")
185
+ print("1. The model is not properly compiled")
186
+ print("2. The model was compiled for a different OS version")
187
+ print("3. The model needs to be recompiled")
188
+ print("\nTry using the .mlpackage version instead, or recompile the model.")
189
+ raise
190
+
191
+ def load_metadata(model,args):
192
+ # Extract metadata and config parameters
193
+ metadata = {}
194
+ if hasattr(model, 'user_defined_metadata'):
195
+ meta = model.user_defined_metadata
196
+
197
+ # Extract key parameters with defaults
198
+ metadata['context_length'] = int(meta.get('com.anemll.context_length', 512))
199
+ metadata['state_length'] = int(meta.get('com.anemll.state_length', metadata['context_length'])) # Added state_length
200
+ metadata['batch_size'] = int(meta.get('com.anemll.batch_size', 64))
201
+ metadata['lut_bits'] = int(meta.get('com.anemll.lut_bits', 0))
202
+ metadata['num_chunks'] = int(meta.get('com.anemll.num_chunks', 1))
203
+
204
+ print("\nExtracted Parameters:")
205
+ print(f" Context Length: {metadata['context_length']}")
206
+ print(f" State Length: {metadata['state_length']}")
207
+ print(f" Prefill Batch Size: {metadata['batch_size']}")
208
+ print(f" LUT Bits: {metadata['lut_bits']}")
209
+ print(f" Number of Chunks: {metadata['num_chunks']}")
210
+
211
+ # Print model info
212
+ print("\nModel Info:")
213
+ if 'com.anemll.info' in meta:
214
+ print(f" {meta['com.anemll.info']}")
215
+ if 'com.github.apple.coremltools.version' in meta:
216
+ print(f" CoreML Tools: {meta['com.github.apple.coremltools.version']}")
217
+
218
+ # Print model input/output shapes
219
+ print("\nModel Shapes:")
220
+ if hasattr(model, 'input_description'):
221
+ print(" Inputs:")
222
+ for name, desc in model.input_description.items():
223
+ print(f" {name}: {desc}")
224
+ if hasattr(model, 'output_description'):
225
+ print(" Outputs:")
226
+ for name, desc in model.output_description.items():
227
+ print(f" {name}: {desc}")
228
+ else:
229
+ print("\nWarning: No metadata found in model")
230
+
231
+ # Check if model directory name contains context length pattern (ctxXXX)
232
+ ctx_len = 512
233
+ if args.context_length is None:
234
+ import re
235
+ ctx_match = re.search(r'ctx(\d+)', str(args.d))
236
+ if ctx_match:
237
+ ctx_len0 = int(ctx_match.group(1))
238
+ if 512 <= ctx_len0 <= 8096:
239
+ ctx_len = ctx_len0
240
+ print(f"\nDetected context length {ctx_len} from directory name")
241
+ else:
242
+ print(f"\nWarning: Could not detect context length from directory name {args.d}; using default {ctx_len}")
243
+ else:
244
+ ctx_len = args.context_length
245
+
246
+ # Use defaults
247
+ metadata['context_length'] = ctx_len
248
+ metadata['state_length'] = ctx_len
249
+ metadata['batch_size'] = 64
250
+ metadata['lut_bits'] = 4
251
+ metadata['num_chunks'] = 4
252
+ print("\nUsing default parameters:")
253
+ print(f" Context Length: {metadata['context_length']}")
254
+ print(f" State Length: {metadata['state_length']}")
255
+ print(f" Prefill Batch Size: {metadata['batch_size']}")
256
+ print(f" LUT Bits: {metadata['lut_bits']}")
257
+ print(f" Number of Chunks: {metadata['num_chunks']}")
258
+ return metadata
259
+
260
+ def load_models(args,metadata):
261
+ """Load all required models and extract metadata."""
262
+ print("\nLoading models...")
263
+
264
+ try:
265
+ # Load embeddings model
266
+ print("\nLoading embeddings model...")
267
+ embed_path = parse_model_path(args.embed)
268
+ print(f"Loading from: {embed_path}")
269
+ embed_model = load_model(embed_path)
270
+ print("Embeddings model loaded successfully")
271
+ metadata = load_metadata(embed_model,args)
272
+
273
+
274
+
275
+ # Load LM head model
276
+ print("\nLoading LM head model...")
277
+ lmhead_path = parse_model_path(args.lmhead)
278
+ print(f"Loading from: {lmhead_path}")
279
+ lmhead_model = load_model(lmhead_path)
280
+ print("LM head model loaded successfully")
281
+
282
+ # Parse FFN path and find chunks if needed
283
+ print("\nLoading FFN+PREFILL model(s)...")
284
+ ffn_path = parse_model_path(args.ffn)
285
+ chunk_no, total_chunks = parse_ffn_filename(ffn_path)
286
+
287
+ ffn_models = []
288
+ if chunk_no and total_chunks:
289
+ print(f"\nDetected chunked FFN+PREFILL model ({total_chunks} chunks)")
290
+ # Find and load all chunks
291
+ chunk_paths = find_all_chunks(ffn_path)
292
+ if len(chunk_paths) != total_chunks:
293
+ raise ValueError(f"Found {len(chunk_paths)} chunks but filename indicates {total_chunks} chunks")
294
+
295
+ for chunk_path in chunk_paths:
296
+ print(f"\nLoading FFN+PREFILL chunk: {Path(chunk_path).name}")
297
+ try:
298
+ # For chunked models, we need both infer and prefill functions
299
+ ffn_models.append({
300
+ 'infer': load_model(chunk_path, function_name='infer'),
301
+ 'prefill': load_model(chunk_path, function_name='prefill')
302
+ })
303
+ print("Chunk loaded successfully")
304
+ except Exception as e:
305
+ print(f"Error loading chunk {chunk_path}: {str(e)}")
306
+ raise
307
+ metadata = load_metadata(ffn_models[0],args)
308
+
309
+ else:
310
+ print("\nLoading single FFN model...")
311
+ ffn_models.append(load_model(ffn_path))
312
+ print("FFN model loaded successfully")
313
+
314
+ return embed_model, ffn_models, lmhead_model, metadata
315
+
316
+ except Exception as e:
317
+ print(f"\nError loading models: {str(e)}")
318
+ print("\nPlease ensure all model files exist and are accessible.")
319
+ print("Expected files:")
320
+ print(f" Embeddings: {args.embed}")
321
+ print(f" LM Head: {args.lmhead}")
322
+ print(f" FFN: {args.ffn}")
323
+ raise
324
+
325
+ # At the top of the file, make this a default path
326
+
327
+ def initialize_tokenizer(model_path=None):
328
+ """Initialize and configure the tokenizer."""
329
+ try:
330
+
331
+
332
+ tokenizer = AutoTokenizer.from_pretrained(
333
+ str(model_path),
334
+ use_fast=False,
335
+ trust_remote_code=True
336
+ )
337
+
338
+ print("\nTokenizer Configuration:")
339
+ print(f"Tokenizer type: {type(tokenizer)}")
340
+ print(f"Tokenizer name: {tokenizer.__class__.__name__}")
341
+ print(f"Vocabulary size: {len(tokenizer)}")
342
+ print(f"Model max length: {tokenizer.model_max_length}")
343
+
344
+ if tokenizer.pad_token is None:
345
+ tokenizer.pad_token = tokenizer.eos_token
346
+ tokenizer.pad_token_id = tokenizer.eos_token_id
347
+ print("Set PAD token to EOS token")
348
+
349
+ tokenizer.padding_side = "left"
350
+
351
+ print(f"\nSpecial Tokens:")
352
+ print(f"PAD token: '{tokenizer.pad_token}' (ID: {tokenizer.pad_token_id})")
353
+ print(f"EOS token: '{tokenizer.eos_token}' (ID: {tokenizer.eos_token_id})")
354
+ print(f"BOS token: '{tokenizer.bos_token}' (ID: {tokenizer.bos_token_id})")
355
+ print(f"UNK token: '{tokenizer.unk_token}' (ID: {tokenizer.unk_token_id})")
356
+
357
+ return tokenizer
358
+
359
+ except Exception as e:
360
+ print(f"\nError: Failed to load tokenizer from {model_path}")
361
+ print(f"Error details: {str(e)}")
362
+ print(f"Error type: {type(e)}")
363
+ print("\nThis code requires a Llama 3.2 model for chat template functionality.")
364
+ print("Please provide the path to a Llama 3.2 model directory.")
365
+ import traceback
366
+ traceback.print_exc()
367
+ raise
368
+
369
+
370
+
371
+ def make_causal_mask(length, start):
372
+ """Create causal attention mask."""
373
+ mask = np.full((1, 1, length, length), -np.inf, dtype=np.float16)
374
+ row_indices = np.arange(length).reshape(length, 1)
375
+ col_indices = np.arange(length).reshape(1, length)
376
+ mask[:, :, col_indices <= (row_indices + start)] = 0
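+ # Query position i may attend to key positions j <= i + start (mask value 0);
+ # later positions stay at -inf so softmax ignores them. float16 matches the model's mask input.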
377
+ return mask
378
+
379
+ def run_prefill(embed_model, ffn_models, input_ids, context_pos, context_length, batch_size=64, state=None):
380
+ """Run prefill on the input sequence."""
381
+ # Create causal mask
382
+ causal_mask = make_causal_mask(context_length, 0)
383
+ causal_mask = torch.tensor(causal_mask, dtype=torch.float16)
384
+
385
+ # Process in batches
386
+ batch_pos = 0
387
+ while batch_pos < context_pos:
388
+ batch_end = min(batch_pos + batch_size, context_pos)
389
+ current_batch_size = batch_end - batch_pos
390
+
391
+ # Get current batch
392
+ batch_input = input_ids[:, batch_pos:batch_end]
393
+
394
+ # Always pad to full batch size for prefill
395
+ batch_input = F.pad(
396
+ batch_input,
397
+ (0, batch_size - current_batch_size),
398
+ value=0
399
+ )
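+ # Prefill always runs on a full batch_size-wide window, so the last partial batch
+ # is zero-padded up to that width before being fed to the model.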
400
+
401
+ # Generate position IDs for full batch size
402
+ position_ids = torch.arange(batch_size, dtype=torch.int32) # Changed: Always use full batch size
403
+ batch_causal_mask = causal_mask[:, :, :batch_size, :] # Changed: Use full batch size
404
+
405
+ # Run embeddings
406
+ hidden_states = torch.from_numpy(
407
+ embed_model.predict({'input_ids': batch_input.numpy()})['hidden_states']
408
+ )
409
+
410
+ # Run through FFN chunks with state
411
+ for ffn_model in ffn_models:
412
+ if isinstance(ffn_model, dict):
413
+ inputs = {
414
+ 'hidden_states': hidden_states.numpy(), # [1, 64, hidden_size]
415
+ 'position_ids': position_ids.numpy(), # [64]
416
+ 'causal_mask': batch_causal_mask.numpy(), # [1, 1, 64, context_length]
417
+ 'current_pos': np.array([batch_pos], dtype=np.int32) # [1]
418
+ }
419
+ output = ffn_model['prefill'].predict(inputs, state)
420
+ hidden_states = torch.from_numpy(output['output_hidden_states'])
421
+
422
+ batch_pos = batch_end
423
+
424
+ return torch.tensor([context_pos], dtype=torch.int32)
425
+
426
+ def generate_next_token(embed_model, ffn_models, lmhead_model, input_ids, pos, context_length, state=None, temperature=0.0):
427
+ """Generate the next token."""
428
+ # Get current token
429
+ current_token = input_ids[:, pos-1:pos] # [1, 1]
430
+
431
+ # Run embeddings
432
+ hidden_states = torch.from_numpy(
433
+ embed_model.predict({'input_ids': current_token.numpy()})['hidden_states']
434
+ ) # [1, 1, hidden_size]
435
+
436
+ # Create masks
437
+ update_mask = torch.zeros((1, 1, context_length, 1), dtype=torch.float16)
438
+ update_mask[0, 0, pos-1, 0] = 1.0
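+ # update_mask is all zeros except a single 1 at pos-1, marking which cache slot
+ # this one-token decode step corresponds to.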
439
+ position_ids = torch.tensor([pos-1], dtype=torch.int32) # [1]
440
+ causal_mask = make_causal_mask(context_length, 0)
441
+ causal_mask = torch.tensor(causal_mask[:, :, pos-1:pos, :], dtype=torch.float16) # [1, 1, 1, context_length]
442
+
443
+ # Run through FFN chunks with state
444
+ for ffn_model in ffn_models:
445
+ if isinstance(ffn_model, dict):
446
+ inputs = {
447
+ 'hidden_states': hidden_states.numpy(),
448
+ 'update_mask': update_mask.numpy(),
449
+ 'position_ids': position_ids.numpy(),
450
+ 'causal_mask': causal_mask.numpy(),
451
+ 'current_pos': position_ids.numpy()
452
+ }
453
+ output = ffn_model['infer'].predict(inputs, state)
454
+ hidden_states = torch.from_numpy(output['output_hidden_states'])
455
+
456
+ # Run LM head
457
+ lm_output = lmhead_model.predict({'hidden_states': hidden_states.numpy()})
458
+ # Debug print
459
+ #print("\nLM Head output keys:", list(lm_output.keys()))
460
+
461
+ # Combine logits1-8 if they exist
462
+ if 'logits1' in lm_output:
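+ # The LM head may emit its output split across up to eight tensors (logits1..logits8);
+ # stitch them back into a single logits tensor along the vocabulary dimension.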
463
+ # Concatenate all logits parts
464
+ logits_parts = []
465
+ for i in range(1, 9):
466
+ key = f'logits{i}'
467
+ if key in lm_output:
468
+ logits_parts.append(torch.from_numpy(lm_output[key]))
469
+ logits = torch.cat(logits_parts, dim=-1) # Concatenate along vocab dimension
470
+ else:
471
+ # Try output_logits as fallback
472
+ logits = torch.from_numpy(lm_output['output_logits'])
473
+
474
+ # Apply temperature and sample
475
+ if temperature > 0:
476
+ logits = logits / temperature
477
+ probs = F.softmax(logits[0, -1, :], dim=-1)
478
+ next_token = torch.multinomial(probs, num_samples=1).item()
479
+ else:
480
+ next_token = torch.argmax(logits[0, -1, :]).item()
481
+
482
+ return next_token
483
+
484
+ def create_unified_state(ffn_models, context_length):
485
+ """Create unified KV cache state for transformer."""
486
+ if isinstance(ffn_models[0], dict):
487
+ # Use first FFN model's prefill function to create state
488
+ state = ffn_models[0]['prefill'].make_state()
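+ # One state object is created from the first chunk's prefill function and then passed
+ # to every chunk's predict() call, so all chunks share the same KV cache.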
489
+ print(f"\nCreated unified transformer state for {len(ffn_models)} chunks")
490
+ return state
491
+ else:
492
+ state = ffn_models[0].make_state()
493
+ print("\nCreated unified transformer state")
494
+ return state
495
+
496
+ def chat_loop(embed_model, ffn_models, lmhead_model, tokenizer, metadata, state, auto_prompt=None, warmup=False):
497
+ """Interactive chat loop."""
498
+ context_length = metadata.get('context_length')
499
+ batch_size = metadata.get('batch_size', 64)
500
+
501
+ if not warmup:
502
+ print(f"\nUsing context length: {context_length}")
503
+ print("\nStarting chat session. Press Ctrl+D to exit.")
504
+ print("Type your message and press Enter to chat.")
505
+
506
+ # Check if tokenizer has chat template and if it works
507
+ has_chat_template = False
508
+ try:
509
+ # Test if chat template works
510
+ test_messages = [{"role": "user", "content": "test"}]
511
+ tokenizer.apply_chat_template(test_messages, return_tensors="pt")
512
+ has_chat_template = True
513
+ if not warmup:
514
+ print("\nUsing chat template for prompts")
515
+ except:
516
+ if not warmup:
517
+ print("\nUsing manual formatting for prompts")
518
+
519
+ conversation = []
520
+
521
+ try:
522
+ while True:
523
+ try:
524
+ if not warmup:
525
+ print(f"\n{LIGHT_GREEN}You:{RESET_COLOR}", end=' ', flush=True)
526
+ if auto_prompt is not None:
527
+ user_input = auto_prompt
528
+ if not warmup:
529
+ print(user_input)
530
+ else:
531
+ user_input = input().strip()
532
+ except EOFError:
533
+ if not warmup:
534
+ print("\nExiting chat...")
535
+ break
536
+
537
+ if not user_input:
538
+ continue
539
+
540
+ # Format prompt based on tokenizer capabilities
541
+ if has_chat_template:
542
+ messages = [{"role": "user", "content": user_input}]
543
+ input_ids = tokenizer.apply_chat_template(
544
+ messages,
545
+ return_tensors="pt",
546
+ add_generation_prompt=True
547
+ ).to(torch.int32)
548
+ else:
549
+ # Manual formatting for Llama models without chat template
550
+ formatted_prompt = f"[INST] {user_input} [/INST]"
551
+ input_ids = tokenizer(
552
+ formatted_prompt,
553
+ return_tensors="pt",
554
+ add_special_tokens=True
555
+ ).input_ids.to(torch.int32)
556
+
557
+ context_pos = input_ids.size(1)
558
+
559
+ if not warmup:
560
+ print(f"\n{LIGHT_BLUE}Assistant:{RESET_COLOR}", end=' ', flush=True)
561
+
562
+ # Initialize token printer
563
+ token_printer = TokenPrinter(tokenizer)
564
+ tokens_generated = 0 # Track number of tokens
565
+
566
+ try:
567
+ # Start prefill timing
568
+ prefill_start = time.time()
569
+
570
+ # Run prefill with state
571
+ current_pos = run_prefill(
572
+ embed_model,
573
+ ffn_models,
574
+ input_ids,
575
+ context_pos,
576
+ context_length,
577
+ batch_size,
578
+ state
579
+ )
580
+
581
+ # Calculate prefill timing
582
+ prefill_time = time.time() - prefill_start
583
+ prefill_tokens = context_pos # Number of tokens in input
584
+ prefill_tokens_per_sec = prefill_tokens / prefill_time if prefill_time > 0 else 0
585
+
586
+ # Generation loop with state
587
+ input_ids = input_ids
588
+ pos = context_pos
589
+ inference_start = time.time()
590
+ inference_tokens = 0
591
+
592
+ while pos < context_length - 1:
593
+ # Generate next token
594
+ next_token = generate_next_token(
595
+ embed_model,
596
+ ffn_models,
597
+ lmhead_model,
598
+ input_ids,
599
+ pos,
600
+ context_length,
601
+ state
602
+ )
603
+
604
+ # Add token to sequence
605
+ if pos < input_ids.size(1):
606
+ input_ids[0, pos] = next_token
607
+ else:
608
+ input_ids = torch.cat([
609
+ input_ids,
610
+ torch.tensor([[next_token]], dtype=torch.int32)
611
+ ], dim=1)
612
+
613
+ # Add to printer only if not in warmup
614
+ if not warmup:
615
+ token_printer.add_token(next_token)
616
+ token_printer.drain_buffer()
617
+
618
+ pos += 1
619
+ tokens_generated += 1
620
+ inference_tokens += 1
621
+
622
+ # Check limits
623
+ if warmup and tokens_generated >= WARMUP_TOKEN_LIMIT:
624
+ break
625
+
626
+ if next_token == tokenizer.eos_token_id:
627
+ break
628
+
629
+ # Calculate inference timing
630
+ inference_time = time.time() - inference_start
631
+ inference_tokens_per_sec = inference_tokens / inference_time if inference_time > 0 else 0
632
+
633
+ # Get final response and add to conversation
634
+ if not warmup:
635
+ response = token_printer.stop()
636
+ # Print timing stats
637
+ prefill_ms = prefill_time * 1000 # Convert to milliseconds
638
+ print(f"\nPrefill: {prefill_ms:.1f}ms ({prefill_tokens_per_sec:.1f} t/s)")
639
+ print(f"Inference: {inference_tokens_per_sec:.1f} t/s")
640
+ print(f"Total: Generated {tokens_generated} tokens in {prefill_time + inference_time:.2f}s")
641
+ conversation.append({"role": "assistant", "content": response})
642
+ else:
643
+ token_printer.stop() # Clean up without printing stats
644
+
645
+ # Exit after one response in auto_prompt mode
646
+ if auto_prompt is not None:
647
+ break
648
+
649
+ except KeyboardInterrupt:
650
+ print("\nGeneration interrupted")
651
+ token_printer.stop()
652
+ continue
653
+
654
+ except Exception as e:
655
+ print(f"\nError in chat loop: {str(e)}")
656
+ import traceback
657
+ traceback.print_exc()
658
+
659
+ def parse_args():
660
+ parser = argparse.ArgumentParser(description='Chat with CoreML LLaMA (c) 2025 Anemll')
661
+
662
+ # Add meta.yaml option
663
+ parser.add_argument('--meta', type=str, help='Path to meta.yaml to load all parameters')
664
+
665
+ # Model paths
666
+ parser.add_argument('--d', '--dir', type=str, default='.',
667
+ help='Directory containing model files (default: current directory)')
668
+ parser.add_argument('--embed', type=str, required=False,
669
+ help='Path to embeddings model (relative to --dir)')
670
+ parser.add_argument('--ffn', type=str, required=False,
671
+ help='Path to FFN model (can be chunked, relative to --dir)')
672
+ parser.add_argument('--lmhead', type=str, required=False,
673
+ help='Path to LM head model (relative to --dir)')
674
+ parser.add_argument('--tokenizer', type=str, required=False,
675
+ help='Path to tokenizer')
676
+
677
+ # Add new argument for auto-generation
678
+ parser.add_argument('--prompt', type=str,
679
+ help='If specified, run once with this prompt and exit')
680
+
681
+ # Model configuration
682
+ parser.add_argument('--context-length', type=int,
683
+ help='Context length for the model (default: 512). If not provided, it is detected from the model directory name (ctxNUMBER)')
684
+
685
+ args = parser.parse_args()
686
+
687
+ # If meta.yaml is provided, load parameters from it
688
+ if args.meta:
689
+ try:
690
+ with open(args.meta, 'r') as f:
691
+ meta = yaml.safe_load(f)
692
+ params = meta['model_info']['parameters']
693
+
694
+ # Set model directory to meta.yaml directory if not specified
695
+ if not args.d or args.d == '.':
696
+ args.d = str(Path(args.meta).parent)
697
+
698
+ # Build model paths based on parameters
699
+ prefix = params.get('model_prefix', 'llama') # Default to 'llama' if not specified
700
+ lut_ffn = f"_lut{params['lut_ffn']}" if params['lut_ffn'] != 'none' else ''
701
+ lut_lmhead = f"_lut{params['lut_lmhead']}" if params['lut_lmhead'] != 'none' else ''
702
+ num_chunks = int(params['num_chunks'])
703
+
704
+ # Set model paths if not specified
705
+ if not args.embed:
706
+ args.embed = f'{prefix}_embeddings'
707
+ if not args.lmhead:
708
+ args.lmhead = f'{prefix}_lm_head{lut_lmhead}'
709
+ if not args.ffn:
710
+ args.ffn = f'{prefix}_FFN_PF{lut_ffn}_chunk_01of{num_chunks:02d}'
711
+ if not args.tokenizer:
712
+ args.tokenizer = args.d
713
+
714
+ # Set other parameters
715
+ args.context_length = int(params['context_length'])
716
+ args.batch_size = int(params['batch_size'])
717
+ args.num_chunks = num_chunks
718
+
719
+ print(f"\nLoaded parameters from {args.meta}:")
720
+ print(f" Context Length: {args.context_length}")
721
+ print(f" Batch Size: {args.batch_size}")
722
+ print(f" Num Chunks: {args.num_chunks}")
723
+ print(f" Models Directory: {args.d}")
724
+ print(f" Embeddings: {args.embed}")
725
+ print(f" LM Head: {args.lmhead}")
726
+ print(f" FFN: {args.ffn}")
727
+
728
+ except Exception as e:
729
+ print(f"\nError loading meta.yaml: {str(e)}")
730
+ sys.exit(1)
731
+
732
+ return args
733
+
734
+ def main():
735
+ args = parse_args()
736
+
737
+ # Convert directory to absolute path
738
+ model_dir = Path(args.d).resolve()
739
+ if not model_dir.exists():
740
+ print(f"\nError: Model directory not found: {model_dir}")
741
+ return 1
742
+
743
+ print(f"\nUsing model directory: {model_dir}")
744
+ print(f"Context length: {args.context_length}")
745
+
746
+ try:
747
+ # Update paths to be relative to model directory
748
+ args.embed = str(model_dir / args.embed)
749
+ args.ffn = str(model_dir / args.ffn)
750
+ args.lmhead = str(model_dir / args.lmhead)
751
+
752
+ # Handle tokenizer path separately since it's not relative to model_dir
753
+ if args.tokenizer is None:
754
+ args.tokenizer = str(model_dir)
755
+
756
+ if not Path(args.tokenizer).exists():
757
+ print(f"\nError: Tokenizer directory not found: {args.tokenizer}")
758
+ return 1
759
+
760
+ args.tokenizer = str(Path(args.tokenizer).resolve()) # Convert to absolute path
761
+ print(f"Using tokenizer path: {args.tokenizer}")
762
+
763
+ metadata = {}
764
+ # Load models and extract metadata
765
+ embed_model, ffn_models, lmhead_model, metadata = load_models(args,metadata)
766
+
767
+ print(f"\nMetadata before args.context_length: {metadata}")
768
+
769
+ # Override context length from command line if provided
770
+ if args.context_length is not None:
771
+ metadata['context_length'] = args.context_length
772
+ metadata['state_length'] = args.context_length # Also update state_length
773
+ print(f"\nOverriding context length from command line: {args.context_length}")
774
+
775
+ print(f"\nMetadata after load_models: {metadata}")
776
+
777
+ # Load tokenizer with resolved path
778
+ tokenizer = initialize_tokenizer(args.tokenizer)
779
+ if tokenizer is None:
780
+ raise RuntimeError("Failed to initialize tokenizer")
781
+
782
+ # Create unified state once
783
+ state = create_unified_state(ffn_models, metadata['context_length'])
784
+
785
+ # Warmup runs to prevent Python GIL issues with CoreML !
786
+ for i in range(2):
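+ # Each warmup pass sends a short fixed prompt and stops after WARMUP_TOKEN_LIMIT tokens,
+ # so the models are fully loaded on the device before the interactive session starts.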
787
+ chat_loop(
788
+ embed_model=embed_model,
789
+ ffn_models=ffn_models,
790
+ lmhead_model=lmhead_model,
791
+ tokenizer=tokenizer,
792
+ metadata=metadata,
793
+ state=state,
794
+ warmup=True,
795
+ auto_prompt="who are you?"
796
+ )
797
+
798
+ # Main run
799
+ chat_loop(
800
+ embed_model=embed_model,
801
+ ffn_models=ffn_models,
802
+ lmhead_model=lmhead_model,
803
+ tokenizer=tokenizer,
804
+ metadata=metadata,
805
+ state=state,
806
+ warmup=False,
807
+ auto_prompt=args.prompt
808
+ )
809
+
810
+ except Exception as e:
811
+ print(f"\nError: {str(e)}")
812
+ import traceback
813
+ traceback.print_exc()
814
+ return 1
815
+
816
+ return 0
817
+
818
+ if __name__ == "__main__":
819
+ exit(main())
chat_full.py ADDED
@@ -0,0 +1,853 @@
1
+ #!/usr/bin/env python3
+ # chat_full.py
4
+ # Copyright (c) 2025 Anemll
5
+ # Licensed under the MIT License
6
+
7
+ import argparse
8
+ import os
9
+ import re
10
+ import glob
11
+ from pathlib import Path
12
+ import coremltools as ct
13
+ from transformers import LlamaTokenizer, AutoTokenizer
14
+ import torch
15
+ import torch.nn.functional as F
16
+ import numpy as np
17
+ import queue
18
+ import threading
19
+ import time
20
+ import yaml
21
+ import sys
22
+
23
+ # ANSI color codes
24
+ LIGHT_BLUE = "\033[94m"
25
+ DARK_BLUE = "\033[34m"
26
+ LIGHT_GREEN = "\033[92m"
27
+ RESET_COLOR = "\033[0m"
28
+
29
+ # Add at the top with other constants
30
+ WARMUP_TOKEN_LIMIT = 10 # Maximum tokens to generate during warmup
31
+
32
+ class TokenPrinter:
33
+ """Handles background printing of generated tokens."""
34
+ def __init__(self, tokenizer):
35
+ self.tokenizer = tokenizer
36
+ self.token_queue = queue.Queue()
37
+ self.stop_event = threading.Event()
38
+ self.thread = None
39
+ self.buffer = ""
40
+ self.lock = threading.Lock()
41
+ self.thinking = True # Track if we're still in thinking mode
42
+ self.decoding_buffer = [] # Buffer for token IDs
43
+ # Timing and stats tracking
44
+ self.start_time = time.time()
45
+ self.token_count = 0
46
+ self.prefill_time = 0
47
+ self.inference_time = 0
48
+ self.context_pos = 0
49
+ self.start()
50
+
51
+ def start(self):
52
+ """Start the printer thread."""
53
+ if self.thread is None:
54
+ self.thread = threading.Thread(target=self._print_worker)
55
+ self.thread.daemon = True
56
+ self.thread.start()
57
+
58
+ def add_token(self, token_id):
59
+ """Add a token to the print queue."""
60
+ if not self.stop_event.is_set():
61
+ self.token_queue.put(token_id)
62
+ self.token_count += 1
63
+
64
+ def drain_buffer(self):
65
+ """Decode token IDs from decoding_buffer in the main thread."""
66
+ if not self.decoding_buffer:
67
+ return
68
+
69
+ # Decode all tokens at once in the main thread
70
+ token_str = self.tokenizer.decode(self.decoding_buffer)
71
+ self.decoding_buffer.clear()
72
+
73
+ # Color-handling logic
74
+ if self.thinking and "</think>" in token_str:
75
+ self.thinking = False
76
+ parts = token_str.split("</think>")
77
+ if len(parts) > 0:
78
+ print(parts[0] + "</think>", end='', flush=True)
79
+ if len(parts) > 1:
80
+ print(LIGHT_BLUE + parts[1], end='', flush=True)
81
+ else:
82
+ if not self.thinking:
83
+ print(LIGHT_BLUE + token_str, end='', flush=True)
84
+ else:
85
+ print(token_str, end='', flush=True)
86
+
87
+ def _print_worker(self):
88
+ """Worker thread that takes token_ids from the queue."""
89
+ while not self.stop_event.is_set():
90
+ try:
91
+ token_id = self.token_queue.get(timeout=0.01)
92
+ with self.lock:
93
+ self.decoding_buffer.append(token_id)
94
+ self.token_queue.task_done()
95
+ except queue.Empty:
96
+ continue
97
+ except Exception as e:
98
+ print(f"\nError: Token printer error: {str(e)}")
99
+ break
100
+
101
+ def stop(self):
102
+ """Stop the printer thread."""
103
+ if self.thread and self.thread.is_alive():
104
+ self.stop_event.set()
105
+ try:
106
+ self.thread.join(timeout=1.0)
107
+ except Exception:
108
+ pass
109
+ print(RESET_COLOR) # Reset color at the end
110
+ return self.buffer
111
+
112
+ def set_timing(self, prefill_time, inference_time, context_pos):
113
+ """Set timing information."""
114
+ self.prefill_time = prefill_time
115
+ self.inference_time = inference_time
116
+ self.context_pos = context_pos
117
+
118
+ def parse_model_path(path):
119
+ """Parse model path and return full path with .mlmodelc or .mlpackage extension."""
120
+ path = Path(path)
121
+
122
+ # If path exists exactly as specified, return it
123
+ if path.exists():
124
+ return str(path)
125
+
126
+ # Try with both extensions
127
+ candidates = [
128
+ path, # Original path
129
+ path.with_suffix('.mlmodelc'), # With .mlmodelc
130
+ path.with_suffix('.mlpackage'), # With .mlpackage
131
+ Path(str(path) + '.mlmodelc'), # Handle case where extension is included
132
+ Path(str(path) + '.mlpackage')
133
+ ]
134
+
135
+ # Try all possible paths
136
+ for candidate in candidates:
137
+ if candidate.exists():
138
+ print(f"Found model at: {candidate}")
139
+ return str(candidate)
140
+
141
+ # If we get here, no valid path was found
142
+ print("\nError: Model not found. Tried the following paths:")
143
+ for candidate in candidates:
144
+ print(f" {candidate}")
145
+ raise FileNotFoundError(f"Model not found: {path}")
146
+
147
+ def parse_ffn_filename(path):
148
+ """Parse FFN model filename to extract chunk information."""
149
+ path = Path(path)
150
+ pattern = r'FFN_PF.*_chunk_(\d+)of(\d+)'
151
+ match = re.search(pattern, path.name)
152
+
153
+ if match:
154
+ current_chunk = int(match.group(1))
155
+ total_chunks = int(match.group(2))
156
+ return current_chunk, total_chunks
157
+ return None, None
158
+
159
+ def find_all_chunks(base_path):
160
+ """Find all chunk files matching the base FFN path pattern."""
161
+ path = Path(base_path)
162
+ pattern = re.sub(r'_chunk_\d+of\d+', '_chunk_*', str(path))
163
+ return sorted(glob.glob(pattern))
164
+
165
+ def load_model(path, function_name=None):
166
+ """Load a CoreML model, handling both .mlmodelc and .mlpackage formats."""
167
+ path = Path(path)
168
+ compute_unit = ct.ComputeUnit.CPU_AND_NE
169
+
170
+ try:
171
+ if path.suffix == '.mlmodelc':
172
+ # For compiled models (.mlmodelc), use CompiledMLModel
173
+ if function_name:
174
+ return ct.models.CompiledMLModel(str(path), compute_unit, function_name=function_name)
175
+ else:
176
+ return ct.models.CompiledMLModel(str(path), compute_unit)
177
+ else:
178
+ # For packages (.mlpackage)
179
+ if function_name:
180
+ return ct.models.MLModel(str(path), function_name=function_name)
181
+ else:
182
+ return ct.models.MLModel(str(path))
183
+
184
+ except RuntimeError as e:
185
+ if "valid manifest does not exist" in str(e):
186
+ print(f"\nError: Could not load compiled model at {path}")
187
+ print("This might be because:")
188
+ print("1. The model is not properly compiled")
189
+ print("2. The model was compiled for a different OS version")
190
+ print("3. The model needs to be recompiled")
191
+ print("\nTry using the .mlpackage version instead, or recompile the model.")
192
+ raise
193
+
194
+ def load_metadata(model,args):
195
+ # Extract metadata and config parameters
196
+ metadata = {}
197
+ if hasattr(model, 'user_defined_metadata'):
198
+ meta = model.user_defined_metadata
199
+
200
+ # Extract key parameters with defaults
201
+ metadata['context_length'] = int(meta.get('com.anemll.context_length', 512))
202
+ metadata['state_length'] = int(meta.get('com.anemll.state_length', metadata['context_length'])) # Added state_length
203
+ metadata['batch_size'] = int(meta.get('com.anemll.batch_size', 64))
204
+ metadata['lut_bits'] = int(meta.get('com.anemll.lut_bits', 0))
205
+ metadata['num_chunks'] = int(meta.get('com.anemll.num_chunks', 1))
206
+
207
+ print("\nExtracted Parameters:")
208
+ print(f" Context Length: {metadata['context_length']}")
209
+ print(f" State Length: {metadata['state_length']}")
210
+ print(f" Prefill Batch Size: {metadata['batch_size']}")
211
+ print(f" LUT Bits: {metadata['lut_bits']}")
212
+ print(f" Number of Chunks: {metadata['num_chunks']}")
213
+
214
+ # Print model info
215
+ print("\nModel Info:")
216
+ if 'com.anemll.info' in meta:
217
+ print(f" {meta['com.anemll.info']}")
218
+ if 'com.github.apple.coremltools.version' in meta:
219
+ print(f" CoreML Tools: {meta['com.github.apple.coremltools.version']}")
220
+
221
+ # Print model input/output shapes
222
+ print("\nModel Shapes:")
223
+ if hasattr(model, 'input_description'):
224
+ print(" Inputs:")
225
+ for name, desc in model.input_description.items():
226
+ print(f" {name}: {desc}")
227
+ if hasattr(model, 'output_description'):
228
+ print(" Outputs:")
229
+ for name, desc in model.output_description.items():
230
+ print(f" {name}: {desc}")
231
+ else:
232
+ print("\nWarning: No metadata found in model")
233
+
234
+ # Check if model directory name contains context length pattern (ctxXXX)
235
+ ctx_len = 512
236
+ if args.context_length is None:
237
+ import re
238
+ ctx_match = re.search(r'ctx(\d+)', str(args.d))
239
+ if ctx_match:
240
+ ctx_len0 = int(ctx_match.group(1))
241
+ if 512 <= ctx_len0 <= 8096:
242
+ ctx_len = ctx_len0
243
+ print(f"\nDetected context length {ctx_len} from directory name")
244
+ else:
245
+ print(f"\nWarning: Could not detect context length from directory name {args.d}; using default {ctx_len}")
246
+ else:
247
+ ctx_len = args.context_length
248
+
249
+ # Use defaults
250
+ metadata['context_length'] = ctx_len
251
+ metadata['state_length'] = ctx_len
252
+ metadata['batch_size'] = 64
253
+ metadata['lut_bits'] = 4
254
+ metadata['num_chunks'] = 4
255
+ print("\nUsing default parameters:")
256
+ print(f" Context Length: {metadata['context_length']}")
257
+ print(f" State Length: {metadata['state_length']}")
258
+ print(f" Prefill Batch Size: {metadata['batch_size']}")
259
+ print(f" LUT Bits: {metadata['lut_bits']}")
260
+ print(f" Number of Chunks: {metadata['num_chunks']}")
261
+ return metadata
262
+
263
+ def load_models(args,metadata):
264
+ """Load all required models and extract metadata."""
265
+ print("\nLoading models...")
266
+
267
+ try:
268
+ # Load embeddings model
269
+ print("\nLoading embeddings model...")
270
+ embed_path = parse_model_path(args.embed)
271
+ print(f"Loading from: {embed_path}")
272
+ embed_model = load_model(embed_path)
273
+ print("Embeddings model loaded successfully")
274
+ metadata = load_metadata(embed_model,args)
275
+
276
+
277
+
278
+ # Load LM head model
279
+ print("\nLoading LM head model...")
280
+ lmhead_path = parse_model_path(args.lmhead)
281
+ print(f"Loading from: {lmhead_path}")
282
+ lmhead_model = load_model(lmhead_path)
283
+ print("LM head model loaded successfully")
284
+
285
+ # Parse FFN path and find chunks if needed
286
+ print("\nLoading FFN+PREFILL model(s)...")
287
+ ffn_path = parse_model_path(args.ffn)
288
+ chunk_no, total_chunks = parse_ffn_filename(ffn_path)
289
+
290
+ ffn_models = []
291
+ if chunk_no and total_chunks:
292
+ print(f"\nDetected chunked FFN+PREFILL model ({total_chunks} chunks)")
293
+ # Find and load all chunks
294
+ chunk_paths = find_all_chunks(ffn_path)
295
+ if len(chunk_paths) != total_chunks:
296
+ raise ValueError(f"Found {len(chunk_paths)} chunks but filename indicates {total_chunks} chunks")
297
+
298
+ for chunk_path in chunk_paths:
299
+ print(f"\nLoading FFN+PREFILL chunk: {Path(chunk_path).name}")
300
+ try:
301
+ # For chunked models, we need both infer and prefill functions
302
+ ffn_models.append({
303
+ 'infer': load_model(chunk_path, function_name='infer'),
304
+ 'prefill': load_model(chunk_path, function_name='prefill')
305
+ })
306
+ print("Chunk loaded successfully")
307
+ except Exception as e:
308
+ print(f"Error loading chunk {chunk_path}: {str(e)}")
309
+ raise
310
+ metadata = load_metadata(ffn_models[0],args)
311
+
312
+ else:
313
+ print("\nLoading single FFN model...")
314
+ ffn_models.append(load_model(ffn_path))
315
+ print("FFN model loaded successfully")
316
+
317
+ return embed_model, ffn_models, lmhead_model, metadata
318
+
319
+ except Exception as e:
320
+ print(f"\nError loading models: {str(e)}")
321
+ print("\nPlease ensure all model files exist and are accessible.")
322
+ print("Expected files:")
323
+ print(f" Embeddings: {args.embed}")
324
+ print(f" LM Head: {args.lmhead}")
325
+ print(f" FFN: {args.ffn}")
326
+ raise
327
+
328
+ # At the top of the file, make this a default path
329
+
330
+ def initialize_tokenizer(model_path=None):
331
+ """Initialize and configure the tokenizer."""
332
+ try:
333
+
334
+
335
+ tokenizer = AutoTokenizer.from_pretrained(
336
+ str(model_path),
337
+ use_fast=False,
338
+ trust_remote_code=True
339
+ )
340
+
341
+ print("\nTokenizer Configuration:")
342
+ print(f"Tokenizer type: {type(tokenizer)}")
343
+ print(f"Tokenizer name: {tokenizer.__class__.__name__}")
344
+ print(f"Vocabulary size: {len(tokenizer)}")
345
+ print(f"Model max length: {tokenizer.model_max_length}")
346
+
347
+ if tokenizer.pad_token is None:
348
+ tokenizer.pad_token = tokenizer.eos_token
349
+ tokenizer.pad_token_id = tokenizer.eos_token_id
350
+ print("Set PAD token to EOS token")
351
+
352
+ tokenizer.padding_side = "left"
353
+
354
+ print(f"\nSpecial Tokens:")
355
+ print(f"PAD token: '{tokenizer.pad_token}' (ID: {tokenizer.pad_token_id})")
356
+ print(f"EOS token: '{tokenizer.eos_token}' (ID: {tokenizer.eos_token_id})")
357
+ print(f"BOS token: '{tokenizer.bos_token}' (ID: {tokenizer.bos_token_id})")
358
+ print(f"UNK token: '{tokenizer.unk_token}' (ID: {tokenizer.unk_token_id})")
359
+
360
+ return tokenizer
361
+
362
+ except Exception as e:
363
+ print(f"\nError: Failed to load tokenizer from {model_path}")
364
+ print(f"Error details: {str(e)}")
365
+ print(f"Error type: {type(e)}")
366
+ print("\nThis code requires a Llama 3.2 model for chat template functionality.")
367
+ print("Please provide the path to a Llama 3.2 model directory.")
368
+ import traceback
369
+ traceback.print_exc()
370
+ raise
371
+
372
+
373
+
374
+ def make_causal_mask(length, start):
375
+ """Create causal attention mask."""
376
+ mask = np.full((1, 1, length, length), -np.inf, dtype=np.float16)
377
+ row_indices = np.arange(length).reshape(length, 1)
378
+ col_indices = np.arange(length).reshape(1, length)
379
+ mask[:, :, col_indices <= (row_indices + start)] = 0
380
+ return mask
381
+
382
+ def run_prefill(embed_model, ffn_models, input_ids, current_pos, context_length, batch_size, state):
383
+ """Run prefill on the input sequence."""
384
+ #print(f"[DEBUG] Running prefill from 0 to {current_pos}")
385
+
386
+ # Process in batches
387
+ batch_pos = 0
388
+ while batch_pos < current_pos:
389
+ batch_end = min(batch_pos + batch_size, current_pos)
390
+ current_batch_size = batch_end - batch_pos
391
+
392
+ #print(f"[DEBUG] Prefill batch {batch_pos}-{batch_end} (size={current_batch_size})")
393
+
394
+ # Get current batch
395
+ batch_input = input_ids[:, batch_pos:batch_end]
396
+
397
+ # Pad to full batch size
398
+ batch_input = F.pad(
399
+ batch_input,
400
+ (0, batch_size - current_batch_size),
401
+ value=0
402
+ )
403
+
404
+ # Generate position IDs for this batch
405
+ position_ids = torch.arange(batch_pos, batch_pos + batch_size, dtype=torch.int32)
406
+
407
+ # Create causal mask for this batch
408
+ causal_mask = make_causal_mask(context_length, 0) # Always start from 0 for prefill
409
+ causal_mask = torch.tensor(causal_mask, dtype=torch.float16)
410
+ batch_causal_mask = causal_mask[:, :, batch_pos:batch_pos + batch_size, :]
411
+
412
+ # Run embeddings
413
+ hidden_states = torch.from_numpy(
414
+ embed_model.predict({'input_ids': batch_input.numpy()})['hidden_states']
415
+ )
416
+
417
+ # Run through FFN chunks
418
+ for ffn_model in ffn_models:
419
+ if isinstance(ffn_model, dict):
420
+ inputs = {
421
+ 'hidden_states': hidden_states.numpy(),
422
+ 'position_ids': position_ids.numpy(),
423
+ 'causal_mask': batch_causal_mask.numpy(),
424
+ 'current_pos': np.array([batch_pos], dtype=np.int32)
425
+ }
426
+ output = ffn_model['prefill'].predict(inputs, state)
427
+ hidden_states = torch.from_numpy(output['output_hidden_states'])
428
+
429
+ batch_pos = batch_end
430
+
431
+ return torch.tensor([current_pos], dtype=torch.int32)
432
+
433
+ def generate_next_token(embed_model, ffn_models, lmhead_model, input_ids, pos, context_length, state=None, temperature=0.0):
434
+ """Generate the next token."""
435
+ # Get current token
436
+ current_token = input_ids[:, pos-1:pos]
437
+
438
+ # Run embeddings
439
+ hidden_states = torch.from_numpy(
440
+ embed_model.predict({'input_ids': current_token.numpy()})['hidden_states']
441
+ )
442
+
443
+ # Create masks
444
+ update_mask = torch.zeros((1, 1, context_length, 1), dtype=torch.float16)
445
+ update_mask[0, 0, pos-1, 0] = 1.0
446
+ position_ids = torch.tensor([pos-1], dtype=torch.int32)
447
+
448
+ # Create causal mask for current position
449
+ causal_mask = make_causal_mask(context_length, 0) # Always start from 0 for generation
450
+ single_causal_mask = torch.tensor(causal_mask[:, :, pos-1:pos, :], dtype=torch.float16)
451
+
452
+ # Run through FFN chunks
453
+ for ffn_model in ffn_models:
454
+ if isinstance(ffn_model, dict):
455
+ inputs = {
456
+ 'hidden_states': hidden_states.numpy(),
457
+ 'update_mask': update_mask.numpy(),
458
+ 'position_ids': position_ids.numpy(),
459
+ 'causal_mask': single_causal_mask.numpy(),
460
+ 'current_pos': position_ids.numpy()
461
+ }
462
+ output = ffn_model['infer'].predict(inputs, state)
463
+ hidden_states = torch.from_numpy(output['output_hidden_states'])
464
+
465
+ # Run LM head and get next token
466
+ lm_output = lmhead_model.predict({'hidden_states': hidden_states.numpy()})
467
+
468
+ if 'logits1' in lm_output:
469
+ logits_parts = []
470
+ for i in range(1, 9):
471
+ key = f'logits{i}'
472
+ if key in lm_output:
473
+ logits_parts.append(torch.from_numpy(lm_output[key]))
474
+ logits = torch.cat(logits_parts, dim=-1)
475
+ else:
476
+ logits = torch.from_numpy(lm_output['output_logits'])
477
+
478
+ if temperature > 0:
479
+ logits = logits / temperature
480
+ probs = F.softmax(logits[0, -1, :], dim=-1)
481
+ next_token = torch.multinomial(probs, num_samples=1).item()
482
+ else:
483
+ next_token = torch.argmax(logits[0, -1, :]).item()
484
+
485
+ return next_token
486
+
487
+ def create_unified_state(ffn_models, context_length):
488
+ """Create unified KV cache state for transformer."""
489
+ if isinstance(ffn_models[0], dict):
490
+ # Use first FFN model's prefill function to create state
491
+ state = ffn_models[0]['prefill'].make_state()
492
+ print(f"\nCreated unified transformer state for {len(ffn_models)} chunks")
493
+ return state
494
+ else:
495
+ state = ffn_models[0].make_state()
496
+ print("\nCreated unified transformer state")
497
+ return state
498
+
499
+ def get_user_input():
500
+ sys.stdout.write(f"\n{LIGHT_GREEN}You:{RESET_COLOR} ")
501
+ sys.stdout.flush()
502
+ line = sys.stdin.readline()
503
+ if not line:
504
+ raise EOFError
505
+ return line.rstrip('\n')
506
+
507
+ def chat_loop(embed_model, ffn_models, lmhead_model, tokenizer, metadata, state, auto_prompt=None, warmup=False):
508
+ """Interactive chat loop."""
509
+ context_length = metadata.get('context_length')
510
+ batch_size = metadata.get('batch_size', 64)
511
+
512
+ if not warmup:
513
+ print(f"\nUsing context length: {context_length}")
514
+ print("\nStarting chat session. Press Ctrl+D to exit.")
515
+ print("Type your message and press Enter to chat.")
516
+
517
+ # Keep track of conversation history
518
+ conversation = []
519
+
520
+ try:
521
+ while True:
522
+ try:
523
+ if not warmup:
524
+ print(f"\n{LIGHT_GREEN}You:{RESET_COLOR}", end=' ', flush=True)
525
+ if auto_prompt is not None:
526
+ user_input = auto_prompt
527
+ if not warmup:
528
+ print(user_input)
529
+ else:
530
+ user_input = input().strip()
531
+ except EOFError:
532
+ if not warmup:
533
+ print("\nExiting chat...")
534
+ break
535
+
536
+ if not user_input:
537
+ continue
538
+
539
+ # Add user message to conversation
540
+ conversation.append({"role": "user", "content": user_input})
541
+
542
+ # Format using chat template with full history
543
+ base_input_ids = tokenizer.apply_chat_template(
544
+ conversation,
545
+ return_tensors="pt",
546
+ add_generation_prompt=True
547
+ ).to(torch.int32)
548
+
549
+ # Check if we need to trim history
550
+ while base_input_ids.size(1) > context_length - 100: # Leave room for response
551
+ # Remove oldest message pair (user + assistant)
552
+ if len(conversation) > 2:
553
+ conversation = conversation[2:] # Remove oldest pair
554
+ base_input_ids = tokenizer.apply_chat_template(
555
+ conversation,
556
+ return_tensors="pt",
557
+ add_generation_prompt=True
558
+ ).to(torch.int32)
559
+ else:
560
+ # If only current message remains and still too long, truncate
561
+ base_input_ids = base_input_ids[:, -context_length//2:]
562
+ break
563
+
564
+ context_pos = base_input_ids.size(1)
565
+
566
+ # Pad sequence to context_size
567
+ input_ids = F.pad(
568
+ base_input_ids,
569
+ (0, context_length - context_pos),
570
+ value=0
571
+ )
572
+
573
+ if not warmup:
574
+ print(f"\n{LIGHT_BLUE}Assistant:{RESET_COLOR}", end=' ', flush=True)
575
+
576
+ # Initialize token printer and collect response
577
+ token_printer = TokenPrinter(tokenizer)
578
+ response_tokens = []
579
+ generation_start_time = time.time()
580
+
581
+ try:
582
+ # Create initial causal mask
583
+ causal_mask = make_causal_mask(context_length, 0)
584
+ causal_mask = torch.tensor(causal_mask, dtype=torch.float16)
585
+
586
+ # Run prefill on entire context
587
+ current_pos = run_prefill(
588
+ embed_model,
589
+ ffn_models,
590
+ input_ids,
591
+ context_pos,
592
+ context_length,
593
+ batch_size,
594
+ state
595
+ )
596
+ #print(f"\n[DEBUG] After initial prefill - current_pos: {current_pos}")
597
+
598
+ # Generation loop
599
+ pos = context_pos
600
+ tokens_generated = 0
601
+ inference_start = time.time() # Start inference timing
602
+
603
+ while True:
604
+ # Check if we need to shift window
605
+ if pos >= context_length - 2:
606
+ # Calculate shift to maintain full batches
607
+ batch_size = metadata.get('batch_size', 64)
608
+ # Calculate max batches that fit in context
609
+ max_batches = context_length // batch_size
610
+ desired_batches = max(1, max_batches - 2) # Leave room for new tokens
611
+ new_size = min(desired_batches * batch_size, context_length - batch_size)
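+ # e.g. with context_length=1024 and batch_size=64: max_batches=16, desired_batches=14, new_size=896, so the most recent 896 tokens are retained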
612
+
613
+ # Create shifted input_ids
614
+ tmp = torch.zeros((1, context_length), dtype=torch.int32)
615
+ tmp[:,0:new_size] = input_ids[:,pos-new_size:pos]
616
+ input_ids = tmp
617
+
618
+ # Re-run prefill on the shifted window; the existing KV-cache state is kept (see the commented-out reset below)
619
+ # keep the same state
620
+ #state = create_unified_state(ffn_models, context_length)
621
+ current_pos = run_prefill(
622
+ embed_model,
623
+ ffn_models,
624
+ input_ids,
625
+ new_size, # Prefill the entire shifted content
626
+ context_length,
627
+ batch_size,
628
+ state
629
+ )
630
+
631
+ # Start generating from the next position
632
+ pos = new_size # Don't back up, continue from where we left off
633
+
634
+ #print(f"\n[DEBUG] After shift - next token will be at pos {pos}")
635
+ #print(f"[DEBUG] Context before next token: {tokenizer.decode(input_ids[0, pos-40:pos])}")
636
+
637
+ window_shifted = True
638
+
639
+ # Generate next token
640
+ next_token = generate_next_token(
641
+ embed_model,
642
+ ffn_models,
643
+ lmhead_model,
644
+ input_ids,
645
+ pos,
646
+ context_length,
647
+ state
648
+ )
649
+
650
+ # Add token
651
+ input_ids[0, pos] = next_token
652
+ if not warmup:
653
+ token_printer.add_token(next_token)
654
+ token_printer.drain_buffer()
655
+ response_tokens.append(next_token)
656
+
657
+ pos += 1
658
+ tokens_generated += 1
659
+
660
+ # In warmup mode, limit tokens
661
+ if warmup and tokens_generated >= WARMUP_TOKEN_LIMIT:
662
+ break
663
+
664
+ if next_token == tokenizer.eos_token_id:
665
+ break
666
+
667
+ inference_time = time.time() - inference_start # Calculate inference time
668
+
669
+ # Add assistant response to conversation
670
+ response_text = token_printer.stop()
671
+ conversation.append({"role": "assistant", "content": response_text})
672
+
673
+ # Print stats only if not in warmup
674
+ if not warmup:
675
+ total_time = time.time() - generation_start_time
676
+ prefill_time = total_time - inference_time
677
+ inference_tokens_per_sec = len(response_tokens) / inference_time if inference_time > 0 else 0
678
+ prefill_ms = prefill_time * 1000
679
+ prefill_tokens_per_sec = context_pos / prefill_time if prefill_time > 0 else 0
680
+ print(f"{DARK_BLUE}{inference_tokens_per_sec:.1f} t/s, "
681
+ f"TTFT: {prefill_ms:.1f}ms ({prefill_tokens_per_sec:.1f} t/s), "
682
+ f"{len(response_tokens)} tokens{RESET_COLOR}")
683
+
684
+ if auto_prompt is not None:
685
+ break
686
+
687
+ except KeyboardInterrupt:
688
+ if not warmup:
689
+ print("\nGeneration interrupted")
690
+ token_printer.stop()
691
+ continue
692
+
693
+ except Exception as e:
694
+ if not warmup:
695
+ print(f"\nError in chat loop: {str(e)}")
696
+ import traceback
697
+ traceback.print_exc()
698
+
699
+ def main():
700
+ parser = argparse.ArgumentParser(description='Full chat with CoreML LLaMA, with context window shifting (c) 2025 Anemll')
701
+
702
+ # Add meta.yaml option
703
+ parser.add_argument('--meta', type=str, help='Path to meta.yaml to load all parameters')
704
+
705
+ # Add existing arguments
706
+ parser.add_argument('--d', '--dir', type=str, default='.',
707
+ help='Directory containing model files (default: current directory)')
708
+ parser.add_argument('--embed', type=str, required=False,
709
+ help='Path to embeddings model (relative to --dir)')
710
+ parser.add_argument('--ffn', type=str, required=False,
711
+ help='Path to FFN model (can be chunked, relative to --dir)')
712
+ parser.add_argument('--lmhead', type=str, required=False,
713
+ help='Path to LM head model (relative to --dir)')
714
+ parser.add_argument('--tokenizer', type=str, required=False,
715
+ help='Path to tokenizer')
716
+
717
+ # Add new argument for auto-generation
718
+ parser.add_argument('--prompt', type=str,
719
+ help='If specified, run once with this prompt and exit')
720
+
721
+ # Model configuration
722
+ parser.add_argument('--context-length', type=int,
723
+ help='Context length for the model (default: 512); if not provided, it is detected from the model directory name (ctxNUMBER)')
724
+
725
+ args = parser.parse_args()
726
+
727
+ # If meta.yaml is provided, load parameters from it
728
+ if args.meta:
729
+ try:
730
+ with open(args.meta, 'r') as f:
731
+ meta = yaml.safe_load(f)
732
+ params = meta['model_info']['parameters']
733
+
734
+ # Set model directory to meta.yaml directory if not specified
735
+ if not args.d or args.d == '.':
736
+ args.d = str(Path(args.meta).parent)
737
+
738
+ # Build model paths based on parameters
739
+ lut_ffn = f"_lut{params['lut_ffn']}" if params['lut_ffn'] != 'none' else ''
740
+ lut_lmhead = f"_lut{params['lut_lmhead']}" if params['lut_lmhead'] != 'none' else ''
741
+ num_chunks = int(params['num_chunks'])
742
+
743
+ # Set model paths if not specified
744
+ if not args.embed:
745
+ args.embed = 'llama_embeddings'
746
+ if not args.lmhead:
747
+ args.lmhead = f'llama_lm_head{lut_lmhead}'
748
+ if not args.ffn:
749
+ args.ffn = f'llama_FFN_PF{lut_ffn}_chunk_01of{num_chunks:02d}'
750
+ if not args.tokenizer:
751
+ args.tokenizer = args.d
752
+
753
+ # Set other parameters
754
+ args.context_length = int(params['context_length'])
755
+ args.batch_size = int(params['batch_size'])
756
+ args.num_chunks = num_chunks
757
+
758
+ print(f"\nLoaded parameters from {args.meta}:")
759
+ print(f" Context Length: {args.context_length}")
760
+ print(f" Batch Size: {args.batch_size}")
761
+ print(f" Num Chunks: {args.num_chunks}")
762
+ print(f" Models Directory: {args.d}")
763
+ print(f" Embeddings: {args.embed}")
764
+ print(f" LM Head: {args.lmhead}")
765
+ print(f" FFN: {args.ffn}")
766
+
767
+ except Exception as e:
768
+ print(f"\nError loading meta.yaml: {str(e)}")
769
+ sys.exit(1)
770
+
771
+ # Convert directory to absolute path
772
+ model_dir = Path(args.d).resolve()
773
+ if not model_dir.exists():
774
+ print(f"\nError: Model directory not found: {model_dir}")
775
+ return 1
776
+
777
+ print(f"\nUsing model directory: {model_dir}")
778
+ print(f"Context length: {args.context_length}")
779
+
780
+ try:
781
+ # Update paths to be relative to model directory
782
+ args.embed = str(model_dir / args.embed)
783
+ args.ffn = str(model_dir / args.ffn)
784
+ args.lmhead = str(model_dir / args.lmhead)
785
+
786
+ # Handle tokenizer path separately since it's not relative to model_dir
787
+ if args.tokenizer is None:
788
+ args.tokenizer = str(model_dir)
789
+
790
+ if not Path(args.tokenizer).exists():
791
+ print(f"\nError: Tokenizer directory not found: {args.tokenizer}")
792
+ return 1
793
+
794
+ args.tokenizer = str(Path(args.tokenizer).resolve()) # Convert to absolute path
795
+ print(f"Using tokenizer path: {args.tokenizer}")
796
+
797
+ metadata = {}
798
+ # Load models and extract metadata
799
+ embed_model, ffn_models, lmhead_model, metadata = load_models(args, metadata)
800
+
801
+ print(f"\nMetadata befor args.context_length: {metadata}")
802
+
803
+ # Override context length from command line if provided
804
+ if args.context_length is not None:
805
+ metadata['context_length'] = args.context_length
806
+ metadata['state_length'] = args.context_length # Also update state_length
807
+ print(f"\nOverriding context length from command line: {args.context_length}")
808
+
809
+ print(f"\nMetadata after load_models: {metadata}")
810
+
811
+ # Load tokenizer with resolved path
812
+ tokenizer = initialize_tokenizer(args.tokenizer)
813
+ if tokenizer is None:
814
+ raise RuntimeError("Failed to initialize tokenizer")
815
+
816
+ # Create unified state once
817
+ state = create_unified_state(ffn_models, metadata['context_length'])
818
+
819
+ # Warmup runs to prevent Python GIL issues with CoreML!
820
+ for i in range(2):
821
+ chat_loop(
822
+ embed_model=embed_model,
823
+ ffn_models=ffn_models,
824
+ lmhead_model=lmhead_model,
825
+ tokenizer=tokenizer,
826
+ metadata=metadata,
827
+ state=state, # Pass the state
828
+ warmup=True,
829
+ auto_prompt="who are you?"
830
+ )
831
+
832
+ # Main run
833
+ chat_loop(
834
+ embed_model=embed_model,
835
+ ffn_models=ffn_models,
836
+ lmhead_model=lmhead_model,
837
+ tokenizer=tokenizer,
838
+ metadata=metadata,
839
+ state=state, # Pass the state
840
+ warmup=False,
841
+ auto_prompt=args.prompt
842
+ )
843
+
844
+ except Exception as e:
845
+ print(f"\nError: {str(e)}")
846
+ import traceback
847
+ traceback.print_exc()
848
+ return 1
849
+
850
+ return 0
851
+
852
+ if __name__ == "__main__":
853
+ exit(main())
meta.yaml ADDED
@@ -0,0 +1,20 @@
1
+ model_info:
2
+ name: anemll-DeepSeekR1-8B-ctx1024
3
+ version: 0.1.1
4
+ description: |
5
+ Demonstrates running DeepSeekR1-8B on Apple Neural Engine
6
+ Context length: 1024
7
+ Batch size: 64
8
+ Chunks: 8
9
+ license: MIT
10
+ author: Anemll
11
+ framework: Core ML
12
+ language: Python
13
+ parameters:
14
+ context_length: 1024
15
+ batch_size: 64
16
+ lut_embeddings: none
17
+ lut_ffn: 6
18
+ lut_lmhead: 6
19
+ num_chunks: 8
20
+ model_prefix: DeepSeek
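For reference, a minimal usage sketch (assumptions: the chat script above is saved locally as chat.py, and the .mlmodelc.zip archives in this repository have been unzipped alongside this meta.yaml). The --meta option supplies context_length, batch_size, the LUT settings, and num_chunks from the parameters block above; --prompt runs a single generation and exits instead of starting the interactive chat loop.

python chat.py --meta ./meta.yaml
python chat.py --meta ./meta.yaml --prompt "who are you?"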
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,35 @@
1
+ {
2
+ "add_bos_token": true,
3
+ "add_eos_token": false,
4
+ "bos_token": {
5
+ "__type": "AddedToken",
6
+ "content": "<|begin▁of▁sentence|>",
7
+ "lstrip": false,
8
+ "normalized": true,
9
+ "rstrip": false,
10
+ "single_word": false
11
+ },
12
+ "clean_up_tokenization_spaces": false,
13
+ "eos_token": {
14
+ "__type": "AddedToken",
15
+ "content": "<|end▁of▁sentence|>",
16
+ "lstrip": false,
17
+ "normalized": true,
18
+ "rstrip": false,
19
+ "single_word": false
20
+ },
21
+ "legacy": true,
22
+ "model_max_length": 16384,
23
+ "pad_token": {
24
+ "__type": "AddedToken",
25
+ "content": "<|end▁of▁sentence|>",
26
+ "lstrip": false,
27
+ "normalized": true,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ },
31
+ "sp_model_kwargs": {},
32
+ "unk_token": null,
33
+ "tokenizer_class": "LlamaTokenizerFast",
34
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>'}}{% endif %}"
35
+ }