winglian committed on
Commit 21cf09b
Parent: 15d3a65

remove lora fused packing test (#758)

Files changed (1)
  1. tests/e2e/test_fused_llama.py +0 -44
tests/e2e/test_fused_llama.py CHANGED
@@ -25,50 +25,6 @@ class TestFusedLlama(unittest.TestCase):
     Test case for Llama models using Fused layers
     """
 
-    def test_lora_packing(self):
-        # pylint: disable=duplicate-code
-        output_dir = tempfile.mkdtemp()
-        cfg = DictDefault(
-            {
-                "base_model": "JackFram/llama-68m",
-                "base_model_config": "JackFram/llama-68m",
-                "flash_attention": True,
-                "flash_attn_fuse_qkv": True,
-                "flash_attn_fuse_mlp": True,
-                "sample_packing": True,
-                "sequence_len": 1024,
-                "load_in_8bit": True,
-                "val_set_size": 0.1,
-                "special_tokens": {
-                    "unk_token": "<unk>",
-                    "bos_token": "<s>",
-                    "eos_token": "</s>",
-                },
-                "datasets": [
-                    {
-                        "path": "mhenrichsen/alpaca_2k_test",
-                        "type": "alpaca",
-                    },
-                ],
-                "num_epochs": 2,
-                "micro_batch_size": 2,
-                "gradient_accumulation_steps": 1,
-                "output_dir": output_dir,
-                "learning_rate": 0.00001,
-                "optimizer": "adamw_torch",
-                "lr_scheduler": "cosine",
-                "max_steps": 20,
-                "save_steps": 10,
-                "eval_steps": 10,
-            }
-        )
-        normalize_config(cfg)
-        cli_args = TrainerCliArgs()
-        dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)
-
-        train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
-        assert (Path(output_dir) / "pytorch_model.bin").exists()
-
     def test_fft_packing(self):
         # pylint: disable=duplicate-code
         output_dir = tempfile.mkdtemp()
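
For anyone who still needs the removed scenario, the deleted test ran a short end-to-end training job on JackFram/llama-68m with flash_attn_fuse_qkv, flash_attn_fuse_mlp, and sample_packing enabled, then checked that a checkpoint was written. Below is a minimal standalone sketch of that flow, assembled from the removed lines; the import paths are assumptions (they sit above the diff hunk shown here), so verify them against your axolotl checkout before relying on this.

# Sketch of the flow the removed test_lora_packing exercised.
# NOTE: the import locations below are assumed, not shown in this diff.
import tempfile
from pathlib import Path

from axolotl.cli import load_datasets              # assumed import location
from axolotl.common.cli import TrainerCliArgs      # assumed import location
from axolotl.train import train                    # assumed import location
from axolotl.utils.config import normalize_config  # assumed import location
from axolotl.utils.dict import DictDefault         # assumed import location

output_dir = tempfile.mkdtemp()
cfg = DictDefault(
    {
        # same settings the removed test used (see diff above)
        "base_model": "JackFram/llama-68m",
        "base_model_config": "JackFram/llama-68m",
        "flash_attention": True,
        "flash_attn_fuse_qkv": True,
        "flash_attn_fuse_mlp": True,
        "sample_packing": True,
        "sequence_len": 1024,
        "load_in_8bit": True,
        "val_set_size": 0.1,
        "special_tokens": {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"},
        "datasets": [{"path": "mhenrichsen/alpaca_2k_test", "type": "alpaca"}],
        "num_epochs": 2,
        "micro_batch_size": 2,
        "gradient_accumulation_steps": 1,
        "output_dir": output_dir,
        "learning_rate": 0.00001,
        "optimizer": "adamw_torch",
        "lr_scheduler": "cosine",
        "max_steps": 20,
        "save_steps": 10,
        "eval_steps": 10,
    }
)
normalize_config(cfg)
cli_args = TrainerCliArgs()
dataset_meta = load_datasets(cfg=cfg, cli_args=cli_args)
train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
assert (Path(output_dir) / "pytorch_model.bin").exists()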