Ramendra committed on
Commit
bba88fe
1 Parent(s): 4c40ab4

End of training

README.md CHANGED
@@ -1,4 +1,6 @@
 ---
+license: mit
+base_model: facebook/bart-large-cnn
 tags:
 - generated_from_trainer
 datasets:
@@ -13,9 +15,9 @@ should probably proofread and complete it, then remove this comment. -->
 
 # dialogue_Summary
 
-This model was trained from scratch on the samsum dataset.
+This model is a fine-tuned version of [facebook/bart-large-cnn](https://huggingface.co/facebook/bart-large-cnn) on the samsum dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.1356
+- Loss: 0.1338
 
 ## Model description
 
@@ -46,7 +48,7 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:-----:|:----:|:---------------:|
-| 0.08          | 1.0   | 148  | 0.1356          |
+| 0.0825        | 1.0   | 148  | 0.1338          |
 
 
 ### Framework versions
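For reference, a minimal usage sketch of the resulting checkpoint; the hub id `Ramendra/dialogue_Summary` is an assumption pieced together from the committer and model names in this commit, not something the diff confirms:

```python
# Minimal usage sketch. The hub id is assumed from the committer and model
# names in this commit; substitute the actual repo id if it differs.
from transformers import pipeline

summarizer = pipeline("summarization", model="Ramendra/dialogue_Summary")

dialogue = (
    "Amanda: I baked cookies. Do you want some?\n"
    "Jerry: Sure!\n"
    "Amanda: I'll bring you some tomorrow :-)"
)
print(summarizer(dialogue, max_length=60, min_length=10)[0]["summary_text"])
```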
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "./bart-cnn-samsum-finetuned/tuned_model_1697080968.4021988",
+  "_name_or_path": "facebook/bart-large-cnn",
   "_num_labels": 3,
   "activation_dropout": 0.0,
   "activation_function": "gelu",
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:15ae918b67473f15b3efaf3d7c9330ca455203d04198a2dfbde3779c4e490e69
+oid sha256:374eb589c3e85134ab5e5f1d29af77eead992d5bbeff35a0da0d068a4a9cb191
 size 1625537293
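The file above is a Git LFS pointer: the repo tracks only the object's SHA-256 and byte size, and the actual weights are fetched from LFS storage. A sketch of verifying a downloaded blob against the new `oid`:

```python
# Sketch: check a downloaded pytorch_model.bin against the LFS pointer's oid.
import hashlib

h = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # stream: the file is ~1.6 GB
        h.update(chunk)

assert h.hexdigest() == "374eb589c3e85134ab5e5f1d29af77eead992d5bbeff35a0da0d068a4a9cb191"
```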
special_tokens_map.json CHANGED
@@ -1,11 +1,4 @@
 {
-  "additional_special_tokens": [
-    "<s>",
-    "<pad>",
-    "</s>",
-    "<unk>",
-    "<mask>"
-  ],
   "bos_token": "<s>",
   "cls_token": "<s>",
   "eos_token": "</s>",
tokenizer.json CHANGED
@@ -1,7 +1,21 @@
 {
   "version": "1.0",
-  "truncation": null,
-  "padding": null,
+  "truncation": {
+    "direction": "Right",
+    "max_length": 1024,
+    "strategy": "LongestFirst",
+    "stride": 0
+  },
+  "padding": {
+    "strategy": {
+      "Fixed": 1024
+    },
+    "direction": "Right",
+    "pad_to_multiple_of": null,
+    "pad_id": 2,
+    "pad_type_id": 0,
+    "pad_token": "</s>"
+  },
   "added_tokens": [
     {
       "id": 0,
tokenizer_config.json CHANGED
@@ -42,30 +42,17 @@
       "special": true
     }
   },
-  "additional_special_tokens": [
-    "<s>",
-    "<pad>",
-    "</s>",
-    "<unk>",
-    "<mask>"
-  ],
+  "additional_special_tokens": [],
   "bos_token": "<s>",
   "clean_up_tokenization_spaces": true,
   "cls_token": "<s>",
   "eos_token": "</s>",
   "errors": "replace",
   "mask_token": "<mask>",
-  "max_length": 1024,
   "model_max_length": 1024,
-  "pad_to_multiple_of": null,
   "pad_token": "</s>",
-  "pad_token_type_id": 0,
-  "padding_side": "right",
   "sep_token": "</s>",
-  "stride": 0,
   "tokenizer_class": "BartTokenizer",
   "trim_offsets": true,
-  "truncation_side": "right",
-  "truncation_strategy": "longest_first",
   "unk_token": "<unk>"
 }
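Net effect of the special-token edits here and in special_tokens_map.json: only BART's standard specials remain, with `</s>` doubling as the pad token. A quick sanity-check sketch (hub id assumed, as above):

```python
# Sanity-check sketch; the hub id is an assumption, as noted earlier.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Ramendra/dialogue_Summary")
print(tok.additional_special_tokens)                # expected: []
print(tok.bos_token, tok.eos_token, tok.pad_token)  # expected: <s> </s> </s>
```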
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8b95b6c2c8d77931c46b8fec94cce415934ad1469df2fc23fc22b87baa9be3c3
+oid sha256:45c3fe017c0c07b9bcaa75e1e79e0a8080d4cbfec70c94bb7cd0ffb0d9d7faf3
 size 4091