Instructions to use annasoli/TEST with libraries, inference providers, notebooks, and local apps. Follow these links to get started.
- Libraries
- Transformers
How to use annasoli/TEST with Transformers:
# Load model directly
from transformers import AutoModel

model = AutoModel.from_pretrained("annasoli/TEST", dtype="auto")
- Notebooks
- Google Colab
- Kaggle
- Local Apps
- Unsloth Studio new
How to use annasoli/TEST with Unsloth Studio:
Install Unsloth Studio (macOS, Linux, WSL)
curl -fsSL https://unsloth.ai/install.sh | sh
# Run unsloth studio
unsloth studio -H 0.0.0.0 -p 8888
# Then open http://localhost:8888 in your browser
# Search for annasoli/TEST to start chatting
Install Unsloth Studio (Windows)
irm https://unsloth.ai/install.ps1 | iex
# Run unsloth studio
unsloth studio -H 0.0.0.0 -p 8888
# Then open http://localhost:8888 in your browser
# Search for annasoli/TEST to start chatting
Using HuggingFace Spaces for Unsloth
# No setup required
# Open https://huggingface.co/spaces/unsloth/studio in your browser
# Search for annasoli/TEST to start chatting
Load model with FastModel
pip install unsloth

from unsloth import FastModel

model, tokenizer = FastModel.from_pretrained(
    model_name="annasoli/TEST",
    max_seq_length=2048,
)
{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.03783102143757881,
  "eval_steps": 100,
  "global_step": 15,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0025220680958385876,
      "grad_norm": 119.76318359375,
      "kl_loss": -1.1687562835330993e-15,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0,
      "loss": 2.6394360065460205,
      "step": 1,
      "total_loss": 2.6394360065460205
    },
    {
      "epoch": 0.005044136191677175,
      "grad_norm": 116.01831817626953,
      "kl_loss": -1.280914393650412e-14,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0001,
      "loss": 3.2936160564422607,
      "step": 2,
      "total_loss": 3.2936160564422607
    },
    {
      "epoch": 0.007566204287515763,
      "grad_norm": 104.04817962646484,
      "kl_loss": 7.10318071028837e-09,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0002,
      "loss": 3.084439992904663,
      "step": 3,
      "total_loss": 3.091543197631836
    },
    {
      "epoch": 0.01008827238335435,
      "grad_norm": 68.36679077148438,
      "kl_loss": 2.8489626657801637e-08,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0003,
      "loss": 3.105210304260254,
      "step": 4,
      "total_loss": 3.133699893951416
    },
    {
      "epoch": 0.012610340479192938,
      "grad_norm": 61.00284957885742,
      "kl_loss": 4.923957774849441e-08,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0004,
      "loss": 3.345022678375244,
      "step": 5,
      "total_loss": 3.3942623138427734
    },
    {
      "epoch": 0.015132408575031526,
      "grad_norm": 65.48960876464844,
      "kl_loss": 1.43211394743048e-07,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0005,
      "loss": 2.3467514514923096,
      "step": 6,
      "total_loss": 2.4899628162384033
    },
    {
      "epoch": 0.017654476670870115,
      "grad_norm": 63.001102447509766,
      "kl_loss": 9.109995602329946e-08,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0004993662864385298,
      "loss": 2.5077083110809326,
      "step": 7,
      "total_loss": 2.5988082885742188
    },
    {
      "epoch": 0.0201765447667087,
      "grad_norm": 58.6073112487793,
      "kl_loss": 2.3511624647198914e-07,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0004987325728770596,
      "loss": 2.2668278217315674,
      "step": 8,
      "total_loss": 2.501944065093994
    },
    {
      "epoch": 0.02269861286254729,
      "grad_norm": 97.743896484375,
      "kl_loss": 2.1175161180053692e-07,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0004980988593155894,
      "loss": 2.352029800415039,
      "step": 9,
      "total_loss": 2.563781499862671
    },
    {
      "epoch": 0.025220680958385876,
      "grad_norm": 60.91500473022461,
      "kl_loss": 1.2846226127294358e-07,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0004974651457541192,
      "loss": 2.2376697063446045,
      "step": 10,
      "total_loss": 2.3661320209503174
    },
    {
      "epoch": 0.027742749054224466,
      "grad_norm": 55.095516204833984,
      "kl_loss": 1.4181343033214944e-07,
      "kl_weight": 1000000.0,
      "learning_rate": 0.000496831432192649,
      "loss": 2.8243818283081055,
      "step": 11,
      "total_loss": 2.9661953449249268
    },
    {
      "epoch": 0.03026481715006305,
      "grad_norm": 44.97727966308594,
      "kl_loss": 1.545683971926337e-07,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0004961977186311787,
      "loss": 2.4689197540283203,
      "step": 12,
      "total_loss": 2.623488187789917
    },
    {
      "epoch": 0.03278688524590164,
      "grad_norm": 51.62504196166992,
      "kl_loss": 2.2357993145760702e-07,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0004955640050697085,
      "loss": 2.2227847576141357,
      "step": 13,
      "total_loss": 2.446364641189575
    },
    {
      "epoch": 0.03530895334174023,
      "grad_norm": 42.21575927734375,
      "kl_loss": 1.6229765265052265e-07,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0004949302915082382,
      "loss": 2.4396450519561768,
      "step": 14,
      "total_loss": 2.601942777633667
    },
    {
      "epoch": 0.03783102143757881,
      "grad_norm": 40.02684783935547,
      "kl_loss": 1.4151250127270032e-07,
      "kl_weight": 1000000.0,
      "learning_rate": 0.0004942965779467681,
      "loss": 2.509690761566162,
      "step": 15,
      "total_loss": 2.651203155517578
    }
  ],
  "logging_steps": 1,
  "max_steps": 794,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5157421056000000.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}