Files changed (1) hide show
  1. README.md +36 -0
README.md ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Launch a Hugging Face question-answering fine-tuning job on Amazon SageMaker.

Pulls the training script (run_qa.py) straight from the transformers repo at
tag v4.26.0 and runs it on a single ml.p3.2xlarge instance.
"""
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# Resolve the IAM execution role. Inside a SageMaker notebook the SDK can
# look it up directly; outside, that raises ValueError and we fall back to
# fetching the role ARN through the IAM API.
try:
    role = sagemaker.get_execution_role()
except ValueError:
    iam_client = boto3.client('iam')
    role = iam_client.get_role(RoleName='sagemaker_execution_role')['Role']['Arn']

# Hyperparameters forwarded to run_qa.py as CLI arguments.
# NOTE(review): the model id points at a GGUF (llama.cpp-format) repo, while
# run_qa.py expects transformers-loadable weights — confirm this model choice.
hyperparameters = {
    'model_name_or_path': 'TheBloke/dolphin-2.5-mixtral-8x7b-GGUF',
    'output_dir': '/opt/ml/model',
    # add your remaining hyperparameters
    # more info here https://github.com/huggingface/transformers/tree/v4.26.0/examples/pytorch/question-answering
}

# Where to fetch the fine-tuning script from (repo + pinned branch/tag).
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.26.0'}

# Estimator describing the training job: entry script, container versions,
# hardware, and the role the job assumes.
estimator = HuggingFace(
    entry_point='run_qa.py',
    source_dir='./examples/pytorch/question-answering',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.26.0',
    pytorch_version='1.13.1',
    py_version='py39',
    hyperparameters=hyperparameters,
)

# Kick off the training job (no input channels are passed here; run_qa.py is
# expected to download its dataset itself via the hyperparameters above).
estimator.fit()