# ChatMusician-Base

Load the model and tokenizer directly:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("m-a-p/ChatMusician-Base")
model = AutoModelForCausalLM.from_pretrained("m-a-p/ChatMusician-Base")
```

Or use a pipeline as a high-level helper:

```python
from transformers import pipeline

pipe = pipeline("text-generation", model="m-a-p/ChatMusician-Base")
```

The same loading pattern works for other causal language models, for example NousResearch/Genstruct-7B quantized to 8-bit (requires the bitsandbytes package):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = 'NousResearch/Genstruct-7B'

# load_in_8bit=True quantizes the weights to 8 bits at load time
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, device_map='cuda', load_in_8bit=True)
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
```

To fine-tune on Amazon SageMaker, create a Hugging Face estimator that pulls the fine-tuning script from the transformers repository and launches a training job:

```python
import sagemaker
import boto3
from sagemaker.huggingface import HuggingFace

# resolve the SageMaker execution role (falls back to a named IAM role
# when running outside a SageMaker notebook)
try:
    role = sagemaker.get_execution_role()
except ValueError:
    iam = boto3.client('iam')
    role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn']

hyperparameters = {
    'model_name_or_path': 'm-a-p/ChatMusician-Base',
    'output_dir': '/opt/ml/model'
    # add your remaining hyperparameters
    # more info here: https://github.com/huggingface/transformers/tree/v4.37.0/examples/pytorch/seq2seq
}

# git configuration to download our fine-tuning script
git_config = {'repo': 'https://github.com/huggingface/transformers.git', 'branch': 'v4.37.0'}

# create the Hugging Face estimator
huggingface_estimator = HuggingFace(
    entry_point='run_translation.py',
    source_dir='./examples/pytorch/seq2seq',
    instance_type='ml.p3.2xlarge',
    instance_count=1,
    role=role,
    git_config=git_config,
    transformers_version='4.37.0',
    pytorch_version='2.1.0',
    py_version='py310',
    hyperparameters=hyperparameters
)

# start the training job
huggingface_estimator.fit()
```
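Once the training job finishes, the estimator can be deployed to a real-time inference endpoint. Below is a minimal sketch, not part of the original snippet: the instance type and the example prompt are assumptions you should adapt to your account and use case.

```python
# deploy the fine-tuned model to a SageMaker real-time endpoint
# (instance type is an assumption; pick one available in your region/quota)
predictor = huggingface_estimator.deploy(
    initial_instance_count=1,
    instance_type='ml.g4dn.xlarge'
)

# text-generation endpoints accept a JSON payload with an "inputs" field;
# the prompt below is only an illustrative example
output = predictor.predict({'inputs': 'Write a short melody in ABC notation:'})
print(output)
```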
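When you are done experimenting, delete the endpoint with `predictor.delete_endpoint()` so it does not keep accruing charges.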