#!/usr/bin/env python
# coding: utf-8

# # Finetuning falcon-1b with Axolotl+QLoRA on RunPod
#
# This notebook makes it easy to try out finetuning falcon-1b with Axolotl+QLoRA on RunPod.
#
# If you run into any issues, please report them [here](https://github.com/OpenAccess-AI-Collective/axolotl/pull/132).
#
# To run this notebook on RunPod, use [this RunPod template](https://runpod.io/gsc?template=tkb65a1zcb&ref=km0th85l) to deploy a pod with a GPU.

# ## Step 1. Generate a default config for accelerate

# In[1]:

get_ipython().run_line_magic('cd', 'axolotl')


# In[2]:

get_ipython().system('accelerate config --config_file configs/accelerate/default_config.yaml default')


# ## Step 2. Use a well-tested falcon-7b QLoRA config and adjust it for 1b

# In[3]:

get_ipython().system('wget https://raw.githubusercontent.com/utensil/axolotl/falcon-7b-qlora/examples/falcon/config-7b-qlora.yml')


# In[4]:

# The sed edit replaces every occurrence of `falcon-7b` with `falcon-rw-1b` and clears the
# `wandb_project` value, so the copied config points at the 1b model and no W&B project is required.
get_ipython().system('cp config-7b-qlora.yml config-1b-qlora.yml')
get_ipython().system("sed -i -e 's/falcon-7b/falcon-rw-1b/g' -e 's/wandb_project: falcon-qlora/wandb_project: /g' config-1b-qlora.yml")


# In[5]:

get_ipython().system('cat config-1b-qlora.yml')


# ## Step 3. Set W&B to offline mode

# In[6]:

get_ipython().run_line_magic('env', 'WANDB_MODE=offline')


# Running W&B in offline mode skips some extra setup steps; you can also choose to log in to your W&B account before training.

# ## Step 4. Start training and enjoy!

# In[ ]:

get_ipython().system('accelerate launch scripts/finetune.py config-1b-qlora.yml')
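

# ## Optional: sync the offline W&B run afterwards
#
# Because `WANDB_MODE=offline` was set above, metrics are written locally under `./wandb`
# instead of being uploaded. Below is a minimal sketch of how to push them later, assuming
# you have a W&B account and that the offline run lives in a `wandb/offline-run-*` directory
# (the timestamped directory name is an assumption about W&B's default layout). Uncomment to run.

# In[ ]:

# get_ipython().system('wandb login')
# get_ipython().system('wandb sync wandb/offline-run-*')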
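

# ## Optional: smoke-test the trained adapter
#
# A minimal sketch (not part of the original notebook) of loading the trained QLoRA adapter
# with `peft` and generating a few tokens. Assumptions: the base model is `tiiuae/falcon-rw-1b`
# (matching the sed edit above) and the adapter was saved to `./qlora-out`; check the
# `output_dir` field in config-1b-qlora.yml and adjust the path if yours differs.

# In[ ]:

import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the base model and tokenizer (fp16 to keep memory modest on a single GPU).
base_model = AutoModelForCausalLM.from_pretrained(
    "tiiuae/falcon-rw-1b",
    torch_dtype=torch.float16,
    device_map="auto",
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-rw-1b", trust_remote_code=True)

# Apply the finetuned LoRA adapter on top of the base model.
# "./qlora-out" is a hypothetical path; use the output_dir from your config.
model = PeftModel.from_pretrained(base_model, "./qlora-out")
model.eval()

prompt = "Hello, my name is"
inputs = tokenizer(prompt, return_tensors="pt").to(base_model.device)
with torch.no_grad():
    output = model.generate(**inputs, max_new_tokens=50)
print(tokenizer.decode(output[0], skip_special_tokens=True))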