From e757ab88f1484b83abb85017e0dfff2205221f4c Mon Sep 17 00:00:00 2001
From: Swati Allabadi
Date: Mon, 28 Apr 2025 10:43:21 +0000
Subject: [PATCH 1/5] Adding steps about how to fine tune on any custom dataset.

Signed-off-by: Swati Allabadi
---
 QEfficient/finetune/dataset/custom_dataset.py |  4 +--
 docs/source/finetune.md                       | 34 +++++++++++++++++++
 2 files changed, 36 insertions(+), 2 deletions(-)

diff --git a/QEfficient/finetune/dataset/custom_dataset.py b/QEfficient/finetune/dataset/custom_dataset.py
index 4bee06c58..f2811b6b3 100644
--- a/QEfficient/finetune/dataset/custom_dataset.py
+++ b/QEfficient/finetune/dataset/custom_dataset.py
@@ -23,7 +23,7 @@ def load_module_from_py_file(py_file: str) -> object:
     return module
 
 
-def get_custom_dataset(dataset_config, tokenizer, split: str):
+def get_custom_dataset(dataset_config, tokenizer, split: str, context_length=None):
     if ":" in dataset_config.file:
         module_path, func_name = dataset_config.file.split(":")
     else:
@@ -38,7 +38,7 @@ def get_custom_dataset(dataset_config, tokenizer, split: str):
     module = load_module_from_py_file(module_path.as_posix())
 
     try:
-        return getattr(module, func_name)(dataset_config, tokenizer, split)
+        return getattr(module, func_name)(dataset_config, tokenizer, split, context_length)
     except AttributeError as e:
         print(
             f"It seems like the given method name ({func_name}) is not present in the dataset .py file ({module_path.as_posix()})."
diff --git a/docs/source/finetune.md b/docs/source/finetune.md
index 40df4401c..48db94b79 100644
--- a/docs/source/finetune.md
+++ b/docs/source/finetune.md
@@ -63,4 +63,38 @@ to visualise the data,
 
 ```python
 tensorboard --logdir runs/ --bind_all
+```
+
+## Fine-Tuning on custom dataset
+
+To run fine tuning for any user specific dataset, prepare the dataset using the following steps:
+
+ 1) Create a directory named 'dataset' inside efficient-transformers.
+ 2) Inside this directory, create a file named 'custom_dataset.py'. This is different from the custom_dataset.py present at efficient-transformers/QEfficient/finetune/dataset.
+ 3) Inside the newly created efficient-transformers/dataset/custom_dataset.py, define a function named 'get_custom_dataset'.
+ 4) get_custom_dataset() should have the following 4 parameters: dataset_config, tokenizer, split, context_length. This function gets called twice through Qefficient/cloud/finetune.py with the name get_preprocessed_dataset.
+ 5) Inside get_custom_dataset(), the dataset needs to be prepared for fine tuning. So, the user needs to apply the prompt and tokenize the dataset accordingly. Please refer to the below template on how to define get_custom_dataset().
+ 6) For examples, please refer to the python files present in efficient-transformers/QEfficient/finetune/dataset. In case of Samsum dataset, get_preprocessed_samsum() of efficient-transformers/QEfficient/finetune/dataset/samsum_dataset.py is called.
+ 7) In efficient-transformers/QEfficient/finetune/configs/dataset_config.py, for custom_dataset class, pass the appropriate value for train_split and test_split according to the dataset keys corresponding to train and test data points.
+ 8) While running fine tuning, pass argument "--dataset custom_dataset" to finetune on custom dataset.
+
+Template for get_custom_dataset() to be defined inside efficient-transformers/dataset/custom_dataset.py is as follows:
+
+```python
+def get_custom_dataset(dataset_config, tokenizer, split, context_length=None):
+
+    # load dataset
+    # based on split, retrieve only the specific portion of the dataset (train or eval) either here or at the end
+
+    def apply_prompt_template():
+
+    def tokenize():
+
+    # define prompt
+    # call apply_prompt_template() for each data point:
+    # data = data.map(apply_prompt_template ,)
+    # call tokenize() for each data point:
+    # data = data.map(tokenize, )
+
+    return dataset
 ```
\ No newline at end of file

From bae75d22129596751dbaf1d9b4994a96cf9dc9ba Mon Sep 17 00:00:00 2001
From: Swati Allabadi
Date: Fri, 9 May 2025 16:27:35 +0530
Subject: [PATCH 2/5] Update finetune.md

Signed-off-by: Swati Allabadi
---
 docs/source/finetune.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/source/finetune.md b/docs/source/finetune.md
index 48db94b79..7d80b572b 100644
--- a/docs/source/finetune.md
+++ b/docs/source/finetune.md
@@ -92,9 +92,9 @@ def get_custom_dataset(dataset_config, tokenizer, split, context_length=None):
 
     # define prompt
     # call apply_prompt_template() for each data point:
-    # data = data.map(apply_prompt_template ,)
+    # dataset = dataset.map(apply_prompt_template ,)
     # call tokenize() for each data point:
-    # data = data.map(tokenize, )
+    # dataset = dataset.map(tokenize, )
 
     return dataset
-```
\ No newline at end of file
+```

From 05a385a9876597d7be2ad48e8911419bcf3dae20 Mon Sep 17 00:00:00 2001
From: Swati Allabadi
Date: Wed, 14 May 2025 15:29:17 +0530
Subject: [PATCH 3/5] Adding alpaca_dataset as the default dataset

Signed-off-by: Swati Allabadi
---
 QEfficient/finetune/configs/training.py | 2 +-
 docs/source/finetune.md                 | 8 +++++---
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/QEfficient/finetune/configs/training.py b/QEfficient/finetune/configs/training.py
index c50954c4c..01199f54d 100644
--- a/QEfficient/finetune/configs/training.py
+++ b/QEfficient/finetune/configs/training.py
@@ -28,7 +28,7 @@ class train_config:
     use_fp16: bool = True
     use_autocast: bool = True
     val_batch_size: int = 1
-    dataset = "samsum_dataset"
+    dataset = "alpaca_dataset"
     task_type = "generation"  # "generation" / "seq_classification"
     peft_method: str = "lora"
     use_peft: bool = True  # use parameter efficient fine tuning
diff --git a/docs/source/finetune.md b/docs/source/finetune.md
index 7d80b572b..4db421ae7 100644
--- a/docs/source/finetune.md
+++ b/docs/source/finetune.md
@@ -69,13 +69,13 @@ tensorboard --logdir runs/ --bind_all
 To run fine tuning for any user specific dataset, prepare the dataset using the following steps:
 
- 1) Create a directory named 'dataset' inside efficient-transformers.
+ 1) Create a directory named 'dataset' inside efficient-transformers. 
  2) Inside this directory, create a file named 'custom_dataset.py'. This is different from the custom_dataset.py present at efficient-transformers/QEfficient/finetune/dataset.
  3) Inside the newly created efficient-transformers/dataset/custom_dataset.py, define a function named 'get_custom_dataset'.
  4) get_custom_dataset() should have the following 4 parameters: dataset_config, tokenizer, split, context_length. This function gets called twice through Qefficient/cloud/finetune.py with the name get_preprocessed_dataset.
  5) Inside get_custom_dataset(), the dataset needs to be prepared for fine tuning. So, the user needs to apply the prompt and tokenize the dataset accordingly. Please refer to the below template on how to define get_custom_dataset().
  6) For examples, please refer to the python files present in efficient-transformers/QEfficient/finetune/dataset. In case of Samsum dataset, get_preprocessed_samsum() of efficient-transformers/QEfficient/finetune/dataset/samsum_dataset.py is called.
- 7) In efficient-transformers/QEfficient/finetune/configs/dataset_config.py, for custom_dataset class, pass the appropriate value for train_split and test_split according to the dataset keys corresponding to train and test data points.
+ 7) In efficient-transformers/QEfficient/finetune/configs/dataset_config.py, for custom_dataset class, pass the appropriate value for train_split and test_split according to the dataset keys corresponding to train and test data points. As an alternative, these values can be passed as command line arguemnets as well with the finetune command. For example "--train_split train".
  8) While running fine tuning, pass argument "--dataset custom_dataset" to finetune on custom dataset.
 
 Template for get_custom_dataset() to be defined inside efficient-transformers/dataset/custom_dataset.py is as follows:
 
@@ -87,10 +87,12 @@ def get_custom_dataset(dataset_config, tokenizer, split, context_length=None):
     # based on split, retrieve only the specific portion of the dataset (train or eval) either here or at the end
 
     def apply_prompt_template():
+        # transform the passed datapoint by applying the prompt on it
 
     def tokenize():
+        # tokenize the passed datapoint
 
-    # define prompt
+    # define the prompt
     # call apply_prompt_template() for each data point:
     # dataset = dataset.map(apply_prompt_template ,)
     # call tokenize() for each data point:

From 155bb7793e1d90e99016ef302c5e55d63a3fee54 Mon Sep 17 00:00:00 2001
From: Swati Allabadi
Date: Mon, 19 May 2025 23:11:15 +0530
Subject: [PATCH 4/5] Update finetune.md

Signed-off-by: Swati Allabadi
---
 docs/source/finetune.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/source/finetune.md b/docs/source/finetune.md
index 4db421ae7..939dd2ff3 100644
--- a/docs/source/finetune.md
+++ b/docs/source/finetune.md
@@ -69,10 +69,10 @@ tensorboard --logdir runs/ --bind_all
 To run fine tuning for any user specific dataset, prepare the dataset using the following steps:
 
- 1) Create a directory named 'dataset' inside efficient-transformers. 
+ 1) Create a directory named 'dataset' inside efficient-transformers (i.e. at the root of the repo).
  2) Inside this directory, create a file named 'custom_dataset.py'. This is different from the custom_dataset.py present at efficient-transformers/QEfficient/finetune/dataset.
  3) Inside the newly created efficient-transformers/dataset/custom_dataset.py, define a function named 'get_custom_dataset'.
- 4) get_custom_dataset() should have the following 4 parameters: dataset_config, tokenizer, split, context_length. This function gets called twice through Qefficient/cloud/finetune.py with the name get_preprocessed_dataset.
+ 4) get_custom_dataset() should have the following 4 parameters: dataset_config, tokenizer, split, context_length. This function gets called twice through QEfficient/cloud/finetune.py with the name get_preprocessed_dataset.
  5) Inside get_custom_dataset(), the dataset needs to be prepared for fine tuning. So, the user needs to apply the prompt and tokenize the dataset accordingly. Please refer to the below template on how to define get_custom_dataset().
  6) For examples, please refer to the python files present in efficient-transformers/QEfficient/finetune/dataset. In case of Samsum dataset, get_preprocessed_samsum() of efficient-transformers/QEfficient/finetune/dataset/samsum_dataset.py is called.
  7) In efficient-transformers/QEfficient/finetune/configs/dataset_config.py, for custom_dataset class, pass the appropriate value for train_split and test_split according to the dataset keys corresponding to train and test data points. As an alternative, these values can be passed as command line arguemnets as well with the finetune command. For example "--train_split train".
  8) While running fine tuning, pass argument "--dataset custom_dataset" to finetune on custom dataset.

From 25a2ac5c7319ea18e93c84487ebfda4db928167e Mon Sep 17 00:00:00 2001
From: Swati Allabadi
Date: Thu, 12 Jun 2025 09:18:56 +0000
Subject: [PATCH 5/5] Adding steps about how to fine tune on any custom dataset.

Signed-off-by: Swati Allabadi
---
 QEfficient/finetune/utils/dataset_utils.py |  2 +-
 docs/source/finetune.md                    | 16 ++++++++++------
 2 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/QEfficient/finetune/utils/dataset_utils.py b/QEfficient/finetune/utils/dataset_utils.py
index 1642a56d3..42d0aae71 100644
--- a/QEfficient/finetune/utils/dataset_utils.py
+++ b/QEfficient/finetune/utils/dataset_utils.py
@@ -51,7 +51,7 @@ def get_dataloader_kwargs(train_config, dataset, dataset_processer, split):
         )
     else:
         kwargs["sampler"] = torch.utils.data.DistributedSampler(
-            dataset, num_replicas=dist.get_world_size(), rank=dist.get_rank(), shuffle=True
+            dataset, num_replicas=dist.get_world_size(), rank=dist.get_rank(), shuffle=False
         )
         kwargs["batch_size"] = batch_size
         kwargs["drop_last"] = True
diff --git a/docs/source/finetune.md b/docs/source/finetune.md
index 2d76fa095..70bf35e67 100644
--- a/docs/source/finetune.md
+++ b/docs/source/finetune.md
@@ -66,17 +66,21 @@ to visualise the data,
 tensorboard --logdir runs/ --bind_all
 ```
 
+## Some features/functionalities of fine-tuning stack:
+ 1) Gradient accumulation: By default, gradient accumulation happens for 4 steps. To update this value, command line argument gradient_accumulation_steps has to be passed. (Example: '--gradient_accumulation_steps 8')
+ 2) Gradient Checkpointing: By default, gradient checkpointing is disabled. To enable it, command line argument gradient_checkpointing has to be passed.
+
 ## Fine-Tuning on custom dataset
 
 To run fine tuning for any user specific dataset, prepare the dataset using the following steps:
 
- 1) Create a directory named 'dataset' inside efficient-transformers (i.e. at the root of the repo).
- 2) Inside this directory, create a file named 'custom_dataset.py'. This is different from the custom_dataset.py present at efficient-transformers/QEfficient/finetune/dataset.
+ 1) Create a directory named 'dataset' inside efficient-transformers.
+ 2) Inside this directory, create a file named 'custom_dataset.py'.
  3) Inside the newly created efficient-transformers/dataset/custom_dataset.py, define a function named 'get_custom_dataset'.
- 4) get_custom_dataset() should have the following 4 parameters: dataset_config, tokenizer, split, context_length. This function gets called twice through QEfficient/cloud/finetune.py with the name get_preprocessed_dataset.
- 5) Inside get_custom_dataset(), the dataset needs to be prepared for fine tuning. So, the user needs to apply the prompt and tokenize the dataset accordingly. Please refer to the below template on how to define get_custom_dataset().
- 6) For examples, please refer to the python files present in efficient-transformers/QEfficient/finetune/dataset. In case of Samsum dataset, get_preprocessed_samsum() of efficient-transformers/QEfficient/finetune/dataset/samsum_dataset.py is called.
- 7) In efficient-transformers/QEfficient/finetune/configs/dataset_config.py, for custom_dataset class, pass the appropriate value for train_split and test_split according to the dataset keys corresponding to train and test data points. As an alternative, these values can be passed as command line arguemnets as well with the finetune command. For example "--train_split train".
+ 4) get_custom_dataset() should have the following 4 parameters: dataset_config, tokenizer, split, context_length.
+ 5) Inside get_custom_dataset(), the user needs to apply the prompt and tokenize the dataset accordingly. Please refer to the below template on how to define get_custom_dataset().
+ 6) For examples, please refer to the python files present in [dataset](https://github.com/quic/efficient-transformers/tree/main/QEfficient/finetune/dataset). In case of Samsum dataset, get_preprocessed_samsum() of efficient-transformers/QEfficient/finetune/dataset/samsum_dataset.py is called.
+ 7) In [dataset_config.py](https://github.com/quic/efficient-transformers/blob/main/QEfficient/finetune/configs/dataset_config.py), for custom_dataset class, pass the appropriate value for train_split and test_split. As an alternative, these values can be passed as command line arguments as well with the finetune command. For example "--train_split train".
  8) While running fine tuning, pass argument "--dataset custom_dataset" to finetune on custom dataset.
 
 Template for get_custom_dataset() to be defined inside efficient-transformers/dataset/custom_dataset.py is as follows:
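For readers who want a concrete starting point, the sketch below shows one way the template above could be filled in. It is illustrative only and not part of the patches above: the dataset id ("samsum"), the prompt string, the "dialogue"/"summary" column names, and the loss-masking scheme are assumptions to adapt to your own data.

```python
import datasets


def get_custom_dataset(dataset_config, tokenizer, split, context_length=None):
    # Load the raw dataset; the dataset id and column names here are placeholders
    # (a samsum-style corpus with "dialogue" and "summary" fields).
    dataset = datasets.load_dataset("samsum", split=split, trust_remote_code=True)

    prompt_template = "Summarize this dialog:\n{dialogue}\n---\nSummary:\n"

    def apply_prompt_template(sample):
        # Wrap each datapoint in the instruction prompt.
        return {
            "prompt": prompt_template.format(dialogue=sample["dialogue"]),
            "summary": sample["summary"],
        }

    def tokenize(sample):
        # Tokenize prompt and target separately so that the prompt tokens can be
        # masked out of the loss with -100 labels.
        bos = tokenizer.bos_token or ""
        eos = tokenizer.eos_token or ""
        prompt_ids = tokenizer.encode(bos + sample["prompt"], add_special_tokens=False)
        summary_ids = tokenizer.encode(sample["summary"] + eos, add_special_tokens=False)
        input_ids = (prompt_ids + summary_ids)[:context_length]
        labels = ([-100] * len(prompt_ids) + summary_ids)[:context_length]
        return {
            "input_ids": input_ids,
            "attention_mask": [1] * len(input_ids),
            "labels": labels,
        }

    # Apply the prompt, then tokenize, dropping the original columns at each step.
    dataset = dataset.map(apply_prompt_template, remove_columns=list(dataset.features))
    dataset = dataset.map(tokenize, remove_columns=list(dataset.features))
    return dataset
```

Masking the prompt tokens with -100 keeps them out of the loss, so the model is trained only to produce the target text; drop the masking if the prompt tokens should also contribute to training.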