diff --git a/data/train_example.jsonl b/data/train_example.jsonl
new file mode 100644
index 0000000..d6e6daa
--- /dev/null
+++ b/data/train_example.jsonl
@@ -0,0 +1 @@
+{"messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "语音转写:<|startofspeech|>!/path/to/wav<|endofspeech|>"}, {"role": "assistant", "content": "content of /path/to/wav"}], "speech_length": 42, "text_length": 42}
diff --git a/data/val_example.jsonl b/data/val_example.jsonl
new file mode 100644
index 0000000..d6e6daa
--- /dev/null
+++ b/data/val_example.jsonl
@@ -0,0 +1 @@
+{"messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "语音转写:<|startofspeech|>!/path/to/wav<|endofspeech|>"}, {"role": "assistant", "content": "content of /path/to/wav"}], "speech_length": 42, "text_length": 42}
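Each JSONL line above is one chat-style training sample: the user turn, prompted with 语音转写 ("speech transcription"), embeds the wav path between <|startofspeech|>! and <|endofspeech|>, and the assistant turn carries the reference transcript. A minimal sketch of how such a line could be produced; the helper name make_example is hypothetical (not part of this patch), and the units of speech_length / text_length (e.g. feature frames vs. tokens) are taken as given rather than defined here:

import json

def make_example(wav_path: str, transcript: str, speech_length: int, text_length: int) -> dict:
    """Assemble one record in the shape of data/train_example.jsonl."""
    return {
        "messages": [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user",
             "content": f"语音转写:<|startofspeech|>!{wav_path}<|endofspeech|>"},
            {"role": "assistant", "content": transcript},
        ],
        "speech_length": speech_length,
        "text_length": text_length,
    }

with open("data/train_example.jsonl", "w", encoding="utf-8") as f:
    record = make_example("/path/to/wav", "content of /path/to/wav", 42, 42)
    f.write(json.dumps(record, ensure_ascii=False) + "\n")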
diff --git a/deepspeed_conf/ds_stage1.json b/deepspeed_conf/ds_stage1.json
new file mode 100644
index 0000000..51804c1
--- /dev/null
+++ b/deepspeed_conf/ds_stage1.json
@@ -0,0 +1,33 @@
+{
+    "train_micro_batch_size_per_gpu": 1,
+    "gradient_accumulation_steps": 1,
+    "steps_per_print": 100,
+    "gradient_clipping": 5,
+    "fp16": {
+        "enabled": false,
+        "auto_cast": false,
+        "loss_scale": 0,
+        "initial_scale_power": 16,
+        "loss_scale_window": 1000,
+        "hysteresis": 2,
+        "consecutive_hysteresis": false,
+        "min_loss_scale": 1
+    },
+    "bf16": {
+        "enabled": true
+    },
+    "zero_force_ds_cpu_optimizer": false,
+    "zero_optimization": {
+        "stage": 1,
+        "offload_optimizer": {
+            "device": "none",
+            "pin_memory": true
+        },
+        "allgather_partitions": true,
+        "allgather_bucket_size": 5e8,
+        "overlap_comm": true,
+        "reduce_scatter": true,
+        "reduce_bucket_size": 5e8,
+        "contiguous_gradients": true
+    }
+}
diff --git a/finetune.sh b/finetune.sh
new file mode 100644
index 0000000..219d9b4
--- /dev/null
+++ b/finetune.sh
@@ -0,0 +1,80 @@
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+# MIT License (https://opensource.org/licenses/MIT)
+
+workspace=`pwd`
+
+# which gpu(s) to train or finetune on
+export CUDA_VISIBLE_DEVICES="0"
+gpu_num=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
+
+# model_name from model_hub, or model_dir in local path
+
+## option 1, download model automatically
+model_name_or_model_dir="FunAudioLLM/Fun-ASR-Nano-2512"
+
+## option 2, download model by git
+#local_path_root=${workspace}/modelscope_models
+#mkdir -p ${local_path_root}/${model_name_or_model_dir}
+#git clone https://www.modelscope.cn/${model_name_or_model_dir}.git ${local_path_root}/${model_name_or_model_dir}
+#model_name_or_model_dir=${local_path_root}/${model_name_or_model_dir}
+
+
+# data dir, which contains: train_example.jsonl, val_example.jsonl
+train_data=${workspace}/data/train_example.jsonl
+val_data=${workspace}/data/val_example.jsonl
+
+# exp output dir
+output_dir="./outputs"
+log_file="${output_dir}/log.txt"
+
+deepspeed_config=${workspace}/deepspeed_conf/ds_stage1.json
+
+mkdir -p ${output_dir}
+echo "log_file: ${log_file}"
+
+DISTRIBUTED_ARGS="
+    --nnodes ${WORLD_SIZE:-1} \
+    --nproc_per_node $gpu_num \
+    --node_rank ${RANK:-0} \
+    --master_addr ${MASTER_ADDR:-127.0.0.1} \
+    --master_port ${MASTER_PORT:-26669}
+"
+
+echo $DISTRIBUTED_ARGS
+
+# locate the funasr trainer (train_ds.py) next to the installed funasr entry point
+if [ -f `dirname $(which funasr)`/train_ds.py ]; then
+    train_tool=`dirname $(which funasr)`/train_ds.py
+elif [ -f `dirname $(which funasr)`/../lib/python*/site-packages/funasr/bin/train_ds.py ]; then
+    train_tool=`dirname $(which funasr)`/../lib/python*/site-packages/funasr/bin/train_ds.py
+else
+    echo "Error: train_ds.py not found in funasr bin directory."
+    exit 1
+fi
+ABSOLUTE_PATH=$(cd $(dirname $train_tool); pwd)
+train_tool=${ABSOLUTE_PATH}/train_ds.py
+echo "Using funasr trainer: ${train_tool}"
+
+torchrun $DISTRIBUTED_ARGS \
+${train_tool} \
+++model="${model_name_or_model_dir}" \
+++trust_remote_code=true \
+++train_data_set_list="${train_data}" \
+++valid_data_set_list="${val_data}" \
+++dataset_conf.data_split_num=1 \
+++dataset_conf.batch_sampler="BatchSampler" \
+++dataset_conf.batch_size=6000 \
+++dataset_conf.sort_size=1024 \
+++dataset_conf.batch_type="token" \
+++dataset_conf.num_workers=4 \
+++train_conf.max_epoch=50 \
+++train_conf.log_interval=1 \
+++train_conf.resume=true \
+++train_conf.validate_interval=2000 \
+++train_conf.save_checkpoint_interval=2000 \
+++train_conf.keep_nbest_models=20 \
+++train_conf.avg_nbest_model=10 \
+++train_conf.use_deepspeed=false \
+++train_conf.deepspeed_config=${deepspeed_config} \
+++optim_conf.lr=0.0002 \
+++output_dir="${output_dir}" &> ${log_file}
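Running bash finetune.sh redirects all trainer output to ${output_dir}/log.txt; listing more device ids in CUDA_VISIBLE_DEVICES (e.g. "0,1,2,3") raises gpu_num and with it the number of torchrun workers per node. The ++key=value arguments are dotted config overrides in the hydra/OmegaConf style that train_ds.py merges into the training configuration. Purely as an illustration of how such dotted keys compose into a nested config (this is not FunASR's actual argument parsing), a small sketch with omegaconf:

from omegaconf import OmegaConf

# The same dotted keys passed to train_ds.py above, minus the leading "++".
overrides = [
    "dataset_conf.batch_type=token",
    "dataset_conf.batch_size=6000",
    "train_conf.max_epoch=50",
    "optim_conf.lr=0.0002",
]
cfg = OmegaConf.from_dotlist(overrides)  # builds a nested DictConfig

print(OmegaConf.to_yaml(cfg))
# dataset_conf:
#   batch_type: token
#   batch_size: 6000
# train_conf:
#   max_epoch: 50
# optim_conf:
#   lr: 0.0002
print(cfg.dataset_conf.batch_size)       # 6000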
diff --git a/model.py b/model.py
index ea6b6c3..657edcc 100644
--- a/model.py
+++ b/model.py
@@ -125,7 +125,6 @@ class FunASRNano(nn.Module):
         self.use_low_frame_rate = audio_adaptor_conf.get("use_low_frame_rate", False)
         self.length_normalized_loss = length_normalized_loss
-        self.feat_permute = audio_encoder_conf.get("feat_permute", True)
 
         rank = int(os.environ.get("RANK", 0))
         logging.info(f"rank: {rank}, model is builded.")
 
@@ -255,12 +254,7 @@ class FunASRNano(nn.Module):
 
     def encode(self, speech, speech_lengths):
         # audio encoder
-        if self.feat_permute:
-            encoder_out, encoder_out_lens = self.audio_encoder(
-                speech.permute(0, 2, 1), speech_lengths
-            )
-        else:
-            encoder_out, encoder_out_lens = self.audio_encoder(speech, speech_lengths)
+        encoder_out, encoder_out_lens = self.audio_encoder(speech, speech_lengths)
 
         return encoder_out, encoder_out_lens
 
@@ -385,9 +379,6 @@ class FunASRNano(nn.Module):
                 / 1000
             )
 
-        if self.feat_permute:
-            speech = speech.permute(0, 2, 1)
-
         if self.use_low_frame_rate:
             olens = 1 + (speech_lengths[0].item() - 3 + 2 * 1) // 2
             olens = 1 + (olens - 3 + 2 * 1) // 2
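In the last hunk above, the use_low_frame_rate branch derives the output length by applying 1 + (n - 3 + 2 * 1) // 2 twice to the input length. That expression matches the standard output-length formula for a 1-D convolution with kernel 3, stride 2 and padding 1, so the two applications correspond to two stride-2 downsampling stages (an interpretation of the kept code, not something stated in the patch). A small sketch of the arithmetic:

def downsampled_len(n: int, kernel: int = 3, stride: int = 2, pad: int = 1) -> int:
    # Standard 1-D conv output length: floor((n + 2*pad - kernel) / stride) + 1.
    # With kernel=3, stride=2, pad=1 this is exactly 1 + (n - 3 + 2 * 1) // 2,
    # the expression used in the use_low_frame_rate branch above.
    return 1 + (n - kernel + 2 * pad) // stride

n_frames = 1000                    # hypothetical number of input feature frames
once = downsampled_len(n_frames)   # 500
twice = downsampled_len(once)      # 250 -> roughly a 4x lower frame rate
print(once, twice)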