#!/usr/bin/env bash
# Llama-slideQA / run_wsi_stage1.sh
# Provenance: weiheng-1009 — "added code for running" (commit cbff41a)
# Stage-1 WSI training launcher: runs run_wsi.py on 2 GPUs via Hugging Face
# Accelerate with a DeepSpeed ZeRO-2 config. Edit the /path/to/... placeholder
# paths (fea_root, gmm_root, output_dir) before running.
set -euo pipefail

export WANDB_MODE=online                 # log this run to Weights & Biases
export HF_ENDPOINT=https://hf-mirror.com # Hugging Face Hub mirror for dataset/model downloads

# BUG FIX: the original lines ended in "100\", "True\", "GMM_Embeddings\" with
# no space before the backslash. Backslash-newline joins lines with NO
# separator, so those produced single malformed arguments such as "100--gpu",
# "True--n_heads", and "/path/to/GMM_Embeddings--output_dir". Every
# continuation below now has a space before the trailing backslash.
#
# NOTE(review): "--hierachical_*" is misspelled but is presumably the exact
# flag name run_wsi.py defines — do not "correct" it without changing the
# trainer too. "--output_dir path/to/output/of/step2" is a placeholder and is
# likely missing a leading "/" — confirm before use.
CUDA_VISIBLE_DEVICES=0,1 accelerate launch \
  --config_file=./accelerate_configs/deepspeed_zero2.yaml run_wsi.py \
  --learning_rate 1e-4 --max_steps 10000 --warmup_steps 100 \
  --gpu 2 --train_batch_size 4 --eval_batch_size 2 --max_seq_length 512 \
  --agg_strategy gmm,longnet --embed_dim 512 --vision_adaptor False \
  --hierachical_token True --hierachical_adaptor True \
  --n_heads 32,16,8 --llm_requires_grad False --resume_from_checkpoint False \
  --llm_name /data_local/pxb/LLM_models/llama3/llama3.1-8b-instruct \
  --dataset_name_list CNX-PathLLM/TCGA-WSI-Description-4onew,CNX-PathLLM/TCGA-WSI-Description-4omini,CNX-PathLLM/GTEx-WSI-Description \
  --data_cache_dir /data_local/pxb/CNX-PathLLM/.cache \
  --fea_root /path/to/CNX-PathLLM/GTEx-TCGA-Embeddings \
  --gmm_root /path/to/GMM_Embeddings \
  --output_dir path/to/output/of/step2