export WANDB_MODE=online
export HF_ENDPOINT=https://hf-mirror.com
CUDA_VISIBLE_DEVICES=0,1 accelerate launch --config_file=./accelerate_configs/deepspeed_zero2.yaml run_wsi.py --max_steps 20000 --warmup_steps 10 \
--gpu 2 --train_batch_size 8 --eval_batch_size 2 --max_seq_length 256 \
--agg_strategy gmm,longnet --embed_dim 512 --vision_adaptor False --hierachical_token True --hierachical_adaptor True \
--n_heads 32,16,8 --llm_requires_grad True --resume_from_checkpoint False \
--llm_name /data_local/pxb/LLM_models/llama3/llama3.1-8b-instruct \
--dataset_name_list CNX-PathLLM/TCGA-WSI-CloseQA-Balanced,CNX-PathLLM/GTEx-WSI-CloseQA-Balanced,CNX-PathLLM/TCGA-WSI-OpenQA,CNX-PathLLM/GTEx-WSI-OpenQA \
--data_cache_dir /data_local/pxb/CNX-PathLLM/.cache \
--fea_root /path/to/CNX-PathLLM/GTEx-TCGA-Embeddings \
--gmm_root /path/to/GMM_Embeddings \
--output_dir path/to/output/of/step2 \
--ckpt_path path/to/ckpt.bin/of/step1
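The launch above expects a DeepSpeed ZeRO-2 Accelerate config at `./accelerate_configs/deepspeed_zero2.yaml`. As a minimal sketch, assuming a single node with the two GPUs made visible by `CUDA_VISIBLE_DEVICES=0,1`, such a config typically looks like the following; the values and the `*.example.yaml` path here are illustrative, not the repository's actual file.

```bash
# Illustrative only: write an example Accelerate + DeepSpeed ZeRO-2 config to a
# separate *.example.yaml path so the config shipped with the repo is not overwritten.
mkdir -p ./accelerate_configs
cat > ./accelerate_configs/deepspeed_zero2.example.yaml <<'EOF'
compute_environment: LOCAL_MACHINE
distributed_type: DEEPSPEED
deepspeed_config:
  zero_stage: 2
  gradient_accumulation_steps: 1
  offload_optimizer_device: none
  offload_param_device: none
mixed_precision: bf16
num_machines: 1
num_processes: 2   # matches CUDA_VISIBLE_DEVICES=0,1 and --gpu 2
main_training_function: main
use_cpu: false
EOF
```

If a different number of GPUs is used, `num_processes`, `CUDA_VISIBLE_DEVICES`, and `--gpu` should be kept consistent with each other.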