#!/bin/bash
export WANDB_MODE=offline
export HF_ENDPOINT=https://hf-mirror.com
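
# Stage-1 WSI evaluation: runs test_wsi.py on GPU 2 over the TCGA/GTEx WSI-description
# datasets with GMM + LongNet feature aggregation (agg_strategy gmm,longnet).
# The /path/to/... values below are placeholders; point them at the local embedding,
# checkpoint, and output locations before running.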
CUDA_VISIBLE_DEVICES=2 python test_wsi.py \
    --max_seq_length 512 --batch_size 4 --select_data_num -1 --eval_sample_size -1 \
    --n_heads 32,16,8 \
    --llm_name /data_local/pxb/LLM_models/llama3/llama3.1-8b-instruct \
    --vision_adaptor False --hierachical_token True --hierachical_adaptor True \
    --shuffle False --data_cache_dir /data_local/pxb/CNX-PathLLM/.cache \
    --dataset_name_list CNX-PathLLM/TCGA-WSI-Description-4onew,CNX-PathLLM/TCGA-WSI-Description-4omini,CNX-PathLLM/GTEx-WSI-Description \
    --agg_strategy gmm,longnet --embed_dim 512 \
    --fea_root /path/to/CNX-PathLLM/GTEx-TCGA-Embeddings \
    --gmm_root /path/to/GMM_Embeddings \
    --ckpt_path /path/to/ckpt.bin/of/step1 \
    --results_save_path /path/to/the/output.csv \
    --use_peft False