# Source: Llama-slideQA / test_wsi_pathchat.sh
# Author: weiheng-1009 — "added code for running" (commit cbff41a)
#!/bin/bash
# Evaluate the WSI PathChat model on the closed- and open-QA datasets
# (CNX-PathLLM/PathChat_CloseQA_Balanced, CNX-PathLLM/PathChat_OpenQA)
# using GPU 1. Results are written to the CSV given by --results_save_path.
#
# NOTE(review): the /path/to/... placeholders below must be replaced with
# real locations before running — TODO confirm against the deployment docs.
set -euo pipefail

export WANDB_MODE=online
# Route Hugging Face downloads through the mirror endpoint.
export HF_ENDPOINT=https://hf-mirror.com

# Build the argument list as an array instead of backslash continuations.
# The original script was missing a space before several trailing
# backslashes (e.g. `.cache\` + `--dataset_name_list` fused into the single
# word `.cache--dataset_name_list`), silently corrupting five arguments.
args=(
  --max_seq_length 128
  --batch_size 8
  --select_data_num -1          # -1 presumably means "use all data" — TODO confirm in test_wsi.py
  --eval_sample_size -1
  --n_heads 32,16,8
  --llm_name /data_local/pxb/LLM_models/llama3/llama3.1-8b-instruct
  --vision_adaptor False
  --hierachical_token True
  --hierachical_adaptor True
  --shuffle False
  --data_cache_dir /data_local/pxb/CNX-PathLLM/.cache
  --dataset_name_list CNX-PathLLM/PathChat_CloseQA_Balanced,CNX-PathLLM/PathChat_OpenQA
  --agg_strategy gmm,longnet
  --embed_dim 512
  --fea_root /path/to/CNX-PathLLM/GTEx-TCGA-Embeddings
  --gmm_root /path/to/GMM_Embeddings
  --ckpt_path path/to/ckpt.bin/of/pathchat
  --results_save_path /path/to/the/output.csv
  --use_peft False
)

CUDA_VISIBLE_DEVICES=1 python test_wsi.py "${args[@]}"