│
├── datasets/ # folder with all of the data used for training and scripts for loading it onto the server
│ ├── custom_dataset/ # folder with our custom dataset
│ │ ├── base/ # folder with the final parquet files used for the first pre-training stage (captions only); see the parquet-loading sketch below the tree
│ │ ├── fine_tune/ # folder with the final parquet files used for the second fine-tuning stage (various RS tasks)
│ │ ├── dataset_gather/ # folder with scripts used for gathering our custom dataset
│ │ └── data_EDA.ipynb # notebook with dataset statistics, hypothesis checks, etc.
│ └── vhm_dataset/ # folder with the original dataset from https://github.com/opendatalab
│
├── eval/ # folder with all files used to evaluate models
│ ├── eval_kits/ # various open-source benchmarks used to evaluate our trained models
│ │ ├── RSEvalKit/ # eval kit 1, special thanks to the authors for their contribution -> https://github.com/fitzpchao/RSEvalKit/tree/master?tab=readme-ov-file
│ │ └── ScoreRS/ # eval kit 2, special thanks to the authors for their contribution -> https://github.com/NJU-LHRS/ScoreRS/tree/main
│ │ ├── python_script/ # main folder with scripts to run model inference
│ │ ├── eval_data/ # folder with the datasets used in this benchmark
│ │ ├── eval_launches/ # folder with bash scripts to launch model inference
│ │ └── ... # other scripts used by ScoreRS
│ └── eval_results/
│ ├── model_evaluator/ # folder with the Streamlit app and notebooks used to build the leaderboard and compare models
│ │ ├── get_all_model_profiles.ipynb # notebook with simple scripts to collect the benchmark results of all tested models
│ │ └── leaderboard.py # script to launch the Streamlit app for viewing evaluation results (see the Streamlit launch sketch below the tree)
│ ├── score_rs_eval_results/ # folder with results of running models on eval kit 2 (ScoreRS)
│ └── rsevalkit_eval_results/ # folder with results of running models on eval kit 1 (RSEvalKit)
│
├── models/ # different model architectures used and developed
│ ├── VHM/ # the base of this model was taken from this repository -> https://github.com/opendatalab/VHM, special thanks to the authors
│ │ ├── scipts/ # folder with bash scripts to launch training processes + deepspeed configurations
│ │ │ ├── rs/ # folder with bash scripts to launch training processes
│ │ │ └── zero2.json | zero3_offload.json | zero3.json # deepspeed configurations (illustrative ZeRO-2 sketch below the tree)
│ │ ├── trained_models/ # folder with trained model weights and configurations
│ │ └── vhm/ # main model folder
│ │ ├── model/ # folder with architecture designed by https://github.com/opendatalab
│ │ ├── train/ # scripts to train VHM-based models (see the training launch sketch below the tree)
│ │ │ ├── train.py # original VHM training script with LLaMA attention
│ │ │ ├── train_mem.py # original VHM training script with flash attention
│ │ │ ├── train_custom_dataset.py # custom training script for VHM-based models with LLaMA attention
│ │ │ ├── train_custom_dataset_flash_attention.py # custom training script for VHM-based models with flash attention
│ │ │ └── ... # other scripts
│ │ └── ... # other utils
│ ├── VHM_W_Q_FORMER/ # custom architecture
│ │ ├── scipts/ # folder with bash scripts to launch training processes + deepspeed configurations
│ │ │ ├── rs/ # folder with bash scripts to launch training processes
│ │ │ └── zero2.json | zero3_offload.json | zero3.json # deepspeed configurations
│ │ ├── trained_models/ # folder with trained model weights and configurations
│ │ └── vhm_w_q_former/ # main model folder
│ │ ├── model/ # folder with architecture designed by us
│ │ ├── train/ # scripts to train Q-Former models
│ │ │ ├── train.py # training script with LLaMA attention
│ │ │ ├── train_mem.py # training script with flash attention
│ │ │ ├── train_custom_dataset.py # custom training script for Q-Former models with LLaMA attention
│ │ │ ├── train_custom_dataset_flash_attention.py # custom training script for Q-Former models with flash attention
│ │ │ └── ... # other scripts
│ │ └── ... # other utils
│ ├── VHM_W_Q_FORMER_V2/ # custom architecture
│ │ ├── scipts/ # folder with bash scripts to launch training processes + deepspeed configurations
│ │ │ ├── rs/ # folder with bash scripts to launch training processes
│ │ │ └── zero2.json | zero3_offload.json | zero3.json # deepspeed configurations
│ │ ├── trained_models/ # folder with trained model weights and configurations
│ │ └── vhm_w_q_former/ # main model folder
│ │ ├── model/ # folder with architecture designed by us
│ │ ├── train/ # scripts to train VHM-based models
│ │ │ ├── train.py # custom training script with LLaMA attention
│ │ │ ├── train_mem.py # training script with flash attention
│ │ │ ├── train_custom_dataset.py # custom training script for models with a Q-Former
│ │ │ ├── train_custom_dataset_flash_attention.py # custom training script for Q-Former models with flash attention
│ │ │ └── ... # other scripts
│ │ └── ... # other utils
│ ├── VHM_W_QWEN/ # custom architecture
│ │ ├── scipts/ # folder with bash scripts to launch training processes + deepspeed configurations
│ │ │ ├── rs/ # folder with bash scripts to launch training processes
│ │ │ └── zero2.json | zero3_offload.json | zero3.json # deepspeed configurations
│ │ ├── trained_models/ # folder with trained model weights and configurations
│ │ └── vhm_w_qwen/ # main model folder
│ │ ├── model/ # folder with architecture designed by us
│ │ ├── train/ # scripts to train VHM-based models
│ │ │ ├── train.py # custom training script with Qwen
│ │ │ ├── train_mem.py # training script with flash attention
│ │ │ ├── train_custom_dataset.py # custom training script for Qwen-based models
│ │ │ ├── train_custom_dataset_flash_attention.py # custom training script for Qwen-based models with flash attention
│ │ │ └── ... # other scripts
│ │ └── ... # other utils
└── README.md
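
The parquet files under `datasets/custom_dataset/` can be inspected directly with pandas. The snippet below is only a sketch: the shard names and column layout are assumptions, so adapt the path and glob pattern to whatever the `dataset_gather/` scripts actually produce.

```python
# Illustrative sketch only: the shard names under datasets/custom_dataset/base/
# and the column layout are assumptions -- adapt the glob to what the
# dataset_gather/ scripts actually produce.
from pathlib import Path

import pandas as pd

base_dir = Path("datasets/custom_dataset/base")     # stage-1 caption parquets
parquet_files = sorted(base_dir.glob("*.parquet"))  # pick up every parquet shard

if parquet_files:
    df = pd.read_parquet(parquet_files[0])  # load one shard for a quick look
    print(df.shape)                         # number of records / columns
    print(df.columns.tolist())              # column names
    print(df.head())                        # first few rows
else:
    print(f"no parquet files found under {base_dir}")
```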
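To browse the evaluation results, the leaderboard is a regular Streamlit app. A minimal way to launch it from the repo root (assuming `leaderboard.py` needs no extra CLI arguments) is:

```python
# Minimal sketch: start the leaderboard Streamlit app from the repo root.
# Assumes leaderboard.py takes no extra CLI arguments; "streamlit run" is the
# standard Streamlit entry point.
import subprocess

subprocess.run(
    ["streamlit", "run", "eval/eval_results/model_evaluator/leaderboard.py"],
    check=True,  # fail loudly if Streamlit exits with an error
)
```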
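The `zero2.json` / `zero3.json` / `zero3_offload.json` files select the DeepSpeed ZeRO stage used during training. The snippet below is not a copy of the repo's configs (see `models/*/scipts/` for the real files), just a sketch of what a typical ZeRO stage-2 config contains; the `"auto"` values are resolved by the HuggingFace Trainer at launch time.

```python
# Sketch of a typical ZeRO stage-2 DeepSpeed config -- NOT a copy of the repo's
# zero2.json (see models/*/scipts/ for the real files).
import json

zero2_config = {
    "fp16": {"enabled": "auto"},
    "bf16": {"enabled": "auto"},
    "train_micro_batch_size_per_gpu": "auto",
    "train_batch_size": "auto",
    "gradient_accumulation_steps": "auto",
    "zero_optimization": {
        "stage": 2,                    # shard optimizer states and gradients across GPUs
        "overlap_comm": True,          # overlap gradient reduction with the backward pass
        "contiguous_gradients": True,  # reduce memory fragmentation
    },
}

with open("zero2_example.json", "w") as f:
    json.dump(zero2_config, f, indent=2)
```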
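Training is normally started through the bash scripts under `models/*/scipts/rs/`. As a rough, hypothetical illustration of how the pieces fit together, a run is launched with the DeepSpeed launcher and one of the configs above; this assumes the training scripts expose the standard HuggingFace `--deepspeed` argument, as LLaVA-style trainers usually do, and the remaining arguments are placeholders, not the repo's real flags.

```python
# Hypothetical launch command, shown only to illustrate how the pieces fit
# together: the real entry points are the bash scripts under models/*/scipts/rs/,
# and the model/data arguments are placeholders, not the repo's actual flags.
import subprocess

subprocess.run(
    [
        "deepspeed", "--num_gpus=8",  # DeepSpeed launcher; 8 GPUs assumed
        "models/VHM/vhm/train/train_custom_dataset_flash_attention.py",
        "--deepspeed", "models/VHM/scipts/zero2.json",  # zero2 / zero3 / zero3_offload
        # ...model, dataset and output arguments go here (see scipts/rs/ for the real ones)
    ],
    check=True,
)
```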