Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions estimation/BA/BA_filtering.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import torch
from BA.BA_utils import *


def BA(iter, states, velocities, imu_meas, landmarks, landmarks_xyz, ii, time_idx, intrinsics, confidences, Sigma, V, lamda_init, poses_gt_eci, initialize=False):
states = states.double()
v = velocities.double()
Expand Down
5 changes: 3 additions & 2 deletions estimation/BA/BA_utils.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,9 @@
import torch
import numpy as np
import torch
from BA.utils import *
from scipy.spatial import transform
from torch_scatter import scatter_sum, scatter_mean
from torch_scatter import scatter_mean, scatter_sum


def proj(X, intrinsics):
""" projection """
Expand Down
6 changes: 4 additions & 2 deletions estimation/BA/utils.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,11 @@
import numpy as np
import math
import torch

# from lietorch.groups import SO3, SE3
import ipdb
import numpy as np
import torch
from BA.BA_utils import *

"""
Computes the Bias-Precession-Nutation matrix transforming the GCRS to the
CIRS intermediate reference frame. This transformation corrects for the
Expand Down
5 changes: 3 additions & 2 deletions estimation/errors_eval.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
import numpy as np
import ipdb
import matplotlib.pyplot as plt

import numpy as np


def time_to_error():
time_for_1 = []
time_for_2 = []
Expand Down
18 changes: 10 additions & 8 deletions estimation/od_pipe.py
Original file line number Diff line number Diff line change
@@ -1,15 +1,17 @@
import gc
import json
import os

# import pandas
import ipdb
import matplotlib.pyplot as plt
import numpy as np
import torch
from BA.BA_filtering import BA, BA_reg
# from lietorch.groups import SO3, SE3
from BA.BA_utils import *
from BA.BA_filtering import BA, BA_reg
from BA.utils import *
import numpy as np
import matplotlib.pyplot as plt
import json
# import pandas
import ipdb
import os
import gc


def od_pipe(data, orbit_lat_long):

Expand Down
3 changes: 2 additions & 1 deletion estimation/trajgen_pipe.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import numpy as np
import ipdb
import numpy as np


class OrbitalElements:
def __init__(self, a, e, i, Omega, omega, nu):
Expand Down
9 changes: 5 additions & 4 deletions eval/eval_landmarks.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,11 @@
import argparse
import os

import numpy as np
from ultralytics import YOLO
import argparse
from tqdm.contrib.concurrent import process_map
import torch
import numpy as np
from tqdm.contrib.concurrent import process_map
from ultralytics import YOLO


def parse_args():
"""
Expand Down
24 changes: 15 additions & 9 deletions labeling/label_ld.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,14 @@
import argparse
import os
from pathlib import Path

import cv2
import numpy as np
import rasterio as rio
from tqdm.contrib.concurrent import process_map
import pyproj
import rasterio as rio
from PIL import Image
import cv2
from tqdm.contrib.concurrent import process_map


def parse_args():
"""
Expand Down Expand Up @@ -33,7 +36,10 @@ def get_landmarks(landmark_path):
Returns:
numpy.ndarray: The loaded landmarks.
"""
loaded_landmarks = np.load(landmark_path)
if landmark_path.endswith(".csv"):
loaded_landmarks = np.genfromtxt(landmark_path, delimiter=",", skip_header=1)
else:
loaded_landmarks = np.load(landmark_path)
return loaded_landmarks

def get_raster_paths(raster_dir_path):
Expand Down Expand Up @@ -61,8 +67,8 @@ def label_raster(raster_path):
im = src.read().transpose(1, 2, 0)
proj = pyproj.Proj(crs)
cxs, cys = proj(landmarks[:, 0], landmarks[:, 1])
lefts, bots = proj(landmarks[:, 2], landmarks[:, 3])
rights, tops = proj(landmarks[:, 4], landmarks[:, 5])
lefts, tops = proj(landmarks[:, 2], landmarks[:, 3])
rights, bots = proj(landmarks[:, 4], landmarks[:, 5])
cvs, cus = src.index(cxs, cys)
tlv, tlu = src.index(lefts, tops)
brv, bru = src.index(rights, bots)
Expand Down Expand Up @@ -91,13 +97,13 @@ def label_raster(raster_path):
cus_norm = cus_idx / im_width
out_labels = np.stack([classes, cus_norm, cvs_norm, ws_norm, hs_norm], axis=1)
if not os.path.exists(output_path):
os.makedirs(output_path)
Path(output_path).mkdir(parents=True, exist_ok=True)
print('made output directory')
if not os.path.exists(os.path.join(output_path, 'labels')):
os.makedirs(os.path.join(output_path, 'labels'))
Path(os.path.join(output_path, "labels")).mkdir(parents=True, exist_ok=True)
print('made labels directory')
if not os.path.exists(os.path.join(output_path, 'images')):
os.makedirs(os.path.join(output_path, 'images'))
Path(os.path.join(output_path, "images")).mkdir(parents=True, exist_ok=True)
print('made images directory')
file_name = os.path.basename(raster_path).split('.')[0]
with open(os.path.join(output_path, 'labels', file_name + '.txt'), 'w') as f:
Expand Down
4 changes: 3 additions & 1 deletion labeling/ld_downselect.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
import numpy as np
import argparse

import numpy as np


def parse_args():
"""
Parse command line arguments for downselecting landmarks from Sareana.
Expand Down
169 changes: 169 additions & 0 deletions ld_training/data_pipeline/gdrive_utils.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,169 @@
import argparse
import logging
import sys
import time
from multiprocessing import Pool

from google.oauth2 import service_account
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from googleapiclient.http import MediaIoBaseDownload

# Configure the root logger once at import time: INFO-and-above records go
# to stdout with a timestamped format, so download progress is visible in
# console output and captured job logs.
root = logging.getLogger()
root.setLevel(logging.INFO)

# Explicit stdout handler (the default StreamHandler writes to stderr).
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
root.addHandler(handler)


def parse_args(argv=None):
    """
    Parse command line arguments for downloading data from Google Drive.

    Args:
        argv (list[str] | None): Argument strings to parse. Defaults to
            ``None``, in which case argparse reads ``sys.argv[1:]`` (the
            normal CLI path); passing a list makes the function testable.

    Returns:
        args (argparse.Namespace): Parsed command line arguments with
        ``credentials_path``, ``output_dir`` and ``folders_to_download``.
    """
    parser = argparse.ArgumentParser(description="Downloading Data from GDrive")
    parser.add_argument(
        "--credentials-path",
        type=str,
        required=True,
        help="Path to credential json file",
    )
    parser.add_argument(
        "--output-dir",
        type=str,
        required=True,
        help="Path to save downloaded files",
    )
    parser.add_argument(
        "--folders-to-download",
        nargs='+',
        default=[],
        required=False,
        # Fixed typos in the user-facing help text ("dowwnload"/"downloaaded").
        help="Folder names to download; if not provided all folders will be downloaded",
    )
    parsed_args = parser.parse_args(argv)
    return parsed_args


def get_folder_list(drive_service):
    """List every Google Drive folder visible to the service account.

    Pages through the Drive v3 ``files.list`` endpoint, collecting all
    items whose MIME type marks them as folders.

    Args:
        drive_service: An authorized Drive v3 service resource.

    Returns:
        list[dict] | None: ``{"id", "name"}`` dicts, one per folder, or
        ``None`` if the API raised an ``HttpError``.
    """
    collected = []
    token = None
    try:
        while True:
            # pylint: disable=maybe-no-member
            page = drive_service.files().list(
                q="mimeType='application/vnd.google-apps.folder'",
                spaces="drive",
                fields="nextPageToken, files(id, name)",
                pageToken=token,
            ).execute()
            logging.debug(page)
            collected.extend(page.get("files", []))
            token = page.get("nextPageToken", None)
            if token is None:
                return collected
    except HttpError as error:
        logging.error(f"An error occurred: {error}")
        return None


def get_tiff_file_list(drive_service, folder_id):
    """Collect all TIFF files that live directly inside a Drive folder.

    Pages through the Drive v3 ``files.list`` endpoint with a query
    restricted to ``image/tiff`` children of ``folder_id``.

    Args:
        drive_service: An authorized Drive v3 service resource.
        folder_id (str): Drive id of the parent folder to search.

    Returns:
        list[dict] | None: ``{"id", "name"}`` dicts, one per TIFF file,
        or ``None`` if the API raised an ``HttpError``.
    """
    query_string = f"mimeType='image/tiff' and '{folder_id}' in parents"
    logging.debug(query_string)

    matches = []
    token = None
    try:
        while True:
            # pylint: disable=maybe-no-member
            page = drive_service.files().list(
                q=query_string,
                spaces="drive",
                fields="nextPageToken, files(id, name)",
                pageToken=token,
            ).execute()
            logging.debug(page)
            matches.extend(page.get("files", []))
            token = page.get("nextPageToken", None)
            if token is None:
                return matches
    except HttpError as error:
        logging.error(f"An error occurred: {error}")
        return None


def download_file_from_google_drive(drive_service, file_id, destination):
    """Stream one Drive file to a local path, logging the elapsed time.

    Args:
        drive_service: An authorized Drive v3 service resource.
        file_id (str): Drive id of the file to fetch.
        destination (str): Local path the file contents are written to.
    """
    start = time.perf_counter()
    request = drive_service.files().get_media(fileId=file_id)
    with open(destination, "wb") as out_file:
        chunked = MediaIoBaseDownload(out_file, request)
        done = False
        while not done:
            status, done = chunked.next_chunk()
            logging.debug("Download %d%%." % int(status.progress() * 100))
    download_time_seconds = time.perf_counter() - start
    logging.info(
        f"file id: {file_id} destination: {destination} download_time_seconds {download_time_seconds}"
    )


def multi_process_download(files):
    """Download many Drive files in parallel worker processes.

    Args:
        files (list[tuple]): ``(drive_service, file_id, destination)``
            argument tuples, one per file, fanned out to
            ``download_file_from_google_drive`` via ``starmap``.
    """
    result = []
    # NOTE(review): 200 workers is aggressive and each one pickles the
    # drive_service argument — tune down if memory becomes an issue.
    # Context manager guarantees the pool is cleaned up even when a
    # worker raises (the original leaked it on exception).
    with Pool(processes=200) as pool:
        # The download helper returns None, so this is a list of Nones;
        # kept so worker exceptions surface here via starmap.
        res = pool.starmap(download_file_from_google_drive, files, chunksize=10)
        result.extend(res)
    logging.info(result)


if __name__ == "__main__":
    args = parse_args()
    logging.info(f"Args provided: {args}")
    creds = service_account.Credentials.from_service_account_file(
        args.credentials_path,
        scopes=["https://www.googleapis.com/auth/drive"],
    )
    drive_service = build("drive", "v3", credentials=creds)
    folders = get_folder_list(drive_service)
    logging.info(folders)
    # get_folder_list returns None when the Drive API raised an HttpError;
    # the original fell through and crashed with a TypeError on iteration.
    if folders is None:
        sys.exit(1)
    filtered_folders = folders
    if len(args.folders_to_download) > 0:
        # Keep only the folders the caller explicitly asked for.
        filtered_folders = [f for f in folders if f["name"] in args.folders_to_download]
    for folder in filtered_folders:
        files = get_tiff_file_list(drive_service, folder["id"])
        destination_dir = args.output_dir
        if files:
            mp_list = []
            for file in files:
                mp_list.append(
                    (drive_service, file["id"], f"{destination_dir}/{file['name']}")
                )
            logging.info(
                f"Number of files in folder {folder['name']}: {len(mp_list)}"
            )
            multi_process_download(mp_list)
5 changes: 5 additions & 0 deletions ld_training/yolo/datasets/12R.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
# dataset root dir
path: /home/giriman/temp/
# train / val image directories, relative to `path`
train: label_12R/images
val: label_12R_val/images
# number of classes (landmark ids)
nc: 500
12 changes: 12 additions & 0 deletions ld_training/yolo/datasets/eedl_cmds.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
train:
- python eedl.py -g 12R -i 2020 -f 2023 -s 150 -m 2000 -o sentinel_images_12R -se s2 --gdrive True
- python eedl.py -g 12R -i 2015 -f 2017 -s 150 -m 2000 -o sentinel_images_12R -se s2 --gdrive True

val:
- python eedl.py -g 12R -i 2018 -f 2019 -s 150 -m 200 -o sentinel_images_12R_val -se s2 --gdrive True

train: python3 label_ld.py --landmark_path /home/giriman/CMU/rexlab/sat/VINSat/sim/landmark_csvs/12R_top_salient.csv --raster_dir_path /home/giriman/CMU/rexlab/sat/data/sentinel/12R/ --output_dir_path /home/giriman/CMU/rexlab/sat/data/sentinel/label_12R/ -v -c

val: python3 label_ld.py --landmark_path /home/giriman/CMU/rexlab/sat/VINSat/sim/landmark_csvs/12R_top_salient.csv --raster_dir_path /home/giriman/CMU/rexlab/sat/data/sentinel/12R_val/ --output_dir_path /home/giriman/CMU/rexlab/sat/data/sentinel/label_12R_val/ -v -c

python3 gdrive_utils.py --credentials-path /home/giriman/gcp/credentials/ee-cmu-vinsat-giria-46a18fe24bfb.json --output-dir /home/giriman/temp --folders-to-download sentinel_images_12R_val
6 changes: 6 additions & 0 deletions ld_training/yolo/utils/plot_results.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
from ultralytics.utils.plotting import plot_results

if __name__ == "__main__":
    # Render the training-run metrics plot from the YOLO results CSV.
    results_csv = "/home/giriman/CMU/rexlab/sat/VINSat/ld_training/yolo/runs/detect/17R_n1342_top_salient_mse/results.csv"
    plot_results(results_csv)
Loading