Skip to content

Commit 9131a98

Browse files
Copilot authored and DimaBir committed
Add GitHub Actions CI/CD workflows and fix code formatting
Co-authored-by: DimaBir <28827735+DimaBir@users.noreply.github.com>
1 parent 34568a4 commit 9131a98

16 files changed

Lines changed: 193 additions & 89 deletions

.github/workflows/lint.yml

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
name: Code Quality
2+
3+
on:
4+
push:
5+
branches: [ main, copilot/* ]
6+
pull_request:
7+
branches: [ main ]
8+
9+
jobs:
10+
lint:
11+
runs-on: ubuntu-latest
12+
13+
steps:
14+
- uses: actions/checkout@v4
15+
16+
- name: Set up Python
17+
uses: actions/setup-python@v5
18+
with:
19+
python-version: '3.12'
20+
21+
- name: Install dependencies
22+
run: |
23+
python -m pip install --upgrade pip
24+
pip install ruff
25+
26+
- name: Lint with ruff
27+
run: |
28+
ruff check src/ common/ tests/ --output-format=github
29+
30+
- name: Check formatting with ruff
31+
run: |
32+
ruff format --check src/ common/ tests/

.github/workflows/tests.yml

Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,40 @@
1+
name: Tests
2+
3+
on:
4+
push:
5+
branches: [ main, copilot/* ]
6+
pull_request:
7+
branches: [ main ]
8+
9+
jobs:
10+
test:
11+
runs-on: ubuntu-latest
12+
strategy:
13+
matrix:
14+
python-version: ['3.10', '3.11', '3.12']
15+
16+
steps:
17+
- uses: actions/checkout@v4
18+
19+
- name: Set up Python ${{ matrix.python-version }}
20+
uses: actions/setup-python@v5
21+
with:
22+
python-version: ${{ matrix.python-version }}
23+
24+
- name: Install dependencies
25+
run: |
26+
python -m pip install --upgrade pip
27+
pip install -r requirements.txt
28+
29+
- name: Run tests with coverage
30+
run: |
31+
pytest tests/ --cov=src --cov=common --cov-report=term-missing --cov-report=xml
32+
33+
- name: Upload coverage reports to Codecov
34+
uses: codecov/codecov-action@v4
35+
if: matrix.python-version == '3.12'
36+
with:
37+
file: ./coverage.xml
38+
flags: unittests
39+
name: codecov-umbrella
40+
fail_ci_if_error: false

common/utils.py

Lines changed: 40 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,4 @@
11
import argparse
2-
from typing import Dict, Tuple
32

43
import matplotlib.pyplot as plt
54
import pandas as pd
@@ -13,24 +12,34 @@
1312
INFERENCE_MODES = ["onnx", "ov", "cpu", "cuda", "tensorrt", "all"]
1413

1514

16-
def _create_sorted_dataframe(data: Dict[str, float], column_name: str, ascending: bool) -> pd.DataFrame:
15+
def _create_sorted_dataframe(
16+
data: dict[str, float], column_name: str, ascending: bool
17+
) -> pd.DataFrame:
1718
df = pd.DataFrame(list(data.items()), columns=["Model", column_name])
1819
return df.sort_values(column_name, ascending=ascending)
1920

2021

21-
def _plot_bar_chart(ax, data: pd.DataFrame, x_col: str, y_col: str,
22-
xlabel: str, ylabel: str, title: str, palette: str, value_format: str):
23-
sns.barplot(x=data[x_col], y=data[y_col], hue=data[y_col], palette=palette,
24-
ax=ax, legend=False)
22+
def _plot_bar_chart(
23+
ax,
24+
data: pd.DataFrame,
25+
x_col: str,
26+
y_col: str,
27+
xlabel: str,
28+
ylabel: str,
29+
title: str,
30+
palette: str,
31+
value_format: str,
32+
):
33+
sns.barplot(x=data[x_col], y=data[y_col], hue=data[y_col], palette=palette, ax=ax, legend=False)
2534
ax.set_xlabel(xlabel)
2635
ax.set_ylabel(ylabel)
2736
ax.set_title(title)
28-
37+
2938
for index, value in enumerate(data[x_col]):
3039
ax.text(value, index, value_format.format(value), color="black", ha="left", va="center")
3140

3241

33-
def plot_benchmark_results(results: Dict[str, Tuple[float, float]]):
42+
def plot_benchmark_results(results: dict[str, tuple[float, float]]):
3443
models = list(results.keys())
3544
times = {model: results[model][0] for model in models}
3645
throughputs = {model: results[model][1] for model in models}
@@ -40,13 +49,29 @@ def plot_benchmark_results(results: Dict[str, Tuple[float, float]]):
4049

4150
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 6))
4251

43-
_plot_bar_chart(ax1, time_data, "Time", "Model",
44-
"Average Inference Time (ms)", "Model Type",
45-
"ResNet50 - Inference Benchmark Results", "rocket", "{:.2f} ms")
52+
_plot_bar_chart(
53+
ax1,
54+
time_data,
55+
"Time",
56+
"Model",
57+
"Average Inference Time (ms)",
58+
"Model Type",
59+
"ResNet50 - Inference Benchmark Results",
60+
"rocket",
61+
"{:.2f} ms",
62+
)
4663

47-
_plot_bar_chart(ax2, throughput_data, "Throughput", "Model",
48-
"Throughput (samples/sec)", "",
49-
"ResNet50 - Throughput Benchmark Results", "viridis", "{:.2f}")
64+
_plot_bar_chart(
65+
ax2,
66+
throughput_data,
67+
"Throughput",
68+
"Model",
69+
"Throughput (samples/sec)",
70+
"",
71+
"ResNet50 - Throughput Benchmark Results",
72+
"viridis",
73+
"{:.2f}",
74+
)
5075

5176
plt.tight_layout()
5277
plt.savefig(PLOT_OUTPUT_PATH, bbox_inches="tight")
@@ -66,10 +91,7 @@ def parse_arguments():
6691
)
6792

6893
parser.add_argument(
69-
"--topk",
70-
type=int,
71-
default=DEFAULT_TOPK,
72-
help="Number of top predictions to show"
94+
"--topk", type=int, default=DEFAULT_TOPK, help="Number of top predictions to show"
7395
)
7496

7597
parser.add_argument(

main.py

Lines changed: 17 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
import logging
22
import warnings
3-
from typing import Dict, Tuple
43

54
import torch
65

@@ -18,53 +17,58 @@
1817
CUDA_AVAILABLE = False
1918
if torch.cuda.is_available():
2019
try:
21-
import torch_tensorrt
20+
import torch_tensorrt # noqa: F401
21+
2222
CUDA_AVAILABLE = True
2323
except ImportError:
2424
print("torch-tensorrt not installed. Running in CPU mode only.")
2525

2626

27-
def _run_onnx_inference(args, model_loader, img_batch) -> Dict[str, Tuple[float, float]]:
27+
def _run_onnx_inference(args, model_loader, img_batch) -> dict[str, tuple[float, float]]:
2828
onnx_inference = ONNXInference(model_loader, args.onnx_path, debug_mode=args.DEBUG)
2929
benchmark_result = onnx_inference.benchmark(img_batch)
3030
onnx_inference.predict(img_batch)
3131
return {"ONNX (CPU)": benchmark_result}
3232

3333

34-
def _run_openvino_inference(args, model_loader, img_batch) -> Dict[str, Tuple[float, float]]:
34+
def _run_openvino_inference(args, model_loader, img_batch) -> dict[str, tuple[float, float]]:
3535
ov_inference = OVInference(model_loader, args.ov_path, debug_mode=args.DEBUG)
3636
benchmark_result = ov_inference.benchmark(img_batch)
3737
ov_inference.predict(img_batch)
3838
return {"OpenVINO (CPU)": benchmark_result}
3939

4040

41-
def _run_pytorch_cpu_inference(args, model_loader, img_batch) -> Dict[str, Tuple[float, float]]:
41+
def _run_pytorch_cpu_inference(args, model_loader, img_batch) -> dict[str, tuple[float, float]]:
4242
pytorch_cpu_inference = PyTorchInference(model_loader, device="cpu", debug_mode=args.DEBUG)
4343
benchmark_result = pytorch_cpu_inference.benchmark(img_batch)
4444
pytorch_cpu_inference.predict(img_batch)
4545
return {"PyTorch (CPU)": benchmark_result}
4646

4747

48-
def _run_pytorch_cuda_inference(args, model_loader, device, img_batch) -> Dict[str, Tuple[float, float]]:
48+
def _run_pytorch_cuda_inference(
49+
args, model_loader, device, img_batch
50+
) -> dict[str, tuple[float, float]]:
4951
print("Running CUDA inference...")
5052
pytorch_cuda_inference = PyTorchInference(model_loader, device=device, debug_mode=args.DEBUG)
5153
benchmark_result = pytorch_cuda_inference.benchmark(img_batch)
5254
pytorch_cuda_inference.predict(img_batch)
5355
return {"PyTorch (CUDA)": benchmark_result}
5456

5557

56-
def _run_tensorrt_inference(args, model_loader, device, img_batch) -> Dict[str, Tuple[float, float]]:
58+
def _run_tensorrt_inference(
59+
args, model_loader, device, img_batch
60+
) -> dict[str, tuple[float, float]]:
5761
results = {}
5862
precisions = [torch.float16, torch.float32]
59-
63+
6064
for precision in precisions:
6165
tensorrt_inference = TensorRTInference(
6266
model_loader, device=device, precision=precision, debug_mode=args.DEBUG
6367
)
6468
benchmark_result = tensorrt_inference.benchmark(img_batch)
6569
tensorrt_inference.predict(img_batch)
6670
results[f"TRT_{precision}"] = benchmark_result
67-
71+
6872
return results
6973

7074

@@ -76,7 +80,7 @@ def main():
7680

7781
benchmark_results = {}
7882
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
79-
83+
8084
model_loader = ModelLoader(device=device)
8185
img_processor = ImageProcessor(img_path=args.image_path, device=device)
8286
img_batch = img_processor.process_image()
@@ -92,7 +96,9 @@ def main():
9296

9397
if torch.cuda.is_available():
9498
if args.mode in ["cuda", "all"]:
95-
benchmark_results.update(_run_pytorch_cuda_inference(args, model_loader, device, img_batch))
99+
benchmark_results.update(
100+
_run_pytorch_cuda_inference(args, model_loader, device, img_batch)
101+
)
96102

97103
if args.mode in ["tensorrt", "all"]:
98104
benchmark_results.update(_run_tensorrt_inference(args, model_loader, device, img_batch))

src/image_processor.py

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,3 @@
1-
from typing import Union
2-
31
import torch
42
from PIL import Image
53
from torchvision import transforms
@@ -11,19 +9,21 @@
119

1210

1311
class ImageProcessor:
14-
def __init__(self, img_path: str, device: Union[str, torch.device] = "cuda") -> None:
12+
def __init__(self, img_path: str, device: str | torch.device = "cuda") -> None:
1513
self.img_path = img_path
1614
self.device = device if isinstance(device, torch.device) else torch.device(device)
1715
self.transform = self._create_transform()
1816

1917
@staticmethod
2018
def _create_transform() -> transforms.Compose:
21-
return transforms.Compose([
22-
transforms.Resize(IMAGE_SIZE),
23-
transforms.CenterCrop(CROP_SIZE),
24-
transforms.ToTensor(),
25-
transforms.Normalize(IMAGENET_MEAN, IMAGENET_STD),
26-
])
19+
return transforms.Compose(
20+
[
21+
transforms.Resize(IMAGE_SIZE),
22+
transforms.CenterCrop(CROP_SIZE),
23+
transforms.ToTensor(),
24+
transforms.Normalize(IMAGENET_MEAN, IMAGENET_STD),
25+
]
26+
)
2727

2828
def process_image(self) -> torch.Tensor:
2929
img = Image.open(self.img_path)

src/inference_base.py

Lines changed: 18 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
import logging
22
import time
3-
from typing import Optional, Tuple
43

54
import numpy as np
65
import torch
@@ -17,8 +16,8 @@ class InferenceBase:
1716
def __init__(
1817
self,
1918
model_loader,
20-
onnx_path: Optional[str] = None,
21-
ov_path: Optional[str] = None,
19+
onnx_path: str | None = None,
20+
ov_path: str | None = None,
2221
topk: int = DEFAULT_TOPK,
2322
debug_mode: bool = False,
2423
batch_size: int = DEFAULT_BATCH_SIZE,
@@ -52,46 +51,48 @@ def _warmup(self, input_batch: torch.Tensor, warmup_runs: int):
5251
for img in input_batch:
5352
self.predict(img.unsqueeze(0), is_benchmark=True)
5453

55-
def _run_benchmark(self, input_batch: torch.Tensor, num_runs: int) -> Tuple[float, int]:
54+
def _run_benchmark(self, input_batch: torch.Tensor, num_runs: int) -> tuple[float, int]:
5655
logging.info(f"Starting benchmark for {self.__class__.__name__} inference...")
5756
start_time = time.time()
5857
for _ in range(num_runs):
5958
for img in input_batch:
6059
self.predict(img.unsqueeze(0), is_benchmark=True)
61-
60+
6261
elapsed_time = time.time() - start_time
6362
total_samples = self.batch_size * num_runs
6463
return elapsed_time, total_samples
6564

66-
def _calculate_metrics(self, elapsed_time: float, total_samples: int) -> Tuple[float, float]:
65+
def _calculate_metrics(self, elapsed_time: float, total_samples: int) -> tuple[float, float]:
6766
avg_time = (elapsed_time / total_samples) * MS_PER_SECOND
6867
throughput = total_samples / elapsed_time
69-
68+
7069
logging.info(f"Average inference time: {avg_time:.4f} ms")
7170
logging.info(f"Throughput: {throughput:.2f} samples/sec")
72-
71+
7372
if self.debug_mode:
7473
print(f"Average inference time for {self.__class__.__name__}: {avg_time:.4f} ms")
7574
print(f"Throughput for {self.__class__.__name__}: {throughput:.2f} samples/sec")
76-
75+
7776
return avg_time, throughput
7877

7978
def benchmark(
80-
self,
81-
input_data: torch.Tensor,
82-
num_runs: int = DEFAULT_NUM_RUNS,
83-
warmup_runs: int = DEFAULT_WARMUP_RUNS
84-
) -> Tuple[float, float]:
79+
self,
80+
input_data: torch.Tensor,
81+
num_runs: int = DEFAULT_NUM_RUNS,
82+
warmup_runs: int = DEFAULT_WARMUP_RUNS,
83+
) -> tuple[float, float]:
8584
input_batch = self._prepare_batch(input_data)
8685
self._warmup(input_batch, warmup_runs)
8786
elapsed_time, total_samples = self._run_benchmark(input_batch, num_runs)
8887
return self._calculate_metrics(elapsed_time, total_samples)
8988

90-
def get_top_predictions(self, prob: np.ndarray, is_benchmark: bool = False) -> Optional[np.ndarray]:
89+
def get_top_predictions(
90+
self, prob: np.ndarray, is_benchmark: bool = False
91+
) -> np.ndarray | None:
9192
if is_benchmark:
9293
return None
9394

94-
top_indices = prob.argsort()[-self.topk:][::-1]
95+
top_indices = prob.argsort()[-self.topk :][::-1]
9596
top_probs = prob[top_indices]
9697

9798
for i in range(self.topk):
@@ -100,5 +101,5 @@ def get_top_predictions(self, prob: np.ndarray, is_benchmark: bool = False) -> O
100101
logging.info(f"#{i + 1}: {int(probability * 100)}% {class_label}")
101102
if self.debug_mode:
102103
print(f"#{i + 1}: {int(probability * 100)}% {class_label}")
103-
104+
104105
return prob

0 commit comments

Comments (0)