diff --git a/README.md b/README.md index 65413d0..36173f5 100644 --- a/README.md +++ b/README.md @@ -139,7 +139,7 @@ anomavision detect --config config.yml --img_path ./test_images --thresh 13.0 anomavision eval --config config.yml --enable_visualization # Export to ONNX / TorchScript / OpenVINO / all -anomavision export --model padim_model.pt --format all --precision fp16 +anomavision export --config config.yml --model model.pt --format all --precision fp16 ``` Every command has full `--help`: @@ -187,8 +187,8 @@ model = anomavision.Padim( model.fit(loader) # --- 3. Save --- -torch.save(model, "padim_model.pt") # full model (for export or further use) -model.save_statistics("padim_model.pth", half=True) # stats-only (smaller, faster to load) +torch.save(model, "model.pt") # full model (for export or further use) +model.save_statistics("model.pth", half=True) # stats-only (smaller, faster to load) # --- 4. Infer --- # scores: (batch_size,) — scalar anomaly score per image. Higher = more anomalous. @@ -302,7 +302,7 @@ Full docs at **http://localhost:8000/docs** once the server is running. 
```bash anomavision export \ --model_data_path ./distributions/anomav_exp \ - --model padim_model.pt \ + --model model.pt \ --format onnx \ --precision fp16 \ --quantize-dynamic @@ -329,7 +329,7 @@ stream_mode: true stream_source: type: webcam camera_id: 0 -model: padim_model.onnx +model: model.onnx thresh: 13.0 enable_visualization: true ``` @@ -361,11 +361,11 @@ backbone: resnet18 batch_size: 16 feat_dim: 100 layer_indices: [0, 1, 2] -output_model: padim_model.pt +output_model: model.pt run_name: exp1 model_data_path: ./distributions/anomav_exp -model: padim_model.onnx +model: model.onnx device: auto # auto | cpu | cuda thresh: 13.0 diff --git a/anomavision/cli.py b/anomavision/cli.py index 6de3131..f37d949 100644 --- a/anomavision/cli.py +++ b/anomavision/cli.py @@ -11,9 +11,9 @@ Examples: anomavision train --config config.yml - anomavision export --model padim_model.pt --format onnx - anomavision detect --model padim_model.onnx --img_path ./test_images - anomavision eval --model padim_model.pt --class_name bottle + anomavision export --config config.yml --model model.pt --format onnx + anomavision detect --config config.yml --model model.onnx --img_path ./test_images + anomavision eval --config config.yml --model model.pt --class_name bottle """ import argparse @@ -35,9 +35,9 @@ def create_parser() -> argparse.ArgumentParser: epilog=""" Examples: %(prog)s train --config config.yml --dataset_path /data --class_name bottle - %(prog)s export --model padim_model.pt --format onnx --quantize-dynamic - %(prog)s detect --model padim_model.onnx --img_path ./test --enable_visualization - %(prog)s eval --model padim_model.pt --class_name bottle --dataset_path /data + %(prog)s export --model model.pt --format onnx --quantize-dynamic + %(prog)s detect --model model.onnx --img_path ./test --enable_visualization + %(prog)s eval --model model.pt --class_name bottle --dataset_path /data For detailed help on each command: %(prog)s train --help diff --git a/anomavision/detect.py 
b/anomavision/detect.py index 2f76634..02dae24 100644 --- a/anomavision/detect.py +++ b/anomavision/detect.py @@ -1,11 +1,11 @@ """ Run Anomaly detection inference on images using various model formats. Usage - formats: - $ python detect.py --model padim_model.pt # PyTorch - padim_model.torchscript # TorchScript - padim_model.onnx # ONNX Runtime - padim_model_openvino # OpenVINO - padim_model.engine # TensorRT + $ python detect.py --model model.pt # PyTorch + model.torchscript # TorchScript + model.onnx # ONNX Runtime + model_openvino # OpenVINO + model.engine # TensorRT """ import argparse @@ -61,13 +61,19 @@ def create_parser(add_help: bool = True) -> argparse.ArgumentParser: parser.add_argument( "--model_data_path", type=str, - default="./distributions/anomav_exp", + default="./distributions", help="Directory containing model files.", ) + parser.add_argument( + "--algorithm", + type=str, + default=None, + help="Algorithm name (e.g., padim, patchcore).", + ) parser.add_argument( "--model", type=str, - default="padim_model.pt", + default=None, help="Model file (.pt for PyTorch, .onnx for ONNX, .engine for TensorRT)", ) parser.add_argument( @@ -121,7 +127,7 @@ def create_parser(add_help: bool = True) -> argparse.ArgumentParser: ) parser.add_argument( "--run_name", - default="detect_exp", + default=None, help="experiment name for this inference run", ) parser.add_argument( @@ -178,7 +184,19 @@ def run_inference(args): cfg = load_config(str(args.config)) else: # Fallback to model directory config - cfg = load_config(str(Path(args.model_data_path) / "config.yml")) + potential_paths = [] + if args.model_data_path: + base_path = Path(args.model_data_path) + potential_paths.append(base_path / "config.yml") + + cfg = {} + for path in potential_paths: + if path.exists(): + cfg = load_config(str(path)) + break + + if not cfg: + cfg = {} # Merge config with CLI args config = edict(merge_config(args, cfg)) @@ -253,7 +271,7 @@ def run_inference(args): # --- Model Loading Phase 
--- with profilers["model_loading"]: - model_path = os.path.join(MODEL_DATA_PATH, config.model) + model_path = os.path.join(MODEL_DATA_PATH, config.algorithm, config.class_name, config.run_name, config.model) logger.info(f"Loading model: {model_path}") if not os.path.exists(model_path): @@ -273,7 +291,7 @@ def run_inference(args): run_name = config.run_name viz_output_dir = config.get("viz_output_dir", "./visualizations/") RESULTS_PATH = increment_path( - Path(viz_output_dir) / model_type.value.upper() / run_name, + Path(viz_output_dir) / config.algorithm / config.class_name / model_type.value.upper() / run_name, exist_ok=config.get("overwrite", False), mkdir=True, ) diff --git a/anomavision/eval.py b/anomavision/eval.py index 5709a0d..e0dbc2a 100644 --- a/anomavision/eval.py +++ b/anomavision/eval.py @@ -21,6 +21,9 @@ get_logger, merge_config, setup_logging, + find_best_threshold_f1, + compute_metrics, + find_optimal_threshold ) @@ -40,7 +43,6 @@ def create_parser(add_help: bool = True) -> argparse.ArgumentParser: "--dataset_path", default=None, type=str, - required=False, help="Path to the dataset folder containing test images.", ) parser.add_argument( @@ -54,13 +56,19 @@ def create_parser(add_help: bool = True) -> argparse.ArgumentParser: parser.add_argument( "--model_data_path", type=str, - default="./distributions/anomav_exp", + default=None, help="Directory containing AnomaVision model files.", ) + parser.add_argument( + "--algorithm", + type=str, + default=None, + help="Algorithm name (e.g., padim, patchcore).", + ) parser.add_argument( "--model", type=str, - default="padim_model.pt", + default=None, help="Model filename (.pt, .onnx, .engine)", ) parser.add_argument( @@ -110,7 +118,7 @@ def create_parser(add_help: bool = True) -> argparse.ArgumentParser: parser.add_argument( "--viz_output_dir", type=str, - default="./eval_visualizations/", + default=None, help="Directory to save visualization images.", ) @@ -131,38 +139,6 @@ def create_parser(add_help: bool 
= True) -> argparse.ArgumentParser: return parser -def compute_metrics(labels, scores, thresh=None): - """ - Calculate standard anomaly detection metrics. - """ - metrics = {} - - # AUROC - try: - metrics['auc_score'] = float(roc_auc_score(labels, scores)) - except ValueError: - metrics['auc_score'] = 0.0 - - # PR-AUC - try: - precision, recall, _ = precision_recall_curve(labels, scores) - metrics['pr_auc'] = float(auc(recall, precision)) - except ValueError: - metrics['pr_auc'] = 0.0 - - # Statistics - metrics['mean_anomaly_score'] = float(np.mean(scores)) - metrics['std_anomaly_score'] = float(np.std(scores)) - - # Accuracy (if thresh provided) - if thresh is not None: - predictions = (scores > thresh).astype(int) - metrics['accuracy'] = float(np.mean(predictions == labels)) - metrics['thresh'] = thresh - - return metrics - - def evaluate_model_with_wrapper( model_wrapper, test_dataloader, logger, evaluation_profiler, detailed_timing=False ): @@ -263,7 +239,7 @@ def run_evaluation(args): # Load Model Phase with profilers["model_loading"]: - model_path = os.path.join(MODEL_DATA_PATH, config.model) + model_path = os.path.join(MODEL_DATA_PATH, config.algorithm, config.class_name, config.run_name, config.model) logger.info(f"Loading model: {model_path}") if not os.path.exists(model_path): @@ -323,7 +299,12 @@ def run_evaluation(args): model_wrapper.close() # Compute Metrics - metrics = compute_metrics(labels, scores, thresh=config.thresh) + if config.thresh is None: + best_thresh, _ = find_optimal_threshold(labels, scores) + else: + best_thresh = config.thresh + + metrics = compute_metrics(labels, scores, thresh=best_thresh) # Add timing metrics total_images = len(test_dataset) @@ -368,7 +349,7 @@ def run_evaluation(args): logger.info(f"Data loading time: {profilers['data_loading'].accumulated_time * 1000:.2f} ms") logger.info(f"Evaluation time: {profilers['evaluation'].accumulated_time * 1000:.2f} ms") logger.info(f"Visualization time: 
{profilers['visualization'].accumulated_time * 1000:.2f} ms") - logger.info("=" * 60) + # logger.info("=" * 60) # 2. PERFORMANCE METRICS logger.info("=" * 60) @@ -382,7 +363,7 @@ def run_evaluation(args): if len(test_dataloader) > 0: images_per_batch = total_images / len(test_dataloader) logger.info(f"Evaluation throughput: {evaluation_fps * images_per_batch:.1f} images/sec (batch size: {batch_size})") - logger.info("=" * 60) + # logger.info("=" * 60) # 3. EVALUATION SUMMARY logger.info("=" * 60) @@ -393,6 +374,18 @@ def run_evaluation(args): logger.info(f"Model type: {model_type.value.upper() if model_type else 'UNKNOWN'}") logger.info(f"Device: {device_str}") logger.info(f"Image processing: resize={config.resize}, crop_size={config.crop_size}, normalize={config.normalize}") + # logger.info("=" * 60) + + logger.info("=" * 60) + logger.info("ANOMAVISION DETECTION METRICS") + logger.info("=" * 60) + + for k, v in metrics.items(): + if isinstance(v, float): + logger.info(f"{k.replace('_',' ').title():<28} {v:.6f}") + else: + logger.info(f"{k.replace('_',' ').title():<28} {v}") + logger.info("=" * 60) logger.info("AnomaVision anomaly detection model evaluation completed successfully") diff --git a/anomavision/export.py b/anomavision/export.py index d717867..0d15609 100644 --- a/anomavision/export.py +++ b/anomavision/export.py @@ -604,10 +604,17 @@ def create_parser(add_help: bool = True) -> argparse.ArgumentParser: parser.add_argument( "--model_data_path", type=str, - default="./distributions/anomav_exp", + default="./distributions", help="Directory containing model and output location", ) + parser.add_argument( + "--algorithm", + type=str, + default=None, + help="Algorithm name (e.g., padim, patchcore).", + ) + parser.add_argument( "--model", type=str, @@ -692,7 +699,22 @@ def main(args=None): if args.config is not None: cfg = load_config(str(args.config)) else: - cfg = load_config(str(Path(args.model_data_path) / "config.yml")) + # Try to find config in structured 
path first + potential_paths = [] + if args.model_data_path: + # Try to infer from model path if it follows structure + base_path = Path(args.model_data_path) + potential_paths.append(base_path / "config.yml") + + # Use first existing config + cfg = {} + for path in potential_paths: + if path.exists(): + cfg = load_config(str(path)) + break + + if not cfg: + cfg = {} config = edict(merge_config(args, cfg)) @@ -700,8 +722,8 @@ def main(args=None): setup_logging(enabled=True, log_level=config.log_level, log_to_file=True) logger = get_logger("anomavision.export") - model_path = Path(config.model_data_path) / config.model - output_dir = Path(config.model_data_path) + model_path = Path(config.model_data_path) / config.algorithm / config.class_name / config.run_name / config.model + output_dir = Path(config.model_data_path) / config.algorithm / config.class_name / config.run_name model_stem = Path(config.model).stem # Generate output names diff --git a/anomavision/train.py b/anomavision/train.py index 85e0726..f0dbeb9 100644 --- a/anomavision/train.py +++ b/anomavision/train.py @@ -124,6 +124,12 @@ def create_parser(add_help: bool = True) -> argparse.ArgumentParser: default=None, help="Directory to save model distributions and PT file.", ) + parser.add_argument( + "--algorithm", + type=str, + default=None, + help="Algorithm name (e.g., padim, patchcore).", + ) parser.add_argument( "--log_level", type=str, @@ -176,7 +182,7 @@ def run_training(args): # Resolve output run dir once run_dir = increment_path( - Path(config.model_data_path) / config.run_name, exist_ok=True, mkdir=True + Path(config.model_data_path) / config.algorithm / config.class_name / config.run_name, exist_ok=True, mkdir=True ) # === Dataset === diff --git a/anomavision/utils.py b/anomavision/utils.py index 6351647..d6d0bc7 100644 --- a/anomavision/utils.py +++ b/anomavision/utils.py @@ -14,6 +14,9 @@ from PIL import Image from torchvision import transforms as T +from sklearn.metrics import roc_curve +from 
sklearn.metrics import roc_auc_score, precision_recall_curve, auc + # Default standard transforms - kept for backward compatibility standard_image_transform = T.Compose( [ @@ -786,3 +789,83 @@ def adaptive_gaussian_blur(input_array, kernel_size=33, sigma=4): except ImportError: raise ImportError("SciPy is required when PyTorch is not available") + +def find_best_threshold_f1(labels, scores): + precision, recall, thresholds = precision_recall_curve(labels, scores) + + f1 = 2 * precision * recall / (precision + recall + 1e-12) + best_idx = np.argmax(f1) + + return thresholds[best_idx], f1[best_idx] + +# NOTE: roc_curve is already imported at the top of this module. + +def find_best_threshold_roc(labels, scores): + fpr, tpr, thresholds = roc_curve(labels, scores) + + j = tpr - fpr + idx = np.argmax(j) + + return thresholds[idx], j[idx] + + +def find_best_threshold_accuracy(labels, scores): + thresholds = np.linspace(scores.min(), scores.max(), 500) + + best_thresh = thresholds[0] + best_acc = 0 + + for t in thresholds: + preds = (scores > t).astype(int) + acc = np.mean(preds == labels) + + if acc > best_acc: + best_acc = acc + best_thresh = t + + return best_thresh, best_acc + +def compute_metrics(labels, scores, thresh=None): + """ + Calculate standard anomaly detection metrics. 
+ """ + metrics = {} + + # AUROC + try: + metrics['auc_score'] = float(roc_auc_score(labels, scores)) + except ValueError: + metrics['auc_score'] = 0.0 + + # PR-AUC + try: + precision, recall, _ = precision_recall_curve(labels, scores) + metrics['pr_auc'] = float(auc(recall, precision)) + except ValueError: + metrics['pr_auc'] = 0.0 + + # Statistics + metrics['mean_anomaly_score'] = float(np.mean(scores)) + metrics['std_anomaly_score'] = float(np.std(scores)) + + # Accuracy (if thresh provided) + if thresh is not None: + predictions = (scores > thresh).astype(int) + metrics['accuracy'] = float(np.mean(predictions == labels)) + metrics['thresh'] = thresh + + return metrics + + +def find_optimal_threshold(labels, scores): + precision, recall, thresholds = precision_recall_curve(labels, scores) + + # remove last point (no threshold there) + precision = precision[:-1] + recall = recall[:-1] + + f1 = 2 * precision * recall / (precision + recall + 1e-12) + + idx = np.argmax(f1) + + return thresholds[idx], f1[idx] diff --git a/apps/api/fastapi_app.py b/apps/api/fastapi_app.py index 172e3b6..a9a00a4 100644 --- a/apps/api/fastapi_app.py +++ b/apps/api/fastapi_app.py @@ -29,8 +29,8 @@ RESIZE_SIZE = (224, 224) # You can override these via environment variables -MODEL_DATA_PATH = os.getenv("ANOMAVISION_MODEL_DATA_PATH", "distributions/anomav_exp") -MODEL_FILE = os.getenv("ANOMAVISION_MODEL_FILE", "padim_model.onnx") +MODEL_DATA_PATH = os.getenv("ANOMAVISION_MODEL_DATA_PATH", "distributions/padim/bottle/anomav_exp") +MODEL_FILE = os.getenv("ANOMAVISION_MODEL_FILE", "model.onnx") DEVICE = os.getenv("ANOMAVISION_DEVICE", "auto") # "auto"|"cpu"|"cuda" # Visualization parameters (match detect.py defaults) diff --git a/apps/ui/gradio_app.py b/apps/ui/gradio_app.py index 10da41e..715fb23 100644 --- a/apps/ui/gradio_app.py +++ b/apps/ui/gradio_app.py @@ -8,7 +8,7 @@ ANOMAVISION_MODEL_DATA_PATH path that contains the model file (default: "distributions/anomav_exp") 
ANOMAVISION_MODEL_FILE model filename - (default: "padim_model.onnx") + (default: "model.onnx") ANOMAVISION_DEVICE "auto" | "cpu" | "cuda" (default: "auto") ANOMAVISION_THRESHOLD float anomaly threshold (default: 13.0) ANOMAVISION_VIZ_PADDING int, boundary-frame padding (default: 40) @@ -41,8 +41,8 @@ # ───────────────────────────────────────────────────────────────────────────── # Config # ───────────────────────────────────────────────────────────────────────────── -MODEL_DATA_PATH = os.getenv("ANOMAVISION_MODEL_DATA_PATH", "distributions/anomav_exp") -MODEL_FILE = os.getenv("ANOMAVISION_MODEL_FILE", "padim_model.onnx") +MODEL_DATA_PATH = os.getenv("ANOMAVISION_MODEL_DATA_PATH", "distributions/padim/bottle/anomav_exp") +MODEL_FILE = os.getenv("ANOMAVISION_MODEL_FILE", "model.onnx") DEVICE_ENV = os.getenv("ANOMAVISION_DEVICE", "auto") THRESHOLD_DEFAULT = float(os.getenv("ANOMAVISION_THRESHOLD", "13.0")) VIZ_PADDING = int(os.getenv("ANOMAVISION_VIZ_PADDING", "40")) diff --git a/config.yml b/config.yml index 5eda202..75b672f 100644 --- a/config.yml +++ b/config.yml @@ -14,11 +14,12 @@ norm_std: [0.229, 0.224, 0.225] # Standard deviation for normalization # Model / training # ========================= backbone: "resnet18" # Backbone CNN architecture (resnet18 | wide_resnet50) +algorithm: "padim" # Algorithm to use (padim | patchcore) feat_dim: 50 # Feature dimension size for embedding layer_indices: [0] # Which backbone layers to extract features from (0,1,2,3) model_data_path: "./distributions" # Path to store/load model-related data -model: "padim_model.pt" # File name for saved model (used by detect/eval/export) -output_model: "padim_model.pt" # File name for saving trained model (train.py expects this) +model: "model.pt" # File name for saved model (used by detect/eval/export) +output_model: "model.pt" # File name for saving trained model (train.py expects this) batch_size: 2 # Training/evaluation/inference batch size device: "auto" # Device to run on: "cpu", 
"cuda", or "auto" diff --git a/docs/api.md b/docs/api.md index 29c3859..e4a2fe0 100644 --- a/docs/api.md +++ b/docs/api.md @@ -53,10 +53,10 @@ model = anomavision.Padim( model.fit(loader) # Save full model -model.save("padim_model.pt") +model.save("model.pt") # Save compact stats-only artifact -model.save_statistics("padim_model.pth", half=True) +model.save_statistics("model.pth", half=True) ``` --- @@ -110,7 +110,7 @@ from anomavision.utils import get_logger logger = get_logger("anomavision.export") exporter = ModelExporter( - model_path=Path("./distributions/anomav_exp/padim_model.pt"), + model_path=Path("./distributions/anomav_exp/model.pt"), output_dir=Path("./exports"), logger=logger, device="cuda" diff --git a/docs/cli.md b/docs/cli.md index b49e7ad..7be7f40 100644 --- a/docs/cli.md +++ b/docs/cli.md @@ -44,7 +44,7 @@ anomavision train [options] | `--batch_size` | int | 2 | Batch size | | `--feat_dim` | int | 50 | Number of random features | | `--layer_indices` | int list | [0] | Backbone layer indices | -| `--output_model` | str | padim_model.pt | Model filename (`.pt`) | +| `--output_model` | str | model.pt | Model filename (`.pt`) | | `--run_name` | str | anomav_exp | Experiment name | | `--model_data_path` | str | ./distributions | Output directory | | `--log_level` | str | INFO | Logging level | @@ -72,7 +72,7 @@ anomavision detect [options] | `--config` | str | None | Path to config file | | `--img_path` | str | None | Path to test images | | `--model_data_path` | str | ./distributions/anomav_exp | Directory with model files | -| `--model` | str | padim_model.pt | Model file (`.pt`, `.onnx`, `.engine`) | +| `--model` | str | model.pt | Model file (`.pt`, `.onnx`, `.engine`) | | `--device` | str | auto | Device (`cpu`, `cuda`, `auto`) | | `--batch_size` | int | 1 | Batch size | | `--thresh` | float | None | Anomaly threshold | @@ -111,7 +111,7 @@ anomavision eval [options] | `--dataset_path` | str | None | Root dataset path | | `--class_name` | str | 
bottle | Class name (MVTec style) | | `--model_data_path` | str | ./distributions/anomav_exp | Directory with model files | -| `--model` | str | padim_model.onnx | Model file | +| `--model` | str | model.onnx | Model file | | `--device` | str | auto | Device (`cpu`, `cuda`) | | `--batch_size` | int | 32 | Batch size | | `--num_workers` | int | 1 | Data loader workers | @@ -161,7 +161,7 @@ anomavision export [options] ```bash anomavision export \ --model_data_path ./distributions/anomav_exp \ - --model padim_model.pt \ + --model model.pt \ --format onnx \ --precision fp16 \ --quantize-dynamic diff --git a/docs/config.md b/docs/config.md index 5fa2829..728bc99 100644 --- a/docs/config.md +++ b/docs/config.md @@ -108,11 +108,11 @@ backbone: resnet18 batch_size: 16 feat_dim: 100 layer_indices: [0, 1, 2] -output_model: padim_model.pt +output_model: model.pt run_name: exp1 -model_data_path: ./distributions/anomav_exp +model_data_path: ./distributions/padim/bottle/anomav_exp -model: padim_model.onnx +model: model.onnx device: auto enable_visualization: true save_visualizations: true diff --git a/docs/fastapi_backend.md b/docs/fastapi_backend.md index 300f709..53c9288 100644 --- a/docs/fastapi_backend.md +++ b/docs/fastapi_backend.md @@ -239,7 +239,7 @@ Create a `.env` file in your project root: ```bash # Model Configuration -MODEL_PATH=./models/padim_model.pth +MODEL_PATH=./models/model.pth CONFIG_PATH=./config.yml # Server Configuration @@ -494,7 +494,7 @@ Update: CORS_ORIGINS=["http://your-frontend-url.com"] **Issue: Slow inference** ``` Solution: Use ONNX or TensorRT models for faster inference -Export: anomavision export --model padim_model.pt --format onnx +Export: anomavision export --config config.yml --model model.pt --format onnx ``` **Issue: Memory errors** diff --git a/docs/quickstart.md b/docs/quickstart.md index 36fce78..495b327 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -34,13 +34,14 @@ dataset/ ```bash anomavision train \ + --config 
config.yml \ --dataset_path ./dataset \ --class_name bottle \ --backbone resnet18 \ --batch_size 16 \ --feat_dim 100 \ --layer_indices 0 1 2 \ - --output_model padim_model.pt \ + --output_model model.pt \ --run_name exp1 \ --model_data_path ./distributions/anomav_exp ``` @@ -58,7 +59,7 @@ backbone: resnet18 batch_size: 16 feat_dim: 100 layer_indices: [0, 1, 2] -output_model: padim_model.pt +output_model: model.pt run_name: exp1 model_data_path: ./distributions/anomav_exp resize: [256, 192] @@ -81,8 +82,8 @@ anomavision train --config config.yml * Train PaDiM on `dataset/bottle/train/good` * Save: - * Full model → `padim_model.pt` - * Compact stats-only model → `padim_model.pth` + * Full model → `model.pt` + * Compact stats-only model → `model.pth` * Config snapshot → `config.yml` --- @@ -97,7 +98,7 @@ anomavision train --config config.yml anomavision detect \ --img_path ./dataset/bottle/test \ --model_data_path ./distributions/anomav_exp \ - --model padim_model.onnx \ + --model model.onnx \ --device auto \ --batch_size 8 \ --thresh 13.0 \ @@ -116,7 +117,7 @@ Create a **`config.yml`** or use the one saved in the model's directory: stream_mode: false img_path: ./dataset/bottle/test model_data_path: ./distributions/anomav_exp -model: padim_model.onnx +model: model.onnx device: auto batch_size: 8 thresh: 13.0 @@ -150,7 +151,7 @@ stream_source: camera_id: 0 model_data_path: ./distributions/anomav_exp -model: padim_model.onnx +model: model.onnx batch_size: 1 thresh: 13.0 enable_visualization: true @@ -171,7 +172,7 @@ stream_source: loop: false model_data_path: ./distributions/anomav_exp -model: padim_model.onnx +model: model.onnx batch_size: 1 thresh: 13.0 ``` @@ -192,7 +193,7 @@ stream_source: read_timeout: 1.0 model_data_path: ./distributions/anomav_exp -model: padim_model.onnx +model: model.onnx batch_size: 1 ``` @@ -209,7 +210,7 @@ stream_source: max_message_size: 10485760 model_data_path: ./distributions/anomav_exp -model: padim_model.onnx +model: model.onnx
batch_size: 1 ``` @@ -239,7 +240,7 @@ anomavision eval \ --dataset_path ./dataset \ --class_name bottle \ --model_data_path ./distributions/anomav_exp \ - --model padim_model.onnx \ + --model model.onnx \ --batch_size 8 \ --enable_visualization \ --save_visualizations \ @@ -256,7 +257,7 @@ Create a **`config.yml`** or use the one saved in the model's directory: dataset_path: ./dataset class_name: bottle model_data_path: ./distributions/anomav_exp -model: padim_model.onnx +model: model.onnx batch_size: 8 enable_visualization: true save_visualizations: true @@ -296,7 +297,7 @@ Quantization (INT8) is also supported. ```bash anomavision export \ --model_data_path ./distributions/anomav_exp \ - --model padim_model.pt \ + --model model.pt \ --format onnx \ --precision fp16 \ --quantize-dynamic @@ -310,7 +311,7 @@ Create a **`config.yml`** or use the one saved in the model's directory: ```yaml model_data_path: ./distributions/anomav_exp -model: padim_model.pt +model: model.pt format: onnx # choices: onnx | torchscript | openvino | all precision: fp16 # fp32 | fp16 | auto opset: 17