From 0270a02ad95b9274ea2efa2adaa684cf832319af Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Fri, 24 Apr 2026 16:53:41 +0800 Subject: [PATCH 1/3] TRCLI-230: Added quality rating support via --result-fields option --- CHANGELOG.MD | 1 + README.md | 64 +++++++++++++++ trcli/data_classes/data_parsers.py | 88 +------------------- trcli/data_classes/dataclass_testrail.py | 21 +++++ trcli/data_classes/quality_rating_parser.py | 91 +++++++++++++++++++++ 5 files changed, 178 insertions(+), 87 deletions(-) create mode 100644 trcli/data_classes/quality_rating_parser.py diff --git a/CHANGELOG.MD b/CHANGELOG.MD index 77d4d63..381e1af 100644 --- a/CHANGELOG.MD +++ b/CHANGELOG.MD @@ -12,6 +12,7 @@ _released 04--2026 ### Added - **AI Evaluation Template Support**: Uploading test result support for TestRail's AI Evaluation Template with multi-dimensional quality ratings. See README "AI Evaluation Template Support" section for complete examples. + - **Global Quality Rating via `--result-fields`**: Added support for applying quality ratings to all test results using `--result-fields quality_rating:'{"category": value}'`. Test-specific quality ratings in XML/JSON properties take precedence over CLI global ratings. 
 ## [1.14.1]
 
diff --git a/README.md b/README.md
index 0c7b839..35e6584 100644
--- a/README.md
+++ b/README.md
@@ -576,6 +576,70 @@ Traces: https://logs.example.com/trace/123
 Latency: 0.8 seconds
 ```
 
+### Using `--result-fields` for Quality Rating
+
+In addition to specifying quality ratings in XML/JSON properties, you can apply a **global quality rating** to all test results using the `--result-fields` command-line option:
+
+```shell
+trcli parse_junit \
+  -f sample_results.xml \
+  --project-id 1 \
+  --suite-id 2 \
+  --result-fields quality_rating:'{"factual_accuracy": 4, "reliability": 5, "performance": 3}'
+```
+
+#### Behavior
+
+- **Global Application**: The quality rating specified via `--result-fields` is applied to **all test results** that don't already have one
+- **Test-Specific Override**: Quality ratings specified in test properties/metadata **always take precedence** over `--result-fields`
+- **Validation**: The same validation rules apply (max 15 categories, 0-5 stars, at least one ≥ 1)
+
+#### Example: Mixed Quality Ratings
+
+```xml
+<testsuites>
+  <testsuite name="AI Evaluation Tests" tests="2">
+
+    <!-- Test WITHOUT a quality_rating property: receives the CLI global rating -->
+    <testcase name="Test factual accuracy [C100]" time="1.0">
+    </testcase>
+
+    <!-- Test WITH a quality_rating property: overrides the CLI global rating -->
+    <testcase name="Test response relevance [C101]" time="1.2">
+      <properties>
+        <property name="quality_rating" value='{"factual_accuracy": 5, "response_time": 5}'/>
+      </properties>
+    </testcase>
+
+  </testsuite>
+</testsuites>
+```
+
+CLI command:
+```shell
+trcli parse_junit \
+  -f report.xml \
+  --project-id 1 \
+  --suite-id 2 \
+  --result-fields quality_rating:'{"factual_accuracy": 4, "reliability": 5}'
+```
+
+**Result:**
+- **C100** gets the CLI quality rating: `{"factual_accuracy": 4, "reliability": 5}`
+- **C101** gets its test-specific quality rating: `{"factual_accuracy": 5, "response_time": 5}`
+
+#### Error Handling with --result-fields
+
+If the quality_rating value in `--result-fields` is invalid, TRCLI will exit with an error before uploading:
+
+```
+ERROR: Unable to parse quality_rating in --result-fields property.
+Star values must be between 0 and 5, got 10 for category 'accuracy'
+```
+
+**Note:** This is different from invalid property-based quality ratings, which log a warning and continue. CLI validation is stricter because it affects all results.
+ ## Behavior-Driven Development (BDD) Support The TestRail CLI provides comprehensive support for Behavior-Driven Development workflows using Gherkin syntax. The BDD features enable you to manage test cases written in Gherkin format, execute BDD tests with various frameworks (Cucumber, Behave, pytest-bdd, etc.), and seamlessly upload results to TestRail. diff --git a/trcli/data_classes/data_parsers.py b/trcli/data_classes/data_parsers.py index 837f232..8905d8e 100644 --- a/trcli/data_classes/data_parsers.py +++ b/trcli/data_classes/data_parsers.py @@ -1,5 +1,6 @@ import re, ast, json from beartype.typing import Union, List, Dict, Tuple, Optional +from trcli.data_classes.quality_rating_parser import QualityRatingParser class MatchersParser: @@ -202,90 +203,3 @@ def extract_last_words(input_string, max_characters=MAX_TESTCASE_TITLE_LENGTH): result = input_string[-max_characters:] return result - - -class QualityRatingParser: - """Parser for AI Evaluation Template quality ratings""" - - MAX_CATEGORIES = 15 - MIN_STAR_VALUE = 0 - MAX_STAR_VALUE = 5 - - @staticmethod - def parse_quality_rating(quality_rating_str: str) -> Tuple[Optional[Dict], Optional[str]]: - """ - Parse and validate quality rating JSON string. 
- - Validation rules: - - Must be valid JSON object - - Maximum 15 categories - - Star values must be integers 0-5 - - At least one category must have a value >= 1 - - :param quality_rating_str: JSON string containing quality ratings - :return: Tuple of (quality_rating_dict, error_message) - Returns (None, error_message) if validation fails - Returns (quality_rating_dict, None) if validation succeeds - - Example valid input: - '{"factual_accuracy": 5, "relevance": 4, "completeness": 3}' - - Example returns: - Success: ({"factual_accuracy": 5, "relevance": 4}, None) - Error: (None, "Quality rating must contain at most 15 categories (found 20)") - """ - if not quality_rating_str or not quality_rating_str.strip(): - return None, "Quality rating cannot be empty" - - # Parse JSON - try: - quality_rating = json.loads(quality_rating_str) - except json.JSONDecodeError as e: - return None, f"Quality rating must be valid JSON: {str(e)}" - - # Must be a dictionary - if not isinstance(quality_rating, dict): - return None, f"Quality rating must be a JSON object, got {type(quality_rating).__name__}" - - # Check if empty - if not quality_rating: - return None, "Quality rating cannot be an empty object" - - # Check max categories - num_categories = len(quality_rating) - if num_categories > QualityRatingParser.MAX_CATEGORIES: - return None, ( - f"Quality rating must contain at most {QualityRatingParser.MAX_CATEGORIES} " - f"categories (found {num_categories})" - ) - - # Validate star values - has_non_zero = False - for category, value in quality_rating.items(): - # Category name validation - if not isinstance(category, str) or not category.strip(): - return None, f"Category names must be non-empty strings" - - # Value must be an integer - if not isinstance(value, int): - return None, ( - f"Star values must be integers 0-{QualityRatingParser.MAX_STAR_VALUE}, " - f"got {type(value).__name__} for category '{category}'" - ) - - # Value must be in valid range - if value < 
QualityRatingParser.MIN_STAR_VALUE or value > QualityRatingParser.MAX_STAR_VALUE: - return None, ( - f"Star values must be between {QualityRatingParser.MIN_STAR_VALUE} and " - f"{QualityRatingParser.MAX_STAR_VALUE}, got {value} for category '{category}'" - ) - - # Track if at least one category has a non-zero value - if value >= 1: - has_non_zero = True - - # At least one category must have value >= 1 - if not has_non_zero: - return None, "Quality rating must have at least one category with a star value >= 1" - - return quality_rating, None diff --git a/trcli/data_classes/dataclass_testrail.py b/trcli/data_classes/dataclass_testrail.py index 6fc9ab1..5073c77 100644 --- a/trcli/data_classes/dataclass_testrail.py +++ b/trcli/data_classes/dataclass_testrail.py @@ -6,6 +6,7 @@ from trcli import settings from trcli.data_classes.validation_exception import ValidationException +from trcli.data_classes.quality_rating_parser import QualityRatingParser @serialize @@ -101,12 +102,32 @@ def prepend_comment(self, comment: str): def add_global_result_fields(self, results_fields: dict) -> None: """Add global result fields without overriding the existing test-specific result fields + Special handling for quality_rating: + - If present in results_fields, it's extracted and parsed via QualityRatingParser + - Parsed quality_rating is set on self.quality_rating attribute (not in result_fields dict) + - Test-specific quality_rating (from properties/metadata) takes precedence over CLI --result-fields + :param results_fields: Global results fields to be added to the result :return: None + :raises ValidationException: If quality_rating validation fails """ if not results_fields: return + new_results_fields = results_fields.copy() + + # Special handling for quality_rating field + if "quality_rating" in new_results_fields: + quality_rating_value = new_results_fields.pop("quality_rating") + + # Only apply CLI quality_rating if test doesn't already have one (test-specific takes precedence) + 
if self.quality_rating is None: + # Parse and validate the quality_rating + parsed_rating, error = QualityRatingParser.parse_quality_rating(quality_rating_value) + if error: + raise ValidationException("quality_rating", "--result-fields", error) + self.quality_rating = parsed_rating + new_results_fields.update(self.result_fields) self.result_fields = new_results_fields diff --git a/trcli/data_classes/quality_rating_parser.py b/trcli/data_classes/quality_rating_parser.py new file mode 100644 index 0000000..f1d1b4b --- /dev/null +++ b/trcli/data_classes/quality_rating_parser.py @@ -0,0 +1,91 @@ +"""Quality Rating Parser for AI Evaluation Template support""" + +import json +from beartype.typing import Tuple, Optional, Dict + + +class QualityRatingParser: + """Parser for AI Evaluation Template quality ratings""" + + MAX_CATEGORIES = 15 + MIN_STAR_VALUE = 0 + MAX_STAR_VALUE = 5 + + @staticmethod + def parse_quality_rating(quality_rating_str: str) -> Tuple[Optional[Dict], Optional[str]]: + """ + Parse and validate quality rating JSON string. 
+ + Validation rules: + - Must be valid JSON object + - Maximum 15 categories + - Star values must be integers 0-5 + - At least one category must have a value >= 1 + + :param quality_rating_str: JSON string containing quality ratings + :return: Tuple of (quality_rating_dict, error_message) + Returns (None, error_message) if validation fails + Returns (quality_rating_dict, None) if validation succeeds + + Example valid input: + '{"factual_accuracy": 5, "relevance": 4, "completeness": 3}' + + Example returns: + Success: ({"factual_accuracy": 5, "relevance": 4}, None) + Error: (None, "Quality rating must contain at most 15 categories (found 20)") + """ + if not quality_rating_str or not quality_rating_str.strip(): + return None, "Quality rating cannot be empty" + + # Parse JSON + try: + quality_rating = json.loads(quality_rating_str) + except json.JSONDecodeError as e: + return None, f"Quality rating must be valid JSON: {str(e)}" + + # Must be a dictionary + if not isinstance(quality_rating, dict): + return None, f"Quality rating must be a JSON object, got {type(quality_rating).__name__}" + + # Check if empty + if not quality_rating: + return None, "Quality rating cannot be an empty object" + + # Check max categories + num_categories = len(quality_rating) + if num_categories > QualityRatingParser.MAX_CATEGORIES: + return None, ( + f"Quality rating must contain at most {QualityRatingParser.MAX_CATEGORIES} " + f"categories (found {num_categories})" + ) + + # Validate star values + has_non_zero = False + for category, value in quality_rating.items(): + # Category name validation + if not isinstance(category, str) or not category.strip(): + return None, f"Category names must be non-empty strings" + + # Value must be an integer + if not isinstance(value, int): + return None, ( + f"Star values must be integers 0-{QualityRatingParser.MAX_STAR_VALUE}, " + f"got {type(value).__name__} for category '{category}'" + ) + + # Value must be in valid range + if value < 
QualityRatingParser.MIN_STAR_VALUE or value > QualityRatingParser.MAX_STAR_VALUE: + return None, ( + f"Star values must be between {QualityRatingParser.MIN_STAR_VALUE} and " + f"{QualityRatingParser.MAX_STAR_VALUE}, got {value} for category '{category}'" + ) + + # Track if at least one category has a non-zero value + if value >= 1: + has_non_zero = True + + # At least one category must have value >= 1 + if not has_non_zero: + return None, "Quality rating must have at least one category with a star value >= 1" + + return quality_rating, None From bc34157b1e68f3864be1a279d799013578e79e2c Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Fri, 24 Apr 2026 16:55:07 +0800 Subject: [PATCH 2/3] TRCLI-230: Updated unit tests and test data for quality rating support via --result-fields --- tests/test_junit_parser.py | 12 ++ tests/test_result_fields_quality_rating.py | 166 +++++++++++++++++++++ 2 files changed, 178 insertions(+) create mode 100644 tests/test_result_fields_quality_rating.py diff --git a/tests/test_junit_parser.py b/tests/test_junit_parser.py index cc4e4e3..775018b 100644 --- a/tests/test_junit_parser.py +++ b/tests/test_junit_parser.py @@ -59,6 +59,7 @@ def test_junit_xml_parser_valid_files(self, input_xml_path: Union[str, Path], ex file_reader = JunitParser(env) read_junit = self.__clear_unparsable_junit_elements(file_reader.parse_file()[0]) parsing_result_json = asdict(read_junit) + parsing_result_json = self.__remove_none_quality_ratings(parsing_result_json) print(parsing_result_json) file_json = open(expected_path) expected_json = json.load(file_json) @@ -77,6 +78,7 @@ def test_junit_xml_elapsed_milliseconds(self, freezer): read_junit = self.__clear_unparsable_junit_elements(file_reader.parse_file()[0]) settings.ALLOW_ELAPSED_MS = False parsing_result_json = asdict(read_junit) + parsing_result_json = self.__remove_none_quality_ratings(parsing_result_json) file_json = open(Path(__file__).parent / "test_data/json/milliseconds.json") expected_json = 
json.load(file_json) assert ( @@ -88,6 +90,7 @@ def test_junit_xml_parser_sauce(self, freezer): def _compare(junit_output, expected_path): read_junit = self.__clear_unparsable_junit_elements(junit_output) parsing_result_json = asdict(read_junit) + parsing_result_json = self.__remove_none_quality_ratings(parsing_result_json) file_json = open(expected_path) expected_json = json.load(file_json) assert ( @@ -138,6 +141,7 @@ def test_junit_xml_parser_id_matcher_name( file_reader = JunitParser(env) read_junit = self.__clear_unparsable_junit_elements(file_reader.parse_file()[0]) parsing_result_json = asdict(read_junit) + parsing_result_json = self.__remove_none_quality_ratings(parsing_result_json) file_json = open(expected_path) expected_json = json.load(file_json) assert ( @@ -160,6 +164,14 @@ def test_junit_xml_parser_invalid_empty_file(self): with pytest.raises(ParseError): file_reader.parse_file() + def __remove_none_quality_ratings(self, result_json: dict) -> dict: + """Remove quality_rating fields that are None for backward compatibility with existing tests""" + for section in result_json.get("testsections", []): + for testcase in section.get("testcases", []): + if testcase.get("result", {}).get("quality_rating") is None: + testcase["result"].pop("quality_rating", None) + return result_json + @pytest.mark.parse_junit def test_junit_xml_parser_file_not_found(self): with pytest.raises(FileNotFoundError): diff --git a/tests/test_result_fields_quality_rating.py b/tests/test_result_fields_quality_rating.py new file mode 100644 index 0000000..0813c3c --- /dev/null +++ b/tests/test_result_fields_quality_rating.py @@ -0,0 +1,166 @@ +"""Unit tests for quality_rating support via --result-fields""" + +import pytest +from trcli.data_classes.dataclass_testrail import TestRailResult +from trcli.data_classes.validation_exception import ValidationException + + +class TestResultFieldsQualityRating: + """Test quality_rating handling in --result-fields (CLI global result fields)""" + 
+ def test_quality_rating_via_result_fields_valid(self): + """Test that valid quality_rating JSON string via --result-fields is parsed and set""" + result = TestRailResult(case_id=1, status_id=1) + global_fields = {"quality_rating": '{"factual_accuracy": 5, "relevance": 4}', "custom_field": "value1"} + + result.add_global_result_fields(global_fields) + + # quality_rating should be parsed and set on the attribute + assert result.quality_rating == {"factual_accuracy": 5, "relevance": 4} + # Other fields should be in result_fields dict + assert result.result_fields["custom_field"] == "value1" + # quality_rating should NOT be in result_fields dict + assert "quality_rating" not in result.result_fields + + def test_quality_rating_via_result_fields_invalid_json(self): + """Test that invalid JSON in quality_rating raises ValidationException""" + result = TestRailResult(case_id=1, status_id=1) + global_fields = {"quality_rating": "{not valid json}"} + + with pytest.raises(ValidationException) as exc_info: + result.add_global_result_fields(global_fields) + + assert "Unable to parse quality_rating in --result-fields" in str(exc_info.value) + assert "must be valid JSON" in str(exc_info.value) + + def test_quality_rating_via_result_fields_too_many_categories(self): + """Test that quality_rating with >15 categories raises ValidationException""" + result = TestRailResult(case_id=1, status_id=1) + # Create 16 categories (exceeds MAX_CATEGORIES=15) + categories = {f"category_{i}": 3 for i in range(16)} + global_fields = {"quality_rating": str(categories).replace("'", '"')} + + with pytest.raises(ValidationException) as exc_info: + result.add_global_result_fields(global_fields) + + assert "Unable to parse quality_rating in --result-fields" in str(exc_info.value) + assert "at most 15 categories" in str(exc_info.value) + + def test_quality_rating_via_result_fields_invalid_star_value(self): + """Test that quality_rating with invalid star values raises ValidationException""" + result = 
TestRailResult(case_id=1, status_id=1) + global_fields = {"quality_rating": '{"factual_accuracy": 6}'} # 6 exceeds MAX_STAR_VALUE=5 + + with pytest.raises(ValidationException) as exc_info: + result.add_global_result_fields(global_fields) + + assert "Unable to parse quality_rating in --result-fields" in str(exc_info.value) + assert "must be between 0 and 5" in str(exc_info.value) + + def test_quality_rating_via_result_fields_all_zeros(self): + """Test that quality_rating with all zero values raises ValidationException""" + result = TestRailResult(case_id=1, status_id=1) + global_fields = {"quality_rating": '{"factual_accuracy": 0, "relevance": 0}'} + + with pytest.raises(ValidationException) as exc_info: + result.add_global_result_fields(global_fields) + + assert "Unable to parse quality_rating in --result-fields" in str(exc_info.value) + assert "at least one category with a star value >= 1" in str(exc_info.value) + + def test_quality_rating_test_specific_overrides_global(self): + """Test that test-specific quality_rating (from properties) takes precedence over --result-fields""" + # Simulate test-specific quality_rating already set (from XML properties) + result = TestRailResult(case_id=1, status_id=1, quality_rating={"test_specific": 5, "accuracy": 4}) + + # Attempt to apply global quality_rating via --result-fields + global_fields = {"quality_rating": '{"global_rating": 3}'} + + result.add_global_result_fields(global_fields) + + # Test-specific rating should be preserved (not overridden by global) + assert result.quality_rating == {"test_specific": 5, "accuracy": 4} + assert result.quality_rating != {"global_rating": 3} + + def test_quality_rating_via_result_fields_empty_string(self): + """Test that empty string quality_rating raises ValidationException""" + result = TestRailResult(case_id=1, status_id=1) + global_fields = {"quality_rating": ""} + + with pytest.raises(ValidationException) as exc_info: + result.add_global_result_fields(global_fields) + + assert 
"Unable to parse quality_rating in --result-fields" in str(exc_info.value) + assert "cannot be empty" in str(exc_info.value) + + def test_quality_rating_via_result_fields_empty_object(self): + """Test that empty JSON object quality_rating raises ValidationException""" + result = TestRailResult(case_id=1, status_id=1) + global_fields = {"quality_rating": "{}"} + + with pytest.raises(ValidationException) as exc_info: + result.add_global_result_fields(global_fields) + + assert "Unable to parse quality_rating in --result-fields" in str(exc_info.value) + assert "cannot be an empty object" in str(exc_info.value) + + def test_quality_rating_via_result_fields_non_integer_value(self): + """Test that non-integer star values raise ValidationException""" + result = TestRailResult(case_id=1, status_id=1) + global_fields = {"quality_rating": '{"factual_accuracy": 4.5}'} # float instead of int + + with pytest.raises(ValidationException) as exc_info: + result.add_global_result_fields(global_fields) + + assert "Unable to parse quality_rating in --result-fields" in str(exc_info.value) + assert "must be integers" in str(exc_info.value) + + def test_quality_rating_via_result_fields_mixed_with_other_fields(self): + """Test that quality_rating works alongside other result fields""" + result = TestRailResult(case_id=1, status_id=1) + global_fields = { + "quality_rating": '{"factual_accuracy": 5, "relevance": 4, "completeness": 3}', + "custom_field_1": "value1", + "custom_field_2": "value2", + "custom_priority": "3", + } + + result.add_global_result_fields(global_fields) + + # quality_rating should be on the attribute + assert result.quality_rating == {"factual_accuracy": 5, "relevance": 4, "completeness": 3} + # Other fields should be in result_fields dict + assert result.result_fields["custom_field_1"] == "value1" + assert result.result_fields["custom_field_2"] == "value2" + assert result.result_fields["custom_priority"] == "3" + # quality_rating should NOT be in result_fields dict + 
assert "quality_rating" not in result.result_fields + + def test_quality_rating_to_dict_serialization(self): + """Test that quality_rating is properly serialized in to_dict()""" + result = TestRailResult(case_id=1, status_id=1) + global_fields = {"quality_rating": '{"factual_accuracy": 5, "security": 4}', "custom_field": "value1"} + + result.add_global_result_fields(global_fields) + result_dict = result.to_dict() + + # quality_rating should be at root level (not nested) + assert "quality_rating" in result_dict + assert result_dict["quality_rating"] == {"factual_accuracy": 5, "security": 4} + # Other fields should also be present + assert result_dict["custom_field"] == "value1" + assert result_dict["case_id"] == 1 + assert result_dict["status_id"] == 1 + + def test_no_quality_rating_in_result_fields_no_error(self): + """Test that absence of quality_rating doesn't cause issues""" + result = TestRailResult(case_id=1, status_id=1) + global_fields = {"custom_field_1": "value1", "custom_field_2": "value2"} + + result.add_global_result_fields(global_fields) + + # No quality_rating should be set + assert result.quality_rating is None + # Other fields should be in result_fields dict + assert result.result_fields["custom_field_1"] == "value1" + assert result.result_fields["custom_field_2"] == "value2" From f71fb54224da8d45282413d259732b5c2005b563 Mon Sep 17 00:00:00 2001 From: acuanico-tr-galt Date: Wed, 29 Apr 2026 17:28:22 +0800 Subject: [PATCH 3/3] TRCLI-230: Updated and fixed quality rating validations and output warnings for junit and robot parsers --- trcli/cli.py | 19 ++++++++++++++++++- trcli/commands/cmd_parse_junit.py | 18 +++++++++++++++++- trcli/commands/cmd_parse_robot.py | 18 +++++++++++++++++- trcli/readers/junit_xml.py | 3 +++ trcli/readers/robot_xml.py | 3 +++ 5 files changed, 58 insertions(+), 3 deletions(-) diff --git a/trcli/cli.py b/trcli/cli.py index 716ed8d..2433471 100755 --- a/trcli/cli.py +++ b/trcli/cli.py @@ -17,7 +17,7 @@ TOOL_VERSION, 
COMMAND_FAULT_MAPPING, ) -from trcli.data_classes.data_parsers import FieldsParser +from trcli.data_classes.data_parsers import FieldsParser, QualityRatingParser from trcli.settings import DEFAULT_API_CALL_TIMEOUT, DEFAULT_BATCH_SIZE # Import structured logging infrastructure @@ -123,6 +123,23 @@ def result_fields(self, result_fields: Union[List[str], dict]): if error: self.elog(error) exit(1) + + # Validate quality_rating if present in result_fields + if "quality_rating" in fields_dict: + quality_rating_value = fields_dict["quality_rating"] + _, validation_error = QualityRatingParser.parse_quality_rating(quality_rating_value) + if validation_error: + self.elog( + f"ERROR: Invalid quality_rating provided in --result-fields parameter:\n" + f"{validation_error}\n\n" + f"Quality rating requirements:\n" + f" - Maximum 15 categories\n" + f" - Star values must be integers 0-5\n" + f" - At least one category must have a value >= 1\n" + f" - Must be valid JSON object format" + ) + exit(1) + self._result_fields = fields_dict def log(self, msg: str, new_line=True, *args): diff --git a/trcli/commands/cmd_parse_junit.py b/trcli/commands/cmd_parse_junit.py index 9bb61af..913dc7d 100644 --- a/trcli/commands/cmd_parse_junit.py +++ b/trcli/commands/cmd_parse_junit.py @@ -76,7 +76,23 @@ def cli(environment: Environment, context: click.Context, *args, **kwargs): settings.ALLOW_ELAPSED_MS = environment.allow_ms print_config(environment) try: - parsed_suites = JunitParser(environment).parse_file() + junit_parser = JunitParser(environment) + parsed_suites = junit_parser.parse_file() + + # Check if any invalid quality ratings were found during parsing + if junit_parser.invalid_quality_ratings_found: + environment.elog( + "\nERROR: One or more test results have invalid quality_rating values that were rejected.\n" + "Cannot proceed with upload as quality_rating is required for tests that specify it.\n\n" + "Please fix the invalid quality ratings in your test report and try again.\n\n" + 
"Quality rating requirements:\n" + " - Maximum 15 categories\n" + " - Star values must be integers 0-5\n" + " - At least one category must have a value >= 1\n" + " - Must be valid JSON object format" + ) + exit(1) + run_id = None case_update_results = {} diff --git a/trcli/commands/cmd_parse_robot.py b/trcli/commands/cmd_parse_robot.py index a09ac21..c6c6afd 100644 --- a/trcli/commands/cmd_parse_robot.py +++ b/trcli/commands/cmd_parse_robot.py @@ -23,7 +23,23 @@ def cli(environment: Environment, context: click.Context, *args, **kwargs): settings.ALLOW_ELAPSED_MS = environment.allow_ms print_config(environment) try: - parsed_suites = RobotParser(environment).parse_file() + robot_parser = RobotParser(environment) + parsed_suites = robot_parser.parse_file() + + # Check if any invalid quality ratings were found during parsing + if robot_parser.invalid_quality_ratings_found: + environment.elog( + "\nERROR: One or more test results have invalid quality_rating values that were rejected.\n" + "Cannot proceed with upload as quality_rating is required for tests that specify it.\n\n" + "Please fix the invalid quality ratings in your test report and try again.\n\n" + "Quality rating requirements:\n" + " - Maximum 15 categories\n" + " - Star values must be integers 0-5\n" + " - At least one category must have a value >= 1\n" + " - Must be valid JSON object format" + ) + exit(1) + for suite in parsed_suites: result_uploader = ResultsUploader(environment=environment, suite=suite) result_uploader.upload_results() diff --git a/trcli/readers/junit_xml.py b/trcli/readers/junit_xml.py index cf4fbb0..ebf6ffc 100644 --- a/trcli/readers/junit_xml.py +++ b/trcli/readers/junit_xml.py @@ -48,6 +48,7 @@ def __init__(self, environment: Environment): self._case_matcher = environment.case_matcher self._special = environment.special_parser self._case_result_statuses = {"passed": 1, "skipped": 4, "error": 5, "failure": 5} + self.invalid_quality_ratings_found = False # Track if any quality ratings 
were invalid self._update_with_custom_statuses() @classmethod @@ -218,6 +219,8 @@ def _parse_case_properties(self, case): parsed_rating, error = QualityRatingParser.parse_quality_rating(value) if error: self.env.elog(f"Quality rating validation failed for test '{case.name}': {error}") + # Mark that we found invalid quality ratings + self.invalid_quality_ratings_found = True # Skip invalid quality rating else: quality_rating = parsed_rating diff --git a/trcli/readers/robot_xml.py b/trcli/readers/robot_xml.py index 97e30a5..1cf58b2 100644 --- a/trcli/readers/robot_xml.py +++ b/trcli/readers/robot_xml.py @@ -27,6 +27,7 @@ class RobotParser(FileParser): def __init__(self, environment: Environment): super().__init__(environment) self.case_matcher = environment.case_matcher + self.invalid_quality_ratings_found = False # Track if any quality ratings were invalid @staticmethod def check_file(filepath: Union[str, Path]) -> Path: @@ -133,6 +134,8 @@ def _find_suites(self, suite_element, sections_list: List, namespace=""): parsed_rating, error = QualityRatingParser.parse_quality_rating(quality_rating_str) if error: self.env.elog(f"Quality rating validation failed for test '{case_name}': {error}") + # Mark that we found invalid quality ratings + self.invalid_quality_ratings_found = True else: quality_rating = parsed_rating if line.lower().startswith("- testrail_attachment:"):