From a549b10e7f9cc9d1cb1306c91915e90cc936d204 Mon Sep 17 00:00:00 2001 From: ilya kotelnikov Date: Thu, 9 Apr 2026 18:06:46 +0200 Subject: [PATCH] Add draft prototype for init, status, dry-run, and json output --- README.md | 54 +++- tests/test_api_client.py | 30 +++ tests/test_cli.py | 30 ++- tests/test_cmd_add_run.py | 148 +++++++++++ tests/test_cmd_dry_run_parsers.py | 193 ++++++++++++++ tests/test_cmd_import_gherkin.py | 53 +++- tests/test_cmd_init.py | 185 +++++++++++++ tests/test_cmd_labels.py | 41 ++- tests/test_cmd_parse_cucumber.py | 77 ++++++ tests/test_cmd_references.py | 40 +++ tests/test_cmd_status.py | 171 ++++++++++++ tests/test_data/cli_test_data.py | 1 + trcli/api/api_client.py | 27 ++ trcli/api/project_based_client.py | 20 +- trcli/api/run_handler.py | 19 ++ trcli/cli.py | 59 +++-- trcli/cli_styles.py | 171 ++++++++++++ trcli/commands/cmd_add_run.py | 80 +++++- trcli/commands/cmd_export_gherkin.py | 4 +- trcli/commands/cmd_import_gherkin.py | 46 +++- trcli/commands/cmd_init.py | 282 ++++++++++++++++++++ trcli/commands/cmd_labels.py | 55 +++- trcli/commands/cmd_parse_cucumber.py | 38 ++- trcli/commands/cmd_parse_junit.py | 93 +++++-- trcli/commands/cmd_parse_openapi.py | 14 +- trcli/commands/cmd_parse_robot.py | 15 +- trcli/commands/cmd_references.py | 34 ++- trcli/commands/cmd_status.py | 322 +++++++++++++++++++++++ trcli/commands/cmd_update.py | 3 +- trcli/commands/results_parser_helpers.py | 116 ++++++++ trcli/constants.py | 17 ++ 31 files changed, 2353 insertions(+), 85 deletions(-) create mode 100644 tests/test_cmd_dry_run_parsers.py create mode 100644 tests/test_cmd_init.py create mode 100644 tests/test_cmd_status.py create mode 100644 trcli/cli_styles.py create mode 100644 trcli/commands/cmd_init.py create mode 100644 trcli/commands/cmd_status.py diff --git a/README.md b/README.md index a2cb4f7d..fc9a366c 100644 --- a/README.md +++ b/README.md @@ -85,6 +85,8 @@ Options: (e.g., localhost,127.0.0.1). 
--parallel-pagination Enable parallel pagination for faster case fetching (experimental). + --dry-run Preview write operations without sending mutating + requests to TestRail. --help Show this message and exit. Commands: @@ -100,6 +102,55 @@ Commands: update Update TRCLI to the latest version from PyPI. ``` +### Dry-run mode + +TRCLI supports a client-side preview mode via `--dry-run`. + +In dry-run mode: +- local parsing and validation still run +- read-only API calls may still run +- mutating requests to TestRail are not sent + +Current dry-run support is aimed at write-oriented commands such as: +- `add_run` +- `import_gherkin` +- parser commands (`parse_junit`, `parse_robot`, `parse_cucumber`, `parse_openapi`) +- mutating label/reference commands + +Example: + +```shell +trcli --dry-run parse_junit -f results.xml --title "Nightly Run" +``` + +Important limitations: +- this is a TRCLI-side preview, not a server-side transaction preview +- TestRail IDs for newly created resources are not generated in dry-run mode +- parser dry-run summaries are based on local parsing and do not complete the full write workflow + +### JSON output + +TRCLI now supports machine-readable output for commands that naturally produce structured results. + +Current `--json` support includes: +- `status` +- `add_run` +- parser commands: `parse_junit`, `parse_robot`, `parse_cucumber`, `parse_openapi` +- `import_gherkin` + +Notes: +- `--json-output` is still accepted as a compatibility alias on commands that already exposed it. +- In JSON mode, stdout is intended for the final JSON document. Human-readable progress output is redirected away from stdout. +- Commands still use normal exit codes. In JSON mode, a failing command returns a non-zero exit code and emits an `"ok": false` payload when the failure can be represented structurally. 
+ +Example: + +```shell +trcli status --json +trcli add_run --title "Nightly Run" --suite-id 1 --json +trcli parse_junit -f results.xml --title "Nightly Run" --json +``` + Uploading automated test results -------- @@ -151,7 +202,8 @@ Options: test results to. --test-run-ref Comma-separated list of reference IDs to append to the test run (up to 250 characters total). - --json-output Output reference operation results in JSON format. + --json-output, --json + Output structured results in JSON format. --update-existing-cases Update existing TestRail cases with values from JUnit properties (default: no). --update-strategy Strategy for combining incoming values with diff --git a/tests/test_api_client.py b/tests/test_api_client.py index 60d761c5..d2f4192e 100644 --- a/tests/test_api_client.py +++ b/tests/test_api_client.py @@ -63,6 +63,36 @@ def test_send_post_status_code_success(self, api_resources, requests_mock): check_calls_count(requests_mock) check_response(201, FAKE_PROJECT_DATA, "", response) + @pytest.mark.api_client + @patch("requests.post") + def test_send_post_is_suppressed_in_dry_run_mode(self, mock_post, api_resources_maker, mocker): + environment = mocker.patch("trcli.cli.Environment") + api_client = api_resources_maker(environment=environment) + api_client.dry_run = True + + response = api_client.send_post("add_project", {"name": "Preview Project"}) + + mock_post.assert_not_called() + check_response( + 200, + {"uri": "add_project", "payload": {"name": "Preview Project"}, "has_files": False, "dry_run": True}, + "", + response, + ) + environment.log.assert_any_call("Dry run: skipping POST add_project") + + @pytest.mark.api_client + def test_send_get_still_executes_in_dry_run_mode(self, api_resources_maker, requests_mock, mocker): + requests_mock.get(create_url("get_projects"), status_code=200, json=FAKE_PROJECT_DATA) + environment = mocker.patch("trcli.cli.Environment") + api_client = api_resources_maker(environment=environment) + api_client.dry_run = True 
+ + response = api_client.send_get("get_projects") + + check_calls_count(requests_mock) + check_response(200, FAKE_PROJECT_DATA, "", response) + @pytest.mark.api_client def test_send_get_status_code_not_success(self, api_resources, requests_mock): """The purpose of this test is to check behaviour of send_get one receiving not successful status code. diff --git a/tests/test_cli.py b/tests/test_cli.py index 0997a9cd..7c3a6a46 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -2,6 +2,7 @@ from pathlib import Path import pytest +import click from click.testing import CliRunner @@ -63,6 +64,29 @@ def test_run_with_help_parameter(self, cli_resources): "Options:" in result.output ), "'Options:' is not present in output message when calling trcli with --help parameter." + @pytest.mark.cli + def test_help_uses_colored_palette(self): + ctx = click.Context(cli, info_name="trcli", color=True) + help_text = cli.get_help(ctx) + + assert "\x1b[33mUsage: " in help_text + assert "\x1b[93mparse_junit\x1b[0m" in help_text + assert "\x1b[93m-h, --host " in help_text + assert "\x1b[93m--dry-run\x1b[0m" in help_text + + @pytest.mark.cli + def test_dry_run_option_is_set_on_environment(self, mocker, cli_resources): + cli_agrs_helper, cli_runner = cli_resources + args = cli_agrs_helper.get_all_required_parameters_plus_optional(["--dry-run"]) + + mocker.patch("sys.argv", ["trcli", *args]) + setattr_mock = mocker.patch("trcli.cli.setattr") + + with cli_runner.isolated_filesystem(): + _ = cli_runner.invoke(cli, args) + + setattr_mock.assert_any_call(mocker.ANY, "dry_run", True) + @pytest.mark.cli def test_run_without_command(self, mocker, cli_resources): """The purpose of this test is to check that calling trcli without command will result is @@ -99,7 +123,8 @@ def test_check_error_message_for_required_parameters( ) mocker.patch("sys.argv", ["trcli", *args]) - result = cli_runner.invoke(cli, args) + with cli_runner.isolated_filesystem(): + result = cli_runner.invoke(cli, args) assert 
( result.exit_code == expected_exit_code ), f"Exit code {expected_exit_code} expected. Got: {result.exit_code} instead." @@ -121,7 +146,8 @@ def test_host_syntax_is_validated(self, host, cli_resources, mocker): args = ["--host", host, *args] mocker.patch("sys.argv", ["trcli", *args]) - result = cli_runner.invoke(cli, args) + with cli_runner.isolated_filesystem(): + result = cli_runner.invoke(cli, args) assert ( result.exit_code == expected_exit_code ), f"Exit code {expected_exit_code} expected. Got: {result.exit_code} instead." diff --git a/tests/test_cmd_add_run.py b/tests/test_cmd_add_run.py index 88a52a79..2654b19e 100644 --- a/tests/test_cmd_add_run.py +++ b/tests/test_cmd_add_run.py @@ -1,7 +1,9 @@ from unittest import mock +import json import pytest from click.testing import CliRunner +from trcli.cli import cli as root_cli from trcli.cli import Environment from trcli.commands import cmd_add_run @@ -114,6 +116,152 @@ def test_refs_action_parameter_parsing(self): assert "--run-refs-action" in result.output assert "Action to perform on references" in result.output + @mock.patch("trcli.commands.cmd_add_run.write_run_to_file") + @mock.patch("trcli.commands.cmd_add_run.ProjectBasedClient") + @mock.patch("trcli.cli.check_for_updates", return_value=None) + def test_dry_run_does_not_write_run_file( + self, _mock_update_check, mock_project_client_class, mock_write_run_to_file + ): + runner = CliRunner() + mock_project_client = mock_project_client_class.return_value + mock_project_client.resolve_project.return_value = None + mock_project_client.resolve_suite.return_value = None + mock_project_client.create_or_update_test_run.return_value = (0, "") + + args = [ + "--host", + "https://example.testrail.io", + "--project", + "Example Project", + "--username", + "user@example.com", + "--key", + "secret", + "--dry-run", + "add_run", + "--title", + "Preview Run", + "--suite-id", + "1", + "--file", + "preview.yml", + ] + + with runner.isolated_filesystem(): + with 
mock.patch("sys.argv", ["trcli", *args]): + result = runner.invoke(root_cli, args) + + assert result.exit_code == 0 + assert "Dry run: no TestRail changes were made." in result.output + assert "run_id: " in result.output + mock_write_run_to_file.assert_not_called() + + @mock.patch("trcli.commands.cmd_add_run.ProjectBasedClient") + @mock.patch("trcli.cli.check_for_updates", return_value=None) + def test_add_run_json_output_create(self, _mock_update_check, mock_project_client_class): + runner = CliRunner() + mock_project_client = mock_project_client_class.return_value + mock_project_client.resolve_project.return_value = None + mock_project_client.resolve_suite.return_value = None + mock_project_client.create_or_update_test_run.return_value = (321, "") + + args = [ + "--host", + "https://example.testrail.io", + "--project", + "Example Project", + "--username", + "user@example.com", + "--key", + "secret", + "add_run", + "--title", + "JSON Run", + "--suite-id", + "1", + "--json", + ] + + with mock.patch("sys.argv", ["trcli", *args]): + result = runner.invoke(root_cli, args) + + assert result.exit_code == 0 + payload = json.loads(result.output[result.output.find("{"):]) + assert payload["ok"] is True + assert payload["command"] == "add_run" + assert payload["data"]["action"] == "create" + assert payload["data"]["run_id"] == 321 + assert payload["data"]["title"] == "JSON Run" + + @mock.patch("trcli.commands.cmd_add_run.ProjectBasedClient") + @mock.patch("trcli.cli.check_for_updates", return_value=None) + def test_add_run_json_output_dry_run(self, _mock_update_check, mock_project_client_class): + runner = CliRunner() + mock_project_client = mock_project_client_class.return_value + mock_project_client.resolve_project.return_value = None + mock_project_client.resolve_suite.return_value = None + mock_project_client.create_or_update_test_run.return_value = (0, "") + + args = [ + "--host", + "https://example.testrail.io", + "--project", + "Example Project", + "--username", + 
"user@example.com", + "--key", + "secret", + "--dry-run", + "add_run", + "--title", + "Preview Run", + "--suite-id", + "1", + "--json", + ] + + with mock.patch("sys.argv", ["trcli", *args]): + result = runner.invoke(root_cli, args) + + assert result.exit_code == 0 + payload = json.loads(result.output[result.output.find("{"):]) + assert payload["ok"] is True + assert payload["dry_run"] is True + assert payload["data"]["action"] == "create" + assert payload["data"]["run_id"] is None + + @mock.patch("trcli.cli.check_for_updates", return_value=None) + def test_add_run_json_output_validation_error(self, _mock_update_check): + runner = CliRunner() + long_refs = "A" * 251 + + args = [ + "--host", + "https://example.testrail.io", + "--project", + "Example Project", + "--username", + "user@example.com", + "--key", + "secret", + "add_run", + "--title", + "JSON Run", + "--suite-id", + "1", + "--run-refs", + long_refs, + "--json", + ] + + with mock.patch("sys.argv", ["trcli", *args]): + result = runner.invoke(root_cli, args) + + assert result.exit_code == 1 + payload = json.loads(result.output[result.output.find("{"):]) + assert payload["ok"] is False + assert "References field cannot exceed 250 characters." 
in payload["errors"][0] + class TestApiRequestHandlerReferences: """Test class for reference management functionality""" diff --git a/tests/test_cmd_dry_run_parsers.py b/tests/test_cmd_dry_run_parsers.py new file mode 100644 index 00000000..881a72fe --- /dev/null +++ b/tests/test_cmd_dry_run_parsers.py @@ -0,0 +1,193 @@ +from unittest.mock import MagicMock, patch +import json + +import pytest +from click.testing import CliRunner + +from trcli.cli import Environment +from trcli.commands import cmd_parse_junit, cmd_parse_robot, cmd_parse_openapi + + +def _make_environment(command_name: str) -> Environment: + environment = Environment(cmd=command_name) + environment.host = "https://test.testrail.com" + environment.username = "test@example.com" + environment.password = "password" + environment.project = "Test Project" + environment.project_id = 1 + environment.title = "Dry Run Preview" + environment.dry_run = True + return environment + + +class TestDryRunParserCommands: + @pytest.mark.cli + @patch("trcli.commands.cmd_parse_junit.ResultsUploader") + @patch("trcli.commands.cmd_parse_junit.JunitParser") + def test_parse_junit_dry_run_skips_uploader(self, mock_parser_class, mock_uploader_class): + runner = CliRunner() + environment = _make_environment("parse_junit") + + mock_parser = MagicMock() + mock_parser_class.return_value = mock_parser + mock_suite = MagicMock() + mock_suite.testsections = [] + mock_parser.parse_file.return_value = [mock_suite] + + result = runner.invoke(cmd_parse_junit.cli, ["--file", "results.xml", "--title", "Dry Run Preview"], obj=environment) + + assert result.exit_code == 0 + mock_parser.parse_file.assert_called_once() + mock_uploader_class.assert_not_called() + assert "dry run: would upload junit results to testrail." 
in result.output.lower() + + @pytest.mark.cli + @patch("trcli.commands.cmd_parse_junit.ResultsUploader") + @patch("trcli.commands.cmd_parse_junit.JunitParser") + def test_parse_junit_dry_run_json_output(self, mock_parser_class, mock_uploader_class): + runner = CliRunner() + environment = _make_environment("parse_junit") + environment.json_output = True + + mock_parser = MagicMock() + mock_parser_class.return_value = mock_parser + mock_suite = MagicMock() + mock_suite.testsections = [] + mock_parser.parse_file.return_value = [mock_suite] + + result = runner.invoke( + cmd_parse_junit.cli, + ["--file", "results.xml", "--title", "Dry Run Preview", "--json"], + obj=environment, + ) + + assert result.exit_code == 0 + payload = json.loads(result.output[result.output.find("{"):]) + assert payload["ok"] is True + assert payload["dry_run"] is True + assert payload["data"]["parsed"]["suites"] == 1 + mock_uploader_class.assert_not_called() + + @pytest.mark.cli + @patch("trcli.commands.cmd_parse_junit.ResultsUploader") + @patch("trcli.commands.cmd_parse_junit.JunitParser") + def test_parse_junit_json_output(self, mock_parser_class, mock_uploader_class): + runner = CliRunner() + environment = _make_environment("parse_junit") + environment.dry_run = False + + mock_parser = MagicMock() + mock_parser_class.return_value = mock_parser + mock_case = MagicMock() + mock_case.result = MagicMock() + mock_section = MagicMock() + mock_section.testcases = [mock_case] + mock_suite = MagicMock() + mock_suite.testsections = [mock_section] + mock_parser.parse_file.return_value = [mock_suite] + + mock_uploader = MagicMock() + mock_uploader.last_run_id = 555 + mock_uploader.case_update_results = {"updated_cases": [], "skipped_cases": [], "failed_cases": []} + mock_uploader_class.return_value = mock_uploader + + result = runner.invoke( + cmd_parse_junit.cli, + ["--file", "results.xml", "--title", "Dry Run Preview", "--json"], + obj=environment, + ) + + assert result.exit_code == 0 + payload = 
json.loads(result.output[result.output.find("{"):]) + assert payload["command"] == "parse_junit" + assert payload["data"]["run_id"] == 555 + assert payload["data"]["parsed"]["results"] == 1 + mock_uploader.upload_results.assert_called_once() + + @pytest.mark.cli + @patch("trcli.commands.cmd_parse_robot.ResultsUploader") + @patch("trcli.commands.cmd_parse_robot.RobotParser") + def test_parse_robot_dry_run_skips_uploader(self, mock_parser_class, mock_uploader_class): + runner = CliRunner() + environment = _make_environment("parse_robot") + + mock_parser = MagicMock() + mock_parser_class.return_value = mock_parser + mock_suite = MagicMock() + mock_suite.testsections = [] + mock_parser.parse_file.return_value = [mock_suite] + + result = runner.invoke(cmd_parse_robot.cli, ["--file", "results.xml", "--title", "Dry Run Preview"], obj=environment) + + assert result.exit_code == 0 + mock_parser.parse_file.assert_called_once() + mock_uploader_class.assert_not_called() + assert "dry run: would upload robot framework results to testrail." 
in result.output.lower() + + @pytest.mark.cli + @patch("trcli.commands.cmd_parse_robot.ResultsUploader") + @patch("trcli.commands.cmd_parse_robot.RobotParser") + def test_parse_robot_dry_run_json_output(self, mock_parser_class, mock_uploader_class): + runner = CliRunner() + environment = _make_environment("parse_robot") + environment.json_output = True + + mock_parser = MagicMock() + mock_parser_class.return_value = mock_parser + mock_suite = MagicMock() + mock_suite.testsections = [] + mock_parser.parse_file.return_value = [mock_suite] + + result = runner.invoke( + cmd_parse_robot.cli, + ["--file", "results.xml", "--title", "Dry Run Preview", "--json"], + obj=environment, + ) + + assert result.exit_code == 0 + payload = json.loads(result.output[result.output.find("{"):]) + assert payload["command"] == "parse_robot" + assert payload["dry_run"] is True + mock_uploader_class.assert_not_called() + + @pytest.mark.cli + @patch("trcli.commands.cmd_parse_openapi.ResultsUploader") + @patch("trcli.commands.cmd_parse_openapi.OpenApiParser") + def test_parse_openapi_dry_run_skips_uploader(self, mock_parser_class, mock_uploader_class): + runner = CliRunner() + environment = _make_environment("parse_openapi") + + mock_parser = MagicMock() + mock_parser_class.return_value = mock_parser + mock_suite = MagicMock() + mock_suite.testsections = [] + mock_parser.parse_file.return_value = [mock_suite] + + result = runner.invoke(cmd_parse_openapi.cli, ["--file", "openapi.yml"], obj=environment) + + assert result.exit_code == 0 + mock_parser.parse_file.assert_called_once() + mock_uploader_class.assert_not_called() + assert "dry run: would create openapi-derived test cases in testrail." 
in result.output.lower() + + @pytest.mark.cli + @patch("trcli.commands.cmd_parse_openapi.ResultsUploader") + @patch("trcli.commands.cmd_parse_openapi.OpenApiParser") + def test_parse_openapi_dry_run_json_output(self, mock_parser_class, mock_uploader_class): + runner = CliRunner() + environment = _make_environment("parse_openapi") + environment.json_output = True + + mock_parser = MagicMock() + mock_parser_class.return_value = mock_parser + mock_suite = MagicMock() + mock_suite.testsections = [] + mock_parser.parse_file.return_value = [mock_suite] + + result = runner.invoke(cmd_parse_openapi.cli, ["--file", "openapi.yml", "--json"], obj=environment) + + assert result.exit_code == 0 + payload = json.loads(result.output[result.output.find("{"):]) + assert payload["command"] == "parse_openapi" + assert payload["dry_run"] is True + mock_uploader_class.assert_not_called() diff --git a/tests/test_cmd_import_gherkin.py b/tests/test_cmd_import_gherkin.py index 6b290d7f..e31b9ee3 100644 --- a/tests/test_cmd_import_gherkin.py +++ b/tests/test_cmd_import_gherkin.py @@ -85,9 +85,9 @@ def test_import_gherkin_json_output(self, mock_api_client_class, mock_api_handle assert json_start >= 0, "No JSON found in output" json_str = result.output[json_start:] output_data = json.loads(json_str) - assert "case_ids" in output_data - assert output_data["case_ids"] == [101, 102] - assert output_data["count"] == 2 + assert output_data["ok"] is True + assert output_data["data"]["case_ids"] == [101, 102] + assert output_data["data"]["count"] == 2 @pytest.mark.cmd_import_gherkin @patch("trcli.commands.cmd_import_gherkin.ApiRequestHandler") @@ -324,8 +324,8 @@ def test_import_gherkin_update_mode_json_output(self, mock_api_client_class, moc import json output_data = json.loads(json_str) - assert "case_ids" in output_data - assert output_data["case_ids"] == [789] + assert output_data["ok"] is True + assert output_data["data"]["case_ids"] == [789] # Verify update_bdd was called with case_id 
mock_handler.update_bdd.assert_called_once_with(789, mock.ANY) @@ -394,3 +394,46 @@ def test_import_gherkin_update_mode_verbose(self, mock_api_client_class, mock_ap assert "456" in result.output # case_id in verbose log # Verify update_bdd was called with case_id mock_handler.update_bdd.assert_called_once_with(456, mock.ANY) + + @pytest.mark.cmd_import_gherkin + @patch("trcli.commands.cmd_import_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_import_gherkin.APIClient") + def test_import_gherkin_dry_run_create_skips_api_calls(self, mock_api_client_class, mock_api_handler_class): + self.environment.dry_run = True + + with self.runner.isolated_filesystem(): + with open("test.feature", "w") as f: + f.write("Feature: Test\n Scenario: Test\n") + + result = self.runner.invoke( + cmd_import_gherkin.cli, ["--file", "test.feature", "--section-id", "123"], obj=self.environment + ) + + assert result.exit_code == 0 + assert "Dry run: would upload feature file to TestRail." in result.output + mock_api_client_class.assert_not_called() + mock_api_handler_class.assert_not_called() + + @pytest.mark.cmd_import_gherkin + @patch("trcli.commands.cmd_import_gherkin.ApiRequestHandler") + @patch("trcli.commands.cmd_import_gherkin.APIClient") + def test_import_gherkin_dry_run_update_json_output(self, mock_api_client_class, mock_api_handler_class): + self.environment.dry_run = True + + with self.runner.isolated_filesystem(): + with open("test.feature", "w") as f: + f.write("Feature: Test\n Scenario: Test\n") + + result = self.runner.invoke( + cmd_import_gherkin.cli, + ["--file", "test.feature", "--case-id", "456", "--update", "--json-output"], + obj=self.environment, + ) + + assert result.exit_code == 0 + output_data = json.loads(result.output) + assert output_data["dry_run"] is True + assert output_data["data"]["action"] == "update" + assert output_data["data"]["target_id"] == 456 + mock_api_client_class.assert_not_called() + mock_api_handler_class.assert_not_called() diff --git 
a/tests/test_cmd_init.py b/tests/test_cmd_init.py new file mode 100644 index 00000000..d028e061 --- /dev/null +++ b/tests/test_cmd_init.py @@ -0,0 +1,185 @@ +from pathlib import Path + +import pytest +import yaml + +from click.testing import CliRunner + +from trcli.cli import cli as root_cli +from trcli.commands.cmd_init import cli + + +@pytest.fixture +def runner(): + return CliRunner() + + +class TestInitCommand: + @pytest.mark.cli + def test_init_creates_config_file_interactively(self, runner, mocker): + mocker.patch("trcli.cli.check_for_updates", return_value=None) + mocker.patch("trcli.commands.cmd_init._resolve_project_context", return_value=(None, None)) + + user_input = "\n".join( + [ + "https://example.testrail.io", + "Demo Project", + "", + "user@example.com", + "1", + "secret-key", + "secret-key", + "n", + "n", + ] + ) + "\n" + + with runner.isolated_filesystem(): + result = runner.invoke(root_cli, ["init"], input=user_input) + + assert result.exit_code == 0 + assert "TRCLI initialization" in result.output + assert "Next step: run `trcli status` to verify the saved configuration." 
in result.output + + config_path = Path("config.yml") + assert config_path.exists() + + config_data = yaml.safe_load(config_path.read_text(encoding="utf-8")) + assert config_data == { + "host": "https://example.testrail.io", + "project": "Demo Project", + "username": "user@example.com", + "key": "secret-key", + } + + @pytest.mark.cli + def test_init_can_store_advanced_settings_and_suite_id(self, runner, mocker): + mocker.patch("trcli.cli.check_for_updates", return_value=None) + + class FakeProjectData: + project_id = 12 + suite_mode = 1 + error_message = "" + + class FakeApiHandler: + def get_suite_ids(self, project_id): + return ([55], "") + + mocker.patch( + "trcli.commands.cmd_init._resolve_project_context", + return_value=(FakeProjectData(), FakeApiHandler()), + ) + + with runner.isolated_filesystem(): + result = runner.invoke( + root_cli, + ["init"], + input="\n".join( + [ + "https://example.testrail.io", + "Demo Project", + "", + "user@example.com", + "1", + "secret-key", + "secret-key", + "y", + "http://proxy.example.com:8080", + "proxy-user:proxy-pass", + "localhost,127.0.0.1", + "y", + "90", + "75", + "y", + ] + ) + + "\n", + ) + + assert result.exit_code == 0 + config_data = yaml.safe_load(Path("config.yml").read_text(encoding="utf-8")) + assert config_data == { + "host": "https://example.testrail.io", + "project": "Demo Project", + "username": "user@example.com", + "key": "secret-key", + "project_id": 12, + "suite_id": 55, + "proxy": "http://proxy.example.com:8080", + "proxy_user": "proxy-user:proxy-pass", + "noproxy": "localhost,127.0.0.1", + "insecure": True, + "timeout": 90.0, + "batch_size": 75, + } + + @pytest.mark.cli + def test_init_validates_suite_id_when_project_context_is_available(self, runner, mocker): + mocker.patch("trcli.cli.check_for_updates", return_value=None) + + class FakeProjectData: + project_id = 12 + suite_mode = 3 + error_message = "" + + class FakeApiHandler: + def __init__(self): + self.suites_data_from_provider = 
type("SuiteData", (), {"suite_id": None})() + + def check_suite_id(self, project_id): + return (self.suites_data_from_provider.suite_id == 101, "Suite with ID '999' does not exist in TestRail.") + + mocker.patch( + "trcli.commands.cmd_init._resolve_project_context", + return_value=(FakeProjectData(), FakeApiHandler()), + ) + + with runner.isolated_filesystem(): + result = runner.invoke( + cli, + [], + input="\n".join( + [ + "https://example.testrail.io", + "Demo Project", + "", + "user@example.com", + "1", + "secret-key", + "secret-key", + "n", + "y", + "999", + "101", + ] + ) + + "\n", + ) + + assert result.exit_code == 0 + config_data = yaml.safe_load(Path("config.yml").read_text(encoding="utf-8")) + assert config_data["suite_id"] == 101 + + @pytest.mark.cli + def test_init_prompts_before_overwriting_existing_file(self, runner): + with runner.isolated_filesystem(): + Path("config.yml").write_text("host: old\n", encoding="utf-8") + + result = runner.invoke( + cli, + [], + input="n\n", + catch_exceptions=False, + ) + + assert result.exit_code == 1 + assert "Initialization cancelled. Existing config was left unchanged." 
in result.output + assert Path("config.yml").read_text(encoding="utf-8") == "host: old\n" + + @pytest.mark.cli + def test_init_help_uses_styled_command(self, runner): + result = runner.invoke(cli, ["--help"]) + + assert result.exit_code == 0 + assert "Usage:" in result.output + assert "--output" in result.output diff --git a/tests/test_cmd_labels.py b/tests/test_cmd_labels.py index 679440fa..1a20a768 100644 --- a/tests/test_cmd_labels.py +++ b/tests/test_cmd_labels.py @@ -82,6 +82,23 @@ def test_add_label_api_error(self, mock_project_client): assert result.exit_code == 1 mock_elog.assert_called_with("Failed to add label: API Error: Label already exists") + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_label_dry_run_skips_api_call(self, mock_project_client): + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + mock_client_instance.project.project_id = 1 + self.environment.dry_run = True + + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke(cmd_labels.add, ['--title', 'Test Label'], obj=self.environment) + + assert result.exit_code == 0 + mock_client_instance.api_request_handler.add_label.assert_not_called() + mock_log.assert_any_call("Dry run: would add a label in TestRail.") + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') def test_update_label_success(self, mock_project_client): """Test successful label update""" @@ -673,6 +690,28 @@ def test_add_label_to_tests_success(self, mock_project_client): ) mock_log.assert_any_call("Successfully processed 1 test(s):") + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') + def test_add_label_to_tests_dry_run_skips_api_call(self, mock_project_client): + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + 
mock_client_instance.project.project_id = 1 + mock_client_instance.resolve_project.return_value = None + self.environment.dry_run = True + + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_labels.tests, + ['add', '--test-ids', '1,2', '--title', 'label1,label2'], + obj=self.environment + ) + + assert result.exit_code == 0 + mock_client_instance.api_request_handler.add_labels_to_tests.assert_not_called() + mock_log.assert_any_call("Dry run: would add label(s) to test(s).") + @mock.patch('trcli.commands.cmd_labels.ProjectBasedClient') def test_add_label_to_tests_with_csv_file(self, mock_project_client): """Test label addition to tests using CSV file""" @@ -1022,4 +1061,4 @@ def test_add_labels_to_tests_max_labels_validation(self, mock_project_client): assert result.exit_code == 1 mock_elog.assert_called_with("Error: Cannot add more than 10 labels at once. 
You provided 11 valid labels.") - \ No newline at end of file + diff --git a/tests/test_cmd_parse_cucumber.py b/tests/test_cmd_parse_cucumber.py index 839ab068..24f45b81 100644 --- a/tests/test_cmd_parse_cucumber.py +++ b/tests/test_cmd_parse_cucumber.py @@ -71,6 +71,83 @@ def test_parse_cucumber_workflow1_results_only( mock_parser.parse_file.assert_called_once() mock_uploader.upload_results.assert_called_once() + @pytest.mark.cmd_parse_cucumber + @patch("trcli.api.api_request_handler.ApiRequestHandler") + @patch("trcli.api.api_client.APIClient") + @patch("trcli.commands.cmd_parse_cucumber.ResultsUploader") + @patch("trcli.commands.cmd_parse_cucumber.CucumberParser") + def test_parse_cucumber_json_output( + self, mock_parser_class, mock_uploader_class, mock_api_client_class, mock_api_handler_class + ): + mock_api_client = MagicMock() + mock_api_client_class.return_value = mock_api_client + mock_api_client_class.build_uploader_metadata.return_value = {} + + mock_api_handler = MagicMock() + mock_api_handler_class.return_value = mock_api_handler + + mock_project_data = MagicMock() + mock_project_data.project_id = 1 + mock_api_handler.get_project_data.return_value = mock_project_data + + mock_parser = MagicMock() + mock_parser_class.return_value = mock_parser + mock_case = MagicMock() + mock_case.case_id = 101 + mock_case.result = MagicMock() + mock_section = MagicMock() + mock_section.testcases = [mock_case] + mock_suite = MagicMock() + mock_suite.name = "Test Suite" + mock_suite.testsections = [mock_section] + mock_parser.parse_file.return_value = [mock_suite] + + mock_uploader = MagicMock() + mock_uploader_class.return_value = mock_uploader + mock_uploader.last_run_id = 123 + + result = self.runner.invoke( + cmd_parse_cucumber.cli, + ["--file", self.test_cucumber_path, "--suite-id", "2", "--title", "Test Run", "--json"], + obj=self.environment, + ) + + assert result.exit_code == 0 + payload = json.loads(result.output[result.output.find("{"):]) + assert payload["ok"] 
is True + assert payload["command"] == "parse_cucumber" + assert payload["data"]["run_id"] == 123 + assert payload["data"]["parsed"]["results"] == 1 + + @pytest.mark.cmd_parse_cucumber + @patch("trcli.api.api_request_handler.ApiRequestHandler") + @patch("trcli.api.api_client.APIClient") + @patch("trcli.commands.cmd_parse_cucumber.ResultsUploader") + @patch("trcli.commands.cmd_parse_cucumber.CucumberParser") + def test_parse_cucumber_dry_run_skips_api_and_uploader( + self, mock_parser_class, mock_uploader_class, mock_api_client_class, mock_api_handler_class + ): + self.environment.dry_run = True + + mock_parser = MagicMock() + mock_parser_class.return_value = mock_parser + mock_suite = MagicMock() + mock_suite.testsections = [] + mock_parser.parse_file.return_value = [mock_suite] + + result = self.runner.invoke( + cmd_parse_cucumber.cli, + ["--file", self.test_cucumber_path, "--suite-id", "2", "--title", "Test Run"], + obj=self.environment, + ) + + assert result.exit_code == 0 + mock_parser.parse_file.assert_called_once_with(bdd_matching_mode=False) + mock_uploader_class.assert_not_called() + mock_api_client_class.assert_not_called() + mock_api_handler_class.assert_not_called() + assert "dry run: would upload cucumber results to testrail." 
in result.output.lower() + @pytest.mark.cmd_parse_cucumber @patch("trcli.api.api_request_handler.ApiRequestHandler") @patch("trcli.api.api_client.APIClient") diff --git a/tests/test_cmd_references.py b/tests/test_cmd_references.py index a04a26ec..2ec74e21 100644 --- a/tests/test_cmd_references.py +++ b/tests/test_cmd_references.py @@ -124,6 +124,26 @@ def test_add_references_api_failure(self, mock_project_client): mock_elog.assert_any_call(" ✗ Test case 1: API Error") mock_elog.assert_any_call("Failed to add references to 1 test case(s)") + @mock.patch('trcli.commands.cmd_references.ProjectBasedClient') + def test_add_references_dry_run_skips_api_calls(self, mock_project_client): + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance + self.environment.dry_run = True + + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_references.cases, + ['add', '--case-ids', '1,2', '--refs', 'REQ-1,REQ-2'], + obj=self.environment + ) + + assert result.exit_code == 0 + mock_client_instance.api_request_handler.add_case_references.assert_not_called() + mock_log.assert_any_call("Dry run: would add references to test case(s).") + @mock.patch('trcli.commands.cmd_references.ProjectBasedClient') def test_update_references_success(self, mock_project_client): """Test successful update of references on test cases""" @@ -210,6 +230,26 @@ def test_delete_references_specific_success(self, mock_project_client): mock_log.assert_any_call("References to delete: REQ-1, REQ-2") mock_log.assert_any_call("Successfully deleted references from 1 test case(s)") + @mock.patch('trcli.commands.cmd_references.ProjectBasedClient') + def test_delete_references_dry_run_skips_api_calls(self, mock_project_client): + mock_client_instance = MagicMock() + mock_project_client.return_value = mock_client_instance 
+ self.environment.dry_run = True + + with patch.object(self.environment, 'log') as mock_log, \ + patch.object(self.environment, 'set_parameters'), \ + patch.object(self.environment, 'check_for_required_parameters'): + + result = self.runner.invoke( + cmd_references.cases, + ['delete', '--case-ids', '1', '--refs', 'REQ-1', '--yes'], + obj=self.environment + ) + + assert result.exit_code == 0 + mock_client_instance.api_request_handler.delete_case_references.assert_not_called() + mock_log.assert_any_call("Dry run: would delete references from test case(s).") + @mock.patch('trcli.commands.cmd_references.ProjectBasedClient') def test_delete_references_empty_specific_refs(self, mock_project_client): """Test deletion with empty specific references""" diff --git a/tests/test_cmd_status.py b/tests/test_cmd_status.py new file mode 100644 index 00000000..8fc6ee0a --- /dev/null +++ b/tests/test_cmd_status.py @@ -0,0 +1,171 @@ +import pytest +import json +from unittest.mock import MagicMock + +from click.testing import CliRunner + +from trcli.cli import cli + + +@pytest.fixture +def runner(): + return CliRunner() + + +class TestStatusCommand: + @pytest.mark.cli + def test_status_suppresses_global_update_banner(self, runner, mocker): + mocker.patch( + "trcli.cli.check_for_updates", + return_value="\n A new version of TestRail CLI is available!\n Current: 1.13.3 | Latest: 1.14.0\n", + ) + mocker.patch("trcli.commands.cmd_status._query_pypi", return_value="1.14.0") + + result = runner.invoke(cli, ["status"]) + + assert result.exit_code == 0 + assert result.output.count("A new version of TestRail CLI is available!") == 0 + assert "Version:" in result.output + + @pytest.mark.cli + def test_status_reports_partial_without_configuration(self, runner, mocker): + mocker.patch("trcli.cli.check_for_updates", return_value=None) + mocker.patch("trcli.commands.cmd_status._query_pypi", return_value="1.14.0") + + with runner.isolated_filesystem(): + result = runner.invoke(cli, ["status"]) + + 
assert result.exit_code == 0 + assert "TRCLI Status: Partial" in result.output + assert "Host: not configured" in result.output + assert "Auth: not configured" in result.output + assert "Project: not configured" in result.output + + @pytest.mark.cli + def test_status_reports_error_for_invalid_host(self, runner, mocker): + mocker.patch("trcli.cli.check_for_updates", return_value=None) + mocker.patch("trcli.commands.cmd_status._query_pypi", return_value="1.14.0") + + result = runner.invoke( + cli, + ["--host", "fake_host.com/", "--username", "user@example.com", "--key", "secret", "status"], + ) + + assert result.exit_code == 1 + assert "TRCLI Status: Error" in result.output + assert "Host is invalid." in result.output + + @pytest.mark.cli + def test_status_reports_ready_with_valid_project(self, runner, mocker): + mocker.patch("trcli.cli.check_for_updates", return_value=None) + mocker.patch("trcli.commands.cmd_status._query_pypi", return_value="1.14.0") + mock_api_client = MagicMock() + mock_api_client.send_get.return_value = MagicMock(status_code=200, error_message="", response_text=[]) + mocker.patch("trcli.commands.cmd_status._build_api_client", return_value=mock_api_client) + mock_api_handler = MagicMock() + mock_api_handler.get_project_data.return_value = MagicMock(project_id=12) + mock_api_handler.check_suite_id.return_value = (True, "") + mocker.patch("trcli.commands.cmd_status.ApiRequestHandler", return_value=mock_api_handler) + + result = runner.invoke( + cli, + [ + "--host", + "https://example.testrail.io", + "--project", + "My Project", + "--username", + "user@example.com", + "--key", + "secret", + "status", + ], + ) + + assert result.exit_code == 0 + assert "TRCLI Status: Ready" in result.output + assert "Reachable: yes" in result.output + assert "Authentication: valid" in result.output + assert "Project Check: valid" in result.output + assert "Project ID: 12" in result.output + + @pytest.mark.cli + def test_status_verbose_shows_parameter_sources(self, 
runner, mocker): + mocker.patch("trcli.cli.check_for_updates", return_value=None) + mocker.patch("trcli.commands.cmd_status._query_pypi", return_value="1.14.0") + mock_api_client = MagicMock() + mock_api_client.send_get.return_value = MagicMock(status_code=200, error_message="", response_text=[]) + mocker.patch("trcli.commands.cmd_status._build_api_client", return_value=mock_api_client) + mock_api_handler = MagicMock() + mock_api_handler.get_project_data.return_value = MagicMock(project_id=12) + mock_api_handler.check_suite_id.return_value = (True, "") + mocker.patch("trcli.commands.cmd_status.ApiRequestHandler", return_value=mock_api_handler) + + result = runner.invoke( + cli, + ["--verbose", "status"], + env={ + "TR_CLI_HOST": "https://example.testrail.io", + "TR_CLI_PROJECT": "My Project", + "TR_CLI_USERNAME": "user@example.com", + "TR_CLI_KEY": "secret", + }, + ) + + assert result.exit_code == 0 + assert "Verbose:" in result.output + assert "Resolved parameter sources:" in result.output + assert "host: environment variable" in result.output + assert "project: environment variable" in result.output + + @pytest.mark.cli + def test_status_json_output(self, runner, mocker): + mocker.patch("trcli.cli.check_for_updates", return_value=None) + mocker.patch("trcli.commands.cmd_status._query_pypi", return_value="1.14.0") + mock_api_client = MagicMock() + mock_api_client.send_get.return_value = MagicMock(status_code=200, error_message="", response_text=[]) + mocker.patch("trcli.commands.cmd_status._build_api_client", return_value=mock_api_client) + mock_api_handler = MagicMock() + mock_api_handler.get_project_data.return_value = MagicMock(project_id=12) + mock_api_handler.check_suite_id.return_value = (True, "") + mocker.patch("trcli.commands.cmd_status.ApiRequestHandler", return_value=mock_api_handler) + + result = runner.invoke( + cli, + [ + "--host", + "https://example.testrail.io", + "--project", + "My Project", + "--username", + "user@example.com", + "--key", + 
"secret", + "status", + "--json", + ], + ) + + assert result.exit_code == 0 + payload = json.loads(result.output) + assert payload["ok"] is True + assert payload["command"] == "status" + assert payload["data"]["verdict"] == "Ready" + assert payload["data"]["connection"]["reachable"] == "yes" + assert payload["data"]["context"]["project_check"] == "valid" + + @pytest.mark.cli + def test_status_json_output_error(self, runner, mocker): + mocker.patch("trcli.cli.check_for_updates", return_value=None) + mocker.patch("trcli.commands.cmd_status._query_pypi", return_value="1.14.0") + + result = runner.invoke( + cli, + ["--host", "fake_host.com/", "--username", "user@example.com", "--key", "secret", "status", "--json"], + ) + + assert result.exit_code == 1 + payload = json.loads(result.output) + assert payload["ok"] is False + assert payload["data"]["verdict"] == "Error" + assert "Host is invalid." in payload["errors"] diff --git a/tests/test_data/cli_test_data.py b/tests/test_data/cli_test_data.py index 6cb09ea6..10b87d3c 100644 --- a/tests/test_data/cli_test_data.py +++ b/tests/test_data/cli_test_data.py @@ -64,6 +64,7 @@ trcli_description = ( "Supported and loaded modules:\n" + " - init: Interactively create a starter config file\n" " - parse_junit: JUnit XML Files (& Similar)\n" " - parse_cucumber: Cucumber JSON results (BDD)\n" " - import_gherkin: Upload .feature files to TestRail BDD\n" diff --git a/trcli/api/api_client.py b/trcli/api/api_client.py index 3f26c16d..adb13d57 100644 --- a/trcli/api/api_client.py +++ b/trcli/api/api_client.py @@ -54,6 +54,7 @@ def __init__( proxy_user: str = None, noproxy: str = None, uploader_metadata: str = None, + dry_run: bool = False, ): self.username = "" self.password = "" @@ -68,6 +69,7 @@ def __init__( self.proxy_user = proxy_user self.noproxy = noproxy.split(",") if noproxy else [] self.uploader_metadata = uploader_metadata + self.dry_run = dry_run if not host_name.endswith("/"): host_name = host_name + "/" @@ -95,8 +97,25 @@ 
def send_post( * timeout occurred * connection error occurred """ + if self.dry_run: + return self.__build_dry_run_response(uri, payload, files) return self.__send_request("POST", uri, payload, files, as_form_data) + def __build_dry_run_response( + self, uri: str, payload: dict = None, files: Dict[str, Path] = None + ) -> APIClientResult: + payload_preview = { + "uri": uri, + "payload": payload or {}, + "has_files": bool(files), + "dry_run": True, + } + self.logging_function(f"Dry run: skipping POST {uri}") + self.verbose_logging_function( + APIClient.format_dry_run_for_vlog(method="POST", url=self.__url + uri, payload=payload, files=files) + ) + return APIClientResult(status_code=200, response_text=payload_preview, error_message="") + def __send_request( self, method: str, uri: str, payload: dict, files: Dict[str, Path] = None, as_form_data: bool = False ) -> APIClientResult: @@ -335,3 +354,11 @@ def format_request_for_vlog(method: str, url: str, payload: dict, headers: dict @staticmethod def format_response_for_vlog(status_code, body): return f"response status code: {status_code}\nresponse body: {body}\n****" + + @staticmethod + def format_dry_run_for_vlog(method: str, url: str, payload: dict, files: Dict[str, Path] = None): + file_summary = "yes" if files else "no" + return ( + APIClient.format_request_for_vlog(method=method, url=url, payload=payload) + + f"response status code: 200\nresponse body: {{'dry_run': True, 'has_files': {file_summary == 'yes'}}}\n****" + ) diff --git a/trcli/api/project_based_client.py b/trcli/api/project_based_client.py index fbfdfbcd..c468a0cd 100644 --- a/trcli/api/project_based_client.py +++ b/trcli/api/project_based_client.py @@ -50,6 +50,7 @@ def instantiate_api_client(self) -> APIClient: "proxy_user": proxy_user, "noproxy": noproxy, "uploader_metadata": uploader_metadata, + "dry_run": bool(getattr(self.environment, "dry_run", False)), } if self.environment.timeout: @@ -222,17 +223,24 @@ def create_or_update_test_run(self) -> 
Tuple[int, str]: refs=self.environment.run_refs, refs_action=getattr(self.environment, "run_refs_action", "add"), ) + is_dry_run = bool(getattr(self.environment, "dry_run", False)) if self.environment.auto_close_run: - self.environment.log("Closing run. ", new_line=False) - close_run, error_message = self.api_request_handler.close_run(run_id) - if close_run: - self.environment.log("Run closed successfully.") + if not self.environment.run_id and is_dry_run: + self.environment.log("Dry run: would close the newly created run.") else: - self.environment.elog(f"Failed to close run: {error_message}") + self.environment.log("Closing run. ", new_line=False) + close_run, error_message = self.api_request_handler.close_run(run_id) + if close_run: + self.environment.log("Run closed successfully.") + else: + self.environment.elog(f"Failed to close run: {error_message}") if error_message: self.environment.elog("\n" + error_message) else: - self.environment.log(f"Test run: {self.environment.host.rstrip('/')}/index.php?/runs/view/{run_id}") + if is_dry_run and not self.environment.run_id: + self.environment.log("Dry run: test run creation preview completed.") + else: + self.environment.log(f"Test run: {self.environment.host.rstrip('/')}/index.php?/runs/view/{run_id}") return run_id, error_message def prompt_user_and_add_items( diff --git a/trcli/api/run_handler.py b/trcli/api/run_handler.py index 94478cec..541eee9c 100644 --- a/trcli/api/run_handler.py +++ b/trcli/api/run_handler.py @@ -99,6 +99,9 @@ def add_run( return None, error_msg if not plan_id: + if self.client.dry_run: + self.environment.log(f"Dry run: would create test run in project {project_id}.") + return 0, "" response = self.client.send_post(f"add_run/{project_id}", add_run_data) if response.error_message: return None, response.error_message @@ -114,6 +117,9 @@ def add_run( } else: entry_data = add_run_data + if self.client.dry_run: + self.environment.log(f"Dry run: would add a run entry to plan {plan_id}.") + return 
0, "" response = self.client.send_post(f"add_plan_entry/{plan_id}", entry_data) if response.error_message: return None, response.error_message @@ -180,6 +186,13 @@ def update_run( plan_id = run_response.response_text["plan_id"] config_ids = run_response.response_text["config_ids"] + if self.client.dry_run: + preview_run = dict(run_response.response_text) + preview_run.update(add_run_data) + preview_run["id"] = run_id + preview_run["dry_run"] = True + self.environment.log(f"Dry run: would update test run {run_id}.") + return preview_run, "" if not plan_id: update_response = self.client.send_post(f"update_run/{run_id}", add_run_data) elif plan_id and config_ids: @@ -293,6 +306,9 @@ def close_run(self, run_id: int) -> Tuple[dict, str]: :returns: Tuple with dict created resources and error string. """ body = {"run_id": run_id} + if self.client.dry_run: + self.environment.log(f"Dry run: would close run {run_id}.") + return {"id": run_id, "dry_run": True}, "" response = self.client.send_post(f"close_run/{run_id}", body) return response.response_text, response.error_message @@ -303,5 +319,8 @@ def delete_run(self, run_id: int) -> Tuple[dict, str]: :param run_id: run id :returns: Tuple with dict created resources and error string. 
""" + if self.client.dry_run: + self.environment.log(f"Dry run: would delete run {run_id}.") + return {"id": run_id, "dry_run": True}, "" response = self.client.send_post(f"delete_run/{run_id}", payload={}) return response.response_text, response.error_message diff --git a/trcli/cli.py b/trcli/cli.py index 716ed8d2..0f018608 100755 --- a/trcli/cli.py +++ b/trcli/cli.py @@ -1,3 +1,4 @@ +import json import os import sys from beartype.typing import List, Union @@ -12,6 +13,7 @@ from trcli.constants import ( FAULT_MAPPING, + HELP_EPILOG, MISSING_COMMAND_SLOGAN, TOOL_USAGE, TOOL_VERSION, @@ -19,6 +21,7 @@ ) from trcli.data_classes.data_parsers import FieldsParser from trcli.settings import DEFAULT_API_CALL_TIMEOUT, DEFAULT_BATCH_SIZE +from trcli.cli_styles import StyledCommand, StyledGroup, StyledHelpMixin, style_text # Import structured logging infrastructure from trcli.logging import get_logger @@ -86,6 +89,7 @@ def __init__(self, cmd="parse_junit"): self.noproxy = None self.proxy_user = None self.parallel_pagination = None + self.dry_run = None # Structured logger - lazy initialization self._logger = None @@ -101,6 +105,10 @@ def logger(self): self._logger = get_logger(f"trcli.{self.cmd}") return self._logger + @property + def wants_json_output(self) -> bool: + return bool(getattr(self, "json_output", False)) + @property def case_fields(self): return self._case_fields @@ -133,7 +141,8 @@ def log(self, msg: str, new_line=True, *args): if not self.silent: if args: msg %= args - click.echo(msg, file=sys.stdout, nl=new_line) + output_stream = sys.stderr if self.wants_json_output else sys.stdout + click.echo(msg, file=output_stream, nl=new_line) # Also log to structured logger (backward compatible) try: @@ -150,7 +159,8 @@ def vlog(self, msg: str, *args): if self.verbose: if args: msg %= args - click.echo(msg, file=sys.stdout) + output_stream = sys.stderr if self.wants_json_output else sys.stdout + click.echo(msg, file=output_stream) # Also log to structured logger try: 
@@ -177,6 +187,9 @@ def elog(msg: str, new_line=True, *args): # Silently fail if structured logging has issues pass + def emit_json(self, payload: dict, *, new_line: bool = True): + click.echo(json.dumps(payload, indent=2), file=sys.stdout, nl=new_line) + def get_progress_bar(self, results_amount: int, prefix: str): disabled = True if self.silent else False return tqdm( @@ -251,17 +264,19 @@ def check_for_required_parameters(self): def parse_config_file(self, context: click.Context): """Sets config file path from context and information if default or custom config file should be used.""" executable_folder = Path(sys.argv[0]).parent + current_folder = Path.cwd() if context.params["config"]: self.config = context.params["config"] self.default_config_file = False else: - if Path(executable_folder / "config.yml").is_file(): - self.config = executable_folder / "config.yml" - elif Path(executable_folder / "config.yaml").is_file(): - self.config = executable_folder / "config.yaml" - else: - self.config = None + config_candidates = [ + current_folder / "config.yml", + current_folder / "config.yaml", + executable_folder / "config.yml", + executable_folder / "config.yaml", + ] + self.config = next((candidate for candidate in config_candidates if candidate.is_file()), None) if self.config: self.parse_params_from_config_file(self.config) @@ -292,19 +307,24 @@ def parse_params_from_config_file(self, file_path: Path): pass_environment = click.make_pass_decorator(Environment, ensure=True) -class TRCLI(click.MultiCommand): +class TRCLI(StyledHelpMixin, click.MultiCommand): + command_class = StyledCommand + group_class = StyledGroup + def __init__(self, *args, **kwargs): + self._skip_update_notice = "status" in sys.argv[1:] # Use invoke_without_command=True to be able to print # short tool description when starting without parameters - print(TOOL_VERSION) + print(style_text(TOOL_VERSION, "accent", bold=True)) # Check for updates (non-blocking) - try: - update_message = 
check_for_updates(__version__) - if update_message: - click.secho(update_message, fg="yellow", err=True) - except Exception: - pass + if not self._skip_update_notice: + try: + update_message = check_for_updates(__version__) + if update_message: + click.echo(style_text(update_message, "warn", bold=True), err=True) + except Exception: + pass click.MultiCommand.__init__(self, invoke_without_command=True, *args, **kwargs) @@ -329,7 +349,7 @@ def main(self, *args, **kwargs): return super().main(windows_expand_args=False, *args, **kwargs) -@click.command(cls=TRCLI, context_settings=CONTEXT_SETTINGS) +@click.command(cls=TRCLI, context_settings=CONTEXT_SETTINGS, epilog=HELP_EPILOG) @click.pass_context @pass_environment @click.option( @@ -412,6 +432,11 @@ def main(self, *args, **kwargs): @click.option( "--parallel-pagination", is_flag=True, help="Enable parallel pagination for faster case fetching (experimental)." ) +@click.option( + "--dry-run", + is_flag=True, + help="Preview write operations without sending mutating requests to TestRail.", +) def cli(environment: Environment, context: click.core.Context, *args, **kwargs): """TestRail CLI""" if not sys.argv[1:]: diff --git a/trcli/cli_styles.py b/trcli/cli_styles.py new file mode 100644 index 00000000..0fd69301 --- /dev/null +++ b/trcli/cli_styles.py @@ -0,0 +1,171 @@ +import inspect +import os +import re +import sys + +import click +from click import _compat + + +LOBSTER_PALETTE = { + "accent": "33", + "accent_bright": "93", + "accent_dim": "33", + "info": "93", + "success": "32", + "warn": "33", + "error": "31", + "muted": "37", +} + + +ANSI_ESCAPE_RE = re.compile(r"\x1b\[[0-9;]*m") + + +def strip_ansi(text: str) -> str: + return ANSI_ESCAPE_RE.sub("", text) + + +def should_color(ctx: click.Context | None) -> bool: + if os.getenv("NO_COLOR"): + return False + if ctx is None: + return False + return not _compat.should_strip_ansi(sys.stdout, getattr(ctx, "color", None)) + + +def style_text( + text: str, + color_name: str, 
+    *,
+    ctx: click.Context | None = None,
+    bold: bool = False,
+    dim: bool = False,
+) -> str:
+    if not should_color(ctx):
+        return text
+
+    codes = [LOBSTER_PALETTE[color_name]] + (["1"] if bold else [])
+    if dim:
+        codes.append("2")
+    return f"\033[{';'.join(codes)}m{text}\033[0m"
+
+
+class StyledHelpMixin:
+    command_class = None
+    group_class = None
+
+    @staticmethod
+    def _style_usage_piece(ctx: click.Context, piece: str) -> str:
+        if not piece:
+            return piece
+        if piece.startswith("-") or piece.startswith("[") or piece.startswith("<"):
+            return style_text(piece, "muted", ctx=ctx, bold=True)
+        if piece.isupper():
+            return style_text(piece, "muted", ctx=ctx, bold=True)
+        return style_text(piece, "accent_bright", ctx=ctx, bold=True)
+
+    def collect_usage_pieces(self, ctx: click.Context) -> list[str]:
+        return [self._style_usage_piece(ctx, piece) for piece in super().collect_usage_pieces(ctx)]
+
+    def format_usage(self, ctx: click.Context, formatter: click.HelpFormatter) -> None:
+        pieces = self.collect_usage_pieces(ctx)
+        formatter.write_usage(
+            style_text(ctx.command_path, "accent_bright", ctx=ctx, bold=True),
+            " ".join(pieces),
+            prefix=style_text("Usage: ", "accent", ctx=ctx, bold=True),
+        )
+
+    def format_help_text(self, ctx: click.Context, formatter: click.HelpFormatter) -> None:
+        if self.help:
+            formatter.write_paragraph()
+            formatter.write_text(style_text(self.help, "muted", ctx=ctx))
+            formatter.write_paragraph()
+
+    def format_options(self, ctx: click.Context, formatter: click.HelpFormatter) -> None:
+        records = []
+        for param in self.get_params(ctx):
+            record = param.get_help_record(ctx)
+            if record is None:
+                continue
+            option, help_text = record
+            records.append(
+                (
+                    style_text(option, "accent_bright", ctx=ctx, bold=True),
+                    style_text(help_text, "muted", ctx=ctx),
+                )
+            )
+
+        if records:
+            with formatter.section(style_text("Options", "accent", ctx=ctx, bold=True)):
+                formatter.write_dl(records)
+
+        if isinstance(self, click.MultiCommand):
+            self.format_commands(ctx, 
formatter) + + def format_commands(self, ctx: click.Context, formatter: click.HelpFormatter) -> None: + commands = [] + for subcommand in self.list_commands(ctx): + cmd = self.get_command(ctx, subcommand) + if cmd is None or cmd.hidden: + continue + commands.append( + ( + style_text(subcommand, "accent_bright", ctx=ctx, bold=True), + style_text(cmd.get_short_help_str(), "muted", ctx=ctx), + ) + ) + + if commands: + with formatter.section(style_text("Commands", "accent", ctx=ctx, bold=True)): + formatter.write_dl(commands) + + def format_epilog(self, ctx: click.Context, formatter: click.HelpFormatter) -> None: + if not self.epilog: + return + + formatter.write_paragraph() + lines = inspect.cleandoc(self.epilog).splitlines() + in_examples = False + + for line in lines: + stripped = line.strip() + if not stripped: + formatter.write("\n") + continue + + if stripped == "Examples:": + in_examples = True + formatter.write(f"{style_text('Examples:', 'accent', ctx=ctx, bold=True)}\n") + continue + + if stripped.startswith("Docs:"): + in_examples = False + label, url = stripped.split(":", 1) + formatter.write("\n") + formatter.write( + f"{style_text(label + ':', 'muted', ctx=ctx)} {style_text(url.strip(), 'accent_bright', ctx=ctx)}\n" + ) + continue + + leading_spaces = len(line) - len(line.lstrip(" ")) + indent = " " * leading_spaces + + if in_examples: + color = "accent_bright" if leading_spaces <= 2 else "muted" + formatter.write(f"{indent}{style_text(stripped, color, ctx=ctx)}\n") + continue + + formatter.write(f"{indent}{style_text(stripped, 'muted', ctx=ctx)}\n") + + +class StyledCommand(StyledHelpMixin, click.Command): + pass + + +class StyledGroup(StyledHelpMixin, click.Group): + command_class = StyledCommand + group_class = None + + +StyledGroup.group_class = StyledGroup diff --git a/trcli/commands/cmd_add_run.py b/trcli/commands/cmd_add_run.py index 7e84073d..721cc175 100644 --- a/trcli/commands/cmd_add_run.py +++ b/trcli/commands/cmd_add_run.py @@ -3,6 +3,8 @@ 
from trcli.api.project_based_client import ProjectBasedClient from trcli.cli import pass_environment, CONTEXT_SETTINGS, Environment +from trcli.cli_styles import StyledCommand +from trcli.commands.results_parser_helpers import build_command_json, json_output_option from trcli.data_classes.dataclass_testrail import TestRailSuite @@ -42,7 +44,41 @@ def write_run_to_file(environment: Environment, run_id: int): environment.log("Done.") -@click.command(context_settings=CONTEXT_SETTINGS) +def _emit_add_run_json(environment: Environment, *, ok: bool, action: str, run_id: int = None, + errors: list[str] = None, file_written: bool = False): + effective_run_id = environment.run_id if environment.dry_run and environment.run_id else run_id + if environment.dry_run and not environment.run_id: + effective_run_id = None + environment.emit_json( + build_command_json( + "add_run", + ok=ok, + dry_run=bool(environment.dry_run), + data={ + "action": action, + "run_id": effective_run_id, + "title": environment.title, + "project": environment.project, + "project_id": environment.project_id, + "suite_id": environment.suite_id, + "description": environment.run_description, + "milestone_id": environment.milestone_id, + "assigned_to_id": environment.run_assigned_to_id, + "include_all": bool(environment.run_include_all), + "case_ids": environment.run_case_ids, + "refs": environment.run_refs, + "refs_action": getattr(environment, "run_refs_action", "add"), + "auto_close_run": bool(environment.auto_close_run), + "file": environment.file, + "file_written": file_written, + }, + errors=errors, + ) + ) + + +@click.command(cls=StyledCommand, context_settings=CONTEXT_SETTINGS) +@json_output_option @click.option("--title", metavar="", help="Title of Test Run to be created or updated in TestRail.") @click.option( "--run-id", @@ -121,13 +157,25 @@ def cli(environment: Environment, context: click.Context, *args, **kwargs): environment.cmd = "add_run" environment.set_parameters(context) 
environment.check_for_required_parameters() + action = "update" if environment.run_id else "create" if environment.run_refs and len(environment.run_refs) > 250: - environment.elog("Error: References field cannot exceed 250 characters.") + error_message = "Error: References field cannot exceed 250 characters." + if environment.wants_json_output: + _emit_add_run_json(environment, ok=False, action=action, errors=[error_message]) + else: + environment.elog(error_message) exit(1) if environment.run_refs_action and environment.run_refs_action != 'add' and not environment.run_id: - environment.elog("Error: --run-refs-action 'update' and 'delete' can only be used when updating an existing run (--run-id required).") + error_message = ( + "Error: --run-refs-action 'update' and 'delete' can only be used when updating an existing run " + "(--run-id required)." + ) + if environment.wants_json_output: + _emit_add_run_json(environment, ok=False, action=action, errors=[error_message]) + else: + environment.elog(error_message) exit(1) if environment.run_refs_action == 'delete' and not environment.run_refs and environment.run_id: @@ -143,10 +191,28 @@ def cli(environment: Environment, context: click.Context, *args, **kwargs): project_client.resolve_suite() run_id, error_message = project_client.create_or_update_test_run() if error_message: + if environment.wants_json_output: + _emit_add_run_json(environment, ok=False, action=action, run_id=run_id, errors=[error_message]) exit(1) - environment.run_id = run_id - environment.log(f"title: {environment.title}") - environment.log(f"run_id: {run_id}") - if environment.file is not None: + if environment.dry_run: + if environment.wants_json_output: + _emit_add_run_json(environment, ok=True, action=action, run_id=run_id) + return + environment.log("Dry run: no TestRail changes were made.") + environment.log(f"title: {environment.title}") + if environment.run_id: + environment.log(f"run_id: {environment.run_id}") + else: + 
environment.log("run_id: ") + else: + environment.run_id = run_id + if not environment.wants_json_output: + environment.log(f"title: {environment.title}") + environment.log(f"run_id: {run_id}") + file_written = False + if environment.file is not None and not environment.dry_run: write_run_to_file(environment, run_id) + file_written = True + if environment.wants_json_output: + _emit_add_run_json(environment, ok=True, action=action, run_id=run_id, file_written=file_written) diff --git a/trcli/commands/cmd_export_gherkin.py b/trcli/commands/cmd_export_gherkin.py index cc4941f2..edad0959 100644 --- a/trcli/commands/cmd_export_gherkin.py +++ b/trcli/commands/cmd_export_gherkin.py @@ -2,6 +2,7 @@ from pathlib import Path from trcli.cli import pass_environment, Environment, CONTEXT_SETTINGS +from trcli.cli_styles import StyledCommand from trcli.constants import FAULT_MAPPING from trcli.api.api_client import APIClient from trcli.api.api_request_handler import ApiRequestHandler @@ -9,7 +10,7 @@ import trcli -@click.command(context_settings=CONTEXT_SETTINGS) +@click.command(cls=StyledCommand, context_settings=CONTEXT_SETTINGS) @click.option( "--case-id", type=click.IntRange(min=1), @@ -67,6 +68,7 @@ def cli(environment: Environment, context: click.Context, case_id: int, output: verbose_logging_function=environment.vlog, logging_function=environment.log, uploader_metadata=uploader_metadata, + dry_run=bool(getattr(environment, "dry_run", False)), ) # Set credentials after initialization diff --git a/trcli/commands/cmd_import_gherkin.py b/trcli/commands/cmd_import_gherkin.py index d46aaf0d..2f0bd371 100644 --- a/trcli/commands/cmd_import_gherkin.py +++ b/trcli/commands/cmd_import_gherkin.py @@ -2,6 +2,8 @@ from pathlib import Path from trcli.cli import pass_environment, Environment, CONTEXT_SETTINGS +from trcli.cli_styles import StyledCommand +from trcli.commands.results_parser_helpers import build_command_json from trcli.constants import FAULT_MAPPING from trcli.api.api_client 
import APIClient from trcli.api.api_request_handler import ApiRequestHandler @@ -9,7 +11,7 @@ import trcli -@click.command(context_settings=CONTEXT_SETTINGS) +@click.command(cls=StyledCommand, context_settings=CONTEXT_SETTINGS) @click.option( "-f", "--file", @@ -32,7 +34,7 @@ required=False, help="TestRail case ID to update (required with --update flag).", ) -@click.option("--json-output", is_flag=True, help="Output case IDs in JSON format.") +@click.option("--json-output", "--json", "json_output", is_flag=True, help="Output structured results in JSON format.") @click.option("--update", is_flag=True, help="Update existing BDD test case instead of creating new one.") @click.pass_context @pass_environment @@ -108,6 +110,29 @@ def cli(environment: Environment, context: click.Context, file: str, section_id: environment.vlog(f"Target {id_type}: {target_id}") environment.vlog(f"API endpoint: POST /api/v2/{endpoint_name}/{target_id}") + if environment.dry_run: + if json_output: + environment.emit_json( + build_command_json( + "import_gherkin", + dry_run=True, + data={ + "action": "update" if update_mode else "create", + "target_id": target_id, + "target_type": id_type, + "feature_file": str(feature_path), + "feature_size": len(feature_content), + }, + ) + ) + else: + action = "update" if update_mode else "upload" + environment.log(f"Dry run: would {action} feature file to TestRail.") + environment.log(f" Target {id_type}: {target_id}") + environment.log(f" Feature file: {feature_path}") + environment.log(f" Feature size: {len(feature_content)} characters") + return + # Initialize API client environment.log("Connecting to TestRail...") @@ -119,6 +144,7 @@ def cli(environment: Environment, context: click.Context, file: str, section_id: verbose_logging_function=environment.vlog, logging_function=environment.log, uploader_metadata=uploader_metadata, + dry_run=bool(getattr(environment, "dry_run", False)), ) # Set credentials after initialization @@ -159,9 +185,19 @@ def 
cli(environment: Environment, context: click.Context, file: str, section_id: # Display results if kwargs.get("json_output"): - import json - - print(json.dumps({"case_ids": case_ids, "count": len(case_ids)}, indent=2)) + environment.emit_json( + build_command_json( + "import_gherkin", + data={ + "action": "update" if update_mode else "create", + "target_id": target_id, + "target_type": id_type, + "feature_file": str(feature_path), + "case_ids": case_ids, + "count": len(case_ids), + }, + ) + ) else: if update_mode: environment.log(f"\nSuccessfully updated feature file!") diff --git a/trcli/commands/cmd_init.py b/trcli/commands/cmd_init.py new file mode 100644 index 00000000..ce9b5aa4 --- /dev/null +++ b/trcli/commands/cmd_init.py @@ -0,0 +1,282 @@ +from pathlib import Path + +import click +import yaml + +from trcli.api.api_request_handler import ApiRequestHandler +from trcli.cli import CONTEXT_SETTINGS, Environment, pass_environment +from trcli.cli_styles import StyledCommand, style_text +from trcli.commands.cmd_status import _build_api_client, _validate_host +from trcli.constants import ProjectErrors, SuiteModes +from trcli.data_classes.dataclass_testrail import ProjectData, TestRailSuite +from trcli.settings import DEFAULT_API_CALL_TIMEOUT, DEFAULT_BATCH_SIZE + + +def _echo(message: str, color: str = "muted", *, bold: bool = False, err: bool = False): + ctx = click.get_current_context(silent=True) + click.echo(style_text(message, color, ctx=ctx, bold=bold), err=err) + + +def _prompt(message: str, **kwargs): + ctx = click.get_current_context(silent=True) + return click.prompt(style_text(message, "accent_bright", ctx=ctx, bold=True), **kwargs) + + +def _prompt_host() -> str: + while True: + host = _prompt("TestRail host", type=click.STRING).strip() + if _validate_host(host): + return host + _echo("Enter a full URL such as https://example.testrail.io", "error", err=True) + + +def _prompt_auth_method() -> str: + _echo("Authentication:", "accent") + _echo(" 1. 
API key (recommended)") + _echo(" 2. Password") + return _prompt("Select auth method", type=click.Choice(["1", "2"]), default="1", show_choices=False) + + +def _build_config_data( + host: str, + project: str, + project_id: int | None, + username: str, + auth_method: str, + secret: str, + suite_id: int | None = None, + proxy: str | None = None, + proxy_user: str | None = None, + noproxy: str | None = None, + insecure: bool = False, + timeout: float | None = None, + batch_size: int | None = None, +) -> dict: + data = { + "host": host, + "project": project, + "username": username, + } + if project_id: + data["project_id"] = project_id + if auth_method == "1": + data["key"] = secret + else: + data["password"] = secret + if suite_id: + data["suite_id"] = suite_id + if proxy: + data["proxy"] = proxy + if proxy_user: + data["proxy_user"] = proxy_user + if noproxy: + data["noproxy"] = noproxy + if insecure: + data["insecure"] = True + if timeout is not None: + data["timeout"] = timeout + if batch_size is not None: + data["batch_size"] = batch_size + return data + + +def _write_config(output_path: Path, data: dict): + header = [ + "# Generated by `trcli init`.", + "# Keep this file out of version control if it contains secrets.", + "", + ] + yaml_content = yaml.safe_dump(data, sort_keys=False, default_flow_style=False) + with open(output_path, "w", encoding="utf-8") as config_file: + config_file.write("\n".join(header)) + config_file.write(yaml_content) + + +def _configure_advanced_settings() -> dict: + ctx = click.get_current_context(silent=True) + if not click.confirm( + style_text( + "Configure advanced connection settings (proxy, SSL, timeout, batch size)?", + "accent_bright", + ctx=ctx, + bold=True, + ), + default=False, + ): + return {} + + _echo("") + _echo("Advanced settings:", "accent") + proxy = _prompt("Proxy URL", default="", show_default=False).strip() or None + proxy_user = None + noproxy = None + if proxy: + proxy_user = _prompt("Proxy credentials 
username:password", default="", show_default=False).strip() or None + noproxy = _prompt("No-proxy hosts (comma-separated)", default="", show_default=False).strip() or None + insecure = click.confirm( + style_text("Allow insecure SSL?", "accent_bright", ctx=ctx, bold=True), + default=False, + ) + timeout = _prompt("Request timeout in seconds", type=float, default=DEFAULT_API_CALL_TIMEOUT, show_default=True) + batch_size = _prompt("Batch size", type=click.IntRange(min=2), default=DEFAULT_BATCH_SIZE, show_default=True) + return { + "proxy": proxy, + "proxy_user": proxy_user, + "noproxy": noproxy, + "insecure": insecure, + "timeout": timeout, + "batch_size": batch_size, + } + + +def _resolve_project_context(environment: Environment) -> tuple[ProjectData | None, ApiRequestHandler | None]: + try: + api_handler = ApiRequestHandler( + environment=environment, + api_client=_build_api_client(environment), + suites_data=TestRailSuite(name="init"), + verify=False, + ) + project_data = api_handler.get_project_data(environment.project, environment.project_id) + if project_data.project_id > 0: + return project_data, api_handler + return project_data, api_handler + except Exception: + return None, None + + +def _configure_suite_id(environment: Environment) -> int | None: + project_data, api_handler = _resolve_project_context(environment) + if project_data and project_data.project_id > 0: + _echo("") + _echo(f"Project validation: OK (ID {project_data.project_id})", "success") + if not environment.project_id: + environment.project_id = project_data.project_id + + if project_data.suite_mode == SuiteModes.single_suite: + suite_ids, error = api_handler.get_suite_ids(project_data.project_id) + if not error and len(suite_ids) == 1: + ctx = click.get_current_context(silent=True) + if click.confirm( + style_text("Store the detected default Suite ID in config?", "accent_bright", ctx=ctx, bold=True), + default=False, + ): + return suite_ids[0] + return None + _echo("This project uses a 
single suite. Storing a Suite ID is optional.", "muted") + else: + _echo("This project can use explicit Suite IDs. A default Suite ID is optional.", "muted") + elif project_data and project_data.project_id in [ + ProjectErrors.multiple_project_same_name, + ProjectErrors.not_existing_project, + ProjectErrors.other_error, + ]: + _echo(f"Project validation skipped: {project_data.error_message}", "warn") + else: + _echo("Project validation skipped. You can still save a Suite ID manually.", "warn") + + ctx = click.get_current_context(silent=True) + if not click.confirm( + style_text("Configure a default Suite ID for this project?", "accent_bright", ctx=ctx, bold=True), + default=False, + ): + return None + + while True: + suite_id = _prompt("Default Suite ID", type=click.IntRange(min=1)) + if project_data and project_data.project_id > 0 and api_handler: + api_handler.suites_data_from_provider.suite_id = suite_id + suite_exists, error_message = api_handler.check_suite_id(project_data.project_id) + if suite_exists: + _echo(f"Suite validation: OK (ID {suite_id})", "success") + return suite_id + _echo(error_message, "error", err=True) + continue + return suite_id + + +@click.command(name="init", cls=StyledCommand, context_settings=CONTEXT_SETTINGS) +@click.option( + "--output", + type=click.Path(dir_okay=False, path_type=Path), + default=Path("config.yml"), + show_default=True, + help="Path to the generated TRCLI config file.", +) +@click.option( + "--overwrite", + is_flag=True, + help="Overwrite an existing config file without prompting.", +) +@pass_environment +def cli(environment: Environment, output: Path, overwrite: bool): + """Interactively create a starter TRCLI config file""" + environment.cmd = "init" + + _echo("TRCLI initialization", "accent", bold=True) + _echo("This wizard writes a starter config file for the current workspace.") + _echo("") + + output_path = output.expanduser().resolve() + if output_path.exists() and not overwrite: + replace = click.confirm( + 
style_text( + f"{output_path} already exists. Overwrite it?", + "warn", + ctx=click.get_current_context(silent=True), + ) + ) + if not replace: + _echo("Initialization cancelled. Existing config was left unchanged.", "warn") + raise click.Abort() + + host = _prompt_host() + project = _prompt("Default project name", type=click.STRING).strip() + project_id = _prompt("Project ID if needed for duplicate names", default="", show_default=False).strip() + username = _prompt("Username", type=click.STRING).strip() + auth_method = _prompt_auth_method() + secret_label = "API key" if auth_method == "1" else "Password" + secret = _prompt(secret_label, hide_input=True, confirmation_prompt=True, type=click.STRING).strip() + environment.host = host + environment.project = project + environment.project_id = int(project_id) if project_id else None + environment.username = username + environment.password = secret if auth_method == "2" else None + environment.key = secret if auth_method == "1" else None + + advanced_settings = _configure_advanced_settings() + for key, value in advanced_settings.items(): + setattr(environment, key, value) + + suite_id = _configure_suite_id(environment) + + output_path.parent.mkdir(parents=True, exist_ok=True) + config_data = _build_config_data( + host=host, + project=project, + project_id=environment.project_id, + username=username, + auth_method=auth_method, + secret=secret, + suite_id=suite_id, + proxy=advanced_settings.get("proxy"), + proxy_user=advanced_settings.get("proxy_user"), + noproxy=advanced_settings.get("noproxy"), + insecure=advanced_settings.get("insecure", False), + timeout=advanced_settings.get("timeout"), + batch_size=advanced_settings.get("batch_size"), + ) + _write_config(output_path, config_data) + + _echo("") + _echo("Saved configuration:", "accent") + _echo(f" Path: {output_path}", "muted") + _echo(f" Host: {host}", "muted") + _echo(f" Project: {project}", "muted") + _echo(f" Auth: {'username + API key' if auth_method == '1' else 
'username + password'}", "muted") + if suite_id: + _echo(f" Suite ID: {suite_id}", "muted") + if advanced_settings: + _echo(" Advanced settings: configured", "muted") + _echo("") + _echo("Next step: run `trcli status` to verify the saved configuration.", "success") diff --git a/trcli/commands/cmd_labels.py b/trcli/commands/cmd_labels.py index 7e535153..23be267f 100644 --- a/trcli/commands/cmd_labels.py +++ b/trcli/commands/cmd_labels.py @@ -2,6 +2,7 @@ from trcli.api.project_based_client import ProjectBasedClient from trcli.cli import pass_environment, CONTEXT_SETTINGS, Environment +from trcli.cli_styles import StyledGroup from trcli.data_classes.dataclass_testrail import TestRailSuite @@ -11,7 +12,7 @@ def print_config(env: Environment, action: str): f"\n> Project: {env.project if env.project else env.project_id}") -@click.group(context_settings=CONTEXT_SETTINGS) +@click.group(cls=StyledGroup, context_settings=CONTEXT_SETTINGS) @click.pass_context @pass_environment def cli(environment: Environment, context: click.Context, *args, **kwargs): @@ -20,6 +21,12 @@ def cli(environment: Environment, context: click.Context, *args, **kwargs): environment.set_parameters(context) +def _log_dry_run(environment: Environment, message: str, details: list[str] = None): + environment.log(message) + for detail in details or []: + environment.log(f" {detail}") + + @cli.command() @click.option("--title", required=True, metavar="", help="Title of the label to add (max 20 characters).") @click.pass_context @@ -38,6 +45,14 @@ def add(environment: Environment, context: click.Context, title: str, *args, **k suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), ) project_client.resolve_project() + + if environment.dry_run: + _log_dry_run( + environment, + "Dry run: would add a label in TestRail.", + [f"Project ID: {project_client.project.project_id}", f"Title: '{title}'"], + ) + return environment.log(f"Adding label '{title}'...") @@ -74,6 +89,14 @@ def 
update(environment: Environment, context: click.Context, label_id: int, titl suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), ) project_client.resolve_project() + + if environment.dry_run: + _log_dry_run( + environment, + "Dry run: would update a label in TestRail.", + [f"Project ID: {project_client.project.project_id}", f"Label ID: {label_id}", f"Title: '{title}'"], + ) + return environment.log(f"Updating label with ID {label_id}...") @@ -113,6 +136,14 @@ def delete(environment: Environment, context: click.Context, ids: str, *args, ** suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), ) project_client.resolve_project() + + if environment.dry_run: + _log_dry_run( + environment, + "Dry run: would delete label(s) in TestRail.", + [f"Label IDs: {', '.join(map(str, label_ids))}"], + ) + return environment.log(f"Deleting labels with IDs: {', '.join(map(str, label_ids))}...") @@ -258,6 +289,14 @@ def add_to_cases(environment: Environment, context: click.Context, case_ids: str suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), ) project_client.resolve_project() + + if environment.dry_run: + _log_dry_run( + environment, + "Dry run: would add a label to test case(s).", + [f"Project ID: {project_client.project.project_id}", f"Case IDs: {', '.join(map(str, case_id_list))}", f"Title: '{title}'"], + ) + return environment.log(f"Adding label '{title}' to {len(case_id_list)} test case(s)...") @@ -525,6 +564,18 @@ def add_to_tests(environment: Environment, context: click.Context, test_ids: str suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), ) project_client.resolve_project() + + if environment.dry_run: + _log_dry_run( + environment, + "Dry run: would add label(s) to test(s).", + [ + f"Project ID: {project_client.project.project_id}", + f"Test IDs: {', '.join(map(str, test_id_list))}", + f"Labels: {', '.join(title_list)}", + ], + ) + return # Log message 
adjusted for single/multiple labels if len(title_list) == 1: @@ -679,4 +730,4 @@ def get_test_labels(environment: Environment, context: click.Context, test_ids: environment.log(f" - ID: {label.get('id')}, Title: '{label.get('title')}'") else: environment.log(f" Labels: No labels assigned") - environment.log("") \ No newline at end of file + environment.log("") diff --git a/trcli/commands/cmd_parse_cucumber.py b/trcli/commands/cmd_parse_cucumber.py index ba1e6a7e..5a69325b 100644 --- a/trcli/commands/cmd_parse_cucumber.py +++ b/trcli/commands/cmd_parse_cucumber.py @@ -3,13 +3,19 @@ from trcli.api.results_uploader import ResultsUploader from trcli.cli import pass_environment, Environment, CONTEXT_SETTINGS -from trcli.commands.results_parser_helpers import bdd_parser_options, print_config +from trcli.cli_styles import StyledCommand +from trcli.commands.results_parser_helpers import ( + bdd_parser_options, + emit_parser_result_json, + print_config, + print_dry_run_preview, +) from trcli.constants import FAULT_MAPPING, ProjectErrors from trcli.data_classes.validation_exception import ValidationException from trcli.readers.cucumber_json import CucumberParser -@click.command(context_settings=CONTEXT_SETTINGS) +@click.command(cls=StyledCommand, context_settings=CONTEXT_SETTINGS) @bdd_parser_options @click.option( "-v", @@ -43,6 +49,14 @@ def cli(environment: Environment, context: click.Context, *args, **kwargs): print_config(environment) try: + created_case_ids = {} + if environment.dry_run: + parser = CucumberParser(environment) + parsed_suites = parser.parse_file(bdd_matching_mode=False) + print_dry_run_preview(environment, parsed_suites, "upload Cucumber results to TestRail") + environment.log(" Note: BDD matching and auto-creation checks are skipped in dry-run mode.") + return + # Setup API client and handler (needed for both modes) from trcli.api.api_request_handler import ApiRequestHandler from trcli.api.api_client import APIClient @@ -56,6 +70,7 @@ def 
cli(environment: Environment, context: click.Context, *args, **kwargs): verbose_logging_function=environment.vlog, logging_function=environment.log, uploader_metadata=uploader_metadata, + dry_run=bool(getattr(environment, "dry_run", False)), ) # Set credentials @@ -264,9 +279,24 @@ def cli(environment: Environment, context: click.Context, *args, **kwargs): # Summary if run_id: - environment.log(f"Results uploaded successfully to run ID: {run_id}") + if environment.wants_json_output: + emit_parser_result_json( + environment, + parsed_suites=parsed_suites, + run_id=run_id, + extra_data={"auto_created_case_ids": list(created_case_ids.values())}, + ) + else: + environment.log(f"Results uploaded successfully to run ID: {run_id}") else: - environment.log("Results processing completed") + if environment.wants_json_output: + emit_parser_result_json( + environment, + parsed_suites=parsed_suites, + extra_data={"auto_created_case_ids": list(created_case_ids.values())}, + ) + else: + environment.log("Results processing completed") except FileNotFoundError as e: environment.elog(str(e)) diff --git a/trcli/commands/cmd_parse_junit.py b/trcli/commands/cmd_parse_junit.py index 2c95beb2..7da87ef3 100644 --- a/trcli/commands/cmd_parse_junit.py +++ b/trcli/commands/cmd_parse_junit.py @@ -6,13 +6,19 @@ from trcli import settings from trcli.api.results_uploader import ResultsUploader from trcli.cli import pass_environment, Environment, CONTEXT_SETTINGS -from trcli.commands.results_parser_helpers import results_parser_options, print_config +from trcli.cli_styles import StyledCommand +from trcli.commands.results_parser_helpers import ( + emit_parser_result_json, + results_parser_options, + print_config, + print_dry_run_preview, +) from trcli.constants import FAULT_MAPPING from trcli.data_classes.validation_exception import ValidationException from trcli.readers.junit_xml import JunitParser -@click.command(context_settings=CONTEXT_SETTINGS) +@click.command(cls=StyledCommand, 
context_settings=CONTEXT_SETTINGS) @results_parser_options @click.option( "--special-parser", @@ -33,7 +39,6 @@ metavar="", help="Comma-separated list of reference IDs to append to the test run (up to 250 characters total).", ) -@click.option("--json-output", is_flag=True, help="Output reference operation results in JSON format.") @click.option( "--update-existing-cases", type=click.Choice(["yes", "no"], case_sensitive=False), @@ -66,8 +71,12 @@ def cli(environment: Environment, context: click.Context, *args, **kwargs): print_config(environment) try: parsed_suites = JunitParser(environment).parse_file() + if environment.dry_run: + print_dry_run_preview(environment, parsed_suites, "upload JUnit results to TestRail") + return run_id = None case_update_results = {} + test_run_ref_result = None for suite in parsed_suites: result_uploader = ResultsUploader(environment=environment, suite=suite) result_uploader.upload_results() @@ -80,15 +89,41 @@ def cli(environment: Environment, context: click.Context, *args, **kwargs): case_update_results = result_uploader.case_update_results if environment.test_run_ref and run_id: - _handle_test_run_references(environment, run_id) + test_run_ref_result = _handle_test_run_references(environment, run_id) # Handle case update reporting if enabled if environment.update_existing_cases == "yes" and case_update_results is not None: - _handle_case_update_reporting(environment, case_update_results) + case_update_summary = _handle_case_update_reporting(environment, case_update_results) # Exit with error if there were case update failures (after reporting) if case_update_results.get("failed_cases"): + if environment.wants_json_output: + emit_parser_result_json( + environment, + parsed_suites=parsed_suites, + run_id=run_id, + ok=False, + errors=["One or more case updates failed."], + extra_data={ + "test_run_references": test_run_ref_result, + "case_updates": case_update_summary, + }, + ) exit(1) + else: + case_update_summary = None + + if 
environment.wants_json_output: + emit_parser_result_json( + environment, + parsed_suites=parsed_suites, + run_id=run_id, + extra_data={ + "special_parser": environment.special_parser, + "test_run_references": test_run_ref_result, + "case_updates": case_update_summary, + }, + ) except FileNotFoundError as e: environment.elog(str(e)) exit(1) @@ -130,7 +165,6 @@ def _handle_test_run_references(environment: Environment, run_id: int): """ from trcli.api.project_based_client import ProjectBasedClient from trcli.data_classes.dataclass_testrail import TestRailSuite - import json refs = [ref.strip() for ref in environment.test_run_ref.split(",") if ref.strip()] @@ -148,11 +182,14 @@ def _handle_test_run_references(environment: Environment, run_id: int): final_refs = run_data.get("refs", "") if run_data else "" - if environment.json_output: - # JSON output - result = {"run_id": run_id, "added": added_refs, "skipped": skipped_refs, "total_references": final_refs} - print(json.dumps(result, indent=2)) - else: + result = { + "run_id": run_id, + "added": added_refs, + "skipped": skipped_refs, + "total_references": final_refs, + } + + if not environment.wants_json_output: environment.log(f"References appended successfully:") environment.log(f" Run ID: {run_id}") environment.log(f" Total references: {len(final_refs.split(',')) if final_refs else 0}") @@ -162,34 +199,31 @@ def _handle_test_run_references(environment: Environment, run_id: int): ) if final_refs: environment.log(f" All references: {final_refs}") + return result def _handle_case_update_reporting(environment: Environment, case_update_results: dict): """ Handle reporting of case update results. 
""" - import json - # Handle None input gracefully if case_update_results is None: return - if environment.json_output: - # JSON output for case updates - result = { - "summary": { - "updated_cases": len(case_update_results.get("updated_cases", [])), - "skipped_cases": len(case_update_results.get("skipped_cases", [])), - "failed_cases": len(case_update_results.get("failed_cases", [])), - }, - "details": { - "updated_cases": case_update_results.get("updated_cases", []), - "skipped_cases": case_update_results.get("skipped_cases", []), - "failed_cases": case_update_results.get("failed_cases", []), - }, - } - print(json.dumps(result, indent=2)) - else: + result = { + "summary": { + "updated_cases": len(case_update_results.get("updated_cases", [])), + "skipped_cases": len(case_update_results.get("skipped_cases", [])), + "failed_cases": len(case_update_results.get("failed_cases", [])), + }, + "details": { + "updated_cases": case_update_results.get("updated_cases", []), + "skipped_cases": case_update_results.get("skipped_cases", []), + "failed_cases": case_update_results.get("failed_cases", []), + }, + } + + if not environment.wants_json_output: # Console output for case updates updated_cases = case_update_results.get("updated_cases", []) skipped_cases = case_update_results.get("skipped_cases", []) @@ -222,3 +256,4 @@ def _handle_case_update_reporting(environment: Environment, case_update_results: case_id = case_info["case_id"] error = case_info.get("error", "Unknown error") environment.log(f" C{case_id}: {error}") + return result diff --git a/trcli/commands/cmd_parse_openapi.py b/trcli/commands/cmd_parse_openapi.py index 3f9c4b98..5597d228 100644 --- a/trcli/commands/cmd_parse_openapi.py +++ b/trcli/commands/cmd_parse_openapi.py @@ -6,8 +6,10 @@ from trcli import settings from trcli.api.results_uploader import ResultsUploader from trcli.cli import pass_environment, Environment, CONTEXT_SETTINGS +from trcli.cli_styles import StyledCommand from trcli.constants import 
FAULT_MAPPING from trcli.data_classes.validation_exception import ValidationException +from trcli.commands.results_parser_helpers import emit_parser_result_json, json_output_option, print_dry_run_preview from trcli.readers.openapi_yml import OpenApiParser @@ -20,7 +22,8 @@ def print_config(env: Environment): f"\n> Auto-create entities: {env.auto_creation_response}") -@click.command(context_settings=CONTEXT_SETTINGS) +@click.command(cls=StyledCommand, context_settings=CONTEXT_SETTINGS) +@json_output_option @click.option("-f", "--file", type=click.Path(), metavar="", help="Filename and path.") @click.option( "--suite-id", @@ -47,9 +50,18 @@ def cli(environment: Environment, context: click.Context, *args, **kwargs): print_config(environment) try: parsed_suites = OpenApiParser(environment).parse_file() + if environment.dry_run: + print_dry_run_preview(environment, parsed_suites, "create OpenAPI-derived test cases in TestRail") + return for suite in parsed_suites: result_uploader = ResultsUploader(environment=environment, suite=suite, skip_run=True) result_uploader.upload_results() + if environment.wants_json_output: + emit_parser_result_json( + environment, + parsed_suites=parsed_suites, + extra_data={"skip_run": True}, + ) except FileNotFoundError: environment.elog(FAULT_MAPPING["missing_file"]) exit(1) diff --git a/trcli/commands/cmd_parse_robot.py b/trcli/commands/cmd_parse_robot.py index a09ac21b..7973ba90 100644 --- a/trcli/commands/cmd_parse_robot.py +++ b/trcli/commands/cmd_parse_robot.py @@ -5,13 +5,19 @@ from trcli import settings from trcli.api.results_uploader import ResultsUploader from trcli.cli import pass_environment, Environment, CONTEXT_SETTINGS -from trcli.commands.results_parser_helpers import results_parser_options, print_config +from trcli.cli_styles import StyledCommand +from trcli.commands.results_parser_helpers import ( + emit_parser_result_json, + results_parser_options, + print_config, + print_dry_run_preview, +) from trcli.constants import 
FAULT_MAPPING from trcli.data_classes.validation_exception import ValidationException from trcli.readers.robot_xml import RobotParser -@click.command(context_settings=CONTEXT_SETTINGS) +@click.command(cls=StyledCommand, context_settings=CONTEXT_SETTINGS) @results_parser_options @click.pass_context @pass_environment @@ -24,9 +30,14 @@ def cli(environment: Environment, context: click.Context, *args, **kwargs): print_config(environment) try: parsed_suites = RobotParser(environment).parse_file() + if environment.dry_run: + print_dry_run_preview(environment, parsed_suites, "upload Robot Framework results to TestRail") + return for suite in parsed_suites: result_uploader = ResultsUploader(environment=environment, suite=suite) result_uploader.upload_results() + if environment.wants_json_output: + emit_parser_result_json(environment, parsed_suites=parsed_suites) except FileNotFoundError as e: environment.elog(str(e)) exit(1) diff --git a/trcli/commands/cmd_references.py b/trcli/commands/cmd_references.py index 2329c255..31dd3e33 100644 --- a/trcli/commands/cmd_references.py +++ b/trcli/commands/cmd_references.py @@ -2,6 +2,7 @@ from trcli.api.project_based_client import ProjectBasedClient from trcli.cli import pass_environment, CONTEXT_SETTINGS, Environment +from trcli.cli_styles import StyledGroup from trcli.data_classes.dataclass_testrail import TestRailSuite @@ -11,7 +12,7 @@ def print_config(env: Environment, action: str): f"\n> Project: {env.project if env.project else env.project_id}") -@click.group(context_settings=CONTEXT_SETTINGS) +@click.group(cls=StyledGroup, context_settings=CONTEXT_SETTINGS) @click.pass_context @pass_environment def cli(environment: Environment, context: click.Context, *args, **kwargs): @@ -20,6 +21,12 @@ def cli(environment: Environment, context: click.Context, *args, **kwargs): environment.set_parameters(context) +def _log_dry_run(environment: Environment, message: str, details: list[str] = None): + environment.log(message) + for detail in 
details or []: + environment.log(f" {detail}") + + @cli.group() @click.pass_context @pass_environment @@ -62,6 +69,14 @@ def add_references(environment: Environment, context: click.Context, case_ids: s suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), ) project_client.resolve_project() + + if environment.dry_run: + _log_dry_run( + environment, + "Dry run: would add references to test case(s).", + [f"Case IDs: {', '.join(map(str, test_case_ids))}", f"References: {', '.join(references)}"], + ) + return environment.log(f"Adding references to {len(test_case_ids)} test case(s)...") environment.log(f"References: {', '.join(references)}") @@ -126,6 +141,14 @@ def update_references(environment: Environment, context: click.Context, case_ids suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), ) project_client.resolve_project() + + if environment.dry_run: + _log_dry_run( + environment, + "Dry run: would replace references on test case(s).", + [f"Case IDs: {', '.join(map(str, test_case_ids))}", f"References: {', '.join(references)}"], + ) + return environment.log(f"Updating references for {len(test_case_ids)} test case(s)...") environment.log(f"New references: {', '.join(references)}") @@ -187,6 +210,15 @@ def delete_references(environment: Environment, context: click.Context, case_ids suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), ) project_client.resolve_project() + + if environment.dry_run: + details = [f"Case IDs: {', '.join(map(str, test_case_ids))}"] + if specific_refs: + details.append(f"References: {', '.join(specific_refs)}") + else: + details.append("References: all") + _log_dry_run(environment, "Dry run: would delete references from test case(s).", details) + return if specific_refs: environment.log(f"Deleting specific references from {len(test_case_ids)} test case(s)...") diff --git a/trcli/commands/cmd_status.py b/trcli/commands/cmd_status.py new file mode 100644 index 
00000000..ec0b2db6 --- /dev/null +++ b/trcli/commands/cmd_status.py @@ -0,0 +1,322 @@ +from requests.models import PreparedRequest, InvalidURL, MissingSchema + +import click + +import trcli +from click.core import ParameterSource + +from trcli.api.api_client import APIClient +from trcli.api.api_request_handler import ApiRequestHandler +from trcli.cli import CONTEXT_SETTINGS, Environment, pass_environment +from trcli.cli_styles import StyledCommand, style_text +from trcli.commands.results_parser_helpers import build_command_json, json_output_option +from trcli.data_classes.dataclass_testrail import TestRailSuite +from trcli.version_checker import _query_pypi + + +def _build_api_client(environment: Environment) -> APIClient: + api_client = APIClient( + environment.host, + verbose_logging_function=environment.vlog, + logging_function=environment.log, + verify=not environment.insecure, + proxy=environment.proxy, + proxy_user=environment.proxy_user, + noproxy=environment.noproxy, + uploader_metadata=APIClient.build_uploader_metadata(version=trcli.__version__), + timeout=environment.timeout, + dry_run=bool(getattr(environment, "dry_run", False)), + ) + api_client.username = environment.username + api_client.password = environment.password + api_client.api_key = environment.key + return api_client + + +def _auth_mode(environment: Environment) -> str: + if environment.username and environment.key: + return "username + API key" + if environment.username and environment.password: + return "username + password" + if environment.username or environment.password or environment.key: + return "incomplete" + return "not configured" + + +def _validate_host(host: str) -> bool: + if not host: + return False + try: + request = PreparedRequest() + request.prepare_url(host, params=None) + return True + except (InvalidURL, MissingSchema): + return False + + +def _resolved_parameter_sources(environment: Environment, context: click.Context) -> dict: + resolved_sources = {} + 
config_override_sources = [ParameterSource.DEFAULT] if environment.default_config_file else [ + ParameterSource.DEFAULT, + ParameterSource.ENVIRONMENT, + ] + + for current_context in [context.parent, context]: + if not current_context: + continue + for param in current_context.params: + if param == "config": + continue + source = current_context.get_parameter_source(param) + if source is None: + continue + if source in config_override_sources and environment.params_from_config.get(param) is not None: + resolved_sources[param] = "config file" + elif source == ParameterSource.COMMANDLINE: + resolved_sources[param] = "command line" + elif source == ParameterSource.ENVIRONMENT: + resolved_sources[param] = "environment variable" + elif source == ParameterSource.DEFAULT: + resolved_sources[param] = "default" + + return resolved_sources + + +def _format_source_summary(source_map: dict) -> str: + ordered = [] + labels = ["command line", "environment variable", "config file", "default"] + if any(label in source_map.values() for label in labels[:-1]): + labels = labels[:-1] + for label in labels: + if label in source_map.values(): + ordered.append(label) + return " + ".join(ordered) if ordered else "default" + + +def _print_section(environment: Environment, title: str, rows: list[tuple[str, str]]): + ctx = click.get_current_context(silent=True) + environment.log(style_text(f"{title}:", "accent", ctx=ctx)) + for key, value in rows: + environment.log( + f" {style_text(key + ':', 'muted', ctx=ctx)} {style_text(value, 'muted', ctx=ctx)}" + ) + environment.log("") + + +def _render_status_heading(environment: Environment, verdict: str): + ctx = click.get_current_context(silent=True) + verdict_color = {"Ready": "success", "Partial": "warn", "Error": "error"}[verdict] + environment.log( + f"{style_text('TRCLI Status:', 'accent', ctx=ctx)} {style_text(verdict, verdict_color, ctx=ctx)}" + ) + environment.log("") + + +def _render_message_block(environment: Environment, title: str, 
items: list[str], color: str): + if not items: + return + + ctx = click.get_current_context(silent=True) + environment.log(style_text(f"{title}:", color, ctx=ctx)) + for item in items: + environment.log(f" {style_text('-', color, ctx=ctx)} {style_text(item, 'muted', ctx=ctx)}") + environment.log("") + + +@click.command(cls=StyledCommand, context_settings=CONTEXT_SETTINGS) +@json_output_option +@click.option( + "--suite-id", + type=click.IntRange(min=1), + metavar="", + help="Optional suite ID to validate within the selected project.", +) +@click.pass_context +@pass_environment +def cli(environment: Environment, context: click.Context, suite_id: int, *args, **kwargs): + """Show TRCLI configuration and connectivity status""" + environment.cmd = "status" + environment.set_parameters(context) + + source_map = _resolved_parameter_sources(environment, context) + status_data = { + "version": { + "installed": trcli.__version__, + "latest": _query_pypi() or "unavailable", + }, + "config": { + "source": _format_source_summary(source_map), + "config_file": str(environment.config) if environment.config else "not used", + "proxy": environment.proxy or "disabled", + "proxy_user": "configured" if environment.proxy_user else "not configured", + "noproxy": environment.noproxy or "not configured", + "insecure_ssl": "yes" if environment.insecure else "no", + "timeout": str(environment.timeout), + "batch_size": str(environment.batch_size), + }, + "connection": { + "host": environment.host or "not configured", + "auth": _auth_mode(environment), + "reachable": "not checked", + "authentication": "not checked", + }, + "context": { + "project": environment.project or "not configured", + "project_id": str(environment.project_id) if environment.project_id else "not configured", + "suite_id": str(environment.suite_id) if environment.suite_id else "not configured", + "project_check": "not checked", + "suite_check": "not checked", + }, + } + + warnings = [] + errors = [] + + host_is_valid = 
_validate_host(environment.host) + if not environment.host: + warnings.append("Host is not configured.") + elif not host_is_valid: + errors.append("Host is invalid.") + + auth_mode = status_data["connection"]["auth"] + if auth_mode == "not configured": + warnings.append("Authentication is not configured.") + elif auth_mode == "incomplete": + errors.append("Authentication configuration is incomplete.") + + api_handler = None + project_id = None + + if host_is_valid and auth_mode not in ["not configured", "incomplete"]: + api_client = _build_api_client(environment) + connectivity = api_client.send_get("get_projects") + if connectivity.status_code != -1 and 200 <= connectivity.status_code < 300 and not connectivity.error_message: + status_data["connection"]["reachable"] = "yes" + status_data["connection"]["authentication"] = "valid" + api_handler = ApiRequestHandler( + environment=environment, + api_client=api_client, + suites_data=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), + verify=False, + ) + else: + status_data["connection"]["reachable"] = "no" + status_data["connection"]["authentication"] = "failed" + message = connectivity.error_message or f"HTTP {connectivity.status_code}" + errors.append(f"Connectivity/authentication check failed: {message}") + + if environment.project: + if api_handler is None: + status_data["context"]["project_check"] = "skipped" + else: + project_data = api_handler.get_project_data(environment.project, environment.project_id) + if project_data.project_id > 0: + project_id = project_data.project_id + status_data["context"]["project_id"] = str(project_id) + status_data["context"]["project_check"] = "valid" + else: + status_data["context"]["project_check"] = "invalid" + errors.append(project_data.error_message) + else: + warnings.append("Project is not configured.") + + if environment.suite_id: + if project_id is None or api_handler is None: + status_data["context"]["suite_check"] = "skipped" + 
warnings.append("Suite ID is configured but could not be validated without a valid project context.") + else: + suite_exists, error_message = api_handler.check_suite_id(project_id) + if suite_exists: + status_data["context"]["suite_check"] = "valid" + else: + status_data["context"]["suite_check"] = "invalid" + errors.append(error_message) + + if errors: + verdict = "Error" + elif warnings: + verdict = "Partial" + else: + verdict = "Ready" + + if environment.wants_json_output: + data = { + "verdict": verdict, + **status_data, + } + if environment.verbose: + data["parameter_sources"] = source_map + environment.emit_json( + build_command_json( + "status", + ok=verdict != "Error", + dry_run=bool(getattr(environment, "dry_run", False)), + data=data, + warnings=warnings, + errors=errors, + ) + ) + if verdict == "Error": + exit(1) + return + + _render_status_heading(environment, verdict) + _print_section( + environment, + "Connection", + [ + ("Host", status_data["connection"]["host"]), + ("Auth", status_data["connection"]["auth"]), + ("Reachable", status_data["connection"]["reachable"]), + ("Authentication", status_data["connection"]["authentication"]), + ], + ) + _print_section( + environment, + "Context", + [ + ("Project", status_data["context"]["project"]), + ("Project ID", status_data["context"]["project_id"]), + ("Project Check", status_data["context"]["project_check"]), + ("Suite ID", status_data["context"]["suite_id"]), + ("Suite Check", status_data["context"]["suite_check"]), + ], + ) + _print_section( + environment, + "Config", + [ + ("Source", status_data["config"]["source"]), + ("Config File", status_data["config"]["config_file"]), + ("Proxy", status_data["config"]["proxy"]), + ("Proxy User", status_data["config"]["proxy_user"]), + ("No Proxy", status_data["config"]["noproxy"]), + ("Insecure SSL", status_data["config"]["insecure_ssl"]), + ("Timeout", status_data["config"]["timeout"]), + ("Batch Size", status_data["config"]["batch_size"]), + ], + ) + 
_print_section( + environment, + "Version", + [ + ("Installed", status_data["version"]["installed"]), + ("Latest", status_data["version"]["latest"]), + ], + ) + + _render_message_block(environment, "Warnings", warnings, "warn") + _render_message_block(environment, "Errors", errors, "error") + + if environment.verbose: + ctx = click.get_current_context(silent=True) + environment.log(style_text("Verbose:", "accent", ctx=ctx)) + environment.log(f" {style_text('Resolved parameter sources:', 'muted', ctx=ctx)}") + for key in sorted(source_map): + environment.log( + f" {style_text(key + ':', 'muted', ctx=ctx)} {style_text(source_map[key], 'muted', ctx=ctx)}" + ) + environment.log("") + + if verdict == "Error": + exit(1) diff --git a/trcli/commands/cmd_update.py b/trcli/commands/cmd_update.py index 3c8cb66c..5bb182b1 100644 --- a/trcli/commands/cmd_update.py +++ b/trcli/commands/cmd_update.py @@ -9,12 +9,13 @@ import click from trcli.cli import CONTEXT_SETTINGS +from trcli.cli_styles import StyledCommand from trcli.version_checker import _query_pypi, _compare_and_format, _save_cache from trcli import __version__ from datetime import datetime -@click.command(context_settings=CONTEXT_SETTINGS) +@click.command(cls=StyledCommand, context_settings=CONTEXT_SETTINGS) @click.option( "--check-only", is_flag=True, diff --git a/trcli/commands/results_parser_helpers.py b/trcli/commands/results_parser_helpers.py index fcb1abaf..b87cae23 100644 --- a/trcli/commands/results_parser_helpers.py +++ b/trcli/commands/results_parser_helpers.py @@ -1,4 +1,5 @@ import functools +from typing import Optional import click from click import BadParameter @@ -12,6 +13,16 @@ def print_config(env: Environment): if hasattr(env, "assign_failed_to") and env.assign_failed_to and env.assign_failed_to.strip() else "No" ) + + +def json_output_option(f): + return click.option( + "--json-output", + "--json", + "json_output", + is_flag=True, + help="Output structured results in JSON format.", + )(f) env.log( 
f"Parser Results Execution Parameters" f"\n> Report file: {env.file}" @@ -35,6 +46,7 @@ def resolve_comma_separated_list(ctx, param, value): def results_parser_options(f): + @json_output_option @click.option("-f", "--file", type=click.Path(), metavar="", help="Filename and path.") @click.option("--close-run", is_flag=True, help="Close the newly created run") @click.option("--title", metavar="", help="Title of Test Run to be created or updated in TestRail.") @@ -114,6 +126,7 @@ def wrapper_common_options(*args, **kwargs): def bdd_parser_options(f): """Options decorator for BDD/Cucumber parsers that don't need case-matcher or section-id""" + @json_output_option @click.option("-f", "--file", type=click.Path(), metavar="", help="Filename and path.") @click.option("--close-run", is_flag=True, help="Close the newly created run") @click.option("--title", metavar="", help="Title of Test Run to be created or updated in TestRail.") @@ -162,3 +175,106 @@ def wrapper_bdd_options(*args, **kwargs): return f(*args, **kwargs) return wrapper_bdd_options + + +def summarize_parsed_suites(parsed_suites) -> dict: + sections = 0 + cases = 0 + results = 0 + + for suite in parsed_suites: + sections += len(suite.testsections) + for section in suite.testsections: + cases += len(section.testcases) + for test_case in section.testcases: + if getattr(test_case, "result", None) is not None: + results += 1 + + return { + "suites": len(parsed_suites), + "sections": sections, + "cases": cases, + "results": results, + } + + +def build_command_json( + command: str, + *, + ok: bool = True, + dry_run: bool = False, + data: Optional[dict] = None, + warnings: Optional[list[str]] = None, + errors: Optional[list[str]] = None, +) -> dict: + return { + "ok": ok, + "command": command, + "dry_run": dry_run, + "data": data or {}, + "warnings": warnings or [], + "errors": errors or [], + } + + +def emit_parser_result_json( + env: Environment, + *, + parsed_suites, + run_id=None, + warnings: Optional[list[str]] = 
None, + errors: Optional[list[str]] = None, + extra_data: Optional[dict] = None, + ok: bool = True, +): + payload = build_command_json( + env.cmd, + ok=ok, + dry_run=bool(getattr(env, "dry_run", False)), + data={ + "file": env.file, + "title": env.title, + "run_id": run_id, + "project": env.project, + "project_id": env.project_id, + "suite_id": env.suite_id, + "parsed": summarize_parsed_suites(parsed_suites), + **(extra_data or {}), + }, + warnings=warnings, + errors=errors, + ) + env.emit_json(payload) + + +def print_dry_run_preview(env: Environment, parsed_suites, action: str): + summary = summarize_parsed_suites(parsed_suites) + if env.wants_json_output: + env.emit_json( + build_command_json( + env.cmd, + dry_run=True, + data={ + "action": action, + "parsed": summary, + "target_run_id": env.run_id, + "title": env.title, + "close_run": bool(env.close_run), + }, + ) + ) + return + + env.log(f"Dry run: would {action}.") + env.log(f" Parsed suites: {summary['suites']}") + env.log(f" Parsed sections: {summary['sections']}") + env.log(f" Parsed cases: {summary['cases']}") + env.log(f" Parsed results: {summary['results']}") + + if env.run_id: + env.log(f" Target run: {env.run_id}") + else: + env.log(f" Run title: {env.title}") + + if env.close_run: + env.log(" Close run: yes") diff --git a/trcli/constants.py b/trcli/constants.py index 85548f22..7e345b5d 100644 --- a/trcli/constants.py +++ b/trcli/constants.py @@ -100,6 +100,7 @@ TOOL_VERSION = f"""TestRail CLI v{trcli.__version__} Copyright 2025 Gurock Software GmbH - www.gurock.com""" TOOL_USAGE = f"""Supported and loaded modules: + - init: Interactively create a starter config file - parse_junit: JUnit XML Files (& Similar) - parse_cucumber: Cucumber JSON results (BDD) - import_gherkin: Upload .feature files to TestRail BDD @@ -113,6 +114,22 @@ MISSING_COMMAND_SLOGAN = """Usage: trcli [OPTIONS] COMMAND [ARGS]...\nTry 'trcli --help' for help. 
\nError: Missing command.""" +HELP_EPILOG = """Examples: + trcli init + Create a starter config file interactively. + trcli parse_junit -f results.xml --title "Automated Test Run" -y + Parse a JUnit XML report and upload results to TestRail. + trcli parse_cucumber -f cucumber.json --title "BDD Results" -y + Parse Cucumber JSON results and upload them to TestRail. + trcli add_run --title "Release Validation" --project "My Project" + Create a new empty test run in TestRail. + trcli labels list --project "My Project" + List labels available in the selected project. + trcli references cases add --case-ids 1,2 --refs REQ-123,REQ-456 + Add references to existing test cases. + +Docs: https://github.com/gurock/trcli#readme""" + class ProjectErrors(enum.IntEnum): multiple_project_same_name = -1