-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtest_cli_integration.sh
More file actions
executable file
·202 lines (168 loc) · 6.87 KB
/
test_cli_integration.sh
File metadata and controls
executable file
·202 lines (168 loc) · 6.87 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
#!/bin/bash
# Integration test script for CLI commands
# Tests each CLI command with actual execution.
#
# NOTE: 'set -e' is deliberately NOT used. This script counts failing tests
# in $FAILED and reports a summary at the end; with 'set -e' the script
# would abort on the first failing run_test call (run_test returns 1), and
# even on the first passing test, because '((PASSED++))' returns a non-zero
# status when the counter's pre-increment value is 0. Either way the
# summary and exit logic at the bottom would never run.

echo "=================================="
echo "CLI Integration Tests"
echo "=================================="
echo ""

# ANSI color codes for test output
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Test date - dynamically set to today (MM-DD-YYYY, the format the CLI expects)
TEST_DATE=$(date +%m-%d-%Y)

# Create a unique test output directory for this run
TEST_OUTPUT_DIR="outputs/test_run_$(date +%s)"
mkdir -p "$TEST_OUTPUT_DIR"

# Counters for passed/failed tests (read and updated by every test below)
PASSED=0
FAILED=0
#######################################
# Run a single integration test and record the outcome.
# Globals:
#   YELLOW, GREEN, RED, NC - color codes (read)
#   PASSED, FAILED         - counters (incremented)
# Arguments:
#   $1 - human-readable test name
#   $2 - command line to execute (run via eval)
# Outputs:
#   Progress and pass/fail lines to stdout.
# Returns:
#   0 if the command succeeded, 1 otherwise.
#######################################
run_test() {
  local test_name="$1"
  local command="$2"
  echo -e "${YELLOW}Testing: ${test_name}${NC}"
  echo "Command: $command"
  if eval "$command"; then
    echo -e "${GREEN}✓ PASSED${NC}"
    # POSIX-style increment: unlike '((PASSED++))', never returns non-zero,
    # so it is safe even if the script is run with 'set -e'.
    PASSED=$((PASSED + 1))
    echo ""
    return 0
  else
    local rc=$?   # capture before any other command can clobber $?
    echo -e "${RED}✗ FAILED (exit code: $rc)${NC}"
    FAILED=$((FAILED + 1))
    echo ""
    return 1
  fi
}
# Tests 1-11: exercise each CLI command end-to-end via run_test.
# Names and commands are kept in parallel arrays so adding a test is a
# two-line change; the loop preserves the original execution order.
TEST_NAMES=(
  "CLI Help"
  "Collect Event Integration Rankings"
  "Event Rankings (Force Refresh)"
  "Collect KER Analytics"
  "Search KERs for Concordance"
  "Harmonize KER Evidence"
  "Search with Config: Regulatory Relevance"
  "Search with Config: Methods NAMS"
  "Search with Config: Lung and Immune AOPs"
  "Search with Co-occurrence Only Flag"
  "Search with Config (Force Refresh)"
)
TEST_COMMANDS=(
  "uv run python cli.py --help"
  "uv run python cli.py collect-event-integration-rankings --date $TEST_DATE"
  "uv run python cli.py collect-event-integration-rankings --date $TEST_DATE --force-refresh"
  "uv run python cli.py collect-ker-analytics --date $TEST_DATE"
  "uv run python cli.py search-kers-for-concordance-text --date $TEST_DATE"
  "uv run python cli.py harmonize-ker-evidence --date $TEST_DATE"
  "uv run python cli.py search-with-config regulatory_relevance --date $TEST_DATE"
  "uv run python cli.py search-with-config methods_nams --date $TEST_DATE"
  "uv run python cli.py search-with-config lung_and_immune_aops --date $TEST_DATE"
  "uv run python cli.py search-with-config lung_and_immune_aops --co-occurrence-only --date $TEST_DATE"
  "uv run python cli.py search-with-config regulatory_relevance --force-refresh --date $TEST_DATE"
)
for idx in "${!TEST_NAMES[@]}"; do
  run_test "${TEST_NAMES[$idx]}" "${TEST_COMMANDS[$idx]}"
done
# Test 12: verify every subcommand exposes a working --help page.
echo -e "${YELLOW}Testing: Command Help Pages${NC}"
COMMANDS=("collect-event-integration-rankings" "collect-ker-analytics" "search-kers-for-concordance-text" "harmonize-ker-evidence" "search-with-config")
for cmd in "${COMMANDS[@]}"; do
  echo " Checking: $cmd --help"
  if uv run python cli.py "$cmd" --help > /dev/null 2>&1; then
    echo -e " ${GREEN}✓${NC} $cmd help works"
    # Safe under 'set -e', unlike '((PASSED++))' which returns non-zero
    # whenever the pre-increment value is 0.
    PASSED=$((PASSED + 1))
  else
    echo -e " ${RED}✗${NC} $cmd help failed"
    FAILED=$((FAILED + 1))
  fi
done
echo ""
# Test 13: Verify JSON output structure for co-occurrence-only runs.
echo -e "${YELLOW}Testing: JSON Output Verification${NC}"
# Any matching output file will do; 2>/dev/null keeps find quiet when the
# output directory does not exist yet.
LUNG_IMMUNE_JSON=$(find outputs/lung_and_immune_aops -name "lung_and_immune_aops_*.json" -type f 2>/dev/null | head -n 1)
if [ -f "$LUNG_IMMUNE_JSON" ]; then
  echo " Checking JSON structure in: $LUNG_IMMUNE_JSON"

  #######################################
  # Run one Python assertion against $LUNG_IMMUNE_JSON.
  # The file path is handed to Python via argv rather than interpolated
  # into the -c source, so paths containing quotes or backslashes cannot
  # break the snippet or inject code. The parsed JSON is bound to 'data'.
  # Arguments:
  #   $1 - message printed on success
  #   $2 - message printed on failure
  #   $3 - Python statement(s) asserting the expected structure
  # Globals:
  #   PASSED/FAILED incremented; GREEN/RED/NC read.
  #######################################
  check_json_assertion() {
    local pass_msg="$1"
    local fail_msg="$2"
    local assertion="$3"
    if python3 -c "
import json, sys
data = json.load(open(sys.argv[1]))
$assertion
" "$LUNG_IMMUNE_JSON" 2>/dev/null; then
      echo -e " ${GREEN}✓${NC} $pass_msg"
      PASSED=$((PASSED + 1))   # set -e-safe increment
    else
      echo -e " ${RED}✗${NC} $fail_msg"
      FAILED=$((FAILED + 1))
    fi
  }

  # Check for co_occurrence_only flag
  check_json_assertion \
    "co_occurrence_only flag is set correctly" \
    "co_occurrence_only flag not found or incorrect" \
    "assert data.get('co_occurrence_only') == True, 'co_occurrence_only should be True'"

  # Check for has_priority_co_occurrence in results
  check_json_assertion \
    "has_priority_co_occurrence field present in results" \
    "has_priority_co_occurrence field missing" \
    "results = data.get('results', {})
assert any('has_priority_co_occurrence' in r for r in results.values()), 'has_priority_co_occurrence should be in results'"

  # Check for total_entities_with_co_occurrence_matches in summary
  check_json_assertion \
    "total_entities_with_co_occurrence_matches in summary" \
    "total_entities_with_co_occurrence_matches missing from summary" \
    "assert 'total_entities_with_co_occurrence_matches' in data.get('summary', {}), 'total_entities_with_co_occurrence_matches should be in summary'"

  # Check for co_occurrence_fields in results
  check_json_assertion \
    "co_occurrence_fields field present in results" \
    "co_occurrence_fields field missing" \
    "results = data.get('results', {})
assert any('co_occurrence_fields' in r for r in results.values()), 'co_occurrence_fields should be in results'"
else
  echo -e " ${YELLOW}⚠${NC} No lung_and_immune_aops JSON file found for verification"
fi
echo ""
# Summary: report the pass/fail tallies collected above.
echo "=================================="
echo "Test Summary"
echo "=================================="
echo -e "${GREEN}Passed: $PASSED${NC}"
# The failure count is shown in red only when something actually failed.
if [ "$FAILED" -gt 0 ]; then
  fail_color="$RED"
else
  fail_color="$GREEN"
fi
echo -e "${fail_color}Failed: $FAILED${NC}"
echo ""
# Verify that each expected output directory was created, and report how
# many files it contains.
echo "=================================="
echo "Output File Verification"
echo "=================================="
OUTPUT_DIRS=(
  "outputs/event_rankings"
  "outputs/json"
  "outputs/ker_analytics"
  "outputs/ker_evidence"
  "outputs/lung_and_immune_aops"
  "outputs/reference_search_results"
  "outputs/regulatory_relevance_screening"
)
for out_dir in "${OUTPUT_DIRS[@]}"; do
  if [ ! -d "$out_dir" ]; then
    echo -e "${YELLOW}⚠${NC} $out_dir does not exist"
    continue
  fi
  file_count=$(find "$out_dir" -type f | wc -l)
  echo -e "${GREEN}✓${NC} $out_dir exists ($file_count files)"
done
echo ""
# Exit with appropriate code: non-zero iff any test failed, so CI can
# gate on this script directly.
if [ "$FAILED" -ne 0 ]; then
  echo -e "${RED}Some tests failed.${NC}"
  exit 1
fi
echo -e "${GREEN}All tests passed!${NC}"
exit 0