This repository was archived by the owner on Apr 22, 2026. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathprd_validator.py
More file actions
178 lines (143 loc) · 6.19 KB
/
prd_validator.py
File metadata and controls
178 lines (143 loc) · 6.19 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
"""
PRD Completeness Validator

Validates product requirement documents against templates drawn from the
author's career in Product.

Created by Dimos Papadopoulos, currently working in Product at an
entertainment company, and also a published author and coach.
"""
import yaml
import re
from pathlib import Path
from typing import Dict, List, Tuple
from dataclasses import dataclass
@dataclass
class ValidationResult:
    """Result of validating a single PRD section against the template."""
    section_name: str  # Name of the template section that was checked
    required: bool  # Whether the template marks this section as required
    found: bool  # True if at least one of the section's keywords matched
    severity: str  # Template severity level ('critical', 'high', 'medium')
    keywords_found: List[str]  # Keywords from the template found in the PRD text
    score: int  # Points earned: the severity weight if found, else 0
class PRDValidator:
    """Validates PRDs against YAML templates.

    A template declares a list of ``sections`` (each with a name, keywords,
    a severity level, and a required flag) and an optional ``scoring``
    mapping that overrides the per-severity point weights and passing score.
    """

    def __init__(self, template_path: str):
        """Load the validation template from a YAML file.

        Args:
            template_path: Path to the YAML template file.
        """
        self.template = self._load_template(template_path)
        # Scoring overrides; missing keys fall back to defaults in _get_weight().
        self.weights = self.template.get('scoring', {})

    def _load_template(self, path: str) -> Dict:
        """Load and parse the YAML template at *path*."""
        # Explicit encoding for consistency with validate_prd(); avoids
        # locale-dependent decoding of the template file.
        with open(path, 'r', encoding='utf-8') as f:
            return yaml.safe_load(f)

    def validate_prd(self, prd_path: str) -> Tuple[List[ValidationResult], int]:
        """
        Validate a PRD file against the template

        Args:
            prd_path: Path to the PRD document (e.g. a markdown file).

        Returns: (list of results, overall score)
            The overall score is an integer percentage (0-100) of the
            achievable severity-weighted points that were earned.
        """
        # Read PRD content
        with open(prd_path, 'r', encoding='utf-8') as f:
            prd_content = f.read().lower()  # Case-insensitive matching

        results = []
        total_possible_score = 0
        earned_score = 0

        # Check each declared section; .get() keeps a template without a
        # 'sections' key from raising KeyError (it simply scores 0).
        for section in self.template.get('sections', []):
            result = self._validate_section(section, prd_content)
            results.append(result)

            # Every section contributes its weight to the achievable total;
            # only found sections contribute to the earned score.
            section_weight = self._get_weight(section['severity'])
            total_possible_score += section_weight
            if result.found:
                earned_score += result.score

        # Percentage of achievable points; guard against an empty template.
        overall_score = int((earned_score / total_possible_score) * 100) if total_possible_score > 0 else 0

        return results, overall_score

    def _validate_section(self, section: Dict, content: str) -> ValidationResult:
        """Check if a section exists in the PRD.

        A section counts as "found" when at least one of its keywords occurs
        as a substring of the (already lowercased) PRD content.
        """
        keywords = section.get('keywords', [])
        # Keep the template's original keyword casing in the report.
        keywords_found = [keyword for keyword in keywords if keyword.lower() in content]

        # Section is "found" if at least 1 keyword matches
        found = len(keywords_found) > 0

        # Full weight if found, zero otherwise -- no partial credit.
        weight = self._get_weight(section['severity'])
        score = weight if found else 0

        return ValidationResult(
            section_name=section['name'],
            required=section['required'],
            found=found,
            severity=section['severity'],
            keywords_found=keywords_found,
            score=score
        )

    def _get_weight(self, severity: str) -> int:
        """Get point weight for severity level, honoring template overrides."""
        weights = {
            'critical': self.weights.get('critical_weight', 10),
            'high': self.weights.get('high_weight', 7),
            'medium': self.weights.get('medium_weight', 3)
        }
        # Unknown severities fall back to the medium default.
        return weights.get(severity, 3)

    def format_results(self, results: List[ValidationResult], score: int) -> str:
        """Format validation results as readable output."""
        output = []
        output.append("=" * 60)
        output.append("PRD VALIDATION RESULTS")
        output.append("=" * 60)
        output.append("")

        # Group by status
        missing_critical = [r for r in results if not r.found and r.required and r.severity == 'critical']
        missing_high = [r for r in results if not r.found and r.required and r.severity == 'high']
        missing_medium = [r for r in results if not r.found and r.severity == 'medium']
        found = [r for r in results if r.found]

        # Missing Critical (show first - most important)
        if missing_critical:
            output.append("🔴 CRITICAL MISSING:")
            for result in missing_critical:
                output.append(f" ✗ {result.section_name}")
                output.append(f" Severity: {result.severity.upper()}")
            output.append("")

        # Missing High Priority
        if missing_high:
            output.append("🟡 HIGH PRIORITY MISSING:")
            for result in missing_high:
                output.append(f" ✗ {result.section_name}")
            output.append("")

        # Missing Medium Priority
        if missing_medium:
            output.append("🟢 OPTIONAL MISSING:")
            for result in missing_medium:
                output.append(f" - {result.section_name}")
            output.append("")

        # What's Present
        if found:
            output.append("✓ FOUND:")
            for result in found:
                keywords_str = ", ".join(result.keywords_found[:3])  # Show first 3 keywords
                output.append(f" ✓ {result.section_name} ({keywords_str})")
            output.append("")

        # Overall Score
        output.append("=" * 60)
        output.append(f"OVERALL SCORE: {score}/100")
        passing_score = self.weights.get('passing_score', 90)
        if score >= passing_score:
            output.append(f"STATUS: ✓ READY FOR REVIEW (>= {passing_score})")
        else:
            output.append(f"STATUS: ✗ NOT READY - needs work (target: {passing_score})")
        output.append("=" * 60)

        return "\n".join(output)
# Example usage
if __name__ == "__main__":
    import sys

    if len(sys.argv) < 2:
        # Usage text names the actual script (prd_validator.py, not validator.py).
        print("Usage: python prd_validator.py <path_to_prd.md>")
        print("Example: python prd_validator.py examples/sample_prd.md")
        sys.exit(1)

    prd_path = sys.argv[1]
    template_path = "templates/prd_template.yaml"

    print(f"\nValidating: {prd_path}")
    print(f"Template: {template_path}\n")

    validator = PRDValidator(template_path)
    results, score = validator.validate_prd(prd_path)
    print(validator.format_results(results, score))