-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathverifier.py
More file actions
2546 lines (2104 loc) · 88.2 KB
/
verifier.py
File metadata and controls
2546 lines (2104 loc) · 88.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# Generative Logic: A deterministic reasoning and knowledge generation engine.
# Copyright (C) 2025 Generative Logic UG (haftungsbeschränkt)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# ------------------------------------------------------------------------------
#
# This software is also available under a commercial license. For details,
# see: https://generative-logic.com/license
#
# Contributions to this project must be made under the terms of the
# Contributor License Agreement (CLA). See the project's CONTRIBUTING.md file.
import os
import re
import sys
import itertools
from dataclasses import dataclass, field
from typing import Dict, List, Tuple, Optional
# ---------------------------------------------------------------------------
# Data structures
# ---------------------------------------------------------------------------
@dataclass
class ProofLine:
    """One tab-separated line from a chapter file."""
    expression: str   # column 0: the MPL expression on the line's LHS
    namespace: str    # column 1: namespace the line lives in (e.g. "main")
    tag: str          # column 2: proof tag; "<malformed>" when the column is missing
    rest: List[str]   # columns 3+: remaining tab-separated fields, unparsed
    raw: str          # the original line text, trailing newline stripped
    line_no: int      # 1-based line number within the chapter file
@dataclass
class TagCounter:
    """Pass/fail tally for one proof tag."""
    success: int = 0  # number of checks that passed
    failure: int = 0  # number of checks that failed

    def record(self, passed: bool):
        """Increment the success or failure counter according to *passed*."""
        if passed:
            self.success += 1
        else:
            self.failure += 1
@dataclass
class VerifierState:
    """Aggregated state built up while verifying all chapter files.

    Holds per-tag pass/fail counters, the global theorem list loaded from
    disk, configuration-derived argument indices, and transient per-chapter
    context set before dispatching the line checkers.
    """
    tag_counters: Dict[str, TagCounter] = field(default_factory=dict)
    goal_reached: TagCounter = field(default_factory=TagCounter)
    # Global theorem list: expression → { "type": ..., "ref": ... }
    global_theorems: Dict[str, dict] = field(default_factory=dict)
    # Ordered list of (expression, type, ref) for chapter mapping
    global_theorem_list: List[Tuple[str, str, str]] = field(default_factory=list)
    # Output indices: core_name → 0-based index of output argument
    output_indices: Dict[str, int] = field(default_factory=dict)
    # Input indices: core_name → list of 0-based indices of input arguments
    input_indices: Dict[str, List[int]] = field(default_factory=dict)
    # GL binaries: tag → { name → { "category", "elements", "signature", "definedSet", ... } }
    gl_binaries: Dict[str, dict] = field(default_factory=dict)
    # Definition sets: core_name → { "1-based-position" → [def_set_str, bool] }
    # BUG FIX: this field was declared twice in the original (once as
    # Dict[str, dict], once as Dict[str, Dict[str, list]]); the duplicate
    # silently overrode the first. Kept a single, precisely-typed field.
    definition_sets: Dict[str, Dict[str, list]] = field(default_factory=dict)
    # Transient: set per-chapter before dispatching line checkers
    current_chapter_thm: Optional[Tuple[str, str, str]] = None
    current_chapter_type: str = ""
    # External theorems (raw + renamed forms)
    external_theorems: set = field(default_factory=set)

    def counter_for(self, tag: str) -> TagCounter:
        """Return the counter for *tag*, creating it on first use."""
        if tag not in self.tag_counters:
            self.tag_counters[tag] = TagCounter()
        return self.tag_counters[tag]
# ---------------------------------------------------------------------------
# Parsing
# ---------------------------------------------------------------------------
def parse_chapter_file(filepath: str) -> List[ProofLine]:
    """Read a chapter file into ProofLine records.

    Blank lines are skipped; columns are tab-separated. A missing tag
    column is recorded as "<malformed>".
    """
    parsed: List[ProofLine] = []
    with open(filepath, "r", encoding="utf-8") as fh:
        for lineno, text in enumerate(fh, start=1):
            text = text.rstrip("\r\n")
            if not text.strip():
                continue
            cols = text.split("\t")
            record = ProofLine(
                expression=cols[0] if cols else "",
                namespace=cols[1] if len(cols) > 1 else "",
                tag=cols[2] if len(cols) > 2 else "<malformed>",
                rest=cols[3:] if len(cols) > 3 else [],
                raw=text,
                line_no=lineno,
            )
            parsed.append(record)
    return parsed
def load_global_theorem_list(base_dir: str) -> Tuple[Dict[str, dict],
                                                     List[Tuple[str, str, str]]]:
    """Load ``global_theorem_list.txt`` from *base_dir*.

    Returns a pair: expression → {"type", "ref"} mapping, and the ordered
    list of (expression, type, ref) tuples. Both are empty when the file
    does not exist. Missing columns default to "".
    """
    by_expr: Dict[str, dict] = {}
    in_order: List[Tuple[str, str, str]] = []
    path = os.path.join(base_dir, "global_theorem_list.txt")
    if not os.path.isfile(path):
        return by_expr, in_order
    with open(path, "r", encoding="utf-8") as fh:
        for raw in fh:
            raw = raw.rstrip("\r\n")
            if not raw.strip():
                continue
            cols = raw.split("\t")
            expr = cols[0]
            kind = cols[1] if len(cols) > 1 else ""
            ref = cols[2] if len(cols) > 2 else ""
            by_expr[expr] = {"type": kind, "ref": ref}
            in_order.append((expr, kind, ref))
    return by_expr, in_order
# ---------------------------------------------------------------------------
# Build chapter → theorem mapping
#
# Walk global_theorem_list in order, consume chapter files sequentially.
# "induction" theorems consume 2 chapters (check_zero + check_induction_condition).
# All other types consume 1 chapter.
# ---------------------------------------------------------------------------
def build_chapter_theorem_map(
    chapter_files: List[str],
    theorem_list: List[Tuple[str, str, str]],
) -> Dict[str, Tuple[str, str, str]]:
    """Returns chapter_filename → (theorem_expr, theorem_type, theorem_ref).

    Chapters are consumed in order: an "induction" theorem owns two
    consecutive chapters (zero case + induction step); every other type
    owns exactly one. Stops once the chapter list is exhausted.
    """
    result: Dict[str, Tuple[str, str, str]] = {}
    cursor = 0
    total = len(chapter_files)
    for entry in theorem_list:
        if cursor >= total:
            break
        result[chapter_files[cursor]] = entry
        if entry[1] == "induction":
            if cursor + 1 < total:
                result[chapter_files[cursor + 1]] = entry
            cursor += 2
        else:
            cursor += 1
    return result
# ---------------------------------------------------------------------------
# Theorem goal reached check
# ---------------------------------------------------------------------------
def _find_matching_paren(expr: str, start: int) -> int:
"""Return index of ')' matching '(' at position *start*."""
depth = 0
for i in range(start, len(expr)):
if expr[i] == '(':
depth += 1
elif expr[i] == ')':
depth -= 1
if depth == 0:
return i
return -1
def disintegrate_implication_head(expr: str) -> str:
    """
    Peel off all ``(>[vars](premise)(body))`` layers and return the
    innermost conclusion as-is. No renaming.
    """
    while expr.startswith('(>['):
        # ']' closing the bound-variable list; premise starts right after
        bracket_close = expr.index(']', 3)
        p_start = bracket_close + 1
        if p_start >= len(expr) or expr[p_start] != '(':
            break
        p_end = _find_matching_paren(expr, p_start)
        if p_end < 0:
            break
        # body begins directly after the premise's closing paren
        b_start = p_end + 1
        if b_start >= len(expr):
            break
        if expr[b_start] == '!':
            # Negated body: !(...)  — returned including the '!' prefix
            inner_start = b_start + 1
            if inner_start >= len(expr) or expr[inner_start] != '(':
                break
            inner_end = _find_matching_paren(expr, inner_start)
            if inner_end < 0:
                break
            expr = expr[b_start:inner_end + 1]
            return expr
        if expr[b_start] != '(':
            break
        b_end = _find_matching_paren(expr, b_start)
        if b_end < 0:
            break
        # descend into the body and keep peeling
        expr = expr[b_start:b_end + 1]
    return expr
# ---------------------------------------------------------------------------
# Mirroring helpers (independent copy — no imports from create_expressions)
# ---------------------------------------------------------------------------
def _extract_args(expr: str) -> List[str]:
"""Extract arguments from an expression like ``(in3[a,b,c,+])`` → [a,b,c,+]."""
m = re.search(r'\[([^\]]*)\]', expr)
if not m:
return []
return [a for a in m.group(1).split(',') if a]
def _extract_core_name(expr: str) -> str:
"""Extract core name: ``(in3[a,b,c,+])`` → ``in3``."""
m = re.match(r'\((\w+)\[', expr)
return m.group(1) if m else ""
def _trace_back_to(expr: str,
                   expr_to_lines: Dict[str, List["ProofLine"]],
                   is_target,
                   should_follow=None,
                   visited: set = None) -> bool:
    """Generic recursive trace-back through proof chain.
    Finds *expr* on the LHS of chapter lines, then:
    1. If any such line satisfies *is_target(line)* → True
    2. Otherwise follows rest-field expressions that pass *should_follow(src)*
    (all if *should_follow* is None) and recurses.
    """
    # *visited* guards against cycles in the proof graph
    if visited is None:
        visited = set()
    if expr in visited:
        return False
    visited.add(expr)
    for ch_line in expr_to_lines.get(expr, []):
        if is_target(ch_line):
            return True
        # rest appears to hold (expression, namespace) pairs, hence the
        # step of 2 — matches the layout used by check_implication.
        for i in range(0, len(ch_line.rest), 2):
            src = ch_line.rest[i]
            if should_follow is not None and not should_follow(src):
                continue
            if _trace_back_to(src, expr_to_lines, is_target, should_follow, visited):
                return True
    return False
def disintegrate_implication_full(expr: str) -> Tuple[List[str], str]:
    """
    Peel off all ``(>[vars](premise)(body))`` layers.
    Returns (premises_list, head) where premises_list is list of premise expressions.
    """
    premises: List[str] = []
    while expr.startswith('(>['):
        # ']' closing the bound-variable list; premise starts right after
        bracket_close = expr.index(']', 3)
        p_start = bracket_close + 1
        if p_start >= len(expr) or expr[p_start] != '(':
            break
        p_end = _find_matching_paren(expr, p_start)
        if p_end < 0:
            break
        premise = expr[p_start:p_end + 1]
        b_start = p_end + 1
        if b_start >= len(expr):
            break
        if expr[b_start] == '!':
            # Negated body: !(...) — head keeps the '!' prefix, recursion stops
            inner_start = b_start + 1
            if inner_start >= len(expr) or expr[inner_start] != '(':
                break
            inner_end = _find_matching_paren(expr, inner_start)
            if inner_end < 0:
                break
            premises.append(premise)
            expr = expr[b_start:inner_end + 1]
            return premises, expr
        if expr[b_start] != '(':
            break
        b_end = _find_matching_paren(expr, b_start)
        if b_end < 0:
            break
        # record the premise and descend into the body
        premises.append(premise)
        expr = expr[b_start:b_end + 1]
    return premises, expr
def _replace_arg_safe(expr: str, old: str, new: str) -> str:
"""Replace an argument in expression, argument-level safe (bracket-delimited)."""
pattern = r'(?<=[\[,])' + re.escape(old) + r'(?=[\],])'
return re.sub(pattern, new, expr)
def _normalize_expr_list(exprs: List[str]) -> List[str]:
"""
Rename v-variables across a list of expressions by order of first
appearance in expression arguments (not bound vars).
"""
seen: Dict[str, str] = {}
counter = [0]
def _repl(m: re.Match) -> str:
v = m.group(0)
if v not in seen:
counter[0] += 1
seen[v] = f'v{counter[0]}'
return seen[v]
result: List[str] = []
for expr in exprs:
result.append(re.sub(r'v\d+', _repl, expr))
return result
def _check_mirror(source_expr: str, target_expr: str,
                  output_indices: Dict[str, int]) -> bool:
    """
    Verify that target_expr is a valid mirror of source_expr.
    1. Disintegrate both into premises + head.
    2. From source: find premise with same output var as head → swap.
    3. Try all permutations of non-anchor source premises.
    4. For each permutation: normalize expression lists (ignoring bound vars),
    compare to normalized target expression list.
    """
    src_premises, src_head = disintegrate_implication_full(source_expr)
    tgt_premises, tgt_head = disintegrate_implication_full(target_expr)
    # Need at least an anchor + one swappable premise, and equal lengths
    if len(src_premises) < 2 or len(src_premises) != len(tgt_premises):
        return False
    # Normalize target: [anchor, premises..., head] by expression arg order
    tgt_list = tgt_premises + [tgt_head]
    tgt_norm = _normalize_expr_list(tgt_list)
    # Find output var of source head
    head_core = _extract_core_name(src_head)
    head_args = _extract_args(src_head)
    if head_core not in output_indices:
        return False
    out_idx = output_indices[head_core]
    if out_idx >= len(head_args):
        return False
    head_out_var = head_args[out_idx]
    # Find non-anchor source premise with same output var
    swap_idx = -1
    for i in range(1, len(src_premises)):
        prem_core = _extract_core_name(src_premises[i])
        prem_args = _extract_args(src_premises[i])
        if prem_core in output_indices:
            p_out_idx = output_indices[prem_core]
            if p_out_idx < len(prem_args) and prem_args[p_out_idx] == head_out_var:
                swap_idx = i
                break
    if swap_idx < 0:
        return False
    # After swap: new head = old premise, old head takes the premise's slot
    new_head = src_premises[swap_idx]
    src_non_anchor: List[str] = []
    for i in range(1, len(src_premises)):
        if i == swap_idx:
            src_non_anchor.append(src_head)
        else:
            src_non_anchor.append(src_premises[i])
    # Try all permutations of non-anchor premises
    for perm in itertools.permutations(src_non_anchor):
        candidate_list = [src_premises[0]] + list(perm) + [new_head]
        candidate_norm = _normalize_expr_list(candidate_list)
        if candidate_norm == tgt_norm:
            return True
    return False
def _check_reformulation(source_expr: str, target_expr: str,
                         gl_binaries: Dict[str, dict]) -> bool:
    """
    Verify that target_expr is a valid reformulation of source_expr.
    1. Disintegrate both into premises + head.
    2. Target head must be an existence expression (per GL_binary).
    3. Determine anchor tag → select correct GL_binary.
    4. Expand existence head into left + right using GL_binary elements.
    5. New bound var = first unused v-index in the target theorem.
    6. Check left expression's new-bound-var position == definedSet of that expression.
    7. Left expression joins target premises, right expression is new head.
    8. Permute non-anchor premises, normalize, compare to normalized source.
    """
    src_premises, src_head = disintegrate_implication_full(source_expr)
    tgt_premises, tgt_head = disintegrate_implication_full(target_expr)
    if not tgt_premises:
        return False
    # Determine anchor tag from first premise (e.g. AnchorGauss → Gauss)
    anchor_core = _extract_core_name(tgt_premises[0])
    if not anchor_core.startswith("Anchor"):
        return False
    tag = anchor_core[len("Anchor"):]  # e.g. "Gauss"
    # Select GL binary
    binary = gl_binaries.get(tag)
    if binary is None:
        return False
    # Target head must be an existence expression
    head_core = _extract_core_name(tgt_head)
    if head_core not in binary:
        return False
    head_spec = binary[head_core]
    if head_spec.get("category") != "existence":
        return False
    elements = head_spec.get("elements", [])
    signature = head_spec.get("signature", "")
    if len(elements) != 2 or not signature:
        return False
    # Build substitution map: u_i → actual arg (positional match of
    # signature args against the concrete head args)
    sig_args = _extract_args(signature)
    head_args = _extract_args(tgt_head)
    if len(sig_args) != len(head_args):
        return False
    subst: Dict[str, str] = {}
    for s_arg, h_arg in zip(sig_args, head_args):
        subst[s_arg] = h_arg
    # Find unused v-index for the new bound variable
    all_v_indices = set()
    for m in re.finditer(r'v(\d+)', target_expr):
        all_v_indices.add(int(m.group(1)))
    new_v_idx = 1
    while new_v_idx in all_v_indices:
        new_v_idx += 1
    new_bound_var = f"v{new_v_idx}"
    # Expand elements: substitute args + replace placeholder "1" with new bound var
    def _apply_subst(element_expr: str) -> str:
        result = element_expr
        # Replace "1" placeholder with new bound var (in argument positions)
        # Use the same lookaround-based replacement as process_proof_graphs
        full_map = dict(subst)
        full_map["1"] = new_bound_var
        escaped_keys = [re.escape(k) for k in full_map]
        pattern = r'(?<=[\[,])(' + '|'.join(escaped_keys) + r')(?=[\],])'
        return re.compile(pattern).sub(lambda m: full_map.get(m.group(1), m.group(1)), result)
    left_expr = _apply_subst(elements[0])
    right_expr = _apply_subst(elements[1])
    # Check definedSet: left expression's new bound var must be at the definedSet position
    left_core = _extract_core_name(left_expr)
    if left_core in binary:
        defined_set_uvar = binary[left_core].get("definedSet", "")
        if defined_set_uvar:
            left_sig = binary[left_core].get("signature", "")
            left_sig_args = _extract_args(left_sig)
            if defined_set_uvar in left_sig_args:
                ds_idx = left_sig_args.index(defined_set_uvar)
                left_actual_args = _extract_args(left_expr)
                if ds_idx >= len(left_actual_args) or left_actual_args[ds_idx] != new_bound_var:
                    return False
    # Build expanded target: original premises + left_expr as extra premise, right_expr as head
    expanded_non_anchor = []
    for i in range(1, len(tgt_premises)):
        expanded_non_anchor.append(tgt_premises[i])
    expanded_non_anchor.append(left_expr)
    # Source non-anchor premises
    src_non_anchor = src_premises[1:]
    if len(expanded_non_anchor) != len(src_non_anchor):
        return False
    # Normalize source: [anchor, non_anchor..., head]
    src_list = src_premises + [src_head]
    src_norm = _normalize_expr_list(src_list)
    # Try all permutations of expanded non-anchor premises
    for perm in itertools.permutations(expanded_non_anchor):
        candidate_list = [tgt_premises[0]] + list(perm) + [right_expr]
        candidate_norm = _normalize_expr_list(candidate_list)
        if candidate_norm == src_norm:
            return True
    return False
def load_gl_binaries(binaries_dir: str) -> Dict[str, dict]:
    """
    Load GL binary JSON files. Returns tag → binary_dict.
    E.g. "Peano" → contents of GL_binary_Peano.json
    """
    import json
    loaded: Dict[str, dict] = {}
    if not os.path.isdir(binaries_dir):
        return loaded
    prefix, suffix = "GL_binary_", ".json"
    for entry in os.listdir(binaries_dir):
        if not (entry.startswith(prefix) and entry.endswith(suffix)):
            continue
        tag = entry[len(prefix):-len(suffix)]
        with open(os.path.join(binaries_dir, entry), "r", encoding="utf-8") as fh:
            loaded[tag] = json.load(fh)
    return loaded
def load_output_indices(config_dir: str) -> Dict[str, int]:
    """
    Load output indices from ConfigVisu.json.
    Returns core_name → 0-based index of the output argument.
    """
    import json
    indices: Dict[str, int] = {}
    path = os.path.join(config_dir, "ConfigVisu.json")
    if not os.path.isfile(path):
        return indices
    with open(path, "r", encoding="utf-8") as fh:
        config = json.load(fh)
    for core_name, spec in config.items():
        if not isinstance(spec, dict):
            continue
        output_args = spec.get("output_args", [])
        if not output_args:
            continue
        bracket = re.search(r'\[([^\]]*)\]', spec.get("short_mpl", ""))
        if bracket is None:
            continue
        arg_order = [a.strip() for a in bracket.group(1).split(',') if a.strip()]
        # record the position of the first declared output arg found
        for candidate in output_args:
            if candidate in arg_order:
                indices[core_name] = arg_order.index(candidate)
                break
    return indices
def load_definition_sets(config_dir: str) -> Dict[str, Dict[str, list]]:
    """
    Load definition_sets from ConfigVisu.json.
    Returns core_name → { "1-based-position-str" → [def_set_str, bool] }.
    """
    import json
    sets: Dict[str, Dict[str, list]] = {}
    path = os.path.join(config_dir, "ConfigVisu.json")
    if not os.path.isfile(path):
        return sets
    with open(path, "r", encoding="utf-8") as fh:
        config = json.load(fh)
    for core_name, spec in config.items():
        if not isinstance(spec, dict):
            continue
        defs = spec.get("definition_sets", {})
        if defs:
            sets[core_name] = defs
    return sets
def load_input_indices(config_dir: str) -> Dict[str, List[int]]:
    """
    Load input indices from ConfigVisu.json.
    Returns core_name → list of 0-based indices of input arguments.
    """
    import json
    result: Dict[str, List[int]] = {}
    path = os.path.join(config_dir, "ConfigVisu.json")
    if not os.path.isfile(path):
        return result
    with open(path, "r", encoding="utf-8") as fh:
        config = json.load(fh)
    for core_name, spec in config.items():
        if not isinstance(spec, dict):
            continue
        input_args = spec.get("input_args", [])
        if not input_args:
            continue
        bracket = re.search(r'\[([^\]]*)\]', spec.get("short_mpl", ""))
        if bracket is None:
            continue
        arg_order = [a.strip() for a in bracket.group(1).split(',') if a.strip()]
        positions = [arg_order.index(name) for name in input_args if name in arg_order]
        if positions:
            result[core_name] = positions
    return result
def _find_digit_args(all_exprs: List[str],
                     anchor_expr: str,
                     input_indices: Dict[str, List[int]],
                     output_indices: Dict[str, int]) -> set:
    """
    Replicate C++ findDigitArgs: collect all input args from all expressions,
    remove anchor args, subtract output args.
    """
    inputs: set = set()
    outputs: set = set()
    # Gather input and output args in one pass over the expressions.
    for expr in all_exprs:
        core = _extract_core_name(expr)
        args = _extract_args(expr)
        if core in input_indices:
            for idx in input_indices[core]:
                if idx < len(args):
                    inputs.add(args[idx])
        if core in output_indices:
            out_idx = output_indices[core]
            if out_idx < len(args):
                outputs.add(args[out_idx])
    # Drop anchor args, then subtract everything produced as an output.
    inputs.difference_update(_extract_args(anchor_expr))
    return inputs - outputs
def _find_immutable_args(chain_exprs: List[str],
                         digit_args: set,
                         ind_var: str,
                         input_indices: Dict[str, List[int]],
                         output_indices: Dict[str, int]) -> set:
    """
    Replicate C++ findImmutableArgs: start from digit args minus ind_var,
    propagate through outputs where all inputs are immutable.
    """
    immutables = set(digit_args)
    immutables.discard(ind_var)
    # Fixpoint iteration: keep sweeping until no new immutable is found.
    changed = True
    while changed:
        changed = False
        for expr in chain_exprs:
            core = _extract_core_name(expr)
            if core not in input_indices or core not in output_indices:
                continue
            args = _extract_args(expr)
            # Check all inputs are immutable
            all_immutable = True
            for idx in input_indices[core]:
                if idx < len(args) and args[idx] not in immutables:
                    all_immutable = False
                    break
            if all_immutable:
                # The output of an all-immutable-input expression is immutable too
                out_idx = output_indices[core]
                if out_idx < len(args) and args[out_idx] not in immutables:
                    immutables.add(args[out_idx])
                    changed = True
    return immutables
# ---------------------------------------------------------------------------
# Theorem goal reached dispatcher
# ---------------------------------------------------------------------------
def check_theorem_goal_reached(
    chapter_file: str,
    chapter_type: str,
    lines: List[ProofLine],
    chapter_thm: Optional[Tuple[str, str, str]],
    output_indices: Dict[str, int],
    gl_binaries: Dict[str, dict],
) -> bool:
    """Dispatch the per-chapter "theorem goal reached" check by *chapter_type*.

    Inspects only the FIRST line of the chapter; returns True when that
    line establishes the chapter's theorem under its proof type's rules.
    """
    if not lines or chapter_thm is None:
        return False
    thm_expr, thm_type, _ = chapter_thm
    first = lines[0]
    # reformulated_statement: expand existence head, permute, compare
    if chapter_type == "reformulated_statement":
        if first.namespace != "main" or first.tag != "reformulated from":
            return False
        if len(first.rest) < 1:
            return False
        source_expr = first.rest[0]
        return _check_reformulation(source_expr, first.expression, gl_binaries)
    # mirrored_statement: mirror the source theorem and compare
    if chapter_type == "mirrored_statement":
        if first.namespace != "main" or first.tag != "mirrored from":
            return False
        if len(first.rest) < 1:
            return False
        source_expr = first.rest[0]
        return _check_mirror(source_expr, first.expression, output_indices)
    # back_reformulated_statement: single line with tag "incubator back reformulation"
    if chapter_type == "back_reformulated_statement":
        if first.namespace != "main" or first.tag != "incubator back reformulation":
            return False
        if len(first.rest) < 1:
            return False
        return True
    # direct proof, check_zero, check_induction_condition:
    # extract head from theorem, compare with first line
    if chapter_type in ("direct_proof", "check_zero", "check_induction_condition"):
        if first.namespace != "main":
            return False
        head = disintegrate_implication_head(thm_expr)
        return first.expression == head
    return False
# ---------------------------------------------------------------------------
# Empty checker stubs — one per proof tag
#
# Each returns True (success) or False (failure).
# Currently every checker returns False (= not yet implemented).
# ---------------------------------------------------------------------------
def _normalize_implication(expr: str) -> str:
    """
    Normalize v-variables in an implication by order of first appearance
    in expression arguments (not bound var lists in >[...]).
    Two-pass: build map from expression args, then apply to everything.
    """
    seen: Dict[str, str] = {}
    counter = [0]
    # Pass 1: scan argument brackets only (skip >[...] bound-var lists)
    # to decide the renaming order.
    i = 0
    while i < len(expr):
        if expr[i:i+2] == '>[':
            # skip the bound-variable list entirely
            i = expr.index(']', i + 2) + 1
        elif expr[i] == '[' and i > 0 and expr[i-1] != '>':
            j = expr.index(']', i + 1)
            for m in re.finditer(r'v\d+', expr[i:j+1]):
                v = m.group(0)
                if v not in seen:
                    counter[0] += 1
                    seen[v] = f'v{counter[0]}'
            i = j + 1
        else:
            i += 1
    # Pass 2: apply the map everywhere, including inside >[...]
    def _repl(m: re.Match) -> str:
        return seen.get(m.group(0), m.group(0))
    return re.sub(r'v\d+', _repl, expr)
def _reconstruct_implication(chain_exprs: List[str], head: str) -> str:
    """
    Reconstruct an implication from premises + head.
    Only v-variables are candidates for binding.
    Multi-occurrence v-vars become bound at their first appearance.
    """
    all_exprs = chain_exprs + [head]
    # Count occurrences of each v-variable across the whole chain.
    counter: Dict[str, int] = {}
    for e in all_exprs:
        for a in _extract_args(e):
            if not re.match(r'v\d+$', a):
                continue
            counter[a] = counter.get(a, 0) + 1
    # Variables appearing more than once get bound.
    multi = {a for a, c in counter.items() if c > 1}
    placed: set = set()
    # when[i] = variables first appearing in premise i (bound at that layer).
    when: List[List[str]] = [[] for _ in range(len(chain_exprs))]
    for idx, e in enumerate(all_exprs):
        for a in _extract_args(e):
            if a in multi and a not in placed:
                placed.add(a)
                # vars first seen in the head itself get no binding layer
                if idx < len(chain_exprs):
                    when[idx].append(a)
    # Wrap from the innermost premise outward.
    result = head
    for i in range(len(chain_exprs) - 1, -1, -1):
        bound_str = ','.join(when[i])
        result = f'(>[{bound_str}]{chain_exprs[i]}{result})'
    return result
def _collect_bound_vars(expr: str) -> set:
"""Collect all variables listed inside >[...] brackets."""
result = set()
i = 0
while i < len(expr):
if expr[i:i+2] == '>[':
j = expr.index(']', i + 2)
for a in expr[i+2:j].split(','):
a = a.strip()
if a:
result.add(a)
i = j + 1
else:
i += 1
return result
def _collect_all_expr_vars(expr: str) -> set:
"""Collect all variables from expression argument brackets (not >[...])."""
result = set()
i = 0
while i < len(expr):
if expr[i:i+2] == '>[':
i = expr.index(']', i + 2) + 1
elif expr[i] == '[' and i > 0 and expr[i-1] != '>':
j = expr.index(']', i + 1)
for a in expr[i+1:j].split(','):
a = a.strip()
if a:
result.add(a)
i = j + 1
else:
i += 1
return result
def _normalize_all_vars_in_list(exprs: List[str]) -> List[str]:
"""Rename ALL variables across a list of expressions by order of first
appearance in expression arguments. Consistent across the whole list."""
seen: Dict[str, str] = {}
counter = [0]
for expr in exprs:
for m in re.finditer(r'(?<=[\[,])([^,\[\]]+)(?=[\],])', expr):
a = m.group(1)
if a not in seen:
counter[0] += 1
seen[a] = f'v{counter[0]}'
if not seen:
return list(exprs)
escaped = [re.escape(k) for k in seen]
pattern = re.compile(r'(?<=[\[,])(' + '|'.join(escaped) + r')(?=[\],])')
return [pattern.sub(lambda m: seen[m.group(1)], e) for e in exprs]
def check_implication(line: ProofLine, chapter: List[ProofLine],
                      state: VerifierState) -> bool:
    """
    IMPLICATION checker.
    Namespace rules:
    - Implication (rest[0]) must have namespace "main" (rest[1]).
    - Among premise namespaces, at most one non-main namespace kind.
    - Result namespace must equal that non-main kind, or "main" if all
    premises are "main".
    Structural check — two cases:
    Disintegrate the reference implication. If the first premise is an
    Anchor expression (theorem-level): all variables are changeable.
    Disintegrate into flat premise+head lists, normalize ALL vars by
    first appearance, permutate actual premises, compare.
    Otherwise (disintegration-level): unchangeables = all_expr_vars -
    bound_vars. Normalize, rebuild, compare via >[...] structure.
    """
    if len(line.rest) < 2:
        return False
    impl = line.rest[0]
    impl_ns = line.rest[1]
    # Collect premise namespaces (rest holds (expr, ns) pairs from index 2)
    premise_nss = [line.rest[i] for i in range(3, len(line.rest), 2)]
    # All non-main namespaces across implication + premises
    all_nss = [impl_ns] + premise_nss
    non_main = set(ns for ns in all_nss if ns != "main")
    # At most one non-main namespace kind among implication + premises
    if len(non_main) > 1:
        return False
    # Result namespace: must be the non-main kind from premises/impl,
    # OR the result can be in a non-main validity context when all
    # sources are "main" (a main implication firing inside a non-main scope)
    if non_main:
        if line.namespace != list(non_main)[0]:
            return False
    # --- Structural check ---
    result_expr = line.expression
    premises = [line.rest[i] for i in range(2, len(line.rest), 2)]
    ref_premises, ref_head = disintegrate_implication_full(impl)
    if ref_premises and ref_premises[0].startswith("(Anchor"):
        # Anchor (theorem-level): all vars changeable.
        # Compare flat disintegrated lists with all vars normalized.
        ref_chain = ref_premises + [ref_head]
        norm_ref = _normalize_all_vars_in_list(ref_chain)
        for perm in itertools.permutations(premises):
            act_chain = list(perm) + [result_expr]
            norm_act = _normalize_all_vars_in_list(act_chain)
            if norm_act == norm_ref:
                return True
    else:
        # Non-anchor: substitution-based matching.
        # Unchangeables = vars not bound in >[...]. Changeables = bound vars.
        # Find a permutation of actual premises where core names align,
        # unchangeables match at every position, and changeables map
        # consistently (well-defined function).
        all_vars = _collect_all_expr_vars(impl)
        bound_vars = _collect_bound_vars(impl)
        unchangeables = all_vars - bound_vars
        ref_chain = ref_premises + [ref_head]
        for perm in itertools.permutations(premises):
            act_chain = list(perm) + [result_expr]
            if len(ref_chain) != len(act_chain):
                continue
            changeable_map: Dict[str, str] = {}
            ok = True
            for ref_e, act_e in zip(ref_chain, act_chain):
                # Core names must align position-by-position.
                if _extract_core_name(ref_e) != _extract_core_name(act_e):
                    ok = False
                    break
                ra = _extract_args(ref_e)
                aa = _extract_args(act_e)
                if len(ra) != len(aa):
                    ok = False
                    break
                for rv, av in zip(ra, aa):
                    if rv in unchangeables:
                        # Free variables must match literally.
                        if rv != av:
                            ok = False
                            break
                    elif rv in bound_vars:
                        # Bound variables must map consistently (a function).
                        if rv in changeable_map:
                            if changeable_map[rv] != av:
                                ok = False
                                break
                        else:
                            changeable_map[rv] = av
                    else:
                        # Not seen in the implication at all: literal match.
                        if rv != av:
                            ok = False
                            break
                if not ok:
                    break
            if ok:
                return True
    return False
def _normalize_with_unchangeables(expr: str, unchangeables: set) -> str:
"""
Normalize variables by order of first appearance in expression arguments
(skipping >[...] brackets). Variables in unchangeables are kept as-is.
Two-pass: build map from expression args, then apply everywhere.
"""
seen: Dict[str, str] = {}
counter = [0]
i = 0
while i < len(expr):
if expr[i:i+2] == '>[':
i = expr.index(']', i + 2) + 1
elif expr[i] == '[' and i > 0 and expr[i-1] != '>':
j = expr.index(']', i + 1)
for a in expr[i+1:j].split(','):
a = a.strip()
if a and a not in unchangeables and a not in seen:
counter[0] += 1
seen[a] = f'v{counter[0]}'
i = j + 1
else:
i += 1
if not seen:
return expr
escaped = [re.escape(k) for k in seen]