(* Theory Recursion
   (from the index of Isabelle/HOL/IsarTut; header text commented out so
   the theory file itself still parses) *)

(* Theory header (classic pre-Isar-2005 syntax), importing Main.
   NOTE(review): the header line was duplicated; a theory may declare its
   header only once, so the second copy has been removed. *)
theory Recursion = Main:

text {* Beginning with section 3.4.1 *}

(* Mutually recursive datatypes for arithmetic ('a aexp) and boolean
   ('a bexp) expressions over variables of type 'a (Tutorial section 3.4.1).
   IF embeds a boolean test inside an arithmetic expression, which is why
   the two types must be declared together with "and". *)
datatype 'a aexp = IF "'a bexp" "'a aexp" "'a aexp"
                 | Sum  "'a aexp" "'a aexp"
                 | Diff "'a aexp" "'a aexp"
                 | Var 'a
                 | Num nat
and      'a bexp = Less "'a aexp" "'a aexp"
                 | And  "'a bexp" "'a bexp"
                 | Neg "'a bexp"

(* Mutually recursive evaluators: evala for arithmetic and evalb for
   boolean expressions; env maps variables to nat values. *)
consts evala:: "'a aexp => ('a => nat) => nat"
       evalb:: "'a bexp => ('a => nat) => bool"

(* NOTE(review): the defining equations below were floating free of any
   definition package -- "primrec" restored so they define evala/evalb by
   structural (mutual) recursion. *)
primrec
"evala (IF b a1 a2) env =
    (if evalb b env then evala a1 env else evala a2 env)"
"evala (Sum a1 a2) env  = evala a1 env + evala a2 env"
"evala (Diff a1 a2) env = evala a1 env - evala a2 env"
"evala (Var a) env = env a"
"evala (Num n) env = n"

"evalb (Less a1 a2) env = (evala a1 env < evala a2 env)"
"evalb (And b1 b2) env  = (evalb b1 env & evalb b2 env)"
"evalb (Neg b) env      = (~ evalb b env)"

(* Simultaneous substitution: substa/substb apply a substitution s
   (mapping variables to 'b-expressions) across arithmetic and boolean
   expressions by mutual structural recursion. *)
consts substa:: "('a => 'b aexp) => 'a aexp => 'b aexp"
       substb:: "('a => 'b aexp) => 'a bexp => 'b bexp"

text {* Perhaps there are too many a's and b's running around
here. *}

(* NOTE(review): "primrec" restored -- the equations were not attached to
   any definition package. *)
primrec
"substa s (IF b a1 a2) =
    IF (substb s b) (substa s a1) (substa s a2)"
"substa s (Sum a1 a2)  = Sum (substa s a1) (substa s a2)"
"substa s (Diff a1 a2) = Diff (substa s a1) (substa s a2)"
"substa s (Var a)      = s a"
"substa s (Num n)      = Num n"

"substb s (Less a1 a2) = Less (substa s a1) (substa s a2)"
"substb s (And b1 b2)  = And (substb s b1) (substb s b2)"
"substb s (Neg b)      = Neg (substb s b)"

(* Evaluation commutes with substitution: evaluating a substituted
   expression equals evaluating the original under the environment that
   pre-evaluates each substituted variable.  Proved by mutual structural
   induction.  NOTE(review): the proposition was missing its introducing
   keyword; "theorem" restored. *)
theorem
"evala (substa s a) env = evala a (%x. evala (s x) env) &
 evalb (substb s b) env = evalb b (%x. evala (s x) env)" (is "?P a b")
    by (induct a and b, auto)

text {* A one-line proof for a two-line theorem!  
Since there is nothing but structural recursion
going on in substitution it is nice that theorems
about it can be proved by pure structural induction
without a lot of rigamarole. The conditional
constructs in the operationally defined functions
are what make proofs about evaluation hard(er). *}

text {* Exercise 3.4.1, normalizing binary expressions *}

(* Normality predicates (Exercise 3.4.1): a bexp is "normal" when it is a
   single Less comparison of normal aexps (no And/Neg); an aexp is normal
   when every embedded bexp is. *)
consts normala:: "'a aexp => bool"
       normalb:: "'a bexp => bool"
(* NOTE(review): "primrec" restored -- the equations were floating free
   of any definition package. *)
primrec
"normala (IF b a1 a2) = (normalb b & normala a1 & normala a2)"
"normala (Sum a1 a2)  = (normala a1 & normala a2)"
"normala (Diff a1 a2) = (normala a1 & normala a2)"
"normala (Var a)      = True"
"normala (Num n)      = True"

"normalb (Less a1 a2) = (normala a1 & normala a2)"
"normalb (And b1 b2)  = False"
"normalb (Neg b)      = False"

text {* Remark -- it would be interesting to take this
farther by defining "canonical" as having all the IFs
confined to the outermost layer of the construct, so
that all interior arithmetic expressions are IF-free. *}

(* Normalization: norma rewrites an aexp into normal form; normif
   normalizes a bexp b given already-normalized then/else branches t and e,
   compiling And into nested IFs and Neg into a branch swap. *)
consts norma::  "'a aexp => 'a aexp"
       normif:: "'a bexp => 'a aexp => 'a aexp => 'a aexp"

(* NOTE(review): "primrec" restored -- the equations were floating free
   of any definition package. *)
primrec
"norma (IF b a1 a2) = normif b (norma a1) (norma a2)"
"norma (Sum a1 a2)  = Sum (norma a1) (norma a2)"
"norma (Diff a1 a2) = Diff (norma a1) (norma a2)"
"norma (Var a)      = Var a"
"norma (Num n)      = Num n"

"normif (Less a1 a2) t e = IF (Less (norma a1) (norma a2)) t e"
"normif (And b1 b2)  t e = normif b1 (normif b2 t e) e"
"normif (Neg b)      t e = normif b e t"

(* norma produces normal expressions; normif produces a normal expression
   exactly when both continuation arguments are normal.  The two claims
   must be stated together to get a usable mutual induction hypothesis. *)
theorem "normala (norma (a::'a aexp)) &
         (!t e. normala (normif (b::'a bexp) t e)
                = (normala t & normala e))"
by (induct a and b, auto)

text {* OK, I think I see what was happening.  I'd make
a mistake in the theory or the theorem statement, then
when a ``by'' proof attempt failed I'd try to expand it.

Eventually I'd get down to a subgoal that I could see
was broken and I'd go fix the theory.  Then the expanded
proof would go through, but I didn't know how to
formulate the one-line incantation and get rid of
all the intermediate stuff.

I will keep going, since I have not yet reached the
point where I gave up on Isabelle scripts.  *}

text {* Section 3.4.2, Nested Recursion. *}

(* Nested recursion (section 3.4.2): a term is a variable or a function
   symbol applied to a *list* of terms -- the recursion nests through the
   list type, hence the quoted name "term". *)
datatype ('v,'f)"term" = Var 'v | App 'f "('v,'f) term list"

(* Substitution over nested-recursive terms: subst handles one term,
   substs maps it across an argument list.
   NOTE(review): the introducing "consts" and "primrec" keywords were
   missing from the declarations/equations; restored. *)
consts
 subst :: "('a => ('b,'f)term) => ('a,'f)term      => ('b,'f)term"
 substs:: "('a => ('b,'f)term) => ('a,'f)term list => ('b,'f)term list"

primrec
"subst s (Var x) = s x"
"subst s (App f ts) = App f (substs s ts)"

"substs s [] = []"
"substs s (t#ts) = subst s t # substs s ts"

(* Var is the identity substitution.  induct_tac handles the simultaneous
   induction over a term and a term list; plain induct fails here (see the
   discussion in the following text block). *)
lemma "subst Var t = (t ::('v,'f)term) &
       substs Var ts = (ts ::('v,'f)term list)"
by (induct_tac t and ts, auto)

text {* Hmmm.... induct\_tac works but induct does not!
Something about the types having incompatible induction rules.
Let's see if we can prove it without resorting to the tactic. 

Or at least see what it is doing. How do I get
this thing to show me the rules it is trying to use?  *}

(* Structured Isar version of the identity-substitution lemma, with one
   fix/assume/show step per induction case.
   NOTE(review): the proof was left unclosed; "qed" added so the theory
   processes (the commentary below says the proof was in fact finished). *)
lemma "subst  Var t  = (t::('v,'f)term) &
       substs Var ts = (ts::('v,'f)term list)"
       (is "?P t & ?Q ts" is "?R t ts")
proof (induct_tac t and ts)
   fix v show "?P (Var v)" by simp
   show "?Q []" by simp
   fix f ts assume "?Q ts"
   show "?P (App f ts)" by simp
   fix t ts assume "?P t" "?Q ts"
   from prems show "?Q (t#ts)" by simp
qed

text {* OK, I had to go home, read ahead in the tutorial,
and sleep, but I got it done (using @{text induct_tac}.)
Isar is extremely picky
about what you try to assume.  For example, if you try
to assume @{text "?R t ts"} in the last step, the show
does not go on! *}

text {*
Note for introductory remarks:

A proposition containing @{text "==>"} is a claim that
the right-hand-side can be proved from the left hand side.
A proposition based on @{text "-->"} on the other hand is
a \emph{predicate} and you are claiming that in all cases
(in the quantification context) the predicate evaluates to
@{term True}.  In Isabelle implications are pretty much
automatically elevated to meta-implications but in Isar
you should try to say what you mean in the first place. *}

text {* Second note: some not-so-old proverbs, part one:

Isabelle proofs are unreadable.  Isar proofs are unwritable.

You can have anything you want but you can only
show a pending goal at Isar's Restaurant.

You can't even show a pending goal if you've assumed
something or fixed something that Isar doesn't like
or failed to fix something that Isar does like.
How do you know it didn't like your assumption?  The show
gives a warning.  Then when you finish proving the claim
you get a really cryptic error message and apparent progress.

More meta-musings:

You use @{text rule} to \emph{undo} the effect of a rule.
This is because we're fundamentally working backwards.

The fundamental rule for proof by contradiction is called
@{text classical}.  The classical reasoner is called
@{text clarify} if you don't want it to simplify.  If you
do it's called @{text clarsimp}.  So far so good, but if
you want to allow goal splitting they are called
@{text safe} and @{text auto}, respectively.
The rules for contrapositives don't
rewrite an implication in your goal -- they re-arrange the
list of hypotheses, swapping one of them for the goal.
To turn an implication into the equivalent disjunction
or its contrapositive you need another rule, the name
of which escapes me at the moment.  But it is a safe bet
you'd never guess it correctly in less than three tries!

A rule, by the way, is nothing more than a proved proposition
involving schematic variables and where the outermost operator
is meta-implication.  Applying a rule forwards means that if
you can match up the left-hand-side of the rule with stuff in 
the left-hand-side of your goal then you can add the right-hand-
side of the rule to the left-hand side of your goal.

Got that?  Good.  Applying a rule \emph{backwards} means
that if the right-hand-side of your goal can be matched
with the right-hand-side of the rule, the right-hand-side
of your goal is \emph{replaced} by the left-hand-side
of the rule.  There is some hocus-pocus having to do
with matching up pieces on the left too, but the take-home
from that is if you don't like what @{text rule} does, try
@{text frule}.

But wait, there's more!  If the rule you're trying to use
backwards is one that, if applied frontwards, would introduce
a symbol that already appears in your goal you can use @{text rule}.
If on the other hand you want to convert your goal to one that
uses a particular symbol, you use the \emph{elimination} rule
for that symbol with @{text erule}.

There, piece of cake.  I don't understand why people are
intimidated by this stuff.

Fundamentally, the objective in either Isabelle or Isar
is to eliminate goals.  A goal is eliminated most basically
by the @{text assumption} method, which works only if the
right-hand-side appears among the terms on the left-hand-side
verbatim.  Since matching a theorem against your goal
allows you to insert the RHS into the LHS, @{text "by rule thm_foo"}
is the next-most-fundamental way of eliminating a goal.
All @{text by} does is run the given method, try to eliminate
all pending (sub)goals by @{text assumption}, and fail if the goals
don't go away.  Once the list of goals is empty you get
to say @{text qed} in Isar or @{text done} in Isabelle, except
that @{text by} says that for you. *}

text {* Now Exercise 3.4.2 

The goal is to show that substitution and a certain kind
of composition commute.  The first challenge is to cleanly
model the composition.  Since @{text "g a"} is an expression
it can't be an argument to @{term f}.  On the other hand,
we don't want the supposed composition to be trivially
equivalent to nested substitution.  

Let's try this: *}

(* Composition of substitutions (Exercise 3.4.2): (subcomp f g) a first
   builds the term g a, then applies subst f to the result. *)
constdefs subcomp:: "('b => ('v,'f) term)
                  => ('a => ('b,'f) term)
                  => ('a => ('v,'f) term)"
         "subcomp f g == (%a. subst f (g a))"

(* Substituting with a composition equals substituting twice -- stated
   simultaneously for terms and term lists so mutual induction applies.
   subcomp_def must be unfolded before the induction. *)
theorem "subst (subcomp f g) (t ::('a,'f)term) =    ( subst f ( subst g t))
      & substs (subcomp f g) (ts::('a,'f)term list)=(substs f (substs g ts))"
        (is "?P t & ?Q ts")
by (simp add: subcomp_def, induct_tac t and ts, auto)

text {* OK, so that's a three-line Isabelle proof collapsed
into a one-line Isar proof in an unsubtle manner.  Sue me,
once you've fixed @{text induct} so it works here.

Exercise 3.4.3 

As the tutorial suggests, it is very hard to get
it to accept a functional definition directly.  Let's
just do it the easy way: *}

(* Exercise 3.4.3: trev recursively reverses a term's argument lists;
   trevs does the same across a list while reversing the list itself.
   NOTE(review): "primrec" restored -- the equations were floating free
   of any definition package. *)
consts trev:: "('v,'f)term => ('v,'f)term"
       trevs:: "('v,'f)term list => ('v,'f)term list"
primrec
"trev (Var v) = Var v"
"trev (App f ts) = App f (trevs ts)"
"trevs [] = []"
"trevs (t#ts) = (trevs ts)@[trev t]"

(* Key simp lemma for the double-reversal theorem: trevs is map trev
   composed with list reversal. *)
lemma trevs_map: "trevs ts = map trev (rev ts)"
by (induct ts, auto)

(* trev is an involution; proved together with the list version so mutual
   induction goes through, using trevs_map to push trevs through rev. *)
theorem "trev (trev t)   = (t::('v,'f)term) &
        trevs (trevs ts) = (ts::('v,'f)term list)"
        (is "?P t & ?Q ts")
by (induct_tac t and ts, auto simp: trevs_map)

text {* Is it obvious yet that I've been reading ahead
into chapter four? *}

(* Branching of arbitrary (possibly infinite) degree: each Br node holds
   a function from an index type 'i to subtrees. *)
datatype ('a,'i)bigtree = Tip | Br 'a "'i => ('a,'i)bigtree"

(* map over bigtrees: apply f at every Br label, recursing pointwise
   through the branching function.
   NOTE(review): "primrec" restored -- the equations were floating free
   of any definition package. *)
consts map_bt:: "('a => 'b) => ('a,'i)bigtree => ('b,'i)bigtree"
primrec
"map_bt f Tip = Tip"
"map_bt f (Br a F) = Br (f a) (%i. map_bt f (F i))"

(* Exploratory proof attempt, abandoned mid-way (see the commentary that
   follows).  NOTE(review): terminated with "oops" so the remainder of the
   theory still processes; the working one-line proof of the same lemma
   appears just below. *)
lemma "map_bt (g o f) T = map_bt g (map_bt f T)" (is "?P T")
proof (induct T) -- {* At last, a case where @{text induct} works! *}
  show "?P Tip" by simp
  fix a fun show "?P (Br a fun)"
apply (simp add: o_def)
apply (intro) -- {* I want @{text lambdaI} here, but that's
not the right name.  Fortunately, @{text intro} alone does what I want. *}
apply (cases "fun i")
oops

text {* Well, that was fun.  That theorem is provable
in one step.  What magic incantation
is missing from the more explicit text?  The world may never know ... *}

(* The same composition lemma, proved in one step: bigtree is not mutually
   recursive, so plain induct works here. *)
lemma "map_bt (g o f) T = map_bt g (map_bt f T)"
by (induct T, auto)



(* Theorem statements echoed by the original web presentation (generated
   output, not part of the theory script).  Kept for reference but
   commented out so the theory file still processes:

  evala (substa s a) env = evala a (%x. evala (s x) env) &
  evalb (substb s b) env = evalb b (%x. evala (s x) env)

  normala (norma a) & (ALL t e. normala (normif b t e) = (normala t & normala e))

  subst term.Var t = t & substs term.Var ts = ts

  subst term.Var t = t & substs term.Var ts = ts

  subst (subcomp f g) t = subst f (subst g t) &
  substs (subcomp f g) ts = substs f (substs g ts)

  lemma trevs_map:
  trevs ts = map trev (rev ts)

  trev (trev t) = t & trevs (trevs ts) = ts

  map_bt (g o f) T = map_bt g (map_bt f T)
*)

end