Lamb demo v. 0.6.1, Last updated Sep 10, 2013
# load some classes and then put some convenience names into the local namespace
import lamb
from lamb import *
from lamb.tree_mini import Tree
from lamb.utils import *
import imp
#imp.reload(lamb)
#imp.reload(lang)
from lamb.types import TypeMismatch, type_e, type_t, type_property
from lamb.lang import te
from lamb.utils import ltx_print
from lamb.meta import TypedTerm, TypedExpr, LFun, CustomTerm
warning: coerced guessed type t for 'Cat' into <e,t>, to match argument 'x' warning: coerced guessed type t for 'Gray' into <e,t>, to match argument 'x' warning: coerced guessed type t for 'In' into <e,t>, to match argument 'y' warning: coerced guessed type t for 'In(y)' into <e,t>, to match argument 'x'
# it will become clear what this does later
# Toggle off CustomTerm-based rendering of constants; re-enabled further down
# (meta.constants_use_custom(True)) once CustomTerm has been introduced.
lamb.meta.constants_use_custom(False)
Solution: a system for developing interactive fragments: "lamb"
Inspired (to some degree) by interactive/automatic proof assistants such as agda and coq. In the python world, SymPy provides some of this functionality but has very little overlap with what is needed for formal semantics.
Pros:
Cons:
Some solutions exist, but are not widely known / adopted, and are ad-hoc to varying degrees.
This all basically worked off-the-shelf, all I had to do was write the latex/html code that gets handed off to mathjax!
meta.pmw_test1._repr_latex_()
'$\\lambda{} p_{t} \\: . \\: \\lambda{} x_{e} \\: . \\: ({P}({x}_{e}) \\wedge{} {p}_{t})$'
meta.pmw_test1
Starting point: a few implementations of things like predicate logic do exist, this is an intro AI exercise sometimes. I started with the AIMA python Expr class, based on the standard Russell and Norvig AI text. But, had to scrap most of it.
Class TypedExpr: parent class for typed expressions
Many straightforward expressions can be parsed. Most expressions are created using a call to TypedExpr.factory, which is abbreviated as "te" in the following examples.
# Parse a term from a string: the "_e" suffix marks type e (an individual).
te("x_e")
# Equivalent direct construction through the meta layer.
meta.TypedTerm("x", types.type_e)
Future: IPython cell magic for metalanguage
Terms: capital letters are constants, lower case are variables. Some convenience type guessing to make expressions shorter.
# Type guessing in action: 'Cat' gets coerced to <e,t> to match its type-e argument
# (a warning is printed when this happens).
te("Cat(x_e)")
warning: coerced guessed type t for 'Cat' into <e,t>, to match argument 'x'
# Both "L" and the Unicode λ are accepted as lambda binders by the parser.
x1 = te("L x_e: Cat(x)")
x2 = te("λx: Dog(x_e)")
# Render both expressions side by side as LaTeX.
ltx_print(x1, x2)
warning: coerced guessed type t for 'Cat' into <e,t>, to match argument 'x' warning: coerced guessed type t for 'Dog' into <e,t>, to match argument 'x'
# A CustomTerm is a constant with customizable rendering; 'Cat' here is a property
# (type <e,t>, i.e. types.type_property).
cat_term = meta.CustomTerm("Cat", typ=types.type_property)
var = te("x_e")
# Function application is just a Python call; inspect the types of the result
# and of the term itself.
cat_term(var)
(cat_term(var)).type
cat_term.type
Various convenience python operators are overloaded:
# '&' builds a conjunction; '>>' presumably builds a conditional -- TODO confirm
# against meta's operator definitions.
(cat_term(var) & te("p_t")) >> te("q_t")
# Build λx_e . Cat(x) explicitly from a body and a variable name.
cat_fun = meta.LFun(types.type_e, cat_term(var), "x")
cat_fun
# Application without beta reduction; uncomment .reduce() to reduce.
cat_fun(te("y_e")) #.reduce()
TypedExprs in general consist of an operator followed by some number of arguments, possibly 0.
# Predicate terms P, Q of type <e,t> and individual terms x, y of type e.
P = TypedTerm("P", types.FunType(type_e, type_t))
Q = TypedTerm("Q", types.FunType(type_e, type_t))
x = TypedTerm("x", type_e)
y = TypedTerm("y", type_e)
# Applications built via the factory: t is P(x), t2 is Q(x) (both type t).
t = TypedExpr.factory(P, x)
t2 = TypedExpr.factory(Q, x)
# shorter way: t = te("P(x_e)")
ltx_print(P, t2)
# propositional variable
p = TypedTerm("p", type_t)
# λp_t . λx_e . (P(x) & p), a function of type <t,<e,t>>.
pmw_test1 = LFun(type_t, LFun(type_e, t & p, "x"), "p")
# λx_e . (P(x) & Q(x))
pmw_test1b = LFun(type_e, t & t2, "x")
ltx_print(pmw_test1, pmw_test1.type,
pmw_test1b, pmw_test1b.type,
t2)
# Well-typed application (t2 has type t) followed by beta reduction.
pmw_test1(t2).reduce()
# Ill-typed: x has type e but the outer lambda wants type t; this raises
# TypeMismatch (see the traceback captured below).
pmw_test1(x) # function is type <t<et>>
--------------------------------------------------------------------------- TypeMismatch Traceback (most recent call last) <ipython-input-20-ddac926bdda2> in <module>() ----> 1 pmw_test1(x) # function is type <t<et>> /Users/advil/Projects/lambda/lamb/meta.py in __call__(self, *args) 1416 1417 call + reduce is equivalent to apply, for an LFun""" -> 1418 return TypedExpr(self, *args) 1419 1420 def unsafe_variables(fun, arg): /Users/advil/Projects/lambda/lamb/meta.py in __init__(self, op, *args) 115 #if arg.type != op.argtype and not arg.type.undetermined: 116 if unify_f is None: --> 117 raise TypeMismatch(op, arg, "Lambda term+arg expression") #TODO: do I always want to check types here? 118 self.type = unify_r #op.returntype 119 #TODO is unification here the right thing? TypeMismatch: Type mismatch: '(λ p). (λ x). (P(x) & p)'/<t,<e,t>> and 'x'/e conflict (mode: Lambda term+arg expression)
Key class(/mixin): Composable. Some key subclasses:
Key class: CompositionSystem. Describes a set of composition operations.
Future key class: Grammar. Describes a composition system along with a lexicon.
def demo_fa_fun(fun, arg, assignment=None):
    """Type-driven Function Application: apply `fun` to `arg` and beta-reduce.

    Succeeds only when `fun` has a functional type whose input type equals the
    type of `arg`; otherwise raises TypeMismatch.  `assignment` (if given) is
    applied to both contents before application.
    """
    if fun.type.functional() and fun.type.left == arg.type:
        f = fun.content.under_assignment(assignment)
        a = arg.content.under_assignment(assignment)
        return lang.BinaryComposite(fun, arg, f(a).reduce())
    raise TypeMismatch(fun, arg, "Function Application")
# Combinator for Predicate Modification: λf λg λx . f(x) & g(x), with f, g
# restricted to properties (type <e,t>).
pm_op = lang.te("L f_<e,t> : L g_<e,t> : L x_e : f(x) & g(x)")
def demo_pm_fun(fun1, fun2, assignment=None):
    """H&K predicate modification -- restricted to type <et>.

    Conjoins the two property-type contents pointwise via `pm_op` and returns
    the outcome wrapped in a BinaryComposite.  Raises TypeMismatch when either
    argument is not of type <e,t> (checked through the current type system's
    eq_check, not raw equality).
    """
    ts = meta.get_type_system()
    if not (ts.eq_check(fun1.type, types.type_property) and
            ts.eq_check(fun2.type, types.type_property)):
        raise TypeMismatch(fun1, fun2, "Predicate Modification")
    # Apply any assignment before combining; reduce_all performs every
    # available beta reduction on the composed result.
    c1 = fun1.content.under_assignment(assignment)
    c2 = fun2.content.under_assignment(assignment)
    result = pm_op.apply(c1).apply(c2).reduce_all()
    return lang.BinaryComposite(fun1, fun2, result)
# Wrap the demo rules as binary composition operations.  PM is deliberately
# marked non-commutative here, to show multiple composition paths later on.
pm = lang.BinaryCompositionOp("PM", demo_pm_fun, commutative=False)
fa = lang.BinaryCompositionOp("FA", demo_fa_fun)
pa = lang.BinaryCompositionOp("PA", lang.pa_fun, allow_none=True)
# Assemble a composition system over basic types e and t and install it as the
# globally active system.
demo_hk_system = lang.CompositionSystem(rules=[fa, pm, pa], basictypes={type_e, type_t})
lang.set_system(demo_hk_system)
#imp.reload(lamb)
# Re-enable CustomTerm-based rendering of constants (switched off near the top).
meta.constants_use_custom(True)
# A small lexicon: each Item pairs a word with a metalanguage content string.
cat = lang.Item("cat", "L x_e: Cat(x)")
gray = lang.Item("gray", "L x_e: Gray(x)")
# Override the rendering of the 'Gray' constant -- NOTE(review): setting it to
# "is" looks odd for an adjective; confirm against the intended output.
gray.content.args[0].op.custom = "is"
john = lang.Item("John", "John_e")
julius = lang.Item("Julius", "Julius_e")
# Relational 'in'; argument types are left to the type-guessing machinery.
inP = lang.Item("in", "L x: L y: In(y)(x)")
inP.content.args[0].args[0].op.op.custom = "is"
texas = lang.Item("Texas", "Texas_e")
ltx_print(cat,gray, julius, inP, texas)
warning: coerced guessed type t for 'CAT' into <e,t>, to match argument 'x' warning: coerced guessed type t for 'GRAY' into <e,t>, to match argument 'x' warning: coerced guessed type t for 'IN' into <e,t>, to match argument 'y' warning: coerced guessed type t for 'y IN is a' into <e,t>, to match argument 'x'
#pvar = meta.TypedTerm("p", types.type_property)
#isV = lang.Item("is", meta.LFun(types.type_property, pvar, "p"))
# Copula "is" as the identity function over properties: λp_<e,t> . p.
isV = lang.Item("is", lang.te("L p_<e,t> : p"))
isV # identity function over properties
In the purely type-driven mode, composition is triggered by using the '*' operator on a composable.
# '*' triggers type-driven composition between two composables.
inP * texas
# "Julius is in Texas", composed bottom-up through the identity copula.
sentence1 = julius * (isV * (inP * texas))
sentence1
# Render the full derivation as LaTeX.
sentence1.full_trace_latex()
I have temporarily disabled the fact that standard PM is symmetric, to illustrate multiple composition paths:
# PM combines the two property-type meanings; with commutative=False above,
# more than one composition path can show up.
gray * cat
gray * cat * (inP * texas)
a = lang.Item("a", lang.isV.content) # identity function for copula as well
isV * (a * (gray * cat * (inP * texas)))
# Build "Julius is a gray cat in Texas" piecewise.
np = ((gray * cat) * (inP * texas))
vp = (isV * (a * np))
sentence2 = julius * vp
sentence2
# Direct subject + copula composition -- presumably fails to compose (type e
# vs. <<e,t>,<e,t>>); TODO confirm from the output.
julius * isV
sentence2.full_trace_latex()
# Inspect individual composition results and their step-by-step trees.
sentence1.results[0]
sentence1.results[0].latex_step_tree()
sentence2.results[0].latex_step_tree()
One of the infamous examples from Heim and Kratzer (names different):
# Relational adjective 'fond' plus a vacuous 'of' (identity over individuals).
fond = lang.Item("fond", "L x_e : L y_e : Fond(y)(x)")
ofP = lang.Item("of", "L x_e : x")
# "Julius is a [np] fond of John"; note np is reused from the earlier cell.
sentence3 = julius * (lang.isV * (a * (np * (fond * (ofP * lang.john)))))
sentence3
warning: coerced guessed type t for 'FOND' into <e,t>, to match argument 'y' warning: coerced guessed type t for 'y FOND is a' into <e,t>, to match argument 'x'
# Step-tree rendering of the first successful composition result.
sentence3.results[0].latex_step_tree()
The Composite class subclasses nltk.Tree, and so supports the things that class does. E.g. []-based paths:
# Composites support nltk.Tree-style []-indexing; this path picks out a subtree
# and renders its derivation.
parse_tree3 = sentence3.results[0]
parse_tree3[0][1][1].latex_step_tree()
Some rudimentary support for traces.
# Binders are content-less Items named by their index; a Trace carries an index
# and (optionally) an explicit type.
binder = lang.Item("23", None)
binder2 = lang.Item("5", None)
# NOTE: rebinds t and t2 from the earlier TypedExpr cell.
t = lang.Trace(23, types.type_e)
t2 = lang.Trace(5)
ltx_print(t, t2, binder)
((t * gray))
# The PA rule (allow_none=True above) handles the content-less binders; the two
# derivations differ only in binder order.
b1 = (binder * (binder2 * (t * (lang.inP * t2))))
b2 = (binder2 * (binder * (t * (lang.inP * t2))))
ltx_print(b1, b2)
b1.full_trace_latex()
b1.results[0].latex_step_tree()
Current work: implementing tree-based computation, and top-down/deferred computation
# Compose over a bare syntactic tree with placeholder leaves.
# NOTE: rebinds t2 from the trace example above.
t2 = Tree("S", ["NP", "VP"])
r2 = lang.hk3_system.compose(t2)
r2.results[2].latex_step_tree() #._repr_latex_()
from lamb.tree_mini import Tree # hacked package to work with python 3
t = Tree("S", [cat, "VP"]) # this is a tree with a missing VP. Exploit the fact that Composites are trees and nodes can be Items in lamb.
local_tree = lang.tfa_l.build_local(t)
local_tree # need to implement ipython output routines for nltk.Tree.
#lang.hk3_system.compose(t)
ltx_print(local_tree[0], local_tree[0].type, "missing VP:", local_tree[1], local_tree[1].type) # type not yet inferred
r = lang.hk3_system.compose(t)
r
r = lang.tfa_l(t)
ltx_print(r,r.type)
l = lang.tfa_r(t)
l
Longer term:
eval
, and is generally less ad-hoc. (This is an issue inherited from AIMA-python's logic.py.)