@phdthesis {175, title = {Markov networks structure discovery using independence tests}, type = {Doctor of Philosophy}, year = {2007}, pages = {182}, school = {Iowa State University}, address = {Ames, IA, USA}, abstract = {

We investigate efficient algorithms for learning the structure of a Markov network from
data using the independence-based approach. Such algorithms conduct a series of conditional
independence tests on data, successively restricting the set of possible structures until there is
only a single structure consistent with the outcomes of the conditional independence tests
executed (if possible). As Pearl has shown, the instances of the conditional independence relation
in any domain are theoretically interdependent, an interdependence made explicit in his well-known
conditional independence axioms. The first two algorithms we present, GSMN and GSIMN, exploit
Pearl{\textquoteright}s independence axioms to reduce the number of tests required to learn a Markov network.
This is useful in domains where independence tests are expensive, such as those involving very large
data sets or distributed data. Subsequently, we explore how these axioms can be used to
{\textquotedblleft}correct{\textquotedblright} the outcomes of unreliable statistical independence tests, such as in applications where
little data is available. We show how the problem of incorrect tests can be mapped to inference
in inconsistent knowledge bases, a problem studied extensively in the field of non-monotonic
logic. We present an algorithm for inferring independence values based on a subclass of
non-monotonic logics: the argumentation framework. Our results show the advantage of using our
approach in the learning of structures, with improvements in the accuracy of learned networks
of up to 20\%. As an alternative to logic-based interdependence among independence tests,
we also explore probabilistic interdependence. Our algorithm, called PFMN, takes a Bayesian
particle filtering approach, using a population of Markov network structures to maintain the
posterior probability distribution over them given the outcomes of the tests performed. The
result is an approximate algorithm (due to the use of particle filtering) that is useful in domains
where independence tests are expensive.

}, isbn = {9780549334941}, url = {http://lib.dr.iastate.edu/rtd/15575/}, author = {Bromberg, Facundo and Margaritis, Dimitris} }