% Bibliography of Markov-network structure-learning papers (Bromberg / Schlüter group).
% Entries normalized: one field per line, month macros, -- page ranges, bare DOIs,
% "Last, First" author form. Citation keys kept unchanged so existing \cite's still work.

% ICTAI 2013 conference paper (was @conference, a legacy alias of @inproceedings).
@inproceedings{86,
  title     = {Learning {Markov} networks with context-specific independences},
  booktitle = {IEEE 25th International Conference on Tools with Artificial Intelligence (ICTAI)},
  year      = {2013},
  month     = nov,
  doi       = {10.1109/ICTAI.2013.88},
  keywords  = {Algorithm design and analysis, Complexity theory, conditional independences, Context, Context modeling, context-specific independences, CSPC algorithm, Encoding, independence relations, independence-based, independence-based algorithms, independence-based learning approach, Inference algorithms, learning (artificial intelligence), learning structures, log-linear model, machine learning, Markov network structure, Markov networks, Markov processes, Markov random fields, network theory (graphs), Structure learning, undirected graph},
  author    = {Edera, Alejandro and Schl{\"u}ter, Federico and Bromberg, Facundo},
}

% IJAR article; the DOI had been exported into the volume field — moved to doi.
% NOTE(review): no volume/pages known from the original record — add when final issue data is available.
@article{211,
  title    = {{Blankets Joint Posterior} score for learning {Markov} network structures},
  journal  = {International Journal of Approximate Reasoning},
  year     = {2017},
  month    = oct,
  doi      = {10.1016/j.ijar.2017.10.018},
  abstract = {Markov networks are extensively used to model complex sequential, spatial, and relational interactions in a wide range of fields. By learning the Markov network independence structure of a domain, more accurate joint probability distributions can be obtained for inference tasks or, more directly, for interpreting the most significant relations among the variables. Recently, several researchers have investigated techniques for automatically learning the structure from data by obtaining the probabilistic maximum-a-posteriori structure given the available data. However, all the approximations proposed decompose the posterior of the whole structure into local sub-problems, by assuming that the posteriors of the Markov blankets of all the variables are mutually independent. In this work, we propose a scoring function for relaxing such assumption. The Blankets Joint Posterior score computes the joint posterior of structures as a joint distribution of the collection of its Markov blankets. Essentially, the whole posterior is obtained by computing the posterior of the blanket of each variable as a conditional distribution that takes into account information from other blankets in the network. We show in our experimental results that the proposed approximation can improve the sample complexity of state-of-the-art competitors when learning complex networks, where the independence assumption between blanket variables is clearly incorrect.},
  keywords = {blankets posterior, irregular structures, Markov network, scoring function, Structure learning},
  author   = {Schl{\"u}ter, Federico and Strappa, Yanela and Bromberg, Facundo and Milone, Diego H.},
}

% AMAI article; dropped redundant chapter={197} (duplicated the first page) and the
% dx.doi.org url (same target as the doi field).
@article{69,
  title    = {The {IBMAP} approach for {Markov} network structure learning},
  journal  = {Annals of Mathematics and Artificial Intelligence},
  volume   = {72},
  year     = {2014},
  month    = apr,
  pages    = {197--223},
  issn     = {1012-2443},
  doi      = {10.1007/s10472-014-9419-5},
  abstract = {In this work we consider the problem of learning the structure of Markov networks from data. We present an approach for tackling this problem called IBMAP, together with an efficient instantiation of the approach: the IBMAP-HC algorithm, designed for avoiding important limitations of existing independence-based algorithms. These algorithms proceed by performing statistical independence tests on data, trusting completely the outcome of each test. In practice tests may be incorrect, resulting in potential cascading errors and the consequent reduction in the quality of the structures learned. IBMAP contemplates this uncertainty in the outcome of the tests through a probabilistic maximum-a-posteriori approach. The approach is instantiated in the IBMAP-HC algorithm, a structure selection strategy that performs a polynomial heuristic local search in the space of possible structures. We present an extensive empirical evaluation on synthetic and real data, showing that our algorithm outperforms significantly the current independence-based algorithms, in terms of data efficiency and quality of learned structures, with equivalent computational complexities. We also show the performance of IBMAP-HC in a real-world application of knowledge discovery: EDAs, which are evolutionary algorithms that use structure learning on each generation for modeling the distribution of populations. The experiments show that when IBMAP-HC is used to learn the structure, EDAs improve the convergence to the optimum.},
  keywords = {68T05, EDAs, Independence tests, Knowledge discovery, Markov network, Structure learning},
  author   = {Schl{\"u}ter, Federico and Bromberg, Facundo and Edera, Alejandro},
}

% JMLR article; fixed {\textendash} page range and the split keyword "netw orks".
@article{172,
  title    = {Improving the reliability of causal discovery from small data sets using argumentation},
  journal  = {The Journal of Machine Learning Research},
  volume   = {10},
  year     = {2009},
  month    = feb,
  pages    = {301--340},
  url      = {http://www.jmlr.org/papers/v10/bromberg09a.html},
  abstract = {We address the problem of improving the reliability of independence-based causal discovery algorithms that results from the execution of statistical independence tests on small data sets, which typically have low reliability. We model the problem as a knowledge base containing a set of independence facts that are related through Pearl{\textquoteright}s well-known axioms. Statistical tests on finite data sets may result in errors in these tests and inconsistencies in the knowledge base. We resolve these inconsistencies through the use of an instance of the class of defeasible logics called argumentation, augmented with a preference function, that is used to reason about and possibly correct errors in these tests. This results in a more robust conditional independence test, called an argumentative independence test. Our experimental evaluation shows clear positive improvements in the accuracy of argumentative over purely statistical tests. We also demonstrate significant improvements on the accuracy of causal structure discovery from the outcomes of independence tests both on sampled data from randomly generated causal models and on real-world data sets.},
  keywords = {argumentation, causal Bayesian networks, Independence-based causal discovery, reliability improvement, Structure learning},
  author   = {Bromberg, Facundo and Margaritis, Dimitris},
}

% AI Review survey; page range was split across pages={1093}/chapter={1069} — merged.
@article{72,
  title    = {A survey on independence-based {Markov} networks learning},
  journal  = {Artificial Intelligence Review},
  volume   = {42},
  year     = {2012},
  month    = jun,
  pages    = {1069--1093},
  issn     = {1573-7462},
  doi      = {10.1007/s10462-012-9346-y},
  url      = {http://www.springerlink.com/content/e0l3113827341422},
  abstract = {The problem of learning the Markov network structure from data has become increasingly important in machine learning, and in many other application fields. Markov networks are probabilistic graphical models, a widely used formalism for handling probability distributions in intelligent systems. This document focuses on a technology called \emph{independence-based} learning, which allows for the learning of the independence structure of Markov networks from data in an efficient and sound manner, whenever the dataset is sufficiently large, and data is a representative sample of the target distribution. In the analysis of such technology, this work surveys the current state-of-the-art algorithms, discussing its limitations, and posing a series of open problems where future work may produce some advances in the area, in terms of quality and efficiency.},
  keywords = {independence-based, Markov networks, Structure learning, survey},
  author   = {Schl{\"u}ter, Federico},
}