@conference {230, title = {Aplicaciones de Internet de las Cosas SIPIA6-Red de Sensores Inal{\'a}mbricos con IPv6}, booktitle = {XV Workshop de Investigadores en Ciencias de la Computaci{\'o}n}, year = {2013}, author = {Mercado, Gustavo and Borgo, Roberto and Gonzalez Antivilo, Francisco and Taffernaberry, Juan Carlos and Diedrichs, Ana and Aguirre, Mat{\'\i}as and Robles, Mar{\'\i}a In{\'e}s and Grunwaldt, Guillermo and Tabacchi, Germ{\'a}n and Tromer, Sebasti{\'a}n and others} } @conference {223, title = {A demo of the PEACH IoT-based frost event prediction system for precision agriculture}, booktitle = {Sensing, Communication, and Networking (SECON), 2016 13th Annual IEEE International Conference on}, year = {2016}, publisher = {IEEE}, organization = {IEEE}, doi = {10.1109/SAHCN.2016.7732963}, url = {https://hal.inria.fr/hal-01311527/document}, author = {Brun-Laguna, Keoma and Diedrichs, Ana Laura and Chaar, Javier Emilio and Dujovne, Diego and Taffernaberry, Juan Carlos and Mercado, Gustavo and Watteyne, Thomas} } @conference {183, title = {Low-power wireless sensor network for frost monitoring in agriculture research}, booktitle = {Biennial Congress of Argentina (ARGENCON), 2014 IEEE}, year = {2014}, publisher = {IEEE}, organization = {IEEE}, address = {Bariloche}, abstract = {

This work presents the development of a wireless sensor network (WSN), based on IEEE 802.15.4, to be used for frost characterization in precision agriculture by measuring temperature. Our key objective is to reduce the power consumption of the network to a minimum, while allowing several measurement points per node and remote monitoring of the sensors{\textquoteright} behaviour. For the communication interface between a WSN node and the sensors, we have developed a serial protocol inspired by SDI-12. Preliminary results show a low-cost and low-power WSN. The user can access and use the data for agronomic research.

}, isbn = {978-1-4799-4270-1}, doi = {10.1109/ARGENCON.2014.6868546}, url = {http://dx.doi.org/10.1109/ARGENCON.2014.6868546}, author = {Diedrichs, Ana Laura and Tabacchi, Germ{\'a}n and Grunwaldt, Guillermo and Pecchia, Mat{\'\i}as and Mercado, Gustavo and Gonzalez Antivilo, Francisco} } @conference {232, title = {RED SIPIA: Red de Sensores Inal{\'a}mbricos para Investigaci{\'o}n Agron{\'o}mica}, booktitle = {XIII Workshop de Investigadores en Ciencias de la Computaci{\'o}n}, year = {2011}, author = {Mercado, Gustavo and Borgo, Roberto and Gonzalez Antivilo, Francisco and Ortiz Uriburu, Gisela and Diedrichs, Ana and Farreras, Pablo and Aguirre, Mat{\'\i}as and Battaglia, Fernando and Tabacchi, Germ{\'a}n and Tromer, Sebasti{\'a}n} } @proceedings {178, title = {Characterization of LQI behavior in WSN for glacier area in Patagonia Argentina}, journal = {Embedded Systems (SASE/CASE), 2013 Fourth Argentine Symposium and Conference on}, year = {2013}, month = {08/2013}, pages = {1--6}, publisher = {IEEE}, address = {Buenos Aires, Argentina}, abstract = {

One of the most important steps before installing a Wireless Sensor Network (WSN) is a prior study of the connectivity constraints that exist in the area to be covered. This study is critical to the final placement of the sensors, with an important impact on the lifetime of the network by reducing consumption, and on its robustness by providing redundancy of paths and sensors. In this paper, we present a summary of the most important aspects of a preliminary empirical study of the Link Quality Indicator (LQI) on different landscapes in the glacier area of Patagonia, Argentina. The landscapes covered varied in geographical structure, with different levels of attenuation and extreme environmental conditions. Through the analysis of the Cumulative Distribution Function (CDF) of the measured LQI values, we can characterize the behavior of four different scenarios and correlate the combined effects of the environmental structure with the distance from the transmitter. The measurements performed were designed to characterize the links at the physical layer, with the purpose of defining models to estimate the Packet Error Rate (PER) for the WSN deployment stage.

}, isbn = {978-1-4799-1098-4}, doi = {10.1109/SASE-CASE.2013.6636777}, url = {http://dx.doi.org/10.1109/SASE-CASE.2013.6636777}, author = {Diedrichs, Ana Laura and Robles, Mar{\'\i}a In{\'e}s and Bromberg, Facundo and Mercado, Gustavo and Dujovne, Diego} } @proceedings {174, title = {Efficient and Robust Independence-Based Markov Network Structure Discovery}, journal = {20th International Joint Conference on Artificial Intelligence (IJCAI)}, year = {2007}, pages = {2431--2436}, publisher = {Morgan Kaufmann Publishers Inc.}, address = {San Francisco, CA}, abstract = {

In this paper we introduce a novel algorithm for the induction of the Markov network structure of a domain from the outcome of conditional independence tests on data. Such algorithms work by successively restricting the set of possible structures until there is only a single structure consistent with the conditional independence tests executed. Existing independence-based algorithms have well-known shortcomings, such as rigidly ordering the sequence of tests they perform, resulting in potential inefficiencies in the number of tests required, and committing fully to the test outcomes, resulting in a lack of robustness in the case of unreliable tests. We address both problems through a Bayesian particle filtering approach, which uses a population of Markov network structures to maintain the posterior probability distribution over them, given the outcomes of the tests performed. Instead of a fixed ordering, our approach greedily selects, at each step, the most informative test from a pool of candidates according to information gain. In addition, it maintains multiple candidate structures weighted by posterior probability, which makes it more robust to errors in the test outcomes. The result is an approximate algorithm (due to the use of particle filtering) that is useful in domains where independence tests are uncertain (such as applications where little data is available) or expensive (such as cases of very large data sets and/or distributed data).

}, url = {http://www.aaai.org/Library/IJCAI/2007/ijcai07-391.php}, author = {Bromberg, Facundo and Margaritis, Dimitris} } @proceedings {176, title = {Efficient Markov network structure discovery using independence tests}, journal = {Proceedings of the SIAM Conference on Data Mining}, year = {2006}, pages = {141--152}, address = {Bethesda, Maryland, USA}, abstract = {

We present two algorithms for learning the structure of a Markov network from discrete data: GSMN and GSIMN. Both algorithms use statistical conditional independence tests on data to infer the structure by successively constraining the set of structures consistent with the results of these tests. GSMN is a natural adaptation of the Grow-Shrink algorithm of Margaritis and Thrun for learning the structure of Bayesian networks. GSIMN extends GSMN by additionally exploiting Pearl{\textquoteright}s well-known properties of conditional independence relations to infer novel independencies from known independencies, thus avoiding the need to perform these tests. Experiments on artificial and real data sets show GSIMN can yield savings of up to 70\% with respect to GSMN, while generating a Markov network with comparable or in several cases considerably improved quality. In addition to GSMN, we also compare GSIMN to a forward-chaining implementation, called GSIMN-FCH, that produces all possible conditional independence results by repeatedly applying Pearl{\textquoteright}s theorems on the known conditional independence tests. The results of this comparison show that GSIMN is nearly optimal in terms of the number of tests it can infer, under a fixed ordering of the tests performed.

}, isbn = {978-0-89871-611-5}, doi = {10.1137/1.9781611972764.13}, url = {http://epubs.siam.org/doi/abs/10.1137/1.9781611972764.13}, author = {Bromberg, Facundo and Margaritis, Dimitris and Honavar, Vasant} } @proceedings {173, title = {Learning Markov Network Structure using Few Independence Tests}, journal = {SIAM Data Mining}, year = {2008}, pages = {680--691}, abstract = {
In this paper we present the Dynamic Grow-Shrink Inference-based Markov network learning algorithm (abbreviated DGSIMN), which improves on GSIMN, the state-of-the-art algorithm for learning the structure of the Markov network of a domain from independence tests on data. DGSIMN, like other independence-based algorithms, works by conducting a series of statistical conditional independence tests toward the goal of restricting the number of possible structures to one, thus inferring that structure as the only possibly correct one. During this process, DGSIMN, like the GSIMN algorithm, uses the axioms that govern the probabilistic independence relation to avoid unnecessary tests, i.e., tests that can be inferred from the results of known ones. This results in both efficiency and reliability advantages over the simple application of statistical tests. However, one weakness of GSIMN is its rigid and heuristic ordering of the execution of tests, which results in potentially inefficient execution. DGSIMN instead uses a principled strategy, dynamically selecting the locally optimal test that is expected to increase the state of our knowledge about the structure the most. This is done by calculating the expected number of independence facts that will become known (through inference) after executing a particular test (before it is actually evaluated on data), and by selecting the one that is expected to maximize the number of such inferences, thus avoiding their potentially expensive evaluation on data. As we demonstrate in our experiments, this results in an overall decrease in the computational requirements of the algorithm, sometimes dramatically, due to the decrease in the number of tests that must be evaluated on data. Experiments show that DGSIMN yields savings of up to 88\% on both sampled and benchmark data while achieving similar or better accuracy in most cases.
}, isbn = {978-1-61197-278-8}, issn = {978-0-89871-654-2}, doi = {10.1137/1.9781611972788.62}, url = {http://epubs.siam.org/doi/abs/10.1137/1.9781611972788.62}, author = {Gandhi, Parichey and Bromberg, Facundo and Margaritis, Dimitris} } @proceedings {225, title = {Sonda de temperatura para uso agr{\'\i}cola}, journal = {CASE Congreso Argentino de Sistemas Embebidos}, year = {2015}, edition = {2015}, isbn = {978-987-45523-3-4}, author = {Gr{\"u}nwaldt, Guillermo and Pecchia, Mat{\'\i}as and Diedrichs, Ana Laura and Tabacchi, Germ{\'a}n and Mercado, Gustavo} } @article {211, title = {{B}lankets {J}oint {P}osterior score for learning {M}arkov network structures}, journal = {International Journal of Approximate Reasoning}, doi = {10.1016/j.ijar.2017.10.018}, year = {2017}, month = {10/2017}, type = {Regular Issue}, abstract = {

Markov networks are extensively used to model complex sequential, spatial, and relational interactions in a wide range of fields. By learning the Markov network independence structure of a domain, more accurate joint probability distributions can be obtained for inference tasks or, more directly, for interpreting the most significant relations among the variables. Recently, several researchers have investigated techniques for automatically learning the structure from data by obtaining the probabilistic maximum-a-posteriori structure given the available data. However, all the approximations proposed decompose the posterior of the whole structure into local sub-problems, by assuming that the posteriors of the Markov blankets of all the variables are mutually independent. In this work, we propose a scoring function for relaxing this assumption. The Blankets Joint Posterior score computes the joint posterior of structures as a joint distribution of the collection of its Markov blankets. Essentially, the whole posterior is obtained by computing the posterior of the blanket of each variable as a conditional distribution that takes into account information from other blankets in the network. We show in our experimental results that the proposed approximation can improve the sample complexity of state-of-the-art competitors when learning complex networks, where the independence assumption between blanket variables is clearly incorrect.

}, keywords = {blankets posterior, irregular structures, Markov network, scoring function, Structure learning}, author = {Schl{\"u}ter, Federico and Strappa, Yanela and Bromberg, Facundo and Milone, Diego H.} } @article {170, title = {Efficient Markov network discovery using particle filters}, journal = {Computational Intelligence}, volume = {25}, year = {2009}, month = {11/2009}, pages = {367{\textendash}394}, abstract = {

In this paper, we introduce an efficient independence-based algorithm for the induction of the Markov network (MN) structure of a domain from the outcomes of independence tests conducted on data. Our algorithm utilizes a particle filter (sequential Monte Carlo) method to maintain a population of MN structures that represent the posterior probability distribution over structures, given the outcomes of the tests performed. This enables us to select, at each step, the maximally informative test to conduct next from a pool of candidates according to information gain, which minimizes the cost of the statistical tests conducted on data. This makes our approach useful in domains where independence tests are expensive, such as cases of very large data sets and/or distributed data. In addition, our method maintains multiple candidate structures weighted by posterior probability, which allows flexibility in the presence of potential errors in the test outcomes.

}, keywords = {graphical model structure learning, Markov networks, particle filters, sequential Monte Carlo}, doi = {10.1111/j.1467-8640.2009.00347.x}, url = {http://onlinelibrary.wiley.com/doi/10.1111/j.1467-8640.2009.00347.x/abstract}, author = {Margaritis, Dimitris and Bromberg, Facundo} } @article {171, title = {Efficient Markov network structure discovery using independence tests}, journal = {Journal of Artificial Intelligence Research}, volume = {35}, year = {2009}, pages = {449{\textendash}484}, abstract = {

We present two algorithms for learning the structure of a Markov network from data: GSMN* and GSIMN. Both algorithms use statistical independence tests to infer the structure by successively constraining the set of structures consistent with the results of these tests. Until very recently, algorithms for structure learning were based on maximum likelihood estimation, which has been proved to be NP-hard for Markov networks due to the difficulty of estimating the parameters of the network, needed for the computation of the data likelihood. The independence-based approach does not require the computation of the likelihood, and thus both GSMN* and GSIMN can compute the structure efficiently (as shown in our experiments). GSMN* is an adaptation of the Grow-Shrink algorithm of Margaritis and Thrun for learning the structure of Bayesian networks. GSIMN extends GSMN* by additionally exploiting Pearl{\textquoteright}s well-known properties of the conditional independence relation to infer novel independences from known ones, thus avoiding the need to perform statistical tests to estimate them. To accomplish this efficiently, GSIMN uses the Triangle theorem, also introduced in this work, which is a simplified version of the set of Markov axioms. Experimental comparisons on artificial and real-world data sets show GSIMN can yield significant savings with respect to GSMN*, while generating a Markov network with comparable or in some cases improved quality. We also compare GSIMN to a forward-chaining implementation, called GSIMN-FCH, that produces all possible conditional independences resulting from repeatedly applying Pearl{\textquoteright}s theorems on the known conditional independence tests. The results of this comparison show that GSIMN, by the sole use of the Triangle theorem, is nearly optimal in terms of the set of independence tests that it infers.

}, doi = {10.1613/jair.2773}, url = {http://www.jair.org/papers/paper2773.html}, author = {Bromberg, Facundo and Margaritis, Dimitris and Honavar, Vasant} } @article {233, title = {Grapevine buds detection and localization in 3D space based on Structure from Motion and 2D image classification}, journal = {Computers in Industry}, volume = {99C}, year = {2018}, month = {04/2018}, pages = {303--312}, chapter = {303}, abstract = {

In viticulture, there are several applications where 3D bud detection and localization in vineyards is a necessary task susceptible to automation: measurement of sunlight exposure, autonomous pruning, bud counting, type-of-bud classification, bud geometric characterization, internode length, and bud development stage. This paper presents a workflow to achieve quality 3D localizations of grapevine buds based on well-known computer vision and machine learning algorithms when provided with images captured in natural field conditions (i.e., natural sunlight and no artificial elements added), during the winter season and using a mobile phone RGB camera. Our pipeline combines the Oriented FAST and Rotated BRIEF (ORB) for keypoint detection, a Fast Local Descriptor for Dense Matching (DAISY) for describing the keypoints, and the Fast Approximate Nearest Neighbor (FLANN) technique for matching keypoints, with the Structure from Motion multi-view scheme for generating consistent 3D point clouds. Next, it uses a 2D scanning window classifier based on Bag of Features and Support Vector Machines for classification of 3D points in the cloud. Finally, Density-Based Spatial Clustering of Applications with Noise (DBSCAN) is applied for 3D bud localization. Our approach resulted in a maximum precision of 1.0 (i.e., no false detections), a maximum recall of 0.45 (i.e., 45\% of the buds detected), and a localization error within the range of 259-554 pixels (corresponding to approximately 3 bud diameters, or 1.5 cm) when evaluated over the whole range of user-given parameters of workflow components.

}, keywords = {computer vision, Grapevine bud detection, Precision viticulture}, issn = {0166-3615}, doi = {10.1016/j.compind.2018.03.033}, url = {https://www.sciencedirect.com/science/article/pii/S0166361517304815}, author = {D{\'\i}az, Carlos Ariel and P{\'e}rez, Diego Sebasti{\'a}n and Miatello, Humberto and Bromberg, Facundo} } @article {172, title = {Improving the reliability of causal discovery from small data sets using argumentation}, journal = {The Journal of Machine Learning Research}, volume = {10}, year = {2009}, month = {02/2009}, pages = {301{\textendash}340}, abstract = {

We address the problem of improving the reliability of independence-based causal discovery algorithms that results from the execution of statistical independence tests on small data sets, which typically have low reliability. We model the problem as a knowledge base containing a set of independence facts that are related through Pearl{\textquoteright}s well-known axioms. Statistical tests on finite data sets may result in errors in these tests and inconsistencies in the knowledge base. We resolve these inconsistencies through the use of an instance of the class of defeasible logics called argumentation, augmented with a preference function, that is used to reason about and possibly correct errors in these tests. This results in a more robust conditional independence test, called an argumentative independence test. Our experimental evaluation shows clear positive improvements in the accuracy of argumentative over purely statistical tests. We also demonstrate significant improvements on the accuracy of causal structure discovery from the outcomes of independence tests both on sampled data from randomly generated causal models and on real-world data sets.

}, keywords = {argumentation, causal Bayesian networks, Independence-based causal discovery, reliability improvement, Structure learning}, url = {http://www.jmlr.org/papers/v10/bromberg09a.html}, author = {Bromberg, Facundo and Margaritis, Dimitris} } @article {224, title = {PEACH: Predicting frost events in peach orchards using IoT technology}, journal = {EAI Endorsed Transactions on the Internet of Things}, year = {2016}, abstract = {

In 2013, 85\% of the peach production in the Mendoza region (Argentina) was lost because of frost. In a couple of hours, farmers can lose everything. Handling a frost event is possible, but it is hard to predict when it is going to happen. The goal of the PEACH project is to predict frost events by analyzing measurements from sensors deployed around an orchard. This article provides an in-depth description of a complete solution we designed and deployed: the low-power wireless network and the back-end system. The low-power wireless network is composed entirely of commercial off-the-shelf devices. We develop a methodology for deploying the network and present the open-source tools to assist with the deployment and to monitor the network. The deployed low-power wireless mesh network is 100\% reliable, with end-to-end latency below 2 s, and over 3 years of battery lifetime. This article discusses how the technology used is the right one for precision agriculture applications.

}, doi = {10.4108/eai.1-12-2016.151711}, url = {http://dx.doi.org/10.4108/eai.1-12-2016.151711}, author = {Watteyne, Thomas and Diedrichs, Ana Laura and Brun-Laguna, Keoma and Chaar, Javier Emilio and Dujovne, Diego and Taffernaberry, Juan Carlos and Mercado, Gustavo} } @article {256, title = {Towards practical 2D grapevine bud detection with fully convolutional networks}, journal = {Computers and Electronics in Agriculture}, volume = {182}, year = {2021}, month = {03/2021}, pages = {105947}, abstract = {

In viticulture, visual inspection of the plant is a necessary task for measuring relevant variables. In many cases, these visual inspections are susceptible to automation through computer vision methods. Bud detection is one such visual task, central for the measurement of important variables such as: bud sunlight exposure, autonomous pruning, bud counting, type-of-bud classification, bud geometric characterization, internode length, bud area, and bud development stage, among others. This paper presents a computer method for grapevine bud detection based on a Fully Convolutional Networks MobileNet architecture (FCN-MN). To validate its performance, this architecture was compared in the detection task with a strong method for bud detection, Scanning Windows (SW) based on a patch classifier, showing improvements over three aspects of detection: segmentation, correspondence identification and localization. The best version of FCN-MN showed a detection F1-measure of 88.6\% (for true positives defined as detected components whose intersection-over-union with the true bud is above 0.5), and false positives that are small and near the true bud. Splits {\textendash}false positives overlapping the true bud{\textendash} showed a mean segmentation precision of 89.3\% (21.7), while false alarms {\textendash}false positives not overlapping the true bud{\textendash} showed a mean pixel area of only 8\% of the area of a true bud, and a distance (between mass centers) of 1.1 true bud diameters. The paper concludes by discussing how these results for FCN-MN would produce sufficiently accurate measurements of bud variables such as bud number, bud area, and internode length, suggesting a good performance in a practical setup.

}, keywords = {computer vision, Fully convolutional network, Grapevine bud detection, Precision viticulture}, issn = {0168-1699}, doi = {10.1016/j.compag.2020.105947}, url = {https://www.sciencedirect.com/science/article/pii/S0168169920331525}, author = {Villegas Marset, Wenceslao and P{\'e}rez, Diego Sebasti{\'a}n and D{\'\i}az, Carlos Ariel and Bromberg, Facundo} } @phdthesis {175, title = {Markov networks structure discovery using independence tests}, type = {Doctor of Philosophy}, year = {2007}, pages = {182}, school = {Iowa State University}, address = {Ames, IA, USA}, abstract = {

We investigate efficient algorithms for learning the structure of a Markov network from data using the independence-based approach. Such algorithms conduct a series of conditional independence tests on data, successively restricting the set of possible structures until there is only a single structure consistent with the outcomes of the conditional independence tests executed (if possible). As Pearl has shown, the instances of the conditional independence relation in any domain are theoretically interdependent, made explicit in his well-known conditional independence axioms. The first couple of algorithms we discuss, GSMN and GSIMN, exploit Pearl{\textquoteright}s independence axioms to reduce the number of tests required to learn a Markov network. This is useful in domains where independence tests are expensive, such as cases of very large data sets or distributed data. Subsequently, we explore how these axioms can be exploited to {\textquotedblleft}correct{\textquotedblright} the outcome of unreliable statistical independence tests, such as in applications where little data is available. We show how the problem of incorrect tests can be mapped to inference in inconsistent knowledge bases, a problem studied extensively in the field of non-monotonic logic. We present an algorithm for inferring independence values based on a sub-class of non-monotonic logics: the argumentation framework. Our results show the advantage of using our approach in the learning of structures, with improvements in the accuracy of learned networks of up to 20\%. As an alternative to logic-based interdependence among independence tests, we also explore probabilistic interdependence. Our algorithm, called PFMN, takes a Bayesian particle filtering approach, using a population of Markov network structures to maintain the posterior probability distribution over them given the outcomes of the tests performed. The result is an approximate algorithm (due to the use of particle filtering) that is useful in domains where independence tests are expensive.

}, isbn = {9780549334941}, url = {http://lib.dr.iastate.edu/rtd/15575/}, author = {Bromberg, Facundo and Margaritis, Dimitris} } @unpublished {255, title = {Deep Learning for 2D Grapevine Bud Detection}, year = {2020}, url = {https://arxiv.org/abs/2008.11872}, author = {Villegas Marset, Wenceslao and P{\'e}rez, Diego S and D{\'\i}az, Carlos A and Bromberg, Facundo} }