Conclude AI chapter
This commit is contained in:
parent 7285832779
commit 41c6409ebe
@@ -8,6 +8,7 @@
#+PANDOC_OPTIONS: csl:assets/ieee.csl
#+PANDOC_OPTIONS: pdf-engine:xelatex
#+PANDOC_OPTIONS: top-level-division:chapter
#+PANDOC_OPTIONS: mathjax:t
#+PANDOC_METADATA: link-citations:t

* Abstract

@@ -114,13 +115,27 @@ An artificial neuron is a model of a biological neuron; each neuron recei
#+NAME: fig:artificial-neuron
[[./assets/figures/artificial-neuron.png]]

The activation process can be expressed as a mathematical model:

\begin{equation}
y = f \left( \sum\limits_{i=0}^{n} w_{i} x_{i} - T \right)
\end{equation}

where $y$ is the output of the node, $f$ is the activation function, $w_i$ is the weight of the input $x_{i}$, and $T$ is the threshold value. cite:Zou2009

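As an illustration of the equation above, the following is a minimal sketch of a single artificial neuron, assuming Python with NumPy; the inputs, weights, threshold, and the step activation are arbitrary example values, not taken from the chapter.

#+BEGIN_SRC python
import numpy as np

def artificial_neuron(x, w, T, f):
    """Single artificial neuron: y = f(sum_i w_i * x_i - T)."""
    return f(np.dot(w, x) - T)

# One common choice for the activation function f: a step function.
def step(a):
    return 1.0 if a >= 0 else 0.0

# Illustrative example values: two inputs, their weights, and a threshold.
x = np.array([0.5, 1.0])
w = np.array([0.8, 0.4])
T = 0.6

# 0.8*0.5 + 0.4*1.0 - 0.6 = 0.2 >= 0, so the neuron fires and outputs 1.0.
print(artificial_neuron(x, w, T, step))
#+END_SRC
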
An artificial neural network (ANN) is a network of layers of artificial neurons. An ANN consists of an input layer, hidden layers, and an output layer. The neurons in one layer are connected, fully or partially, to the neurons in the next layer. Feedback connections to previous layers are also possible. cite:book:80129 The typical structure of an ANN is the following:

\clearpage

#+CAPTION: Structure of an artificial neural network cite:book:80129
#+ATTR_HTML: :height 30% :width 50%
#+NAME: fig:neural-network
[[./assets/figures/neural-network.png]]
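
To complement the figure, here is a minimal sketch of the layered, fully connected structure described above, again assuming Python with NumPy; the layer sizes, random weights, and sigmoid activation are illustrative choices only.

#+BEGIN_SRC python
import numpy as np

def sigmoid(a):
    # A common activation function; any f from the neuron model could be used.
    return 1.0 / (1.0 + np.exp(-a))

def forward(x, layers):
    """Forward pass through fully connected layers.

    Each layer is a (W, b) pair: row i of W holds the weights of neuron i,
    and b[i] plays the role of the threshold T (with opposite sign).
    """
    for W, b in layers:
        x = sigmoid(W @ x + b)
    return x

rng = np.random.default_rng(0)
# Illustrative architecture: 3 inputs -> 4 hidden neurons -> 2 outputs.
layers = [
    (rng.normal(size=(4, 3)), rng.normal(size=4)),  # input layer -> hidden layer
    (rng.normal(size=(2, 4)), rng.normal(size=2)),  # hidden layer -> output layer
]

print(forward(np.array([0.1, 0.5, -0.3]), layers))
#+END_SRC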
|
The basic principles of artificial neural networks were first formulated in 1943, and the perceptron, historically perhaps the first artificial neuron, was proposed in 1958. cite:book:2610592 However, these models did not become popular until the mid-1980s, when the /back-propagation/ algorithm was reinvented. cite:book:771224

Today, advances both in /hardware/ computing power, especially in graphics cards (GPU) cite:Cireşan2010, and in the availability of large /datasets/ cite:book:771224 have given rise to /Deep Learning/.

** Towards Deep Learning
* State of the art
** Bioinformatics

BIN Dissertation.pdf
Binary file not shown.

@@ -561,7 +561,7 @@
year = 2010,
series = {Prentice Hall Series in Artificial Intelligence},
edition = {3rd},
pages = {38-45, 48-49, 55-56}
}

@article{McCarthy_Minsky_Rochester_Shannon_2006,

@@ -599,3 +599,66 @@
edition = 2,
pages = {39-40}
}

@Inbook{Zou2009,
author = "Zou, Jinming and Han, Yi and So, Sung-Sau",
editor = "Livingstone, David J.",
title = "Overview of Artificial Neural Networks",
bookTitle = "Artificial Neural Networks: Methods and Applications",
year = 2009,
publisher = "Humana Press",
address = "Totowa, NJ",
pages = "14--22",
abstract = "The artificial neural network (ANN), or simply neural network, is a machine learning method evolved from the idea of simulating the human brain. The data explosion in modern drug discovery research requires sophisticated analysis methods to uncover the hidden causal relationships between single or multiple responses and a large set of properties. The ANN is one of many versatile tools to meet the demand in drug discovery modeling. Compared to a traditional regression approach, the ANN is capable of modeling complex nonlinear relationships. The ANN also has excellent fault tolerance and is fast and highly scalable with parallel processing. This chapter introduces the background of ANN development and outlines the basic concepts crucially important for understanding more sophisticated ANN. Several commonly used learning methods and network setups are discussed briefly at the end of the chapter.",
isbn = "978-1-60327-101-1",
doi = "10.1007/978-1-60327-101-1_2",
url = "https://doi.org/10.1007/978-1-60327-101-1_2"
}

@book{book:2610592,
title = {Principles of artificial neural networks},
author = {Graupe, Daniel},
publisher = {World Scientific Publ},
isbn = {9789814522731,9814522732},
year = 2013,
edition = {3. ed},
pages = {28-31}
}

@Article{Cireşan2010,
author = {Cire{\c{s}}an, Dan Claudiu and Meier, Ueli and Gambardella, Luca Maria and Schmidhuber, J{\"u}rgen},
title = {Deep, Big, Simple Neural Nets for Handwritten Digit Recognition},
journal = {Neural Computation},
year = 2010,
month = {Dec},
day = 01,
volume = 22,
number = 12,
pages = {3207-3220},
abstract = {Good old online backpropagation for plain multilayer perceptrons yields a very low 0.35{\%} error rate on the MNIST handwritten digits benchmark. All we need to achieve this best result so far are many hidden layers, many neurons per layer, numerous deformed training images to avoid overfitting, and graphics cards to greatly speed up learning.},
issn = {0899-7667},
doi = {10.1162/NECO_a_00052},
url = {https://doi.org/10.1162/NECO_a_00052}
}
