<?xml version="1.0" encoding="UTF-8"?><?xml-model type="application/xml-dtd" href="https://jats.nlm.nih.gov/publishing/1.3/JATS-journalpublishing1-3.dtd"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.3 20210610//EN" "https://jats.nlm.nih.gov/publishing/1.3/JATS-journalpublishing1-3.dtd">
<article xmlns:ali="http://www.niso.org/schemas/ali/1.0/" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" specific-use="Marcalyc 1.3" dtd-version="1.3" article-type="research-article" xml:lang="en">
<front>
<journal-meta>
<journal-id journal-id-type="index">3442</journal-id>
<journal-title-group>
<journal-title specific-use="original" xml:lang="es">TecnoLógicas</journal-title>
</journal-title-group>
<issn pub-type="ppub">0123-7799</issn>
<issn pub-type="epub">2256-5337</issn>
<publisher>
<publisher-name>Instituto Tecnológico Metropolitano</publisher-name>
<publisher-loc>
<country>Colombia</country>
<email>tecnologicas@itm.edu.co</email>
</publisher-loc>
</publisher>
</journal-meta>
<article-meta>
<article-id pub-id-type="art-access-id" specific-use="redalyc">344281653008</article-id>
<article-id pub-id-type="doi">10.22430/22565337.3220</article-id>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Investigación</subject>
</subj-group>
</article-categories>
<title-group>
<article-title xml:lang="en">Image Processing for Laser Impact Detection in Shooting Simulators</article-title>
<trans-title-group>
<trans-title xml:lang="es">Procesamiento de imágenes para la detección de un impacto láser en simuladores de tiro</trans-title>
</trans-title-group>
</title-group>
<contrib-group>
<contrib contrib-type="author" corresp="no">
<contrib-id contrib-id-type="orcid">https://orcid.org/0000-0002-5592-3027</contrib-id>
<name name-style="western">
<surname>García Torres</surname>
<given-names>José Antonio</given-names>
</name>
<xref ref-type="aff" rid="aff1"/>
<email>jose.garciato@esmic.edu.co</email>
</contrib>
<contrib contrib-type="author" corresp="no">
<contrib-id contrib-id-type="orcid">https://orcid.org/0000-0002-4329-565X</contrib-id>
<name name-style="western">
<surname>Guzmán Pérez</surname>
<given-names>Daniel</given-names>
</name>
<xref ref-type="aff" rid="aff2"/>
<email>daniel.guno@esmic.edu.co</email>
</contrib>
<contrib contrib-type="author" corresp="no">
<contrib-id contrib-id-type="orcid">https://orcid.org/0000-0002-4626-4365</contrib-id>
<name name-style="western">
<surname>Rincón Morantes</surname>
<given-names>Jhon Fredy</given-names>
</name>
<xref ref-type="aff" rid="aff3"/>
<email>jhon.rincon@esmic.edu.co</email>
</contrib>
<contrib contrib-type="author" corresp="yes">
<contrib-id contrib-id-type="orcid">https://orcid.org/0000-0002-7926-1822</contrib-id>
<name name-style="western">
<surname>Molina Martínez</surname>
<given-names>Daniel Felipe</given-names>
</name>
<xref ref-type="corresp" rid="corresp1"/>
<xref ref-type="aff" rid="aff4"/>
<email>daniel.molinam@esmic.edu.co</email>
</contrib>
<contrib contrib-type="author" corresp="no">
<contrib-id contrib-id-type="orcid">https://orcid.org/0000-0001-7351-8132</contrib-id>
<name name-style="western">
<surname>García Rodríguez</surname>
<given-names>Cristian Camilo</given-names>
</name>
<xref ref-type="aff" rid="aff5"/>
<email>cristhian.garcia@esmic.edu.co</email>
</contrib>
<contrib contrib-type="author" corresp="no">
<contrib-id contrib-id-type="orcid">https://orcid.org/0000-0002-6179-0101</contrib-id>
<name name-style="western">
<surname>Zamudio Palacios</surname>
<given-names>Jhonnatan Eduardo</given-names>
</name>
<xref ref-type="aff" rid="aff6"/>
<email>jhonatan.zamudio@esmic.edu.co</email>
</contrib>
</contrib-group>
<aff id="aff1">
<institution content-type="original">José Antonio García Torres; Orcid: 0000-0002-5592-3027 Escuela Militar de Cadetes General José María Córdova jose.garciato@esmic.edu.co</institution>
<country country="CO">Colombia</country>
<institution-wrap>
<institution content-type="orgname">Escuela Militar de Cadetes General José María Córdova</institution>
</institution-wrap>
</aff>
<aff id="aff2">
<institution content-type="original">Daniel Guzmán Pérez; Orcid: 0000-0002-4329-565X Escuela Militar de Cadetes General José María Córdova daniel.guno@esmic.edu.co</institution>
<country country="CO">Colombia</country>
<institution-wrap>
<institution content-type="orgname">Escuela Militar de Cadetes General José María Córdova</institution>
</institution-wrap>
</aff>
<aff id="aff3">
<institution content-type="original">Jhon Fredy Rincón Morantes; Orcid: 0000-0002-4626-4365 Escuela Militar de Cadetes General José María Córdova. jhon.rincon@esmic.edu.co</institution>
<country country="CO">Colombia</country>
<institution-wrap>
<institution content-type="orgname">Escuela Militar de Cadetes General José María Córdova</institution>
</institution-wrap>
</aff>
<aff id="aff4">
<institution content-type="original">Daniel Felipe Molina Martínez; Orcid: 0000-0002-7926-1822 Correspondence: daniel.molinam@esmic.edu.co</institution>
<country country="CO">Colombia</country>
<institution-wrap>
<institution content-type="orgname">Escuela Militar de Cadetes General José María Córdova</institution>
</institution-wrap>
</aff>
<aff id="aff5">
<institution content-type="original">Cristian Camilo García Rodríguez; Orcid: 0000-0001-7351-8132 Escuela Militar de Cadetes General José María Córdova cristhian.garcia@esmic.edu.co</institution>
<country country="CO">Colombia</country>
<institution-wrap>
<institution content-type="orgname">Escuela Militar de Cadetes General José María Córdova</institution>
</institution-wrap>
</aff>
<aff id="aff6">
<institution content-type="original">Jhonnatan Eduardo Zamudio Palacios; Orcid: 0000-0002-6179-0101 Escuela Militar de Cadetes General José María Córdova jhonatan.zamudio@esmic.edu.co</institution>
<country country="CO">Colombia</country>
<institution-wrap>
<institution content-type="orgname">Escuela Militar de Cadetes General José María Córdova</institution>
</institution-wrap>
</aff>
<author-notes>
<corresp id="corresp1">
<email>daniel.molinam@esmic.edu.co</email>
</corresp>
</author-notes>
<pub-date pub-type="epub-ppub">
<season>April-June</season>
<year>2025</year>
</pub-date>
<volume>28</volume>
<issue>62</issue>
<fpage>1</fpage>
<lpage>21</lpage>
<history>
<date date-type="received" publication-format="dd mes yyyy">
<day>30</day>
<month>08</month>
<year>2024</year>
</date>
<date date-type="accepted" publication-format="dd mes yyyy">
<day>04</day>
<month>03</month>
<year>2025</year>
</date>
<date date-type="pub" publication-format="dd mes yyyy">
<day>31</day>
<month>03</month>
<year>2025</year>
</date>
</history>
<permissions>
<copyright-statement>Los datos personales incluidos en la presente publicación son propiedad de sus titulares quienes autorizan que los mismos sean tratados conforme lo indica la política de tratamiento de datos del ITM en su Resolución 395 de 2014, como «Políticas para el tratamiento y la protección de datos personales», disponible en su sitio web. Particularmente y para efecto de mediciones y reporte de producción científica, estos datos serán tratados en consonancia con las leyes vigentes en la materia, especialmente la Ley 1581 de 2012 de Colombia y podrán ser compartidos para efectos estadísticos, de medición y en función de las actividades propias de la misión institucional del ITM.</copyright-statement>
<copyright-year>2018</copyright-year>
<copyright-holder>Institución Universitaria ITM</copyright-holder>
<ali:free_to_read/>
<license xlink:href="https://creativecommons.org/licenses/by-nc-sa/4.0/">
<ali:license_ref>https://creativecommons.org/licenses/by-nc-sa/4.0/</ali:license_ref>
<license-p>Esta obra está bajo una Licencia Creative Commons Atribución-NoComercial-CompartirIgual 4.0 Internacional.</license-p>
</license>
</permissions>
<self-uri content-type="html" xlink:href="https://revistas.itm.edu.co/index.php/tecnologicas/article/view/3220">https://revistas.itm.edu.co/index.php/tecnologicas/article/view/3220</self-uri>
<abstract xml:lang="en">
<title>Abstract</title>
<p>Simulation systems play a crucial role in firearms training by offering advantages such as the progressive improvement of shooting skills, reduced logistical costs, ammunition savings, and decreased need for personnel deployment to shooting ranges. A common feature of current systems is the use of wired communication between components, which ensures stability but introduces latency in data transmission. Moreover, wired setups limit their use in outdoor environments due to the lack of access to a power source. This study developed an image-processing-based method to replace live ammunition with a laser-emitting device. The methodology was structured in four phases: (1) system requirements analysis, (2) hardware and software development, (3) system integration with a real firearm, and (4) functional testing in both controlled and open environments. The system incorporates an automatic calibration mechanism that adapts to ambient lighting to ensure accuracy. When the trigger is pulled, the laser activates and projects onto an LCD screen; a camera captures the impact, and an integrated system detects the (x, y) coordinates. As a result, the prototype achieved an accuracy of 95.4% with latency under 80 ms. In conclusion, a portable, wireless system was designed, adaptable to various lighting conditions, consisting of 10 lanes with components specifically designed to integrate with a real firearm—offering a versatile and efficient alternative for training purposes.</p>
</abstract>
<trans-abstract xml:lang="es">
<title>Resumen</title>
<p>Los sistemas de simulación desempeñan un papel crucial en el entrenamiento de tiro, al ofrecer ventajas como la mejora progresiva de las habilidades del tirador, reducción de costos logísticos, ahorro de munición y menor necesidad de despliegue de personal a los polígonos de tiro. Un rasgo común en los sistemas actuales es el uso de comunicación por cable entre componentes, lo cual proporciona estabilidad, pero introduce latencia en la transmisión de datos. Además, las configuraciones cableadas limitan su uso en entornos exteriores por la falta de acceso a una fuente de energía. Este estudio desarrolló un método basado en procesamiento de imágenes para reemplazar la munición real por un dispositivo emisor láser. La metodología se estructuró en cuatro fases: (1) análisis de requisitos del sistema, (2) desarrollo de hardware y software, (3) integración del sistema con un arma de fuego real y (4) pruebas funcionales en ambientes controlados y abiertos. El sistema incorpora un mecanismo de calibración automática que se adapta a la iluminación ambiental para garantizar precisión. Al accionar el gatillo, el láser se activa y proyecta sobre una pantalla LCD; una cámara captura el impacto y un sistema integrado detecta las coordenadas (x, y). Como resultado, el prototipo alcanzó una precisión del 95.4 %, con una latencia inferior a 80 ms. En conclusión, se diseñó un sistema portátil, inalámbrico y adaptable a distintas condiciones de luz, compuesto por 10 pistas con componentes diseñados para integrarse con un arma de fuego real, como alternativa versátil y eficiente para el entrenamiento.</p>
</trans-abstract>
<kwd-group xml:lang="en">
<title>Keywords</title>
<kwd>Embedded systems</kwd>
<kwd>image processing</kwd>
<kwd>lasers</kwd>
<kwd>shooting range</kwd>
<kwd>simulation systems</kwd>
</kwd-group>
<kwd-group xml:lang="es">
<title>Palabras clave</title>
<kwd>Sistemas embebidos</kwd>
<kwd>procesamiento de imágenes</kwd>
<kwd>láseres</kwd>
<kwd>polígono de tiro</kwd>
<kwd>sistemas de simulación</kwd>
</kwd-group>
<counts>
<fig-count count="10"/>
<table-count count="2"/>
<equation-count count="5"/>
<ref-count count="64"/>
</counts>
<custom-meta-group>
<custom-meta>
<meta-name>How to cite / Cómo citar</meta-name>
<meta-value>J. A. García Torres, D. Guzmán Pérez, J. F. Rincón Morantes, D. F. Molina Martínez, C. C. García Rodríguez, and J. E. Zamudio Palacios, “Image Processing for Laser Impact Detection in Shooting Simulators,” <italic>TecnoLógicas</italic>, vol. 28, no. 62, e3220, 2025. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.22430/22565337.3220">https://doi.org/10.22430/22565337.3220</ext-link>
</meta-value>
</custom-meta>
</custom-meta-group>
<custom-meta-group>
<custom-meta>
<meta-name>redalyc-journal-id</meta-name>
<meta-value>3442</meta-value>
</custom-meta>
</custom-meta-group>
</article-meta>
</front>
<body>
<sec>
<title>
<bold>Highlights</bold>
</title>
<p>
<list list-type="simple">
<list-item>
<p>Sistema portátil de simulación de tiro basado en detección láser con procesamiento de imágenes.</p>
</list-item>
<list-item>
<p>El método LID-ESMIC (Laser Impact Detection - Escuela Militar de Cadetes) permite detectar impactos láser en un polígono de tiro con precisión mejorada.</p>
</list-item>
<list-item>
<p>El sistema inalámbrico optimiza el entrenamiento militar sin necesidad de munición real.</p>
</list-item>
<list-item>
<p>Incorpora corrección automática por luz ambiental para garantizar la precisión del impacto.</p>
</list-item>
<list-item>
<p>El diseño modular permite escalar el sistema hasta 10 subsistemas con monitoreo en tiempo real.</p>
</list-item>
</list>
</p>
</sec>
<sec>
<title>
<bold>Highlights</bold>
</title>
<p>
<list list-type="simple">
<list-item>
<p>Portable Shooting Simulation System Based on Laser Detection with Image Processing.</p>
</list-item>
<list-item>
<p>The LID-ESMIC method (Laser Impact Detection – Military Cadet School) enables the detection of laser impacts on a shooting range with improved accuracy.</p>
</list-item>
<list-item>
<p>The wireless system enhances military training without the need for live ammunition.</p>
</list-item>
<list-item>
<p>It includes automatic ambient light correction to ensure impact accuracy.</p>
</list-item>
<list-item>
<p>The modular design allows the system to scale up to 10 subsystems with real-time monitoring.</p>
</list-item>
</list>
</p>
</sec>
<sec sec-type="intro">
<title>
<bold>1. INTRODUCTION</bold>
</title>
<p>The National Army of Colombia, in collaboration with military training academies, is enhancing the military education of future service members, particularly in the field of marksmanship, using innovative simulation technologies and training support systems. These advances aim to improve skills, optimize time and resources, and address key concerns such as ammunition conservation during shooting practice, the additional costs associated with live-fire training, and the increased risk of accidents.</p>
<p>Therefore, the development of a portable system to facilitate rapid shooting practice is justified, integrating as a complementary tool within rigorous training with the Galil 5.56 mm rifle. This system is envisioned as a new resource to reinforce marksmanship training, serving as an additional asset within a comprehensive instructional program to ensure the progressive development of students' skills and practices.</p>
<p>Furthermore, the primary objective of this work is to present the development of the technology implemented in the LID-ESMIC, providing a foundation for future research aimed at assessing the impact of simulator-based training on shooters' performance and their physiological and psychological responses under controlled conditions.</p>
<p>This work proposes a laser impact detection method called “LID-ESMIC,” implemented in a portable shooting practice system known as the Portable Laser Polygon (PLP). In this system, real ammunition is replaced by a laser device that impacts a digital target with Liquid Crystal Display (LCD) technology. These impact signals are captured and stored using image processing techniques to precisely determine the silhouette of the impact point.</p>
<p>The National Army of Colombia operates Instruction, Training, and Retraining Battalions (BITER), with at least one of these battalions present in each of the Army’s divisions and its 26 brigades <xref ref-type="bibr" rid="redalyc_344281653008_ref1">[1]</xref>-<xref ref-type="bibr" rid="redalyc_344281653008_ref3">[3]</xref>. However, the availability of shooting ranges within these units is limited, preventing all soldiers, including officers and non-commissioned officers, from regularly participating in retraining exercises. This limitation underscores the need to develop alternative and complementary systems that improve both the frequency and quality of marksmanship training within the institution <xref ref-type="bibr" rid="redalyc_344281653008_ref4">[4]</xref>.</p>
<p>
<xref ref-type="fig" rid="gf1">Figure 1A</xref> shows one of the ten (10) subsystems of the PLP developed by the research group in Engineering and Simulation from the Military School of Cadets General José María Córdova <xref ref-type="bibr" rid="redalyc_344281653008_ref5">[5]</xref>, <xref ref-type="bibr" rid="redalyc_344281653008_ref6">[6]</xref>. Each subsystem is made up of a digital target and elements that are attached to the weapon (Galil ACE 23 rifle): i) a power supplier with a snap-in system for the rifle, ii) an electromechanical recoil system, and iii) a laser device adapted for assembly on the picatinny rail (<xref ref-type="fig" rid="gf1">Figure 1B</xref>). It is worth mentioning that the ten subsystems can be used simultaneously with administration software.</p>
<p>
<fig id="gf1">
<label>Figure 1.</label>
<caption>
<title>System parts. A) Views from the target of the Portable Laser Polygon, a red circle indicates the camera location used to capture images during a shooting practice. B) Elements arranged to the Galil ACE 23 rifle, (1) electromechanical recoil system, (2) trigger, (3) power supplier, (4) laser coupling, (5) headphones to provide auditory immersion during a shooting practice.</title>
</caption>
<alt-text>Figure 1.  System parts. A) Views from the target of the Portable Laser Polygon, a red circle indicates the camera location used to capture images during a shooting practice. B) Elements arranged to the Galil ACE 23 rifle, (1) electromechanical recoil system, (2) trigger, (3) power supplier, (4) laser coupling, (5) headphones to provide auditory immersion during a shooting practice.</alt-text>
<graphic xlink:href="344281653008_gf2.png" position="anchor" orientation="portrait">
<alt-text>Figure 1.  System parts. A) Views from the target of the Portable Laser Polygon, a red circle indicates the camera location used to capture images during a shooting practice. B) Elements arranged to the Galil ACE 23 rifle, (1) electromechanical recoil system, (2) trigger, (3) power supplier, (4) laser coupling, (5) headphones to provide auditory immersion during a shooting practice.</alt-text>
</graphic>
<attrib>Source: own elaboration.</attrib>
</fig>
</p>
<p>In general terms, the trajectory of a projectile is represented by a laser light beam, allowing the system to detect impact coordinates on the LCD screen. A calibrated camera continuously scans the target, capturing a set of three (3) images associated with the shooting instant <xref ref-type="bibr" rid="redalyc_344281653008_ref7">[7]</xref>. Once the images are captured, the system detects the laser impact coordinates (x, y) and transmits them via a wireless communication protocol (802.11 bg) for further analysis and evaluation.</p>
<p>At an international level, various laser shooting simulation systems with image processing have been developed. However, these systems present limitations that affect their accuracy and applicability in military training. Recent research has analyzed human performance in virtual reality shooting simulators and their potential application in military training <xref ref-type="bibr" rid="redalyc_344281653008_ref72">[8]</xref>. However, in the study mentioned<xref ref-type="bibr" rid="redalyc_344281653008_ref9"> [9]</xref>, these systems rely on video games with limited interaction with visual and auditory stimuli, making it difficult to faithfully replicate real combat and training conditions.</p>
<p>Unlike these approaches, LID-ESMIC not only integrates an impact detection system but also incorporates an electromechanical recoil mechanism, providing a more realistic shooting experience using a real rifle. Furthermore, its portable design allows the system to adapt to any real-world scenario, ensuring its implementation in various operational environments without the need for a fixed infrastructure <xref ref-type="bibr" rid="redalyc_344281653008_ref72">[8]</xref>.</p>
<p>Other studies have explored how brightness and contrast levels in virtual reality simulations can induce motion sickness <xref ref-type="bibr" rid="redalyc_344281653008_ref10">[10] </xref>and the implementation of physiological measurements in realistic shoot/no-shoot simulations<xref ref-type="bibr" rid="redalyc_344281653008_ref11"> [11]</xref>. The Laser Shot Simulator, widely used in military and law enforcement training, is based on sensors placed on the screen or a weapon for impact detection, which limits its adaptability to different training conditions <xref ref-type="bibr" rid="redalyc_344281653008_ref11">[11]</xref>. Similarly, other systems such as the Laser Ammo Smokeless Range 2.0 and the IPN Laser Capture Shooting Simulator present constraints in haptic feedback and the fidelity of real shooting <xref ref-type="bibr" rid="redalyc_344281653008_ref12">[12]</xref>, <xref ref-type="bibr" rid="redalyc_344281653008_ref13">[13]</xref>. Recent research has explored improvements in impact detection through advanced computer vision algorithms <xref ref-type="bibr" rid="redalyc_344281653008_ref14">[14]</xref>, multisensory feedback training systems <xref ref-type="bibr" rid="redalyc_344281653008_ref9">[9],</xref> and predictive models to optimize shot detection<xref ref-type="bibr" rid="redalyc_344281653008_ref15"> [15]</xref>, <xref ref-type="bibr" rid="redalyc_344281653008_ref16">[16]</xref>. Additionally, studies have analyzed the relationship between virtual reality and military shooting training<xref ref-type="bibr" rid="redalyc_344281653008_ref17"> [17]</xref>, the impact of brightness and contrast variation in shooting simulations<xref ref-type="bibr" rid="redalyc_344281653008_ref73"> [18]</xref>, and the use of embedded cameras and image processing techniques to detect laser impacts in shooting simulators with high accuracy<xref ref-type="bibr" rid="redalyc_344281653008_ref19"> [19]</xref>. 
Augmented reality training systems and optoelectronic techniques have also been developed to improve laser impact detection in simulators <xref ref-type="bibr" rid="redalyc_344281653008_ref20">[20]</xref>, <xref ref-type="bibr" rid="redalyc_344281653008_ref21">[21]</xref>. Furthermore, some studies have evaluated training systems based on low-cost motion sensors <xref ref-type="bibr" rid="redalyc_344281653008_ref12">[12]</xref> and the development of training pistols with laser simulation <xref ref-type="bibr" rid="redalyc_344281653008_ref13">[13]</xref>, <xref ref-type="bibr" rid="redalyc_344281653008_ref22">[22]</xref>.</p>
<p>LID-ESMIC aims to overcome these limitations by integrating computer vision and an electromechanical recoil system, providing a more realistic shooting experience with the Galil ACE 23 rifle. Unlike other simulators that require controlled lighting conditions to ensure effective laser detection, studies have shown that natural light can affect accuracy in some systems<xref ref-type="bibr" rid="redalyc_344281653008_ref14"> [14]</xref>. LID-ESMIC has been designed to operate under various lighting conditions without compromising its performance, making it more adaptable for use in various operating environments. In contrast to more modern systems that require robust and stable infrastructure, LID-ESMIC has been specifically designed for implementation in training and retraining battalions, where practicality and rapid deployment are essential. Many of these units operate as mobile battalions deployed in rural areas for strategic missions. Due to the nature of these operations, they lack the infrastructure necessary to install large-scale simulation systems, as their primary mission is to maintain operational mobility <xref ref-type="bibr" rid="redalyc_344281653008_ref10">[10]</xref>.</p>
<p>While other simulation systems may require large screens, high-performance workstations, projectors, or advanced development engines to emulate realistic environments, LID-ESMIC stands out for its operational simplicity and ease of deployment. Its design enables instructors and shooters to conduct shooting practice without relying on fixed infrastructure, ensuring both safety and efficiency in training. Moreover, unlike other reviewed studies, which require dark rooms to ensure shooting effectiveness due to the interference of natural light with laser detection, LID-ESMIC has been developed to function under varying lighting conditions, allowing its use in open spaces without compromising detection accuracy. This represents a significant advantage compared to more complex solutions, which may be challenging to implement in mobile and constantly shifting military environments.</p>
<p>Image processing requires high-performance computing, which means having good memory and processing resources. The Raspberry Pi 3 B Model board was selected as the central process unit, in charge of detecting the laser impact. It has a Quad-Core 1.2 GHz Broadcom BCM2837 64-bit Central Process Unit, 1 GB RAM, BCM43438 wireless LAN, and the Operating System Raspbian GNU/Linux. It can be programmed using Python language <xref ref-type="bibr" rid="redalyc_344281653008_ref23">[23]</xref>. The hardware includes a specific connector for the Raspberry camera <xref ref-type="bibr" rid="redalyc_344281653008_ref24">[24]</xref>, which is specially designed to operate with this device; the configuration, operation, and algorithms were developed with the OpenCV artificial vision library, and the embedded system design was based on general principles for the construction of customized images, as described in the specialized literature<xref ref-type="bibr" rid="redalyc_344281653008_ref25"> [25]</xref>,<xref ref-type="bibr" rid="redalyc_344281653008_ref26"> [26]</xref>.</p>
<p>In the following section, a general methodology is described, and the most relevant technical characteristics of the hardware are shown. The result section will provide the algorithms implemented for laser impact detection. Then, a discussion is made considering the current technology implemented by the Colombian National Army. Finally, the main conclusion will be presented for future references to improve the proposed method.</p>
</sec>
<sec sec-type="methods">
<title>
<bold>2. METHODOLOGY</bold>
</title>
<p>The portable laser range system is based on the development of an electronic prototype capable of emulating the shooting exercises performed by Colombian soldiers or security and defense professionals. Therefore, this project corresponds to applied research, as its objective is grounded in the implementation of technical concepts such as electronics, Wi-Fi communication, and image processing, with the aim of recreating safe scenarios that facilitate the training and instruction processes of shooting ranges. Consequently, research can be defined as having an experimental approach <xref ref-type="bibr" rid="redalyc_344281653008_ref27">[27]</xref>, as it is not based on theory or qualitative models, but rather employs existing systems for the development of new technology that contributes significantly to practical training <xref ref-type="bibr" rid="redalyc_344281653008_ref28">[28]</xref>.</p>
<p>Research focused on designing and building a device capable of adapting to any terrain and weather conditions, since end users are security professionals who frequently perform their duties in remote areas, often far from cities or regions with limited access to the Internet and electrical power from distribution networks. For this reason, the PLR (Portable Laser Range) became an essential tool, capable of ensuring the continuity and consistency of training sessions without relying on an external power source or communication systems. This enables the implementation of a simulation device that meets validation procedures and has been tested by experts in the field <xref ref-type="bibr" rid="redalyc_344281653008_ref29">[29]</xref>.</p>
<p>During project development, four phases were considered aligned with the main research objective (see <xref ref-type="fig" rid="gf2">Figure 2</xref>). These phases were defined as follows: (1) Study of device requirements, (2) Electronic and software development, (3) Adaptation of the components developed to the weapon, and (4) Functionality testing.</p>
<p>
<fig id="gf2">
<label>Figure 2.</label>
<caption>
<title>Methodology block diagram</title>
</caption>
<alt-text>Figure 2. Methodology block diagram</alt-text>
<graphic xlink:href="344281653008_gf3.png" position="anchor" orientation="portrait">
<alt-text>Figure 2. Methodology block diagram</alt-text>
</graphic>
<attrib>Source: own elaboration.</attrib>
</fig>
</p>
<p>In the first stage, a review of current technologies and a consultation with experts was conducted to identify key differentiators that would ensure the system's optimal performance and adaptability to the needs of locally employed exercises. This was essential due to the necessity of conducting exercises in open fields and under complex weather conditions. Additionally, technical variables were considered according to regulations and the knowledge of experienced professionals.</p>
<p>In phase 2, based on the captured data, the design and construction of electronic components and software were started. A system was used to enable communication between the laser and the target <xref ref-type="bibr" rid="redalyc_344281653008_ref30">[30]</xref>, with a maximum range of 800 meters. Power sources were devised to ensure the execution of the exercises, and simultaneously, software development and image processing were carried out to capture data via screens that serve as shooting silhouettes <xref ref-type="bibr" rid="redalyc_344281653008_ref74">[31]</xref>, <xref ref-type="bibr" rid="redalyc_344281653008_ref32">[32]</xref>.</p>
<p>In stage 3, the developed components shown in <xref ref-type="fig" rid="gf1">Figure 1</xref> were adapted; the aim is for the user to become accustomed to the weight, control, and reaction of the weapon so that, when exposed to a real scenario, they have the experience of operating the actual components. The implementation of these mechanisms seeks to approximate the user and the instructor to situations as realistic as possible in a controlled environment. Finally, in phase 4, entire system tests were conducted in both closed and open environments to verify its proper functioning <xref ref-type="bibr" rid="redalyc_344281653008_ref33">[33]</xref>. In addition, the device was ensured to meet the standards and regulations for firearm training practices <xref ref-type="bibr" rid="redalyc_344281653008_ref34">[34]</xref>. Furthermore, the operation of the electronic components and software was verified to ensure that the device did not encounter any issues when performing a shooting range test.</p>
<p>In general terms, the LID-ESMIC algorithm is made up of the following stages:</p>
<p>
<list list-type="simple">
<list-item>
<p>1. Calibration of the camera lens: The camera includes a variable focal lens, which should be adjusted. To make the adjustment, a grid pattern was designed to precisely focus on the image.</p>
</list-item>
<list-item>
<p>2. Calibration of the digital target: The characteristics of the camera and its corresponding lens cause the captured image to support two types of distortion: tangential and radial. It is possible to find physical parameters that produce this type of distortion and transform the images to be as real as possible.</p>
</list-item>
<list-item>
<p>3. Image processing: Once the trigger is pulled, three images are captured. After that, processing techniques are applied to adapt and detect the center of the laser impact, where the (x, y) coordinates of the laser impact in the calibrated area are acquired. If in any case the laser beam does not hit the target, the central processing unit (Raspberry PI 3 B Model) will send a “null” coordinate, which means that the screen was not affected.</p>
</list-item>
</list>
</p>
<p>To ensure accurate laser impact detection, calibrating the target was a crucial step. Initially, the camera captured two images at a resolution of 480x640 pixels: one of a fully black screen and another with a reference pattern. These images were processed to identify the working area by applying corrections for radial and tangential distortions. Subsequently, a Contrast-Limited Adaptive Histogram Equalization (CLAHE) <xref ref-type="bibr" rid="redalyc_344281653008_ref30">[30]</xref>, <xref ref-type="bibr" rid="redalyc_344281653008_ref35">[35]</xref> was applied to enhance contrast and edge detection. The Harris corner detection algorithm was used to locate the inner corners of the reference pattern, ensuring a precise calibration of the target <xref ref-type="bibr" rid="redalyc_344281653008_ref36">[36]</xref>. If the calibration failed, the system performed up to five iterations to obtain acceptable results. This calibration process proved robust against varying light conditions (200–400 lumens/m²) <xref ref-type="bibr" rid="redalyc_344281653008_ref37">[37]</xref>.</p>
<p>Once the trigger is activated, three frames are captured at 25 ms intervals using a 640x480 resolution. The images are converted from the RGB color space to HSV, isolating red hues (H = 0-25, H = 330-359) to detect the laser impact. Morphological operations, such as erosion and dilation, refine the shape of the impact, and the centroid is determined for the coordinates (x, y) <xref ref-type="bibr" rid="redalyc_344281653008_ref15">[15]</xref>. The Raspberry Pi 3 B Model processes these data and transmits them over Wi-Fi 802.11ac in a structured format that includes the identification of the shot, the target number, and the address <xref ref-type="bibr" rid="redalyc_344281653008_ref38">[38]</xref>, <xref ref-type="bibr" rid="redalyc_344281653008_ref39">[39]</xref>.</p>
<p>LID-ESMIC was developed using Python language, specifically to be used with a Raspberry Pi 3 B Model board with Raspbian OS (GNU/Linux), using a “Raspberry Pi-Camera”. The Python language offers the advantage of being a multiplatform interpreted language <xref ref-type="bibr" rid="redalyc_344281653008_ref40">[40]</xref>, <xref ref-type="bibr" rid="redalyc_344281653008_ref41">[41]</xref>. The hardware required to achieve laser impact detection on a target is shown in <xref ref-type="fig" rid="gf2">Figure 2</xref>.</p>
<p>
<bold>2.1  Raspberry Pi Camera</bold>
</p>
<p>The Raspberry Pi Camera is a module <xref ref-type="fig" rid="gf3">(Figure 3A)</xref> designed to be connected to the Raspberry Pi via a specific serial interface connector. The camera has an 8-megapixel Sony IMX219 sensor with a fixed lens. It allows taking static pictures of 2592 x 1944 pixels, and it is compatible with the following video formats: 1080p – 30 fps, 720p – 60 fps, 640 x 480 p – 90 fps <xref ref-type="bibr" rid="redalyc_344281653008_ref42">[42]</xref>, <xref ref-type="bibr" rid="redalyc_344281653008_ref43">[43]</xref>.</p>
<p>The Raspberry Pi Camera v2 is integrated in a board size (25 mm x 20 mm x 9 mm), and weighs just over 3 g, making it perfect for mobiles and other applications where weight and size are important <xref ref-type="bibr" rid="redalyc_344281653008_ref44">[44]</xref>.</p>
<p>
<bold>2.2  Raspberry Pi</bold>
</p>
<p>The Raspberry Pi is an 85 x 56 mm minicomputer (<xref ref-type="fig" rid="gf3">Figures 3A</xref> and 3B) based on an ARM processor. The Raspberry Pi 3 Model B has 1 GB of RAM and a quad-core 1.2 GHz Broadcom BCM2837 64-bit CPU <xref ref-type="bibr" rid="redalyc_344281653008_ref4">[4]</xref>. It has four USB ports, an Ethernet port, and a BCM43438 wireless LAN. The module is equipped with an HDMI output for the interface with a screen monitor, a microSD slot, up to 40 general-purpose Input/Output pins, and a switched microUSB power input up to 2.5 A <xref ref-type="bibr" rid="redalyc_344281653008_ref45">[45]</xref>.</p>
<p>
<fig id="gf3">
<label>Figure 3.</label>
<caption>
<title>A) Raspberry Pi-Camera. B) Raspberry Pi 3 Model B. C) Laser Beamshot 1000. D) Screen Gechic 1303H 13.3" resolution 1920x1080 with HDMI.</title>
</caption>
<alt-text>Figure 3. A) Raspberry Pi-Camera. B) Raspberry Pi 3 Model B. C) Laser Beamshot 1000. D) Screen Gechic 1303H 13.3" resolution 1920x1080 with HDMI.</alt-text>
<graphic xlink:href="344281653008_gf4.png" position="anchor" orientation="portrait">
<alt-text>Figure 3. A) Raspberry Pi-Camera. B) Raspberry Pi 3 Model B. C) Laser Beamshot 1000. D) Screen Gechic 1303H 13.3" resolution 1920x1080 with HDMI.</alt-text>
</graphic>
<attrib>Source: <xref ref-type="bibr" rid="redalyc_344281653008_ref46">[46]</xref>.</attrib>
</fig>
</p>
<p>
<bold>2.3  Laser</bold>
</p>
<p>A Beamshot 1000 laser was used, as shown in <xref ref-type="fig" rid="gf3">Figure 3C</xref>. Its dimensions are 69 mm x 19 mm; it has a wavelength of 650 nm (645 ~ 665 nm) at 455 m, and a dot size of 12.7 mm at 9.11 m and 102 mm at 91.11 m <xref ref-type="bibr" rid="redalyc_344281653008_ref47">[47]</xref>.</p>
<p>
<bold>2.4  Screen</bold>
</p>
<p>A GeChic 1303H (<xref ref-type="fig" rid="gf3">Figure 3D</xref>) was used to show the silhouette on which the shooting practice is performed. The screen has a size of 13.3" TFT IPS LCD (16:9), a resolution of 1920 x 1080 / 16.7 million colors (antiglare) with HDMI, VGA input, mini-DP, a typical response time of 14 ms, and a 1080p HDMI video format (60 Hz / 50 Hz), 1080i (60 Hz / 50 Hz), 720p (60 Hz / 50 Hz), with a power supply of 5 V / 2 A with micro-USB input <xref ref-type="bibr" rid="redalyc_344281653008_ref48">[48]</xref>.</p>
</sec>
<sec>
<title>
<bold>3. RESULTS AND DISCUSSION</bold>
</title>
<p>This section presents results related to the calibration and image processing stages of the proposed method.</p>
<p>
<bold>3.1  Lens calibration</bold>
</p>
<p>In general terms, the economic advantages of current cameras come at the cost of a relative distortion of the image, which can be compensated with a lens calibration procedure <xref ref-type="bibr" rid="redalyc_344281653008_ref75">[49]</xref>. Radial and tangential factors are considered to correct the distortion. Radial distortion is shown in “barrel” or “fisheye” effects <xref ref-type="bibr" rid="redalyc_344281653008_ref50">[50]</xref>. The radial factor correction uses <xref ref-type="disp-formula" rid="e1">(1)</xref> and <xref ref-type="disp-formula" rid="e2">(2)</xref>: the position (x, y) of a point of the non-corrected image is transformed, and its position in the corrected image is determined by X<sub>corrected</sub> and Y<sub>corrected</sub>.</p>
<p>
<disp-formula id="e1">
<label>(1)</label>
<graphic xlink:href="344281653008_ee5.png" position="anchor" orientation="portrait">
<alt-text/>
</graphic>
</disp-formula>
</p>
<p>
<disp-formula id="e2">
<label>(2)</label>
<graphic xlink:href="344281653008_ee6.png" position="anchor" orientation="portrait">
<alt-text/>
</graphic>
</disp-formula>
</p>
<p>The taking of non-perfectly parallel images to the image plane produces tangential distortion, which can be corrected with <xref ref-type="disp-formula" rid="e6">(3)</xref> and <xref ref-type="disp-formula" rid="e4">(4)</xref>.</p>
<p>
<disp-formula id="e6">
<label>(3)</label>
<graphic xlink:href="344281653008_ee7.png" position="anchor" orientation="portrait">
<alt-text/>
</graphic>
</disp-formula>
</p>
<p>
<disp-formula id="e4">
<label>(4)</label>
<graphic xlink:href="344281653008_ee8.png" position="anchor" orientation="portrait">
<alt-text/>
</graphic>
</disp-formula>
</p>
<p>Where [k1, k2, k3, p1, p2] are the distortion parameters. The conversion of units can be made with <xref ref-type="disp-formula" rid="e5">(5)</xref>.</p>
<p>
<disp-formula id="e5">
<label>(5)</label>
<graphic xlink:href="344281653008_ee9.png" position="anchor" orientation="portrait">
<alt-text/>
</graphic>
</disp-formula>
</p>
<p>Where f<sub>x</sub> and f<sub>y</sub> are the focal distances in the horizontal (x) and vertical (y) axes, and c<sub>x</sub> and c<sub>y</sub> are the optical centers expressed in pixel coordinates for each axis.</p>
<p>If a common focal distance is used for both axes, then f<sub>y</sub> = f<sub>x</sub> ∗ a. The matrix that contains these four parameters is called the camera matrix <xref ref-type="bibr" rid="redalyc_344281653008_ref51">[51]</xref>. While the distortion coefficients are the same regardless of the camera resolution, the camera matrix must be scaled according to the calibrated resolution <xref ref-type="bibr" rid="redalyc_344281653008_ref76">[52]</xref>.</p>
<p>The calibration aims to determine the distortion parameters and the unit conversion matrix <xref ref-type="bibr" rid="redalyc_344281653008_ref53">[53]</xref>. The calculation of the entire set of parameters can be carried out using basic geometric equations implemented in the OpenCV library <xref ref-type="disp-formula" rid="e6">(3)</xref> and depends on the calibration pattern <xref ref-type="bibr" rid="redalyc_344281653008_ref54">[54]</xref>, <xref ref-type="bibr" rid="redalyc_344281653008_ref26">[26]</xref>. The chess board pattern designed for camera calibration has 10 columns and 5 rows. Initially, snapshots of the selected reference pattern must be captured. Each pattern found results in a new equation, and a predetermined number of pattern snapshots are needed to form an adequate equation system <xref ref-type="bibr" rid="redalyc_344281653008_ref55">[55]</xref>, <xref ref-type="bibr" rid="redalyc_344281653008_ref56">[56]</xref>. Although theoretically only two (02) images of the calibration pattern may suffice, practical scenarios often involve image noise and distortion, which affect accuracy. Therefore, it is advisable to acquire no fewer than ten (10) images of the chessboard from varied perspectives and positions to enhance the reliability and precision of the calibration process <xref ref-type="bibr" rid="redalyc_344281653008_ref56">[56]</xref>.</p>
<p>
<bold>3.2  Target calibration</bold>
</p>
<p>The laser impact detection on the screen requires a calibrated camera to detect the laser impact coordinates as accurately as possible. <xref ref-type="fig" rid="gf4">Figure 4</xref> shows the image with the white reference contour used as a pattern in the calibration procedure.</p>
<p>
<fig id="gf4">
<label>Figure 4.</label>
<caption>
<title>The reference pattern, the white contour, and the red corners are delimited to detect.</title>
</caption>
<alt-text>Figure 4.  The reference pattern, the white contour, and the red corners are delimited to detect.</alt-text>
<graphic xlink:href="344281653008_gf5.png" position="anchor" orientation="portrait">
<alt-text>Figure 4.  The reference pattern, the white contour, and the red corners are delimited to detect.</alt-text>
</graphic>
<attrib>Source: own work.</attrib>
</fig>
</p>
<p>The algorithm implemented demonstrated its effectiveness in precisely detecting the inner white corners of the calibration pattern <xref ref-type="fig" rid="gf4">(Figure 4)</xref>. The system captured and processed two images at a resolution of 480 x 640 pixels, with a 4:3 aspect ratio and an RGB color plane (Red, Green, Blue) <xref ref-type="bibr" rid="redalyc_344281653008_ref57">[57]</xref>: one of a completely black screen and another containing a reference pattern, which were used to establish a calibration framework. A transition time of 500 ms between both captures ensured optimal contrast differentiation. The difference between the two images enabled the generation of a reference image, which was subsequently processed to correct radial and tangential distortions. This adjustment maintained geometric fidelity, preventing alterations to the captured data. Finally, a grayscale conversion was performed, optimizing computational efficiency in subsequent processing steps.</p>
<p>After grayscale conversion, Contrast-Limited Adaptive Histogram Equalization (CLAHE) <xref ref-type="bibr" rid="redalyc_344281653008_ref30">[30]</xref>, <xref ref-type="bibr" rid="redalyc_344281653008_ref35">[35]</xref> was applied to improve local contrast and edge definition within the image. By segmenting the image into multiple sections and applying individual histograms, CLAHE dynamically redistributed brightness values, optimizing visibility in different regions of the reference pattern. Subsequently, bilinear interpolation was employed to remove artifacts at the edges of the calibration templates, further refining the overall accuracy of the impact detection algorithm.</p>
<p>The preprocessing stage successfully enhanced the image contrast, enabling a more precise detection of the reference pattern's inner corners. The Harris corner detection algorithm <xref ref-type="bibr" rid="redalyc_344281653008_ref36">[36]</xref> effectively identified these key points within the calibration pattern, ensuring accurate alignment. However, initial tests revealed the presence of extraneous points that did not correspond to the expected inner corners, requiring the implementation of a thresholding mechanism to refine the detection. By dynamically adjusting this threshold, the system consistently distinguished valid calibration points, optimizing the overall accuracy of the setup.</p>
<p>To further improve the efficiency of the detection process, the image was segmented into four equal sections (320x240 pixels). Each section generated individual templates of ten pixels per side with embedded five-pixel overlays, as illustrated in <xref ref-type="fig" rid="gf5">Figure 5</xref>. Then, a correlation analysis was performed within each section to locate the best matching pattern. In cases where multiple potential matches were found, the system selected the one with the highest correlation score, ensuring a robust and precise calibration process. This segmentation strategy significantly reduced processing time while maintaining detection reliability, reinforcing the adaptability of the system under varying operational conditions.</p>
<p>
<fig id="gf5">
<label>Figure 5.</label>
<caption>
<title>Templates generated to determine the pattern of the reference corners</title>
</caption>
<alt-text>Figure 5. Templates generated to determine the pattern of the reference corners</alt-text>
<graphic xlink:href="344281653008_gf6.png" position="anchor" orientation="portrait">
<alt-text>Figure 5. Templates generated to determine the pattern of the reference corners</alt-text>
</graphic>
<attrib>Source: own work.</attrib>
</fig>
</p>
<p>To improve the accuracy of laser impact detection, the Gaussian defocus algorithm was applied to minimize noise interference. This process used a linear filter to smooth the values of the pixels, ensuring that the output reflected a weighted sum of the input pixels <xref ref-type="bibr" rid="redalyc_344281653008_ref50">[50]</xref>. By reducing noise artifacts, the system effectively improved image quality, leading to a clearer and more reliable identification of impact points. This optimization significantly improved the consistency of the results under different lighting conditions.</p>
<p>In cases where the initial calibration was unsuccessful, the software automatically performed up to five recalibration attempts until a high-quality acquisition was achieved. This iterative process ensured that detected edges were correctly aligned and stored as a reference for subsequent laser impact detection. The ability of the system to self-adjust minimized errors and ensure repeatability across different training environments.</p>
<p>
<xref ref-type="fig" rid="gf6">Figure 6</xref> illustrates the target calibration process under standard lighting conditions (200–400 lumens/m²). The system initially captured two reference images: a fully black screen <xref ref-type="fig" rid="gf6">(Figure 6A)</xref> and an image containing the calibration pattern <xref ref-type="fig" rid="gf6">(Figure 6B)</xref>. By computing the difference between these two images <xref ref-type="fig" rid="gf6">(Figure 6C)</xref>, the algorithm extracted the key features necessary for accurate distortion correction. <xref ref-type="fig" rid="gf6">Figures 6D</xref> and <xref ref-type="fig" rid="gf6">6E</xref> present the results of applying radial and tangential distortion corrections, followed by the CLAHE filter to enhance image contrast. The final binarized image <xref ref-type="fig" rid="gf6">(Figure 6F)</xref> delineated the working area, ensuring precise detection of laser impact. While there were minor noise artifacts, they did not significantly affect the overall accuracy of the calibration process <xref ref-type="bibr" rid="redalyc_344281653008_ref37">[37]</xref>.</p>
<p>
<fig id="gf6">
<label>Figure 6. A)</label>
<caption>
<title>Photograph taken with the Raspberry Pi Cam without the reference shape of the dot. B) Photograph taken with the Raspberry Pi Cam with the reference shape of the dot. C) Difference between the images taken. D) Image with correction for radial and tangential distortion. E) Binarization and application of the CLAHE filter. F) The reference frame is highlighted in green, which indicates that the detection procedure of the work area considered has been successfully calibrated.</title>
</caption>
<alt-text>Figure 6. A)  Photograph taken with the Raspberry Pi Cam without the reference shape of the dot. B) Photograph taken with the Raspberry Pi Cam with the reference shape of the dot. C) Difference between the images taken. D) Image with correction for radial and tangential distortion. E) Binarization and application of the CLAHE filter. F) The reference frame is highlighted in green, which indicates that the detection procedure of the work area considered has been successfully calibrated.</alt-text>
<graphic xlink:href="344281653008_gf7.png" position="anchor" orientation="portrait">
<alt-text>Figure 6. A)  Photograph taken with the Raspberry Pi Cam without the reference shape of the dot. B) Photograph taken with the Raspberry Pi Cam with the reference shape of the dot. C) Difference between the images taken. D) Image with correction for radial and tangential distortion. E) Binarization and application of the CLAHE filter. F) The reference frame is highlighted in green, which indicates that the detection procedure of the work area considered has been successfully calibrated.</alt-text>
</graphic>
<attrib>Source: own work.</attrib>
</fig>
</p>
<p>
<bold>3.3  Image processing</bold>
</p>
<p>Initially, three photograms were captured at 25 ms intervals with 640 x 480 resolution. Once the trigger is activated, a command is sent to the target for camera activation from the power supplier <xref ref-type="fig" rid="gf1">(Figure 1b, 3)</xref> via the 802.11ac Wi-Fi protocol <xref ref-type="bibr" rid="redalyc_344281653008_ref38">[38]</xref>, <xref ref-type="bibr" rid="redalyc_344281653008_ref39">[39]</xref>. The beam light comes on 40 ms after the trigger is pulled and has an on duration of 5 ms.</p>
<p>The camera detects the beam light in two (02) possible scenarios depending on the laser impact location: first, in a unique photogram; or second, the impact covers two (02) photograms, as shown in <xref ref-type="fig" rid="gf7">Figure 7</xref>. In this case, the detection algorithm considers only the first photogram in which the laser impacts the LCD screen, avoiding analyzing beam light trajectories.</p>
<p>
<fig id="gf7">
<label>Figure 7.</label>
<caption>
<title>Camera events of laser detection. “d” corresponds to the duration of each photogram, the capture of each frame represented in time, a red bar represents the moment in which the laser impact is captured and has an activation time of 5 ms</title>
</caption>
<alt-text>Figure 7. Camera events of laser detection. “d” corresponds to the duration of each photogram, the capture of each frame represented in time, a red bar represents the moment in which the laser impact is captured and has an activation time of 5 ms</alt-text>
<graphic xlink:href="344281653008_gf8.png" position="anchor" orientation="portrait">
<alt-text>Figure 7. Camera events of laser detection. “d” corresponds to the duration of each photogram, the capture of each frame represented in time, a red bar represents the moment in which the laser impact is captured and has an activation time of 5 ms</alt-text>
</graphic>
<attrib>Source: own work.</attrib>
</fig>
</p>
<p>In this stage, methods to detect the laser impact coordinates (x, y) were implemented in a dedicated class. Image transformation methods from the RGB plane to HSV (Hue, Saturation, Value) are implemented. The hue (H) ranges are selected to focus on the red color (H = 0-25, H = 330-359). Because the red hue values lie in two (02) sections, an OR operation must be done to create one unique mask. The image is binarized in such a way that the red color is represented as white (laser impact), and the rest of the components are given as black. Morphological operations (erosion and dilation) are applied to highlight the structure of the laser impact. Finally, the contour of the impact detection is drawn, and the coordinates are determined as the centroid <xref ref-type="bibr" rid="redalyc_344281653008_ref32">[32]</xref>.</p>
<p>The laser impact contour is shown in <xref ref-type="fig" rid="gf8">Figure 8A</xref>; once the contour is delimited, it is used to determine the centroid (<xref ref-type="fig" rid="gf8">Figures 8B</xref> and <xref ref-type="fig" rid="gf8">8C</xref>). Finally, the target processing unit (Raspberry Pi model 3b) transmits the data frame via Wi-Fi 802.11ac with the information about detection in the following format: (center in X; center in Y; shot identification; target number; target address). <xref ref-type="fig" rid="gf9">Figure 9</xref> shows the laser impact detection during a shooting practice.</p>
<p>
<fig id="gf8">
<label>Figure 8.</label>
<caption>
<title>Laser impact detection. A) Laser impact contour detection. B) Magnification of the point of the detected laser impact (red color), a yellow contour, and its blue centroid. C) Segmented laser impact detection. It should be noted that when performing a shooting practice, these images are not shown on the LCD screen. They are only used in debug mode</title>
</caption>
<alt-text>Figure 8. Laser impact detection. A) Laser impact contour detection. B) Magnification of the point of the detected laser impact (red color), a yellow contour, and its blue centroid. C) Segmented laser impact detection. It should be noted that when performing a shooting practice, these images are not shown on the LCD screen. They are only used in debug mode</alt-text>
<graphic xlink:href="344281653008_gf9.png" position="anchor" orientation="portrait">
<alt-text>Figure 8. Laser impact detection. A) Laser impact contour detection. B) Magnification of the point of the detected laser impact (red color), a yellow contour, and its blue centroid. C) Segmented laser impact detection. It should be noted that when performing a shooting practice, these images are not shown on the LCD screen. They are only used in debug mode</alt-text>
</graphic>
<attrib>Source: own elaboration.</attrib>
</fig>
</p>
<p>
<fig id="gf9">
<label>Figure 9.</label>
<caption>
<title>Capture of laser impact detection over a given shape</title>
</caption>
<alt-text>Figure 9.  Capture of laser impact detection over a given shape</alt-text>
<graphic xlink:href="344281653008_gf10.png" position="anchor" orientation="portrait">
<alt-text>Figure 9.  Capture of laser impact detection over a given shape</alt-text>
</graphic>
<attrib>Source: own elaboration.</attrib>
</fig>
</p>
<p>The detection results from a sample of fifty (50) laser shots at a simulated distance of 20 m, considering a 1:1 scale, are presented in <xref ref-type="table" rid="gt4">Table 1</xref>. The coordinates detected by the proposed method (Xs and Ys) correspond in all cases to visual inspection.</p>
<p>According to the results, the military instructor can evaluate the shooters accurately, since the silhouette and the table of coordinates record the point where the laser beam, simulating a projectile, hit. As is well known, these silhouettes have different scores around the image, which is why the aim of the shooter is to hit the center of the figure or the parts where the highest score is given.</p>
<p>The PLP system, being a controlled environment, can be used to instruct military personnel in different shooting techniques because it does not expose the physical integrity of the personnel and does not generate extra costs for the training of such practices. Therefore, identifying the points of impact by the method used (Xs, Ys) guarantees an objective and efficient evaluation, where the shooters develop their skills under constant training and feedback reviewed by the instructors.</p>
<p>Finally, the laser impact detection algorithm demonstrated an average accuracy of 95.6 % under controlled lighting conditions (200–400 lumens/m²). At a simulated shooting distance of 20 meters, the system maintained a deviation of ±3 pixels on the x-axis and ±4 pixels on the y-axis <xref ref-type="table" rid="gt4">(Table 1)</xref>.</p>
<p>
<table-wrap id="gt4">
<label>Table 1</label>
<caption>
<title>Detection of laser impact coordinates</title>
</caption>
<alt-text>Table 1  Detection of laser impact coordinates</alt-text>
<alternatives>
<graphic xlink:href="344281653008_gt5.png" position="anchor" orientation="portrait">
<alt-text>Table 1  Detection of laser impact coordinates</alt-text>
</graphic>
<table style="width:254.05pt;border-collapse:collapse;  " id="gt5-526564616c7963">
<thead style="display:none;">
<tr style="display:none;">
<th style="display:none;"/>
</tr>
</thead>
<tbody>
<tr style="height:14.15pt">
<td style="border-top:solid windowtext 1.0pt;   border-left:none;border-bottom:solid windowtext 1.0pt;border-right:none;      padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt" colspan="6">Distance = 5m<bold/>
</td>
</tr>
<tr style="height:14.15pt">
<td style="border:none;border-bottom:solid windowtext 1.0pt;      padding:0cm 5.4pt 0cm 5.4pt;   height:14.15pt">Shot</td>
<td style="border:none;border-bottom:solid windowtext 1.0pt;      padding:0cm 5.4pt 0cm 5.4pt;   height:14.15pt">Xs</td>
<td style="border:none;border-bottom:solid windowtext 1.0pt;      padding:0cm 5.4pt 0cm 5.4pt;   height:14.15pt">Ys</td>
<td style="border:none;border-bottom:solid windowtext 1.0pt;      padding:0cm 5.4pt 0cm 5.4pt;   height:14.15pt">Shot</td>
<td style="border:none;border-bottom:solid windowtext 1.0pt;      padding:0cm 5.4pt 0cm 5.4pt;   height:14.15pt">Xs</td>
<td style="border:none;border-bottom:solid windowtext 1.0pt;      padding:0cm 5.4pt 0cm 5.4pt;   height:14.15pt">Ys</td>
</tr>
<tr style="height:14.15pt">
<td style="border:none;   padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">1</td>
<td style="border:none;   padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">386.133</td>
<td style="border:none;   padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">259.466</td>
<td style="border:none;   padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">26</td>
<td style="border:none;   padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">219.733</td>
<td style="border:none;   padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">208.799</td>
</tr>
<tr style="height:14.15pt">
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">2</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">354.133</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">525.866</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">27</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">381.866</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">342.933</td>
</tr>
<tr style="height:14.15pt">
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">3</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">386.133</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">497.066</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">28</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">430.933</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">259.199</td>
</tr>
<tr style="height:14.15pt">
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">4</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">288.533</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">138.133</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">29</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">426.666</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">187.199</td>
</tr>
<tr style="height:14.15pt">
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">5</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">174.933</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">442.399</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">30</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">356.266</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">244.533</td>
</tr>
<tr style="height:14.15pt">
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">6</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">198.400</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">436.533</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">31</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">458.667</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">456.533</td>
</tr>
<tr style="height:14.15pt">
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">7</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">477.866</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">249.333</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">32</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">403.200</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">461.600</td>
</tr>
<tr style="height:14.15pt">
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">8</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">328.533</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">599.999</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">33</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">189.867</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">615.200</td>
</tr>
<tr style="height:14.15pt">
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">9</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">369.066</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">367.999</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">34</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">514.133</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">287.733</td>
</tr>
<tr style="height:14.15pt">
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">10</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">394.666</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">246.133</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">35</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">445.867</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">202.667</td>
</tr>
<tr style="height:14.15pt">
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">11</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">409.600</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">576.800</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">36</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">437.333</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">208.533</td>
</tr>
<tr style="height:14.15pt">
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">12</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">337.067</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">506.400</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">37</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">313.600</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">346.667</td>
</tr>
<tr style="height:14.15pt">
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">13</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">364.800</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">556.800</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">38</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">254.933</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">176.000</td>
</tr>
<tr style="height:14.15pt">
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">14</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">373.333</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">151.200</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">39</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">277.333</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">544.800</td>
</tr>
<tr style="height:14.15pt">
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">15</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">401.067</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">293.867</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">40</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">371.200</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">595.733</td>
</tr>
<tr style="height:14.15pt">
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">16</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">354.133</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">522.667</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">41</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">420.266</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">238.933</td>
</tr>
<tr style="height:14.15pt">
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">17</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">266.667</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">240.267</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">42</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">335.733</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">396.799</td>
</tr>
<tr style="height:14.15pt">
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">18</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">354.133</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">563.733</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">43</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">320.000</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">622.933</td>
</tr>
<tr style="height:14.15pt">
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">19</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">366.933</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">219.200</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">44</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">199.733</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">272.533</td>
</tr>
<tr style="height:14.15pt">
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">20</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">337.067</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">178.667</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">45</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">301.600</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">402.933</td>
</tr>
<tr style="height:14.15pt">
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">21</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">381.866</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">178.666</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">46</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">471.466</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">556.266</td>
</tr>
<tr style="height:14.15pt">
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">22</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">396.800</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">537.599</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">47</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">217.600</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">243.466</td>
</tr>
<tr style="height:14.15pt">
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">23</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">232.533</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">244.533</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">48</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">230.400</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">274.933</td>
</tr>
<tr style="height:14.15pt">
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">24</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">209.866</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">495.466</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">49</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">251.733</td>
<td style="padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">356.266</td>
</tr>
<tr style="height:14.15pt">
<td style="border:none;border-bottom:solid windowtext 1.0pt;   padding:0cm 5.4pt 0cm 5.4pt;   height:14.15pt">25</td>
<td style="border:none;border-bottom:solid windowtext 1.0pt;   padding:0cm 5.4pt 0cm 5.4pt;   height:14.15pt">390.933</td>
<td style="border:none;border-bottom:solid windowtext 1.0pt;   padding:0cm 5.4pt 0cm 5.4pt;   height:14.15pt">238.666</td>
<td style="border:none;border-bottom:solid windowtext 1.0pt;   padding:0cm 5.4pt 0cm 5.4pt;   height:14.15pt">50</td>
<td style="border:none;border-bottom:solid windowtext 1.0pt;   padding:0cm 5.4pt 0cm 5.4pt;   height:14.15pt">234.666</td>
<td style="border:none;border-bottom:solid windowtext 1.0pt;   padding:0cm 5.4pt 0cm 5.4pt;   height:14.15pt">183.466</td>
</tr>
</tbody>
</table>
</alternatives>
<attrib>Source: own elaboration.</attrib>
</table-wrap>
</p>
<p>A total of 50 laser shots were analyzed to validate the detection system, ensuring that the coordinates (x, y) corresponded precisely to the expected impact locations. The algorithm correctly identified impact points within a 1 cm margin of error in more than 98 % of the cases.</p>
<p>Additionally, the system was tested in both indoor and outdoor environments to evaluate its robustness. In outdoor conditions with natural light, the detection accuracy remained above 92 %, demonstrating its adaptability to real-world operational scenarios. Unlike other systems that require controlled environments with artificial lighting to function optimally, the PLP system exhibited resilience under various lighting conditions.</p>
<p>During the validation phase, the electromechanical recoil simulation was assessed in terms of usability and realism. Test participants reported that the simulated recoil accurately replicated that of the Galil 5.56 mm rifle, contributing to a more immersive and realistic training experience. The feedback from the military instructors indicated that the system successfully replicated real-world shooting scenarios, allowing shooters to practice without live ammunition while maintaining an authentic training environment.</p>
<p>The usability of the system's Graphical User Interface (GUI) <xref ref-type="fig" rid="gf10">(Figure 10) </xref>was also evaluated. The control interface allowed instructors to monitor shooting sessions in real time, track shooter performance, and generate detailed reports on shot accuracy and dispersion patterns. As shown in<xref ref-type="fig" rid="gf10"> Figure 10</xref>, the GUI displayed individual shooter statistics, impact positions on targets, and a summary of training sessions, ensuring an effective feedback mechanism for military personnel.</p>
<p>
<fig id="gf10">
<label>Figure 10.</label>
<caption>
<title>Graphic User Interface to administrate the PLP system of a line shot composed of ten (10) targets</title>
</caption>
<alt-text>Figure 10.  Graphic User Interface to administrate the PLP system of a line shot composed of ten (10) targets</alt-text>
<graphic xlink:href="344281653008_gf11.png" position="anchor" orientation="portrait">
<alt-text>Figure 10.  Graphic User Interface to administrate the PLP system of a line shot composed of ten (10) targets</alt-text>
</graphic>
<attrib>Source: own elaboration.</attrib>
</fig>
</p>
<p>These findings confirm that the PLP system provides a practical and effective alternative to traditional marksmanship training, reducing costs and safety risks while maintaining high-fidelity simulation capabilities. The portability of the system further enhances its applicability in remote training locations where the infrastructure is limited.</p>
<p>
<bold>3.4  Discussion</bold>
</p>
<p>Currently, the Military School of Cadets General José María Córdova has implemented the training system “BeamHit 460 Laser Marksmanship” <xref ref-type="bibr" rid="redalyc_344281653008_ref58">[58]</xref>, which is used in shooting practices for military personnel belonging to the unit, as well as personnel from other national army units. The LTMS provides a capacity for a shooting line consisting of one to ten targets, operated by a computer and an instructor; the practice record includes the generation of data such as time, score, and dispersion for each target.</p>
<p>
<list list-type="simple">
<list-item>
<p>· 460: System for precision shooting at simulated distances between 25-100 m (6.25 m to 25 m real) with the use of several silhouettes pre-established in the software; it can be used with long and short weapons. This system is made up of a line of fire with 10 targets, managed by software to visualize the results.</p>
<p>· Mini Rets: System for reaction shot, using folding targets, made up of lines of 7 to 10 targets, it is possible to configure a maximum of 10 lines each with 7 to 10 targets, all managed by the same computer and software.</p>
<p>· Alt-C: Long weapon system for selected shooters, made up of a target with simulated silhouettes at distances of 50 meters, 100 meters, 150 meters, 200 meters, 250 meters and 300 meters, and administration software for the programming of exercises.</p>
<p>· Sniper: Aimed at high-precision shooters, consisting of a suitcase, several silhouettes, and administration software with ballistic calculations and configuration of external variables such as humidity, wind, and direction, among others; it allows exercises with simulated distances of up to 1500 m.</p>
<p>· Machinegun: System for shooting with machine guns, made up of a suitcase with several silhouettes and administration software for the execution of the exercises.</p>
</list-item>
</list>
</p>
<p>A comparative analysis based on the current simulators described, and the developed system (PLP), is presented in <xref ref-type="table" rid="gt5">Table 2</xref>.</p>
<p>
<table-wrap id="gt5">
<label>Table 2</label>
<caption>
<title>Strength and weakness analysis of practice shooting simulators</title>
</caption>
<alt-text>Table 2  Strength and weakness analysis of practice shooting simulators</alt-text>
<alternatives>
<graphic xlink:href="344281653008_gt6.png" position="anchor" orientation="portrait">
<alt-text>Table 2  Strength and weakness analysis of practice shooting simulators</alt-text>
</graphic>
<table style="border-collapse:collapse;border:none;" id="gt6-526564616c7963">
<thead style="display:none;">
<tr style="display:none;">
<th style="display:none;"/>
</tr>
</thead>
<tbody>
<tr style="   height:14.15pt">
<td style="width:233.95pt;border-top:solid windowtext 1.0pt;   border-left:none;border-bottom:solid windowtext 1.0pt;border-right:none;      padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">Current Technology (Beamhit simulators)</td>
<td style="width:219.4pt;border-top:solid windowtext 1.0pt;   border-left:none;border-bottom:solid windowtext 1.0pt;border-right:none;      padding:0cm 5.4pt 0cm 5.4pt;height:14.15pt">Portable Laser Polygon (PLP)</td>
</tr>
<tr style="height:25.5pt">
<td style="width:233.95pt;border:none;   padding:0cm 5.4pt 0cm 5.4pt;height:25.5pt">Continuous connection is required to the power grid supply</td>
<td style="width:219.4pt;border:none;   padding:0cm 5.4pt 0cm 5.4pt;height:25.5pt">Use of rechargeable batteries, allowing a portable system</td>
</tr>
<tr style="height:36.85pt">
<td style="width:233.95pt;padding:0cm 5.4pt 0cm 5.4pt;height:36.85pt">Electrostatic sensitivity filters are required in data cables.</td>
<td style="width:219.4pt;padding:0cm 5.4pt 0cm 5.4pt;height:36.85pt">No data cables are used for communication, all communication is done through a wireless network</td>
</tr>
<tr style="height:36.85pt">
<td style="width:233.95pt;padding:0cm 5.4pt 0cm 5.4pt;height:36.85pt">Weapon and target are independent, and any target can get shots from any weapon, regardless of the one assigned.</td>
<td style="width:219.4pt;padding:0cm 5.4pt 0cm 5.4pt;height:36.85pt">Weapon and target are part of a synchronized system, which does not interfere with other users in the line shot.</td>
</tr>
<tr style="height:25.5pt">
<td style="width:233.95pt;padding:0cm 5.4pt 0cm 5.4pt;height:25.5pt">Silhouettes according to country of manufacture</td>
<td style="width:219.4pt;padding:0cm 5.4pt 0cm 5.4pt;height:25.5pt">Silhouettes according to the Colombian doctrine</td>
</tr>
<tr style="height:36.85pt">
<td style="width:233.95pt;padding:0cm 5.4pt 0cm 5.4pt;height:36.85pt">Manual configuration of the number of shots according to the shooting practice exercise is required</td>
<td style="width:219.4pt;padding:0cm 5.4pt 0cm 5.4pt;height:36.85pt">The number of shots is set automatically according to the shooting practice exercise defined in the doctrine.</td>
</tr>
<tr style="height:36.85pt">
<td style="width:233.95pt;padding:0cm 5.4pt 0cm 5.4pt;height:36.85pt">Recoil by compressed air or CO<sub>2</sub>, which implies monthly expenses, or the simulators do not have this feature</td>
<td style="width:219.4pt;padding:0cm 5.4pt 0cm 5.4pt;height:36.85pt">Weapon recoil is achieved with electric DC motors (no external expenses)</td>
</tr>
<tr style="height:36.85pt">
<td style="width:233.95pt;border:none;border-bottom:solid windowtext 1.0pt;   padding:0cm 5.4pt 0cm 5.4pt;   height:36.85pt">Maintenance and spare parts are imported</td>
<td style="width:219.4pt;border:none;border-bottom:solid windowtext 1.0pt;   padding:0cm 5.4pt 0cm 5.4pt;   height:36.85pt">Maintenance is carried out by Colombian Army personnel, and most spare parts are obtained in the national market</td>
</tr>
</tbody>
</table>
</alternatives>
<attrib>Source: own elaboration.</attrib>
</table-wrap>
</p>
<p>Based on the information presented in the table, improvements can be observed with respect to simulators with similar characteristics found on the market. As for the administration software, it is possible to select the long weapon exercises established by the Colombian Army. On the other hand, it allows visualization of information about each target, battery levels, real-time results, and the generation of customized reports according to target, shooter, and exercise. <xref ref-type="fig" rid="gf9">Figure 9 </xref>shows the graphic user interface of the administration software.</p>
<p>The PLP system represents a significant advancement in military shooting training compared to other established simulators. Unlike the BeamHit 460 Laser Marksmanship <xref ref-type="bibr" rid="redalyc_344281653008_ref58">[58]</xref> and the VirTra V-100 <xref ref-type="bibr" rid="redalyc_344281653008_ref73">[18]</xref>,<xref ref-type="bibr" rid="redalyc_344281653008_ref59"> [59]</xref>, which require structured environments and fixed infrastructure, the PLP system introduces full portability, operating on rechargeable batteries and a locally functioning wireless Wi-Fi network, making it viable for open-field training without dependence on external power sources or internet connectivity <xref ref-type="bibr" rid="redalyc_344281653008_ref5">[5]</xref>.</p>
<p>Existing research highlights the importance of physiological monitoring to improve shooting accuracy and stress response during training. Studies on heart rate variability in shoot/don't-shoot scenarios indicate that physiological feedback in real time can improve decision making under stress <xref ref-type="bibr" rid="redalyc_344281653008_ref11">[11]</xref>, <xref ref-type="bibr" rid="redalyc_344281653008_ref60">[60]</xref>. However, widely used simulators, such as the VirTra series, do not integrate biometric tracking. The PLP system, due to its adaptable architecture, allows the possible integration of biometric sensors, paving the way for real-time stress assessment during field training <xref ref-type="bibr" rid="redalyc_344281653008_ref11">[11]</xref>, <xref ref-type="bibr" rid="redalyc_344281653008_ref61">[61]</xref>.</p>
<p>A contentious topic in marksmanship training is the efficacy of video game-based simulators. Although VR-based systems improve cognitive response and reaction times, they lack realistic recoil, weapon weight, and environmental factors crucial for military training <xref ref-type="bibr" rid="redalyc_344281653008_ref72">[8]</xref>. Unlike these VR-focused approaches, the PLP system incorporates electromechanical recoil simulation, providing an experience closer to live-fire training. Research on augmented reality-based shooting simulators suggests that while AR improves reaction speed, it still does not offer the tactile feedback and realism necessary for combat readiness<xref ref-type="bibr" rid="redalyc_344281653008_ref17"> [17]</xref>,<xref ref-type="bibr" rid="redalyc_344281653008_ref16"> [16]</xref>.</p>
<p>Another challenge with current simulators is maintenance and operational costs. Systems such as BeamHit and VirTra require imported and specialized components, increasing long-term costs <xref ref-type="bibr" rid="redalyc_344281653008_ref62">[62]</xref>. The PLP system addresses this by utilizing locally sourced spare parts and allowing maintenance by Colombian Army personnel, reducing costs and logistical complexities.</p>
<p>Environmental adaptability is another crucial factor in effective training. Studies on optical tracking in VR simulators indicate that lighting conditions significantly impact accuracy in laser-based training<xref ref-type="bibr" rid="redalyc_344281653008_ref20"> [20]</xref>. While many systems require controlled lighting, the PLP demonstrated consistent performance in diverse lighting conditions, maintaining over 92 % accuracy even in outdoor environments. Research on laser spot detection in shooting simulators further supports the importance of enhanced optical tracking for training accuracy <xref ref-type="bibr" rid="redalyc_344281653008_ref12">[12]</xref>.</p>
<p>The debate over the effectiveness of different shooting simulators continues as emerging research introduces low-cost and optoelectronic-based solutions. For example, studies on optoelectronic tracking in shooting simulators show that precision in laser detection can be improved by improving image processing algorithms <xref ref-type="bibr" rid="redalyc_344281653008_ref20">[20]</xref>. Although these solutions improve tracking capabilities, they often lack real-world applicability due to infrastructure limitations. The PLP system mitigates these issues by incorporating optimized image processing and real-time wireless data transmission over a local Wi-Fi network <xref ref-type="bibr" rid="redalyc_344281653008_ref6">[6]</xref>. Additionally, recent developments in single-board computer-based shooting simulators suggest that cost-effective implementations can still achieve high accuracy levels, in accordance with the PLP approach <xref ref-type="bibr" rid="redalyc_344281653008_ref63">[63]</xref>,<xref ref-type="bibr" rid="redalyc_344281653008_ref64"> [64]</xref>.</p>
<p>Furthermore, previous research on augmented reality (AR) and virtual reality (VR) simulators emphasizes their ability to improve reaction time and decision making skills <xref ref-type="bibr" rid="redalyc_344281653008_ref17">[17]</xref>. However, a key limitation is the lack of tactile feedback and realistic weapon handling, which can hinder complete combat readiness. By integrating physical weapon components, simulated recoil, and real-world field conditions, the PLP ensures that training bridges the gap between virtual and live-fire exercises.</p>
<p>These findings establish the PLP system as a cost-effective, mobile, and highly adaptable alternative to traditional simulators. It enhances realism, field applicability, and affordability, making it a valuable tool for military training. Future research should focus on long-term performance evaluations and the integration of AI-driven biometric analysis to further refine the skill assessment of shooters.</p>
<p>By bridging the gap between virtual training and real-world firearm handling, the PLP system emerges as a pioneering approach to military marksmanship training, offering a comprehensive, field-deployable solution that meets modern training needs.</p>
<p>The PLP is a portable device that allows simulating polygon exercises in real time at a minimum distance of 5 meters, optimizing the consumption of ammunition required for this practice. The system is composed of hardware capable of capturing and transmitting real-time data on exercise performance, as well as software capable of running on different operating systems, which is responsible for processing the data captured during shooting practice. This information travels through a bidirectional wireless link with the ability to be encrypted, e.g., Bluetooth, radio frequency (RF), etc. It also has a calibration system and alerts to adjust lighting, reporting whether conditions are adequate to perform the exercises.</p>
<p>The PLP system is powered by rechargeable batteries, which allows polygon activities in open fields without the need to rely on a constant supply source. Batteries can be adapted to the rifle in the ammunition loading area so as not to lose the feeling of real handling and shooting.</p>
<p>These exercises can be performed individually or in parallel, generating a report of the score and points hit on the silhouette, thanks to the control system that includes a graphical interface where the instructor can monitor the tests. In this way, reports are generated individually or on all shooting ranges. One of the advantages is that it is possible to perform a simultaneous training in a shooting line made up of one or more ranges, which guarantees its simultaneous application in different scenarios.</p>
<p>For the implementation of the device, it is possible to make use of the Galil 5.56 mm, with four components that integrate the PLP shooting system. As described, the projectile supplier is replaced by the casing in the form of this piece to supply energy to the electronic devices. Then, the laser device is installed which will project the light beam on the silhouette, considering that this laser is linked to the trigger. Finally, the butt of the weapon is replaced by the electromechanical system to simulate the recoil at the time of shooting.</p>
<p>Once the elements for the practice are configured and ready, the control software is started for the administration of the exercises. Shooters are assigned to each target or jointly according to the training objectives. This system has indicators of shooting effectiveness; through a green light, where they indicate that they are available to perform the shot, this system has early alerts that indicate the battery level and time elapsed in the test.</p>
</sec>
<sec sec-type="conclusions">
<title>
<bold>4. CONCLUSIONS</bold>
</title>
<p>A laser impact detector ESMIC (LID-ESMIC) was developed, which is integrated into the prototype Portable Laser Polygon of the Military School of Cadets "General José María Córdova". The presented method proposes a lens calibration stage that is primarily responsible for correcting the radial and tangential distortion, thus ensuring an appropriate representation of the coordinates in the image being analyzed. Target calibration is an important component of the PLP's mobility, as it allows operation under different environmental conditions. These processes are carried out automatically every time the target is turned on or a shooting practice is restarted.</p>
<p>In this shot simulator system model, an embedded camera is used to detect the laser impact on a certain silhouette, either by a shooting practice directive (Directive 300-7 National Army) or another specific standard. The camera is attached to the target, and the laser to the weapon. It uses a simple and effective image processing technique on a Raspberry Pi using Python OpenCV to detect and locate the red dot of the laser on the target with high precision and accuracy. We managed to detect the coordinates of the laser point on the white screen.</p>
</sec>
</body>
<back>
<ref-list>
<title>
<bold>REFERENCES</bold>
</title>
<ref id="redalyc_344281653008_ref1">
<label>[1]</label>
<mixed-citation publication-type="thesis">[1] J. A. Granados Ruiz, J. R. Pinzón Fontecha, “Modelo de evaluación para los niveles de instrucción de soldados en los batallones de instrucción y entrenamiento,” Tesis de grado, Escuela Superior de Guerra General Rafael Reyes Prieto, Bogotá, Colombia, 2010. [Online]. Available: <ext-link ext-link-type="uri" xlink:href="https://www.esdegrepositorio.edu.co/handle/20.500.14205/2882">https://www.esdegrepositorio.edu.co/handle/20.500.14205/2882</ext-link>
</mixed-citation>
<element-citation publication-type="thesis">
<person-group person-group-type="author">
<name>
<surname>Granados Ruiz</surname>
<given-names>J. A.</given-names>
</name>
<name>
<surname>Pinzón Fontecha</surname>
<given-names>J. R.</given-names>
</name>
</person-group>
<source>Modelo de evaluación para los niveles de instrucción de soldados en los batallones de instrucción y entrenamiento</source>
<year>2010</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://www.esdegrepositorio.edu.co/handle/20.500.14205/2882">https://www.esdegrepositorio.edu.co/handle/20.500.14205/2882</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref2">
<label>[2]</label>
<mixed-citation publication-type="webpage">[2] J. S. Castellanos Masmela, and C. A. Martínez Puentes, <italic>Propuesta de implementación polígono para cursos de combate en el centro internacional de entrenamiento anfibio</italic> Santiago de Tolú, Sucre: Escuela de Formación de Infantería de Marina, 2023. <ext-link ext-link-type="uri" xlink:href="https://mindefensa.primo.exlibrisgroup.com/discovery/delivery/57MDN_INST:MDN/1237542240007231">https://mindefensa.primo.exlibrisgroup.com/discovery/delivery/57MDN_INST:MDN/1237542240007231</ext-link>
</mixed-citation>
<element-citation publication-type="webpage">
<person-group person-group-type="author">
<name>
<surname>Castellanos Masmela</surname>
<given-names>J. S.</given-names>
</name>
<name>
<surname>Martínez Puentes</surname>
<given-names>C. A.</given-names>
</name>
</person-group>
<source>Propuesta de implementación polígono para cursos de combate en el centro internacional de entrenamiento anfibio</source>
<year>2023</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://mindefensa.primo.exlibrisgroup.com/discovery/delivery/57MDN_INST:MDN/1237542240007231">https://mindefensa.primo.exlibrisgroup.com/discovery/delivery/57MDN_INST:MDN/1237542240007231</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref3">
<label>[3]</label>
<mixed-citation publication-type="webpage">[3] Ejército Nacional de Colombia, “Plan estratégico de transformación ejército del futuro 2042,” ejercito.mil.co Accessed: Feb. 11, 2024. [Online]. Available: <ext-link ext-link-type="uri" xlink:href="https://www.ejercito.mil.co/">https://www.ejercito.mil.co/</ext-link>
</mixed-citation>
<element-citation publication-type="webpage">
<person-group person-group-type="author">
<collab>Ejército Nacional de Colombia</collab>
</person-group>
<source>Plan estratégico de transformación ejército del futuro 2042</source>
<year>2024</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://www.ejercito.mil.co/">https://www.ejercito.mil.co/</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref4">
<label>[4]</label>
<mixed-citation publication-type="thesis">[4] G. H. Acosta Pedreros, and S. A. Guzman Jaimes, “Pertinencia de la implementación de las aulas de entrenamiento táctico simulado para los batallones de instrucción y entrenamiento,” Tesis de grado, Escuela Superior de Guerra General Rafael Reyes Prieto, Bogotá, Colombia, 2013. [Online]. Available: <ext-link ext-link-type="uri" xlink:href="https://esdegrepositorio.edu.co/handle/20.500.14205/3302">https://esdegrepositorio.edu.co/handle/20.500.14205/3302</ext-link>
</mixed-citation>
<element-citation publication-type="thesis">
<person-group person-group-type="author">
<name>
<surname>Acosta Pedreros</surname>
<given-names>G. H.</given-names>
</name>
<name>
<surname>Guzman Jaimes</surname>
<given-names>S. A.</given-names>
</name>
</person-group>
<source>Pertinencia de la implementación de las aulas de entrenamiento táctico simulado para los batallones de instrucción y entrenamiento</source>
<year>2013</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://esdegrepositorio.edu.co/handle/20.500.14205/3302">https://esdegrepositorio.edu.co/handle/20.500.14205/3302</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref5">
<label>[5]</label>
<mixed-citation publication-type="webpage">[5] Arma de fuego adaptada para un simulador de tiro que permite obtener una experiencia real mediante la simulación de retroceso, by I. D. Chavarro Castañeda et al., (2020, Apr 13),<italic> Patent NC2018/0010559</italic>, [Online]. Available: <ext-link ext-link-type="uri" xlink:href="https://sipi.sic.gov.co/sipi/Extra/IP/Mutual/Browse.aspx?sid=638779918406392000">https://sipi.sic.gov.co/sipi/Extra/IP/Mutual/Browse.aspx?sid=638779918406392000</ext-link>
</mixed-citation>
<element-citation publication-type="webpage">
<person-group person-group-type="author">
<name>
<surname>Chavarro Castañeda</surname>
<given-names>I. D.</given-names>
</name>
</person-group>
<source>Patent NC2018/0010559</source>
<year>2020</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://sipi.sic.gov.co/sipi/Extra/IP/Mutual/Browse.aspx?sid=638779918406392000">https://sipi.sic.gov.co/sipi/Extra/IP/Mutual/Browse.aspx?sid=638779918406392000</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref6">
<label>[6]</label>
<mixed-citation publication-type="journal">[6] P. J. Rojas Guevara, “Doctrina Damasco: eje articulador de la segunda gran reforma del Ejército Nacional de Colombia,”<italic> Rev. cient. Gen. José María Córdova.,</italic> vol. 15, no. 19, p. 95, Jan. 2017. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.21830/19006586.78">https://doi.org/10.21830/19006586.78</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Rojas Guevara</surname>
<given-names>P. J.</given-names>
</name>
</person-group>
<article-title>Doctrina Damasco: eje articulador de la segunda gran reforma del Ejército Nacional de Colombia</article-title>
<source>Rev. cient. Gen. José María Córdova</source>
<year>2017</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.21830/19006586.78">https://doi.org/10.21830/19006586.78</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref7">
<label>[7]</label>
<mixed-citation publication-type="webpage">[7] Beamshot, “Multifunction Laser Aiming System<italic>, </italic>Los Ángeles,” beamshot.com. Accessed: Feb. 11, 2024. [Online]. Available: 2024. <ext-link ext-link-type="uri" xlink:href="https://www.beamshot.com/multifunction-laser-aiming-system.htm">https://www.beamshot.com/multifunction-laser-aiming-system.htm</ext-link>
</mixed-citation>
<element-citation publication-type="webpage">
<person-group person-group-type="author">
<collab>Beamshot</collab>
</person-group>
<source>Multifunction Laser Aiming System, Los Ángeles</source>
<year>2024</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://www.beamshot.com/multifunction-laser-aiming-system.htm">https://www.beamshot.com/multifunction-laser-aiming-system.htm</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref72">
<label>[8]</label>
<mixed-citation publication-type="journal">[8] A. Sudiarno et al., “Analysis of human performance and potential application of virtual reality (VR) shooting games as a shooting training simulator for military personnel,” <italic>IJTech</italic>, vol. 15, no. 1, p. 87, 2024. <ext-link ext-link-type="uri" xlink:href="https://search.ebscohost.com/login.aspx?direct=true&amp;profile=ehost&amp;scope=site&amp;authtype=crawler&amp;jrnl=20869614&amp;AN=175217133&amp;h=tiihH%2FFLqrB29y2tKVyneOoU5Fzty6eEfKXmTvi%2BvxW1UHfayAqzzsDvPaLMxqGFn2A8EBJNzCr%2BPSUIwTJftw%3D%3D&amp;crl=c">https://search.ebscohost.com/login.aspx?direct=true&amp;profile=ehost&amp;scope=site&amp;authtype=crawler&amp;jrnl=20869614&amp;AN=175217133&amp;h=tiihH%2FFLqrB29y2tKVyneOoU5Fzty6eEfKXmTvi%2BvxW1UHfayAqzzsDvPaLMxqGFn2A8EBJNzCr%2BPSUIwTJftw%3D%3D&amp;crl=c</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Sudiarno</surname>
<given-names>A.</given-names>
</name>
</person-group>
<article-title>Analysis of human performance and potential application of virtual reality (VR) shooting games as a shooting training simulator for military personnel</article-title>
<source>IJTech</source>
<year>2024</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://search.ebscohost.com/login.aspx?direct=true&amp;profile=ehost&amp;scope=site&amp;authtype=crawler&amp;jrnl=20869614&amp;AN=175217133&amp;h=tiihH%2FFLqrB29y2tKVyneOoU5Fzty6eEfKXmTvi%2BvxW1UHfayAqzzsDvPaLMxqGFn2A8EBJNzCr%2BPSUIwTJftw%3D%3D&amp;crl=c">https://search.ebscohost.com/login.aspx?direct=true&amp;profile=ehost&amp;scope=site&amp;authtype=crawler&amp;jrnl=20869614&amp;AN=175217133&amp;h=tiihH%2FFLqrB29y2tKVyneOoU5Fzty6eEfKXmTvi%2BvxW1UHfayAqzzsDvPaLMxqGFn2A8EBJNzCr%2BPSUIwTJftw%3D%3D&amp;crl=c</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref9">
<label>[9]</label>
<mixed-citation publication-type="journal">[9] L. Wei, H. Zhou, and S. Nahavandi, “Haptically enabled simulation system for firearm shooting training,” <italic>Virtual Real.</italic>, vol. 23, no. 3, pp. 217–228, Sep. 2019. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s10055-018-0349-0">https://doi.org/10.1007/s10055-018-0349-0</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Wei</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Zhou</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Nahavandi</surname>
<given-names>S.</given-names>
</name>
</person-group>
<article-title>Haptically enabled simulation system for firearm shooting training</article-title>
<source>Virtual Real.</source>
<year>2019</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s10055-018-0349-0">https://doi.org/10.1007/s10055-018-0349-0</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref10">
<label>[10]</label>
<mixed-citation publication-type="journal">[10] E. Ugur, B. Ozlem Konukseven, M. Ergen, M. E. Aksoy, and S. Ilgaz Yoner, “Is the brightness-contrast level of virtual reality videos significant for visually induced motion sickness? Experimental real-time biosensor and self-report analysis,” <italic>Frontiers in Virtual Reality, </italic>vol. 5, Aug. 2024. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/frvir.2024.1435049">https://doi.org/10.3389/frvir.2024.1435049</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ugur</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Ozlem Konukseven</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Ergen</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Aksoy</surname>
<given-names>M. E.</given-names>
</name>
<name>
<surname>Ilgaz Yoner</surname>
<given-names>S.</given-names>
</name>
</person-group>
<article-title>Is the brightness-contrast level of virtual reality videos significant for visually induced motion sickness? Experimental real-time biosensor and self-report analysis</article-title>
<source>Frontiers in Virtual Reality</source>
<year>2024</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/frvir.2024.1435049">https://doi.org/10.3389/frvir.2024.1435049</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref11">
<label>[11]</label>
<mixed-citation publication-type="journal">[11] A. T. Biggs, A. E. Jensen, and K. R. Kelly, “Heart rate of fire: exploring direct implementation of physiological measurements in realistic shoot/don’t-shoot simulations,” <italic>Front. Sports Act. Living.</italic>, vol. 6, p. 1444655, Aug. 2024. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fspor.2024.1444655">https://doi.org/10.3389/fspor.2024.1444655</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Biggs</surname>
<given-names>A. T.</given-names>
</name>
<name>
<surname>Jensen</surname>
<given-names>A. E.</given-names>
</name>
<name>
<surname>Kelly</surname>
<given-names>K. R.</given-names>
</name>
</person-group>
<article-title>Heart rate of fire: exploring direct implementation of physiological measurements in realistic shoot/don’t-shoot simulations</article-title>
<source>Front. Sports Act. Living.</source>
<year>2024</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3389/fspor.2024.1444655">https://doi.org/10.3389/fspor.2024.1444655</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref12">
<label>[12]</label>
<mixed-citation publication-type="journal">[12] D. Bogatinov, P. Lameski, V. Trajkovik, and K. M. Trendova, “Firearms training simulator based on low cost motion tracking sensor,” <italic>Multimed. Tools Appl.</italic>, vol. 76, no. 1, pp. 1403–1418, Jan. 2017. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s11042-015-3118-z">https://doi.org/10.1007/s11042-015-3118-z</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bogatinov</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Lameski</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Trajkovik</surname>
<given-names>V.</given-names>
</name>
<name>
<surname>Trendova</surname>
<given-names>K. M.</given-names>
</name>
</person-group>
<article-title>Firearms training simulator based on low cost motion tracking sensor</article-title>
<source>Multimed. Tools Appl.</source>
<year>2017</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s11042-015-3118-z">https://doi.org/10.1007/s11042-015-3118-z</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref13">
<label>[13]</label>
<mixed-citation publication-type="journal">[13] A. Fedaravičius, K. Pilkauskas, E. Slizys, and A. Survila, “Research and development of training pistols for laser shooting simulation system,” <italic>Def. Technol.</italic>, vol. 16, no. 3, pp. 530–534, Jun. 2020. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.dt.2019.06.018">https://doi.org/10.1016/j.dt.2019.06.018</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Fedaravičius</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Pilkauskas</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Slizys</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Survila</surname>
<given-names>A.</given-names>
</name>
</person-group>
<article-title>Research and development of training pistols for laser shooting simulation system</article-title>
<source>Def. Technol.</source>
<year>2020</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.dt.2019.06.018">https://doi.org/10.1016/j.dt.2019.06.018</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref14">
<label>[14]</label>
<mixed-citation publication-type="journal">[14] A. Soetedjo, A. Mahmudi, M. Ashari, and Y. I. Nakhoda, “Detecting Laser Spot In Shooting Simulator Using An Embedded Camera,” <italic>International Journal on Smart Sensing and Intelligent Systems, </italic>vol. 7, no. 1, pp. 1-19, Mar. 2014. <ext-link ext-link-type="uri" xlink:href="https://eprints.itn.ac.id/5304/">https://eprints.itn.ac.id/5304/</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Soetedjo</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Mahmudi</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Ashari</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Nakhoda</surname>
<given-names>Y. I.</given-names>
</name>
</person-group>
<article-title>Detecting Laser Spot In Shooting Simulator Using An Embedded Camera</article-title>
<source>International Journal on Smart Sensing and Intelligent Systems</source>
<year>2014</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://eprints.itn.ac.id/5304">https://eprints.itn.ac.id/5304</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref15">
<label>[15]</label>
<mixed-citation publication-type="book">[15] Z. Wang, Y.-M. M. Hu, and F. Xie, “Optical fiber simulator for shooting and aiming practices,” in <italic>Proceedings Volume 2895, Fiber Optic Sensors V, </italic>Beijing, China, 1996. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1117/12.252196">https://doi.org/10.1117/12.252196</ext-link>
</mixed-citation>
<element-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Wang</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Hu</surname>
<given-names>Y.-M. M.</given-names>
</name>
<name>
<surname>Xie</surname>
<given-names>F.</given-names>
</name>
</person-group>
<source>Proceedings Volume 2895, Fiber Optic Sensors V</source>
<year>1996</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1117/12.252196">https://doi.org/10.1117/12.252196</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref16">
<label>[16]</label>
<mixed-citation publication-type="webpage">[16] Special Pie, “L17Pro Laser Shooting Simulator,” special314.com. Accessed: Feb. 11, 2024. [Online]. Available: <ext-link ext-link-type="uri" xlink:href="https://www.special314.com/sp/85.html?admin_id=1">https://www.special314.com/sp/85.html?admin_id=1</ext-link>
</mixed-citation>
<element-citation publication-type="webpage">
<person-group person-group-type="author">
<collab>Special Pie</collab>
</person-group>
<source>L17Pro Laser Shooting Simulator</source>
<year>2024</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://www.special314.com/sp/85.html?admin_id=1">https://www.special314.com/sp/85.html?admin_id=1</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref17">
<label>[17]</label>
<mixed-citation publication-type="journal">[17] K. Teguh Martono, O. Dwi Nurhayati, and C. Galuhputri Wulwida, “Augmented reality–based shooting simulator system to analysis of virtual distance to real distance using unity 3D,” <italic>Journal of Theoretical and Applied Information Technology</italic>, vol. 95, no. 23, Dec. 2017. <ext-link ext-link-type="uri" xlink:href="https://www.jatit.org/volumes/Vol95No23/2Vol95No23.pdf">https://www.jatit.org/volumes/Vol95No23/2Vol95No23.pdf</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Teguh Martono</surname>
<given-names>K.</given-names>
</name>
<name>
<surname>Dwi Nurhayati</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Galuhputri Wulwida</surname>
<given-names>C.</given-names>
</name>
</person-group>
<article-title>Augmented reality–based shooting simulator system to analysis of virtual distance to real distance using unity 3D</article-title>
<source>Journal of Theoretical and Applied Information Technology</source>
<year>2017</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://www.jatit.org/volumes/Vol95No23/2Vol95No23.pdf">https://www.jatit.org/volumes/Vol95No23/2Vol95No23.pdf</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref73">
<label>[18]</label>
<mixed-citation publication-type="journal">[18] A. Buga et al., “The VirTra V-100 is a test-retest reliable shooting simulator for measuring accuracy/precision, decision-making, and reaction time in civilians, police/SWAT, and military personnel,” <italic>J. Strength Cond. Res.</italic>, vol. 38, no. 10, pp. 1714-1723, Oct. 2024. <ext-link ext-link-type="uri" xlink:href="https://journals.lww.com/nsca-jscr/abstract/2024/10000/the_virtra_v_100_is_a_test_retest_reliable.3.aspx">https://journals.lww.com/nsca-jscr/abstract/2024/10000/the_virtra_v_100_is_a_test_retest_reliable.3.aspx</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Buga</surname>
<given-names>A.</given-names>
</name>
</person-group>
<article-title>The VirTra V-100 is a test-retest reliable shooting simulator for measuring accuracy/precision, decision-making, and reaction time in civilians, police/SWAT, and military personnel</article-title>
<source>J. Strength Cond. Res.</source>
<year>2024</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://journals.lww.com/nsca-jscr/abstract/2024/10000/the_virtra_v_100_is_a_test_retest_reliable.3.aspx">https://journals.lww.com/nsca-jscr/abstract/2024/10000/the_virtra_v_100_is_a_test_retest_reliable.3.aspx</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref19">
<label>[19]</label>
<mixed-citation publication-type="journal">[19] M. Lesaffre, N. Verrier, and M. Gross, “Noise and signal scaling factors in digital holography in weak illumination: relationship with shot noise,” <italic>Appl. Opt.</italic>, vol. 52, no. 1, pp. A81-91, 2013. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1364/AO.52.000A81">https://doi.org/10.1364/AO.52.000A81</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lesaffre</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Verrier</surname>
<given-names>N.</given-names>
</name>
<name>
<surname>Gross</surname>
<given-names>M.</given-names>
</name>
</person-group>
<article-title>Noise and signal scaling factors in digital holography in weak illumination: relationship with shot noise</article-title>
<source>Appl. Opt.</source>
<year>2013</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1364/AO.52.000A81">https://doi.org/10.1364/AO.52.000A81</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref20">
<label>[20]</label>
<mixed-citation publication-type="journal">[20] M. Maciejewski, M. Piszczek, M. Pomianek, and N. Pałka, “Optoelectronic tracking system for shooting simulator - tests in a virtual reality application,” <italic>Photonics Lett. Pol.</italic>, vol. 12, no. 2, p. 61, Jul. 2020. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.4302/plp.v12i2.1025">https://doi.org/10.4302/plp.v12i2.1025</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Maciejewski</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Piszczek</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Pomianek</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Pałka</surname>
<given-names>N.</given-names>
</name>
</person-group>
<article-title>Optoelectronic tracking system for shooting simulator - tests in a virtual reality application</article-title>
<source>Photonics Lett. Pol.</source>
<year>2020</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.4302/plp.v12i2.1025">https://doi.org/10.4302/plp.v12i2.1025</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref21">
<label>[21]</label>
<mixed-citation publication-type="journal">[21] E. dos S. Soares, S. T. Corazza, A. C. Piovesan, R. P. de Azevedo, and S. J. L. Vasconcellos, “Creation, validation, and reliability of a shooting simulator instrument for reaction time evaluation,” <italic>Motriz</italic>, vol. 22, no. 4, pp. 277–282, Dec. 2016. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1590/S1980-6574201600040010">https://doi.org/10.1590/S1980-6574201600040010</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>dos S. Soares</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Corazza</surname>
<given-names>S. T.</given-names>
</name>
<name>
<surname>Piovesan</surname>
<given-names>A. C.</given-names>
</name>
<name>
<surname>de Azevedo</surname>
<given-names>R. P.</given-names>
</name>
<name>
<surname>Vasconcellos</surname>
<given-names>S. J. L.</given-names>
</name>
</person-group>
<article-title>Creation, validation, and reliability of a shooting simulator instrument for reaction time evaluation</article-title>
<source>Motriz</source>
<year>2016</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1590/S1980-6574201600040010">https://doi.org/10.1590/S1980-6574201600040010</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref22">
<label>[22]</label>
<mixed-citation publication-type="journal">[22] A. Shahal, W. Hemmerich, and H. Hecht, “Brightness and contrast do not affect visually induced motion sickness in a passively-flown fixed-base flight simulator,” <italic>Displays</italic>, vol. 44, pp. 5–14, Sep. 2016. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.displa.2016.05.007">https://doi.org/10.1016/j.displa.2016.05.007</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Shahal</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Hemmerich</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Hecht</surname>
<given-names>H.</given-names>
</name>
</person-group>
<article-title>Brightness and contrast do not affect visually induced motion sickness in a passively-flown fixed-base flight simulator</article-title>
<source>Displays</source>
<year>2016</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.displa.2016.05.007">https://doi.org/10.1016/j.displa.2016.05.007</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref23">
<label>[23]</label>
<mixed-citation publication-type="journal">[23] S. van der Walt <italic>et al.</italic>, “scikit-image: image processing in Python,” <italic>PeerJ</italic>, vol. 2, p. e453, Jun. 2014. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.7717/peerj.453">https://doi.org/10.7717/peerj.453</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>van der Walt</surname>
<given-names>S.</given-names>
</name>
</person-group>
<article-title>scikit-image: image processing in Python</article-title>
<source>PeerJ</source>
<year>2014</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.7717/peerj.453">https://doi.org/10.7717/peerj.453</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref24">
<label>[24]</label>
<mixed-citation publication-type="book">[24] R. Serrano Oller, “DSLR Project: Control de càmeres DSLR en temps real mitjançant dispositius tipus RaspberryPi,” Tesi de grau, Universitat De Barcelona, Barcelona, España, 2014. <ext-link ext-link-type="uri" xlink:href="https://diposit.ub.edu/dspace/handle/2445/61663">https://diposit.ub.edu/dspace/handle/2445/61663</ext-link>
</mixed-citation>
<element-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Serrano Oller</surname>
<given-names>R.</given-names>
</name>
</person-group>
<source>DSLR Project: Control de càmeres DSLR en temps real mitjançant dispositius tipus RaspberryPi</source>
<year>2014</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://diposit.ub.edu/dspace/handle/2445/61663">https://diposit.ub.edu/dspace/handle/2445/61663</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref25">
<label>[25]</label>
<mixed-citation publication-type="book">[25] T. Pierre-Jean, and P. Mabacker, “Building our First Poky Image for the Raspberry Pi,” in <italic>Yocto for Raspberry Pi</italic>, Birmingham, Inglaterra: Packt Publishing, 2016, pp. 27-28. <ext-link ext-link-type="uri" xlink:href="https://books.google.com.co/books?hl=es&amp;lr=&amp;id=Bf5vDQAAQBAJ&amp;oi=fnd&amp;pg=PP1&amp;dq=P.-J.+Texier+y+P.+Mabacker,+Yocto+for+Raspberry+Pi+:+Create+Unique+and+Amazing+Projects+by+Using+the+Powerful+Combination+of+Yocto+and+Raspberry+Pi,+Birmingham,+&amp;ots=jlqeoMHBY2&amp;sig=tvVea30YfGWdAeil9VsrkYvAENo&amp;redir_esc=y#v=onepage&amp;q&amp;f=false">https://books.google.com.co/books?hl=es&amp;lr=&amp;id=Bf5vDQAAQBAJ&amp;oi=fnd&amp;pg=PP1&amp;dq=P.-J.+Texier+y+P.+Mabacker,+Yocto+for+Raspberry+Pi+:+Create+Unique+and+Amazing+Projects+by+Using+the+Powerful+Combination+of+Yocto+and+Raspberry+Pi,+Birmingham,+&amp;ots=jlqeoMHBY2&amp;sig=tvVea30YfGWdAeil9VsrkYvAENo&amp;redir_esc=y#v=onepage&amp;q&amp;f=false</ext-link></mixed-citation>
<element-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Pierre-Jean</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Mabacker</surname>
<given-names>P.</given-names>
</name>
</person-group>
<source>Yocto for Raspberry Pi</source>
<year>2016</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://books.google.com.co/books?hl=es&amp;lr=&amp;id=Bf5vDQAAQBAJ&amp;oi=fnd&amp;pg=PP1&amp;dq=P.-J.+Texier+y+P.+Mabacker">https://books.google.com.co/books?hl=es&amp;lr=&amp;id=Bf5vDQAAQBAJ&amp;oi=fnd&amp;pg=PP1&amp;dq=P.-J.+Texier+y+P.+Mabacker</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref26">
<label>[26]</label>
<mixed-citation publication-type="webpage">[26] Intel. <italic>OpenCV - Open Computer Vision Library</italic>. (1999). OpenCV. Accessed: Feb. 11, 2024. [Online]. Available: <ext-link ext-link-type="uri" xlink:href="https://docs.opencv.org/2.4/index.html">https://docs.opencv.org/2.4/index.html</ext-link>.</mixed-citation>
<element-citation publication-type="webpage">
<person-group person-group-type="author">
<collab>Intel</collab>
</person-group>
<source>OpenCV - Open Computer Vision Library</source>
<year>2024</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://docs.opencv.org/2.4/index.html">https://docs.opencv.org/2.4/index.html</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref27">
<label>[27]</label>
<mixed-citation publication-type="book">[27] P. A. Laplante, “Software Engineering: An Overview,” in <italic>Software Engineering for Image Processing Systems</italic>. Boca Ratón, FL, USA: CRC Press, 2003, ch. 1, pp.1-20. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1201/9780203496107">https://doi.org/10.1201/9780203496107</ext-link>
</mixed-citation>
<element-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Laplante</surname>
<given-names>P. A.</given-names>
</name>
</person-group>
<source>Software Engineering for Image Processing Systems.</source>
<year>2003</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1201/9780203496107">https://doi.org/10.1201/9780203496107</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref28">
<label>[28]</label>
<mixed-citation publication-type="webpage">[28] Gechic, “Perfect Match for Human Machine Interface,” icitouchtech.com. Accessed: Feb. 11, 2024. [Online]. Available: <ext-link ext-link-type="uri" xlink:href="https://www.icitouchtech.com/gechic-t151a-touch-monitor-15-inch">https://www.icitouchtech.com/gechic-t151a-touch-monitor-15-inch</ext-link>
</mixed-citation>
<element-citation publication-type="webpage">
<person-group person-group-type="author">
<collab>Gechic</collab>
</person-group>
<source>Perfect Match for Human Machine Interface</source>
<year>2024</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://www.icitouchtech.com/gechic-t151a-touch-monitor-15-inch">https://www.icitouchtech.com/gechic-t151a-touch-monitor-15-inch</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref29">
<label>[29]</label>
<mixed-citation publication-type="journal">[29] J. Weng, P. Cohen, and M. Herniou, “Camera calibration with distortion models and accuracy evaluation,” <italic>IEEE Transactions on Pattern Analysis and Machine Intelligence</italic>, vol. 14, no. 10, pp. 965-980, Oct. 1992. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1109/34.159901">https://doi.org/10.1109/34.159901</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Weng</surname>
<given-names>J.</given-names>
</name>
<name>
<surname>Cohen</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Herniou</surname>
<given-names>M.</given-names>
</name>
</person-group>
<article-title>Camera calibration with distortion models and accuracy evaluation</article-title>
<source>IEEE Transactions on Pattern Analysis and Machine Intelligence</source>
<year>1992</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1109/34.159901">https://doi.org/10.1109/34.159901</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref30">
<label>[30]</label>
<mixed-citation publication-type="journal">[30] T. Grant, A. Rohou, and N. Grigorieff, “cisTEM, user-friendly software for single-particle image processing,” <italic>Elife</italic>, vol. 7, p. e35383, Mar. 2018. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.7554/eLife.35383">https://doi.org/10.7554/eLife.35383</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Grant</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Rohou</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Grigorieff</surname>
<given-names>N.</given-names>
</name>
</person-group>
<article-title>cisTEM, user-friendly software for single-particle image processing</article-title>
<source>Elife</source>
<year>2018</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.7554/eLife.35383">https://doi.org/10.7554/eLife.35383</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref74">
<label>[31]</label>
<mixed-citation publication-type="journal">[31] A. Baranski et al., “MAUI (MBI Analysis User Interface)-An image processing pipeline for Multiplexed Mass Based Imaging,” <italic>PLoS Comput. Biol.</italic>, vol. 17, no. 4, p. e1008887, 2021. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1371/journal.pcbi.1008887">https://doi.org/10.1371/journal.pcbi.1008887</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Baranski</surname>
<given-names>A.</given-names>
</name>
</person-group>
<article-title>MAUI (MBI Analysis User Interface)-An image processing pipeline for Multiplexed Mass Based Imaging</article-title>
<source>PLoS Comput. Biol.</source>
<year>2021</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1371/journal.pcbi.1008887">https://doi.org/10.1371/journal.pcbi.1008887</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref32">
<label>[32]</label>
<mixed-citation publication-type="journal">[32] S. Bash, B. Johnson, W. Gibbs, T. Zhang, A. Shankaranarayanan, and L. N. Tanenbaum, “Deep learning image processing enables 40% faster spinal MR scans which match or exceed quality of standard of care: A prospective multicenter multireader study,” <italic>Clin. Neuroradiol.</italic>, vol. 32, no. 1, pp. 197–203, 2022. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s00062-021-01121-2">https://doi.org/10.1007/s00062-021-01121-2</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Bash</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Johnson</surname>
<given-names>B.</given-names>
</name>
<name>
<surname>Gibbs</surname>
<given-names>W.</given-names>
</name>
<name>
<surname>Zhang</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Shankaranarayanan</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Tanenbaum</surname>
<given-names>L. N.</given-names>
</name>
</person-group>
<article-title>Deep learning image processing enables 40% faster spinal MR scans which match or exceed quality of standard of care: A prospective multicenter multireader study</article-title>
<source>Clin. Neuroradiol.</source>
<year>2022</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1007/s00062-021-01121-2">https://doi.org/10.1007/s00062-021-01121-2</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref33">
<label>[33]</label>
<mixed-citation publication-type="journal">[33] T. Pietzsch, S. Preibisch, P. Tomancák, and S. Saalfeld, “ImgLib2--generic image processing in Java,” <italic>Bioinformatics</italic>, vol. 28, no. 22, pp. 3009–3011, Nov. 2012. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1093/bioinformatics/bts543">https://doi.org/10.1093/bioinformatics/bts543</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Pietzsch</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Preibisch</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Tomancák</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Saalfeld</surname>
<given-names>S.</given-names>
</name>
</person-group>
<article-title>ImgLib2--generic image processing in Java</article-title>
<source>Bioinformatics</source>
<year>2012</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1093/bioinformatics/bts543">https://doi.org/10.1093/bioinformatics/bts543</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref34">
<label>[34]</label>
<mixed-citation publication-type="webpage">[34] Ejército Nacional de Colombia, “Dirección de Aplicación de Normas de Transparencia del Ejército,” ejercito.mil.co. Accessed: Feb. 11, 2024. [Online]. Available: <ext-link ext-link-type="uri" xlink:href="https://www.ejercito.mil.co/direccion-de-aplicacion-de-normas-de-transparencia-del-ejercito/">https://www.ejercito.mil.co/direccion-de-aplicacion-de-normas-de-transparencia-del-ejercito/</ext-link></mixed-citation>
<element-citation publication-type="webpage">
<person-group person-group-type="author">
<collab>Ejército Nacional de Colombia</collab>
</person-group>
<source>Dirección de Aplicación de Normas de Transparencia del Ejército</source>
<year>2024</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://www.ejercito.mil.co/direccion-de-aplicacion-de-normas-de-transparencia-del-ejercito">https://www.ejercito.mil.co/direccion-de-aplicacion-de-normas-de-transparencia-del-ejercito</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref35">
<label>[35]</label>
<mixed-citation publication-type="journal">[35] Z. Zhang, Y. Liu, and W.-H. Peng, “Side information-driven image coding for hybrid machine–human vision,” <italic>EURASIP J. Image Video Process.</italic>, vol. 3, no. 1, Jan. 2025. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1186/s13640-024-00661-0">https://doi.org/10.1186/s13640-024-00661-0</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname>
<given-names>Z.</given-names>
</name>
<name>
<surname>Liu</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Peng</surname>
<given-names>W.-H.</given-names>
</name>
</person-group>
<article-title>Side information-driven image coding for hybrid machine–human vision</article-title>
<source>EURASIP J. Image Video Process.</source>
<year>2025</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1186/s13640-024-00661-0">https://doi.org/10.1186/s13640-024-00661-0</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref36">
<label>[36]</label>
<mixed-citation publication-type="book">[36] C. G. Harris, and M. J. Stephens, “A Combined Corner and Edge Detector,” in <italic>Proceedings of the Alvey Vision Conference, </italic>1988, pp. 23.1-23.6. <ext-link ext-link-type="uri" xlink:href="https://www.semanticscholar.org/paper/A-Combined-Corner-and-Edge-Detector-Harris-Stephens/6818668fb895d95861a2eb9673ddc3a41e27b3b3">https://www.semanticscholar.org/paper/A-Combined-Corner-and-Edge-Detector-Harris-Stephens/6818668fb895d95861a2eb9673ddc3a41e27b3b3</ext-link>
</mixed-citation>
<element-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Harris</surname>
<given-names>C. G.</given-names>
</name>
<name>
<surname>Stephens</surname>
<given-names>M. J.</given-names>
</name>
</person-group>
<source>Proceedings of the Alvey Vision Conference</source>
<year>1988</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://www.semanticscholar.org/paper/A-Combined-Corner-and-Edge-Detector-Harris-Stephens/6818668fb895d95861a2eb9673ddc3a41e27b3b3">https://www.semanticscholar.org/paper/A-Combined-Corner-and-Edge-Detector-Harris-Stephens/6818668fb895d95861a2eb9673ddc3a41e27b3b3</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref37">
<label>[37]</label>
<mixed-citation publication-type="journal">[37] A. Pál, “fitsh – a software package for image processing,” <italic>Monthly Notices of the Royal Astronomical Society, </italic>vol. 421, pp. 1825–1837, 2012. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1111/j.1365-2966.2011.19813.x">https://doi.org/10.1111/j.1365-2966.2011.19813.x</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Pál</surname>
<given-names>A.</given-names>
</name>
</person-group>
<article-title>fitsh – a software package for image processing</article-title>
<source>Monthly Notices of the Royal Astronomical Society</source>
<year>2012</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1111/j.1365-2966.2011.19813.x">https://doi.org/10.1111/j.1365-2966.2011.19813.x</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref38">
<label>[38]</label>
<mixed-citation publication-type="journal">[38] P. Hajder, and Ł. Rauch, “Moving multiscale modelling to the edge: Benchmarking and load optimization for cellular automata on low power microcomputers,” <italic>Processes</italic>, vol. 9, no. 12, p. 2225, 2021. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3390/pr9122225">https://doi.org/10.3390/pr9122225</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Hajder</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Rauch</surname>
<given-names>Ł.</given-names>
</name>
</person-group>
<article-title>Moving multiscale modelling to the edge: Benchmarking and load optimization for cellular automata on low power microcomputers</article-title>
<source>Processes</source>
<year>2021</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3390/pr9122225">https://doi.org/10.3390/pr9122225</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref39">
<label>[39]</label>
<mixed-citation publication-type="journal">[39] R. B. Marks, I. C. Gifford, and B. O'Hara, “Standards in IEEE 802 unleash the wireless Internet,” in <italic>IEEE Microwave Magazine</italic>, vol. 2, no. 2, pp. 46-56, Jun. 2001. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1109/6668.924918">https://doi.org/10.1109/6668.924918</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Marks</surname>
<given-names>R. B.</given-names>
</name>
<name>
<surname>Gifford</surname>
<given-names>I. C.</given-names>
</name>
</person-group>
<article-title>Standards in IEEE 802 unleash the wireless Internet</article-title>
<source>IEEE Microwave Magazine</source>
<year>2001</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1109/6668.924918">https://doi.org/10.1109/6668.924918</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref40">
<label>[40]</label>
<mixed-citation publication-type="confproc">[40] Y. Amri, and M. A. Setiawan, “Improving Smart Home Concept with the Internet of Things Concept Using RaspberryPi and NodeMCU,” in <italic>IOP Conference Series. Materials Science and Engineering, </italic>Philadelphia, Pennsylvania, USA, 2018, p. 012021. <ext-link ext-link-type="uri" xlink:href="https://iopscience.iop.org/article/10.1088/1757-899X/325/1/012021/pdf">https://iopscience.iop.org/article/10.1088/1757-899X/325/1/012021/pdf</ext-link>
</mixed-citation>
<element-citation publication-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Amri</surname>
<given-names>Y.</given-names>
</name>
<name>
<surname>Setiawan</surname>
<given-names>M. A.</given-names>
</name>
</person-group>
<source>IOP Conference Series. Materials Science and Engineering</source>
<year>2018</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://iopscience.iop.org/article/10.1088/1757-899X/325/1/012021/pdf">https://iopscience.iop.org/article/10.1088/1757-899X/325/1/012021/pdf</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref41">
<label>[41]</label>
<mixed-citation publication-type="journal">[41] E. Gamess, and S. Hernandez, “Performance evaluation of different raspberry pi models for a broad spectrum of interests,” <italic>Int. J. Adv. Comput. Sci. Appl.</italic>, vol. 13, no. 2, May. 2022. <ext-link ext-link-type="uri" xlink:href="http://dx.doi.org/10.14569/IJACSA.2022.0130295">http://dx.doi.org/10.14569/IJACSA.2022.0130295</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gamess</surname>
<given-names>E.</given-names>
</name>
<name>
<surname>Hernandez</surname>
<given-names>S.</given-names>
</name>
</person-group>
<article-title>Performance evaluation of different raspberry pi models for a broad spectrum of interests</article-title>
<source>Int. J. Adv. Comput. Sci. Appl.</source>
<year>2022</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="http://dx.doi.org/10.14569/IJACSA.2022.0130295">http://dx.doi.org/10.14569/IJACSA.2022.0130295</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref42">
<label>[42]</label>
<mixed-citation publication-type="webpage">[42] R. Ildar, “RaspberryPI for mosquito neutralization by power laser,” <italic>arXiv [cs.CV]</italic>, 2021. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.48550/arXiv.2105.14190">https://doi.org/10.48550/arXiv.2105.14190</ext-link>
</mixed-citation>
<element-citation publication-type="webpage">
<person-group person-group-type="author">
<name>
<surname>Ildar</surname>
<given-names>R.</given-names>
</name>
</person-group>
<source>RaspberryPI for mosquito neutralization by power laser</source>
<year>2021</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.48550/arXiv.2105.14190">https://doi.org/10.48550/arXiv.2105.14190</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref43">
<label>[43]</label>
<mixed-citation publication-type="journal">[43] M. Lukitasari, W. I. Windarti, E. P. L. Fatma, T. Suharsono, and D. A. Nugroho, “The efficacy of Raspberry Pi-based automatic voice message education on knowledge level and prevention behavior of high-risk population,” <italic>Healthc. Low Resour. Settings</italic>, vol. 11, no. s1, 2023. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.4081/hls.2023.11178">https://doi.org/10.4081/hls.2023.11178</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lukitasari</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Windarti</surname>
<given-names>W. I.</given-names>
</name>
<name>
<surname>Fatma</surname>
<given-names>E. P. L.</given-names>
</name>
<name>
<surname>Suharsono</surname>
<given-names>T.</given-names>
</name>
<name>
<surname>Nugroho</surname>
<given-names>D. A.</given-names>
</name>
</person-group>
<article-title>The efficacy of Raspberry Pi-based automatic voice message education on knowledge level and prevention behavior of high-risk population</article-title>
<source>Healthc. Low Resour. Settings</source>
<year>2023</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.4081/hls.2023.11178">https://doi.org/10.4081/hls.2023.11178</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref44">
<label>[44]</label>
<mixed-citation publication-type="thesis">[44] R. Vilches Pons, “Red de sensores pervasiva para el bienestar basada en RaspberryPi,” Tesi de grau, UPC Universitat Politècnica de Catalunya, Barcelona, España, 2013. <ext-link ext-link-type="uri" xlink:href="https://upcommons.upc.edu/handle/2099.1/18780">https://upcommons.upc.edu/handle/2099.1/18780</ext-link>
</mixed-citation>
<element-citation publication-type="thesis">
<person-group person-group-type="author">
<name>
<surname>Vilches Pons</surname>
<given-names>R.</given-names>
</name>
</person-group>
<source>Red de sensores pervasiva para el bienestar basada en RaspberryPi</source>
<year>2013</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://upcommons.upc.edu/handle/2099.1/18780">https://upcommons.upc.edu/handle/2099.1/18780</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref45">
<label>[45]</label>
<mixed-citation publication-type="journal">[45] O. Marzuqi, A. Virgono, and R. M. Negara, “Implementation model architecture software defined network using raspberry Pi: a review paper,” <italic>TELKOMNIKA</italic>, vol. 17, no. 3, p. 1136, 2019. <ext-link ext-link-type="uri" xlink:href="https://telkomnika.uad.ac.id/index.php/TELKOMNIKA/article/view/8859">https://telkomnika.uad.ac.id/index.php/TELKOMNIKA/article/view/8859</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Marzuqi</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Virgono</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Negara</surname>
<given-names>R. M.</given-names>
</name>
</person-group>
<article-title>Implementation model architecture software defined network using raspberry Pi: a review paper</article-title>
<source>TELKOMNIKA</source>
<year>2019</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://telkomnika.uad.ac.id/index.php/TELKOMNIKA/article/view/8859">https://telkomnika.uad.ac.id/index.php/TELKOMNIKA/article/view/8859</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref46">
<label>[46]</label>
<mixed-citation publication-type="webpage">[46] I. Rakhmatulin, and S. Volkl, “Brain-Computer-Interface controlled robot via RaspberryPi and PiEEG,” <italic>arXiv [cs.RO]</italic>, Feb. 2022. <ext-link ext-link-type="uri" xlink:href="https://arxiv.org/abs/2202.01936">https://arxiv.org/abs/2202.01936</ext-link>
</mixed-citation>
<element-citation publication-type="webpage">
<person-group person-group-type="author">
<name>
<surname>Rakhmatulin</surname>
<given-names>I.</given-names>
</name>
<name>
<surname>Volkl</surname>
<given-names>S.</given-names>
</name>
</person-group>
<source>Brain-Computer-Interface controlled robot via RaspberryPi and PiEEG</source>
<year>2022</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://arxiv.org/abs/2202.01936">https://arxiv.org/abs/2202.01936</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref47">
<label>[47]</label>
<mixed-citation publication-type="confproc">[47] S. Merugu, D. N. Sudha, T. K. Juluru, R. Rao, and S. K. Reddy Ravula, “Raspberry Pi based Intelligent Classroom Information and Management System using LBP Method,” in <italic>IOP Conference Series. Materials Science and Engineering, </italic>Warangal, India, 2020, p. 032035. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1088/1757-899X/981/3/032035">https://doi.org/10.1088/1757-899X/981/3/032035</ext-link>
</mixed-citation>
<element-citation publication-type="confproc">
<person-group person-group-type="author">
<name>
<surname>Merugu</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Sudha</surname>
<given-names>D. N.</given-names>
</name>
<name>
<surname>Juluru</surname>
<given-names>T. K.</given-names>
</name>
<name>
<surname>Rao</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Reddy Ravula</surname>
<given-names>S. K.</given-names>
</name>
</person-group>
<source>IOP Conference Series. Materials Science and Engineering</source>
<year>2020</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1088/1757-899X/981/3/032035">https://doi.org/10.1088/1757-899X/981/3/032035</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref48">
<label>[48]</label>
<mixed-citation publication-type="journal">[48] F. Morales, L. Bernal, G. Pereira, S. Pérez-Buitrago, M. Kammer, and D. H. Stalder, “PytuTester: RaspberryPi open-source ventilator tester,” <italic>HardwareX</italic>, vol. 12, p. e00334, Jun. 2022. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.ohx.2022.e00334">https://doi.org/10.1016/j.ohx.2022.e00334</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Morales</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Bernal</surname>
<given-names>L.</given-names>
</name>
<name>
<surname>Pereira</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Pérez-Buitrago</surname>
<given-names>S.</given-names>
</name>
<name>
<surname>Kammer</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Stalder</surname>
<given-names>D. H.</given-names>
</name>
</person-group>
<article-title>PytuTester: RaspberryPi open-source ventilator tester</article-title>
<source>HardwareX</source>
<year>2022</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.ohx.2022.e00334">https://doi.org/10.1016/j.ohx.2022.e00334</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref75">
<label>[49]</label>
<mixed-citation publication-type="journal">[49] P. Lanari et al., “XMapTools: A MATLAB©-based program for electron microprobe X-ray image processing and geothermobarometry,” <italic>Comput. Geosci.</italic>, vol. 62, pp. 227–240, Jan. 2014. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.cageo.2013.08.010">https://doi.org/10.1016/j.cageo.2013.08.010</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Lanari</surname>
<given-names>P.</given-names>
</name>
<etal/>
</person-group>
<article-title>XMapTools: A MATLAB©-based program for electron microprobe X-ray image processing and geothermobarometry</article-title>
<source>Comput. Geosci.</source>
<year>2014</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.cageo.2013.08.010">https://doi.org/10.1016/j.cageo.2013.08.010</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref50">
<label>[50]</label>
<mixed-citation publication-type="journal">[50] L. C. Ngugi, M. Abelwahab, and M. Abo-Zahhad, “Recent advances in image processing techniques for automated leaf pest and disease recognition – A review,” <italic>Inf. Process. Agric.</italic>, vol. 8, no. 1, pp. 27–51, Mar. 2021. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.inpa.2020.04.004">https://doi.org/10.1016/j.inpa.2020.04.004</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Ngugi</surname>
<given-names>L. C.</given-names>
</name>
<name>
<surname>Abelwahab</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Abo-Zahhad</surname>
<given-names>M.</given-names>
</name>
</person-group>
<article-title>Recent advances in image processing techniques for automated leaf pest and disease recognition – A review</article-title>
<source>Inf. Process. Agric.</source>
<year>2021</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1016/j.inpa.2020.04.004">https://doi.org/10.1016/j.inpa.2020.04.004</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref51">
<label>[51]</label>
<mixed-citation publication-type="journal">[51] M. G. Selvaraj, M. Valderrama, D. Guzman, M. Valencia, H. Ruiz, and A. Acharjee, “Machine learning for high-throughput field phenotyping and image processing provides insight into the association of above and below-ground traits in cassava (Manihot esculenta Crantz),” <italic>Plant Methods</italic>, vol. 16, no. 1, p. 87, Jun. 2020. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1186/s13007-020-00625-1">https://doi.org/10.1186/s13007-020-00625-1</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Selvaraj</surname>
<given-names>M. G.</given-names>
</name>
<name>
<surname>Valderrama</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Guzman</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Valencia</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Ruiz</surname>
<given-names>H.</given-names>
</name>
<name>
<surname>Acharjee</surname>
<given-names>A.</given-names>
</name>
</person-group>
<article-title>Machine learning for high-throughput field phenotyping and image processing provides insight into the association of above and below-ground traits in cassava (Manihot esculenta Crantz)</article-title>
<source>Plant Methods</source>
<year>2020</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1186/s13007-020-00625-1">https://doi.org/10.1186/s13007-020-00625-1</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref76">
<label>[52]</label>
<mixed-citation publication-type="journal">[52] C. A. Gomez Gonzalez et al., “VIP: Vortex image processing package for high-contrast direct imaging,” <italic>Astron. J.</italic>, vol. 154, no. 1, p. 7, Jun. 2017. <ext-link ext-link-type="uri" xlink:href="https://iopscience.iop.org/article/10.3847/1538-3881/aa73d7/meta">https://iopscience.iop.org/article/10.3847/1538-3881/aa73d7/meta</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Gomez Gonzalez</surname>
<given-names>C. A.</given-names>
</name>
<etal/>
</person-group>
<article-title>VIP: Vortex image processing package for high-contrast direct imaging</article-title>
<source>Astron. J.</source>
<year>2017</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://iopscience.iop.org/article/10.3847/1538-3881/aa73d7/meta">https://iopscience.iop.org/article/10.3847/1538-3881/aa73d7/meta</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref53">
<label>[53]</label>
<mixed-citation publication-type="journal">[53] H. W. L. Mak, R. Han, and H. H. F. Yin, “Application of Variational AutoEncoder (VAE) model and image processing approaches in game design,” <italic>Sensors</italic>, vol. 23, no. 7, p. 3457, Mar. 2023. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3390/s23073457">https://doi.org/10.3390/s23073457</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Mak</surname>
<given-names>H. W. L.</given-names>
</name>
<name>
<surname>Han</surname>
<given-names>R.</given-names>
</name>
<name>
<surname>Yin</surname>
<given-names>H. H. F.</given-names>
</name>
</person-group>
<article-title>Application of Variational AutoEncoder (VAE) model and image processing approaches in game design</article-title>
<source>Sensors</source>
<year>2023</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3390/s23073457">https://doi.org/10.3390/s23073457</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref54">
<label>[54]</label>
<mixed-citation publication-type="book">[54] D. Millan Escriva, P. Joshi, V. G. Mendonca, and R. Shilkrot, “An Introduction to the Basics of OpenCV,” in <italic>Building Computer Vision Projects with OpenCV 4 and C++: Implement complex computer vision algorithms and explore deep learning and face detection. </italic>Birmingham, England: Packt Publishing, 2019, ch. 2, pp. 41–48. https://books.google.com.co/books?hl=es&amp;lr=&amp;id=naOPDwAAQBAJ&amp;oi=fnd&amp;pg=PP1&amp;dq=%5B54%5D%09D.+Mill%C3%A1n+Escriv%C3%A1,+P.+Joshi,+V.+G.+Mendon%C3%A7a+y+R.+Shilkrot,+Building+Computer+Vision+Projects+with+OpenCV+4+and+C%2B%2B+:+Implement+Complex+Computer+Vision+Algorithms+and+Explore+Deep+Learning+&amp;ots=_8aHLwVvkn&amp;sig=OydZuiemOrAwI94PNtDxKXsulSo&amp;redir_esc=y#v=onepage&amp;q&amp;f=false </mixed-citation>
<element-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Millan Escriva</surname>
<given-names>D.</given-names>
</name>
<name>
<surname>Joshi</surname>
<given-names>P.</given-names>
</name>
<name>
<surname>Mendonca</surname>
<given-names>V. G.</given-names>
</name>
<name>
<surname>Shilkrot</surname>
<given-names>R.</given-names>
</name>
</person-group>
<source>Building Computer Vision Projects with OpenCV 4 and C++: Implement complex computer vision algorithms and explore deep learning and face detection.</source>
<year>2019</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://books.google.com.co/books?hl=es&amp;lr=&amp;id=naOPDwAAQBAJ&amp;oi=fnd&amp;pg=PP1&amp;dq=%5B54%5D%09D.+Mill%C3%A1n+Escriv%C3%A1">https://books.google.com.co/books?hl=es&amp;lr=&amp;id=naOPDwAAQBAJ&amp;oi=fnd&amp;pg=PP1&amp;dq=%5B54%5D%09D.+Mill%C3%A1n+Escriv%C3%A1</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref55">
<label>[55]</label>
<mixed-citation publication-type="book">[55] M. Nixon, and A. Aguado, “Moving object detection and description,” in <italic>Feature extraction and Image Processing for Computer Vision. </italic>San Diego, CA, USA: Academic Press, 2019, ch. 9, pp. 257–280. https://books.google.com.co/books?hl=es&amp;lr=&amp;id=KcW-DwAAQBAJ&amp;oi=fnd&amp;pg=PP1&amp;dq=%5B56%5D%09M.+Nixon,+Feature+Extraction+and+Image+Processing+for+Computer+Vision,+3+ed.,+San+Diego,+UNITED+KINGDOM:+Elsevier+Science+%26+Technology&amp;ots=11ly2pRF3S&amp;sig=IkMRT66aDSGNGDjVUTr_Z3AHv2k&amp;redir_esc=y#v=onepage&amp;q&amp;f=false</mixed-citation>
<element-citation publication-type="book">
<person-group person-group-type="author">
<name>
<surname>Nixon</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Aguado</surname>
<given-names>A.</given-names>
</name>
</person-group>
<article-title>Moving object detection and description</article-title>
<source>Feature extraction and Image Processing for Computer Vision.</source>
<year>2019</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://books.google.com.co/books?hl=es&amp;lr=&amp;id=KcW-DwAAQBAJ&amp;oi=fnd&amp;pg=PP1&amp;dq=%5B56%5D%09M.+Nixon,+Feature+Extraction+and+Image+Processing+for+Computer+Vision,+3+ed.,+San+Diego,+UNITED+KINGDOM:+Elsevier+Science+%26+Technology&amp;ots=11ly2pRF3S&amp;sig=IkMRT66aDSGNGDjVUTr_Z3AHv2k&amp;redir_esc=y#v=onepage&amp;q&amp;f=false">https://books.google.com.co/books?hl=es&amp;lr=&amp;id=KcW-DwAAQBAJ&amp;oi=fnd&amp;pg=PP1&amp;dq=%5B56%5D%09M.+Nixon,+Feature+Extraction+and+Image+Processing+for+Computer+Vision,+3+ed.,+San+Diego,+UNITED+KINGDOM:+Elsevier+Science+%26+Technology&amp;ots=11ly2pRF3S&amp;sig=IkMRT66aDSGNGDjVUTr_Z3AHv2k&amp;redir_esc=y#v=onepage&amp;q&amp;f=false</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref56">
<label>[56]</label>
<mixed-citation publication-type="journal">[56] Z. Zhang, “A flexible new technique for camera calibration,” <italic>IEEE Transactions on Pattern Analysis and Machine Intelligence</italic>, vol. 22, no. 11, pp. 1330–1334, Nov. 2000. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1109/34.888718">https://doi.org/10.1109/34.888718</ext-link></mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Zhang</surname>
<given-names>Z.</given-names>
</name>
</person-group>
<article-title>A flexible new technique for camera calibration</article-title>
<source>IEEE Transactions on Pattern Analysis and Machine Intelligence</source>
<year>2000</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1109/34.888718">https://doi.org/10.1109/34.888718</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref57">
<label>[57]</label>
<mixed-citation publication-type="journal">[57] G. Pau, F. Fuchs, O. Sklyar, M. Boutros, and W. Huber, “EBImage—an R package for image processing with applications to cellular phenotypes,” <italic>Bioinformatics</italic>, vol. 26, no. 7, pp. 979–981, Apr. 2010. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1093/bioinformatics/btq046">https://doi.org/10.1093/bioinformatics/btq046</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Pau</surname>
<given-names>G.</given-names>
</name>
<name>
<surname>Fuchs</surname>
<given-names>F.</given-names>
</name>
<name>
<surname>Sklyar</surname>
<given-names>O.</given-names>
</name>
<name>
<surname>Boutros</surname>
<given-names>M.</given-names>
</name>
<name>
<surname>Huber</surname>
<given-names>W.</given-names>
</name>
</person-group>
<article-title>EBImage—an R package for image processing with applications to cellular phenotypes</article-title>
<source>Bioinformatics</source>
<year>2010</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.1093/bioinformatics/btq046">https://doi.org/10.1093/bioinformatics/btq046</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref58">
<label>[58]</label>
<mixed-citation publication-type="webpage">[58] Beamhit, “Military, DOD, and Law Enforcement Systems,” beamhit.net. Accessed: Feb. 11, 2024. [Online]. Available: <ext-link ext-link-type="uri" xlink:href="https://www.beamhit.net/controlledsystems">https://www.beamhit.net/controlledsystems</ext-link>
</mixed-citation>
<element-citation publication-type="webpage">
<person-group person-group-type="author">
<collab>Beamhit</collab>
</person-group>
<source>Military, DOD, and Law Enforcement Systems</source>
<year>2024</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://www.beamhit.net/controlledsystems">https://www.beamhit.net/controlledsystems</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref59">
<label>[59]</label>
<mixed-citation publication-type="webpage">[59] VirTra, “Comprehensive Simulation Training Solutions for Law Enforcement and Military,” virtra.com, 2024. Accessed: Feb. 11, 2024. [Online]. Available: <ext-link ext-link-type="uri" xlink:href="https://www.virtra.com/?lang=es">https://www.virtra.com/?lang=es</ext-link>
</mixed-citation>
<element-citation publication-type="webpage">
<person-group person-group-type="author">
<collab>VirTra</collab>
</person-group>
<source>Comprehensive Simulation Training Solutions for Law Enforcement and Military</source>
<year>2024</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://www.virtra.com/?lang=es">https://www.virtra.com/?lang=es</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref60">
<label>[60]</label>
<mixed-citation publication-type="thesis">[60] J. A. Silva Achancaray, “La gestión de las Tecnologías de la Información y Comunicaciones y el desarrollo de Simuladores de Armas en el Comando de Educación y Doctrina del Ejército en el año 2017,” Tesis de Maestría, Instituto Científico y Tecnológico del Ejército, Lima, Perú, 2017. <ext-link ext-link-type="uri" xlink:href="https://repositoriodev.icte.edu.pe/bitstream/handle/ICTE/137/Tesis%20Juan%20Silva%20Achancaray.pdf?sequence=1&amp;isAllowed=y">https://repositoriodev.icte.edu.pe/bitstream/handle/ICTE/137/Tesis%20Juan%20Silva%20Achancaray.pdf?sequence=1&amp;isAllowed=y</ext-link>
</mixed-citation>
<element-citation publication-type="thesis">
<person-group person-group-type="author">
<name>
<surname>Silva Achancaray</surname>
<given-names>J. A.</given-names>
</name>
</person-group>
<source>La gestión de las Tecnologías de la Información y Comunicaciones y el desarrollo de Simuladores de Armas en el Comando de Educación y Doctrina del Ejército en el año 2017</source>
<year>2017</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://repositoriodev.icte.edu.pe/bitstream/handle/ICTE/137/Tesis%20Juan%20Silva%20Achancaray.pdf?sequence=1&amp;isAllowed=y">https://repositoriodev.icte.edu.pe/bitstream/handle/ICTE/137/Tesis%20Juan%20Silva%20Achancaray.pdf?sequence=1&amp;isAllowed=y</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref61">
<label>[61]</label>
<mixed-citation publication-type="webpage">[61] Laser Ammo Training Technologies, “Simulador de Smokeless Range 2.0,” laserammo.com, 2025. Accessed: Feb. 11, 2024. [Online]. Available: <ext-link ext-link-type="uri" xlink:href="https://www.laserammo.com/store/Simulators-Targets/Simulators/Smokeless-Range/Smokeless-Range-20-Simulator">https://www.laserammo.com/store/Simulators-Targets/Simulators/Smokeless-Range/Smokeless-Range-20-Simulator</ext-link>
</mixed-citation>
<element-citation publication-type="webpage">
<person-group person-group-type="author">
<collab>Laser Ammo Training Technologies</collab>
</person-group>
<source>Simulador de Smokeless Range 2.0</source>
<year>2025</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://www.laserammo.com/store/Simulators-Targets/Simulators/Smokeless-Range/Smokeless-Range-20-Simulator">https://www.laserammo.com/store/Simulators-Targets/Simulators/Smokeless-Range/Smokeless-Range-20-Simulator</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref62">
<label>[62]</label>
<mixed-citation publication-type="journal">[62] A. T. Biggs, J. A. Hamilton, A. G. Thompson, and R. Markwald, “Talk is cheap: Self-reported versus actual marksmanship proficiency among military and community samples,” <italic>Am. J. Psychol.</italic>, vol. 137, no. 1, pp. 1–17, Sep. 2024. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.5406/19398298.137.1.01">https://doi.org/10.5406/19398298.137.1.01</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Biggs</surname>
<given-names>A. T.</given-names>
</name>
<name>
<surname>Hamilton</surname>
<given-names>J. A.</given-names>
</name>
<name>
<surname>Thompson</surname>
<given-names>A. G.</given-names>
</name>
<name>
<surname>Markwald</surname>
<given-names>R.</given-names>
</name>
</person-group>
<source>Am. J. Psychol.</source>
<year>2024</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.5406/19398298.137.1.01">https://doi.org/10.5406/19398298.137.1.01</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref63">
<label>[63]</label>
<mixed-citation publication-type="journal">[63] A. Soetedjo, A. Mahmudi, M. I. Ashari, and Y. I. Nakhoda, “Low cost shooting simulator based on a single board computer,” <italic>Am. J. Appl. Sci.</italic>, vol. 12, no. 2, pp. 130–141, 2015. <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3844/ajassp.2015.130.141">https://doi.org/10.3844/ajassp.2015.130.141</ext-link>
</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Soetedjo</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Mahmudi</surname>
<given-names>A.</given-names>
</name>
<name>
<surname>Ashari</surname>
<given-names>M. I.</given-names>
</name>
<name>
<surname>Nakhoda</surname>
<given-names>Y. I.</given-names>
</name>
</person-group>
<source>Am. J. Appl. Sci.</source>
<year>2015</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://doi.org/10.3844/ajassp.2015.130.141">https://doi.org/10.3844/ajassp.2015.130.141</ext-link>
</comment>
</element-citation>
</ref>
<ref id="redalyc_344281653008_ref64">
<label>[64]</label>
<mixed-citation publication-type="webpage">[64] Laser Shot, “Simulador de Entrenamiento de Puntería Móvil,” lasershot.com. Accessed: Feb. 11, 2024. [Online]. Available: <ext-link ext-link-type="uri" xlink:href="https://lasershot.com/es/simuladores-militares/#mmtsmil">https://lasershot.com/es/simuladores-militares/#mmtsmil</ext-link>
</mixed-citation>
<element-citation publication-type="webpage">
<person-group person-group-type="author">
<collab>Laser Shot</collab>
</person-group>
<source>Simulador de Entrenamiento de Puntería Móvil</source>
<year>2024</year>
<comment>
<ext-link ext-link-type="uri" xlink:href="https://lasershot.com/es/simuladores-militares/#mmtsmil">https://lasershot.com/es/simuladores-militares/#mmtsmil</ext-link>
</comment>
</element-citation>
</ref>
</ref-list>
<fn-group>
<title>Notes</title>
<fn id="fn8" fn-type="other">
<label>
<bold>ACKNOWLEDGEMENTS AND FUNDING</bold>
</label>
<p>
<bold>:</bold>
</p>
<p>This work has been supported by the assignment to the CTeI of the Escuela Militar de Cadetes “General José María Córdova,” which seeks to improve the training processes and technological development for the benefit of the institution and the nation.</p>
</fn>
<fn id="fn6" fn-type="other">
<label>
<bold> CONFLICT OF INTEREST</bold>
</label>
<p>
<bold>:</bold>
</p>
<p>The authors declare that there is no conflict of interest with respect to the publication of this work.</p>
</fn>
<fn id="fn7" fn-type="other">
<label>
<bold>AUTHOR CONTRIBUTIONS</bold>
</label>
<p>
<bold>:</bold>
</p>
<p>
<italic>
<bold>José Antonio García Torres</bold>
</italic> and<bold>
<italic>Jhonnatan Eduardo Zamudio</italic>:</bold> Development of the technology, Design and development of the system software, Image processing and Assembly of the prototype.</p>
<p>
<bold>
<italic>Cristian Camilo García Rodriguez</italic>: </bold>Development of the prototype, Electronic configuration, and Development of the academic text.</p>
<p>
<bold>
<italic>Jhon Fredy Rincón Morantes </italic>
</bold>and<bold>
<italic>Daniel Guzmán Pérez</italic>:</bold> Methodology, Polygon tests, Adapting and arranging the scenarios to carry out the system tests.</p>
<p>
<bold>
<italic>Daniel Felipe Molina Martínez</italic>: </bold>Development and electronic assembly of the device, Methodology documentation such as manuals, user guides, and maintenance of the device.</p>
</fn>
</fn-group>
</back>
</article>