<?xml version="1.0" encoding="UTF-8"?>
<rdf:RDF xmlns="http://purl.org/rss/1.0/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:dcterms="http://purl.org/dc/terms/" xmlns:cc="http://web.resource.org/cc/" xmlns:prism="http://prismstandard.org/namespaces/basic/2.0/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:admin="http://webns.net/mvcb/" xmlns:content="http://purl.org/rss/1.0/modules/content/">
  <channel rdf:about="https://www.acadlore.com/rss/journals/ATAIML">
    <title>Acadlore Transactions on AI and Machine Learning</title>
    <description>Latest open access articles published in Acadlore Transactions on AI and Machine Learning at https://www.acadlore.com/journals/ATAIML</description>
    <link>https://www.acadlore.com/journals/ATAIML</link>
    <admin:generatorAgent rdf:resource="https://www.acadlore.com/journals/ATAIML"/>
    <admin:errorReportsTo rdf:resource="mailto:support@acadlore.com"/>
    <dc:publisher>Acadlore</dc:publisher>
    <dc:language>en</dc:language>
    <dc:rights>Creative Commons Attribution (CC BY)</dc:rights>
    <prism:copyright>ATAIML</prism:copyright>
    <prism:rightsAgent>support@acadlore.com</prism:rightsAgent>
    <image rdf:resource="https://media.acadlore.com/assets/media/2026/2/img_s1rbhqdkabhpVcGq.png"/>
    <items>
      <rdf:Seq>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2026_5_2/ataiml050201"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2026_5_1/ataiml050107"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2026_5_1/ataiml050106"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2026_5_1/ataiml050105"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2026_5_1/ataiml050104"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2026_5_1/ataiml050103"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2026_5_1/ataiml050102"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2026_5_1/ataiml050101"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_4/ataiml040405"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_4/ataiml040404"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_4/ataiml040403"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_4/ataiml040402"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_4/ataiml040401"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_3/ataiml040305"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_3/ataiml040304"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_3/ataiml040303"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_3/ataiml040302"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_3/ataiml040301"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_2/ataiml040205"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_2/ataiml040204"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_2/ataiml040203"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_2/ataiml040202"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_2/ataiml040201"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_1/ataiml040105"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_1/ataiml040104"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_1/ataiml040103"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_1/ataiml040102"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_1/ataiml040101"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_4/ataiml030405"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_4/ataiml030404"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_4/ataiml030403"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_4/ataiml030402"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_4/ataiml030401"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_3/ataiml030305"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_3/ataiml030304"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_3/ataiml030303"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_3/ataiml030302"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_3/ataiml030301"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_2/ataiml030205"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_2/ataiml030204"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_2/ataiml030203"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_2/ataiml030202"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_2/ataiml030201"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_1/ataiml030105"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_1/ataiml030104"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_1/ataiml030103"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_1/ataiml030102"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_1/ataiml030101"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_4/ataiml020405"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_4/ataiml020404"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_4/ataiml020403"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_4/ataiml020402"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_4/ataiml020401"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_3/ataiml020305"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_3/ataiml020304"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_3/ataiml020303"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_3/ataiml020302"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_3/ataiml020301"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_2/ataiml020205"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_2/ataiml020204"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_2/ataiml020203"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_2/ataiml020202"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_2/ataiml020201"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_1/ataiml020105"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_1/ataiml020104"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_1/ataiml020103"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_1/ataiml020102"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_1/ataiml020101"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2022_1_2/ataiml010205"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2022_1_2/ataiml010204"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2022_1_2/ataiml010203"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2022_1_2/ataiml010202"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2022_1_2/ataiml010201"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010108"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010107"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010106"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010105"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010104"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010103"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010102"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010101"/>
      </rdf:Seq>
    </items>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </channel>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2026_5_2/ataiml050201">
    <title>Acadlore Transactions on AI and Machine Learning, 2026, Volume 5, Issue 2: Fine-Tuning a Vision-Language Model for Automated Grading of K-12 Handwritten Answer Sheets</title>
    <link>https://www.acadlore.com/article/ATAIML/2026_5_2/ataiml050201</link>
    <description>Automated grading has become an important component of digital transformation in K-12 education, yet the structured recognition of handwritten responses on answer sheets remains a practical challenge. General-purpose vision-language models often show limited robustness when applied directly to school assessment materials, particularly in the presence of fixed answer regions, mixed Chinese-English content, and diverse handwriting styles. To address this issue, this study develops a task-oriented fine-tuning framework for automated recognition of handwritten answer sheets in K-12 educational settings. A multimodal dataset was constructed from Chinese and English answer sheets, with region-level annotations designed to support structured text extraction. Based on this dataset, the Qwen2.5-VL-7B-Instruct model was adapted through LoRA-based fine-tuning under a dual-A16 GPU environment to reduce computational cost while preserving practical deployment feasibility. An end-to-end workflow covering data preparation, model training, weight merging, and inference was then established for structured JSON output. Experimental results show that the fine-tuned model achieved stable convergence in both small-sample and medium-sample settings and improved the extraction quality of handwritten responses within predefined answer regions. The proposed framework provides a practical and reproducible solution for deploying vision-language models in school grading scenarios with limited computing resources. The study also offers an application-oriented reference for the integration of multimodal large models into educational assessment systems.</description>
    <pubDate>2026-04-08</pubDate>
    <content:encoded>&lt;![CDATA[ Automated grading has become an important component of digital transformation in K-12 education, yet the structured recognition of handwritten responses on answer sheets remains a practical challenge. General-purpose vision-language models often show limited robustness when applied directly to school assessment materials, particularly in the presence of fixed answer regions, mixed Chinese-English content, and diverse handwriting styles. To address this issue, this study develops a task-oriented fine-tuning framework for automated recognition of handwritten answer sheets in K-12 educational settings. A multimodal dataset was constructed from Chinese and English answer sheets, with region-level annotations designed to support structured text extraction. Based on this dataset, the Qwen2.5-VL-7B-Instruct model was adapted through LoRA-based fine-tuning under a dual-A16 GPU environment to reduce computational cost while preserving practical deployment feasibility. An end-to-end workflow covering data preparation, model training, weight merging, and inference was then established for structured JSON output. Experimental results show that the fine-tuned model achieved stable convergence in both small-sample and medium-sample settings and improved the extraction quality of handwritten responses within predefined answer regions. The proposed framework provides a practical and reproducible solution for deploying vision-language models in school grading scenarios with limited computing resources. The study also offers an application-oriented reference for the integration of multimodal large models into educational assessment systems. ]]&gt;</content:encoded>
    <dc:title>Fine-Tuning a Vision-Language Model for Automated Grading of K-12 Handwritten Answer Sheets</dc:title>
    <dc:creator>Yuanyuan Wang</dc:creator>
    <dc:creator>Hao Shen</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml050201</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2026-04-08</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2026-04-08</prism:publicationDate>
    <prism:year>2026</prism:year>
    <prism:volume>5</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>89</prism:startingPage>
    <prism:doi>10.56578/ataiml050201</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2026_5_2/ataiml050201</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2026_5_1/ataiml050107">
    <title>Acadlore Transactions on AI and Machine Learning, 2026, Volume 5, Issue 1: Mesh-Free Modeling of Heat Transfer Dynamics for Rapid Assessment of Necrosis Zones in Hepatic Tumor Radiofrequency Ablation Using Physics-Informed Neural Networks</title>
    <link>https://www.acadlore.com/article/ATAIML/2026_5_1/ataiml050107</link>
    <description>Accurate prediction of the thermal ablation zone in hepatic radiofrequency ablation (RFA) is critical for preventing the recurrence of local tumor, yet it is complicated by the convective heat sink effect of blood perfusion. Traditional numerical solvers, such as the finite difference method (FDM), are inherently limited by time-step constraints which require greater computational cost and impede real-time clinical applications. This study proposed a mesh-free Physics-Informed Neural Network (PINN) framework to simulate the spatiotemporal dynamics of Pennes bioheat equation. By embedding the governing partial differential equation (PDE) directly into the loss function of the neural network, the model learnt the continuous temperature field without spatial discretization or labeled training data. A comparative analysis against an explicit FDM baseline yielded a relative L2 error norm of 1.9%. Although PINN’s continuous functional approximation slightly dampened the theoretical singularity at the tip of the electrode, it accurately resolved the critical 50 °C isotherm that defined the boundary of irreversible coagulative necrosis. Furthermore, the framework effectively decoupled computational cost from the time of physical simulation. While offline training required approximately 6 minutes, the optimized network executed online inference in milliseconds. This capability to provide physically consistent and near-instantaneous thermal predictions demonstrates the potential of the PINN framework for intraoperative decision support systems.</description>
    <pubDate>2026-03-26</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Accurate prediction of the thermal ablation zone in hepatic radiofrequency ablation (RFA) is critical for preventing the recurrence of local tumor, yet it is complicated by the convective heat sink effect of blood perfusion. Traditional numerical solvers, such as the finite difference method (FDM), are inherently limited by time-step constraints which require greater computational cost and impede real-time clinical applications. This study proposed a mesh-free Physics-Informed Neural Network (PINN) framework to simulate the spatiotemporal dynamics of Pennes bioheat equation. By embedding the governing partial differential equation (PDE) directly into the loss function of the neural network, the model learnt the continuous temperature field without spatial discretization or labeled training data. A comparative analysis against an explicit FDM baseline yielded a relative L2 error norm of 1.9%. Although PINN’s continuous functional approximation slightly dampened the theoretical singularity at the tip of the electrode, it accurately resolved the critical 50 °C isotherm that defined the boundary of irreversible coagulative necrosis. Furthermore, the framework effectively decoupled computational cost from the time of physical simulation. While offline training required approximately 6 minutes, the optimized network executed online inference in milliseconds. This capability to provide physically consistent and near-instantaneous thermal predictions demonstrates the potential of the PINN framework for intraoperative decision support systems.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Mesh-Free Modeling of Heat Transfer Dynamics for Rapid Assessment of Necrosis Zones in Hepatic Tumor Radiofrequency Ablation Using Physics-Informed Neural Networks</dc:title>
    <dc:creator>Muhammet Kaan Yeşilyurt</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml050107</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2026-03-26</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2026-03-26</prism:publicationDate>
    <prism:year>2026</prism:year>
    <prism:volume>5</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>73</prism:startingPage>
    <prism:doi>10.56578/ataiml050107</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2026_5_1/ataiml050107</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2026_5_1/ataiml050106">
    <title>Acadlore Transactions on AI and Machine Learning, 2026, Volume 5, Issue 1: A Deep Learning and Sensor-Based Internet of Things Framework for Intelligent Waste Management: A Comparative Analysis</title>
    <link>https://www.acadlore.com/article/ATAIML/2026_5_1/ataiml050106</link>
    <description>The escalating volume of municipal solid waste has intensified the need for intelligent waste management systems capable of improving operational efficiency, classification accuracy, and sustainability. In recent years, the integration of Internet of Things technologies, deep learning algorithms, and sensor-based monitoring has significantly transformed conventional waste collection and sorting practices. In this study, an intelligent waste management framework was proposed and comparatively evaluated against twelve contemporary smart waste management systems reported in the literature. The proposed architecture integrates a Raspberry Pi 3 embedded platform, You Only Look Once version 8 (YOLOv8) deep learning models for real-time waste classification, and ultrasonic bin-fill sensors for monitoring container capacity, enabling automated lid operation, and supporting optimized waste collection scheduling. A comprehensive comparative analysis was conducted across multiple performance dimensions, including classification accuracy, system responsiveness, scalability, deployment cost, and operational efficiency. Experimental evaluation demonstrates that the deep learning–driven framework achieved high real-time classification accuracy while maintaining low computational overhead on resource-constrained edge devices. In addition, the incorporation of bin-fill sensing and automated actuation enhanced system responsiveness and supported data-driven collection planning, thereby reducing unnecessary collection trips and operational costs. The findings highlight the significant potential of combining advanced deep learning algorithms with sensor-based Internet of Things infrastructures to develop sustainable, intelligent, and cost-effective waste management ecosystems. These insights provide a foundation for future research aimed at enhancing intelligent waste infrastructure and supporting environmentally sustainable urban development.</description>
    <pubDate>2026-03-15</pubDate>
    <content:encoded>&lt;![CDATA[ The escalating volume of municipal solid waste has intensified the need for intelligent waste management systems capable of improving operational efficiency, classification accuracy, and sustainability. In recent years, the integration of Internet of Things technologies, deep learning algorithms, and sensor-based monitoring has significantly transformed conventional waste collection and sorting practices. In this study, an intelligent waste management framework was proposed and comparatively evaluated against twelve contemporary smart waste management systems reported in the literature. The proposed architecture integrates a Raspberry Pi 3 embedded platform, You Only Look Once version 8 (YOLOv8) deep learning models for real-time waste classification, and ultrasonic bin-fill sensors for monitoring container capacity, enabling automated lid operation, and supporting optimized waste collection scheduling. A comprehensive comparative analysis was conducted across multiple performance dimensions, including classification accuracy, system responsiveness, scalability, deployment cost, and operational efficiency. Experimental evaluation demonstrates that the deep learning–driven framework achieved high real-time classification accuracy while maintaining low computational overhead on resource-constrained edge devices. In addition, the incorporation of bin-fill sensing and automated actuation enhanced system responsiveness and supported data-driven collection planning, thereby reducing unnecessary collection trips and operational costs. The findings highlight the significant potential of combining advanced deep learning algorithms with sensor-based Internet of Things infrastructures to develop sustainable, intelligent, and cost-effective waste management ecosystems. These insights provide a foundation for future research aimed at enhancing intelligent waste infrastructure and supporting environmentally sustainable urban development. 
]]&gt;</content:encoded>
    <dc:title>A Deep Learning and Sensor-Based Internet of Things Framework for Intelligent Waste Management: A Comparative Analysis</dc:title>
    <dc:creator>Rexhep Mustafovski</dc:creator>
    <dc:creator>Aleksandar Petrovski</dc:creator>
    <dc:creator>Marko Radovanovic</dc:creator>
    <dc:creator>Aner Behlic</dc:creator>
    <dc:creator>Kristijan Ilievski</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml050106</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2026-03-15</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2026-03-15</prism:publicationDate>
    <prism:year>2026</prism:year>
    <prism:volume>5</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>58</prism:startingPage>
    <prism:doi>10.56578/ataiml050106</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2026_5_1/ataiml050106</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2026_5_1/ataiml050105">
    <title>Acadlore Transactions on AI and Machine Learning, 2026, Volume 5, Issue 1: Empowering Accessibility to Digital Space Through Generative AI to Support People with Disabilities</title>
    <link>https://www.acadlore.com/article/ATAIML/2026_5_1/ataiml050105</link>
    <description>This paper explored how generative artificial intelligence (AI) could enhance the digital accessibility of individuals with visual, auditory, and cognitive impairments. It aims to develop an adaptive and context-sensitive system to dynamically customize content in accordance with users’ needs. The proposed system creates text simplification with generative AI models like Generative Pretrained Transformer 3 (GPT-3), and caption images with Contrastive Language–Image Pre-Training (CLIP). It adapts users’ reactions with reinforcement learning, to enable the generation of real-time and personalized content. This project tested the system performance with mixed data, including texts, images, and videos. The outcomes revealed that the accessibility of the content had been significantly increased. At the same time, the Flesch-Kincaid Grade Level was reduced by 50% through text simplification, and the bilingual evaluation understudy (BLEU) score was ranked at 0.74 in the case of image captioning. User satisfaction had increased by 15% after feedback corrections. In addition to these results, the system demonstrated high effectiveness in supporting auditory-impaired users by achieving a subtitle synchronization accuracy of 94.6% in video content, and increasing auditory user satisfaction by 18% during accessibility evaluations. This study helped develop AI-based accessibility and provide more inclusive online environment for people with disabilities, thus facilitating their access to online content. In conclusion, the proposed system is more convenient and could offer a broader range of individual and time-sensitive user experiences, compared to the current accessibility models.</description>
    <pubDate>2026-02-28</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;This paper explored how generative artificial intelligence (AI) could enhance the digital accessibility of individuals with visual, auditory, and cognitive impairments. It aims to develop an adaptive and context-sensitive system to dynamically customize content in accordance with users’ needs. The proposed system creates text simplification with generative AI models like Generative Pretrained Transformer 3 (GPT-3), and caption images with Contrastive Language–Image Pre-Training (CLIP). It adapts users’ reactions with reinforcement learning, to enable the generation of real-time and personalized content. This project tested the system performance with mixed data, including texts, images, and videos. The outcomes revealed that the accessibility of the content had been significantly increased. At the same time, the Flesch-Kincaid Grade Level was reduced by 50% through text simplification, and the bilingual evaluation understudy (BLEU) score was ranked at 0.74 in the case of image captioning. User satisfaction had increased by 15% after feedback corrections. In addition to these results, the system demonstrated high effectiveness in supporting auditory-impaired users by achieving a subtitle synchronization accuracy of 94.6% in video content, and increasing auditory user satisfaction by 18% during accessibility evaluations. This study helped develop AI-based accessibility and provide more inclusive online environment for people with disabilities, thus facilitating their access to online content. In conclusion, the proposed system is more convenient and could offer a broader range of individual and time-sensitive user experiences, compared to the current accessibility models.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Empowering Accessibility to Digital Space Through Generative AI to Support People with Disabilities</dc:title>
    <dc:creator>Abebe Kindie Awuraris</dc:creator>
    <dc:creator>Ravuri Daniel</dc:creator>
    <dc:creator>Paramasivan Muthukumar</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml050105</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2026-02-28</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2026-02-28</prism:publicationDate>
    <prism:year>2026</prism:year>
    <prism:volume>5</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>44</prism:startingPage>
    <prism:doi>10.56578/ataiml050105</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2026_5_1/ataiml050105</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2026_5_1/ataiml050104">
    <title>Acadlore Transactions on AI and Machine Learning, 2026, Volume 5, Issue 1: Transformer-Driven Feature Fusion for Robust Diagnosis of Lung Cancer Brain Metastasis Under Missing-Modality Scenarios</title>
    <link>https://www.acadlore.com/article/ATAIML/2026_5_1/ataiml050104</link>
    <description>Accurate diagnosis of lung cancer brain metastasis is often hindered by incomplete magnetic resonance imaging (MRI) modalities, resulting in suboptimal utilization of complementary radiological information. To address the challenge of ineffective feature integration in missing-modality scenarios, a Transformer-based multi-modal feature fusion framework, referred to as Missing Modality Transformer (MMT), was introduced. In this study, multi-modal MRI data from 279 individuals diagnosed with lung cancer brain metastasis, including both small cell lung cancer (SCLC) and non-small cell lung cancer (NSCLC), were acquired and processed through a standardized radiomics pipeline encompassing feature extraction, feature selection, and controlled data augmentation. The proposed MMT framework was trained and evaluated under various single-modality and combined-modality configurations to assess its robustness to modality absence. A maximum diagnostic accuracy of 0.905 was achieved under single-modality missing conditions, exceeding the performance of the full-modality baseline by 0.017. Interpretability was further strengthened through systematic analysis of loss-function hyperparameters and quantitative assessments of modality-specific importance. The experimental findings collectively indicate that the MMT framework provides a reliable and clinically meaningful solution for diagnostic environments in which imaging acquisition is limited by patient conditions, equipment availability, or time constraints. These results highlight the potential of Transformer-based radiomics fusion to advance computational neuro-oncology by improving diagnostic performance, enhancing robustness to real-world imaging variability, and offering transparent interpretability that aligns with clinical decision-support requirements.</description>
    <pubDate>2026-02-05</pubDate>
    <content:encoded>&lt;![CDATA[ Accurate diagnosis of lung cancer brain metastasis is often hindered by incomplete magnetic resonance imaging (MRI) modalities, resulting in suboptimal utilization of complementary radiological information. To address the challenge of ineffective feature integration in missing-modality scenarios, a Transformer-based multi-modal feature fusion framework, referred to as Missing Modality Transformer (MMT), was introduced. In this study, multi-modal MRI data from 279 individuals diagnosed with lung cancer brain metastasis, including both small cell lung cancer (SCLC) and non-small cell lung cancer (NSCLC), were acquired and processed through a standardized radiomics pipeline encompassing feature extraction, feature selection, and controlled data augmentation. The proposed MMT framework was trained and evaluated under various single-modality and combined-modality configurations to assess its robustness to modality absence. A maximum diagnostic accuracy of 0.905 was achieved under single-modality missing conditions, exceeding the performance of the full-modality baseline by 0.017. Interpretability was further strengthened through systematic analysis of loss-function hyperparameters and quantitative assessments of modality-specific importance. The experimental findings collectively indicate that the MMT framework provides a reliable and clinically meaningful solution for diagnostic environments in which imaging acquisition is limited by patient conditions, equipment availability, or time constraints. These results highlight the potential of Transformer-based radiomics fusion to advance computational neuro-oncology by improving diagnostic performance, enhancing robustness to real-world imaging variability, and offering transparent interpretability that aligns with clinical decision-support requirements. ]]&gt;</content:encoded>
    <dc:title>Transformer-Driven Feature Fusion for Robust Diagnosis of Lung Cancer Brain Metastasis Under Missing-Modality Scenarios</dc:title>
    <dc:creator>Yue Ding</dc:creator>
    <dc:creator>Yunqi Ma</dc:creator>
    <dc:creator>Kuo Jing</dc:creator>
    <dc:creator>Zhansong Shang</dc:creator>
    <dc:creator>Feiyang Gao</dc:creator>
    <dc:creator>Zhengwei Cui</dc:creator>
    <dc:creator>Linyan Xue</dc:creator>
    <dc:creator>Shuang Liu</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml050104</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2026-02-05</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2026-02-05</prism:publicationDate>
    <prism:year>2026</prism:year>
    <prism:volume>5</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>32</prism:startingPage>
    <prism:doi>10.56578/ataiml050104</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2026_5_1/ataiml050104</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2026_5_1/ataiml050103">
    <title>Acadlore Transactions on AI and Machine Learning, 2026, Volume 5, Issue 1: AI-Driven Climate Adaptation: Technical Applications, Ethical Governance, and Social Inclusion</title>
    <link>https://www.acadlore.com/article/ATAIML/2026_5_1/ataiml050103</link>
    <description>Climate change, which has intensified to a global governance crisis, demands adaptation strategies that are faster, precise, and more inclusive than ever before. Artificial intelligence (AI), increasingly positioned at the core of this transformation, is offering powerful tools for climate risk forecasting, disaster preparedness, energy optimization, agricultural efficiency, and business resilience. Yet the growing adoption of AI exposes a fundamental paradox: while it promises unprecedented analytical capacity, its benefits remain unevenly distributed across communities. The current study addressed this tension by presenting a comprehensive and governance-oriented analysis of AI-driven climate adaptation. Drawing on an extensive review of academic research and major institutional reports, this paper identified three interlinked challenges including methodological limitations, ethical and equity risks, as well as governance gaps which continuously undermine the effectiveness of AI-enabled adaptation. Predictive models struggled to incorporate complex social vulnerabilities; algorithmic opacity limited trust and accountability whereas persistent data inequality prevented low-income regions from leveraging advanced digital tools. In response, the study introduced a multi-layered governance framework encompassing technical capacity, regulatory and ethical infrastructure, and socially inclusive outcomes. The findings revealed that the contributions of AI to climate adaptation were fundamentally shaped by institutional quality, transparent data governance, equitable digital access, and participation of vulnerable populations in decision making. The paper concluded that AI held extraordinary potential to strengthen resilience, only if deployed within governance systems that prioritize fairness, accountability, transparency, ethics, and social inclusion. 
By aligning technological innovation with just and sustainable governance, AI becomes not only a predictive instrument but a transformative catalyst for equitable climate adaptation worldwide. </description>
    <pubDate>2026-01-22</pubDate>
    <content:encoded><![CDATA[ Climate change, which has intensified to a global governance crisis, demands adaptation strategies that are faster, precise, and more inclusive than ever before. Artificial intelligence (AI), increasingly positioned at the core of this transformation, is offering powerful tools for climate risk forecasting, disaster preparedness, energy optimization, agricultural efficiency, and business resilience. Yet the growing adoption of AI exposes a fundamental paradox: while it promises unprecedented analytical capacity, its benefits remain unevenly distributed across communities. The current study addressed this tension by presenting a comprehensive and governance-oriented analysis of AI-driven climate adaptation. Drawing on an extensive review of academic research and major institutional reports, this paper identified three interlinked challenges including methodological limitations, ethical and equity risks, as well as governance gaps which continuously undermine the effectiveness of AI-enabled adaptation. Predictive models struggled to incorporate complex social vulnerabilities; algorithmic opacity limited trust and accountability whereas persistent data inequality prevented low-income regions from leveraging advanced digital tools. In response, the study introduced a multi-layered governance framework encompassing technical capacity, regulatory and ethical infrastructure, and socially inclusive outcomes. The findings revealed that the contributions of AI to climate adaptation were fundamentally shaped by institutional quality, transparent data governance, equitable digital access, and participation of vulnerable populations in decision making. The paper concluded that AI held extraordinary potential to strengthen resilience, only if deployed within governance systems that prioritize fairness, accountability, transparency, ethics, and social inclusion. 
By aligning technological innovation with just and sustainable governance, AI becomes not only a predictive instrument but a transformative catalyst for equitable climate adaptation worldwide. ]]></content:encoded>
    <dc:title>AI-Driven Climate Adaptation: Technical Applications, Ethical Governance, and Social Inclusion</dc:title>
    <dc:creator>Özden Şentürk</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml050103</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2026-01-22</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2026-01-22</prism:publicationDate>
    <prism:year>2026</prism:year>
    <prism:volume>5</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>20</prism:startingPage>
    <prism:doi>10.56578/ataiml050103</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2026_5_1/ataiml050103</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2026_5_1/ataiml050102">
    <title>Acadlore Transactions on AI and Machine Learning, 2026, Volume 5, Issue 1: Decision-Level Multimodal Fusion for Non-Invasive Diagnosis of Endometriosis: Strategies, Calibration, and Net Clinical Benefit</title>
    <link>https://www.acadlore.com/article/ATAIML/2026_5_1/ataiml050102</link>
    <description>Endometriosis remains underdiagnosed due to reliance on invasive laparoscopy. Artificial Intelligence (AI) using either imaging or structured clinical data have shown promise, but single modality approaches face limitations in sensitivity, calibration, and clinical reliability. This work seeks to evaluate whether decision-level multimodal fusion of Magnetic Resonance Imaging (MRI)-based and clinical data-based AI systems improves diagnostic performance, calibration, and net clinical benefit, compared with single-modality models. Two previously validated models were combined with retrospective data from 1,208 patients with suspected endometriosis: a Dual U-Net trained on pelvic MRI with Gradient-weighted Class Activation Mapping (Grad-CAM) interpretability and a dense neural network trained on structured clinical features with SHapley Additive exPlanations (SHAP). This study tested weighted averaging, stacking via logistic regression, and confidence-gating. Performance was assessed using accuracy, precision, recall, F1-score, and area under the curve (AUC). Calibration was evaluated using the Brier score, expected calibration error (ECE), and reliability diagrams. Clinical utility was quantified with decision curve analysis (DCA). Statistical significance was tested with McNemar’s test for accuracy and DeLong’s test for AUC. Multimodal fusion outperformed both single modality models. Weighted averaging accuracy was 0.89, precision was 0.89, recall was 0.87, and F1-score was 0.86, thus improving on either modality alone. Stacking further enhanced calibration (ECE reduction from 0.8 to 0.04) and yielded higher net benefit across clinically relevant probability thresholds (20 to 60%). DCA indicated fusion would avoid 12 to 18 unnecessary surgical investigations per 100 patients, compared with single modality strategies. Confidence-gating maintained performance under simulated distribution shifts to support robustness. 
Decision-level multimodal fusion enhanced non-invasive diagnosis of endometriosis by improving accuracy, calibration, and clinical utility. These results demonstrated the value of integrative AI gynecological care and justify prospective validation in real-world clinical settings.</description>
    <pubDate>2026-01-18</pubDate>
    <content:encoded><![CDATA[ <p>Endometriosis remains underdiagnosed due to reliance on invasive laparoscopy. Artificial Intelligence (AI) using either imaging or structured clinical data have shown promise, but single modality approaches face limitations in sensitivity, calibration, and clinical reliability. This work seeks to evaluate whether decision-level multimodal fusion of Magnetic Resonance Imaging (MRI)-based and clinical data-based AI systems improves diagnostic performance, calibration, and net clinical benefit, compared with single-modality models. Two previously validated models were combined with retrospective data from 1,208 patients with suspected endometriosis: a Dual U-Net trained on pelvic MRI with Gradient-weighted Class Activation Mapping (Grad-CAM) interpretability and a dense neural network trained on structured clinical features with SHapley Additive exPlanations (SHAP). This study tested weighted averaging, stacking via logistic regression, and confidence-gating. Performance was assessed using accuracy, precision, recall, F1-score, and area under the curve (AUC). Calibration was evaluated using the Brier score, expected calibration error (ECE), and reliability diagrams. Clinical utility was quantified with decision curve analysis (DCA). Statistical significance was tested with McNemar’s test for accuracy and DeLong’s test for AUC. Multimodal fusion outperformed both single modality models. Weighted averaging accuracy was 0.89, precision was 0.89, recall was 0.87, and F1-score was 0.86, thus improving on either modality alone. Stacking further enhanced calibration (ECE reduction from 0.8 to 0.04) and yielded higher net benefit across clinically relevant probability thresholds (20 to 60%). DCA indicated fusion would avoid 12 to 18 unnecessary surgical investigations per 100 patients, compared with single modality strategies. Confidence-gating maintained performance under simulated distribution shifts to support robustness. 
Decision-level multimodal fusion enhanced non-invasive diagnosis of endometriosis by improving accuracy, calibration, and clinical utility. These results demonstrated the value of integrative AI gynecological care and justify prospective validation in real-world clinical settings.</p> ]]></content:encoded>
    <dc:title>Decision-Level Multimodal Fusion for Non-Invasive Diagnosis of Endometriosis: Strategies, Calibration, and Net Clinical Benefit</dc:title>
    <dc:creator>Oluwayemisi B. Fatade</dc:creator>
    <dc:creator>Oyebimpe F. Ajiboye</dc:creator>
    <dc:creator>Funmilayo A. Sanusi</dc:creator>
    <dc:creator>Kikelomo I. Okesola</dc:creator>
    <dc:creator>Grace C. Okorie</dc:creator>
    <dc:creator>Goodness O. Opateye</dc:creator>
    <dc:creator>Oluwasefunmi B. Famodimu</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml050102</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2026-01-18</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2026-01-18</prism:publicationDate>
    <prism:year>2026</prism:year>
    <prism:volume>5</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>11</prism:startingPage>
    <prism:doi>10.56578/ataiml050102</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2026_5_1/ataiml050102</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2026_5_1/ataiml050101">
    <title>Acadlore Transactions on AI and Machine Learning, 2026, Volume 5, Issue 1: A Lightweight Conditional Diffusion Model for Restoring Turbulence-degraded Facial Images</title>
    <link>https://www.acadlore.com/article/ATAIML/2026_5_1/ataiml050101</link>
    <description>Atmospheric turbulence induces severe blurring and geometric distortions in facial imagery, critically compromising the performance of downstream tasks. To overcome this challenge, a lightweight conditional diffusion model was proposed for the restoration of single-frame turbulence-degraded facial images. Super-resolution techniques were integrated with the diffusion model, and high-frequency information was incorporated as a conditional constraint to enhance structural recovery and achieve high-fidelity generation. A simplified U-Net architecture was employed within the diffusion model to reduce computational complexity while maintaining high restoration quality. Comprehensive comparative evaluations and restoration experiments across multiple scenarios demonstrate that the proposed method produces results with reduced perceptual and distributional discrepancies from ground-truth images, while also exhibiting superior inference efficiency compared to existing approaches. The presented approach not only offers a practical solution for enhancing facial imagery in turbulent environments but also establishes a promising paradigm for applying efficient diffusion models to ill-posed image restoration problems, with potential applicability to other domains such as medical and astronomical imaging.</description>
    <pubDate>2026-01-11</pubDate>
    <content:encoded><![CDATA[ Atmospheric turbulence induces severe blurring and geometric distortions in facial imagery, critically compromising the performance of downstream tasks. To overcome this challenge, a lightweight conditional diffusion model was proposed for the restoration of single-frame turbulence-degraded facial images. Super-resolution techniques were integrated with the diffusion model, and high-frequency information was incorporated as a conditional constraint to enhance structural recovery and achieve high-fidelity generation. A simplified U-Net architecture was employed within the diffusion model to reduce computational complexity while maintaining high restoration quality. Comprehensive comparative evaluations and restoration experiments across multiple scenarios demonstrate that the proposed method produces results with reduced perceptual and distributional discrepancies from ground-truth images, while also exhibiting superior inference efficiency compared to existing approaches. The presented approach not only offers a practical solution for enhancing facial imagery in turbulent environments but also establishes a promising paradigm for applying efficient diffusion models to ill-posed image restoration problems, with potential applicability to other domains such as medical and astronomical imaging. ]]></content:encoded>
    <dc:title>A Lightweight Conditional Diffusion Model for Restoring Turbulence-degraded Facial Images</dc:title>
    <dc:creator>Shaoyu Sun</dc:creator>
    <dc:creator>Pinchao Meng</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml050101</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2026-01-11</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2026-01-11</prism:publicationDate>
    <prism:year>2026</prism:year>
    <prism:volume>5</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>1</prism:startingPage>
    <prism:doi>10.56578/ataiml050101</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2026_5_1/ataiml050101</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_4/ataiml040405">
    <title>Acadlore Transactions on AI and Machine Learning, 2025, Volume 4, Issue 4: Multimodal Audio Violence Detection: Fusion of Acoustic Signals and Semantics</title>
    <link>https://www.acadlore.com/article/ATAIML/2025_4_4/ataiml040405</link>
    <description>When public safety is considered to be of paramount importance, the capacity to detect violent situations through audio monitoring has become increasingly indispensable. This paper proposed a hybrid audio text violence detection system that combines text-based information with frequency-based features to improve accuracy and reliability. The two core models of the system include a frequency-based model, Random Forest (RF) classifier, and a natural language processing (NLP) model called Bidirectional Encoder Representations from Transformers (BERT). RF classifier was trained on Mel-Frequency Cepstral Coefficients (MFCCs) and other spectrum features, whereas BERT identified violent content in transcribed speech. BERT model was improved through task-specific fine-tuning on a curated violence-related text dataset and balanced with class-weighting strategies to address category imbalance. This adaptation enhanced its ability to capture subtle violent language patterns beyond general purpose embeddings. Furthermore, a meta-learner ensemble model using eXtreme Gradient Boosting (XGBoost) classifier model could combine the probability output of the two base models. The ensemble strategy proposed in this research differed from conventionally multimodal fusion techniques, which depend on a single strategy, either NLP or audio. The XGBoost fusion model possessed the qualities derived from both base models to improve classification accuracy and robustness by creating an ideal decision boundary. The proposed system was supported by a Graphical User Interface (GUI) for multiple purposes, such as smart city applications, emergency response, and security monitoring with real-time analysis. The proposed XGBoost ensemble model attained an overall accuracy of over 97.37%, demonstrating the efficacy of integrating machine learning-based decision.</description>
    <pubDate>2025-12-23</pubDate>
    <content:encoded><![CDATA[ <p>When public safety is considered to be of paramount importance, the capacity to detect violent situations through audio monitoring has become increasingly indispensable. This paper proposed a hybrid audio text violence detection system that combines text-based information with frequency-based features to improve accuracy and reliability. The two core models of the system include a frequency-based model, Random Forest (RF) classifier, and a natural language processing (NLP) model called Bidirectional Encoder Representations from Transformers (BERT). RF classifier was trained on Mel-Frequency Cepstral Coefficients (MFCCs) and other spectrum features, whereas BERT identified violent content in transcribed speech. BERT model was improved through task-specific fine-tuning on a curated violence-related text dataset and balanced with class-weighting strategies to address category imbalance. This adaptation enhanced its ability to capture subtle violent language patterns beyond general purpose embeddings. Furthermore, a meta-learner ensemble model using eXtreme Gradient Boosting (XGBoost) classifier model could combine the probability output of the two base models. The ensemble strategy proposed in this research differed from conventionally multimodal fusion techniques, which depend on a single strategy, either NLP or audio. The XGBoost fusion model possessed the qualities derived from both base models to improve classification accuracy and robustness by creating an ideal decision boundary. The proposed system was supported by a Graphical User Interface (GUI) for multiple purposes, such as smart city applications, emergency response, and security monitoring with real-time analysis. The proposed XGBoost ensemble model attained an overall accuracy of over 97.37%, demonstrating the efficacy of integrating machine learning-based decision.</p> ]]></content:encoded>
    <dc:title>Multimodal Audio Violence Detection: Fusion of Acoustic Signals and Semantics</dc:title>
    <dc:creator>Shivwani Nadar</dc:creator>
    <dc:creator>Disha Gandhi</dc:creator>
    <dc:creator>Anupama Jawale</dc:creator>
    <dc:creator>Shweta Pawar</dc:creator>
    <dc:creator>Ruta Prabhu</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml040405</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2025-12-23</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2025-12-23</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>4</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>301</prism:startingPage>
    <prism:doi>10.56578/ataiml040405</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2025_4_4/ataiml040405</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_4/ataiml040404">
    <title>Acadlore Transactions on AI and Machine Learning, 2025, Volume 4, Issue 4: Natural Language Processing for PTSD Detection: A Systematic Review and Bibliometric Analysis</title>
    <link>https://www.acadlore.com/article/ATAIML/2025_4_4/ataiml040404</link>
    <description>Post-traumatic stress disorder (PTSD) has been recognized as a critical global mental health challenge, and the application of natural language processing (NLP) has emerged as a promising approach for its detection and management. In this study, a systematic review was conducted to evaluate the quality, quantity, and consistency of research investigating the role of NLP in PTSD detection. Through this process, prior research was consolidated, methodological gaps were identified, and a conceptual framework was formulated to guide future investigations. To complement the systematic review, a bibliometric analysis was performed to map the intellectual landscape, assess publication trends, and visualize research networks within this domain. The systematic review involved a structured search across ScienceDirect, IEEE Xplore, PubMed, and Web of Science, resulting in the retrieval of 328 records. After rigorous screening, 56 studies were included in the final synthesis. Separately, a bibliometric analysis was conducted on 4,138 publications obtained from the Web of Science database. The findings highlight that NLP methods not only enhance the detection of PTSD but also support the development of personalized treatment strategies. Ethical and security considerations were also identified as pressing concerns requiring further attention. The results of this study underscore the significance of NLP in advancing PTSD research and emphasize its potential to transform mental health services. By identifying trends, challenges, and opportunities, this study provides a foundation for future research aimed at strengthening the role of NLP in clinical practice and mental health policy.</description>
    <pubDate>2025-12-14</pubDate>
    <content:encoded><![CDATA[ Post-traumatic stress disorder (PTSD) has been recognized as a critical global mental health challenge, and the application of natural language processing (NLP) has emerged as a promising approach for its detection and management. In this study, a systematic review was conducted to evaluate the quality, quantity, and consistency of research investigating the role of NLP in PTSD detection. Through this process, prior research was consolidated, methodological gaps were identified, and a conceptual framework was formulated to guide future investigations. To complement the systematic review, a bibliometric analysis was performed to map the intellectual landscape, assess publication trends, and visualize research networks within this domain. The systematic review involved a structured search across ScienceDirect, IEEE Xplore, PubMed, and Web of Science, resulting in the retrieval of 328 records. After rigorous screening, 56 studies were included in the final synthesis. Separately, a bibliometric analysis was conducted on 4,138 publications obtained from the Web of Science database. The findings highlight that NLP methods not only enhance the detection of PTSD but also support the development of personalized treatment strategies. Ethical and security considerations were also identified as pressing concerns requiring further attention. The results of this study underscore the significance of NLP in advancing PTSD research and emphasize its potential to transform mental health services. By identifying trends, challenges, and opportunities, this study provides a foundation for future research aimed at strengthening the role of NLP in clinical practice and mental health policy. ]]></content:encoded>
    <dc:title>Natural Language Processing for PTSD Detection: A Systematic Review and Bibliometric Analysis</dc:title>
    <dc:creator>Engin Seven</dc:creator>
    <dc:creator>Eylem Yucel</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml040404</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2025-12-14</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2025-12-14</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>4</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>273</prism:startingPage>
    <prism:doi>10.56578/ataiml040404</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2025_4_4/ataiml040404</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_4/ataiml040403">
    <title>Acadlore Transactions on AI and Machine Learning, 2025, Volume 4, Issue 4: Exploring the Operational Potential of Generative Artificial Intelligence in Local Government Organizations</title>
    <link>https://www.acadlore.com/article/ATAIML/2025_4_4/ataiml040403</link>
    <description>Generative Artificial Intelligence (Gen-AI) has emerged as a transformative technology with considerable potential to enhance information management and decision-making processes in the public sector. The present study examined how Gen-AI, with specific attention to Microsoft Copilot, can be integrated into local government organizations to support routine operations and strategic tasks. An Integrative Literature Review (ILR) methodology was applied, through which scholarly sources were systematically evaluated and findings were synthesized across predefined research questions and thematic categories. The review emphasized three focal areas: the conceptual foundations of Gen-AI, the challenges associated with its integration, and the opportunities for improving public sector information analysis and administrative practices. Evidence indicated that Gen-AI adoption in local government contexts can substantially improve efficiency in data retrieval, accelerate decision-making processes, enhance service responsiveness, and streamline administrative workflows. At the same time, significant risks were identified, including fragmented data infrastructures, limited digital and Artificial Intelligence (AI) literacy among personnel, and ongoing ethical, transparency, and regulatory challenges. Recommendations were formulated for future research, including empirical assessments of Gen-AI deployment across diverse local government contexts and longitudinal studies to evaluate the sustainability of AI-driven transformations. The insights generated from this study provide actionable guidance for local government organizations seeking to evaluate both the benefits and the risks of integrating Gen-AI technologies into information management and decision-support systems, thereby contributing to ongoing debates on public sector innovation and digital governance.</description>
    <pubDate>2025-11-02</pubDate>
    <content:encoded><![CDATA[ Generative Artificial Intelligence (Gen-AI) has emerged as a transformative technology with considerable potential to enhance information management and decision-making processes in the public sector. The present study examined how Gen-AI, with specific attention to Microsoft Copilot, can be integrated into local government organizations to support routine operations and strategic tasks. An Integrative Literature Review (ILR) methodology was applied, through which scholarly sources were systematically evaluated and findings were synthesized across predefined research questions and thematic categories. The review emphasized three focal areas: the conceptual foundations of Gen-AI, the challenges associated with its integration, and the opportunities for improving public sector information analysis and administrative practices. Evidence indicated that Gen-AI adoption in local government contexts can substantially improve efficiency in data retrieval, accelerate decision-making processes, enhance service responsiveness, and streamline administrative workflows. At the same time, significant risks were identified, including fragmented data infrastructures, limited digital and Artificial Intelligence (AI) literacy among personnel, and ongoing ethical, transparency, and regulatory challenges. Recommendations were formulated for future research, including empirical assessments of Gen-AI deployment across diverse local government contexts and longitudinal studies to evaluate the sustainability of AI-driven transformations. The insights generated from this study provide actionable guidance for local government organizations seeking to evaluate both the benefits and the risks of integrating Gen-AI technologies into information management and decision-support systems, thereby contributing to ongoing debates on public sector innovation and digital governance. ]]></content:encoded>
    <dc:title>Exploring the Operational Potential of Generative Artificial Intelligence in Local Government Organizations</dc:title>
    <dc:creator>Emre Erturk</dc:creator>
    <dc:creator>Lahiru Dissanayaka</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml040403</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2025-11-02</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2025-11-02</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>4</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>263</prism:startingPage>
    <prism:doi>10.56578/ataiml040403</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2025_4_4/ataiml040403</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_4/ataiml040402">
    <title>Acadlore Transactions on AI and Machine Learning, 2025, Volume 4, Issue 4: LMS-YOLO: A StarNet-Enhanced Lightweight Framework for Robust Marine Object Detection in Complex Water Surface Environments</title>
    <link>https://www.acadlore.com/article/ATAIML/2025_4_4/ataiml040402</link>
    <description>Accurate and efficient detection of small-scale targets on dynamic water surfaces remains a critical challenge in the deployment of unmanned surface vehicles (USVs) for maritime applications. Complex background interference—such as wave motion, sunlight reflections, and low contrast—often leads to missed or false detections, particularly when using conventional convolutional neural networks. To address these issues, this study introduces LMS-YOLO, a lightweight detection framework built upon the YOLOv8n architecture and optimized for real-time marine object recognition. The proposed network integrates three key components: (1) a C2f-SBS module incorporating StarNet-based Star Blocks, which streamlines multi-scale feature extraction while reducing parameter overhead; (2) a Shared Convolutional Lightweight Detection Head (SCLD), designed to enhance detection precision across scales using a unified convolutional strategy; and (3) a Mixed Local Channel Attention (MLCA) module, which reinforces context-aware representation under complex maritime conditions. Evaluated on the WSODD and FloW-Img datasets, LMS-YOLO achieves a 5.5% improvement in precision and a 2.3% gain in mAP@0.5 compared to YOLOv8n, while reducing parameter count and computational cost by 37.18% and 34.57%, respectively. The model operates at 128 FPS on standard hardware, demonstrating its practical viability for embedded deployment in marine perception systems. These results highlight the potential of LMS-YOLO as a deployable solution for high-speed, high-accuracy marine object detection in real-world environments.</description>
    <pubDate>2025-10-16</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Accurate and efficient detection of small-scale targets on dynamic water surfaces remains a critical challenge in the deployment of unmanned surface vehicles (USVs) for maritime applications. Complex background interference—such as wave motion, sunlight reflections, and low contrast—often leads to missed or false detections, particularly when using conventional convolutional neural networks. To address these issues, this study introduces LMS-YOLO, a lightweight detection framework built upon the YOLOv8n architecture and optimized for real-time marine object recognition. The proposed network integrates three key components: (1) a C2f-SBS module incorporating StarNet-based Star Blocks, which streamlines multi-scale feature extraction while reducing parameter overhead; (2) a Shared Convolutional Lightweight Detection Head (SCLD), designed to enhance detection precision across scales using a unified convolutional strategy; and (3) a Mixed Local Channel Attention (MLCA) module, which reinforces context-aware representation under complex maritime conditions. Evaluated on the WSODD and FloW-Img datasets, LMS-YOLO achieves a 5.5% improvement in precision and a 2.3% gain in mAP@0.5 compared to YOLOv8n, while reducing parameter count and computational cost by 37.18% and 34.57%, respectively. The model operates at 128 FPS on standard hardware, demonstrating its practical viability for embedded deployment in marine perception systems. These results highlight the potential of LMS-YOLO as a deployable solution for high-speed, high-accuracy marine object detection in real-world environments.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>LMS-YOLO: A StarNet-Enhanced Lightweight Framework for Robust Marine Object Detection in Complex Water Surface Environments</dc:title>
    <dc:creator>Yuhan Sun</dc:creator>
    <dc:creator>Xin Liu</dc:creator>
    <dc:creator>Qingfa Zhang</dc:creator>
    <dc:creator>Mingzhi Shao</dc:creator>
    <dc:creator>Tengwen Zhang</dc:creator>
    <dc:identifier>doi:10.56578/ataiml040402</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2025-10-16</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2025-10-16</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>4</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>247</prism:startingPage>
    <prism:doi>10.56578/ataiml040402</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2025_4_4/ataiml040402</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_4/ataiml040401">
    <title>Acadlore Transactions on AI and Machine Learning, 2025, Volume 4, Issue 4: Real-Time Anomaly Detection in IoT Networks Using a Hybrid Deep Learning Model</title>
    <link>https://www.acadlore.com/article/ATAIML/2025_4_4/ataiml040401</link>
    <description>The rapid expansion of Internet of Things (IoT) systems and networks has led to increased challenges regarding security and system reliability. Anomaly detection has become a critical task for identifying system flaws, cyberattacks, and failures in IoT environments. This study proposes a hybrid deep learning (DL) approach combining Autoencoders (AE) and Long Short-Term Memory (LSTM) networks to detect anomalies in real-time within IoT networks. In this model, normal data trends were learned in an unsupervised manner using an AE, while temporal dependencies in time-series data were captured through the use of an LSTM network. Experiments conducted on publicly available IoT datasets, namely the Kaggle IoT Network Traffic Dataset and the Numenta Anomaly Benchmark (NAB) dataset, demonstrate that the proposed hybrid model outperforms conventional machine learning (ML) algorithms, such as Support Vector Machine (SVM) and Random Forest (RF), in terms of accuracy, precision, recall, and F1-score. The hybrid model achieved a recall of 96.2%, a precision of 95.8%, and an accuracy of 97.5%, with negligible false negatives and false positives. Furthermore, the model is capable of handling real-time data with a latency of just 75 milliseconds, making it suitable for large-scale IoT applications. The performance evaluation, which utilized a diverse set of anomaly scenarios, highlighted the robustness and scalability of the proposed model. The Kaggle IoT Network Traffic Dataset, consisting of approximately 630,000 records across six months and 115 features, along with the NAB dataset, which includes around 365,000 sensor readings and 55 features, provided comprehensive data for evaluating the model’s effectiveness in real-world conditions. These findings suggest that the hybrid DL framework offers a robust, scalable, and efficient solution for anomaly detection in IoT networks, contributing to enhanced system security and dependability.</description>
    <pubDate>2025-10-09</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;The rapid expansion of Internet of Things (IoT) systems and networks has led to increased challenges regarding security and system reliability. Anomaly detection has become a critical task for identifying system flaws, cyberattacks, and failures in IoT environments. This study proposes a hybrid deep learning (DL) approach combining Autoencoders (AE) and Long Short-Term Memory (LSTM) networks to detect anomalies in real-time within IoT networks. In this model, normal data trends were learned in an unsupervised manner using an AE, while temporal dependencies in time-series data were captured through the use of an LSTM network. Experiments conducted on publicly available IoT datasets, namely the Kaggle IoT Network Traffic Dataset and the Numenta Anomaly Benchmark (NAB) dataset, demonstrate that the proposed hybrid model outperforms conventional machine learning (ML) algorithms, such as Support Vector Machine (SVM) and Random Forest (RF), in terms of accuracy, precision, recall, and F1-score. The hybrid model achieved a recall of 96.2%, a precision of 95.8%, and an accuracy of 97.5%, with negligible false negatives and false positives. Furthermore, the model is capable of handling real-time data with a latency of just 75 milliseconds, making it suitable for large-scale IoT applications. The performance evaluation, which utilized a diverse set of anomaly scenarios, highlighted the robustness and scalability of the proposed model. The Kaggle IoT Network Traffic Dataset, consisting of approximately 630,000 records across six months and 115 features, along with the NAB dataset, which includes around 365,000 sensor readings and 55 features, provided comprehensive data for evaluating the model’s effectiveness in real-world conditions. 
These findings suggest that the hybrid DL framework offers a robust, scalable, and efficient solution for anomaly detection in IoT networks, contributing to enhanced system security and dependability.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Real-Time Anomaly Detection in IoT Networks Using a Hybrid Deep Learning Model</dc:title>
    <dc:creator>Anil Kumar Pallikonda</dc:creator>
    <dc:creator>Vinay Kumar Bandarapalli</dc:creator>
    <dc:creator>Aruna Vipparla</dc:creator>
    <dc:identifier>doi:10.56578/ataiml040401</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2025-10-09</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2025-10-09</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>4</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>235</prism:startingPage>
    <prism:doi>10.56578/ataiml040401</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2025_4_4/ataiml040401</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_3/ataiml040305">
    <title>Acadlore Transactions on AI and Machine Learning, 2025, Volume 4, Issue 3: Application of Artificial Intelligence on MNIST Dataset for Handwritten Digit Classification for Evaluation of Deep Learning Models</title>
    <link>https://www.acadlore.com/article/ATAIML/2025_4_3/ataiml040305</link>
    <description>Handwritten digit classification represents a foundational task in computer vision and has been widely adopted in applications ranging from Optical Character Recognition (OCR) to biometric authentication. Despite the availability of large benchmark datasets, the development of models that achieve both high accuracy and computational efficiency remains a central challenge. In this study, the performance of three representative machine learning paradigms—Chi-Squared Automatic Interaction Detection (CHAID), Generative Adversarial Networks (GANs), and Feedforward Deep Neural Networks (FFDNNs)—was systematically evaluated on the Modified National Institute of Standards and Technology (MNIST) dataset. The assessment was conducted with a focus on classification accuracy, computational efficiency, and interpretability. Experimental results demonstrated that deep learning approaches substantially outperformed traditional Decision Tree (DT) methods. GANs and FFDNNs achieved classification accuracies of approximately 97%, indicating strong robustness and generalization capability for handwritten digit recognition tasks. In contrast, CHAID achieved only 29.61% accuracy, highlighting the limited suitability of DT models for high-dimensional image data. It was further observed that, despite the computational demand of adversarial training, GANs required less time per epoch than FFDNNs when executed on modern GPU architectures, thereby underscoring their potential scalability. These findings reinforce the importance of model selection in practical deployment, particularly where accuracy, computational efficiency, and interpretability must be jointly considered. 
The study contributes to the ongoing discourse on the role of artificial intelligence (AI) in pattern recognition by providing a comparative analysis of classical machine learning and deep learning approaches, thereby offering guidance for the development of reliable and efficient digit recognition systems suitable for real-world applications.</description>
    <pubDate>2025-09-18</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Handwritten digit classification represents a foundational task in computer vision and has been widely adopted in applications ranging from Optical Character Recognition (OCR) to biometric authentication. Despite the availability of large benchmark datasets, the development of models that achieve both high accuracy and computational efficiency remains a central challenge. In this study, the performance of three representative machine learning paradigms—Chi-Squared Automatic Interaction Detection (CHAID), Generative Adversarial Networks (GANs), and Feedforward Deep Neural Networks (FFDNNs)—was systematically evaluated on the Modified National Institute of Standards and Technology (MNIST) dataset. The assessment was conducted with a focus on classification accuracy, computational efficiency, and interpretability. Experimental results demonstrated that deep learning approaches substantially outperformed traditional Decision Tree (DT) methods. GANs and FFDNNs achieved classification accuracies of approximately 97%, indicating strong robustness and generalization capability for handwritten digit recognition tasks. In contrast, CHAID achieved only 29.61% accuracy, highlighting the limited suitability of DT models for high-dimensional image data. It was further observed that, despite the computational demand of adversarial training, GANs required less time per epoch than FFDNNs when executed on modern GPU architectures, thereby underscoring their potential scalability. These findings reinforce the importance of model selection in practical deployment, particularly where accuracy, computational efficiency, and interpretability must be jointly considered. 
The study contributes to the ongoing discourse on the role of artificial intelligence (AI) in pattern recognition by providing a comparative analysis of classical machine learning and deep learning approaches, thereby offering guidance for the development of reliable and efficient digit recognition systems suitable for real-world applications.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Application of Artificial Intelligence on MNIST Dataset for Handwritten Digit Classification for Evaluation of Deep Learning Models</dc:title>
    <dc:creator>Jide Ebenezer Taiwo Akinsola</dc:creator>
    <dc:creator>Micheal Adeolu Olatunbosun</dc:creator>
    <dc:creator>Ifeoluwa Michael Olaniyi</dc:creator>
    <dc:creator>Moruf Adedeji Adeagbo</dc:creator>
    <dc:creator>Emmanuel Ajayi Olajubu</dc:creator>
    <dc:creator>Ganiyu Adesola Aderounmu</dc:creator>
    <dc:identifier>doi:10.56578/ataiml040305</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2025-09-18</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2025-09-18</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>3</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>219</prism:startingPage>
    <prism:doi>10.56578/ataiml040305</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2025_4_3/ataiml040305</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_3/ataiml040304">
    <title>Acadlore Transactions on AI and Machine Learning, 2025, Volume 4, Issue 3: Artificial Intelligence in Electroencephalography: A Comprehensive Survey of Methods, Challenges, and Applications</title>
    <link>https://www.acadlore.com/article/ATAIML/2025_4_3/ataiml040304</link>
    <description>Electroencephalography (EEG) provides a non-invasive approach for capturing brain dynamics and has become a cornerstone in clinical diagnostics, cognitive neuroscience, and neuroengineering. The inherent complexity, low signal-to-noise ratio, and variability of EEG signals have historically posed substantial challenges for interpretation. In recent years, artificial intelligence (AI), encompassing both classical machine learning (ML) and advanced deep learning (DL) methodologies, has transformed EEG analysis by enabling automatic feature extraction, robust classification, regression-based state estimation, and synthetic data generation. This survey synthesizes developments up to 2025, structured along three dimensions. The first dimension is task category, e.g., classification, regression, generation and augmentation, clustering and anomaly detection. The second dimension is the methodological framework, e.g., shallow learners, Convolutional Neural Networks (CNNs), Recurrent Neural Networks (RNNs), Transformers, Graph Neural Networks (GNNs), and hybrid approaches. The third dimension is application domain, e.g., neurological disease diagnosis, brain-computer interfaces (BCIs), affective computing, cognitive workload monitoring, and specialized tasks such as sleep staging and artifact removal. Publicly available EEG datasets and benchmarking initiatives that have catalyzed progress were reviewed in this study. The strengths and limitations of current AI models were critically evaluated, including constraints related to data scarcity, inter-subject variability, noise sensitivity, limited interpretability, and challenges of real-world deployment. 
Future research directions were highlighted, including federated learning (FL) and privacy-preserving learning, self-supervised pretraining of Transformer-based architectures, explainable artificial intelligence (XAI) tailored to neurophysiological signals, multimodal fusion with complementary biosignals, and the integration of lightweight on-device AI for continuous monitoring. By bridging historical foundations with cutting-edge innovations, this survey aims to provide a comprehensive reference for advancing the development of accurate, robust, and transparent AI-driven EEG systems.</description>
    <pubDate>2025-09-15</pubDate>
    <content:encoded>&lt;![CDATA[ Electroencephalography (EEG) provides a non-invasive approach for capturing brain dynamics and has become a cornerstone in clinical diagnostics, cognitive neuroscience, and neuroengineering. The inherent complexity, low signal-to-noise ratio, and variability of EEG signals have historically posed substantial challenges for interpretation. In recent years, artificial intelligence (AI), encompassing both classical machine learning (ML) and advanced deep learning (DL) methodologies, has transformed EEG analysis by enabling automatic feature extraction, robust classification, regression-based state estimation, and synthetic data generation. This survey synthesizes developments up to 2025, structured along three dimensions. The first dimension is task category, e.g., classification, regression, generation and augmentation, clustering and anomaly detection. The second dimension is the methodological framework, e.g., shallow learners, Convolutional Neural Networks (CNNs), Recurrent Neural Networks (RNNs), Transformers, Graph Neural Networks (GNNs), and hybrid approaches. The third dimension is application domain, e.g., neurological disease diagnosis, brain-computer interfaces (BCIs), affective computing, cognitive workload monitoring, and specialized tasks such as sleep staging and artifact removal. Publicly available EEG datasets and benchmarking initiatives that have catalyzed progress were reviewed in this study. The strengths and limitations of current AI models were critically evaluated, including constraints related to data scarcity, inter-subject variability, noise sensitivity, limited interpretability, and challenges of real-world deployment. 
Future research directions were highlighted, including federated learning (FL) and privacy-preserving learning, self-supervised pretraining of Transformer-based architectures, explainable artificial intelligence (XAI) tailored to neurophysiological signals, multimodal fusion with complementary biosignals, and the integration of lightweight on-device AI for continuous monitoring. By bridging historical foundations with cutting-edge innovations, this survey aims to provide a comprehensive reference for advancing the development of accurate, robust, and transparent AI-driven EEG systems. ]]&gt;</content:encoded>
    <dc:title>Artificial Intelligence in Electroencephalography: A Comprehensive Survey of Methods, Challenges, and Applications</dc:title>
    <dc:creator>Abdulvahap Mutlu</dc:creator>
    <dc:creator>Şengül Doğan</dc:creator>
    <dc:creator>Türker Tuncer</dc:creator>
    <dc:identifier>doi:10.56578/ataiml040304</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2025-09-15</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2025-09-15</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>3</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>186</prism:startingPage>
    <prism:doi>10.56578/ataiml040304</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2025_4_3/ataiml040304</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_3/ataiml040303">
    <title>Acadlore Transactions on AI and Machine Learning, 2025, Volume 4, Issue 3: Interpretable Deep Learning Framework for Early Classification of Tomato and Grapevine Leaf Diseases</title>
    <link>https://www.acadlore.com/article/ATAIML/2025_4_3/ataiml040303</link>
    <description>The integration of artificial intelligence (AI) in precision agriculture has facilitated significant advancements in crop health monitoring, particularly in the early identification and classification of foliar diseases. Accurate and timely diagnosis of plant diseases is critical for minimizing crop loss and enhancing agricultural sustainability. In this study, an interpretable deep learning model—referred to as the Multi-Crop Leaf Disease (MCLD) framework—was developed based on a Convolutional Neural Network (CNN) architecture, tailored for the classification of tomato and grapevine leaf diseases. The model architecture was derived from the Visual Geometry Group Network (VGGNet), optimized to improve computational efficiency while maintaining classification accuracy. Leaf image datasets comprising healthy and diseased samples were employed to train and evaluate the model. Performance was assessed using multiple statistical metrics, including classification accuracy, sensitivity, specificity, precision, recall, and F1-score. The proposed MCLD framework achieved a detection accuracy of 98.40% for grapevine leaf diseases and a classification accuracy of 95.71% for tomato leaf conditions. Despite these promising results, further research is required to address limitations such as generalizability across variable environmental conditions and the integration of field-acquired images. The implementation of such interpretable AI-based systems is expected to substantially enhance precision agriculture by supporting rapid and accurate disease management strategies.</description>
    <pubDate>2025-08-17</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;The integration of artificial intelligence (AI) in precision agriculture has facilitated significant advancements in crop health monitoring, particularly in the early identification and classification of foliar diseases. Accurate and timely diagnosis of plant diseases is critical for minimizing crop loss and enhancing agricultural sustainability. In this study, an interpretable deep learning model—referred to as the Multi-Crop Leaf Disease (MCLD) framework—was developed based on a Convolutional Neural Network (CNN) architecture, tailored for the classification of tomato and grapevine leaf diseases. The model architecture was derived from the Visual Geometry Group Network (VGGNet), optimized to improve computational efficiency while maintaining classification accuracy. Leaf image datasets comprising healthy and diseased samples were employed to train and evaluate the model. Performance was assessed using multiple statistical metrics, including classification accuracy, sensitivity, specificity, precision, recall, and F1-score. The proposed MCLD framework achieved a detection accuracy of 98.40% for grapevine leaf diseases and a classification accuracy of 95.71% for tomato leaf conditions. Despite these promising results, further research is required to address limitations such as generalizability across variable environmental conditions and the integration of field-acquired images. The implementation of such interpretable AI-based systems is expected to substantially enhance precision agriculture by supporting rapid and accurate disease management strategies.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Interpretable Deep Learning Framework for Early Classification of Tomato and Grapevine Leaf Diseases</dc:title>
    <dc:creator>Geethika Ramaiah Edara</dc:creator>
    <dc:creator>Aluru Ranganadha Reddy</dc:creator>
    <dc:identifier>doi:10.56578/ataiml040303</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2025-08-17</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2025-08-17</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>3</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>174</prism:startingPage>
    <prism:doi>10.56578/ataiml040303</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2025_4_3/ataiml040303</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_3/ataiml040302">
    <title>Acadlore Transactions on AI and Machine Learning, 2025, Volume 4, Issue 3: Comparative Analysis of Machine Learning Models for Predicting Indonesia’s GDP Growth</title>
    <link>https://www.acadlore.com/article/ATAIML/2025_4_3/ataiml040302</link>
    <description>Accurate forecasting of Gross Domestic Product (GDP) growth remains essential for supporting strategic economic policy development, particularly in emerging economies such as Indonesia. In this study, a hybrid predictive framework was constructed by integrating fuzzy logic representations with machine learning algorithms to improve the accuracy and interpretability of GDP growth estimation. Annual macroeconomic data from 1970 to 2023 were utilised, and 19 input features were engineered by combining numerical economic indicators with fuzzy-based linguistic variables, along with a forecast label generated via the Non-Stationary Fuzzy Time Series (NSFTS) method. Six supervised learning models were comparatively assessed, including Random Forest (RF), Support Vector Regression (SVR), eXtreme Gradient Boosting (XGBoost), Huber Regressor, Decision Tree (DT), and Multilayer Perceptron (MLP). Model performance was evaluated using Mean Absolute Error (MAE) and accuracy metrics. Among the tested models, the RF algorithm demonstrated superior performance, achieving the lowest MAE and an accuracy of 99.45% in forecasting GDP growth for 2023. Its robustness in capturing non-linear patterns and short-term economic fluctuations was particularly evident when compared to other models. These findings underscore the RF model's capability to serve as a reliable tool for economic forecasting in data-limited and volatile macroeconomic environments. By enabling more precise GDP growth predictions, the proposed hybrid framework offers a valuable decision-support mechanism for policymakers in Indonesia, contributing to more informed resource allocation, proactive economic intervention, and long-term development planning. The methodological innovation of integrating NSFTS with machine learning extends the frontier of data-driven macroeconomic modelling and provides a replicable template for forecasting applications in other emerging markets.</description>
    <pubDate>2025-07-03</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Accurate forecasting of Gross Domestic Product (GDP) growth remains essential for supporting strategic economic policy development, particularly in emerging economies such as Indonesia. In this study, a hybrid predictive framework was constructed by integrating fuzzy logic representations with machine learning algorithms to improve the accuracy and interpretability of GDP growth estimation. Annual macroeconomic data from 1970 to 2023 were utilised, and 19 input features were engineered by combining numerical economic indicators with fuzzy-based linguistic variables, along with a forecast label generated via the Non-Stationary Fuzzy Time Series (NSFTS) method. Six supervised learning models were comparatively assessed, including Random Forest (RF), Support Vector Regression (SVR), eXtreme Gradient Boosting (XGBoost), Huber Regressor, Decision Tree (DT), and Multilayer Perceptron (MLP). Model performance was evaluated using Mean Absolute Error (MAE) and accuracy metrics. Among the tested models, the RF algorithm demonstrated superior performance, achieving the lowest MAE and an accuracy of 99.45% in forecasting GDP growth for 2023. Its robustness in capturing non-linear patterns and short-term economic fluctuations was particularly evident when compared to other models. These findings underscore the RF model's capability to serve as a reliable tool for economic forecasting in data-limited and volatile macroeconomic environments. By enabling more precise GDP growth predictions, the proposed hybrid framework offers a valuable decision-support mechanism for policymakers in Indonesia, contributing to more informed resource allocation, proactive economic intervention, and long-term development planning. 
The methodological innovation of integrating NSFTS with machine learning extends the frontier of data-driven macroeconomic modelling and provides a replicable template for forecasting applications in other emerging markets.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Comparative Analysis of Machine Learning Models for Predicting Indonesia’s GDP Growth</dc:title>
    <dc:creator>Rossi Passarella</dc:creator>
    <dc:creator>Muhammad Ikhsan Setiawan</dc:creator>
    <dc:creator>Zaqqi Yamani</dc:creator>
    <dc:identifier>doi:10.56578/ataiml040302</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2025-07-03</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2025-07-03</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>3</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>157</prism:startingPage>
    <prism:doi>10.56578/ataiml040302</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2025_4_3/ataiml040302</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_3/ataiml040301">
    <title>Acadlore Transactions on AI and Machine Learning, 2025, Volume 4, Issue 3: Development of a Machine Learning-Driven Web Platform for Automated Identification of Rice Insect Pests</title>
    <link>https://www.acadlore.com/article/ATAIML/2025_4_3/ataiml040301</link>
    <description>An advanced machine learning (ML)-driven web platform was developed and deployed to automate the identification of rice insect pests, addressing limitations associated with traditional pest detection methods and conventional ML algorithms. Historically, pest identification in rice cultivation has relied on expert evaluation of pest species and their associated crop damage, a process that is labor-intensive, time-consuming, and prone to inaccuracies, particularly in the misclassification of pest species. In this study, a subset of the publicly available IP102 benchmark dataset, consisting of 7,736 images across 12 rice pest categories, was curated for model training and evaluation. Two classification models—a Support Vector Machine (SVM) and a deep Convolutional Neural Network (CNN) based on the Inception_ResNetV2 architecture—were implemented and assessed using standard performance metrics. Experimental results demonstrated that the Inception_ResNetV2 model significantly outperformed SVM, achieving an accuracy of 99.97%, a precision of 99.46%, a recall of 99.81%, and an F1-score of 99.53%. Owing to its superior performance, the Inception_ResNetV2 model was integrated into a web-based application designed for real-time pest identification. The deployed system exhibited an average response time of 5.70 seconds, representing a notable improvement in operational efficiency and usability over previous implementations. The results underscore the potential of artificial intelligence in transforming agricultural practices by enabling accurate, scalable, and timely pest diagnostics, thereby enhancing pest management strategies, mitigating crop losses, and supporting global food security initiatives.</description>
    <pubDate>2025-05-22</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;An advanced machine learning (ML)-driven web platform was developed and deployed to automate the identification of rice insect pests, addressing limitations associated with traditional pest detection methods and conventional ML algorithms. Historically, pest identification in rice cultivation has relied on expert evaluation of pest species and their associated crop damage, a process that is labor-intensive, time-consuming, and prone to inaccuracies, particularly in the misclassification of pest species. In this study, a subset of the publicly available IP102 benchmark dataset, consisting of 7,736 images across 12 rice pest categories, was curated for model training and evaluation. Two classification models—a Support Vector Machine (SVM) and a deep Convolutional Neural Network (CNN) based on the Inception_ResNetV2 architecture—were implemented and assessed using standard performance metrics. Experimental results demonstrated that the Inception_ResNetV2 model significantly outperformed SVM, achieving an accuracy of 99.97%, a precision of 99.46%, a recall of 99.81%, and an F1-score of 99.53%. Owing to its superior performance, the Inception_ResNetV2 model was integrated into a web-based application designed for real-time pest identification. The deployed system exhibited an average response time of 5.70 seconds, representing a notable improvement in operational efficiency and usability over previous implementations. The results underscore the potential of artificial intelligence in transforming agricultural practices by enabling accurate, scalable, and timely pest diagnostics, thereby enhancing pest management strategies, mitigating crop losses, and supporting global food security initiatives.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Development of a Machine Learning-Driven Web Platform for Automated Identification of Rice Insect Pests</dc:title>
    <dc:creator>Samuel N. John</dc:creator>
    <dc:creator>Nasiru A. Musa</dc:creator>
    <dc:creator>Joshua S. Mommoh</dc:creator>
    <dc:creator>Etinosa Noma-Osaghe</dc:creator>
    <dc:creator>Ukeme I. Udioko</dc:creator>
    <dc:creator>James L. Obetta</dc:creator>
    <dc:identifier>doi:10.56578/ataiml040301</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2025-05-22</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2025-05-22</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>3</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>137</prism:startingPage>
    <prism:doi>10.56578/ataiml040301</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2025_4_3/ataiml040301</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_2/ataiml040205">
    <title>Acadlore Transactions on AI and Machine Learning, 2025, Volume 4, Issue 2: A Stable Region-Based Image Segmentation Model Integrating Fuzzy Logic and Geometric Principles</title>
    <link>https://www.acadlore.com/article/ATAIML/2025_4_2/ataiml040205</link>
    <description>Image segmentation remains a foundational task in computer vision, remote sensing, medical imaging, and object detection, serving as a critical step in delineating object boundaries and extracting meaningful regions from complex visual data. However, conventional segmentation methods often exhibit limited robustness in the presence of noise, intensity inhomogeneity, and intricate region geometries. To address these challenges, a novel segmentation framework was developed, integrating fuzzy logic with geometric principles. Uncertainty and overlapping intensity distributions within regions were modeled through fuzzy membership functions, allowing for more flexible and resilient region characterization. Simultaneously, geometric principles—specifically image gradients and curvature—were incorporated to guide boundary evolution, thereby improving delineation precision. A fuzzy energy functional was constructed to jointly optimize region homogeneity, edge preservation, and boundary smoothness. This functional was minimized through an iterative level-set evolution process, allowing dynamic adaptation to varying image characteristics while maintaining computational efficiency. The proposed model demonstrated robust performance across diverse image modalities, including those with high noise levels and complex regional structures, outperforming traditional methods in terms of segmentation accuracy and stability. Its applicability to tasks demanding high-precision region-based analysis highlights its potential for widespread deployment in advanced imaging applications.</description>
    <pubDate>2025-05-22</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Image segmentation remains a foundational task in computer vision, remote sensing, medical imaging, and object detection, serving as a critical step in delineating object boundaries and extracting meaningful regions from complex visual data. However, conventional segmentation methods often exhibit limited robustness in the presence of noise, intensity inhomogeneity, and intricate region geometries. To address these challenges, a novel segmentation framework was developed, integrating fuzzy logic with geometric principles. Uncertainty and overlapping intensity distributions within regions were modeled through fuzzy membership functions, allowing for more flexible and resilient region characterization. Simultaneously, geometric principles—specifically image gradients and curvature—were incorporated to guide boundary evolution, thereby improving delineation precision. A fuzzy energy functional was constructed to jointly optimize region homogeneity, edge preservation, and boundary smoothness. This functional was minimized through an iterative level-set evolution process, allowing dynamic adaptation to varying image characteristics while maintaining computational efficiency. The proposed model demonstrated robust performance across diverse image modalities, including those with high noise levels and complex regional structures, outperforming traditional methods in terms of segmentation accuracy and stability. Its applicability to tasks demanding high-precision region-based analysis highlights its potential for widespread deployment in advanced imaging applications.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>A Stable Region-Based Image Segmentation Model Integrating Fuzzy Logic and Geometric Principles</dc:title>
    <dc:creator>Ibrar Hussain</dc:creator>
    <dc:identifier>doi:10.56578/ataiml040205</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2025-05-22</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2025-05-22</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>124</prism:startingPage>
    <prism:doi>10.56578/ataiml040205</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2025_4_2/ataiml040205</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_2/ataiml040204">
    <title>Acadlore Transactions on AI and Machine Learning, 2025, Volume 4, Issue 2: Customer Churn Prediction in the Banking Sector Using Sentence Transformers and a Stacking Ensemble Framework</title>
    <link>https://www.acadlore.com/article/ATAIML/2025_4_2/ataiml040204</link>
    <description>As market saturation and competitive pressure intensify within the banking sector, the mitigation of customer churn has emerged as a critical concern. Given that the cost of acquiring new clients substantially exceeds that of retaining existing ones, the development of highly accurate churn prediction models has become imperative. In this study, a hybrid customer churn prediction model was developed by integrating Sentence Transformers with a stacking ensemble learning architecture. Customer behavioral data containing textual content was transformed into dense vector representations through the use of Sentence Transformers, thereby capturing contextual and semantic nuances. These embeddings were combined with normalized structured features. To enhance predictive performance, a stacking ensemble method was employed to integrate the outputs of multiple base models, including random forest, Gradient Boosting Tree (GBT), and Support Vector Machine (SVM). Experimental evaluation was conducted on real-world banking data, and the proposed model demonstrated superior performance relative to conventional baseline approaches, achieving notable improvements in both accuracy and the area under the curve (AUC). Furthermore, the analysis of model outputs revealed several salient predictors of customer attrition, such as anomalous transaction behavior, prolonged inactivity, and indicators of dissatisfaction with customer service. These insights are expected to inform the development of targeted intervention strategies aimed at strengthening customer retention, improving satisfaction, and fostering long-term institutional growth and stability.</description>
    <pubDate>2025-04-23</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;As market saturation and competitive pressure intensify within the banking sector, the mitigation of customer churn has emerged as a critical concern. Given that the cost of acquiring new clients substantially exceeds that of retaining existing ones, the development of highly accurate churn prediction models has become imperative. In this study, a hybrid customer churn prediction model was developed by integrating Sentence Transformers with a stacking ensemble learning architecture. Customer behavioral data containing textual content was transformed into dense vector representations through the use of Sentence Transformers, thereby capturing contextual and semantic nuances. These embeddings were combined with normalized structured features. To enhance predictive performance, a stacking ensemble method was employed to integrate the outputs of multiple base models, including random forest, Gradient Boosting Tree (GBT), and Support Vector Machine (SVM). Experimental evaluation was conducted on real-world banking data, and the proposed model demonstrated superior performance relative to conventional baseline approaches, achieving notable improvements in both accuracy and the area under the curve (AUC). Furthermore, the analysis of model outputs revealed several salient predictors of customer attrition, such as anomalous transaction behavior, prolonged inactivity, and indicators of dissatisfaction with customer service. These insights are expected to inform the development of targeted intervention strategies aimed at strengthening customer retention, improving satisfaction, and fostering long-term institutional growth and stability.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Customer Churn Prediction in the Banking Sector Using Sentence Transformers and a Stacking Ensemble Framework</dc:title>
    <dc:creator>Jing Gao</dc:creator>
    <dc:creator>Huiyi Wang</dc:creator>
    <dc:creator>Yuanlin Lu</dc:creator>
    <dc:creator>Lina Yu</dc:creator>
    <dc:identifier>doi:10.56578/ataiml040204</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2025-04-23</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2025-04-23</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>109</prism:startingPage>
    <prism:doi>10.56578/ataiml040204</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2025_4_2/ataiml040204</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_2/ataiml040203">
    <title>Acadlore Transactions on AI and Machine Learning, 2025, Volume 4, Issue 2: Enhancing Non-Invasive Diagnosis of Endometriosis Through Explainable Artificial Intelligence: A Grad-CAM Approach</title>
    <link>https://www.acadlore.com/article/ATAIML/2025_4_2/ataiml040203</link>
    <description>Significant advancements in artificial intelligence (AI) have transformed clinical decision-making, particularly in disease detection and management. Endometriosis, a chronic and often debilitating gynecological disorder, affects a substantial proportion of reproductive-age women and is associated with pelvic pain, infertility, and a reduced quality of life. Despite its high prevalence, non-invasive and accurate diagnostic methods remain limited, frequently resulting in delayed or missed diagnoses. In this study, a novel diagnostic framework was developed by integrating deep learning (DL) with explainable artificial intelligence (XAI) to address existing limitations in the early and non-invasive detection of endometriosis. Abdominopelvic magnetic resonance imaging (MRI) data were obtained from the Crestview Radiology Center in Victoria Island, Lagos State. Preprocessing procedures, including Digital Imaging and Communications in Medicine (DICOM)-to-PNG conversion, image resizing, and intensity normalization, were applied to standardize the imaging data. A U-Net architecture enhanced with a dual attention mechanism was employed for lesion segmentation, while Gradient-weighted Class Activation Mapping (Grad-CAM) was incorporated to visualize and interpret the model’s decision-making process. Ethical considerations, including informed patient consent, fairness in algorithmic decision-making, and mitigation of data bias, were rigorously addressed throughout the model development pipeline. The proposed system demonstrated the potential to improve diagnostic accuracy, reduce diagnostic latency, and enhance clinician trust by offering transparent and interpretable predictions. Furthermore, the integration of XAI is anticipated to promote greater clinical adoption and reliability of AI-assisted diagnostic systems in gynecology. 
This work contributes to the advancement of non-invasive diagnostic tools and reinforces the role of interpretable DL in the broader context of precision medicine and women's health.</description>
    <pubDate>2025-04-23</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Significant advancements in artificial intelligence (AI) have transformed clinical decision-making, particularly in disease detection and management. Endometriosis, a chronic and often debilitating gynecological disorder, affects a substantial proportion of reproductive-age women and is associated with pelvic pain, infertility, and a reduced quality of life. Despite its high prevalence, non-invasive and accurate diagnostic methods remain limited, frequently resulting in delayed or missed diagnoses. In this study, a novel diagnostic framework was developed by integrating deep learning (DL) with explainable artificial intelligence (XAI) to address existing limitations in the early and non-invasive detection of endometriosis. Abdominopelvic magnetic resonance imaging (MRI) data were obtained from the Crestview Radiology Center in Victoria Island, Lagos State. Preprocessing procedures, including Digital Imaging and Communications in Medicine (DICOM)-to-PNG conversion, image resizing, and intensity normalization, were applied to standardize the imaging data. A U-Net architecture enhanced with a dual attention mechanism was employed for lesion segmentation, while Gradient-weighted Class Activation Mapping (Grad-CAM) was incorporated to visualize and interpret the model’s decision-making process. Ethical considerations, including informed patient consent, fairness in algorithmic decision-making, and mitigation of data bias, were rigorously addressed throughout the model development pipeline. The proposed system demonstrated the potential to improve diagnostic accuracy, reduce diagnostic latency, and enhance clinician trust by offering transparent and interpretable predictions. Furthermore, the integration of XAI is anticipated to promote greater clinical adoption and reliability of AI-assisted diagnostic systems in gynecology. 
This work contributes to the advancement of non-invasive diagnostic tools and reinforces the role of interpretable DL in the broader context of precision medicine and women's health.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Enhancing Non-Invasive Diagnosis of Endometriosis Through Explainable Artificial Intelligence: A Grad-CAM Approach</dc:title>
    <dc:creator>Afolashade Oluwakemi Kuyoro</dc:creator>
    <dc:creator>Oluwayemisi Boye Fatade</dc:creator>
    <dc:creator>Ernest Enyinnaya Onuiri</dc:creator>
    <dc:identifier>doi:10.56578/ataiml040203</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2025-04-23</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2025-04-23</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>97</prism:startingPage>
    <prism:doi>10.56578/ataiml040203</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2025_4_2/ataiml040203</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_2/ataiml040202">
    <title>Acadlore Transactions on AI and Machine Learning, 2025, Volume 4, Issue 2: Benchmarking Text Embedding Models for Multi-Dataset Semantic Textual Similarity: A Machine Learning-Based Evaluation Framework</title>
    <link>https://www.acadlore.com/article/ATAIML/2025_4_2/ataiml040202</link>
    <description>The selection of optimal text embedding models remains a critical challenge in semantic textual similarity (STS) tasks, particularly when performance varies substantially across datasets. In this study, the comparative effectiveness of multiple state-of-the-art embedding models was systematically evaluated using a benchmarking framework based on established machine learning techniques. A range of embedding architectures was examined across diverse STS datasets, with similarity computations performed using Euclidean distance, cosine similarity, and Manhattan distance metrics. Performance evaluation was conducted through Pearson and Spearman correlation coefficients to ensure robust and interpretable assessments. The results revealed that GIST-Embedding-v0 consistently achieved the highest average correlation scores across all datasets, indicating strong generalizability. Nevertheless, MUG-B-1.6 demonstrated superior performance on datasets 2, 6, and 7, while UAE-Large-V1 outperformed other models on datasets 3 and 5, thereby underscoring the influence of dataset-specific characteristics on embedding model efficacy. These findings highlight the importance of adopting a dataset-aware approach in embedding model selection for STS tasks, rather than relying on a single universal model. Moreover, the observed performance divergence suggests that embedding architectures may encode semantic relationships differently depending on domain-specific linguistic features. By providing a detailed evaluation of model behavior across varied datasets, this study offers a methodological foundation for embedding selection in downstream NLP applications. The implications of this research extend to the development of more reliable, scalable, and context-sensitive STS systems, where model performance can be optimized based on empirical evidence rather than heuristics. 
These insights are expected to inform future investigations on embedding adaptation, hybrid model integration, and meta-learning strategies for semantic similarity tasks.</description>
    <pubDate>2025-04-17</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;The selection of optimal text embedding models remains a critical challenge in semantic textual similarity (STS) tasks, particularly when performance varies substantially across datasets. In this study, the comparative effectiveness of multiple state-of-the-art embedding models was systematically evaluated using a benchmarking framework based on established machine learning techniques. A range of embedding architectures was examined across diverse STS datasets, with similarity computations performed using Euclidean distance, cosine similarity, and Manhattan distance metrics. Performance evaluation was conducted through Pearson and Spearman correlation coefficients to ensure robust and interpretable assessments. The results revealed that GIST-Embedding-v0 consistently achieved the highest average correlation scores across all datasets, indicating strong generalizability. Nevertheless, MUG-B-1.6 demonstrated superior performance on datasets 2, 6, and 7, while UAE-Large-V1 outperformed other models on datasets 3 and 5, thereby underscoring the influence of dataset-specific characteristics on embedding model efficacy. These findings highlight the importance of adopting a dataset-aware approach in embedding model selection for STS tasks, rather than relying on a single universal model. Moreover, the observed performance divergence suggests that embedding architectures may encode semantic relationships differently depending on domain-specific linguistic features. By providing a detailed evaluation of model behavior across varied datasets, this study offers a methodological foundation for embedding selection in downstream NLP applications. The implications of this research extend to the development of more reliable, scalable, and context-sensitive STS systems, where model performance can be optimized based on empirical evidence rather than heuristics. 
These insights are expected to inform future investigations on embedding adaptation, hybrid model integration, and meta-learning strategies for semantic similarity tasks.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Benchmarking Text Embedding Models for Multi-Dataset Semantic Textual Similarity: A Machine Learning-Based Evaluation Framework</dc:title>
    <dc:creator>Sutriawan</dc:creator>
    <dc:creator>Wasis Haryo Sasoko</dc:creator>
    <dc:creator>Zumhur Alamin</dc:creator>
    <dc:creator>Ritzkal</dc:creator>
    <dc:identifier>doi:10.56578/ataiml040202</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2025-04-17</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2025-04-17</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>82</prism:startingPage>
    <prism:doi>10.56578/ataiml040202</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2025_4_2/ataiml040202</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_2/ataiml040201">
    <title>Acadlore Transactions on AI and Machine Learning, 2025, Volume 4, Issue 2: A Hybrid Graph-Attention and Contextual Sentiment Embedding Model for Sentiment Analysis in Legal Documents</title>
    <link>https://www.acadlore.com/article/ATAIML/2025_4_2/ataiml040201</link>
    <description>Sentiment analysis in legal documents presents significant challenges due to the intricate structure, domain-specific terminology, and strong contextual dependencies inherent in legal texts. In this study, a novel hybrid framework is proposed, integrating Graph Attention Networks (GATs) with domain-specific embeddings, i.e., Legal Bidirectional Encoder Representations from Transformers (LegalBERT) and an aspect-oriented sentiment classification approach to improve both predictive accuracy and interpretability. Unlike conventional deep learning models, the proposed method explicitly captures hierarchical relationships within legal texts through GATs while leveraging LegalBERT to enhance domain-specific semantic representation. Additionally, auxiliary features, including positional information and topic relevance, were incorporated to refine sentiment predictions. A comprehensive evaluation conducted on diverse legal datasets demonstrates that the proposed model achieves state-of-the-art performance, attaining an accuracy of 93.1% and surpassing existing benchmarks by a significant margin. Model interpretability was further enhanced through SHapley Additive exPlanations (SHAP) and Legal Context Attribution Score (LCAS) techniques, which provide transparency into decision-making processes. An ablation study confirms the critical contribution of each model component, while scalability experiments validate the model’s efficiency across datasets ranging from 10,000 to 200,000 sentences. Despite increased computational demands, strong robustness and scalability are exhibited, making this framework suitable for large-scale legal applications. Future research will focus on multilingual adaptation, computational optimization, and broader applications within the field of legal analytics.</description>
    <pubDate>2025-03-27</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Sentiment analysis in legal documents presents significant challenges due to the intricate structure, domain-specific terminology, and strong contextual dependencies inherent in legal texts. In this study, a novel hybrid framework is proposed, integrating Graph Attention Networks (GATs) with domain-specific embeddings, i.e., Legal Bidirectional Encoder Representations from Transformers (LegalBERT) and an aspect-oriented sentiment classification approach to improve both predictive accuracy and interpretability. Unlike conventional deep learning models, the proposed method explicitly captures hierarchical relationships within legal texts through GATs while leveraging LegalBERT to enhance domain-specific semantic representation. Additionally, auxiliary features, including positional information and topic relevance, were incorporated to refine sentiment predictions. A comprehensive evaluation conducted on diverse legal datasets demonstrates that the proposed model achieves state-of-the-art performance, attaining an accuracy of 93.1% and surpassing existing benchmarks by a significant margin. Model interpretability was further enhanced through SHapley Additive exPlanations (SHAP) and Legal Context Attribution Score (LCAS) techniques, which provide transparency into decision-making processes. An ablation study confirms the critical contribution of each model component, while scalability experiments validate the model’s efficiency across datasets ranging from 10,000 to 200,000 sentences. Despite increased computational demands, strong robustness and scalability are exhibited, making this framework suitable for large-scale legal applications. Future research will focus on multilingual adaptation, computational optimization, and broader applications within the field of legal analytics.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>A Hybrid Graph-Attention and Contextual Sentiment Embedding Model for Sentiment Analysis in Legal Documents</dc:title>
    <dc:creator>Sulaxan Jadhav</dc:creator>
    <dc:creator>Ashvini Pradeep Shende</dc:creator>
    <dc:creator>Samruddhi Sapkal</dc:creator>
    <dc:identifier>doi:10.56578/ataiml040201</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2025-03-27</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2025-03-27</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>62</prism:startingPage>
    <prism:doi>10.56578/ataiml040201</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2025_4_2/ataiml040201</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_1/ataiml040105">
    <title>Acadlore Transactions on AI and Machine Learning, 2025, Volume 4, Issue 1: Advanced Image Restoration Through CIPFS-Integrated Mathematical Transformations</title>
    <link>https://www.acadlore.com/article/ATAIML/2025_4_1/ataiml040105</link>
    <description>The restoration of blurred images remains a critical challenge in computational image processing, necessitating advanced methodologies capable of reconstructing fine details while mitigating structural degradation. In this study, an innovative image restoration framework was introduced, employing Complex Interval Pythagorean Fuzzy Sets (CIPFSs) integrated with mathematically structured transformations to achieve enhanced deblurring performance. The proposed methodology initiates with the geometric correction of pixel-level distortions induced by blurring. A key innovation lies in the incorporation of CIPFS-based entropy, which is synergistically combined with local statistical energy to enable robust blur estimation and adaptive correction. Unlike traditional fuzzy logic-based approaches, CIPFS facilitates a more expressive modeling of uncertainty by leveraging complex interval-valued membership functions, thereby enabling nuanced differentiation of blur intensity across image regions. A fuzzy inference mechanism was utilized to guide the refinement process, ensuring that localized corrections are adaptively applied to degraded regions while leaving undistorted areas unaffected. To preserve edge integrity, a geometric step function was applied to reinforce structural boundaries and suppress over-smoothing artifacts. In the final restoration phase, structural consistency is enforced through normalization and regularization techniques to ensure coherence with the original image context. Experimental validations demonstrate that the proposed model delivers superior image clarity, improved edge sharpness, and reduced visual artifacts compared to state-of-the-art deblurring methods. Enhanced robustness against varying blur patterns and noise intensities was also confirmed, indicating strong generalization potential. 
By unifying the expressive power of CIPFS with analytically driven restoration strategies, this approach contributes a significant advancement to the domain of image deblurring and restoration under uncertainty.</description>
    <pubDate>Thu, 27 Mar 2025 00:00:00 GMT</pubDate>
    <content:encoded>&lt;p&gt;The restoration of blurred images remains a critical challenge in computational image processing, necessitating advanced methodologies capable of reconstructing fine details while mitigating structural degradation. In this study, an innovative image restoration framework was introduced, employing Complex Interval Pythagorean Fuzzy Sets (CIPFSs) integrated with mathematically structured transformations to achieve enhanced deblurring performance. The proposed methodology initiates with the geometric correction of pixel-level distortions induced by blurring. A key innovation lies in the incorporation of CIPFS-based entropy, which is synergistically combined with local statistical energy to enable robust blur estimation and adaptive correction. Unlike traditional fuzzy logic-based approaches, CIPFS facilitates a more expressive modeling of uncertainty by leveraging complex interval-valued membership functions, thereby enabling nuanced differentiation of blur intensity across image regions. A fuzzy inference mechanism was utilized to guide the refinement process, ensuring that localized corrections are adaptively applied to degraded regions while leaving undistorted areas unaffected. To preserve edge integrity, a geometric step function was applied to reinforce structural boundaries and suppress over-smoothing artifacts. In the final restoration phase, structural consistency is enforced through normalization and regularization techniques to ensure coherence with the original image context. Experimental validations demonstrate that the proposed model delivers superior image clarity, improved edge sharpness, and reduced visual artifacts compared to state-of-the-art deblurring methods. Enhanced robustness against varying blur patterns and noise intensities was also confirmed, indicating strong generalization potential. 
By unifying the expressive power of CIPFS with analytically driven restoration strategies, this approach contributes a significant advancement to the domain of image deblurring and restoration under uncertainty.&lt;/p&gt;</content:encoded>
    <dc:title>Advanced Image Restoration Through CIPFS-Integrated Mathematical Transformations</dc:title>
    <dc:creator>Zakir Husain</dc:creator>
    <dc:creator>Kai Siong Yow</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml040105</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2025-03-27</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2025-03-27</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>50</prism:startingPage>
    <prism:doi>10.56578/ataiml040105</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2025_4_1/ataiml040105</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_1/ataiml040104">
    <title>Acadlore Transactions on AI and Machine Learning, 2025, Volume 4, Issue 1: Investigating Stance Marking in Computer-Assisted AI Chatbot Discourse</title>
    <link>https://www.acadlore.com/article/ATAIML/2025_4_1/ataiml040104</link>
    <description>Stance, a critical discourse marker, reflects the expression of attitudes, feelings, evaluations, or judgments by speakers or writers toward a topic or other participants in a conversation. This study investigates the manifestation of stance in the discourse of four prominent artificial intelligence (AI) chatbots—ChatGPT, Gemini, MetaAI, and Bing Copilot—focusing on three dimensions: interpersonal stance (how chatbots perceive one another), epistemic stance (their relationship to the topic of discussion), and style stance (their communicative style). Through a systematic analysis, it is revealed that these chatbots employ various stance markers, including hedging, self-mention, power dominance, alignment, and face-saving strategies. Notably, the use of face-saving framing by AI models, despite their lack of a genuine “face,” highlights the distinction between authentic interactional intent and the reproduction of linguistic conventions. This suggests that stance in AI discourse is not a product of subjective intent but rather an inherent feature of natural language. However, this study extends the discourse by examining stance as a feature of chatbot-to-chatbot communication rather than human-AI interactions, thereby bridging the gap between human linguistic behaviors and AI tendencies. It is concluded that stance is not an extraneous feature of discourse but an integral and unavoidable aspect of language use, which chatbots inevitably replicate. In other words, if chatbots must use language, then pragmatic features like stance are inevitable. Ultimately, this raises a broader question: Is it even possible for a chatbot to produce language devoid of stance? The implications of this research underscore the intrinsic connection between language use and pragmatic features, suggesting that stance is an inescapable component of any linguistic output, including that of AI systems.</description>
    <pubDate>Mon, 10 Mar 2025 00:00:00 GMT</pubDate>
    <content:encoded>&lt;p&gt;Stance, a critical discourse marker, reflects the expression of attitudes, feelings, evaluations, or judgments by speakers or writers toward a topic or other participants in a conversation. This study investigates the manifestation of stance in the discourse of four prominent artificial intelligence (AI) chatbots—ChatGPT, Gemini, MetaAI, and Bing Copilot—focusing on three dimensions: interpersonal stance (how chatbots perceive one another), epistemic stance (their relationship to the topic of discussion), and style stance (their communicative style). Through a systematic analysis, it is revealed that these chatbots employ various stance markers, including hedging, self-mention, power dominance, alignment, and face-saving strategies. Notably, the use of face-saving framing by AI models, despite their lack of a genuine “face,” highlights the distinction between authentic interactional intent and the reproduction of linguistic conventions. This suggests that stance in AI discourse is not a product of subjective intent but rather an inherent feature of natural language. However, this study extends the discourse by examining stance as a feature of chatbot-to-chatbot communication rather than human-AI interactions, thereby bridging the gap between human linguistic behaviors and AI tendencies. It is concluded that stance is not an extraneous feature of discourse but an integral and unavoidable aspect of language use, which chatbots inevitably replicate. In other words, if chatbots must use language, then pragmatic features like stance are inevitable. Ultimately, this raises a broader question: Is it even possible for a chatbot to produce language devoid of stance? The implications of this research underscore the intrinsic connection between language use and pragmatic features, suggesting that stance is an inescapable component of any linguistic output, including that of AI systems.&lt;/p&gt;</content:encoded>
    <dc:title>Investigating Stance Marking in Computer-Assisted AI Chatbot Discourse</dc:title>
    <dc:creator>Kayode Victor Amusan</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml040104</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2025-03-10</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2025-03-10</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>40</prism:startingPage>
    <prism:doi>10.56578/ataiml040104</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2025_4_1/ataiml040104</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_1/ataiml040103">
    <title>Acadlore Transactions on AI and Machine Learning, 2025, Volume 4, Issue 1: Facial Expression Recognition Through Transfer Learning: Integration of VGG16, ResNet, and AlexNet with a Multiclass Classifier</title>
    <link>https://www.acadlore.com/article/ATAIML/2025_4_1/ataiml040103</link>
    <description>This study investigates the recognition of seven primary human emotions—contempt, anger, disgust, surprise, fear, happiness, and sadness—based on facial expressions. A transfer learning approach was employed, utilizing three pre-trained convolutional neural network (CNN) architectures: AlexNet, VGG16, and ResNet50. The system was structured to perform facial expression recognition (FER) by incorporating three key stages: face detection, feature extraction, and emotion classification using a multiclass classifier. The proposed methodology was designed to enhance pattern recognition accuracy through a carefully structured training pipeline. Furthermore, the performance of the transfer learning models was compared using a multiclass support vector machine (SVM) classifier, and extensive testing was planned on large-scale datasets to further evaluate detection accuracy. This study addresses the challenge of spontaneous FER, a critical research area in human-computer interaction, security, and healthcare. A key contribution of this study is the development of an efficient feature extraction method, which facilitates FER with minimal reliance on extensive datasets. The proposed system demonstrates notable improvements in recognition accuracy compared to traditional approaches, significantly reducing misclassification rates. It is also shown to require less computational time and resources, thereby enhancing its scalability and applicability to real-world scenarios. The approach outperforms conventional techniques, including SVMs with handcrafted features, by leveraging the robust feature extraction capabilities of transfer learning. This framework offers a scalable and reliable solution for FER tasks, with potential applications in healthcare, security, and human-computer interaction. 
Additionally, the system’s ability to function effectively in the absence of a caregiver provides significant assistance to individuals with disabilities in expressing their emotional needs. This research contributes to the growing body of work on facial emotion recognition and paves the way for future advancements in artificial intelligence-driven emotion detection systems.</description>
    <pubDate>Thu, 20 Feb 2025 00:00:00 GMT</pubDate>
    <content:encoded>&lt;p&gt;This study investigates the recognition of seven primary human emotions—contempt, anger, disgust, surprise, fear, happiness, and sadness—based on facial expressions. A transfer learning approach was employed, utilizing three pre-trained convolutional neural network (CNN) architectures: AlexNet, VGG16, and ResNet50. The system was structured to perform facial expression recognition (FER) by incorporating three key stages: face detection, feature extraction, and emotion classification using a multiclass classifier. The proposed methodology was designed to enhance pattern recognition accuracy through a carefully structured training pipeline. Furthermore, the performance of the transfer learning models was compared using a multiclass support vector machine (SVM) classifier, and extensive testing was planned on large-scale datasets to further evaluate detection accuracy. This study addresses the challenge of spontaneous FER, a critical research area in human-computer interaction, security, and healthcare. A key contribution of this study is the development of an efficient feature extraction method, which facilitates FER with minimal reliance on extensive datasets. The proposed system demonstrates notable improvements in recognition accuracy compared to traditional approaches, significantly reducing misclassification rates. It is also shown to require less computational time and resources, thereby enhancing its scalability and applicability to real-world scenarios. The approach outperforms conventional techniques, including SVMs with handcrafted features, by leveraging the robust feature extraction capabilities of transfer learning. This framework offers a scalable and reliable solution for FER tasks, with potential applications in healthcare, security, and human-computer interaction. 
Additionally, the system’s ability to function effectively in the absence of a caregiver provides significant assistance to individuals with disabilities in expressing their emotional needs. This research contributes to the growing body of work on facial emotion recognition and paves the way for future advancements in artificial intelligence-driven emotion detection systems.&lt;/p&gt;</content:encoded>
    <dc:title>Facial Expression Recognition Through Transfer Learning: Integration of VGG16, ResNet, and AlexNet with a Multiclass Classifier</dc:title>
    <dc:creator>Balaiah Paulchamy</dc:creator>
    <dc:creator>Abid Yahya</dc:creator>
    <dc:creator>Natarajan Chinnasamy</dc:creator>
    <dc:creator>Kalpana Kasilingam</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml040103</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2025-02-20</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2025-02-20</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>25</prism:startingPage>
    <prism:doi>10.56578/ataiml040103</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2025_4_1/ataiml040103</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_1/ataiml040102">
    <title>Acadlore Transactions on AI and Machine Learning, 2025, Volume 4, Issue 1: Impact of Data Preprocessing Techniques on the Performance of Machine Learning Models for Drought Prediction</title>
    <link>https://www.acadlore.com/article/ATAIML/2025_4_1/ataiml040102</link>
    <description>Drought, a complex natural phenomenon with profound global impacts, including the depletion of water resources, reduced agricultural productivity, and ecological disruption, has become a critical challenge in the context of climate change. Effective drought prediction models are essential for mitigating these adverse effects. This study investigates the contribution of various data preprocessing steps—specifically class imbalance handling and dimensionality reduction techniques—to the performance of machine learning models for drought prediction. Synthetic Minority Over-sampling Technique (SMOTE) and near miss sampling methods were employed to address class imbalances within the dataset. Additionally, Principal Component Analysis (PCA) and Linear Discriminant Analysis (LDA) were applied for dimensionality reduction, aiming to improve computational efficiency while retaining essential features. Decision tree algorithms were trained on the preprocessed data to assess the impact of these preprocessing techniques on model accuracy, precision, recall, and F1-score. The results indicate that the SMOTE-based sampling approach significantly enhances the overall performance of the drought prediction model, particularly in terms of accuracy and robustness. Furthermore, the combination of SMOTE, PCA, and LDA demonstrates a substantial improvement in model reliability and generalizability. These findings underscore the critical importance of carefully selecting and applying appropriate data preprocessing techniques to address class imbalances and reduce feature space, thus optimizing the performance of machine learning models in drought prediction. This study highlights the potential of preprocessing strategies in improving the predictive capabilities of models, providing valuable insights for future research in climate-related prediction tasks.</description>
    <pubDate>Thu, 20 Feb 2025 00:00:00 GMT</pubDate>
    <content:encoded>&lt;p&gt;Drought, a complex natural phenomenon with profound global impacts, including the depletion of water resources, reduced agricultural productivity, and ecological disruption, has become a critical challenge in the context of climate change. Effective drought prediction models are essential for mitigating these adverse effects. This study investigates the contribution of various data preprocessing steps—specifically class imbalance handling and dimensionality reduction techniques—to the performance of machine learning models for drought prediction. Synthetic Minority Over-sampling Technique (SMOTE) and near miss sampling methods were employed to address class imbalances within the dataset. Additionally, Principal Component Analysis (PCA) and Linear Discriminant Analysis (LDA) were applied for dimensionality reduction, aiming to improve computational efficiency while retaining essential features. Decision tree algorithms were trained on the preprocessed data to assess the impact of these preprocessing techniques on model accuracy, precision, recall, and F1-score. The results indicate that the SMOTE-based sampling approach significantly enhances the overall performance of the drought prediction model, particularly in terms of accuracy and robustness. Furthermore, the combination of SMOTE, PCA, and LDA demonstrates a substantial improvement in model reliability and generalizability. These findings underscore the critical importance of carefully selecting and applying appropriate data preprocessing techniques to address class imbalances and reduce feature space, thus optimizing the performance of machine learning models in drought prediction. This study highlights the potential of preprocessing strategies in improving the predictive capabilities of models, providing valuable insights for future research in climate-related prediction tasks.&lt;/p&gt;</content:encoded>
    <dc:title>Impact of Data Preprocessing Techniques on the Performance of Machine Learning Models for Drought Prediction</dc:title>
    <dc:creator>Serap Erçel</dc:creator>
    <dc:creator>Sinem Akyol</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml040102</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2025-02-20</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2025-02-20</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>14</prism:startingPage>
    <prism:doi>10.56578/ataiml040102</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2025_4_1/ataiml040102</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2025_4_1/ataiml040101">
    <title>Acadlore Transactions on AI and Machine Learning, 2025, Volume 4, Issue 1: Advanced Tanning Detection Through Image Processing and Computer Vision</title>
    <link>https://www.acadlore.com/article/ATAIML/2025_4_1/ataiml040101</link>
    <description>This study introduces an advanced approach to the automated detection of skin tanning, leveraging image processing and computer vision techniques to accurately assess tanning levels. A method was proposed in which skin tone variations were analyzed by comparing a reference image with a current image of the same subject. This approach establishes a reliable framework for estimating tanning levels through a sequence of image preprocessing, skin segmentation, dominant color extraction, and tanning assessment. The hue-saturation-value (HSV) color space was employed to quantify these variations, with particular emphasis placed on the saturation component, which is identified as a critical factor for tanning detection. This novel focus on the saturation component offers a robust and objective alternative to traditional visual assessment methods. Additionally, the potential integration of machine learning techniques to enhance skin segmentation and improve image analysis accuracy was explored. The proposed framework was positioned within an Internet of Things (IoT) ecosystem for real-time monitoring of sun safety, providing a practical application for both individual and public health contexts. Experimental results demonstrate the efficacy of the proposed method in distinguishing various tanning levels, thereby offering significant advancements in the fields of cosmetic dermatology, public health, and preventive medicine. These findings suggest that the integration of image processing, computer vision, and machine learning can provide a powerful tool for the automated assessment of skin tanning, with broad implications for real-time health monitoring and the prevention of overexposure to ultraviolet (UV) radiation.</description>
    <pubDate>Mon, 20 Jan 2025 00:00:00 GMT</pubDate>
    <content:encoded>&lt;p&gt;This study introduces an advanced approach to the automated detection of skin tanning, leveraging image processing and computer vision techniques to accurately assess tanning levels. A method was proposed in which skin tone variations were analyzed by comparing a reference image with a current image of the same subject. This approach establishes a reliable framework for estimating tanning levels through a sequence of image preprocessing, skin segmentation, dominant color extraction, and tanning assessment. The hue-saturation-value (HSV) color space was employed to quantify these variations, with particular emphasis placed on the saturation component, which is identified as a critical factor for tanning detection. This novel focus on the saturation component offers a robust and objective alternative to traditional visual assessment methods. Additionally, the potential integration of machine learning techniques to enhance skin segmentation and improve image analysis accuracy was explored. The proposed framework was positioned within an Internet of Things (IoT) ecosystem for real-time monitoring of sun safety, providing a practical application for both individual and public health contexts. Experimental results demonstrate the efficacy of the proposed method in distinguishing various tanning levels, thereby offering significant advancements in the fields of cosmetic dermatology, public health, and preventive medicine. These findings suggest that the integration of image processing, computer vision, and machine learning can provide a powerful tool for the automated assessment of skin tanning, with broad implications for real-time health monitoring and the prevention of overexposure to ultraviolet (UV) radiation.&lt;/p&gt;</content:encoded>
    <dc:title>Advanced Tanning Detection Through Image Processing and Computer Vision</dc:title>
    <dc:creator>Sayak Mukhopadhyay</dc:creator>
    <dc:creator>Janmejay Gupta</dc:creator>
    <dc:creator>Akshay Kumar</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml040101</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2025-01-20</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2025-01-20</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>1</prism:startingPage>
    <prism:doi>10.56578/ataiml040101</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2025_4_1/ataiml040101</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_4/ataiml030405">
    <title>Acadlore Transactions on AI and Machine Learning, 2024, Volume 3, Issue 4: Application of Low-Rank Tensor Completion Combined with Prior Knowledge in Visual Data</title>
    <link>https://www.acadlore.com/article/ATAIML/2024_3_4/ataiml030405</link>
    <description>In recent years, representing computer vision data in tensor form has become an important method of data representation. However, due to the limitations of signal acquisition devices, the actual data obtained may be damaged, such as image loss, noise interference, or a combination of both. Using Low-Rank Tensor Completion (LRTC) techniques to recover missing or corrupted tensor data has become a hot research topic. In this paper, we adopt a tensor coupled total variation (t-CTV) norm based on t-SVD as the minimization criterion to capture the combined effects of low-rank and local piecewise smooth priors, thus eliminating the need for balance parameters in the process. At the same time, we utilize the Non-Local Means (NLM) denoiser to smooth the image and reduce noise by leveraging the nonlocal self-similarity of the image. Furthermore, an Alternating Direction Method of Multipliers (ADMM) algorithm is designed for the proposed optimization model, NLM-TCTV. Extensive numerical experiments on real tensor data (including color, medical, and satellite remote sensing images) show that the proposed method has good robustness, performs well in noisy images, and surpasses many existing methods in both quality and visual effects.</description>
    <pubDate>Mon, 30 Dec 2024 00:00:00 GMT</pubDate>
    <content:encoded>&lt;p&gt;In recent years, representing computer vision data in tensor form has become an important method of data representation. However, due to the limitations of signal acquisition devices, the actual data obtained may be damaged, such as image loss, noise interference, or a combination of both. Using Low-Rank Tensor Completion (LRTC) techniques to recover missing or corrupted tensor data has become a hot research topic. In this paper, we adopt a tensor coupled total variation (t-CTV) norm based on t-SVD as the minimization criterion to capture the combined effects of low-rank and local piecewise smooth priors, thus eliminating the need for balance parameters in the process. At the same time, we utilize the Non-Local Means (NLM) denoiser to smooth the image and reduce noise by leveraging the nonlocal self-similarity of the image. Furthermore, an Alternating Direction Method of Multipliers (ADMM) algorithm is designed for the proposed optimization model, NLM-TCTV. Extensive numerical experiments on real tensor data (including color, medical, and satellite remote sensing images) show that the proposed method has good robustness, performs well in noisy images, and surpasses many existing methods in both quality and visual effects.&lt;/p&gt;</content:encoded>
    <dc:title>Application of Low-Rank Tensor Completion Combined with Prior Knowledge in Visual Data</dc:title>
    <dc:creator>Junzhe Zhao</dc:creator>
    <dc:creator>Huimin Wang</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml030405</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2024-12-30</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2024-12-30</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>4</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>237</prism:startingPage>
    <prism:doi>10.56578/ataiml030405</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2024_3_4/ataiml030405</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_4/ataiml030404">
    <title>Acadlore Transactions on AI and Machine Learning, 2024, Volume 3, Issue 4: Innovative Hybrid Deep Learning Models for Financial Sentiment Analysis</title>
    <link>https://www.acadlore.com/article/ATAIML/2024_3_4/ataiml030404</link>
    <description>This study explores hybrid deep learning architectures for the classification of financial sentiment, focusing on the integration of the Convolutional Neural Network (CNN) with the Support Vector Machine (SVM) and the Random Forest (RF). CNN, with its powerful feature extraction capabilities, was combined with SVM’s ability to handle non-linear decision boundaries, while RF enhanced model generalization through ensemble learning. The proposed hybrid frameworks addressed two fundamental challenges in sentiment analysis: overfitting and class imbalance. These challenges were mitigated, resulting in improved model accuracy and reliability compared to standalone methods. Empirical evaluations demonstrated that the CNN-SVM model achieved competitive or superior validation accuracy and loss, indicating its suitability for precise financial sentiment classification. By enabling more accurate sentiment categorization, the model provides actionable insights for financial analysts and investors, thereby supporting better market assessment and investment decision-making. Future work is suggested to incorporate advanced techniques such as adversarial training and domain-specific pre-trained models to further enhance model performance.</description>
    <pubDate>Mon, 23 Dec 2024 00:00:00 GMT</pubDate>
    <content:encoded>&lt;p&gt;This study explores hybrid deep learning architectures for the classification of financial sentiment, focusing on the integration of the Convolutional Neural Network (CNN) with the Support Vector Machine (SVM) and the Random Forest (RF). CNN, with its powerful feature extraction capabilities, was combined with SVM’s ability to handle non-linear decision boundaries, while RF enhanced model generalization through ensemble learning. The proposed hybrid frameworks addressed two fundamental challenges in sentiment analysis: overfitting and class imbalance. These challenges were mitigated, resulting in improved model accuracy and reliability compared to standalone methods. Empirical evaluations demonstrated that the CNN-SVM model achieved competitive or superior validation accuracy and loss, indicating its suitability for precise financial sentiment classification. By enabling more accurate sentiment categorization, the model provides actionable insights for financial analysts and investors, thereby supporting better market assessment and investment decision-making. Future work is suggested to incorporate advanced techniques such as adversarial training and domain-specific pre-trained models to further enhance model performance.&lt;/p&gt;</content:encoded>
    <dc:title>Innovative Hybrid Deep Learning Models for Financial Sentiment Analysis</dc:title>
    <dc:creator>ridwan b. marqas</dc:creator>
    <dc:creator>abdulazeez mousa</dc:creator>
    <dc:creator>fatih özyurt</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml030404</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>12-23-2024</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>12-23-2024</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>4</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>225</prism:startingPage>
    <prism:doi>10.56578/ataiml030404</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2024_3_4/ataiml030404</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_4/ataiml030403">
    <title>Acadlore Transactions on AI and Machine Learning, 2024, Volume 3, Issue 4, Pages 214-224: A Comprehensive Review of Ant Colony Optimization in Swarm Intelligence for Complex Problem Solving</title>
    <link>https://www.acadlore.com/article/ATAIML/2024_3_4/ataiml030403</link>
    <description>Swarm intelligence (SI) has emerged as a transformative approach in solving complex optimization problems by drawing inspiration from collective behaviors observed in nature, particularly among social animals and insects. Ant Colony Optimization (ACO), a prominent subclass of SI algorithms, models the foraging behavior of ant colonies to address a range of challenging combinatorial problems. Originally introduced in 1992 for the Traveling Salesman Problem (TSP), ACO employs artificial pheromone trails and heuristic information to probabilistically guide solution construction. The artificial ants within ACO algorithms engage in a stochastic search process, iteratively refining solutions through the deposition and evaporation of pheromone levels based on previous search experiences. This review synthesizes the extensive body of research that has since advanced ACO from its initial ant system (AS) model to sophisticated algorithmic variants. These advances have both significantly enhanced ACO's practical performance across various application domains and contributed to a deeper theoretical understanding of its mechanics. The focus of this study is placed on the behavioral foundations of ACO, as well as on the metaheuristic frameworks that enable its versatility and robustness in handling large-scale, computationally intensive tasks. Additionally, this study highlights current limitations and potential areas for future exploration within ACO, aiming to facilitate a comprehensive understanding of this dynamic field of swarm-based optimization.</description>
    <pubDate>Thu, 07 Nov 2024 00:00:00 GMT</pubDate>
    <content:encoded><![CDATA[<p>Swarm intelligence (SI) has emerged as a transformative approach in solving complex optimization problems by drawing inspiration from collective behaviors observed in nature, particularly among social animals and insects. Ant Colony Optimization (ACO), a prominent subclass of SI algorithms, models the foraging behavior of ant colonies to address a range of challenging combinatorial problems. Originally introduced in 1992 for the Traveling Salesman Problem (TSP), ACO employs artificial pheromone trails and heuristic information to probabilistically guide solution construction. The artificial ants within ACO algorithms engage in a stochastic search process, iteratively refining solutions through the deposition and evaporation of pheromone levels based on previous search experiences. This review synthesizes the extensive body of research that has since advanced ACO from its initial ant system (AS) model to sophisticated algorithmic variants. These advances have both significantly enhanced ACO's practical performance across various application domains and contributed to a deeper theoretical understanding of its mechanics. The focus of this study is placed on the behavioral foundations of ACO, as well as on the metaheuristic frameworks that enable its versatility and robustness in handling large-scale, computationally intensive tasks. Additionally, this study highlights current limitations and potential areas for future exploration within ACO, aiming to facilitate a comprehensive understanding of this dynamic field of swarm-based optimization.</p>]]></content:encoded>
    <dc:title>A Comprehensive Review of Ant Colony Optimization in Swarm Intelligence for Complex Problem Solving</dc:title>
    <dc:creator>Batool Abdulsatar Abdulghani</dc:creator>
    <dc:creator>Mohammed Abdulsattar Abdulghani</dc:creator>
    <dc:identifier>doi:10.56578/ataiml030403</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2024-11-07</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2024-11-07</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>4</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>214</prism:startingPage>
    <prism:doi>10.56578/ataiml030403</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2024_3_4/ataiml030403</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_4/ataiml030402">
    <title>Acadlore Transactions on AI and Machine Learning, 2024, Volume 3, Issue 4, Pages 202-213: A Novel Machine Learning Approach for Optimizing Radar Warning Receiver Preprogramming</title>
    <link>https://www.acadlore.com/article/ATAIML/2024_3_4/ataiml030402</link>
    <description>Radar warning receivers (RWRs) are critical for swiftly and accurately identifying potential threats in complex electromagnetic environments. Numerous methods have been developed over the years, with recent advances in artificial intelligence (AI) significantly enhancing RWR capabilities. This study presents a machine learning-based approach for emitter identification within RWR systems, leveraging a comprehensive radar signal library. Key parameters such as signal frequency, pulse width, pulse repetition frequency (PRF), and beam width were extracted from pulsed radar signals and utilized in various machine learning algorithms. The preprogramming phase of RWRs was optimized through the application of multiple classification algorithms, including k-Nearest Neighbors (KNN), Decision Tree (DT), the ensemble learning method, support vector machine (SVM), and Artificial Neural Network (ANN). These algorithms were compared against conventional methods to evaluate their performance. The machine learning models demonstrated a high degree of accuracy, achieving over 95% in training phases and exceeding 99% in test simulations. The findings highlight the superiority of machine learning algorithms in terms of speed and precision when compared to traditional approaches. Furthermore, the flexibility of machine learning techniques to adapt to diverse problem sets underscores their potential as a preferred solution for future RWR applications. This study suggests that the integration of machine learning into RWR emitter identification not only enhances the operational efficiency of electronic warfare (EW) systems but also represents a significant advancement in the field. The increasing relevance of machine learning in recent years positions it as a promising tool for addressing complex signal processing challenges in EW.</description>
    <pubDate>Mon, 21 Oct 2024 00:00:00 GMT</pubDate>
    <content:encoded><![CDATA[<p>Radar warning receivers (RWRs) are critical for swiftly and accurately identifying potential threats in complex electromagnetic environments. Numerous methods have been developed over the years, with recent advances in artificial intelligence (AI) significantly enhancing RWR capabilities. This study presents a machine learning-based approach for emitter identification within RWR systems, leveraging a comprehensive radar signal library. Key parameters such as signal frequency, pulse width, pulse repetition frequency (PRF), and beam width were extracted from pulsed radar signals and utilized in various machine learning algorithms. The preprogramming phase of RWRs was optimized through the application of multiple classification algorithms, including k-Nearest Neighbors (KNN), Decision Tree (DT), the ensemble learning method, support vector machine (SVM), and Artificial Neural Network (ANN). These algorithms were compared against conventional methods to evaluate their performance. The machine learning models demonstrated a high degree of accuracy, achieving over 95% in training phases and exceeding 99% in test simulations. The findings highlight the superiority of machine learning algorithms in terms of speed and precision when compared to traditional approaches. Furthermore, the flexibility of machine learning techniques to adapt to diverse problem sets underscores their potential as a preferred solution for future RWR applications. This study suggests that the integration of machine learning into RWR emitter identification not only enhances the operational efficiency of electronic warfare (EW) systems but also represents a significant advancement in the field. The increasing relevance of machine learning in recent years positions it as a promising tool for addressing complex signal processing challenges in EW.</p>]]></content:encoded>
    <dc:title>A Novel Machine Learning Approach for Optimizing Radar Warning Receiver Preprogramming</dc:title>
    <dc:creator>Mert Demircan</dc:creator>
    <dc:creator>Ahmet Güngör Pakfiliz</dc:creator>
    <dc:identifier>doi:10.56578/ataiml030402</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2024-10-21</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2024-10-21</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>4</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>202</prism:startingPage>
    <prism:doi>10.56578/ataiml030402</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2024_3_4/ataiml030402</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_4/ataiml030401">
    <title>Acadlore Transactions on AI and Machine Learning, 2024, Volume 3, Issue 4, Pages 193-201: Microwave Detection System for Wheat Moisture Content Based on Metasurface Lens Antennas</title>
    <link>https://www.acadlore.com/article/ATAIML/2024_3_4/ataiml030401</link>
    <description>Maintaining wheat moisture content within a safe range is of critical importance for ensuring the quality and safety of wheat. High-precision, rapid detection of wheat moisture content is a key factor in enabling effective control processes. A microwave detection system based on metasurface lens antennas was proposed in this study, which facilitates accurate, non-invasive, and contactless measurement of wheat moisture content. The system measures the attenuation characteristics of wheat with varying moisture content from 23.5 GHz to 24.5 GHz in the frequency range. A linear regression equation (coefficient of determination R2=0.9946) was established by using the measured actual moisture content obtained through the standard drying method, and was used as the prediction model for wheat moisture. Totally, 72 wheat samples were selected for moisture content prediction, yielding a root mean square error (RMSE) of 0.193%, mean absolute error (MAE) of 0.16%, and maximum relative error (MRE) of 5.25%. The results indicate that the proposed microwave detection system, based on metasurface lens antennas, provides an effective method for detecting wheat moisture content.</description>
    <pubDate>Tue, 15 Oct 2024 00:00:00 GMT</pubDate>
    <content:encoded><![CDATA[<p>Maintaining wheat moisture content within a safe range is of critical importance for ensuring the quality and safety of wheat. High-precision, rapid detection of wheat moisture content is a key factor in enabling effective control processes. A microwave detection system based on metasurface lens antennas was proposed in this study, which facilitates accurate, non-invasive, and contactless measurement of wheat moisture content. The system measures the attenuation characteristics of wheat with varying moisture content from 23.5 GHz to 24.5 GHz in the frequency range. A linear regression equation (coefficient of determination R<sup>2</sup>=0.9946) was established by using the measured actual moisture content obtained through the standard drying method, and was used as the prediction model for wheat moisture. Totally, 72 wheat samples were selected for moisture content prediction, yielding a root mean square error (RMSE) of 0.193%, mean absolute error (MAE) of 0.16%, and maximum relative error (MRE) of 5.25%. The results indicate that the proposed microwave detection system, based on metasurface lens antennas, provides an effective method for detecting wheat moisture content.</p>]]></content:encoded>
    <dc:title>Microwave Detection System for Wheat Moisture Content Based on Metasurface Lens Antennas</dc:title>
    <dc:creator>Suping Yu</dc:creator>
    <dc:creator>Weiwei Mao</dc:creator>
    <dc:identifier>doi:10.56578/ataiml030401</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2024-10-15</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2024-10-15</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>4</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>193</prism:startingPage>
    <prism:doi>10.56578/ataiml030401</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2024_3_4/ataiml030401</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_3/ataiml030305">
    <title>Acadlore Transactions on AI and Machine Learning, 2024, Volume 3, Issue 3, Pages 183-192: Robust Leaf Disease Detection Using Complex Fuzzy Sets and HSV-Based Color Segmentation Techniques</title>
    <link>https://www.acadlore.com/article/ATAIML/2024_3_3/ataiml030305</link>
    <description>Leaf diseases pose a significant threat to global agricultural productivity, impacting both crop yields and quality. Traditional detection methods often rely on expert knowledge, are labor-intensive, and can be time-consuming. To address these limitations, a novel framework was developed for the segmentation and detection of leaf diseases, incorporating complex fuzzy set (CFS) theory and advanced spatial averaging and difference techniques. This approach leverages the Hue, Saturation, and Value (HSV) color model, which offers superior contrast and visual clarity, to effectively distinguish between healthy and diseased regions in leaf images. The HSV space was utilized due to its ability to enhance the visibility of subtle disease patterns. CFSs were introduced to manage the inherent uncertainty and imprecision associated with disease characteristics, enabling a more accurate delineation of affected areas. Spatial techniques further refine the segmentation, improving detection precision and robustness. Experimental validation on diverse datasets demonstrates the proposed method’s high accuracy across a variety of plant diseases, highlighting its reliability and adaptability to real-world agricultural conditions. Moreover, the framework enhances interpretability by offering insights into the progression of disease, thus supporting informed decision-making for crop protection and management. The proposed model shows considerable potential in practical agricultural applications, where it can assist farmers and agronomists in timely and accurate disease identification, ultimately improving crop management practices.</description>
    <pubDate>Sun, 29 Sep 2024 00:00:00 GMT</pubDate>
    <content:encoded><![CDATA[<p>Leaf diseases pose a significant threat to global agricultural productivity, impacting both crop yields and quality. Traditional detection methods often rely on expert knowledge, are labor-intensive, and can be time-consuming. To address these limitations, a novel framework was developed for the segmentation and detection of leaf diseases, incorporating complex fuzzy set (CFS) theory and advanced spatial averaging and difference techniques. This approach leverages the Hue, Saturation, and Value (HSV) color model, which offers superior contrast and visual clarity, to effectively distinguish between healthy and diseased regions in leaf images. The HSV space was utilized due to its ability to enhance the visibility of subtle disease patterns. CFSs were introduced to manage the inherent uncertainty and imprecision associated with disease characteristics, enabling a more accurate delineation of affected areas. Spatial techniques further refine the segmentation, improving detection precision and robustness. Experimental validation on diverse datasets demonstrates the proposed method’s high accuracy across a variety of plant diseases, highlighting its reliability and adaptability to real-world agricultural conditions. Moreover, the framework enhances interpretability by offering insights into the progression of disease, thus supporting informed decision-making for crop protection and management. The proposed model shows considerable potential in practical agricultural applications, where it can assist farmers and agronomists in timely and accurate disease identification, ultimately improving crop management practices.</p>]]></content:encoded>
    <dc:title>Robust Leaf Disease Detection Using Complex Fuzzy Sets and HSV-Based Color Segmentation Techniques</dc:title>
    <dc:creator>Ibrar Hussain</dc:creator>
    <dc:creator>Rifaqat Ali</dc:creator>
    <dc:identifier>doi:10.56578/ataiml030305</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2024-09-29</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2024-09-29</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>3</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>183</prism:startingPage>
    <prism:doi>10.56578/ataiml030305</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2024_3_3/ataiml030305</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_3/ataiml030304">
    <title>Acadlore Transactions on AI and Machine Learning, 2024, Volume 3, Issue 3, Pages 172-182: Predicting Bank Users’ Time Deposits Based on LSTM-Stacked Modeling</title>
    <link>https://www.acadlore.com/article/ATAIML/2024_3_3/ataiml030304</link>
    <description>Accurately predicting whether bank users will opt for time deposit products is critical for optimizing marketing strategies and enhancing user engagement, ultimately improving a bank’s profitability. Traditional predictive models, such as linear regression and Logistic Regression (LR), are often limited in their ability to capture the complex, time-dependent patterns in user behavior. In this study, a hybrid approach that combines Long Short-Term Memory (LSTM) neural networks and a stacked ensemble learning framework is proposed to address these limitations. Initially, LSTM models were employed to extract temporal features from two distinct bank marketing datasets, thereby capturing the sequential nature of user interactions. These extracted features were subsequently input into several base classifiers, including Random Forest (RF), Support Vector Machine (SVM), and k-Nearest Neighbour (KNN), to conduct initial classifications. The outputs of these classifiers were then integrated using a LR model for final decision-making through a stacking ensemble method. The experimental evaluation demonstrates that the proposed LSTM-stacked model outperforms traditional models in predicting user time deposits on both datasets, providing robust predictive performance. The results suggest that leveraging temporal feature extraction with LSTM and combining it with ensemble techniques yields superior prediction accuracy, thereby offering a more sophisticated solution for banks aiming to enhance their marketing efficiency.</description>
    <pubDate>Sun, 29 Sep 2024 00:00:00 GMT</pubDate>
    <content:encoded><![CDATA[<p>Accurately predicting whether bank users will opt for time deposit products is critical for optimizing marketing strategies and enhancing user engagement, ultimately improving a bank’s profitability. Traditional predictive models, such as linear regression and Logistic Regression (LR), are often limited in their ability to capture the complex, time-dependent patterns in user behavior. In this study, a hybrid approach that combines Long Short-Term Memory (LSTM) neural networks and a stacked ensemble learning framework is proposed to address these limitations. Initially, LSTM models were employed to extract temporal features from two distinct bank marketing datasets, thereby capturing the sequential nature of user interactions. These extracted features were subsequently input into several base classifiers, including Random Forest (RF), Support Vector Machine (SVM), and k-Nearest Neighbour (KNN), to conduct initial classifications. The outputs of these classifiers were then integrated using a LR model for final decision-making through a stacking ensemble method. The experimental evaluation demonstrates that the proposed LSTM-stacked model outperforms traditional models in predicting user time deposits on both datasets, providing robust predictive performance. The results suggest that leveraging temporal feature extraction with LSTM and combining it with ensemble techniques yields superior prediction accuracy, thereby offering a more sophisticated solution for banks aiming to enhance their marketing efficiency.</p>]]></content:encoded>
    <dc:title>Predicting Bank Users’ Time Deposits Based on LSTM-Stacked Modeling</dc:title>
    <dc:creator>Zeyi Yang</dc:creator>
    <dc:creator>Yi Zhang</dc:creator>
    <dc:creator>Lina Yu</dc:creator>
    <dc:identifier>doi:10.56578/ataiml030304</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2024-09-29</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2024-09-29</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>3</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>172</prism:startingPage>
    <prism:doi>10.56578/ataiml030304</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2024_3_3/ataiml030304</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_3/ataiml030303">
    <title>Acadlore Transactions on AI and Machine Learning, 2024, Volume 3, Issue 3, Pages 162-171: Evaluating the Impact of Data Normalization on Rice Classification Using Machine Learning Algorithms</title>
    <link>https://www.acadlore.com/article/ATAIML/2024_3_3/ataiml030303</link>
    <description>Rice is a staple food for a significant portion of the global population, particularly in countries where it constitutes the primary source of sustenance. Accurate classification of rice varieties is critical for enhancing both agricultural yield and economic outcomes. Traditional classification methods are often inefficient, leading to increased costs, higher misclassification rates, and time loss. To address these limitations, automated classification systems employing machine learning (ML) algorithms have gained attention. However, when raw data is inadequately organized or scattered, classification accuracy can decline. To improve data organization, normalization processes are often employed. Despite its widespread use, the specific contribution of normalization to classification performance requires further validation. In this study, a dataset comprising two rice varieties Osmancik and Cammeo produced in Turkey was utilized to evaluate the impact of normalization on classification outcomes. The k-Nearest Neighbor (KNN) algorithm was applied to both normalized and non-normalized datasets, and their respective performances were compared across various training and testing ratios. The normalized dataset achieved a classification accuracy of 0.950, compared to 0.921 for the non-normalized dataset. This approximately 3% improvement demonstrates the positive effect of data normalization on classification accuracy. These findings underscore the importance of incorporating normalization in ML models for rice classification to optimize performance and accuracy.</description>
    <pubDate>Thu, 19 Sep 2024 00:00:00 GMT</pubDate>
    <content:encoded><![CDATA[<p>Rice is a staple food for a significant portion of the global population, particularly in countries where it constitutes the primary source of sustenance. Accurate classification of rice varieties is critical for enhancing both agricultural yield and economic outcomes. Traditional classification methods are often inefficient, leading to increased costs, higher misclassification rates, and time loss. To address these limitations, automated classification systems employing machine learning (ML) algorithms have gained attention. However, when raw data is inadequately organized or scattered, classification accuracy can decline. To improve data organization, normalization processes are often employed. Despite its widespread use, the specific contribution of normalization to classification performance requires further validation. In this study, a dataset comprising two rice varieties Osmancik and Cammeo produced in Turkey was utilized to evaluate the impact of normalization on classification outcomes. The k-Nearest Neighbor (KNN) algorithm was applied to both normalized and non-normalized datasets, and their respective performances were compared across various training and testing ratios. The normalized dataset achieved a classification accuracy of 0.950, compared to 0.921 for the non-normalized dataset. This approximately 3% improvement demonstrates the positive effect of data normalization on classification accuracy. These findings underscore the importance of incorporating normalization in ML models for rice classification to optimize performance and accuracy.</p>]]></content:encoded>
    <dc:title>Evaluating the Impact of Data Normalization on Rice Classification Using Machine Learning Algorithms</dc:title>
    <dc:creator>Ahmet Çelik</dc:creator>
    <dc:identifier>doi:10.56578/ataiml030303</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2024-09-19</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2024-09-19</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>3</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>162</prism:startingPage>
    <prism:doi>10.56578/ataiml030303</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2024_3_3/ataiml030303</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_3/ataiml030302">
    <title>Acadlore Transactions on AI and Machine Learning, 2024, Volume 3, Issue 3, Pages 148-161: Integrating Long Short-Term Memory and Multilayer Perception for an Intelligent Public Affairs Distribution Model</title>
    <link>https://www.acadlore.com/article/ATAIML/2024_3_3/ataiml030302</link>
    <description>In the realm of urban public affairs management, the necessity for accurate and intelligent distribution of resources has become increasingly imperative for effective social governance. This study, drawing on crime data from Chicago in 2022, introduces a novel approach to public affairs distribution by employing Long Short-Term Memory (LSTM), Multilayer Perceptron (MLP), and their integration. By extensively preprocessing textual, numerical, boolean, temporal, and geographical data, the proposed models were engineered to discern complex interrelations among multidimensional features, thereby enhancing their capability to classify and predict public affairs events. Comparative analysis reveals that the hybrid LSTM-MLP model exhibits superior prediction accuracy over the individual LSTM or MLP models, evidencing enhanced proficiency in capturing intricate event patterns and trends. The effectiveness of the model was further corroborated through a detailed examination of training and validation accuracies, loss trajectories, and confusion matrices. This study contributes a robust methodology to the field of intelligent public affairs prediction and resource allocation, demonstrating significant practical applicability and potential for widespread implementation.</description>
    <pubDate>Thu, 01 Aug 2024 00:00:00 GMT</pubDate>
    <content:encoded><![CDATA[<p>In the realm of urban public affairs management, the necessity for accurate and intelligent distribution of resources has become increasingly imperative for effective social governance. This study, drawing on crime data from Chicago in 2022, introduces a novel approach to public affairs distribution by employing Long Short-Term Memory (LSTM), Multilayer Perceptron (MLP), and their integration. By extensively preprocessing textual, numerical, boolean, temporal, and geographical data, the proposed models were engineered to discern complex interrelations among multidimensional features, thereby enhancing their capability to classify and predict public affairs events. Comparative analysis reveals that the hybrid LSTM-MLP model exhibits superior prediction accuracy over the individual LSTM or MLP models, evidencing enhanced proficiency in capturing intricate event patterns and trends. The effectiveness of the model was further corroborated through a detailed examination of training and validation accuracies, loss trajectories, and confusion matrices. This study contributes a robust methodology to the field of intelligent public affairs prediction and resource allocation, demonstrating significant practical applicability and potential for widespread implementation.</p>]]></content:encoded>
    <dc:title>Integrating Long Short-Term Memory and Multilayer Perception for an Intelligent Public Affairs Distribution Model</dc:title>
    <dc:creator>Hong Fang</dc:creator>
    <dc:creator>Minjing Peng</dc:creator>
    <dc:creator>Xiaotian Du</dc:creator>
    <dc:creator>Baisheng Lin</dc:creator>
    <dc:creator>Mingjun Jiang</dc:creator>
    <dc:creator>Jieyi Hu</dc:creator>
    <dc:creator>Zhenjiang Long</dc:creator>
    <dc:creator>Qiaoxian Hu</dc:creator>
    <dc:identifier>doi:10.56578/ataiml030302</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>2024-08-01</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>2024-08-01</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>3</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>148</prism:startingPage>
    <prism:doi>10.56578/ataiml030302</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2024_3_3/ataiml030302</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_3/ataiml030301">
    <title>Acadlore Transactions on AI and Machine Learning, 2024, Volume 3, Issue 3, Pages 137-147: Comparative Analysis of Machine Learning Algorithms for Sentiment Analysis in Film Reviews</title>
    <link>https://www.acadlore.com/article/ATAIML/2024_3_3/ataiml030301</link>
    <description>Sentiment analysis, a crucial component of natural language processing (NLP), involves the classification of subjective information by extracting emotional content from textual data. This technique plays a significant role in the movie industry by analyzing public opinions about films. The present research addresses a gap in the literature by conducting a comparative analysis of various machine learning algorithms for sentiment analysis in film reviews, utilizing a dataset from Kaggle comprising 50,000 reviews. Classifiers such as Logistic Regression, Multinomial Naive Bayes, Linear Support Vector Classification (LinearSVC), and Gradient Boosting were employed to categorize the reviews into positive and negative sentiments. The emphasis was placed on specifying and comparing these classifiers in the context of film review sentiment analysis, highlighting their respective advantages and disadvantages. The dataset underwent thorough preprocessing, including data cleaning and the application of stemming techniques to enhance processing efficiency. The performance of the classifiers was rigorously evaluated using metrics such as accuracy, precision, recall, and F1-score. Among the classifiers, LinearSVC demonstrated the highest accuracy at 90.98%. This comprehensive evaluation not only identified the most effective classifier but also elucidated the contextual efficiencies of various algorithms. The findings indicate that LinearSVC excels at accurately classifying sentiments in film reviews, thereby offering new insights into public opinions on films. Furthermore, the extended comparison provides a step-by-step guide for selecting the most suitable classifier based on dataset characteristics and context, contributing valuable knowledge to the existing literature on the impact of different machine learning approaches on sentiment analysis outcomes in the movie industry.</description>
    <pubDate>07-22-2024</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Sentiment analysis, a crucial component of natural language processing (NLP), involves the classification of subjective information by extracting emotional content from textual data. This technique plays a significant role in the movie industry by analyzing public opinions about films. The present research addresses a gap in the literature by conducting a comparative analysis of various machine learning algorithms for sentiment analysis in film reviews, utilizing a dataset from Kaggle comprising 50,000 reviews. Classifiers such as Logistic Regression, Multinomial Naive Bayes, Linear Support Vector Classification (LinearSVC), and Gradient Boosting were employed to categorize the reviews into positive and negative sentiments. The emphasis was placed on specifying and comparing these classifiers in the context of film review sentiment analysis, highlighting their respective advantages and disadvantages. The dataset underwent thorough preprocessing, including data cleaning and the application of stemming techniques to enhance processing efficiency. The performance of the classifiers was rigorously evaluated using metrics such as accuracy, precision, recall, and F1-score. Among the classifiers, LinearSVC demonstrated the highest accuracy at 90.98%. This comprehensive evaluation not only identified the most effective classifier but also elucidated the contextual efficiencies of various algorithms. The findings indicate that LinearSVC excels at accurately classifying sentiments in film reviews, thereby offering new insights into public opinions on films. Furthermore, the extended comparison provides a step-by-step guide for selecting the most suitable classifier based on dataset characteristics and context, contributing valuable knowledge to the existing literature on the impact of different machine learning approaches on sentiment analysis outcomes in the movie industry.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Comparative Analysis of Machine Learning Algorithms for Sentiment Analysis in Film Reviews</dc:title>
    <dc:creator>mohamed cherradi</dc:creator>
    <dc:creator>anass el haddadi</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml030301</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>07-22-2024</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>07-22-2024</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>3</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>137</prism:startingPage>
    <prism:doi>10.56578/ataiml030301</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2024_3_3/ataiml030301</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_2/ataiml030205">
    <title>Acadlore Transactions on AI and Machine Learning, 2024, Volume 3, Issue 2, Pages 119-136: DNA-Level Enhanced Vigenère Encryption for Securing Color Images</title>
    <link>https://www.acadlore.com/article/ATAIML/2024_3_2/ataiml030205</link>
    <description>This study presents the development of a novel method for color image encryption, leveraging an enhanced Vigenère algorithm. The conventional Vigenère cipher is augmented with substantial substitution tables derived from widely used chaotic maps in the cryptography domain, including the logistic map and the A.J. map. These enhancements incorporate new confusion and diffusion functions integrated into the substitution tables. Following the Vigenère encryption process, a transition to deoxyribonucleic acid (DNA) notation is implemented, controlled by a pseudo-random crossover matrix. This matrix facilitates a genetic crossover specifically adapted for image encryption. Simulations conducted on a variety of images of diverse formats and sizes demonstrate the robustness of this approach against differential and frequency-based attacks. The substantial size of the encryption key significantly enhances the system's security, providing strong protection against brute-force attacks.</description>
    <pubDate>06-25-2024</pubDate>
    <content:encoded>&lt;![CDATA[ This study presents the development of a novel method for color image encryption, leveraging an enhanced Vigenère algorithm. The conventional Vigenère cipher is augmented with substantial substitution tables derived from widely used chaotic maps in the cryptography domain, including the logistic map and the A.J. map. These enhancements incorporate new confusion and diffusion functions integrated into the substitution tables. Following the Vigenère encryption process, a transition to deoxyribonucleic acid (DNA) notation is implemented, controlled by a pseudo-random crossover matrix. This matrix facilitates a genetic crossover specifically adapted for image encryption. Simulations conducted on a variety of images of diverse formats and sizes demonstrate the robustness of this approach against differential and frequency-based attacks. The substantial size of the encryption key significantly enhances the system's security, providing strong protection against brute-force attacks. ]]&gt;</content:encoded>
    <dc:title>DNA-Level Enhanced Vigenère Encryption for Securing Color Images</dc:title>
    <dc:creator>abdelhakim chemlal</dc:creator>
    <dc:creator>hassan tabti</dc:creator>
    <dc:creator>hamid el bourakkadi</dc:creator>
    <dc:creator>rrghout hicham</dc:creator>
    <dc:creator>abdellatif jarjar</dc:creator>
    <dc:creator>abdellhamid benazzi</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml030205</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>06-25-2024</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>06-25-2024</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>119</prism:startingPage>
    <prism:doi>10.56578/ataiml030205</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2024_3_2/ataiml030205</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_2/ataiml030204">
    <title>Acadlore Transactions on AI and Machine Learning, 2024, Volume 3, Issue 2, Pages 106-118: Characterization and Risk Assessment of Cyber Security Threats in Cloud Computing: A Comparative Evaluation of Mitigation Techniques</title>
    <link>https://www.acadlore.com/article/ATAIML/2024_3_2/ataiml030204</link>
    <description>Advancements in information technology have significantly enhanced productivity and efficiency through the adoption of cloud computing, yet this adoption has also introduced a spectrum of security threats. Effective cybersecurity mitigation strategies are imperative to minimize the impact on cloud infrastructure and ensure reliability. This study seeks to categorize and assess the risk levels of cybersecurity threats in cloud computing environments, providing a comprehensive characterization based on eleven major causes, including natural disasters, loss of encryption keys, unauthorized login access, and others. Using fuzzy set theory to analyze uncertainties and model threats, threats were identified, prioritized, and categorized according to their impact on cloud infrastructure. A high level of data loss was revealed in five key features, such as encryption key compromise and unauthorized login access, while a lower impact was observed in unknown cloud storage and exposure to sensitive data. Seven threat features, including encryption key loss and operating system failure, were found to significantly contribute to data breaches. In contrast, others like virtual machine sharing and impersonation, exhibited lower risk levels. A comparative analysis of threat mitigation techniques determined Spoofing, Tampering, Repudiation, Information Disclosure, Denial of Service and Elevation of Privilege (STRIDE) as the most effective methodology with a score of 59, followed by Quality Threat Modeling Methodology (QTMM) (57), Common Vulnerability Scoring System (CVSS) (51), Process for Attack Simulation and Threat Analysis (PASTA) (50), and Persona non-Grata (PnG) (47). Attack Tree and Hierarchical Threat Modeling Methodology (HTMM) each achieved 46, while Linkability, Identifiability, Nonrepudiation, Detectability, Disclosure of Information, Unawareness and Noncompliance (LINDDUN) scored 45. 
These findings underscore the value of fuzzy set theory in tandem with threat modeling to categorize and assess cybersecurity risks in cloud computing. STRIDE is recommended as an effective modeling technique for cloud environments. This comprehensive analysis provides critical insights for organizations and security experts, empowering them to proactively address recurring threats and minimize disruptions to daily operations.</description>
    <pubDate>05-15-2024</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Advancements in information technology have significantly enhanced productivity and efficiency through the adoption of cloud computing, yet this adoption has also introduced a spectrum of security threats. Effective cybersecurity mitigation strategies are imperative to minimize the impact on cloud infrastructure and ensure reliability. This study seeks to categorize and assess the risk levels of cybersecurity threats in cloud computing environments, providing a comprehensive characterization based on eleven major causes, including natural disasters, loss of encryption keys, unauthorized login access, and others. Using fuzzy set theory to analyze uncertainties and model threats, threats were identified, prioritized, and categorized according to their impact on cloud infrastructure. A high level of data loss was revealed in five key features, such as encryption key compromise and unauthorized login access, while a lower impact was observed in unknown cloud storage and exposure to sensitive data. Seven threat features, including encryption key loss and operating system failure, were found to significantly contribute to data breaches. In contrast, others like virtual machine sharing and impersonation, exhibited lower risk levels. A comparative analysis of threat mitigation techniques determined Spoofing, Tampering, Repudiation, Information Disclosure, Denial of Service and Elevation of Privilege (STRIDE) as the most effective methodology with a score of 59, followed by Quality Threat Modeling Methodology (QTMM) (57), Common Vulnerability Scoring System (CVSS) (51), Process for Attack Simulation and Threat Analysis (PASTA) (50), and Persona non-Grata (PnG) (47). Attack Tree and Hierarchical Threat Modeling Methodology (HTMM) each achieved 46, while Linkability, Identifiability, Nonrepudiation, Detectability, Disclosure of Information, Unawareness and Noncompliance (LINDDUN) scored 45. 
These findings underscore the value of fuzzy set theory in tandem with threat modeling to categorize and assess cybersecurity risks in cloud computing. STRIDE is recommended as an effective modeling technique for cloud environments. This comprehensive analysis provides critical insights for organizations and security experts, empowering them to proactively address recurring threats and minimize disruptions to daily operations.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Characterization and Risk Assessment of Cyber Security Threats in Cloud Computing: A Comparative Evaluation of Mitigation Techniques</dc:title>
    <dc:creator>oludele awodele</dc:creator>
    <dc:creator>chibueze ogbonna</dc:creator>
    <dc:creator>emmanuel o. ogu</dc:creator>
    <dc:creator>johnson o. hinmikaiye</dc:creator>
    <dc:creator>jide e. t. akinsola</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml030204</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>05-15-2024</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>05-15-2024</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>106</prism:startingPage>
    <prism:doi>10.56578/ataiml030204</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2024_3_2/ataiml030204</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_2/ataiml030203">
    <title>Acadlore Transactions on AI and Machine Learning, 2024, Volume 3, Issue 2, Pages 94-105: Advanced Dental Implant System Classification with Pre-trained CNN Models and Multi-branch Spectral Channel Attention Networks</title>
    <link>https://www.acadlore.com/article/ATAIML/2024_3_2/ataiml030203</link>
    <description>Dental implants (DIs) are prone to failure due to uncommon mechanical complications and fractures. Precise identification of implant fixture systems from periapical radiographs is imperative for accurate diagnosis and treatment, particularly in the absence of comprehensive medical records. Existing methods predominantly leverage spatial features derived from implant images using convolutional neural networks (CNNs). However, texture images exhibit distinctive patterns detectable as strong energy at specific frequencies in the frequency domain, a characteristic that motivates this study to employ frequency-domain analysis through a novel multi-branch spectral channel attention network (MBSCAN). High-frequency data obtained via a two-dimensional (2D) discrete cosine transform (DCT) are exploited to retain phase information and broaden the application of frequency-domain attention mechanisms. Fine-tuning of the multi-branch spectral channel attention (MBSCA) parameters is achieved through the modified aquila optimizer (MAO) algorithm, optimizing classification accuracy. Furthermore, pre-trained CNN architectures such as Visual Geometry Group (VGG) 16 and VGG19 are harnessed to extract features for classifying intact and fractured DIs from panoramic and periapical radiographs. The dataset comprises 251 radiographic images of intact DIs and 194 images of fractured DIs, meticulously selected from a pool of 21,398 DIs examined across two dental facilities. The proposed model has exhibited robust accuracy in detecting and classifying fractured DIs, particularly when relying exclusively on periapical images. The MBSCA-MAO scheme has demonstrated exceptional performance, achieving a classification accuracy of 95.7% with precision, recall, and F1-score values of 95.2%, 94.3%, and 95.6%, respectively. 
Comparative analysis indicates that the proposed model significantly surpasses existing methods, showcasing its superior efficacy in DI classification.</description>
    <pubDate>05-15-2024</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Dental implants (DIs) are prone to failure due to uncommon mechanical complications and fractures. Precise identification of implant fixture systems from periapical radiographs is imperative for accurate diagnosis and treatment, particularly in the absence of comprehensive medical records. Existing methods predominantly leverage spatial features derived from implant images using convolutional neural networks (CNNs). However, texture images exhibit distinctive patterns detectable as strong energy at specific frequencies in the frequency domain, a characteristic that motivates this study to employ frequency-domain analysis through a novel multi-branch spectral channel attention network (MBSCAN). High-frequency data obtained via a two-dimensional (2D) discrete cosine transform (DCT) are exploited to retain phase information and broaden the application of frequency-domain attention mechanisms. Fine-tuning of the multi-branch spectral channel attention (MBSCA) parameters is achieved through the modified aquila optimizer (MAO) algorithm, optimizing classification accuracy. Furthermore, pre-trained CNN architectures such as Visual Geometry Group (VGG) 16 and VGG19 are harnessed to extract features for classifying intact and fractured DIs from panoramic and periapical radiographs. The dataset comprises 251 radiographic images of intact DIs and 194 images of fractured DIs, meticulously selected from a pool of 21,398 DIs examined across two dental facilities. The proposed model has exhibited robust accuracy in detecting and classifying fractured DIs, particularly when relying exclusively on periapical images. The MBSCA-MAO scheme has demonstrated exceptional performance, achieving a classification accuracy of 95.7% with precision, recall, and F1-score values of 95.2%, 94.3%, and 95.6%, respectively. 
Comparative analysis indicates that the proposed model significantly surpasses existing methods, showcasing its superior efficacy in DI classification.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Advanced Dental Implant System Classification with Pre-trained CNN Models and Multi-branch Spectral Channel Attention Networks</dc:title>
    <dc:creator>srinivasa rao vemula</dc:creator>
    <dc:creator>maruthi vemula</dc:creator>
    <dc:creator>ramesh vatambeti</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml030203</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>05-15-2024</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>05-15-2024</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>94</prism:startingPage>
    <prism:doi>10.56578/ataiml030203</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2024_3_2/ataiml030203</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_2/ataiml030202">
    <title>Acadlore Transactions on AI and Machine Learning, 2024, Volume 3, Issue 2, Pages 84-93: Enhanced Named Entity Recognition Based on Multi-Feature Fusion Using Dual Graph Neural Networks</title>
    <link>https://www.acadlore.com/article/ATAIML/2024_3_2/ataiml030202</link>
    <description>Named Entity Recognition (NER), a pivotal task in information extraction, is aimed at identifying named entities of various types within text. Traditional NER methods, however, often fall short in providing sufficient semantic representation of text and preserving word order information. Addressing these challenges, a novel approach is proposed, leveraging dual Graph Neural Networks (GNNs) based on multi-feature fusion. This approach constructs a co-occurrence graph and a dependency syntax graph from text sequences, capturing textual features from a dual-graph perspective to overcome the oversight of word interdependencies. Furthermore, Bidirectional Long Short-Term Memory Networks (BiLSTMs) are utilized to encode text, addressing the issues of neglecting word order features and the difficulty in capturing contextual semantic information. Additionally, to enable the model to learn features across different subspaces and the varying degrees of information significance, a multi-head self-attention mechanism is introduced for calculating internal dependency weights within feature vectors. The proposed model achieves F1-scores of 84.85% and 96.34% on the CCKS-2019 and Resume datasets, respectively, marking improvements of 1.13 and 0.67 percentage points over baseline models. The results affirm the effectiveness of the presented method in enhancing performance on the NER task.</description>
    <pubDate>04-02-2024</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Named Entity Recognition (NER), a pivotal task in information extraction, is aimed at identifying named entities of various types within text. Traditional NER methods, however, often fall short in providing sufficient semantic representation of text and preserving word order information. Addressing these challenges, a novel approach is proposed, leveraging dual Graph Neural Networks (GNNs) based on multi-feature fusion. This approach constructs a co-occurrence graph and a dependency syntax graph from text sequences, capturing textual features from a dual-graph perspective to overcome the oversight of word interdependencies. Furthermore, Bidirectional Long Short-Term Memory Networks (BiLSTMs) are utilized to encode text, addressing the issues of neglecting word order features and the difficulty in capturing contextual semantic information. Additionally, to enable the model to learn features across different subspaces and the varying degrees of information significance, a multi-head self-attention mechanism is introduced for calculating internal dependency weights within feature vectors. The proposed model achieves F1-scores of 84.85% and 96.34% on the CCKS-2019 and Resume datasets, respectively, marking improvements of 1.13 and 0.67 percentage points over baseline models. The results affirm the effectiveness of the presented method in enhancing performance on the NER task.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Enhanced Named Entity Recognition Based on Multi-Feature Fusion Using Dual Graph Neural Networks</dc:title>
    <dc:creator>hanzhao gu</dc:creator>
    <dc:creator>jialin ma</dc:creator>
    <dc:creator>yanran zhao</dc:creator>
    <dc:creator>ashim khadka</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml030202</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>04-02-2024</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>04-02-2024</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>84</prism:startingPage>
    <prism:doi>10.56578/ataiml030202</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2024_3_2/ataiml030202</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_2/ataiml030201">
    <title>Acadlore Transactions on AI and Machine Learning, 2024, Volume 3, Issue 2, Pages 70-83: Advances in Breast Cancer Segmentation: A Comprehensive Review</title>
    <link>https://www.acadlore.com/article/ATAIML/2024_3_2/ataiml030201</link>
    <description>The diagnosis and treatment of breast cancer (BC) are significantly subject to medical imaging techniques, with segmentation being crucial in delineating pathological regions for precise diagnosis and treatment planning. This comprehensive analysis explores a variety of segmentation methodologies, encompassing classical, machine learning, deep learning (DL), and manual segmentation, as applied in the medical imaging field for BC detection. Classical segmentation techniques, which include edge-driven and threshold-driven segmentation, are highlighted for their utilization of filters and region-based methods to achieve precise delineation. Emphasis is placed on the establishment of clear guidelines for the selection and comparison of these classical approaches. Segmentation through machine learning is discussed, encompassing both unsupervised and supervised techniques that leverage annotated images and pathology reports for model training, with a focus on their efficacy in BC segmentation tasks. DL methods, especially models such as U-Net and convolutional neural networks (CNNs), are underscored for their remarkable efficiency in segmenting BC images, with U-Net models noted for their minimal requirement for annotated images and achieving accuracy levels up to 99.7%. Manual segmentation, though reliable, is identified as time-consuming and susceptible to errors. Various metrics, such as Dice, F-score, Intersection over Union (IOU), and Area Under the Curve (AUC), are used for assessing and comparing the segmentation techniques. The analysis acknowledges the challenges posed by limited dataset availability, data range inadequacy, and confidentiality concerns, which hinder the broader integration of segmentation methods into clinical practice. Solutions to overcome these challenges are proposed, including the promotion of partnerships to develop and distribute extensive datasets for BC segmentation. 
This approach would necessitate the pooling of resources from multiple organizations and the adoption of anonymization techniques to safeguard data privacy. Through this lens, the analysis aims to provide a thorough analysis of the practical implications of segmentation methods in BC diagnosis and management, paving the way for future advancements in the field.</description>
    <pubDate>03-20-2024</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;The diagnosis and treatment of breast cancer (BC) are significantly subject to medical imaging techniques, with segmentation being crucial in delineating pathological regions for precise diagnosis and treatment planning. This comprehensive analysis explores a variety of segmentation methodologies, encompassing classical, machine learning, deep learning (DL), and manual segmentation, as applied in the medical imaging field for BC detection. Classical segmentation techniques, which include edge-driven and threshold-driven segmentation, are highlighted for their utilization of filters and region-based methods to achieve precise delineation. Emphasis is placed on the establishment of clear guidelines for the selection and comparison of these classical approaches. Segmentation through machine learning is discussed, encompassing both unsupervised and supervised techniques that leverage annotated images and pathology reports for model training, with a focus on their efficacy in BC segmentation tasks. DL methods, especially models such as U-Net and convolutional neural networks (CNNs), are underscored for their remarkable efficiency in segmenting BC images, with U-Net models noted for their minimal requirement for annotated images and achieving accuracy levels up to 99.7%. Manual segmentation, though reliable, is identified as time-consuming and susceptible to errors. Various metrics, such as Dice, F-score, Intersection over Union (IOU), and Area Under the Curve (AUC), are used for assessing and comparing the segmentation techniques. The analysis acknowledges the challenges posed by limited dataset availability, data range inadequacy, and confidentiality concerns, which hinder the broader integration of segmentation methods into clinical practice. Solutions to overcome these challenges are proposed, including the promotion of partnerships to develop and distribute extensive datasets for BC segmentation. 
This approach would necessitate the pooling of resources from multiple organizations and the adoption of anonymization techniques to safeguard data privacy. Through this lens, the analysis aims to provide a thorough analysis of the practical implications of segmentation methods in BC diagnosis and management, paving the way for future advancements in the field.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Advances in Breast Cancer Segmentation: A Comprehensive Review</dc:title>
    <dc:creator>ayah abo-el-rejal</dc:creator>
    <dc:creator>shehab eldeen ayman</dc:creator>
    <dc:creator>farah aymen</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml030201</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>03-20-2024</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>03-20-2024</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>70</prism:startingPage>
    <prism:doi>10.56578/ataiml030201</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2024_3_2/ataiml030201</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_1/ataiml030105">
    <title>Acadlore Transactions on AI and Machine Learning, 2024, Volume 3, Issue 1: Enhancing Melanoma Skin Cancer Diagnosis Through Transfer Learning: An EfficientNetB0 Approach</title>
    <link>https://www.acadlore.com/article/ATAIML/2024_3_1/ataiml030105</link>
    <description>Skin cancer, a significant health concern globally, necessitates innovative strategies for its early detection and classification. In this context, a novel methodology employing the state-of-the-art EfficientNetB0 deep learning architecture has been developed, aiming to augment the accuracy and efficiency of skin cancer diagnoses. This approach focuses on automating the classification of skin lesions, addressing the challenges posed by their complex structures and the subjective nature of conventional diagnostic methods. Through the adoption of advanced training techniques, including adaptive learning rates and Rectified Adam (RAdam) optimization, a robust model for skin cancer classification has been constructed. The findings underscore the model's capability to achieve convergence during training, illustrating its potential to transform dermatological diagnostics significantly. This research contributes to the broader fields of medical imaging and artificial intelligence (AI), underscoring the efficacy of deep learning in enhancing diagnostic processes. Future endeavors will explore the realms of explainable AI (XAI), collaboration with medical professionals, and adaptation of the model for telemedicine, ensuring its continued relevance and applicability in the dynamic landscape of skin cancer diagnosis.</description>
    <pubDate>03-13-2024</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Skin cancer, a significant health concern globally, necessitates innovative strategies for its early detection and classification. In this context, a novel methodology employing the state-of-the-art EfficientNetB0 deep learning architecture has been developed, aiming to augment the accuracy and efficiency of skin cancer diagnoses. This approach focuses on automating the classification of skin lesions, addressing the challenges posed by their complex structures and the subjective nature of conventional diagnostic methods. Through the adoption of advanced training techniques, including adaptive learning rates and Rectified Adam (RAdam) optimization, a robust model for skin cancer classification has been constructed. The findings underscore the model's capability to achieve convergence during training, illustrating its potential to transform dermatological diagnostics significantly. This research contributes to the broader fields of medical imaging and artificial intelligence (AI), underscoring the efficacy of deep learning in enhancing diagnostic processes. Future endeavors will explore the realms of explainable AI (XAI), collaboration with medical professionals, and adaptation of the model for telemedicine, ensuring its continued relevance and applicability in the dynamic landscape of skin cancer diagnosis.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Enhancing Melanoma Skin Cancer Diagnosis Through Transfer Learning: An EfficientNetB0 Approach</dc:title>
    <dc:creator>Rashmi Ashtagi</dc:creator>
    <dc:creator>Pramila Vasantrao Kharat</dc:creator>
    <dc:creator>Vinaya Sarmalkar</dc:creator>
    <dc:creator>Sridevi Hosmani</dc:creator>
    <dc:creator>Abhijeet R. Patil</dc:creator>
    <dc:creator>Afsha Imran Akkalkot</dc:creator>
    <dc:creator>Adithya Padthe</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml030105</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>03-13-2024</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>03-13-2024</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>57</prism:startingPage>
    <prism:doi>10.56578/ataiml030105</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2024_3_1/ataiml030105</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_1/ataiml030104">
    <title>Acadlore Transactions on AI and Machine Learning, 2024, Volume 3, Issue 1: Enhanced Color Image Encryption Utilizing a Novel Vigenere Method with Pseudorandom Affine Functions</title>
    <link>https://www.acadlore.com/article/ATAIML/2024_3_1/ataiml030104</link>
    <description>In the realm of digital image security, this study presents an innovative encryption methodology for color images, significantly advancing the traditional Vigenere cipher through the integration of two extensive pseudorandom substitution matrices. These matrices are derived from chaotic maps widely recognized for their cryptographic utility, specifically the logistic map and the skew tent map, chosen for their straightforward implementation capabilities in encryption systems and their high sensitivity to initial conditions. The process commences with the vectorization of the original image and the computation of initial values to alter the starting pixel's value, thereby initiating the encryption sequence. A novel aspect of this method is the introduction of a Vigenere mechanism that employs dynamic pseudorandom affine functions at the pixel level, enhancing the cipher's robustness. Subsequently, a comprehensive permutation strategy is applied to bolster the vector's integrity and elevate the temporal complexity against potential cryptographic attacks. Through simulations conducted on a varied collection of images, encompassing different sizes and formats, the proposed encryption technique demonstrates formidable resilience against both brute-force and differential statistical attacks, thereby affirming its efficacy and security in safeguarding digital imagery.</description>
    <pubDate>03-13-2024</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;In the realm of digital image security, this study presents an innovative encryption methodology for color images, significantly advancing the traditional Vigenere cipher through the integration of two extensive pseudorandom substitution matrices. These matrices are derived from chaotic maps widely recognized for their cryptographic utility, specifically the logistic map and the skew tent map, chosen for their straightforward implementation capabilities in encryption systems and their high sensitivity to initial conditions. The process commences with the vectorization of the original image and the computation of initial values to alter the starting pixel's value, thereby initiating the encryption sequence. A novel aspect of this method is the introduction of a Vigenere mechanism that employs dynamic pseudorandom affine functions at the pixel level, enhancing the cipher's robustness. Subsequently, a comprehensive permutation strategy is applied to bolster the vector's integrity and elevate the temporal complexity against potential cryptographic attacks. Through simulations conducted on a varied collection of images, encompassing different sizes and formats, the proposed encryption technique demonstrates formidable resilience against both brute-force and differential statistical attacks, thereby affirming its efficacy and security in safeguarding digital imagery.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Enhanced Color Image Encryption Utilizing a Novel Vigenere Method with Pseudorandom Affine Functions</dc:title>
    <dc:creator>Hamid El Bourakkadi</dc:creator>
    <dc:creator>Abdelhakim Chemlal</dc:creator>
    <dc:creator>Hassan Tabti</dc:creator>
    <dc:creator>Mourad Kattass</dc:creator>
    <dc:creator>Abdellatif Jarjar</dc:creator>
    <dc:creator>Abdellhamid Benazzi</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml030104</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>03-13-2024</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>03-13-2024</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>36</prism:startingPage>
    <prism:doi>10.56578/ataiml030104</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2024_3_1/ataiml030104</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_1/ataiml030103">
    <title>Acadlore Transactions on AI and Machine Learning, 2024, Volume 3, Issue 1: Enhanced Real-Time Facial Expression Recognition Using Deep Learning</title>
    <link>https://www.acadlore.com/article/ATAIML/2024_3_1/ataiml030103</link>
    <description>In the realm of facial expression recognition (FER), the identification and classification of seven universal emotional states, surprise, disgust, fear, happiness, neutrality, anger, and contempt, are of paramount importance. This research focuses on the application of convolutional neural networks (CNNs) for the extraction and categorization of these expressions. Over the past decade, CNNs have emerged as a significant area of research in human-computer interaction, surpassing previous methodologies with their superior feature learning capabilities. While current models demonstrate exceptional accuracy in recognizing facial expressions within controlled laboratory datasets, their performance significantly diminishes when applied to real-time, uncontrolled datasets. Challenges such as degraded image quality, occlusions, variable lighting, and alterations in head pose are commonly encountered in images sourced from unstructured environments like the internet. This study aims to enhance the recognition accuracy of FER by employing deep learning techniques to process images captured in real-time, particularly those of lower resolution. The objective is to augment the accuracy of FER in real-world datasets, which are inherently more complex and collected under less controlled conditions, compared to laboratory-collected data. The effectiveness of a deep learning-based approach to emotion detection in photographs is rigorously evaluated in this work. The proposed method is exhaustively compared with manual techniques and other existing approaches to assess its efficacy. This comparison forms the foundation for a subjective evaluation methodology, focusing on validation and end-user satisfaction. The findings conclusively demonstrate the method's proficiency in accurately recognizing emotions in both laboratory and real-world scenarios, thereby underscoring the potential of deep learning in the domain of facial emotion identification.</description>
    <pubDate>01-24-2024</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;In the realm of facial expression recognition (FER), the identification and classification of seven universal emotional states, surprise, disgust, fear, happiness, neutrality, anger, and contempt, are of paramount importance. This research focuses on the application of convolutional neural networks (CNNs) for the extraction and categorization of these expressions. Over the past decade, CNNs have emerged as a significant area of research in human-computer interaction, surpassing previous methodologies with their superior feature learning capabilities. While current models demonstrate exceptional accuracy in recognizing facial expressions within controlled laboratory datasets, their performance significantly diminishes when applied to real-time, uncontrolled datasets. Challenges such as degraded image quality, occlusions, variable lighting, and alterations in head pose are commonly encountered in images sourced from unstructured environments like the internet. This study aims to enhance the recognition accuracy of FER by employing deep learning techniques to process images captured in real-time, particularly those of lower resolution. The objective is to augment the accuracy of FER in real-world datasets, which are inherently more complex and collected under less controlled conditions, compared to laboratory-collected data. The effectiveness of a deep learning-based approach to emotion detection in photographs is rigorously evaluated in this work. The proposed method is exhaustively compared with manual techniques and other existing approaches to assess its efficacy. This comparison forms the foundation for a subjective evaluation methodology, focusing on validation and end-user satisfaction. 
The findings conclusively demonstrate the method's proficiency in accurately recognizing emotions in both laboratory and real-world scenarios, thereby underscoring the potential of deep learning in the domain of facial emotion identification.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Enhanced Real-Time Facial Expression Recognition Using Deep Learning</dc:title>
    <dc:creator>Hafiz Burhan Ul Haq</dc:creator>
    <dc:creator>Waseem Akram</dc:creator>
    <dc:creator>Muhammad Nauman Irshad</dc:creator>
    <dc:creator>Amna Kosar</dc:creator>
    <dc:creator>Muhammad Abid</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml030103</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>01-24-2024</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>01-24-2024</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>24</prism:startingPage>
    <prism:doi>10.56578/ataiml030103</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2024_3_1/ataiml030103</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_1/ataiml030102">
    <title>Acadlore Transactions on AI and Machine Learning, 2024, Volume 3, Issue 1: Adaptive Lane Keeping Assistance System with Integrated Driver Intent and Lane Departure Warning</title>
    <link>https://www.acadlore.com/article/ATAIML/2024_3_1/ataiml030102</link>
    <description>The development of an adaptive Lane Keeping Assistance System (LKAS) is presented, focusing on enhancing vehicular lateral stability and alleviating driver workload. Traditional LKAS with static parameters struggle to accommodate varying driver behaviors. Addressing this challenge, the proposed system integrates adaptive driver characteristics, aligning with individual driving habits and intentions. A novel lane departure decision model is introduced, employing time-space domain fusion to effectively discern driver's lane change intentions, thus informing system decisions. Further innovation is realized through the application of reinforcement learning theory, culminating in the creation of a master controller for lane departure intervention. This controller dynamically adjusts to driver behavior, optimizing lane keeping accuracy. Extensive simulations, coupled with hardware-in-the-loop experiments using a driving simulator, substantiate the system's efficacy, demonstrating marked improvements in lane keeping precision. These advancements position the system as a significant contribution to the field of driver assistance technologies.</description>
    <pubDate>01-21-2024</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;The development of an adaptive Lane Keeping Assistance System (LKAS) is presented, focusing on enhancing vehicular lateral stability and alleviating driver workload. Traditional LKAS with static parameters struggle to accommodate varying driver behaviors. Addressing this challenge, the proposed system integrates adaptive driver characteristics, aligning with individual driving habits and intentions. A novel lane departure decision model is introduced, employing time-space domain fusion to effectively discern driver's lane change intentions, thus informing system decisions. Further innovation is realized through the application of reinforcement learning theory, culminating in the creation of a master controller for lane departure intervention. This controller dynamically adjusts to driver behavior, optimizing lane keeping accuracy. Extensive simulations, coupled with hardware-in-the-loop experiments using a driving simulator, substantiate the system's efficacy, demonstrating marked improvements in lane keeping precision. These advancements position the system as a significant contribution to the field of driver assistance technologies.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Adaptive Lane Keeping Assistance System with Integrated Driver Intent and Lane Departure Warning</dc:title>
    <dc:creator>Haigang Wei</dc:creator>
    <dc:creator>Wei Tong</dc:creator>
    <dc:creator>Yueyong Jiang</dc:creator>
    <dc:creator>Jianlu Li</dc:creator>
    <dc:creator>Ramesh Vatambeti</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml030102</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>01-21-2024</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>01-21-2024</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>11</prism:startingPage>
    <prism:doi>10.56578/ataiml030102</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2024_3_1/ataiml030102</prism:url>
    <cc:license rdf:resource="CC BY 4.0"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2024_3_1/ataiml030101">
    <title>Acadlore Transactions on AI and Machine Learning, 2024, Volume 3, Issue 1: Enhanced Pest and Disease Detection in Agriculture Using Deep Learning-Enabled Drones</title>
    <link>https://www.acadlore.com/article/ATAIML/2024_3_1/ataiml030101</link>
    <description>In this study, an integrated pest and disease recognition system for agricultural drones has been developed, leveraging deep learning technologies to significantly improve the accuracy and efficiency of pest and disease detection in agricultural settings. By employing convolutional neural networks (CNN) in conjunction with high-definition image acquisition and wireless data transmission, the system demonstrates proficiency in the effective identification and classification of various agricultural pests and diseases. Methodologically, a deep learning framework has been innovatively applied, incorporating critical modules such as image acquisition, data transmission, and pest and disease identification. This comprehensive approach facilitates rapid and precise classification of agricultural pests and diseases, while catering to the needs of remote operation and real-time data processing, thus ensuring both system efficiency and data security. Comparative analyses reveal that this system offers a notable enhancement in both accuracy and response time for pest and disease recognition, surpassing traditional detection methods and optimizing the management of agricultural pests and diseases. The significant contribution of this research is the successful integration of deep learning into the domain of agricultural pest and disease detection, marking a new era in smart agriculture technology. The findings of this study bear substantial theoretical and practical implications, advancing precision agriculture practices and contributing to the sustainability and efficiency of agricultural production.</description>
    <pubDate>01-11-2024</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;In this study, an integrated pest and disease recognition system for agricultural drones has been developed, leveraging deep learning technologies to significantly improve the accuracy and efficiency of pest and disease detection in agricultural settings. By employing convolutional neural networks (CNN) in conjunction with high-definition image acquisition and wireless data transmission, the system demonstrates proficiency in the effective identification and classification of various agricultural pests and diseases. Methodologically, a deep learning framework has been innovatively applied, incorporating critical modules such as image acquisition, data transmission, and pest and disease identification. This comprehensive approach facilitates rapid and precise classification of agricultural pests and diseases, while catering to the needs of remote operation and real-time data processing, thus ensuring both system efficiency and data security. Comparative analyses reveal that this system offers a notable enhancement in both accuracy and response time for pest and disease recognition, surpassing traditional detection methods and optimizing the management of agricultural pests and diseases. The significant contribution of this research is the successful integration of deep learning into the domain of agricultural pest and disease detection, marking a new era in smart agriculture technology. The findings of this study bear substantial theoretical and practical implications, advancing precision agriculture practices and contributing to the sustainability and efficiency of agricultural production.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Enhanced Pest and Disease Detection in Agriculture Using Deep Learning-Enabled Drones</dc:title>
    <dc:creator>Wenqi Li</dc:creator>
    <dc:creator>Xixi Han</dc:creator>
    <dc:creator>Zhibo Lin</dc:creator>
    <dc:creator>Atta-Ur Rahman</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml030101</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>01-11-2024</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>01-11-2024</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>1</prism:startingPage>
    <prism:doi>10.56578/ataiml030101</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2024_3_1/ataiml030101</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_4/ataiml020405">
    <title>Acadlore Transactions on AI and Machine Learning, 2023, Volume 2, Issue 4: DWT-Based Digital Watermarking for Various Attacks</title>
    <link>https://www.acadlore.com/article/ATAIML/2023_2_4/ataiml020405</link>
    <description>In the domain of intellectual property protection, the embedding of digital watermarks has emerged as a pivotal technique for the assertion of copyright, the conveyance of confidential messages, and the endorsement of authenticity within digital media. This research delineates the implementation of a non-blind watermarking algorithm, utilizing alpha blending facilitated by discrete wavelet transform (DWT) to embed watermarks into genuine images. Thereafter, an extraction process, constituting the inverse of embedding, retrieves these watermarks. The robustness of the embedded watermark against prevalent manipulative attacks, specifically median filter, salt and pepper (SAP) noise, Gaussian noise, speckle noise, and rotation, is rigorously evaluated. The performance of the DWT-based watermarking is quantified using the peak signal-to-noise ratio (PSNR), an objective metric reflecting fidelity. It is ascertained that the watermark remains tenaciously intact under such adversarial conditions, underscoring the proposed method's suitability for applications in digital image security and copyright verification.</description>
    <pubDate>11-15-2023</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;In the domain of intellectual property protection, the embedding of digital watermarks has emerged as a pivotal technique for the assertion of copyright, the conveyance of confidential messages, and the endorsement of authenticity within digital media. This research delineates the implementation of a non-blind watermarking algorithm, utilizing alpha blending facilitated by discrete wavelet transform (DWT) to embed watermarks into genuine images. Thereafter, an extraction process, constituting the inverse of embedding, retrieves these watermarks. The robustness of the embedded watermark against prevalent manipulative attacks, specifically median filter, salt and pepper (SAP) noise, Gaussian noise, speckle noise, and rotation, is rigorously evaluated. The performance of the DWT-based watermarking is quantified using the peak signal-to-noise ratio (PSNR), an objective metric reflecting fidelity. It is ascertained that the watermark remains tenaciously intact under such adversarial conditions, underscoring the proposed method's suitability for applications in digital image security and copyright verification.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>DWT-Based Digital Watermarking for Various Attacks</dc:title>
    <dc:creator>Nail Alaoui</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml020405</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>11-15-2023</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>11-15-2023</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>4</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>226</prism:startingPage>
    <prism:doi>10.56578/ataiml020405</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2023_2_4/ataiml020405</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_4/ataiml020404">
    <title>Acadlore Transactions on AI and Machine Learning, 2023, Volume 2, Issue 4: Predictive Modelling of Employee Attrition Using Deep Learning</title>
    <link>https://www.acadlore.com/article/ATAIML/2023_2_4/ataiml020404</link>
    <description>This investigation delineates an optimised predictive model for employee attrition within a substantial workforce, identifying pertinent models tailored to the specific context of employee and organisational variables. The selection and refinement of the appropriate predictive model serve as cornerstones for enhancements and updates, which are integral to honing the model's precision in prognosticating potential departures. Through meticulous optimisation, the model demonstrates proficiency in pinpointing the pivotal factors contributing to employee turnover and elucidating the interdependencies among salient variables. A suite of 27 general and eight critical variables were scrutinised. Pertinent correlations were unearthed, notably between monthly income and job satisfaction, home-to-work distance and job satisfaction, as well as age with both job satisfaction and performance metrics. Drawing from prior studies in analogous domains, a three-stage analytical methodology encompassing data exploration, model selection, and implementation was employed. The rigorous training of the optimised model encompassed both attrition factors and variable correlations, culminating in predictive outcomes with a precision of 90% and an accuracy of 87%. Implementing the refined model projected that 113 out of 709 employees, equating to 15.93%, were at a heightened risk of exiting the organisation. This quantitative foresight equips stakeholders with a strategic tool for preemptive interventions to mitigate turnover and sustain organisational vitality.</description>
    <pubDate>11-14-2023</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;This investigation delineates an optimised predictive model for employee attrition within a substantial workforce, identifying pertinent models tailored to the specific context of employee and organisational variables. The selection and refinement of the appropriate predictive model serve as cornerstones for enhancements and updates, which are integral to honing the model's precision in prognosticating potential departures. Through meticulous optimisation, the model demonstrates proficiency in pinpointing the pivotal factors contributing to employee turnover and elucidating the interdependencies among salient variables. A suite of 27 general and eight critical variables were scrutinised. Pertinent correlations were unearthed, notably between monthly income and job satisfaction, home-to-work distance and job satisfaction, as well as age with both job satisfaction and performance metrics. Drawing from prior studies in analogous domains, a three-stage analytical methodology encompassing data exploration, model selection, and implementation was employed. The rigorous training of the optimised model encompassed both attrition factors and variable correlations, culminating in predictive outcomes with a precision of 90% and an accuracy of 87%. Implementing the refined model projected that 113 out of 709 employees, equating to 15.93%, were at a heightened risk of exiting the organisation. This quantitative foresight equips stakeholders with a strategic tool for preemptive interventions to mitigate turnover and sustain organisational vitality.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Predictive Modelling of Employee Attrition Using Deep Learning</dc:title>
    <dc:creator>Dino Michael Quinteros</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml020404</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>11-14-2023</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>11-14-2023</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>4</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>212</prism:startingPage>
    <prism:doi>10.56578/ataiml020404</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2023_2_4/ataiml020404</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_4/ataiml020403">
    <title>Acadlore Transactions on AI and Machine Learning, 2023, Volume 2, Issue 4: Comparative Analysis of Mortality Predictions from Lassa Fever in Nigeria: A Study Using Count Regression and Machine Learning Methods</title>
    <link>https://www.acadlore.com/article/ATAIML/2023_2_4/ataiml020403</link>
    <description>In Sub-Saharan Africa, particularly in Nigeria, Lassa fever poses a significant infectious disease threat. This investigation employed count regression and machine learning techniques to model mortality rates associated with confirmed Lassa fever cases. Utilizing weekly data from January 7, 2018, to April 2, 2023, provided by the Nigeria Centre for Disease Control (NCDC), an analytical comparison between these methods was conducted. Overdispersion was indicated (p&lt;0.01), prompting the exclusive use of negative binomial and generalized negative binomial regression models. Machine learning algorithms, specifically medium Gaussian support vector machine (MGSVM), ensemble boosted trees, ensemble bagged trees, and exponential Gaussian Process Regression (GPR), were applied, with 80% of the data allocated for training and the remaining 20% for testing. The efficacy of these methods was evaluated using the coefficients of determination (R²) and the root mean square error (RMSE). Descriptive statistics revealed a total of 30,461 confirmed cases, 4,745 suspected cases, and 772 confirmed fatalities attributable to Lassa fever during the study period. The negative binomial regression model demonstrated superior performance (R²=0.1864, RMSE=4.33) relative to the generalized negative binomial model (R²=0.1915, RMSE=18.2425). However, machine learning algorithms surpassed the count regression models in predictive capability, with ensemble boosted trees emerging as the most effective (R²=0.85, RMSE=1.5994). Analysis also identified the number of confirmed cases as having a significant positive correlation with mortality rates (r=0.885, p&lt;0.01). The findings underscore the importance of promoting community hygiene practices, such as preventing rodent intrusion and securing food storage, to mitigate the transmission and consequent fatalities of Lassa fever.</description>
    <pubDate>11-12-2023</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;In Sub-Saharan Africa, particularly in Nigeria, Lassa fever poses a significant infectious disease threat. This investigation employed count regression and machine learning techniques to model mortality rates associated with confirmed Lassa fever cases. Utilizing weekly data from January 7, 2018, to April 2, 2023, provided by the Nigeria Centre for Disease Control (NCDC), an analytical comparison between these methods was conducted. Overdispersion was indicated (p&lt;0.01), prompting the exclusive use of negative binomial and generalized negative binomial regression models. Machine learning algorithms, specifically medium Gaussian support vector machine (MGSVM), ensemble boosted trees, ensemble bagged trees, and exponential Gaussian Process Regression (GPR), were applied, with 80% of the data allocated for training and the remaining 20% for testing. The efficacy of these methods was evaluated using the coefficients of determination (R²) and the root mean square error (RMSE). Descriptive statistics revealed a total of 30,461 confirmed cases, 4,745 suspected cases, and 772 confirmed fatalities attributable to Lassa fever during the study period. The negative binomial regression model demonstrated superior performance (R²=0.1864, RMSE=4.33) relative to the generalized negative binomial model (R²=0.1915, RMSE=18.2425). However, machine learning algorithms surpassed the count regression models in predictive capability, with ensemble boosted trees emerging as the most effective (R²=0.85, RMSE=1.5994). Analysis also identified the number of confirmed cases as having a significant positive correlation with mortality rates (r=0.885, p&lt;0.01). The findings underscore the importance of promoting community hygiene practices, such as preventing rodent intrusion and securing food storage, to mitigate the transmission and consequent fatalities of Lassa fever.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Comparative Analysis of Mortality Predictions from Lassa Fever in Nigeria: A Study Using Count Regression and Machine Learning Methods</dc:title>
    <dc:creator>Timothy Kayode Samson</dc:creator>
    <dc:creator>Tosin Akingbade</dc:creator>
    <dc:creator>Jesutomi Orija</dc:creator>
    <dc:identifier>doi:10.56578/ataiml020403</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>11-12-2023</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>11-12-2023</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>4</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>204</prism:startingPage>
    <prism:doi>10.56578/ataiml020403</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2023_2_4/ataiml020403</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_4/ataiml020402">
    <title>Acadlore Transactions on AI and Machine Learning, 2023, Volume 2, Issue 4: Automated Identification of Insect Pests: A Deep Transfer Learning Approach Using ResNet</title>
    <link>https://www.acadlore.com/article/ATAIML/2023_2_4/ataiml020402</link>
    <description>In the realm of agriculture, crop yields of fundamental cereals such as rice, wheat, maize, soybeans, and sugarcane are adversely impacted by insect pest invasions, leading to significant reductions in agricultural output. Traditional manual identification of these pests is labor-intensive and time-consuming, underscoring the necessity for an automated early detection and classification system. Recent advancements in machine learning, particularly deep learning, have provided robust methodologies for the classification and detection of a diverse array of insect infestations in crop fields. However, inaccuracies in pest classification could inadvertently precipitate the use of inappropriate pesticides, further endangering both agricultural yields and the surrounding ecosystems. In light of this, the efficacy of nine distinct pre-trained deep learning algorithms was evaluated to discern their capability in the accurate detection and classification of insect pests. This assessment utilized two prevalent datasets, comprising ten pest classes of varied sizes. Among the transfer learning techniques scrutinized, adaptations of ResNet-50 and ResNet-101 were deployed. It was observed that ResNet-50, when employed in a transfer learning paradigm, achieved an exemplary classification accuracy of 99.40% in the detection of agricultural pests. Such a high level of precision represents a significant advancement in the field of precision agriculture.</description>
    <pubDate>11-12-2023</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;In the realm of agriculture, crop yields of fundamental cereals such as rice, wheat, maize, soybeans, and sugarcane are adversely impacted by insect pest invasions, leading to significant reductions in agricultural output. Traditional manual identification of these pests is labor-intensive and time-consuming, underscoring the necessity for an automated early detection and classification system. Recent advancements in machine learning, particularly deep learning, have provided robust methodologies for the classification and detection of a diverse array of insect infestations in crop fields. However, inaccuracies in pest classification could inadvertently precipitate the use of inappropriate pesticides, further endangering both agricultural yields and the surrounding ecosystems. In light of this, the efficacy of nine distinct pre-trained deep learning algorithms was evaluated to discern their capability in the accurate detection and classification of insect pests. This assessment utilized two prevalent datasets, comprising ten pest classes of varied sizes. Among the transfer learning techniques scrutinized, adaptations of ResNet-50 and ResNet-101 were deployed. It was observed that ResNet-50, when employed in a transfer learning paradigm, achieved an exemplary classification accuracy of 99.40% in the detection of agricultural pests. Such a high level of precision represents a significant advancement in the field of precision agriculture.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Automated Identification of Insect Pests: A Deep Transfer Learning Approach Using ResNet</dc:title>
    <dc:creator>Christine Dewi</dc:creator>
    <dc:creator>Henoch Juli Christanto</dc:creator>
    <dc:creator>Guowei Dai</dc:creator>
    <dc:identifier>doi:10.56578/ataiml020402</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>11-12-2023</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>11-12-2023</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>4</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>194</prism:startingPage>
    <prism:doi>10.56578/ataiml020402</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2023_2_4/ataiml020402</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_4/ataiml020401">
    <title>Acadlore Transactions on AI and Machine Learning, 2023, Volume 2, Issue 4: Augmenting Diabetic Retinopathy Severity Prediction with a Dual-Level Deep Learning Approach Utilizing Customized MobileNet Feature Embeddings</title>
    <link>https://www.acadlore.com/article/ATAIML/2023_2_4/ataiml020401</link>
    <description>Diabetic retinopathy, a severe ocular disease correlated with elevated blood glucose levels in diabetic patients, carries a significant risk of visual impairment. The essentiality of its timely and precise severity classification is underscored for effective therapeutic intervention. Deep learning methodologies have been shown to yield encouraging results in the detection and categorisation of severity levels of diabetic retinopathy. This study proposes a dual-level approach, wherein the MobileNetV2 model is modified for a regression task, predicting retinopathy severity levels and subsequently fine-tuned on fundus images. The refined MobileNetV2 model is then utilised for learning feature embeddings, and a Support Vector Machine (SVM) classifier is trained for grading retinopathy severity. Upon implementation, this dual-level approach demonstrated remarkable performance, achieving an accuracy rate of 87% and a kappa value of 93.76% when evaluated on the APTOS19 benchmark dataset. Additionally, the efficacy of data augmentation and the handling of class imbalance issues were explored. These findings suggest that the novel dual-level approach provides an efficient and highly effective solution for the detection and classification of diabetic retinopathy severity levels.</description>
    <pubDate>10-11-2023</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Diabetic retinopathy, a severe ocular disease correlated with elevated blood glucose levels in diabetic patients, carries a significant risk of visual impairment. The essentiality of its timely and precise severity classification is underscored for effective therapeutic intervention. Deep learning methodologies have been shown to yield encouraging results in the detection and categorisation of severity levels of diabetic retinopathy. This study proposes a dual-level approach, wherein the MobileNetV2 model is modified for a regression task, predicting retinopathy severity levels and subsequently fine-tuned on fundus images. The refined MobileNetV2 model is then utilised for learning feature embeddings, and a Support Vector Machine (SVM) classifier is trained for grading retinopathy severity. Upon implementation, this dual-level approach demonstrated remarkable performance, achieving an accuracy rate of 87% and a kappa value of 93.76% when evaluated on the APTOS19 benchmark dataset. Additionally, the efficacy of data augmentation and the handling of class imbalance issues were explored. These findings suggest that the novel dual-level approach provides an efficient and highly effective solution for the detection and classification of diabetic retinopathy severity levels.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Augmenting Diabetic Retinopathy Severity Prediction with a Dual-Level Deep Learning Approach Utilizing Customized MobileNet Feature Embeddings</dc:title>
    <dc:creator>Jyostna Devi Bodapati</dc:creator>
    <dc:creator>Rajasekhar Konda</dc:creator>
    <dc:identifier>doi:10.56578/ataiml020401</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>10-11-2023</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>10-11-2023</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>4</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>182</prism:startingPage>
    <prism:doi>10.56578/ataiml020401</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2023_2_4/ataiml020401</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_3/ataiml020305">
    <title>Acadlore Transactions on AI and Machine Learning, 2023, Volume 2, Issue 3: Convolutional Neural Network-Assisted Scattering Inversion in Diverse Noise Environments</title>
    <link>https://www.acadlore.com/article/ATAIML/2023_2_3/ataiml020305</link>
    <description>In addressing the challenge of obstacle scattering inversion amidst intricate noise conditions, a model predicated on convolutional neural networks (CNN) has been proposed, demonstrating high precision. Five distinct noise scenarios, encompassing Gaussian white noise, uniform distribution noise, Poisson distribution noise, Laplace noise, and impulse noise, were evaluated. Far-field data paired with the Fourier coefficients of obstacle boundary curves were employed as network input and output, respectively. Through the convolutional processes inherent to the CNN, salient features within the far-field data related to obstacles were adeptly identified. Concurrently, the statistical characteristics of the noise were assimilated, and its perturbing effects were diminished, thus facilitating the inversion of obstacle shape parameters. The intrinsic capacity of CNNs to intuitively learn and differentiate salient features from data eradicates the necessity for external intervention or manually designed feature extractors. This adaptability confers upon CNNs a significant edge in tackling obstacle scattering inversion challenges, particularly in light of fluctuating data distributions and feature variability. Numerical experiments have substantiated that the aforementioned CNN model excels in addressing scattering inversion complications within multifaceted noise conditions, consistently delivering solutions with remarkable precision.</description>
    <pubDate>09-25-2023</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;In addressing the challenge of obstacle scattering inversion amidst intricate noise conditions, a model predicated on convolutional neural networks (CNN) has been proposed, demonstrating high precision. Five distinct noise scenarios, encompassing Gaussian white noise, uniform distribution noise, Poisson distribution noise, Laplace noise, and impulse noise, were evaluated. Far-field data paired with the Fourier coefficients of obstacle boundary curves were employed as network input and output, respectively. Through the convolutional processes inherent to the CNN, salient features within the far-field data related to obstacles were adeptly identified. Concurrently, the statistical characteristics of the noise were assimilated, and its perturbing effects were diminished, thus facilitating the inversion of obstacle shape parameters. The intrinsic capacity of CNNs to intuitively learn and differentiate salient features from data eradicates the necessity for external intervention or manually designed feature extractors. This adaptability confers upon CNNs a significant edge in tackling obstacle scattering inversion challenges, particularly in light of fluctuating data distributions and feature variability. Numerical experiments have substantiated that the aforementioned CNN model excels in addressing scattering inversion complications within multifaceted noise conditions, consistently delivering solutions with remarkable precision.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Convolutional Neural Network-Assisted Scattering Inversion in Diverse Noise Environments</dc:title>
    <dc:creator>Jiabao Zhuang</dc:creator>
    <dc:creator>Pinchao Meng</dc:creator>
    <dc:identifier>doi:10.56578/ataiml020305</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>09-25-2023</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>09-25-2023</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>3</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>170</prism:startingPage>
    <prism:doi>10.56578/ataiml020305</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2023_2_3/ataiml020305</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_3/ataiml020304">
    <title>Acadlore Transactions on AI and Machine Learning, 2023, Volume 2, Issue 3: Multi-Variable Time Series Decoding with Long Short-Term Memory and Mixture Attention</title>
    <link>https://www.acadlore.com/article/ATAIML/2023_2_3/ataiml020304</link>
    <description>The task of interpreting multi-variable time series data, while also forecasting outcomes accurately, is an ongoing challenge within the machine learning domain. This study presents an advanced method of utilizing Long Short-Term Memory (LSTM) recurrent neural networks in the analysis of such data, with specific attention to both target and exogenous variables. The novel approach aims to extract hidden states that are unique to individual variables, thereby capturing the distinctive dynamics inherent in multi-variable time series and allowing the elucidation of each variable's contribution to predictive outcomes. A pioneering mixture attention mechanism is introduced, which, by leveraging the aforementioned variable-specific hidden states, characterizes the generative process of the target variable. The study further enhances this methodology by formulating associated training techniques that permit concurrent learning of network parameters, variable interactions, and temporal significance with respect to the target prediction. The effectiveness of this approach is empirically validated through rigorous experimentation on three real-world datasets, including the 2022 closing prices of three major stocks - Apple (AAPL), Amazon (AMZN), and Microsoft (MSFT). The results demonstrated superior predictive performance, attributable to the successful encapsulation of the diverse dynamics of different variables. Furthermore, the study provides a comprehensive evaluation of the interpretability outcomes, both qualitatively and quantitatively. The presented framework thus holds substantial promise as a comprehensive solution that not only enhances prediction accuracy but also aids in the extraction of valuable insights from complex multi-variable datasets.</description>
    <pubDate>09-21-2023</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;The task of interpreting multi-variable time series data, while also forecasting outcomes accurately, is an ongoing challenge within the machine learning domain. This study presents an advanced method of utilizing Long Short-Term Memory (LSTM) recurrent neural networks in the analysis of such data, with specific attention to both target and exogenous variables. The novel approach aims to extract hidden states that are unique to individual variables, thereby capturing the distinctive dynamics inherent in multi-variable time series and allowing the elucidation of each variable's contribution to predictive outcomes. A pioneering mixture attention mechanism is introduced, which, by leveraging the aforementioned variable-specific hidden states, characterizes the generative process of the target variable. The study further enhances this methodology by formulating associated training techniques that permit concurrent learning of network parameters, variable interactions, and temporal significance with respect to the target prediction. The effectiveness of this approach is empirically validated through rigorous experimentation on three real-world datasets, including the 2022 closing prices of three major stocks - Apple (AAPL), Amazon (AMZN), and Microsoft (MSFT). The results demonstrated superior predictive performance, attributable to the successful encapsulation of the diverse dynamics of different variables. Furthermore, the study provides a comprehensive evaluation of the interpretability outcomes, both qualitatively and quantitatively. The presented framework thus holds substantial promise as a comprehensive solution that not only enhances prediction accuracy but also aids in the extraction of valuable insights from complex multi-variable datasets.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Multi-Variable Time Series Decoding with Long Short-Term Memory and Mixture Attention</dc:title>
    <dc:creator>Soukaina Seddik</dc:creator>
    <dc:creator>Hayat Routaib</dc:creator>
    <dc:creator>Anass Elhaddadi</dc:creator>
    <dc:identifier>doi:10.56578/ataiml020304</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>09-21-2023</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>09-21-2023</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>3</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>154</prism:startingPage>
    <prism:doi>10.56578/ataiml020304</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2023_2_3/ataiml020304</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_3/ataiml020303">
    <title>Acadlore Transactions on AI and Machine Learning, 2023, Volume 2, Issue 3: An Efficient Descriptor-Based Approach for Dominant Point Detection in Shape Contours</title>
    <link>https://www.acadlore.com/article/ATAIML/2023_2_3/ataiml020303</link>
    <description>Dominant points, or control points, represent areas of high curvature on shape contours and are extensively utilized in the representation of shape outlines. Herein, we introduce a novel, descriptor-based approach for the efficient detection of these pivotal points. Each point on a shape contour is evaluated and mapped to an invariant descriptor set, accomplished through the use of point-neighborhood. These descriptors are then harnessed to discern whether a point qualifies as a dominant one. Our proposed methodology eliminates the need for costly computations typically associated with evaluating candidate dominant points. Furthermore, our algorithm significantly outperforms its predecessors in terms of speed, relying solely on integer operations and obviating the necessity for an optimization phase. Experimental outcomes, derived from the widely used MPEG7_CE-Shape-1_Part_B, denote a minimum enhancement of 2.3 times in terms of running time. This implies that the proposed methodology is particularly suitable for real-time applications or scenarios managing shapes comprising a substantial number of points.</description>
    <pubDate>09-20-2023</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Dominant points, or control points, represent areas of high curvature on shape contours and are extensively utilized in the representation of shape outlines. Herein, we introduce a novel, descriptor-based approach for the efficient detection of these pivotal points. Each point on a shape contour is evaluated and mapped to an invariant descriptor set, accomplished through the use of point-neighborhood. These descriptors are then harnessed to discern whether a point qualifies as a dominant one. Our proposed methodology eliminates the need for costly computations typically associated with evaluating candidate dominant points. Furthermore, our algorithm significantly outperforms its predecessors in terms of speed, relying solely on integer operations and obviating the necessity for an optimization phase. Experimental outcomes, derived from the widely used MPEG7_CE-Shape-1_Part_B, denote a minimum enhancement of 2.3 times in terms of running time. This implies that the proposed methodology is particularly suitable for real-time applications or scenarios managing shapes comprising a substantial number of points.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>An Efficient Descriptor-Based Approach for Dominant Point Detection in Shape Contours</dc:title>
    <dc:creator>Mohammad T. Parvez</dc:creator>
    <dc:identifier>doi:10.56578/ataiml020303</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>09-20-2023</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>09-20-2023</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>3</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>142</prism:startingPage>
    <prism:doi>10.56578/ataiml020303</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2023_2_3/ataiml020303</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_3/ataiml020302">
    <title>Acadlore Transactions on AI and Machine Learning, 2023, Volume 2, Issue 3: A Comparative Study on AI-Based Algorithms for Cost Prediction in Pharmaceutical Transport Logistics</title>
    <link>https://www.acadlore.com/article/ATAIML/2023_2_3/ataiml020302</link>
    <description>Pharmaceutical transport logistics, especially in humanitarian and hospital contexts, is becoming increasingly essential with a growing need to monitor associated costs. In Morocco, however, studies focusing on the cost implications of pharmaceutical delivery conditions are conspicuously absent. This creates a high-dimensional classification framework, where the selection of variables becomes challenging in the face of correlated distribution predictors. The integration of Artificial Intelligence (AI) in cost prediction has emerged as a vital necessity amidst escalating complexities and cost considerations. Cost prediction, being inherently correlated with almost all variables and inputs, offers an interpretable value in performance management, financial planning, and contract negotiation. This study undertakes a comparative analysis of a broad spectrum of prediction algorithms applied to the same, albeit reduced, database. A dozen such algorithms are put into practical use, with variable selection implemented through importance measures. The primary objective of this comparative evaluation is to determine the superior performing algorithm — one that delivers optimal adaptation to the context within a fixed environment. The prediction algorithm incorporates a myriad of inputs and constraints derived from data collection systems. AI's application facilitates the inclusion of diverse variables such as transportation routes, congestion, distances, freight weight, and environmental factors, thereby enhancing the accuracy and efficiency of cost estimation. The Orthogonal Matching Pursuit model emerged as the most successful, boasting an R² value nearing unity. Accurate cost prediction in transport can yield valuable insights into budgeting, estimation, customer service, managerial risk, environmental considerations, and strategic deployment for a company. 
Improved decision-making and resource allocation can thereby be achieved, leading to enhanced profitability and sustainability.</description>
    <pubDate>08-31-2023</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Pharmaceutical transport logistics, especially in humanitarian and hospital contexts, is becoming increasingly essential with a growing need to monitor associated costs. In Morocco, however, studies focusing on the cost implications of pharmaceutical delivery conditions are conspicuously absent. This creates a high-dimensional classification framework, where the selection of variables becomes challenging in the face of correlated distribution predictors. The integration of Artificial Intelligence (AI) in cost prediction has emerged as a vital necessity amidst escalating complexities and cost considerations. Cost prediction, being inherently correlated with almost all variables and inputs, offers an interpretable value in performance management, financial planning, and contract negotiation. This study undertakes a comparative analysis of a broad spectrum of prediction algorithms applied to the same, albeit reduced, database. A dozen such algorithms are put into practical use, with variable selection implemented through importance measures. The primary objective of this comparative evaluation is to determine the superior performing algorithm — one that delivers optimal adaptation to the context within a fixed environment. The prediction algorithm incorporates a myriad of inputs and constraints derived from data collection systems. AI's application facilitates the inclusion of diverse variables such as transportation routes, congestion, distances, freight weight, and environmental factors, thereby enhancing the accuracy and efficiency of cost estimation. The Orthogonal Matching Pursuit model emerged as the most successful, boasting an R² value nearing unity. Accurate cost prediction in transport can yield valuable insights into budgeting, estimation, customer service, managerial risk, environmental considerations, and strategic deployment for a company. 
Improved decision-making and resource allocation can thereby be achieved, leading to enhanced profitability and sustainability.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>A Comparative Study on AI-Based Algorithms for Cost Prediction in Pharmaceutical Transport Logistics</dc:title>
    <dc:creator>Fadwa Farchi</dc:creator>
    <dc:creator>Chayma Farchi</dc:creator>
    <dc:creator>Badr Touzi</dc:creator>
    <dc:creator>Charif Mabrouki</dc:creator>
    <dc:identifier>doi:10.56578/ataiml020302</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>08-31-2023</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>08-31-2023</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>3</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>129</prism:startingPage>
    <prism:doi>10.56578/ataiml020302</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2023_2_3/ataiml020302</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_3/ataiml020301">
    <title>Acadlore Transactions on AI and Machine Learning, 2023, Volume 2, Issue 3: Advanced Hybrid Segmentation Model Leveraging AlexNet Architecture for Enhanced Liver Cancer Detection</title>
    <link>https://www.acadlore.com/article/ATAIML/2023_2_3/ataiml020301</link>
    <description>Liver cancer, one of the rapidly escalating forms of cancer, remains a principal cause of mortality globally. Its death rates can be attenuated through vigilant monitoring and early detection. This study aims to develop a sophisticated model to assist medical professionals in the classification of liver tumours using biopsy tissue images, thereby facilitating preliminary diagnosis.The study presents a novel, bio-inspired deep learning strategy purposed for augmenting liver cancer detection. The uniqueness of this approach rests in its two-fold contribution: Firstly, an innovative hybrid segmentation technique, integrating the SegNet network, UNet network, and Al-Biruni Earth Radius (BER) procedure, is introduced to extract liver lesions from Computed Tomography (CT) images. The algorithm initially applies the SegNet to isolate the liver from the abdominal image in a CT scan. Since hyperparameters significantly influence segmentation performance, the BER algorithm is hybridized with each network for optimal tuning. The method proposed herein is inspired by the pursuit of a common objective by swarm members. Al-Biruni's methodology for calculating Earth's radius sets the search space, extending beyond local solutions that require exploration. Secondly, a pre-trained AlexNet model is utilized for diagnosis, further enhancing the method's effectiveness. The proposed segmentation and classification algorithms have been compared with contemporary state-of-the-art techniques. The results demonstrated that in terms of specificity, F1-score, accuracy, and computational time, the proposed method outperforms its competitors, indicating its potential in advancing liver cancer detection.</description>
    <pubDate>08-21-2023</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Liver cancer, one of the rapidly escalating forms of cancer, remains a principal cause of mortality globally. Its death rates can be attenuated through vigilant monitoring and early detection. This study aims to develop a sophisticated model to assist medical professionals in the classification of liver tumours using biopsy tissue images, thereby facilitating preliminary diagnosis.The study presents a novel, bio-inspired deep learning strategy purposed for augmenting liver cancer detection. The uniqueness of this approach rests in its two-fold contribution: Firstly, an innovative hybrid segmentation technique, integrating the SegNet network, UNet network, and Al-Biruni Earth Radius (BER) procedure, is introduced to extract liver lesions from Computed Tomography (CT) images. The algorithm initially applies the SegNet to isolate the liver from the abdominal image in a CT scan. Since hyperparameters significantly influence segmentation performance, the BER algorithm is hybridized with each network for optimal tuning. The method proposed herein is inspired by the pursuit of a common objective by swarm members. Al-Biruni's methodology for calculating Earth's radius sets the search space, extending beyond local solutions that require exploration. Secondly, a pre-trained AlexNet model is utilized for diagnosis, further enhancing the method's effectiveness. The proposed segmentation and classification algorithms have been compared with contemporary state-of-the-art techniques. The results demonstrated that in terms of specificity, F1-score, accuracy, and computational time, the proposed method outperforms its competitors, indicating its potential in advancing liver cancer detection.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Advanced Hybrid Segmentation Model Leveraging AlexNet Architecture for Enhanced Liver Cancer Detection</dc:title>
    <dc:creator>Venkata Raja Sekhar Reddy Nagireddy</dc:creator>
    <dc:creator>Khaja Shareef Shaik</dc:creator>
    <dc:identifier>doi:10.56578/ataiml020301</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>08-21-2023</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>08-21-2023</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>3</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>116</prism:startingPage>
    <prism:doi>10.56578/ataiml020301</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2023_2_3/ataiml020301</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_2/ataiml020205">
    <title>Acadlore Transactions on AI and Machine Learning, 2023, Volume 2, Issue 2: Artificial Intelligence in Cervical Cancer Research and Applications</title>
    <link>https://www.acadlore.com/article/ATAIML/2023_2_2/ataiml020205</link>
    <description>Cervical cancer remains a leading cause of death among females, posing a severe threat to women's health. Due to the uneven distribution of resources in different regions, there are challenges regarding physicians' experience, quantity, and medical conditions. Early screening, diagnosis, and treatment of cervical cancer still face significant obstacles. In recent years, artificial intelligence (AI) has been increasingly applied to various diseases' screening, diagnosis, and treatment. Currently, AI has many research applications in cervical cancer screening, diagnosis, treatment, and prognosis, assisting doctors and clinical experts in decision-making, improving efficiency and accuracy. This study discusses the application of AI in cervical cancer screening, including HPV typing and detection, cervical cytology screening, and colposcopy screening, as well as AI in cervical cancer diagnosis and treatment, including magnetic resonance imaging (MRI) and computed tomography (CT). Finally, the study briefly describes the current challenges faced by AI applications in cervical cancer and proposes future research directions.</description>
    <pubDate>06-13-2023</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Cervical cancer remains a leading cause of death among females, posing a severe threat to women's health. Due to the uneven distribution of resources in different regions, there are challenges regarding physicians' experience, quantity, and medical conditions. Early screening, diagnosis, and treatment of cervical cancer still face significant obstacles. In recent years, artificial intelligence (AI) has been increasingly applied to various diseases' screening, diagnosis, and treatment. Currently, AI has many research applications in cervical cancer screening, diagnosis, treatment, and prognosis, assisting doctors and clinical experts in decision-making, improving efficiency and accuracy. This study discusses the application of AI in cervical cancer screening, including HPV typing and detection, cervical cytology screening, and colposcopy screening, as well as AI in cervical cancer diagnosis and treatment, including magnetic resonance imaging (MRI) and computed tomography (CT). Finally, the study briefly describes the current challenges faced by AI applications in cervical cancer and proposes future research directions.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Artificial Intelligence in Cervical Cancer Research and Applications</dc:title>
    <dc:creator>Chunhui Liu</dc:creator>
    <dc:creator>Jiahui Yang</dc:creator>
    <dc:creator>Ying Liu</dc:creator>
    <dc:creator>Ying Zhang</dc:creator>
    <dc:creator>Shuang Liu</dc:creator>
    <dc:creator>Tetiana Chaikovska</dc:creator>
    <dc:creator>Chan Liu</dc:creator>
    <dc:identifier>https://doi.org/10.56578/ataiml020205</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>06-13-2023</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>06-13-2023</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>99</prism:startingPage>
    <prism:doi>10.56578/ataiml020205</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2023_2_2/ataiml020205</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_2/ataiml020204">
    <title>Acadlore Transactions on AI and Machine Learning, 2023, Volume 2, Issue 2: Enhancing Face Spoofing Attack Detection: Performance Evaluation of a VGG-19 CNN Model</title>
    <link>https://www.acadlore.com/article/ATAIML/2023_2_2/ataiml020204</link>
    <description>With the wide use of facial verification and authentication systems, the performance evaluation of Spoofing Attack Detection (SAD) module in the systems is important, because poor performance leads to successful face spoofing attacks. Previous studies on face SAD used a pretrained Visual Geometry Group (VGG) -16 architecture to extract feature maps from face images using the convolutional layers, and trained a face SAD model to classify real and fake face images, obtaining poor performance for unseen face images. Therefore, this study aimed to evaluate the performance of VGG-19 face SAD model. Experimental approach was used to build the model. VGG-19 algorithm was used to extract Red Green Blue (RGB) and deep neural network features from the face datasets. Evaluation results showed that the performance of the VGG-19 face SAD model improved by 6% compared with the state-of-the-art approaches, with the lowest equal error rate (EER) of 0.4%. In addition, the model had strong generalization ability in top-1 accuracy, threshold operation, quality test, fake face test, equal error rate, and overall test standard evaluation metrics.</description>
    <pubDate>06-12-2023</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;With the wide use of facial verification and authentication systems, the performance evaluation of Spoofing Attack Detection (SAD) module in the systems is important, because poor performance leads to successful face spoofing attacks. Previous studies on face SAD used a pretrained Visual Geometry Group (VGG) -16 architecture to extract feature maps from face images using the convolutional layers, and trained a face SAD model to classify real and fake face images, obtaining poor performance for unseen face images. Therefore, this study aimed to evaluate the performance of VGG-19 face SAD model. Experimental approach was used to build the model. VGG-19 algorithm was used to extract Red Green Blue (RGB) and deep neural network features from the face datasets. Evaluation results showed that the performance of the VGG-19 face SAD model improved by 6% compared with the state-of-the-art approaches, with the lowest equal error rate (EER) of 0.4%. In addition, the model had strong generalization ability in top-1 accuracy, threshold operation, quality test, fake face test, equal error rate, and overall test standard evaluation metrics.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Enhancing Face Spoofing Attack Detection: Performance Evaluation of a VGG-19 CNN Model</dc:title>
    <dc:creator>Thomas Ayanwola</dc:creator>
    <dc:creator>Awodele Oludele</dc:creator>
    <dc:creator>Michael Agbaje</dc:creator>
    <dc:identifier>https://doi.org/10.56578/ataiml020204</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>06-12-2023</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>06-12-2023</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>84</prism:startingPage>
    <prism:doi>10.56578/ataiml020204</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2023_2_2/ataiml020204</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_2/ataiml020203">
    <title>Acadlore Transactions on AI and Machine Learning, 2023, Volume 2, Issue 2: Detecting False Data Injection Attacks in Industrial Internet of Things Using an Optimized Bidirectional Gated Recurrent Unit-Swarm Optimization Algorithm Model</title>
    <link>https://www.acadlore.com/article/ATAIML/2023_2_2/ataiml020203</link>
    <description>The rapid adoption of the Industrial Internet of Things (IIoT) paradigm has left systems vulnerable due to insufficient security measures. False data injection attacks (FDIAs) present a significant security concern in IIoT, as they aim to deceive industrial platforms by manipulating sensor readings. Traditional threat detection methods have proven inadequate in addressing FDIAs, and most existing countermeasures overlook the necessity of validating data, particularly in the context of data clustering services. To address this issue, this study proposes an innovative approach for FDIA detection using an optimized bidirectional gated recurrent unit (BiGRU) model, with the Sailfish Optimization Algorithm (SOA) employed to select optimal weights. The proposed model exploits temporal and spatial correlations in sensor data to identify fabricated information and subsequently cleanse the affected data. Evaluation results demonstrate the effectiveness of the proposed method in detecting FDIAs, outperforming state-of-the-art techniques in the same task. Furthermore, the data cleaning process showcased the ability to recover damaged or corrupted data, providing an additional advantage.</description>
    <pubDate>06-05-2023</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;The rapid adoption of the Industrial Internet of Things (IIoT) paradigm has left systems vulnerable due to insufficient security measures. False data injection attacks (FDIAs) present a significant security concern in IIoT, as they aim to deceive industrial platforms by manipulating sensor readings. Traditional threat detection methods have proven inadequate in addressing FDIAs, and most existing countermeasures overlook the necessity of validating data, particularly in the context of data clustering services. To address this issue, this study proposes an innovative approach for FDIA detection using an optimized bidirectional gated recurrent unit (BiGRU) model, with the Sailfish Optimization Algorithm (SOA) employed to select optimal weights. The proposed model exploits temporal and spatial correlations in sensor data to identify fabricated information and subsequently cleanse the affected data. Evaluation results demonstrate the effectiveness of the proposed method in detecting FDIAs, outperforming state-of-the-art techniques in the same task. Furthermore, the data cleaning process showcased the ability to recover damaged or corrupted data, providing an additional advantage.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Detecting False Data Injection Attacks in Industrial Internet of Things Using an Optimized Bidirectional Gated Recurrent Unit-Swarm Optimization Algorithm Model</dc:title>
    <dc:creator>Nadella Sree Divya</dc:creator>
    <dc:creator>Ramesh Vatambeti</dc:creator>
    <dc:identifier>https://doi.org/10.56578/ataiml020203</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>06-05-2023</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>06-05-2023</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>75</prism:startingPage>
    <prism:doi>10.56578/ataiml020203</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2023_2_2/ataiml020203</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_2/ataiml020202">
    <title>Acadlore Transactions on AI and Machine Learning, 2023, Volume 2, Issue 2: Diagnosis of Chronic Kidney Disease Based on CNN and LSTM</title>
    <link>https://www.acadlore.com/article/ATAIML/2023_2_2/ataiml020202</link>
    <description>Kidney plays an extremely important role in human health, and one of its important tasks is to purify the blood from toxic substances. Chronic Kidney Disease (CKD) means that kidney begins to lose its function gradually and show some symptoms, such as fatigue, weakness, nausea, vomiting, and frequent urination. Early diagnosis and treatment increase the likelihood of recovery from the disease. Due to high classification performance, artificial intelligence techniques have been widely used to classify disease data in the last ten years. In this study, a hybrid model based on Convolutional Neural Network (CNN) and Long Short-Term Memory (LSTM) was proposed using a two-class data set, which automatically classified CKD. This dataset consisted of thirteen features and one output. If the features showed, CKD was diagnosed. Compared with many well-known machine learning methods, the proposed CNN-LSTM based model obtained a classification accuracy of 99.17%.</description>
    <pubDate>06-05-2023</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Kidney plays an extremely important role in human health, and one of its important tasks is to purify the blood from toxic substances. Chronic Kidney Disease (CKD) means that kidney begins to lose its function gradually and show some symptoms, such as fatigue, weakness, nausea, vomiting, and frequent urination. Early diagnosis and treatment increase the likelihood of recovery from the disease. Due to high classification performance, artificial intelligence techniques have been widely used to classify disease data in the last ten years. In this study, a hybrid model based on Convolutional Neural Network (CNN) and Long Short-Term Memory (LSTM) was proposed using a two-class data set, which automatically classified CKD. This dataset consisted of thirteen features and one output. If the features showed, CKD was diagnosed. Compared with many well-known machine learning methods, the proposed CNN-LSTM based model obtained a classification accuracy of 99.17%.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Diagnosis of Chronic Kidney Disease Based on CNN and LSTM</dc:title>
    <dc:creator>Elif Nur Yildiz</dc:creator>
    <dc:creator>Emine Cengil</dc:creator>
    <dc:creator>Muhammed Yildirim</dc:creator>
    <dc:creator>Harun Bingol</dc:creator>
    <dc:identifier>https://doi.org/10.56578/ataiml020202</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>06-05-2023</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>06-05-2023</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>66</prism:startingPage>
    <prism:doi>10.56578/ataiml020202</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2023_2_2/ataiml020202</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_2/ataiml020201">
    <title>Acadlore Transactions on AI and Machine Learning, 2023, Volume 2, Issue 2: Hierarchical Aggregate Assessment of Multi-Level Teams Using Competency Ontologies</title>
    <link>https://www.acadlore.com/article/ATAIML/2023_2_2/ataiml020201</link>
    <description>It is complex to assess multi-level hierarchical teams, because the solution needs to organize their rapid dynamic adaptation to perform operational tasks, and train team members without sufficient competencies, skills and experience. Assessment also reveals the strengths and weaknesses of the whole team and each team member, which provides opportunities for their further growth in the future. Assessment of the work of teams needs external knowledge and processing methods. Therefore, this study proposed to use ontological approach to improve the assessment of multi-level hierarchical teams, because ontology integrated domain knowledge with relevant competencies of positions and levels in the hierarchical teams. Information on competencies of applicants was acquired in the portfolio analysis. After subdividing the hierarchical teams, appropriate ontologies and Web-services were used to obtain assessment results and competence improvement recommendations for the teams at various sublevels. The step-by-step team assessment method was described, which used elements of semantic similarity between different information objects to match applicants and equipment with team positions. This method could be used as a component of integrated multi-criteria decision-making and was targeted at specific cases of user tasks. The set of assessment criteria was pre-determined by tasks, and built based on domain knowledge. However, particular criteria were dynamic, and changed along with the environment at different time points.</description>
    <pubDate>06-05-2023</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;It is complex to assess multi-level hierarchical teams, because the solution needs to organize their rapid dynamic adaptation to perform operational tasks, and train team members without sufficient competencies, skills and experience. Assessment also reveals the strengths and weaknesses of the whole team and each team member, which provides opportunities for their further growth in the future. Assessment of the work of teams needs external knowledge and processing methods. Therefore, this study proposed to use ontological approach to improve the assessment of multi-level hierarchical teams, because ontology integrated domain knowledge with relevant competencies of positions and levels in the hierarchical teams. Information on competencies of applicants was acquired in the portfolio analysis. After subdividing the hierarchical teams, appropriate ontologies and Web-services were used to obtain assessment results and competence improvement recommendations for the teams at various sublevels. The step-by-step team assessment method was described, which used elements of semantic similarity between different information objects to match applicants and equipment with team positions. This method could be used as a component of integrated multi-criteria decision-making and was targeted at specific cases of user tasks. The set of assessment criteria was pre-determined by tasks, and built based on domain knowledge. However, particular criteria were dynamic, and changed along with the environment at different time points.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Hierarchical Aggregate Assessment of Multi-Level Teams Using Competency Ontologies</dc:title>
    <dc:creator>Anatoly Gladun</dc:creator>
    <dc:creator>Julia Rogushina</dc:creator>
    <dc:creator>Martin Lesage</dc:creator>
    <dc:identifier>https://doi.org/10.56578/ataiml020201</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>06-05-2023</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>06-05-2023</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>55</prism:startingPage>
    <prism:doi>10.56578/ataiml020201</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2023_2_2/ataiml020201</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_1/ataiml020105">
    <title>Acadlore Transactions on AI and Machine Learning, 2023, Volume 2, Issue 1: Information Acquisition Method of Tomato Plug Seedlings Based on Cycle-Consistent Adversarial Network</title>
    <link>https://www.acadlore.com/article/ATAIML/2023_2_1/ataiml020105</link>
    <description>In order to solve the interference caused by the overlapping and extrusion of adjacent plug seedlings, accurately obtain the information of tomato plug seedlings, and improve the transplanting effect of automatic tomato transplanters, this study proposes a seedling information acquisition method based on Cycle-Consistent Adversarial Network (CycleGAN). CycleGAN is a generative unsupervised deep learning method, which can realize the free conversion of the source-domain plug seedling image and the target-domain plug label image. It collects more than 500 images of tomato plug seedlings in different growth stages as a collection image set; follows certain principles to label the plug seedling images to obtain a label image set, and uses two image sets to train the CycleGAN network model. Finally, the trained model is used to process the images of tomato plug seedlings to obtain their label images. According to the labeling principle, the correct rate of model recognition is between 91% and 97%. The recognition results show that the CycleGAN model can recognize and judge whether the seedlings affected by the adjacent seedling holes are suitable for transplanting, so the application of this method can greatly improve the intelligence level of the automatic tomato transplanters.</description>
    <pubDate>03-27-2023</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;In order to solve the interference caused by the overlapping and extrusion of adjacent plug seedlings, accurately obtain the information of tomato plug seedlings, and improve the transplanting effect of automatic tomato transplanters, this study proposes a seedling information acquisition method based on Cycle-Consistent Adversarial Network (CycleGAN). CycleGAN is a generative unsupervised deep learning method, which can realize the free conversion of the source-domain plug seedling image and the target-domain plug label image. It collects more than 500 images of tomato plug seedlings in different growth stages as a collection image set; follows certain principles to label the plug seedling images to obtain a label image set, and uses two image sets to train the CycleGAN network model. Finally, the trained model is used to process the images of tomato plug seedlings to obtain their label images. According to the labeling principle, the correct rate of model recognition is between 91% and 97%. The recognition results show that the CycleGAN model can recognize and judge whether the seedlings affected by the adjacent seedling holes are suitable for transplanting, so the application of this method can greatly improve the intelligence level of the automatic tomato transplanters.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Information Acquisition Method of Tomato Plug Seedlings Based on Cycle-Consistent Adversarial Network</dc:title>
    <dc:creator>Yong Zhang</dc:creator>
    <dc:identifier>https://doi.org/10.56578/ataiml020105</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>03-27-2023</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>03-27-2023</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>46</prism:startingPage>
    <prism:doi>10.56578/ataiml020105</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2023_2_1/ataiml020105</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_1/ataiml020104">
    <title>Acadlore Transactions on AI and Machine Learning, 2023, Volume 2, Issue 1: Floor Segmentation Approach Using FCM and CNN</title>
    <link>https://www.acadlore.com/article/ATAIML/2023_2_1/ataiml020104</link>
    <description>Floor plans play an essential role in the architecture design and construction, which serves as an important communication tool between engineers, architects and clients. Automatic identification of various design elements in a floor plan image can improve work efficiency and accuracy. This paper proposed a method consists of two stages, Fuzzy C-Means (FCM) segmentation and Convolutional Neural Network (CNN) segmentation. In FCM stage, the given input image was partitioned into homogeneous regions based on similarity for merging. In CNN stage, the interactive information was introduced as markers of the object area and background area, which were input by the users to roughly indicate the position and main features of the object and background. The segmentation evaluation was measured using probabilistic rand index, variation of information, global consistency error, and boundary displacement error. Experiments were conducted on real dataset to evaluate performance of the proposed model. The experimental results revealed the proposed model was successful.</description>
    <pubDate>03-27-2023</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Floor plans play an essential role in the architecture design and construction, which serves as an important communication tool between engineers, architects and clients. Automatic identification of various design elements in a floor plan image can improve work efficiency and accuracy. This paper proposed a method consists of two stages, Fuzzy C-Means (FCM) segmentation and Convolutional Neural Network (CNN) segmentation. In FCM stage, the given input image was partitioned into homogeneous regions based on similarity for merging. In CNN stage, the interactive information was introduced as markers of the object area and background area, which were input by the users to roughly indicate the position and main features of the object and background. The segmentation evaluation was measured using probabilistic rand index, variation of information, global consistency error, and boundary displacement error. Experiments were conducted on real dataset to evaluate performance of the proposed model. The experimental results revealed the proposed model was successful.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Floor Segmentation Approach Using FCM and CNN</dc:title>
    <dc:creator>Kavya Ravishankar</dc:creator>
    <dc:creator>Puspha Devaraj</dc:creator>
    <dc:creator>Sharath Kumar Yeliyur Hanumathaiah</dc:creator>
    <dc:identifier>https://doi.org/10.56578/ataiml020104</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>03-27-2023</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>03-27-2023</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>33</prism:startingPage>
    <prism:doi>10.56578/ataiml020104</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2023_2_1/ataiml020104</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_1/ataiml020103">
    <title>Acadlore Transactions on AI and Machine Learning, 2023, Volume 2, Issue 1: Intelligent Diagnosis of Obstetric Diseases Using HGS-AOA Based Extreme Learning Machine</title>
    <link>https://www.acadlore.com/article/ATAIML/2023_2_1/ataiml020103</link>
    <description>This paper aimed to realize intelligent diagnosis of obstetric diseases using electronic medical records (EMRs). The Optimized Kernel Extreme Machine Learning (OKEML) technique was proposed to rebalance data. The hybrid approach of the Hunger Games Search (HGS) and the Arithmetic Optimization Algorithm (AOA) was adopted. This paper tested the effectiveness of the OKEML-HGS-AOA on Chinese Obstetric EMR (COEMR) datasets. Compared with other models, the proposed model outperformed the state-of-the-art experimental results on the COEMR, Arxiv Academic Paper Dataset (AAPD), and the Reuters Corpus Volume 1 (RCV1) datasets, with an accuracy of 88%, 90%, and 91%, respectively.</description>
    <pubDate>03-27-2023</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;This paper aimed to realize intelligent diagnosis of obstetric diseases using electronic medical records (EMRs). The Optimized Kernel Extreme Machine Learning (OKEML) technique was proposed to rebalance data. The hybrid approach of the Hunger Games Search (HGS) and the Arithmetic Optimization Algorithm (AOA) was adopted. This paper tested the effectiveness of the OKEML-HGS-AOA on Chinese Obstetric EMR (COEMR) datasets. Compared with other models, the proposed model outperformed the state-of-the-art experimental results on the COEMR, Arxiv Academic Paper Dataset (AAPD), and the Reuters Corpus Volume 1 (RCV1) datasets, with an accuracy of 88%, 90%, and 91%, respectively.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Intelligent Diagnosis of Obstetric Diseases Using HGS-AOA Based Extreme Learning Machine</dc:title>
    <dc:creator>Ramesh Vatambeti</dc:creator>
    <dc:creator>Vijay Kumar Damera</dc:creator>
    <dc:identifier>https://doi.org/10.56578/ataiml020103</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>03-27-2023</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>03-27-2023</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>21</prism:startingPage>
    <prism:doi>10.56578/ataiml020103</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2023_2_1/ataiml020103</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_1/ataiml020102">
    <title>Acadlore Transactions on AI and Machine Learning, 2023, Volume 2, Issue 1: Performance Evaluation of ANN Models for Prediction</title>
    <link>https://www.acadlore.com/article/ATAIML/2023_2_1/ataiml020102</link>
    <description>One of the biggest problems that humans are faced with today is pollution and climate change. Pollution is not a new phenomenon and remains a leading cause of diseases and deaths. Mining, industrialization, exploration and urbanization caused global pollution, whose burdens are shared by developed and undeveloped countries alike. Awareness and stricter laws in the developed countries have contributed to environmental protection. Although all countries have paid attention to pollution, the impact and severity of its long-term consequences are being felt. There is a cause-and-effect link between the pollution of air, water and soil and the environment. This research aimed to prove that the main function of the philosophy of science is to have a functional understanding of knowledge, which views knowledge as a tool for prediction. Prediction is the function or mission of science or the goal that must be achieved if the scientific project is successful. In other words, prediction is the final harvest of description and interpretation. In addition, science is primarily concerned with the prediction of events that have occurred in the universe. A mature prediction is what science provides to validate scientific models. This paper introduced the concepts of using machine learning techniques to enhance the prediction process results. Pollution data set and the negative effects of polluted air data were used. We built, trained and tested various models in order to find the optimal model, which could enhance the results of the prediction process.</description>
    <pubDate>03-27-2023</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;One of the biggest problems that humans are faced with today is pollution and climate change. Pollution is not a new phenomenon and remains a leading cause of diseases and deaths. Mining, industrialization, exploration and urbanization caused global pollution, whose burdens are shared by developed and undeveloped countries alike. Awareness and stricter laws in the developed countries have contributed to environmental protection. Although all countries have paid attention to pollution, the impact and severity of its long-term consequences are being felt. There is a cause-and-effect link between the pollution of air, water and soil and the environment. This research aimed to prove that the main function of the philosophy of science is to have a functional understanding of knowledge, which views knowledge as a tool for prediction. Prediction is the function or mission of science or the goal that must be achieved if the scientific project is successful. In other words, prediction is the final harvest of description and interpretation. In addition, science is primarily concerned with the prediction of events that have occurred in the universe. A mature prediction is what science provides to validate scientific models. This paper introduced the concepts of using machine learning techniques to enhance the prediction process results. Pollution data set and the negative effects of polluted air data were used. We built, trained and tested various models in order to find the optimal model, which could enhance the results of the prediction process.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Performance Evaluation of ANN Models for Prediction</dc:title>
    <dc:creator>mohmmad khrisat</dc:creator>
    <dc:creator>ziad alqadi</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml020102</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>03-27-2023</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>03-27-2023</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>13</prism:startingPage>
    <prism:doi>10.56578/ataiml020102</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2023_2_1/ataiml020102</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2023_2_1/ataiml020101">
    <title>Acadlore Transactions on AI and Machine Learning, 2023, Volume 2, Issue 1: An End-to-End CNN Approach for Enhancing Underwater Images Using Spatial and Frequency Domain Techniques</title>
    <link>https://www.acadlore.com/article/ATAIML/2023_2_1/ataiml020101</link>
    <description>Underwater image processing area has been a central point of interest to many people in many fields such as control of underwater vehicles, archaeology, marine biology research, etc. Underwater exploration is becoming a big part of our life such as underwater marine and creatures research, pipeline and communication logistics, military use, touristic and entertainment use. Underwater images are subject to poor visibility, distortion, poor quality, etc., due to several reasons such as light propagation. The real problem occurs when these images have to be taken at a depth which is more than 500 feet where artificial light needs to be introduced. This work tackles the underwater environment challenges such as colour casts, lack of image sharpness, low contrast, low visibility, and blurry appearance in deep ocean images by proposing an end-to-end deep underwater image enhancement network (WGH-net) based on convolutional neural network (CNN) algorithm. Quantitative and qualitative metrics results proved that our method achieved competitive results with the previous work methods as it was experimentally tested on different images from several datasets.</description>
    <pubDate>03-27-2023</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Underwater image processing area has been a central point of interest to many people in many fields such as control of underwater vehicles, archaeology, marine biology research, etc. Underwater exploration is becoming a big part of our life such as underwater marine and creatures research, pipeline and communication logistics, military use, touristic and entertainment use. Underwater images are subject to poor visibility, distortion, poor quality, etc., due to several reasons such as light propagation. The real problem occurs when these images have to be taken at a depth which is more than 500 feet where artificial light needs to be introduced. This work tackles the underwater environment challenges such as colour casts, lack of image sharpness, low contrast, low visibility, and blurry appearance in deep ocean images by proposing an end-to-end deep underwater image enhancement network (WGH-net) based on convolutional neural network (CNN) algorithm. Quantitative and qualitative metrics results proved that our method achieved competitive results with the previous work methods as it was experimentally tested on different images from several datasets.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>An End-to-End CNN Approach for Enhancing Underwater Images Using Spatial and Frequency Domain Techniques</dc:title>
    <dc:creator>ayah abo el rejal</dc:creator>
    <dc:creator>khaled nagaty</dc:creator>
    <dc:creator>andreas pester</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml020101</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>03-27-2023</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>03-27-2023</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>1</prism:startingPage>
    <prism:doi>10.56578/ataiml020101</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2023_2_1/ataiml020101</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2022_1_2/ataiml010205">
    <title>Acadlore Transactions on AI and Machine Learning, 2022, Volume 1, Issue 2: COVID-19 - Outbreak Prediction Using SIR Model</title>
    <link>https://www.acadlore.com/article/ATAIML/2022_1_2/ataiml010205</link>
    <description>This paper deals with the trendy topic of coronavirus. The disease is causing severe damage to the entire population as well as to the nation’s economy. Machine Learning algorithms like Support Vector Machines and SIR Models have been used to prepare good and valid predictions of this disease. Total cases, recovered cases, infected cases, and Deaths reported are there in the paper ahead represented beautifully in form of pie charts, bar graphs, and line plots. Predictions are there for the next 20 days and we all hope that the cases remain as low as possible, and we achieve the peak of the disease as early as possible. Also, it should be made clear that these are not clinically and globally accepted to be true, and these should not be used anywhere on a medical basis. This clearly gives us the right approach and a brief idea of how Machine Learning can be used in such pandemic situations.</description>
    <pubDate>12-30-2022</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;This paper deals with the trendy topic of coronavirus. The disease is causing severe damage to the entire population as well as to the nation’s economy. Machine Learning algorithms like Support Vector Machines and SIR Models have been used to prepare good and valid predictions of this disease. Total cases, recovered cases, infected cases, and Deaths reported are there in the paper ahead represented beautifully in form of pie charts, bar graphs, and line plots. Predictions are there for the next 20 days and we all hope that the cases remain as low as possible, and we achieve the peak of the disease as early as possible. Also, it should be made clear that these are not clinically and globally accepted to be true, and these should not be used anywhere on a medical basis. This clearly gives us the right approach and a brief idea of how Machine Learning can be used in such pandemic situations.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>COVID-19 - Outbreak Prediction Using SIR Model</dc:title>
    <dc:creator>vijay khare</dc:creator>
    <dc:creator>rishabh kaloni</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml010205</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>12-30-2022</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>12-30-2022</prism:publicationDate>
    <prism:year>2022</prism:year>
    <prism:volume>1</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>109</prism:startingPage>
    <prism:doi>10.56578/ataiml010205</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2022_1_2/ataiml010205</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2022_1_2/ataiml010204">
    <title>Acadlore Transactions on AI and Machine Learning, 2022, Volume 1, Issue 2: Human Behavior Identification Based on Graphology Using Artificial Neural Network</title>
    <link>https://www.acadlore.com/article/ATAIML/2022_1_2/ataiml010204</link>
    <description>Handwriting reflects a person's true nature, phobias, emotional outbursts, honesty, defenses and many more characteristics. Analysis of handwriting, also known as graphology, is a science that uses the strokes and patterns disclosed by handwriting to identify, evaluate, and analyze personality. It is the study of the patterns and physical characteristics of handwriting to identify the author, indicate the author's psychological state while writing, or analyze personality traits. Traditionally, professionals also called graphologists predict the behavior of the writer by analyzing their handwriting, but this procedure is tedious and expensive. Therefore, this paper focuses on developing an application for personality identification that can predict behavioral characteristics directly using a computer without any human involvement. Most of the existing applications use English as the primary language to identify the personality trait of the writer however, our approach uses Devanagari scripts for prediction, thereby eliminating the language barrier. Our proposed method uses a machine learning approach to predict personality by analyzing Devanagari samples using Artificial Neural Network. We have created our own Devanagari word dataset. There are almost 4000 images which belong to 5 classes namely Introvert, Extrovert, Optimistic, Pessimistic and Stable mind-set. The testing accuracy achieved by the proposed method is 94.75%.</description>
    <pubDate>12-30-2022</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Handwriting reflects a person's true nature, phobias, emotional outbursts, honesty, defenses and many more characteristics. Analysis of handwriting, also known as graphology, is a science that uses the strokes and patterns disclosed by handwriting to identify, evaluate, and analyze personality. It is the study of the patterns and physical characteristics of handwriting to identify the author, indicate the author's psychological state while writing, or analyze personality traits. Traditionally, professionals also called graphologists predict the behavior of the writer by analyzing their handwriting, but this procedure is tedious and expensive. Therefore, this paper focuses on developing an application for personality identification that can predict behavioral characteristics directly using a computer without any human involvement. Most of the existing applications use English as the primary language to identify the personality trait of the writer however, our approach uses Devanagari scripts for prediction, thereby eliminating the language barrier. Our proposed method uses a machine learning approach to predict personality by analyzing Devanagari samples using Artificial Neural Network. We have created our own Devanagari word dataset. There are almost 4000 images which belong to 5 classes namely Introvert, Extrovert, Optimistic, Pessimistic and Stable mind-set. The testing accuracy achieved by the proposed method is 94.75%.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Human Behavior Identification Based on Graphology Using Artificial Neural Network</dc:title>
    <dc:creator>shalaka prasad deore</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml010204</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>12-30-2022</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>12-30-2022</prism:publicationDate>
    <prism:year>2022</prism:year>
    <prism:volume>1</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>101</prism:startingPage>
    <prism:doi>10.56578/ataiml010204</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2022_1_2/ataiml010204</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2022_1_2/ataiml010203">
    <title>Acadlore Transactions on AI and Machine Learning, 2022, Volume 1, Issue 2: Gait Based Person Identification Using Deep Learning Model of Generative Adversarial Network</title>
    <link>https://www.acadlore.com/article/ATAIML/2022_1_2/ataiml010203</link>
    <description>The proliferation of digital age security tools is often attributed to the rise of visual surveillance. Since an individual's gait is highly indicative of their identity, it is becoming an increasingly popular biometric modality for use in autonomous visual surveillance and monitoring. There are various steps used in gait recognition frameworks such as segmentation, feature extraction, feature learning and similarity measurement. These steps are mutually independent with each part fixed, which results in a suboptimal performance in a challenging condition. It can be done independently of the users' involvement. Low-resolution video and straightforward instrumentation can verify an individual's identity, making impersonation a rarity. Using the benefits of the Generative Adversarial Network (GAN), this investigation tackles the problem of unevenly distributed unlabeled data with infrequently performed tasks. To estimate the data circulation in various circumstances using constrained observed gait data, a multimodal generator is applied here. When it comes to sharing knowledge, the variety provided by the data generated by a multimodal generator is hard to beat. The capability to distinguish gait activities with varying patterns due to environmental dynamics is enhanced by this multimodal generator. This system is more stable than other gait-based recognition methods because it can process data that is not equally dispersed throughout a different environment. The system's reliability is enhanced by the multimodal generator's capacity to produce a wide variety of outputs. The testing results show that this algorithm is superior to other gait-based recognition methods because it can adapt to changing environments.</description>
    <pubDate>12-30-2022</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;The proliferation of digital age security tools is often attributed to the rise of visual surveillance. Since an individual's gait is highly indicative of their identity, it is becoming an increasingly popular biometric modality for use in autonomous visual surveillance and monitoring. There are various steps used in gait recognition frameworks such as segmentation, feature extraction, feature learning and similarity measurement. These steps are mutually independent with each part fixed, which results in a suboptimal performance in a challenging condition. It can be done independently of the users' involvement. Low-resolution video and straightforward instrumentation can verify an individual's identity, making impersonation a rarity. Using the benefits of the Generative Adversarial Network (GAN), this investigation tackles the problem of unevenly distributed unlabeled data with infrequently performed tasks. To estimate the data circulation in various circumstances using constrained observed gait data, a multimodal generator is applied here. When it comes to sharing knowledge, the variety provided by the data generated by a multimodal generator is hard to beat. The capability to distinguish gait activities with varying patterns due to environmental dynamics is enhanced by this multimodal generator. This system is more stable than other gait-based recognition methods because it can process data that is not equally dispersed throughout a different environment. The system's reliability is enhanced by the multimodal generator's capacity to produce a wide variety of outputs. The testing results show that this algorithm is superior to other gait-based recognition methods because it can adapt to changing environments.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Gait Based Person Identification Using Deep Learning Model of Generative Adversarial Network</dc:title>
    <dc:creator>ramesh vatambeti</dc:creator>
    <dc:creator>vijay kumar damera</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml010203</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>12-30-2022</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>12-30-2022</prism:publicationDate>
    <prism:year>2022</prism:year>
    <prism:volume>1</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>90</prism:startingPage>
    <prism:doi>10.56578/ataiml010203</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2022_1_2/ataiml010203</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2022_1_2/ataiml010202">
    <title>Acadlore Transactions on AI and Machine Learning, 2022, Volume 1, Issue 2: Modelling of Depth Prediction Algorithm for Intra Prediction Complexity Reduction</title>
    <link>https://www.acadlore.com/article/ATAIML/2022_1_2/ataiml010202</link>
    <description>Video compression gained its relevance with the boon of the internet, mobile phones, variable resolution acquisition device etc. The redundant information is explored in initial stages of compression that’s is prediction. Inter prediction that is prediction within the frame generates high computational complexity when working with traditional signal processing procedures. The paper proposes the design of a deep convolutional neural network model to perform inter prediction by crossing out the flaws in the traditional method. It briefs the modeling of network, mathematics behind each stage and evaluation of the proposed model with sample dataset. The video frame’s coding tree unit (CTU) of 64x64 is the input, the model converts and store it as a 16-element vector with the goodness of CNN network. It gives an overview of deep depth decision algorithm. The evaluation process shows that the model performs better for compression with less computational complexity.</description>
    <pubDate>12-30-2022</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Video compression gained its relevance with the boon of the internet, mobile phones, variable resolution acquisition device etc. The redundant information is explored in initial stages of compression that’s is prediction. Inter prediction that is prediction within the frame generates high computational complexity when working with traditional signal processing procedures. The paper proposes the design of a deep convolutional neural network model to perform inter prediction by crossing out the flaws in the traditional method. It briefs the modeling of network, mathematics behind each stage and evaluation of the proposed model with sample dataset. The video frame’s coding tree unit (CTU) of 64x64 is the input, the model converts and store it as a 16-element vector with the goodness of CNN network. It gives an overview of deep depth decision algorithm. The evaluation process shows that the model performs better for compression with less computational complexity.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Modelling of Depth Prediction Algorithm for Intra Prediction Complexity Reduction</dc:title>
    <dc:creator>helen k. joy</dc:creator>
    <dc:creator>manjunath r. kounte</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml010202</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>12-30-2022</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>12-30-2022</prism:publicationDate>
    <prism:year>2022</prism:year>
    <prism:volume>1</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>81</prism:startingPage>
    <prism:doi>10.56578/ataiml010202</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2022_1_2/ataiml010202</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2022_1_2/ataiml010201">
    <title>Acadlore Transactions on AI and Machine Learning, 2022, Volume 1, Issue 2: Analysis of Artificial Intelligence and Natural Language Processing Significance as Expert Systems Support for E-Health Using Pre-Train Deep Learning Models</title>
    <link>https://www.acadlore.com/article/ATAIML/2022_1_2/ataiml010201</link>
    <description>Artificial intelligence (AI) and natural language processing (NLP) are relentless technologies for healthcare that can support a strong and secure digital system with embedded applications of internet of things (IoTs). The study tried to build an artificial intelligence-natural language processing cluster system. In the system, rich content is extracted using parts of speech and then classified into an understandable dataset. The unavailable uniqueness systems with standardize process and procedures for artificial intelligence and natural language processing across different systems to support E-healthcare sector is a big challenge for nations and the world at large. Aim to train a cluster system that extract rich content and fit into a deep learning model frame to enable interpretation of the dataset for healthcare needs through a fast and secure digital system. The study uses (behavior-oriented driven and influential functions) to determine the significance of AI and NLP on E-health. Based on a selective scorings method, a rate of 1 out of 5 grading was developed called the Key Benefits score. The behavior-oriented drive and influential function allows an in-depth evaluation of E-health based on the selection of text content applied to the sample proposed study. Results show a score of 3.947 scale significance of NLP and AI on E-health. The study concluded that well-defined artificial intelligence and natural language processing applications are perfect areas that advance positive results in healthcare electronic services.</description>
    <pubDate>12-30-2022</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Artificial intelligence (AI) and natural language processing (NLP) are relentless technologies for healthcare that can support a strong and secure digital system with embedded applications of internet of things (IoTs). The study tried to build an artificial intelligence-natural language processing cluster system. In the system, rich content is extracted using parts of speech and then classified into an understandable dataset. The unavailable uniqueness systems with standardize process and procedures for artificial intelligence and natural language processing across different systems to support E-healthcare sector is a big challenge for nations and the world at large. Aim to train a cluster system that extract rich content and fit into a deep learning model frame to enable interpretation of the dataset for healthcare needs through a fast and secure digital system. The study uses (behavior-oriented driven and influential functions) to determine the significance of AI and NLP on E-health. Based on a selective scorings method, a rate of 1 out of 5 grading was developed called the Key Benefits score. The behavior-oriented drive and influential function allows an in-depth evaluation of E-health based on the selection of text content applied to the sample proposed study. Results show a score of 3.947 scale significance of NLP and AI on E-health. The study concluded that well-defined artificial intelligence and natural language processing applications are perfect areas that advance positive results in healthcare electronic services.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Analysis of Artificial Intelligence and Natural Language Processing Significance as Expert Systems Support for E-Health Using Pre-Train Deep Learning Models</dc:title>
    <dc:creator>pascal muam mah</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml010201</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>12-30-2022</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>12-30-2022</prism:publicationDate>
    <prism:year>2022</prism:year>
    <prism:volume>1</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>68</prism:startingPage>
    <prism:doi>10.56578/ataiml010201</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2022_1_2/ataiml010201</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010108">
    <title>Acadlore Transactions on AI and Machine Learning, 2022, Volume 1, Issue 1: Liver Lesion Segmentation Using Deep Learning Models</title>
    <link>https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010108</link>
    <description>An estimated 9.6 million deaths, or one in every six deaths, were attributed to cancer in 2018, making it the second highest cause of death worldwide. Men are more likely to develop lung, prostate, colorectal, stomach, and liver cancer than women, who are more likely to develop breast, colorectal, lung, cervical, and thyroid cancer. The primary goals of medical image segmentation include studying anatomical structure, identifying regions of interest (RoI), and measuring tissue volume to track tumor growth. It is crucial to diagnose and treat liver lesions quickly in order to stop the tumor from spreading further. Deep learning model-based liver segmentation has become very popular in the field of medical image analysis. This study explores various deep learning-based liver lesion segmentation algorithms and methodologies. Based on the developed models, the performance, and their limitations of these methodologies are contrasted. In the end, it was concluded that small size lesion segmentation, in particular, is still an open research subject for computer-aided systems of liver lesion segmentation, for there are still a number of technical issues that need to be resolved.</description>
    <pubDate>11-19-2022</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;An estimated 9.6 million deaths, or one in every six deaths, were attributed to cancer in 2018, making it the second highest cause of death worldwide. Men are more likely to develop lung, prostate, colorectal, stomach, and liver cancer than women, who are more likely to develop breast, colorectal, lung, cervical, and thyroid cancer. The primary goals of medical image segmentation include studying anatomical structure, identifying regions of interest (RoI), and measuring tissue volume to track tumor growth. It is crucial to diagnose and treat liver lesions quickly in order to stop the tumor from spreading further. Deep learning model-based liver segmentation has become very popular in the field of medical image analysis. This study explores various deep learning-based liver lesion segmentation algorithms and methodologies. Based on the developed models, the performance, and their limitations of these methodologies are contrasted. In the end, it was concluded that small size lesion segmentation, in particular, is still an open research subject for computer-aided systems of liver lesion segmentation, for there are still a number of technical issues that need to be resolved.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Liver Lesion Segmentation Using Deep Learning Models</dc:title>
    <dc:creator>aasia rehman</dc:creator>
    <dc:creator>muheet ahmed butt</dc:creator>
    <dc:creator>majid zaman</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml010108</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>11-19-2022</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>11-19-2022</prism:publicationDate>
    <prism:year>2022</prism:year>
    <prism:volume>1</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>61</prism:startingPage>
    <prism:doi>10.56578/ataiml010108</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010108</prism:url>
    <cc:license rdf:resource="CC BY 4.0"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010107">
    <title>Acadlore Transactions on AI and Machine Learning, 2022, Volume 1, Issue 1: Performance Comparison of Three Classifiers for Fetal Health Classification Based on Cardiotocographic Data</title>
    <link>https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010107</link>
    <description>The global child mortality rate, which is steadily declining, will be around 26 fatalities per 1000 live births in 2022. Numerous Sustainable Development Goals of the United Nations take into account the declining child mortality rate, which illustrates how far humanity has come. Cardiotocograms (CTGs) are a simple and affordable tool that most professionals choose to reduce infant and mother mortality. Three of the most cutting-edge methodologies are utilized in this research to classify the data, and their results are compared. All three classifiers outperformed the random forest, whose accuracy was 94.3%.</description>
    <pubDate>11-19-2022</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;The global child mortality rate, which is steadily declining, will be around 26 fatalities per 1000 live births in 2022. Numerous Sustainable Development Goals of the United Nations take into account the declining child mortality rate, which illustrates how far humanity has come. Cardiotocograms (CTGs) are a simple and affordable tool that most professionals choose to reduce infant and mother mortality. Three of the most cutting-edge methodologies are utilized in this research to classify the data, and their results are compared. All three classifiers outperformed the random forest, whose accuracy was 94.3%.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Performance Comparison of Three Classifiers for Fetal Health Classification Based on Cardiotocographic Data</dc:title>
    <dc:creator>Vijay Khare</dc:creator>
    <dc:creator>Sakshi Kumari</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml010107</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>11-19-2022</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>11-19-2022</prism:publicationDate>
    <prism:year>2022</prism:year>
    <prism:volume>1</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>52</prism:startingPage>
    <prism:doi>10.56578/ataiml010107</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010107</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010106">
    <title>Acadlore Transactions on AI and Machine Learning, 2022, Volume 1, Issue 1: Mask Wearing Detection Based on YOLOv5 Target Detection Algorithm under COVID-19</title>
    <link>https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010106</link>
    <description>Deep learning methods have been widely used in object detection in recent years as a result of advancements in artificial intelligence algorithms and hardware computing capacity. In light of the drawbacks of current manual testing mask wearing methods, this study offers a real-time detection method of mask wearing status based on the deep learning YOLOv5 algorithm to prevent COVID-19 and quicken the recovery of industrial production. The algorithm normalizes the original dataset, before connecting the data to the YOLOv5 network for iterative training, and saving the ideal weight data as a test set. The training and test results of the suggested approach are presented visually on a tensor board. With the help of cameras, this technique can collect faces, identify masked faces, and present prompts for mask use. According to experiment results, the suggested algorithm can match the requirements of real-world applications and has a high detection accuracy and good real-time performance.</description>
    <pubDate>11-19-2022</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Deep learning methods have been widely used in object detection in recent years as a result of advancements in artificial intelligence algorithms and hardware computing capacity. In light of the drawbacks of current manual testing mask wearing methods, this study offers a real-time detection method of mask wearing status based on the deep learning YOLOv5 algorithm to prevent COVID-19 and quicken the recovery of industrial production. The algorithm normalizes the original dataset, before connecting the data to the YOLOv5 network for iterative training, and saving the ideal weight data as a test set. The training and test results of the suggested approach are presented visually on a tensor board. With the help of cameras, this technique can collect faces, identify masked faces, and present prompts for mask use. According to experiment results, the suggested algorithm can match the requirements of real-world applications and has a high detection accuracy and good real-time performance.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Mask Wearing Detection Based on YOLOv5 Target Detection Algorithm under COVID-19</dc:title>
    <dc:creator>Jiuchao Xie</dc:creator>
    <dc:creator>Rui Xi</dc:creator>
    <dc:creator>Daofang Chang</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml010106</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>11-19-2022</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>11-19-2022</prism:publicationDate>
    <prism:year>2022</prism:year>
    <prism:volume>1</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>40</prism:startingPage>
    <prism:doi>10.56578/ataiml010106</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010106</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010105">
    <title>Acadlore Transactions on AI and Machine Learning, 2022, Volume 1, Issue 1: A Dual-Selective Channel Attention Network for Osteoporosis Prediction in Computed Tomography Images of Lumbar Spine</title>
    <link>https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010105</link>
    <description>Osteoporosis is a common systemic bone disease with insidious onset and low treatment efficiency. Once it occurs, it will increase bone fragility and lead to fractures. Computed tomography (CT) is a non-invasive medical examination method that can identify the bone condition of patients. In this paper, we propose a novel channel attention module, which is subsequently integrated into the supervised deep convolutional neural network (DCNN) termed DSNet, which can perform feature fusion from two different scales, and use the method of quadratic weight calculation to enhance the interconnection among feature map channels and improve the detection and classification performance for the bone condition in lumbar spine CT images. To train and test the proposed framework, we retrospectively collect 4805 CT images of 133 patients, using DXA as the gold standard. According to the T-value diagnostic criteria defined by WHO, the vertebral bodies of L1 - L4 in CT images are labeled and classified into osteoporosis, osteopenia and normal bone mineral density. Meanwhile, the training set and test set are constructed in the ratio of 4:1. As a result, the DSNet achieves a prediction accuracy of 83.4% and a recall rate of 90.0% on the test set, indicating that the proposed model has the potential to assist clinicians in diagnosing individuals with abnormal BMD and may alert patients at high risk of osteoporosis for timely treatment.</description>
    <pubDate>11-19-2022</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Osteoporosis is a common systemic bone disease with insidious onset and low treatment efficiency. Once it occurs, it will increase bone fragility and lead to fractures. Computed tomography (CT) is a non-invasive medical examination method that can identify the bone condition of patients. In this paper, we propose a novel channel attention module, which is subsequently integrated into the supervised deep convolutional neural network (DCNN) termed DSNet, which can perform feature fusion from two different scales, and use the method of quadratic weight calculation to enhance the interconnection among feature map channels and improve the detection and classification performance for the bone condition in lumbar spine CT images. To train and test the proposed framework, we retrospectively collect 4805 CT images of 133 patients, using DXA as the gold standard. According to the T-value diagnostic criteria defined by WHO, the vertebral bodies of L1 - L4 in CT images are labeled and classified into osteoporosis, osteopenia and normal bone mineral density. Meanwhile, the training set and test set are constructed in the ratio of 4:1. As a result, the DSNet achieves a prediction accuracy of 83.4% and a recall rate of 90.0% on the test set, indicating that the proposed model has the potential to assist clinicians in diagnosing individuals with abnormal BMD and may alert patients at high risk of osteoporosis for timely treatment.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>A Dual-Selective Channel Attention Network for Osteoporosis Prediction in Computed Tomography Images of Lumbar Spine</dc:title>
    <dc:creator>Linyan Xue</dc:creator>
    <dc:creator>Ya Hou</dc:creator>
    <dc:creator>Shiwei Wang</dc:creator>
    <dc:creator>Cheng Luo</dc:creator>
    <dc:creator>Zhiyin Xia</dc:creator>
    <dc:creator>Geng Qin</dc:creator>
    <dc:creator>Shuang Liu</dc:creator>
    <dc:creator>Zhongliang Wang</dc:creator>
    <dc:creator>Wenshan Gao</dc:creator>
    <dc:creator>Kun Yang</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml010105</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>11-19-2022</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>11-19-2022</prism:publicationDate>
    <prism:year>2022</prism:year>
    <prism:volume>1</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>30</prism:startingPage>
    <prism:doi>10.56578/ataiml010105</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010105</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010104">
    <title>Acadlore Transactions on AI and Machine Learning, 2022, Volume 1, Issue 1: Enhancing Session-Based Recommendations with Popularity-Aware Graph Neural Networks</title>
    <link>https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010104</link>
    <description>Real-time and reliable recommendations are essential for anonymous users in session-based recommendation systems. Graph neural network-based algorithms are attracting more researchers due to their simplicity and efficiency. However, current methods overlook the influence of edge frequency on feature aggregation in graph modeling and fail to account for the impact of item popularity on user interest. To address these issues, a novel approach called Popularity-Aware Graph Neural Networks for Session-based Recommendations is proposed. This study integrates both edge frequency and item popularity into the modeling process to enhance the learning of item features and user interests. A graph that includes the number of edge occurrences is constructed, and a graph neural network with an attention mechanism is utilized to learn user interests and item features by aggregating information from the graph. Finally, the session's final representation is learned based on the occurrence frequency of items. The proposed study evaluates the model on two classical e-commerce datasets and demonstrates its superiority over existing methods.</description>
    <pubDate>11-19-2022</pubDate>
    <content:encoded>&lt;![CDATA[ Real-time and reliable recommendations are essential for anonymous users in session-based recommendation systems. Graph neural network-based algorithms are attracting more researchers due to their simplicity and efficiency. However, current methods overlook the influence of edge frequency on feature aggregation in graph modeling and fail to account for the impact of item popularity on user interest. To address these issues, a novel approach called Popularity-Aware Graph Neural Networks for Session-based Recommendations is proposed. This study integrates both edge frequency and item popularity into the modeling process to enhance the learning of item features and user interests. A graph that includes the number of edge occurrences is constructed, and a graph neural network with an attention mechanism is utilized to learn user interests and item features by aggregating information from the graph. Finally, the session's final representation is learned based on the occurrence frequency of items. The proposed study evaluates the model on two classical e-commerce datasets and demonstrates its superiority over existing methods. ]]&gt;</content:encoded>
    <dc:title>Enhancing Session-Based Recommendations with Popularity-Aware Graph Neural Networks</dc:title>
    <dc:creator>Qingbo Sun</dc:creator>
    <dc:creator>Weihua Yuan</dc:creator>
    <dc:creator>Qi Zhang</dc:creator>
    <dc:creator>Zhijun Zhang</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml010104</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>11-19-2022</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>11-19-2022</prism:publicationDate>
    <prism:year>2022</prism:year>
    <prism:volume>1</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>22</prism:startingPage>
    <prism:doi>10.56578/ataiml010104</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010104</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010103">
    <title>Acadlore Transactions on AI and Machine Learning, 2022, Volume 1, Issue 1: House Price Prediction Using Exploratory Data Analysis and Machine Learning with Feature Selection</title>
    <link>https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010103</link>
    <description>In many real-world applications, it is more realistic to predict a price range than to forecast a single value. When the goal is to identify a range of prices, price prediction becomes a classification problem. The House Price Index is a typical instrument for estimating house price discrepancies. This repeat sale index analyzes the mean price variation in repeat sales or refinancing of the same assets. Since it depends on all transactions, the House Price Index is poor at projecting the price of a single house. To forecast house prices effectively, this study investigates the exploratory data analysis based on linear regression, ridge regression, Lasso regression, and Elastic Net regression, with the aid of machine learning with feature selection. The proposed prediction model for house prices was evaluated on a machine learning housing dataset, which covers 1,460 records and 81 features. By comparing the predicted and actual prices, it was learned that our model outputted an acceptable, expected values compared to the actual values. The error margin to actual values was very small. The comparison shows that our model is satisfactory in predicting house prices.</description>
    <pubDate>11-19-2022</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;In many real-world applications, it is more realistic to predict a price range than to forecast a single value. When the goal is to identify a range of prices, price prediction becomes a classification problem. The House Price Index is a typical instrument for estimating house price discrepancies. This repeat sale index analyzes the mean price variation in repeat sales or refinancing of the same assets. Since it depends on all transactions, the House Price Index is poor at projecting the price of a single house. To forecast house prices effectively, this study investigates the exploratory data analysis based on linear regression, ridge regression, Lasso regression, and Elastic Net regression, with the aid of machine learning with feature selection. The proposed prediction model for house prices was evaluated on a machine learning housing dataset, which covers 1,460 records and 81 features. By comparing the predicted and actual prices, it was learned that our model outputted an acceptable, expected values compared to the actual values. The error margin to actual values was very small. The comparison shows that our model is satisfactory in predicting house prices.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>House Price Prediction Using Exploratory Data Analysis and Machine Learning with Feature Selection</dc:title>
    <dc:creator>Fadhil M. Basysyar</dc:creator>
    <dc:creator>Gifthera Dwilestari</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml010103</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>11-19-2022</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>11-19-2022</prism:publicationDate>
    <prism:year>2022</prism:year>
    <prism:volume>1</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>11</prism:startingPage>
    <prism:doi>10.56578/ataiml010103</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010103</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010102">
    <title>Acadlore Transactions on AI and Machine Learning, 2022, Volume 1, Issue 1: A Survey on Multimedia Ontologies for a Semantic Annotation of Cinematographic Resources for the Web of Data</title>
    <link>https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010102</link>
    <description>The Semantic Web provides approaches and tools that allow for the processing and analysis of online content, including multimedia resources. Multimedia resources like videos, audios, and photos are increasingly common in contemporary Web content. Cinematographic works (also known as film contents) stand out among these resources as one of the most recent attractions on the Internet. An important tool employed recently in the semantic indexation of digital resources and film content is ontological annotation. This paper studies the current multimedia ontologies related to the film contents on the web. The relevant indicators were discussed comparatively, and some open issues were reviewed in details. In this way, the authors managed to integrate the metadata related to online films practically into the web of data.</description>
    <pubDate>11-19-2022</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;The Semantic Web provides approaches and tools that allow for the processing and analysis of online content, including multimedia resources. Multimedia resources like videos, audios, and photos are increasingly common in contemporary Web content. Cinematographic works (also known as film contents) stand out among these resources as one of the most recent attractions on the Internet. An important tool employed recently in the semantic indexation of digital resources and film content is ontological annotation. This paper studies the current multimedia ontologies related to the film contents on the web. The relevant indicators were discussed comparatively, and some open issues were reviewed in details. In this way, the authors managed to integrate the metadata related to online films practically into the web of data.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>A Survey on Multimedia Ontologies for a Semantic Annotation of Cinematographic Resources for the Web of Data</dc:title>
    <dc:creator>Samdalle Amaria</dc:creator>
    <dc:creator>Kaladzavi Guidedi</dc:creator>
    <dc:creator>Warda Lazarre</dc:creator>
    <dc:creator>Kolyang</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml010102</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>11-19-2022</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>11-19-2022</prism:publicationDate>
    <prism:year>2022</prism:year>
    <prism:volume>1</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>2</prism:startingPage>
    <prism:doi>10.56578/ataiml010102</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010102</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010101">
    <title>Acadlore Transactions on AI and Machine Learning, 2022, Volume 1, Issue 1: Editorial to the Inaugural Issue</title>
    <link>https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010101</link>
    <description> </description>
    <pubDate>11-19-2022</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt; &lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Editorial to the Inaugural Issue</dc:title>
    <dc:creator>Andreas Pester</dc:creator>
    <dc:identifier>doi: 10.56578/ataiml010101</dc:identifier>
    <dc:source>Acadlore Transactions on AI and Machine Learning</dc:source>
    <dc:date>11-19-2022</dc:date>
    <prism:publicationName>Acadlore Transactions on AI and Machine Learning</prism:publicationName>
    <prism:publicationDate>11-19-2022</prism:publicationDate>
    <prism:year>2022</prism:year>
    <prism:volume>1</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>1</prism:startingPage>
    <prism:doi>10.56578/ataiml010101</prism:doi>
    <prism:url>https://www.acadlore.com/article/ATAIML/2022_1_1/ataiml010101</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <cc:License rdf:about="https://creativecommons.org/licenses/by/4.0/">
    <cc:permits rdf:resource="http://creativecommons.org/ns#Reproduction"/>
    <cc:permits rdf:resource="http://creativecommons.org/ns#Distribution"/>
    <cc:permits rdf:resource="http://creativecommons.org/ns#DerivativeWorks"/>
  </cc:License>
</rdf:RDF>