<?xml version="1.0" encoding="UTF-8"?>
<!-- DOCTYPE removed: the previously declared JATS Journal Publishing DTD does not match the rdf:RDF root element, and RSS 1.0/RDF feeds are not DTD-validated -->
<rdf:RDF xmlns="http://purl.org/rss/1.0/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:dcterms="http://purl.org/dc/terms/" xmlns:cc="http://web.resource.org/cc/" xmlns:prism="http://prismstandard.org/namespaces/basic/2.0/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:admin="http://webns.net/mvcb/" xmlns:content="http://purl.org/rss/1.0/modules/content/">
  <channel rdf:about="https://www.acadlore.com/rss/journals/IDA">
    <title>Information Dynamics and Applications</title>
    <description>Latest open access articles published in Information Dynamics and Applications at https://www.acadlore.com/journals/IDA</description>
    <link>https://www.acadlore.com/journals/IDA</link>
    <admin:generatorAgent rdf:resource="https://www.acadlore.com/journals/IDA"/>
    <admin:errorReportsTo rdf:resource="mailto:support@acadlore.com"/>
    <dc:publisher>Acadlore</dc:publisher>
    <dc:language>en</dc:language>
    <dc:rights>Creative Commons Attribution (CC BY)</dc:rights>
    <prism:copyright>IDA</prism:copyright>
    <prism:rightsAgent>support@acadlore.com</prism:rightsAgent>
    <image rdf:resource="https://media.acadlore.com/assets/media/2026/2/img_ORHUIGptR1aEMMaC.png"/>
    <items>
      <rdf:Seq>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2026_5_1/ida050101"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2025_4_4/ida040405"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2025_4_4/ida040404"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2025_4_4/ida040403"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2025_4_4/ida040402"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2025_4_4/ida040401"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2025_4_3/ida040305"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2025_4_3/ida040304"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2025_4_3/ida040303"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2025_4_3/ida040302"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2025_4_3/ida040301"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2025_4_2/ida040205"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2025_4_2/ida040204"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2025_4_2/ida040203"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2025_4_2/ida040202"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2025_4_2/ida040201"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2025_4_1/ida040105"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2025_4_1/ida040104"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2025_4_1/ida040103"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2025_4_1/ida040102"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2025_4_1/ida040101"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2024_3_4/ida030405"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2024_3_4/ida030404"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2024_3_4/ida030403"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2024_3_4/ida030402"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2024_3_4/ida030401"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2024_3_3/ida030305"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2024_3_3/ida030304"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2024_3_3/ida030303"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2024_3_3/ida030302"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2024_3_3/ida030301"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2024_3_2/ida030205"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2024_3_2/ida030204"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2024_3_2/ida030203"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2024_3_2/ida030202"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2024_3_2/ida030201"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2024_3_1/ida030105"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2024_3_1/ida030104"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2024_3_1/ida030103"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2024_3_1/ida030102"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2024_3_1/ida030101"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2023_2_4/ida020405"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2023_2_4/ida020404"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2023_2_4/ida020403"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2023_2_4/ida020402"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2023_2_4/ida020401"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2023_2_3/ida020305"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2023_2_3/ida020304"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2023_2_3/ida020303"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2023_2_3/ida020302"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2023_2_3/ida020301"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2023_2_2/ida020205"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2023_2_2/ida020204"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2023_2_2/ida020203"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2023_2_2/ida020202"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2023_2_2/ida020201"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2023_2_1/ida020105"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2023_2_1/ida020104"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2023_2_1/ida020103"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2023_2_1/ida020102"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2023_2_1/ida020101"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2022_1_1/ida010106"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2022_1_1/ida010105"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2022_1_1/ida010104"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2022_1_1/ida010103"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2022_1_1/ida010102"/>
        <rdf:li rdf:resource="https://www.acadlore.com/article/IDA/2022_1_1/ida010101"/>
      </rdf:Seq>
    </items>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </channel>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2026_5_1/ida050101">
    <title>Information Dynamics and Applications, 2026, Volume 5, Issue 1: DeCo-Adapter: Enhancing Zero-Shot Robustness via Decoupled Negative Semantic Suppression</title>
    <link>https://www.acadlore.com/article/IDA/2026_5_1/ida050101</link>
    <description>Large-scale Vision-Language Models (VLMs) like Contrastive Language-Image Pre-training (CLIP) have demonstrated their impressive zero-shot capabilities. However, adapting them to downstream tasks remains challenging, especially under domain shifts where visual features become unreliable. Existing training-free methods, such as Tip-Adapter, rely heavily on visual similarity, which often fails in out-of-distribution (OOD) scenarios. To address this, Decoupled Correction Adapter (DeCo-Adapter), a robust adaptation framework that integrates a Decoupled Knowledge Stream into the visual baseline, is proposed. Specifically, a novel Negative Semantic Suppression mechanism is introduced, leveraging Large Language Models (LLMs) to generate and penalize distractor descriptions. This mechanism effectively corrects visual ambiguities without requiring any training. Extensive experiments on ImageNet-Sketch, ImageNet-V2, and ImageNet-A demonstrate that DeCo-Adapter consistently outperforms state-of-the-art methods. Notably, it achieves a top-1 accuracy of 54.11% on ImageNet-Sketch, surpassing the strong Tip-Adapter baseline by leveraging negative knowledge for error correction.</description>
    <pubDate>2026-03-17</pubDate>
    <content:encoded>&lt;p&gt;Large-scale Vision-Language Models (VLMs) like Contrastive Language-Image Pre-training (CLIP) have demonstrated their impressive zero-shot capabilities. However, adapting them to downstream tasks remains challenging, especially under domain shifts where visual features become unreliable. Existing training-free methods, such as Tip-Adapter, rely heavily on visual similarity, which often fails in out-of-distribution (OOD) scenarios. To address this, Decoupled Correction Adapter (DeCo-Adapter), a robust adaptation framework that integrates a Decoupled Knowledge Stream into the visual baseline, is proposed. Specifically, a novel Negative Semantic Suppression mechanism is introduced, leveraging Large Language Models (LLMs) to generate and penalize distractor descriptions. This mechanism effectively corrects visual ambiguities without requiring any training. Extensive experiments on ImageNet-Sketch, ImageNet-V2, and ImageNet-A demonstrate that DeCo-Adapter consistently outperforms state-of-the-art methods. Notably, it achieves a top-1 accuracy of 54.11% on ImageNet-Sketch, surpassing the strong Tip-Adapter baseline by leveraging negative knowledge for error correction.&lt;/p&gt;</content:encoded>
    <dc:title>DeCo-Adapter: Enhancing Zero-Shot Robustness via Decoupled Negative Semantic Suppression</dc:title>
    <dc:creator>Yiheng Chi</dc:creator>
    <dc:creator>Peijian Zhang</dc:creator>
    <dc:identifier>doi: 10.56578/ida050101</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2026-03-17</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2026-03-17</prism:publicationDate>
    <prism:year>2026</prism:year>
    <prism:volume>5</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>1</prism:startingPage>
    <prism:doi>10.56578/ida050101</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2026_5_1/ida050101</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2025_4_4/ida040405">
    <title>Information Dynamics and Applications, 2025, Volume 4, Issue 4: Hybrid Improved Stacking over Tabular Temporal Features with Blockchain-Certified Data: Millet Yield Prediction and Explainability</title>
    <link>https://www.acadlore.com/article/IDA/2025_4_4/ida040405</link>
    <description>Accurate crop yield prediction is essential for food security planning in developing countries. However, real-world deployments remain challenging due to limited imagery availability, heterogeneous tabular data, and concerns regarding data reliability. This paper proposes a tabular-only temporal deep learning framework enhanced with a blockchain-based data provenance layer for millet yield prediction in Senegal. The proposed model embeds per-timestep agroecological features using a multilayer perceptron (MLP), captures temporal dependencies through a bidirectional Long Short-Term Memory (BiLSTM) network, and integrates a hybrid improved stacking strategy by incorporating predictions from classical machine learning models, including Random Forest, XGBoost, LightGBM, and CatBoost. Unlike conventional stacking approaches, these predictions are injected directly into the temporal representation at the final timestep, thereby improving generalization and calibration performance. To ensure data integrity and traceability, a blockchain-inspired certification mechanism is introduced. This mechanism relies on canonicalization, SHA-256 hashing, and HMAC-based signatures of zone-year records. Experimental results demonstrate that the proposed approach achieves strong predictive performance (MAE $\approx$ 0.074, RMSE $\approx$ 0.101, R$^2$ $\approx$ 0.946), outperforming baseline models. A comprehensive evaluation framework is employed, including cross-validation, statistical significance testing, and explainability analysis using SHAP, LIME, and gradient-based saliency methods. Results indicate that while performance improvements are significant under static evaluation settings, they are less consistent under temporal cross-validation, highlighting the importance of robust evaluation protocols. 
Overall, the proposed framework provides a practical, auditable, and high-performing solution for yield prediction in data-scarce environments, combining predictive accuracy with data trustworthiness.</description>
    <pubDate>2025-12-29</pubDate>
    <content:encoded>Accurate crop yield prediction is essential for food security planning in developing countries. However, real-world deployments remain challenging due to limited imagery availability, heterogeneous tabular data, and concerns regarding data reliability. This paper proposes a tabular-only temporal deep learning framework enhanced with a blockchain-based data provenance layer for millet yield prediction in Senegal. The proposed model embeds per-timestep agroecological features using a multilayer perceptron (MLP), captures temporal dependencies through a bidirectional Long Short-Term Memory (BiLSTM) network, and integrates a hybrid improved stacking strategy by incorporating predictions from classical machine learning models, including Random Forest, XGBoost, LightGBM, and CatBoost. Unlike conventional stacking approaches, these predictions are injected directly into the temporal representation at the final timestep, thereby improving generalization and calibration performance. To ensure data integrity and traceability, a blockchain-inspired certification mechanism is introduced. This mechanism relies on canonicalization, SHA-256 hashing, and HMAC-based signatures of zone-year records. Experimental results demonstrate that the proposed approach achieves strong predictive performance (MAE $\approx$ 0.074, RMSE $\approx$ 0.101, R$^2$ $\approx$ 0.946), outperforming baseline models. A comprehensive evaluation framework is employed, including cross-validation, statistical significance testing, and explainability analysis using SHAP, LIME, and gradient-based saliency methods. Results indicate that while performance improvements are significant under static evaluation settings, they are less consistent under temporal cross-validation, highlighting the importance of robust evaluation protocols. 
Overall, the proposed framework provides a practical, auditable, and high-performing solution for yield prediction in data-scarce environments, combining predictive accuracy with data trustworthiness.</content:encoded>
    <dc:title>Hybrid Improved Stacking over Tabular Temporal Features with Blockchain-Certified Data: Millet Yield Prediction and Explainability</dc:title>
    <dc:creator>Pape Elhadji Abdoulaye Gueye</dc:creator>
    <dc:creator>Cherif Bachir Deme</dc:creator>
    <dc:creator>Diery Ngom</dc:creator>
    <dc:creator>Adrien Basse</dc:creator>
    <dc:identifier>doi: 10.56578/ida040405</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2025-12-29</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2025-12-29</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>4</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>238</prism:startingPage>
    <prism:doi>10.56578/ida040405</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2025_4_4/ida040405</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2025_4_4/ida040404">
    <title>Information Dynamics and Applications, 2025, Volume 4, Issue 4: From Data to Knowledge: A Denoising Autoencoder and Stacking-Based Framework for Customer Retention</title>
    <link>https://www.acadlore.com/article/IDA/2025_4_4/ida040404</link>
    <description>In a highly competitive telecommunications environment, customer behavior data has become an important source of organizational knowledge for service innovation and strategic decision-making. The ability to transform large-scale user data into actionable knowledge is essential for effective customer retention and sustainable business development. This study develops a knowledge discovery framework that integrates a denoising autoencoder with an enhanced stacking learning strategy to support customer retention innovation. The denoising autoencoder is employed to extract latent behavioral representations from complex and noisy user data, enabling the identification of underlying patterns that are difficult to capture through conventional statistical features. These latent representations are further combined with structured indicators and integrated through a stacking ensemble composed of decision trees, random forests, and XGBoost to achieve robust knowledge fusion. Empirical results show that the proposed framework provides more reliable identification of high-risk customers and improves decision support quality in terms of accuracy and area under curve (AUC). The study demonstrates how artificial intelligence can serve as a mechanism for organizational knowledge creation and offers practical implications for data-driven service innovation and resource allocation in the telecommunications sector.</description>
    <pubDate>2025-12-25</pubDate>
    <content:encoded>In a highly competitive telecommunications environment, customer behavior data has become an important source of organizational knowledge for service innovation and strategic decision-making. The ability to transform large-scale user data into actionable knowledge is essential for effective customer retention and sustainable business development. This study develops a knowledge discovery framework that integrates a denoising autoencoder with an enhanced stacking learning strategy to support customer retention innovation. The denoising autoencoder is employed to extract latent behavioral representations from complex and noisy user data, enabling the identification of underlying patterns that are difficult to capture through conventional statistical features. These latent representations are further combined with structured indicators and integrated through a stacking ensemble composed of decision trees, random forests, and XGBoost to achieve robust knowledge fusion. Empirical results show that the proposed framework provides more reliable identification of high-risk customers and improves decision support quality in terms of accuracy and area under curve (AUC). The study demonstrates how artificial intelligence can serve as a mechanism for organizational knowledge creation and offers practical implications for data-driven service innovation and resource allocation in the telecommunications sector.</content:encoded>
    <dc:title>From Data to Knowledge: A Denoising Autoencoder and Stacking-Based Framework for Customer Retention</dc:title>
    <dc:creator>Zhaohe Liu</dc:creator>
    <dc:identifier>doi: 10.56578/ida040404</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2025-12-25</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2025-12-25</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>4</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>224</prism:startingPage>
    <prism:doi>10.56578/ida040404</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2025_4_4/ida040404</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2025_4_4/ida040403">
    <title>Information Dynamics and Applications, 2025, Volume 4, Issue 4: An AI-Powered Adaptive Learning Framework for Personalized Education</title>
    <link>https://www.acadlore.com/article/IDA/2025_4_4/ida040403</link>
    <description>An adaptive learning framework driven by artificial intelligence (AI) was proposed in which cognitive, emotional, and cultural dimensions of learner diversity were jointly modeled to address heterogeneous educational needs in a personalized and inclusive manner. Within the proposed system, learner adaptation was achieved through the coordinated deployment of multiple machine learning paradigms: models based on Decision Trees (DTs) were employed to dynamically align instructional content with learners’ cognitive profiles, Recurrent Neural Networks (RNNs) were utilized to capture temporal patterns in emotional engagement, and Collaborative Filtering (CF) techniques were applied to accommodate cultural preferences. The framework operates as a continuously adaptive system, enabling instructional content to be refined based on learner data derived from a dataset comprising 10,000 students. Experimental evaluation demonstrated that the proposed approach yielded statistically significant improvements in learning outcomes when compared with conventional instructional methods. Specifically, mean quiz and assignment scores were increased by 15.7% and 14.4%, respectively, while emotional engagement indicators exhibited an improvement of 35.8%. In addition, cultural satisfaction metrics were enhanced by 24.2%. These results suggest that the synergistic integration of cognitive, emotional, and cultural adaptation mechanisms contributes substantively to academic performance gains, heightened learner engagement, and improved educational equity. Beyond performance improvements, the proposed framework is designed with scalability and robustness, allowing for deployment across personalized educational contexts. As such, the framework offers a viable pathway for the development of next-generation personalized education systems capable of supporting diverse learners at scale while maintaining pedagogical effectiveness and inclusivity.</description>
    <pubDate>2025-12-17</pubDate>
    <content:encoded>&lt;p&gt;An adaptive learning framework driven by artificial intelligence (AI) was proposed in which cognitive, emotional, and cultural dimensions of learner diversity were jointly modeled to address heterogeneous educational needs in a personalized and inclusive manner. Within the proposed system, learner adaptation was achieved through the coordinated deployment of multiple machine learning paradigms: models based on Decision Trees (DTs) were employed to dynamically align instructional content with learners’ cognitive profiles, Recurrent Neural Networks (RNNs) were utilized to capture temporal patterns in emotional engagement, and Collaborative Filtering (CF) techniques were applied to accommodate cultural preferences. The framework operates as a continuously adaptive system, enabling instructional content to be refined based on learner data derived from a dataset comprising 10,000 students. Experimental evaluation demonstrated that the proposed approach yielded statistically significant improvements in learning outcomes when compared with conventional instructional methods. Specifically, mean quiz and assignment scores were increased by 15.7% and 14.4%, respectively, while emotional engagement indicators exhibited an improvement of 35.8%. In addition, cultural satisfaction metrics were enhanced by 24.2%. These results suggest that the synergistic integration of cognitive, emotional, and cultural adaptation mechanisms contributes substantively to academic performance gains, heightened learner engagement, and improved educational equity. Beyond performance improvements, the proposed framework is designed with scalability and robustness, allowing for deployment across personalized educational contexts. 
As such, the framework offers a viable pathway for the development of next-generation personalized education systems capable of supporting diverse learners at scale while maintaining pedagogical effectiveness and inclusivity.&lt;/p&gt;</content:encoded>
    <dc:title>An AI-Powered Adaptive Learning Framework for Personalized Education</dc:title>
    <dc:creator>Habitam Asimare Sendeku</dc:creator>
    <dc:creator>Ravuri Daniel</dc:creator>
    <dc:creator>Gaddam Venu Gopal</dc:creator>
    <dc:identifier>doi: 10.56578/ida040403</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2025-12-17</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2025-12-17</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>4</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>212</prism:startingPage>
    <prism:doi>10.56578/ida040403</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2025_4_4/ida040403</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2025_4_4/ida040402">
    <title>Information Dynamics and Applications, 2025, Volume 4, Issue 4: A Hybrid Vision Transformer–Driven Feature Extraction and Machine Learning Framework for Automated Skin Burn Detection</title>
    <link>https://www.acadlore.com/article/IDA/2025_4_4/ida040402</link>
    <description>Skin burns represent a major clinical concern due to their association with pain, functional impairment, sensory damage, and even life-threatening complications. Early and accurate assessment is critical for first aid, clinical intervention, and the prevention of secondary complications. However, conventional burn diagnosis remains highly dependent on visual inspection and clinical expertise, which can introduce subjectivity and delay timely decision-making. To address these limitations, a hybrid automated skin burn detection framework was proposed, integrating transformer-based feature extraction with classical machine learning classification. In this framework, discriminative visual features were extracted using multiple Vision Transformer (ViT) architectures, including ViT-B/16, ViT-L/16, ViT-B/32, and DINOv2 (a self-supervised Vision Transformer model). The extracted features were subsequently fused. Given the resulting high-dimensional feature space, dimensionality reduction was performed using the Chi-square (Chi$^2$) algorithm, through which 500 features were retained, reducing computational complexity and mitigating the risk of model overfitting. The reduced feature set was then employed for burn classification using six classifiers. Model effectiveness was assessed using accuracy, precision, sensitivity, and F1-score metrics. Experimental results demonstrated that the Support Vector Machine (SVM) classifier achieved the highest classification performance, yielding an accuracy of 82.29%. Comparable yet slightly lower accuracies were observed for the Light Gradient Boosting Machine (LGBM) (80.51%) and Extreme Gradient Boosting (XGBoost) (80.17%) classifiers. Overall, the proposed hybrid model consistently outperformed baseline models, highlighting its superior discriminative capability. 
These findings indicate that the proposed framework holds strong potential for integration into clinical decision support systems, offering a reliable and objective tool for automated skin burn detection.</description>
    <pubDate>2025-12-11</pubDate>
    <content:encoded>&lt;p&gt;Skin burns represent a major clinical concern due to their association with pain, functional impairment, sensory damage, and even life-threatening complications. Early and accurate assessment is critical for first aid, clinical intervention, and the prevention of secondary complications. However, conventional burn diagnosis remains highly dependent on visual inspection and clinical expertise, which can introduce subjectivity and delay timely decision-making. To address these limitations, a hybrid automated skin burn detection framework was proposed, integrating transformer-based feature extraction with classical machine learning classification. In this framework, discriminative visual features were extracted using multiple Vision Transformer (ViT) architectures, including ViT-B/16, ViT-L/16, ViT-B/32, and DINOv2 (a self-supervised Vision Transformer model). The extracted features were subsequently fused. Given the resulting high-dimensional feature space, dimensionality reduction was performed using the Chi-square (Chi$^2$) algorithm, through which 500 features were retained, reducing computational complexity and mitigating the risk of model overfitting. The reduced feature set was then employed for burn classification using six classifiers. Model effectiveness was assessed using accuracy, precision, sensitivity, and F1-score metrics. Experimental results demonstrated that the Support Vector Machine (SVM) classifier achieved the highest classification performance, yielding an accuracy of 82.29%. Comparable yet slightly lower accuracies were observed for the Light Gradient Boosting Machine (LGBM) (80.51%) and Extreme Gradient Boosting (XGBoost) (80.17%) classifiers. Overall, the proposed hybrid model consistently outperformed baseline models, highlighting its superior discriminative capability. 
These findings indicate that the proposed framework holds strong potential for integration into clinical decision support systems, offering a reliable and objective tool for automated skin burn detection.&lt;/p&gt;</content:encoded>
    <dc:title>A Hybrid Vision Transformer–Driven Feature Extraction and Machine Learning Framework for Automated Skin Burn Detection</dc:title>
    <dc:creator>Ahmet Yasir Duman</dc:creator>
    <dc:creator>Mucahit Karaduman</dc:creator>
    <dc:creator>Muhammed Yildirim</dc:creator>
    <dc:identifier>doi: 10.56578/ida040402</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2025-12-11</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2025-12-11</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>4</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>201</prism:startingPage>
    <prism:doi>10.56578/ida040402</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2025_4_4/ida040402</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2025_4_4/ida040401">
    <title>Information Dynamics and Applications, 2025, Volume 4, Issue 4: A Statistically Rigorous Comparison of MobileNetV2 and EfficientNet-B0 for Facial Expression Recognition on the FER2013 Benchmark</title>
    <link>https://www.acadlore.com/article/IDA/2025_4_4/ida040401</link>
    <description>Facial expression recognition (FER) remains a challenging problem in computer vision owing to subtle inter-class visual differences, substantial intra-class variability, and severe class imbalance in commonly adopted benchmark datasets. In this study, a statistically rigorous comparative evaluation of two pretrained Convolutional Neural Network (CNN) architectures, MobileNetV2 and EfficientNet-B0, was conducted using the FER2013 dataset. To ensure methodological fairness and reproducibility, both architectures were fine-tuned and evaluated under strictly identical experimental conditions. Model performance was systematically assessed using overall classification accuracy and macro-averaged precision, recall, and F1-score to account for class imbalance, complemented by confusion matrix analysis and multi-class receiver operating characteristic area under the curve (ROC–AUC) evaluation. Beyond conventional performance reporting, the reliability and robustness of the observed differences were examined through McNemar’s test and paired bootstrap confidence intervals (CIs). The experimental results demonstrate that EfficientNet-B0 consistently outperforms MobileNetV2 across all evaluation criteria. Statistical analysis confirms that the observed performance gains are significant at the 5% significance level. These findings provide empirically grounded evidence for informed model selection in FER tasks and highlight the importance of integrating statistical validation into comparative deep learning studies. The results further suggest that EfficientNet-B0 offers a favorable balance between recognition accuracy and computational efficiency, making it a compelling candidate for real-world FER applications, including human–computer interaction, affect-aware systems, and assistive computing environments.</description>
    <pubDate>12-04-2025</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Facial expression recognition (FER) remains a challenging problem in computer vision owing to subtle inter-class visual differences, substantial intra-class variability, and severe class imbalance in commonly adopted benchmark datasets. In this study, a statistically rigorous comparative evaluation of two pretrained Convolutional Neural Network (CNN) architectures, MobileNetV2 and EfficientNet-B0, was conducted using the FER2013 dataset. To ensure methodological fairness and reproducibility, both architectures were fine-tuned and evaluated under strictly identical experimental conditions. Model performance was systematically assessed using overall classification accuracy and macro-averaged precision, recall, and F1-score to account for class imbalance, complemented by confusion matrix analysis and multi-class receiver operating characteristic area under the curve (ROC–AUC) evaluation. Beyond conventional performance reporting, the reliability and robustness of the observed differences were examined through McNemar’s test and paired bootstrap confidence intervals (CIs). The experimental results demonstrate that EfficientNet-B0 consistently outperforms MobileNetV2 across all evaluation criteria. Statistical analysis confirms that the observed performance gains are significant at the 5% significance level. These findings provide empirically grounded evidence for informed model selection in FER tasks and highlight the importance of integrating statistical validation into comparative deep learning studies. The results further suggest that EfficientNet-B0 offers a favorable balance between recognition accuracy and computational efficiency, making it a compelling candidate for real-world FER applications, including human–computer interaction, affect-aware systems, and assistive computing environments.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>A Statistically Rigorous Comparison of MobileNetV2 and EfficientNet-B0 for Facial Expression Recognition on the FER2013 Benchmark</dc:title>
    <dc:creator>Deepa Dhondu Mandave</dc:creator>
    <dc:creator>Lalit Vasantrao Patil</dc:creator>
    <dc:identifier>doi: 10.56578/ida040401</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>12-04-2025</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>12-04-2025</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>4</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>189</prism:startingPage>
    <prism:doi>10.56578/ida040401</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2025_4_4/ida040401</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2025_4_3/ida040305">
    <title>Information Dynamics and Applications, 2025, Volume 4, Issue 3, Pages 173-188: ChaosL: A Grammar-Based Precision-Aware Programming Language for Reliable Computation in Chaotic Systems</title>
    <link>https://www.acadlore.com/article/IDA/2025_4_3/ida040305</link>
    <description>This study introduces a grammar-based, chaotic-oriented programming language, termed ChaosL, to address persistent numerical precision and reproducibility challenges in the computational analysis of chaotic systems. The language, along with its compiler and parser, is designed end-to-end with consideration of chaotic maps. Numerical accuracy is systematically managed through grammar-level precision specification and automated error monitoring mechanisms, enabling exact control over floating-point representations, including single precision, double precision, and arbitrary-precision BigDecimal arithmetic with configurable decimal resolution of up to 100 digits. The proposed grammar natively supports ten widely studied one-dimensional and two-dimensional discrete chaotic maps, which may be composed using newly defined hybrid composition paradigms, namely alternate, blend, cascade, and feedback-driven coupling. To ensure computational reliability, multiple error assessment strategies are integrated, including direct error estimation, shadow computation, and interval arithmetic. In addition, ensemble-based simulation capabilities are incorporated to evaluate trajectory separation and estimate predictability horizons. The automated computation of Lyapunov exponents is embedded at the language level, achieving an accuracy of up to 99.6% while simultaneously enabling code-size reductions of approximately 85–92%. The adaptable architecture of ChaosL establishes a reproducible computational framework for discrete chaos research and facilitates the systematic identification of emergent behaviors in hybrid dynamical systems. Moreover, the design provides a scalable foundation for future extensions toward continuous-time systems, interactive visualization environments, and cloud-based collaborative experimentation, thereby advancing precision-aware computational practices in nonlinear dynamics and chaos theory.</description>
    <pubDate>09-29-2025</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;This study introduces a grammar-based, chaotic-oriented programming language, termed ChaosL, to address persistent numerical precision and reproducibility challenges in the computational analysis of chaotic systems. The language, along with its compiler and parser, is designed end-to-end with consideration of chaotic maps. Numerical accuracy is systematically managed through grammar-level precision specification and automated error monitoring mechanisms, enabling exact control over floating-point representations, including single precision, double precision, and arbitrary-precision BigDecimal arithmetic with configurable decimal resolution of up to 100 digits. The proposed grammar natively supports ten widely studied one-dimensional and two-dimensional discrete chaotic maps, which may be composed using newly defined hybrid composition paradigms, namely alternate, blend, cascade, and feedback-driven coupling. To ensure computational reliability, multiple error assessment strategies are integrated, including direct error estimation, shadow computation, and interval arithmetic. In addition, ensemble-based simulation capabilities are incorporated to evaluate trajectory separation and estimate predictability horizons. The automated computation of Lyapunov exponents is embedded at the language level, achieving an accuracy of up to 99.6% while simultaneously enabling code-size reductions of approximately 85–92%. The adaptable architecture of ChaosL establishes a reproducible computational framework for discrete chaos research and facilitates the systematic identification of emergent behaviors in hybrid dynamical systems. 
Moreover, the design provides a scalable foundation for future extensions toward continuous-time systems, interactive visualization environments, and cloud-based collaborative experimentation, thereby advancing precision-aware computational practices in nonlinear dynamics and chaos theory.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>ChaosL: A Grammar-Based Precision-Aware Programming Language for Reliable Computation in Chaotic Systems</dc:title>
    <dc:creator>Samar Amil Qassir</dc:creator>
    <dc:identifier>doi: 10.56578/ida040305</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>09-29-2025</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>09-29-2025</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>3</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>173</prism:startingPage>
    <prism:doi>10.56578/ida040305</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2025_4_3/ida040305</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2025_4_3/ida040304">
    <title>Information Dynamics and Applications, 2025, Volume 4, Issue 3, Pages 161-172: Demand Identification and Service Optimization in AI-Powered Banking Customer Service: A Kano Model-Based Approach</title>
    <link>https://www.acadlore.com/article/IDA/2025_4_3/ida040304</link>
    <description>To strengthen competitiveness in the digital banking environment, the functional prioritization and optimization of customer service systems enabled by artificial intelligence (AI) must be systematically examined from a user-demand perspective. In this study, user requirements for AI-powered banking customer service were identified, classified, and prioritized through the Kano model combined with a structured questionnaire survey. Four functional dimensions comprising fourteen sub-functions were evaluated to determine their respective impacts on user satisfaction. The results demonstrate that priority should be assigned to rapid transfer to human agents, high response accuracy, risk alerts, service continuity and stability, privacy protection, secure identity verification, rapid response speed, comprehensive business coverage, multi-turn dialogue capability, and accurate user intent understanding. Based on these findings, a set of optimization strategies was proposed. A precise knowledge base should be constructed, and a high-availability system architecture should be deployed. Key algorithmic challenges related to semantic understanding and multi-turn dialogue management should be addressed. Full business-scenario coverage was recommended, while tiered authentication mechanisms and proactive risk-alert strategies should be implemented. Investment in non-core functions may be strategically deferred to achieve optimal resource allocation. By systematically categorizing user demand attributes and clarifying functional priorities, this study provides a robust theoretical foundation and practical decision-making framework for banks seeking to optimize AI-powered customer service systems and maximize user satisfaction in resource-constrained digital environments.</description>
    <pubDate>09-12-2025</pubDate>
    <content:encoded>&lt;![CDATA[ To strengthen competitiveness in the digital banking environment, the functional prioritization and optimization of customer service systems enabled by artificial intelligence (AI) must be systematically examined from a user-demand perspective. In this study, user requirements for AI-powered banking customer service were identified, classified, and prioritized through the Kano model combined with a structured questionnaire survey. Four functional dimensions comprising fourteen sub-functions were evaluated to determine their respective impacts on user satisfaction. The results demonstrate that priority should be assigned to rapid transfer to human agents, high response accuracy, risk alerts, service continuity and stability, privacy protection, secure identity verification, rapid response speed, comprehensive business coverage, multi-turn dialogue capability, and accurate user intent understanding. Based on these findings, a set of optimization strategies was proposed. A precise knowledge base should be constructed, and a high-availability system architecture should be deployed. Key algorithmic challenges related to semantic understanding and multi-turn dialogue management should be addressed. Full business-scenario coverage was recommended, while tiered authentication mechanisms and proactive risk-alert strategies should be implemented. Investment in non-core functions may be strategically deferred to achieve optimal resource allocation. By systematically categorizing user demand attributes and clarifying functional priorities, this study provides a robust theoretical foundation and practical decision-making framework for banks seeking to optimize AI-powered customer service systems and maximize user satisfaction in resource-constrained digital environments. ]]&gt;</content:encoded>
    <dc:title>Demand Identification and Service Optimization in AI-Powered Banking Customer Service: A Kano Model-Based Approach</dc:title>
    <dc:creator>Yuchen Cao</dc:creator>
    <dc:identifier>doi: 10.56578/ida040304</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>09-12-2025</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>09-12-2025</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>3</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>161</prism:startingPage>
    <prism:doi>10.56578/ida040304</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2025_4_3/ida040304</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2025_4_3/ida040303">
    <title>Information Dynamics and Applications, 2025, Volume 4, Issue 3, Pages 148-160: Tomato Yield Optimization Using Hybrid Nonlinear Fuzzy Modeling in Mountainous Regions</title>
    <link>https://www.acadlore.com/article/IDA/2025_4_3/ida040303</link>
    <description>Tomato farming in Upper Dir, a mountainous region of Khyber Pakhtunkhwa in Pakistan, faces significant agro-ecological challenges such as fluctuating temperatures, irregular rainfall, soil infertility, and limited access to modern farming techniques. The region has complex topography, characterized by steep slopes and varying elevations, which further constrains agricultural planning and productivity. To address these issues, this study proposed a Hybrid Nonlinear Environmental Response Model (H-NERM) integrated with a Fuzzy Logic–Based Decision Support System (FL-DSS), to cater for the unique agro-climatic conditions in this area. The model was validated with comprehensive field and climate data collected from 2020 to 2024, including soil samples from 30 agricultural sites, 5-year meteorological records from the Pakistan Meteorological Department (PMD), and farmer-reported tomato yield across Upper Dir. All simulations were performed in Matrix Laboratory (MATLAB) R2015a using the Fuzzy Logic Toolbox and custom nonlinear solvers. Comparative analysis was conducted with conventionally regression-based and rule-based decision systems to evaluate model performance. Results demonstrated that the proposed H-NERM + FL-DSS framework significantly enhanced accuracy of yield prediction, optimized irrigation efficiency, and improved resilience to climate variability. The model provides a robust, data-driven, and scalable solution for sustainable tomato farming in Upper Dir, with strong potential for application in other mountainous or climate-sensitive agricultural regions.</description>
    <pubDate>09-04-2025</pubDate>
    <content:encoded>&lt;![CDATA[ Tomato farming in Upper Dir, a mountainous region of Khyber Pakhtunkhwa in Pakistan, faces significant agro-ecological challenges such as fluctuating temperatures, irregular rainfall, soil infertility, and limited access to modern farming techniques. The region has complex topography, characterized by steep slopes and varying elevations, which further constrains agricultural planning and productivity. To address these issues, this study proposed a Hybrid Nonlinear Environmental Response Model (H-NERM) integrated with a Fuzzy Logic–Based Decision Support System (FL-DSS), to cater for the unique agro-climatic conditions in this area. The model was validated with comprehensive field and climate data collected from 2020 to 2024, including soil samples from 30 agricultural sites, 5-year meteorological records from the Pakistan Meteorological Department (PMD), and farmer-reported tomato yield across Upper Dir. All simulations were performed in Matrix Laboratory (MATLAB) R2015a using the Fuzzy Logic Toolbox and custom nonlinear solvers. Comparative analysis was conducted with conventionally regression-based and rule-based decision systems to evaluate model performance. Results demonstrated that the proposed H-NERM + FL-DSS framework significantly enhanced accuracy of yield prediction, optimized irrigation efficiency, and improved resilience to climate variability. The model provides a robust, data-driven, and scalable solution for sustainable tomato farming in Upper Dir, with strong potential for application in other mountainous or climate-sensitive agricultural regions. ]]&gt;</content:encoded>
    <dc:title>Tomato Yield Optimization Using Hybrid Nonlinear Fuzzy Modeling in Mountainous Regions</dc:title>
    <dc:creator>Atta Ullah</dc:creator>
    <dc:identifier>doi: 10.56578/ida040303</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>09-04-2025</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>09-04-2025</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>3</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>148</prism:startingPage>
    <prism:doi>10.56578/ida040303</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2025_4_3/ida040303</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2025_4_3/ida040302">
    <title>Information Dynamics and Applications, 2025, Volume 4, Issue 3, Pages 139-147: Application of Deep Learning Techniques in the Diagnosis and Grading of Knee Osteoarthritis (OA)</title>
    <link>https://www.acadlore.com/article/IDA/2025_4_3/ida040302</link>
    <description>Osteoarthritis (OA) affects approximately 240 million individuals globally. Knee osteoarthritis, a crippling ailment marked by joint stiffness, discomfort, and functional impairment, is particularly the most widespread kind of arthritis among the elderly. To assess the severity of this disease, physical symptoms, medical history, and further joint screening examinations including radiography, Magnetic Resonance Imaging (MRI), and Computed Tomography (CT) scans have frequently been considered. It is difficult to identify early development of this disease as conventional diagnostic methods could be subjective. Therefore, doctors utilize the Kellgren and Lawrence (KL) scale to evaluate the severity of knee OA with visual images obtained from X-ray or MRI. The detection and prediction of the severity of knee OA indeed requires a novel model that uses deep learning models, including Inception and Xception. Utilizing the KL grading scale, the model, including Xception, ResNet-50, and Inception-ResNet-v2 could determine the degree of knee OA suffered by patients. The experimental results revealed that the Xception network achieved the highest classification accuracy of 67%, surpassing ResNet-50 and Inception-ResNet-v2, demonstrating its superior ability to automatically grade OA severity from radiographic images.</description>
    <pubDate>08-25-2025</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Osteoarthritis (OA) affects approximately 240 million individuals globally. Knee osteoarthritis, a crippling ailment marked by joint stiffness, discomfort, and functional impairment, is particularly the most widespread kind of arthritis among the elderly. To assess the severity of this disease, physical symptoms, medical history, and further joint screening examinations including radiography, Magnetic Resonance Imaging (MRI), and Computed Tomography (CT) scans have frequently been considered. It is difficult to identify early development of this disease as conventional diagnostic methods could be subjective. Therefore, doctors utilize the Kellgren and Lawrence (KL) scale to evaluate the severity of knee OA with visual images obtained from X-ray or MRI. The detection and prediction of the severity of knee OA indeed requires a novel model that uses deep learning models, including Inception and Xception. Utilizing the KL grading scale, the model, including Xception, ResNet-50, and Inception-ResNet-v2 could determine the degree of knee OA suffered by patients. The experimental results revealed that the Xception network achieved the highest classification accuracy of 67%, surpassing ResNet-50 and Inception-ResNet-v2, demonstrating its superior ability to automatically grade OA severity from radiographic images.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Application of Deep Learning Techniques in the Diagnosis and Grading of Knee Osteoarthritis (OA)</dc:title>
    <dc:creator>Varshita Yeddula</dc:creator>
    <dc:creator>Ranganadha Reddy Aluru</dc:creator>
    <dc:creator>Parvathi Devi Budda</dc:creator>
    <dc:identifier>doi: 10.56578/ida040302</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>08-25-2025</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>08-25-2025</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>3</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>139</prism:startingPage>
    <prism:doi>10.56578/ida040302</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2025_4_3/ida040302</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2025_4_3/ida040301">
    <title>Information Dynamics and Applications, 2025, Volume 4, Issue 3, Pages 127-138: Data Privacy and Security in the Age of Big Data Techniques for Ensuring Confidentiality in Large Scale Analytics</title>
    <link>https://www.acadlore.com/article/IDA/2025_4_3/ida040301</link>
    <description>Dealing with privacy and security becomes more complicated nowadays with the emergence of big data era. Privacy, data value, and system efficiency should be managed using multiple solutions in current analytics. In this paper, privacy-preserving techniques were selected and reviewed for big data analysis to reduce threats imposed on healthcare data. Various security solutions, including k-anonymity, differential privacy, homomorphic encryption, and secure multi-party computation (SMPC), were programmed and examined using the Medical Information Mart for Intensive Care III (MIMIC-III) healthcare dataset. Assessments were conducted cautiously for each method of data collection in respect of security, time required, capacity of handling large data sets, usefulness of the data, and compliance with regulations. By using differential privacy, it was possible to maintain a balance between privacy and utility by allocating additional resources to the program. The security of data was facilitated by homomorphic encryption though it was not easy to operate and reduce the speed of computer systems. Moreover, achieving scalability in the SMPC required a significant amount of computing power. Although k-anonymity enhanced data utility, it was vulnerable to certain types of attacks. Protecting privacy in big data would limit the performance of systems; for multiple copies of data, scientists can now utilize analytics, differential privacy, and the SMPC, which is highly effective for analyzing private data. However, such approaches should be further optimized to handle real-time processing in big data applications. Experimental evaluation showed that processing 10,000 patient records using differential privacy took an average of 2.3 seconds per query and retained 92% of data utility, while homomorphic encryption required 15.7 seconds per query with 88% utility retention. 
The SMPC achieved a high degree of privacy with 12.5 seconds per query but slightly reduced scalability. As recommended in this study, the implementation of privacy-focused solutions in big data could help researchers and companies establish appropriate privacy policies in healthcare and other similar areas.</description>
    <pubDate>08-14-2025</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Dealing with privacy and security becomes more complicated nowadays with the emergence of big data era. Privacy, data value, and system efficiency should be managed using multiple solutions in current analytics. In this paper, privacy-preserving techniques were selected and reviewed for big data analysis to reduce threats imposed on healthcare data. Various security solutions, including k-anonymity, differential privacy, homomorphic encryption, and secure multi-party computation (SMPC), were programmed and examined using the Medical Information Mart for Intensive Care III (MIMIC-III) healthcare dataset. Assessments were conducted cautiously for each method of data collection in respect of security, time required, capacity of handling large data sets, usefulness of the data, and compliance with regulations. By using differential privacy, it was possible to maintain a balance between privacy and utility by allocating additional resources to the program. The security of data was facilitated by homomorphic encryption though it was not easy to operate and reduce the speed of computer systems. Moreover, achieving scalability in the SMPC required a significant amount of computing power. Although k-anonymity enhanced data utility, it was vulnerable to certain types of attacks. Protecting privacy in big data would limit the performance of systems; for multiple copies of data, scientists can now utilize analytics, differential privacy, and the SMPC, which is highly effective for analyzing private data. However, such approaches should be further optimized to handle real-time processing in big data applications. Experimental evaluation showed that processing 10,000 patient records using differential privacy took an average of 2.3 seconds per query and retained 92% of data utility, while homomorphic encryption required 15.7 seconds per query with 88% utility retention. 
The SMPC achieved a high degree of privacy with 12.5 seconds per query but slightly reduced scalability. As recommended in this study, the implementation of privacy-focused solutions in big data could help researchers and companies establish appropriate privacy policies in healthcare and other similar areas.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Data Privacy and Security in the Age of Big Data Techniques for Ensuring Confidentiality in Large Scale Analytics</dc:title>
    <dc:creator>Anil Kumar Pallikonda</dc:creator>
    <dc:creator>Vinay Kumar Bandarapalli</dc:creator>
    <dc:creator>Vipparla Aruna</dc:creator>
    <dc:identifier>doi: 10.56578/ida040301</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>08-14-2025</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>08-14-2025</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>3</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>127</prism:startingPage>
    <prism:doi>10.56578/ida040301</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2025_4_3/ida040301</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2025_4_2/ida040205">
    <title>Information Dynamics and Applications, 2025, Volume 4, Issue 2, Pages 115-126: A Computational Framework for Apple Detection Using Fuzzy Logic and Structural Cues</title>
    <link>https://www.acadlore.com/article/IDA/2025_4_2/ida040205</link>
    <description>Accurate and reliable detection of apples in complex orchard environments remains a challenging task due to varying illumination, cluttering backgrounds, and overlapping fruits. In this paper, the difficulties were tackled with a novel edge-enhanced detection framework proposed to integrate dynamic image smoothing, entropy-based edge amplification, and directional energy-driven contour extraction. An adaptive smoothing filter was adopted with a sigmoid-based weighting function to selectively preserve edge structures while suppressing noise in homogeneous regions. The input of Red Green Blue (RGB) image was subsequently transformed into the Hue, Saturation, and Value (HSV) color space to exploit hue information, thereby improving color-based feature discrimination. The introduction of a hybrid entropy-weighted gradient scheme helped strengthen edge detection, that is, the local image entropy modulated gradient magnitudes to emphasize structured regions. A global threshold was then applied to refine the enhanced edge map. Ultimately, continuous apple contours were extracted using a direction-constrained energy propagation approach, in which connected edge pixels were traced according to compass orientations, thus ensuring accurate contour assembly even under occlusion or low contrast. Experimental evaluations confirmed that the proposed framework substantially improved the accuracy of boundary detection across diverse imaging conditions; its potential application in automated fruit detection and precision harvesting was therefore highlighted.</description>
    <pubDate>06-29-2025</pubDate>
    <content:encoded>&lt;![CDATA[ Accurate and reliable detection of apples in complex orchard environments remains a challenging task due to varying illumination, cluttering backgrounds, and overlapping fruits. In this paper, the difficulties were tackled with a novel edge-enhanced detection framework proposed to integrate dynamic image smoothing, entropy-based edge amplification, and directional energy-driven contour extraction. An adaptive smoothing filter was adopted with a sigmoid-based weighting function to selectively preserve edge structures while suppressing noise in homogeneous regions. The input of Red Green Blue (RGB) image was subsequently transformed into the Hue, Saturation, and Value (HSV) color space to exploit hue information, thereby improving color-based feature discrimination. The introduction of a hybrid entropy-weighted gradient scheme helped strengthen edge detection, that is, the local image entropy modulated gradient magnitudes to emphasize structured regions. A global threshold was then applied to refine the enhanced edge map. Ultimately, continuous apple contours were extracted using a direction-constrained energy propagation approach, in which connected edge pixels were traced according to compass orientations, thus ensuring accurate contour assembly even under occlusion or low contrast. Experimental evaluations confirmed that the proposed framework substantially improved the accuracy of boundary detection across diverse imaging conditions; its potential application in automated fruit detection and precision harvesting was therefore highlighted. ]]&gt;</content:encoded>
    <dc:title>A Computational Framework for Apple Detection Using Fuzzy Logic and Structural Cues</dc:title>
    <dc:creator>Kai Siong Yow</dc:creator>
    <dc:identifier>doi: 10.56578/ida040205</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>06-29-2025</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>06-29-2025</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>115</prism:startingPage>
    <prism:doi>10.56578/ida040205</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2025_4_2/ida040205</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2025_4_2/ida040204">
    <title>Information Dynamics and Applications, 2025, Volume 4, Issue 2: Novel Performance-Based Hyperparameter Optimization with the Use of Bounding Box Tuner (BBT)</title>
    <link>https://www.acadlore.com/article/IDA/2025_4_2/ida040204</link>
    <description>Hyperparameter search was found not making good use of compute resources as surrogate-based optimizers consume extensive memory and demand long set-up time. Meanwhile, projects running with fixed budgets require lean tuning tools. The current study presents Bounding Box Tuner (BBT) and conducts tests of its capability to attain maximum validation accuracy while reducing tuning time and memory use. The project team compared BBT with Random Search, Gaussian Processes for Bayesian Optimization, Tree-Structured Parzen Estimator (TPE), Evolutionary Search and Local Search to decide on the optimum option. Modified National Institute of Standards and Technology (MNIST) classification with a multilayer perceptron (0.11 M weights) and Tiny Vision Transformer (TinyViT) (9.5 M weights) were adopted. Each optimizer was assigned to run 50 trials. During the trial, early pruning stopped a run if validation loss rose for four epochs. All tests applied one NVIDIA GTX 1650 Ti GPU; the key metrics for measurement included best validation accuracy, total search time, and time per trial. As regards the perceptron task, BBT reached 97.88% validation accuracy in 1994 s whereas TPE obtained 97.98% in 2976 s. Concerning TinyViT, BBT achieved 94.92% in 2364 s, and GP-Bayesian gained 94.66% in 2191 s. It was discovered that BBT kept accuracy within 0.1 percentage points of the best competitor and reduced tuning time by one-third. The algorithm renders the surrogate model unnecessary, enforces constraints by design and exposes solely three user parameters. Supported by the evidence of these benefits, BBT was considered to be a practical option for rapid and resource-aware hyperparameter optimization in deep-learning pipelines.</description>
    <pubDate>06-29-2025</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Hyperparameter search was found not making good use of compute resources as surrogate-based optimizers consume extensive memory and demand long set-up time. Meanwhile, projects running with fixed budgets require lean tuning tools. The current study presents Bounding Box Tuner (BBT) and conducts tests of its capability to attain maximum validation accuracy while reducing tuning time and memory use. The project team compared BBT with Random Search, Gaussian Processes for Bayesian Optimization, Tree-Structured Parzen Estimator (TPE), Evolutionary Search and Local Search to decide on the optimum option. Modified National Institute of Standards and Technology (MNIST) classification with a multilayer perceptron (0.11 M weights) and Tiny Vision Transformer (TinyViT) (9.5 M weights) were adopted. Each optimizer was assigned to run 50 trials. During the trial, early pruning stopped a run if validation loss rose for four epochs. All tests applied one NVIDIA GTX 1650 Ti GPU; the key metrics for measurement included best validation accuracy, total search time, and time per trial. As regards the perceptron task, BBT reached 97.88% validation accuracy in 1994 s whereas TPE obtained 97.98% in 2976 s. Concerning TinyViT, BBT achieved 94.92% in 2364 s, and GP-Bayesian gained 94.66% in 2191 s. It was discovered that BBT kept accuracy within 0.1 percentage points of the best competitor and reduced tuning time by one-third. The algorithm renders the surrogate model unnecessary, enforces constraints by design and exposes solely three user parameters. Supported by the evidence of these benefits, BBT was considered to be a practical option for rapid and resource-aware hyperparameter optimization in deep-learning pipelines.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Novel Performance-Based Hyperparameter Optimization with the Use of Bounding Box Tuner (BBT)</dc:title>
    <dc:creator>Abdulvahap Mutlu</dc:creator>
    <dc:creator>Şengül Doğan</dc:creator>
    <dc:creator>Türker Tuncer</dc:creator>
    <dc:identifier>doi: 10.56578/ida040204</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>06-29-2025</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>06-29-2025</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>95</prism:startingPage>
    <prism:doi>10.56578/ida040204</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2025_4_2/ida040204</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2025_4_2/ida040203">
    <title>Information Dynamics and Applications, 2025, Volume 4, Issue 2: Enhancing Electoral Integrity and Accessibility: A Blockchain and Facial Recognition-Based Electronic Voting System</title>
    <link>https://www.acadlore.com/article/IDA/2025_4_2/ida040203</link>
    <description>A novel electronic voting system (EVS) was developed by integrating blockchain technology and advanced facial recognition to enhance electoral security, transparency, and accessibility. The system integrates a public, permissionless blockchain—specifically the Ethereum platform—to ensure end-to-end transparency and immutability throughout the voting lifecycle. To reinforce identity verification while preserving voter privacy, a facial recognition technology based on the ArcFace algorithm was employed. This biometric approach enables secure, contactless voter authentication, mitigating risks associated with identity fraud and multiple voting attempts. The confluence of blockchain technology and facial recognition in a unified architecture was shown to improve system robustness against tampering, data breaches, and unauthorized access. The proposed system was designed within a rigorous research framework, and its technical implementation was critically assessed in terms of security performance, scalability, user accessibility, and system latency. Furthermore, potential ethical implications and privacy considerations were addressed through the use of decentralized identity management and encrypted biometric data storage. The integration strategy not only enhances the verifiability and auditability of election outcomes but also promotes greater inclusivity by enabling remote participation without compromising system integrity. This study contributes to the evolving field of electronic voting by demonstrating how advanced biometric verification and distributed ledger technologies can be synchronously leveraged to support democratic processes. The findings are expected to inform future deployments of secure, accessible, and transparent electoral platforms, offering practical insights for governments, policymakers, and technology developers aiming to modernize electoral systems in a post-digital era.</description>
    <pubDate>05-22-2025</pubDate>
    <content:encoded>&lt;![CDATA[ A novel electronic voting system (EVS) was developed by integrating blockchain technology and advanced facial recognition to enhance electoral security, transparency, and accessibility. The system integrates a public, permissionless blockchain—specifically the Ethereum platform—to ensure end-to-end transparency and immutability throughout the voting lifecycle. To reinforce identity verification while preserving voter privacy, a facial recognition technology based on the ArcFace algorithm was employed. This biometric approach enables secure, contactless voter authentication, mitigating risks associated with identity fraud and multiple voting attempts. The confluence of blockchain technology and facial recognition in a unified architecture was shown to improve system robustness against tampering, data breaches, and unauthorized access. The proposed system was designed within a rigorous research framework, and its technical implementation was critically assessed in terms of security performance, scalability, user accessibility, and system latency. Furthermore, potential ethical implications and privacy considerations were addressed through the use of decentralized identity management and encrypted biometric data storage. The integration strategy not only enhances the verifiability and auditability of election outcomes but also promotes greater inclusivity by enabling remote participation without compromising system integrity. This study contributes to the evolving field of electronic voting by demonstrating how advanced biometric verification and distributed ledger technologies can be synchronously leveraged to support democratic processes. The findings are expected to inform future deployments of secure, accessible, and transparent electoral platforms, offering practical insights for governments, policymakers, and technology developers aiming to modernize electoral systems in a post-digital era. ]]&gt;</content:encoded>
    <dc:title>Enhancing Electoral Integrity and Accessibility: A Blockchain and Facial Recognition-Based Electronic Voting System</dc:title>
    <dc:creator>Samit Paudel</dc:creator>
    <dc:creator>Ashwin Poudel</dc:creator>
    <dc:creator>Sanjaya Paudel</dc:creator>
    <dc:identifier>doi: 10.56578/ida040203</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>05-22-2025</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>05-22-2025</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>85</prism:startingPage>
    <prism:doi>10.56578/ida040203</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2025_4_2/ida040203</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2025_4_2/ida040202">
    <title>Information Dynamics and Applications, 2025, Volume 4, Issue 2: A Model for Energy Consumption Estimation in Computer Systems: The CMP Approach</title>
    <link>https://www.acadlore.com/article/IDA/2025_4_2/ida040202</link>
    <description>To address the issue of estimating energy consumption in computer systems, this study investigates the contribution of various hardware parameters to energy fluctuations, as well as the correlation between these parameters. Based on this analysis, the CMP model was proposed, selecting the most representative and monitorable parameters that reflect changes in system energy consumption. The CMP (Chip Multiprocessors) model adapts to different task states of the computer system by identifying primary components driving energy consumption under varying conditions. Energy consumption estimation was then conducted by monitoring these dominant parameters. Experiments across various task states demonstrate that the CMP model outperforms traditional FAN (Fuzzy Attack Net) and Cubic models, particularly when the computer system engages in data-intensive tasks.</description>
    <pubDate>05-14-2025</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;To address the issue of estimating energy consumption in computer systems, this study investigates the contribution of various hardware parameters to energy fluctuations, as well as the correlation between these parameters. Based on this analysis, the CMP model was proposed, selecting the most representative and monitorable parameters that reflect changes in system energy consumption. The CMP (Chip Multiprocessors) model adapts to different task states of the computer system by identifying primary components driving energy consumption under varying conditions. Energy consumption estimation was then conducted by monitoring these dominant parameters. Experiments across various task states demonstrate that the CMP model outperforms traditional FAN (Fuzzy Attack Net) and Cubic models, particularly when the computer system engages in data-intensive tasks.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>A Model for Energy Consumption Estimation in Computer Systems: The CMP Approach</dc:title>
    <dc:creator>Suping Yu</dc:creator>
    <dc:creator>Weijing Wang</dc:creator>
    <dc:creator>Weiwei Mao</dc:creator>
    <dc:identifier>doi: 10.56578/ida040202</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>05-14-2025</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>05-14-2025</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>76</prism:startingPage>
    <prism:doi>10.56578/ida040202</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2025_4_2/ida040202</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2025_4_2/ida040201">
    <title>Information Dynamics and Applications, 2025, Volume 4, Issue 2: Crowd Density Estimation via a VGG-16-Based CSRNet Model</title>
    <link>https://www.acadlore.com/article/IDA/2025_4_2/ida040201</link>
    <description>Accurate crowd density estimation has become critical in applications ranging from intelligent urban planning and public safety monitoring to marketing analytics and emergency response. In recent developments, various methods have been used to enhance the precision of crowd analysis systems. In this study, a Convolutional Neural Network (CNN)-based approach was presented for crowd density detection, wherein the Congested Scene Recognition Network (CSRNet) architecture was employed with a Visual Geometry Group (VGG)-16 backbone. This method was applied to two benchmark datasets—Mall and Crowd-UIT—to assess its effectiveness in real-world crowd scenarios. Density maps were generated to visualize spatial distributions, and performance was quantitatively evaluated using Mean Squared Error (MSE) and Mean Absolute Error (MAE) metrics. For the Mall dataset, the model achieved an MSE of 0.08 and an MAE of 0.10, while for the Crowd-UIT dataset, an MSE of 0.05 and an MAE of 0.15 were obtained. These results suggest that the proposed VGG-16-based CSRNet model yields high accuracy in crowd estimation tasks across varied environments and crowd densities. Additionally, the model demonstrates robustness in generalizing across different dataset characteristics, indicating its potential applicability in both surveillance systems and public space management. The outcomes of this investigation offer a promising direction for future research in data-driven crowd analysis, particularly in enhancing predictive reliability and real-time deployment capabilities of deep learning models for population monitoring tasks.</description>
    <pubDate>04-29-2025</pubDate>
    <content:encoded>&lt;![CDATA[ Accurate crowd density estimation has become critical in applications ranging from intelligent urban planning and public safety monitoring to marketing analytics and emergency response. In recent developments, various methods have been used to enhance the precision of crowd analysis systems. In this study, a Convolutional Neural Network (CNN)-based approach was presented for crowd density detection, wherein the Congested Scene Recognition Network (CSRNet) architecture was employed with a Visual Geometry Group (VGG)-16 backbone. This method was applied to two benchmark datasets—Mall and Crowd-UIT—to assess its effectiveness in real-world crowd scenarios. Density maps were generated to visualize spatial distributions, and performance was quantitatively evaluated using Mean Squared Error (MSE) and Mean Absolute Error (MAE) metrics. For the Mall dataset, the model achieved an MSE of 0.08 and an MAE of 0.10, while for the Crowd-UIT dataset, an MSE of 0.05 and an MAE of 0.15 were obtained. These results suggest that the proposed VGG-16-based CSRNet model yields high accuracy in crowd estimation tasks across varied environments and crowd densities. Additionally, the model demonstrates robustness in generalizing across different dataset characteristics, indicating its potential applicability in both surveillance systems and public space management. The outcomes of this investigation offer a promising direction for future research in data-driven crowd analysis, particularly in enhancing predictive reliability and real-time deployment capabilities of deep learning models for population monitoring tasks. ]]&gt;</content:encoded>
    <dc:title>Crowd Density Estimation via a VGG-16-Based CSRNet Model</dc:title>
    <dc:creator>Damla Tatlıcan</dc:creator>
    <dc:creator>Nafiye Nur Apaydin</dc:creator>
    <dc:creator>Orhan Yaman</dc:creator>
    <dc:creator>Mehmet Karakose</dc:creator>
    <dc:identifier>doi: 10.56578/ida040201</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>04-29-2025</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>04-29-2025</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>66</prism:startingPage>
    <prism:doi>10.56578/ida040201</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2025_4_2/ida040201</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2025_4_1/ida040105">
    <title>Information Dynamics and Applications, 2025, Volume 4, Issue 1: Enhancing Image Segmentation Using Generalized Convex Fuzzy Sets and Statistical Consistency</title>
    <link>https://www.acadlore.com/article/IDA/2025_4_1/ida040105</link>
    <description>The accurate segmentation of visual data into semantically meaningful regions remains a critical task across diverse domains, including medical diagnostics, satellite imagery interpretation, and automated inspection systems, where precise object delineation is essential for subsequent analysis and decision-making. Conventional segmentation techniques often suffer from limitations such as sensitivity to noise, intensity inhomogeneity, and weak boundary definition, resulting in reduced performance under complex imaging conditions. Although fuzzy set-based approaches have been proposed to improve adaptability under uncertainty, they frequently fail to maintain a balance between segmentation precision and robustness. To address these challenges, a novel segmentation framework was developed based on Pythagorean Fuzzy Sets (PyFSs) and local averaging, offering enhanced performance in uncertain and heterogeneous visual environments. By incorporating both membership and non-membership degrees, PyFSs allow a more flexible representation of uncertainty compared to classical fuzzy models. A local average intensity function was introduced, wherein the contribution of each pixel was adaptively weighted according to its PyFS membership degree, improving resistance to local intensity variations. An energy functional was formulated by integrating PyFS-driven intensity constraints, local statistical deviation measures, and regularization terms, ensuring precise boundary localization through level set evolution. Convexity of the energy formulation was analytically demonstrated to guarantee the stability of the optimization process. Experimental evaluations revealed that the proposed method consistently outperforms existing fuzzy and non-fuzzy segmentation algorithms, achieving superior accuracy in applications such as medical image analysis and natural scene segmentation. 
These results underscore the potential of PyFS-based models as a powerful and generalizable solution for uncertainty-resilient image segmentation in real-world applications.</description>
    <pubDate>03-30-2025</pubDate>
    <content:encoded>&lt;![CDATA[ The accurate segmentation of visual data into semantically meaningful regions remains a critical task across diverse domains, including medical diagnostics, satellite imagery interpretation, and automated inspection systems, where precise object delineation is essential for subsequent analysis and decision-making. Conventional segmentation techniques often suffer from limitations such as sensitivity to noise, intensity inhomogeneity, and weak boundary definition, resulting in reduced performance under complex imaging conditions. Although fuzzy set-based approaches have been proposed to improve adaptability under uncertainty, they frequently fail to maintain a balance between segmentation precision and robustness. To address these challenges, a novel segmentation framework was developed based on Pythagorean Fuzzy Sets (PyFSs) and local averaging, offering enhanced performance in uncertain and heterogeneous visual environments. By incorporating both membership and non-membership degrees, PyFSs allow a more flexible representation of uncertainty compared to classical fuzzy models. A local average intensity function was introduced, wherein the contribution of each pixel was adaptively weighted according to its PyFS membership degree, improving resistance to local intensity variations. An energy functional was formulated by integrating PyFS-driven intensity constraints, local statistical deviation measures, and regularization terms, ensuring precise boundary localization through level set evolution. Convexity of the energy formulation was analytically demonstrated to guarantee the stability of the optimization process. Experimental evaluations revealed that the proposed method consistently outperforms existing fuzzy and non-fuzzy segmentation algorithms, achieving superior accuracy in applications such as medical image analysis and natural scene segmentation. 
These results underscore the potential of PyFS-based models as a powerful and generalizable solution for uncertainty-resilient image segmentation in real-world applications. ]]&gt;</content:encoded>
    <dc:title>Enhancing Image Segmentation Using Generalized Convex Fuzzy Sets and Statistical Consistency</dc:title>
    <dc:creator>Zakir Husain</dc:creator>
    <dc:identifier>doi: 10.56578/ida040105</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>03-30-2025</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>03-30-2025</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>53</prism:startingPage>
    <prism:doi>10.56578/ida040105</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2025_4_1/ida040105</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2025_4_1/ida040104">
    <title>Information Dynamics and Applications, 2025, Volume 4, Issue 1: RTCNet: A Robust Hybrid Deep Learning Model for Soil Property Prediction Under Noisy Conditions</title>
    <link>https://www.acadlore.com/article/IDA/2025_4_1/ida040104</link>
    <description>Accurate prediction of soil fertility and soil organic carbon (SOC) plays a critical role in precision agriculture and sustainable soil management. However, the high spatial-temporal variability inherent in soil properties, compounded by the prevalence of noisy data in real-world conditions, continues to pose significant modeling challenges. To address these issues, a robust hybrid deep learning model, termed RTCNet, was developed by integrating Recurrent Neural Networks (RNNs), Transformer architectures, and Convolutional Neural Networks (CNNs) into a unified predictive framework. Within RTCNet, a one-dimensional convolutional layer was employed for initial feature extraction, followed by MaxPooling for dimensionality reduction, while sequential dependencies were captured using RNN layers. A multi-head attention mechanism was embedded to enhance the representation of inter-variable relationships, thereby improving the model’s ability to handle complex soil data patterns. RTCNet was benchmarked against two conventional models—Artificial Neural Network (ANN) optimized with a Genetic Algorithm (GA), and a Transformer-CNN hybrid model. Under noise-free conditions, RTCNet achieved the lowest Mean Squared Error (MSE) of 0.1032 and Mean Absolute Error (MAE) of 0.1852. Notably, under increasing noise levels, RTCNet consistently maintained stable performance, whereas the comparative models exhibited significant performance degradation. These findings underscore RTCNet’s superior resilience and adaptability, affirming its utility in field-scale agricultural applications where sensor noise, data sparsity, and environmental fluctuations are prevalent. The demonstrated robustness and predictive accuracy of RTCNet position it as a valuable tool for optimizing nutrient management strategies, enhancing SOC monitoring, and supporting informed decision-making in sustainable farming systems.</description>
    <pubDate>03-30-2025</pubDate>
    <content:encoded>&lt;![CDATA[ Accurate prediction of soil fertility and soil organic carbon (SOC) plays a critical role in precision agriculture and sustainable soil management. However, the high spatial-temporal variability inherent in soil properties, compounded by the prevalence of noisy data in real-world conditions, continues to pose significant modeling challenges. To address these issues, a robust hybrid deep learning model, termed RTCNet, was developed by integrating Recurrent Neural Networks (RNNs), Transformer architectures, and Convolutional Neural Networks (CNNs) into a unified predictive framework. Within RTCNet, a one-dimensional convolutional layer was employed for initial feature extraction, followed by MaxPooling for dimensionality reduction, while sequential dependencies were captured using RNN layers. A multi-head attention mechanism was embedded to enhance the representation of inter-variable relationships, thereby improving the model’s ability to handle complex soil data patterns. RTCNet was benchmarked against two conventional models—Artificial Neural Network (ANN) optimized with a Genetic Algorithm (GA), and a Transformer-CNN hybrid model. Under noise-free conditions, RTCNet achieved the lowest Mean Squared Error (MSE) of 0.1032 and Mean Absolute Error (MAE) of 0.1852. Notably, under increasing noise levels, RTCNet consistently maintained stable performance, whereas the comparative models exhibited significant performance degradation. These findings underscore RTCNet’s superior resilience and adaptability, affirming its utility in field-scale agricultural applications where sensor noise, data sparsity, and environmental fluctuations are prevalent. The demonstrated robustness and predictive accuracy of RTCNet position it as a valuable tool for optimizing nutrient management strategies, enhancing SOC monitoring, and supporting informed decision-making in sustainable farming systems. ]]&gt;</content:encoded>
    <dc:title>RTCNet: A Robust Hybrid Deep Learning Model for Soil Property Prediction Under Noisy Conditions</dc:title>
    <dc:creator>Pape El Hadji Abdoulaye Gueye</dc:creator>
    <dc:creator>Cherif Bachir Deme</dc:creator>
    <dc:creator>Adrien Basse</dc:creator>
    <dc:identifier>doi: 10.56578/ida040104</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>03-30-2025</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>03-30-2025</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>36</prism:startingPage>
    <prism:doi>10.56578/ida040104</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2025_4_1/ida040104</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2025_4_1/ida040103">
    <title>Information Dynamics and Applications, 2025, Volume 4, Issue 1: A Multi-Stage Fuzzy-Framework Method for Precise Blurred Image Restoration</title>
    <link>https://www.acadlore.com/article/IDA/2025_4_1/ida040103</link>
    <description>Enhancing the sharpness of blurred images continues to be a critical and persistent issue in the domain of image restoration and processing, requiring precise techniques to recover lost details and enhance visual clarity. This study proposes a novel model that combines the strengths of fuzzy systems with mathematical transformations to address the complexities of blurred image restoration. The model operates through a multi-stage framework, beginning with pixel coordinate transformations and corrections to account for geometric distortions caused by blurring. Fuzzy logic is employed to handle uncertainties in blur estimation, utilizing membership functions to categorize blur levels and a rule-based system to dynamically adapt corrective actions. The fusion of fuzzy logic and mathematical transformations ensures localized and adaptive corrections, effectively restoring sharpness in blurred regions while preserving regions with minimal distortion. Additionally, fuzzy edge enhancement is introduced to emphasize edges and suppress noise, further improving image quality. The final restoration process includes normalization and structural constraints to ensure the output aligns with the original unblurred image. Experimental results showcase the performance and reliability of the developed framework to restore clarity, preserve fine details, and minimize artifacts, making it a robust solution for diverse blurring scenarios. The proposed approach offers a significant advancement in blurred image restoration, combining the adaptability of fuzzy logic with the precision of mathematical computations to achieve superior results.</description>
    <pubDate>03-05-2025</pubDate>
    <content:encoded>&lt;![CDATA[ Enhancing the sharpness of blurred images continues to be a critical and persistent issue in the domain of image restoration and processing, requiring precise techniques to recover lost details and enhance visual clarity. This study proposes a novel model that combines the strengths of fuzzy systems with mathematical transformations to address the complexities of blurred image restoration. The model operates through a multi-stage framework, beginning with pixel coordinate transformations and corrections to account for geometric distortions caused by blurring. Fuzzy logic is employed to handle uncertainties in blur estimation, utilizing membership functions to categorize blur levels and a rule-based system to dynamically adapt corrective actions. The fusion of fuzzy logic and mathematical transformations ensures localized and adaptive corrections, effectively restoring sharpness in blurred regions while preserving regions with minimal distortion. Additionally, fuzzy edge enhancement is introduced to emphasize edges and suppress noise, further improving image quality. The final restoration process includes normalization and structural constraints to ensure the output aligns with the original unblurred image. Experimental results showcase the performance and reliability of the developed framework to restore clarity, preserve fine details, and minimize artifacts, making it a robust solution for diverse blurring scenarios. The proposed approach offers a significant advancement in blurred image restoration, combining the adaptability of fuzzy logic with the precision of mathematical computations to achieve superior results. ]]&gt;</content:encoded>
    <dc:title>A Multi-Stage Fuzzy-Framework Method for Precise Blurred Image Restoration</dc:title>
    <dc:creator>Muhammad Zeeshan Naeem</dc:creator>
    <dc:identifier>doi:10.56578/ida040103</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2025-03-05</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2025-03-05</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>23</prism:startingPage>
    <prism:doi>10.56578/ida040103</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2025_4_1/ida040103</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2025_4_1/ida040102">
    <title>Information Dynamics and Applications, 2025, Volume 4, Issue 1: Bridging Fundamental Physics and Practical Applications: Advances in Quantum-Enhanced Sensing</title>
    <link>https://www.acadlore.com/article/IDA/2025_4_1/ida040102</link>
    <description>Quantum-enhanced sensing has emerged as a transformative technology with the potential to surpass classical sensing modalities in precision and sensitivity. This study explores the advancements and applications of quantum-enhanced sensing, emphasizing its capacity to bridge fundamental physics and practical implementations. The current progress in experimental demonstrations of quantum-enhanced sensing systems was reviewed, focusing on breakthroughs in metrology and the development of physically realizable sensor architectures. Two practical implementations of quantum-enhanced sensors based on trapped ions were proposed. The first design utilizes Ramsey interferometry with spin-squeezed atomic ensembles, employing laser-induced spin-exchange interactions to reconstruct the sensing Hamiltonian. This approach enables measurement rates to scale with the number of sensing atoms, achieving sensitivity enhancements beyond the standard quantum limit (SQL). The second implementation introduces mean-field interactions mediated by coupled optical cavities that share coherent atomic probes, enabling the realization of high-performance sensing systems. Both sensor systems were demonstrated to be feasible on state-of-the-art ion-trap platforms, offering promising benchmarks for future applications in metrology and imaging. Particular attention was given to the integration of quantum-enhanced sensing with complementary imaging technologies, which continues to gain traction in medical imaging and other fields. The mutual reinforcement of quantum and complementary technologies is increasingly supported by significant investments from governmental, academic, and commercial entities. The ongoing pursuit of improved measurement resolution and imaging fidelity underscores the interdependence of these innovations, advancing the transition of quantum-enhanced sensing from fundamental research to widespread practical use.</description>
    <pubDate>2025-02-19</pubDate>
    <content:encoded>&lt;![CDATA[ Quantum-enhanced sensing has emerged as a transformative technology with the potential to surpass classical sensing modalities in precision and sensitivity. This study explores the advancements and applications of quantum-enhanced sensing, emphasizing its capacity to bridge fundamental physics and practical implementations. The current progress in experimental demonstrations of quantum-enhanced sensing systems was reviewed, focusing on breakthroughs in metrology and the development of physically realizable sensor architectures. Two practical implementations of quantum-enhanced sensors based on trapped ions were proposed. The first design utilizes Ramsey interferometry with spin-squeezed atomic ensembles, employing laser-induced spin-exchange interactions to reconstruct the sensing Hamiltonian. This approach enables measurement rates to scale with the number of sensing atoms, achieving sensitivity enhancements beyond the standard quantum limit (SQL). The second implementation introduces mean-field interactions mediated by coupled optical cavities that share coherent atomic probes, enabling the realization of high-performance sensing systems. Both sensor systems were demonstrated to be feasible on state-of-the-art ion-trap platforms, offering promising benchmarks for future applications in metrology and imaging. Particular attention was given to the integration of quantum-enhanced sensing with complementary imaging technologies, which continues to gain traction in medical imaging and other fields. The mutual reinforcement of quantum and complementary technologies is increasingly supported by significant investments from governmental, academic, and commercial entities. The ongoing pursuit of improved measurement resolution and imaging fidelity underscores the interdependence of these innovations, advancing the transition of quantum-enhanced sensing from fundamental research to widespread practical use. ]]&gt;</content:encoded>
    <dc:title>Bridging Fundamental Physics and Practical Applications: Advances in Quantum-Enhanced Sensing</dc:title>
    <dc:creator>Suha Mousa Khorsheedi</dc:creator>
    <dc:creator>Mohammed Sahib Mahdi Altaei</dc:creator>
    <dc:identifier>doi:10.56578/ida040102</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2025-02-19</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2025-02-19</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>12</prism:startingPage>
    <prism:doi>10.56578/ida040102</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2025_4_1/ida040102</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2025_4_1/ida040101">
    <title>Information Dynamics and Applications, 2025, Volume 4, Issue 1: Intelligent Road Crack Detection Using Fuzzy Logic and Multi-Scale Optimization</title>
    <link>https://www.acadlore.com/article/IDA/2025_4_1/ida040101</link>
    <description>Accurate detection of road cracks is essential for maintaining infrastructure integrity, ensuring road safety, and preventing costly structural damage. However, challenges such as varying illumination conditions, noise, irregular crack patterns, and complex background textures often hinder reliable detection. To address these issues, a novel Fuzzy-Powered Multi-Scale Optimization (FMSO) model was proposed, integrating adaptive fuzzy operators, multi-scale level set evolution, Dynamic Graph Energy Minimization (GEM), and Hybrid Swarm Optimization (HSO). The FMSO model employs multi-resolution segmentation, entropy-based fuzzy weighting, and adaptive optimization strategies to enhance detection accuracy, while adaptive fuzzy operators mitigate the impact of illumination variations. Multi-scale level set evolution refines crack boundaries with high precision, and GEM effectively separates cracks from intricate backgrounds. Furthermore, HSO dynamically optimizes segmentation parameters, ensuring improved accuracy. The model was rigorously evaluated using multiple benchmark datasets, with performance metrics including accuracy, precision, recall, and F1-score. Experimental results demonstrate that the FMSO model surpasses existing methods, achieving superior accuracy, enhanced precision, and higher recall. Notably, the model effectively reduces false positives while maintaining sensitivity to fine crack details. The integration of fuzzy logic and multi-scale optimization techniques renders the FMSO model highly adaptable to varying road conditions and imaging environments, making it a robust solution for infrastructure maintenance. This approach not only advances the field of road crack detection but also provides a scalable framework for addressing similar challenges in other domains of image analysis and pattern recognition.</description>
    <pubDate>2025-02-09</pubDate>
    <content:encoded>&lt;![CDATA[ Accurate detection of road cracks is essential for maintaining infrastructure integrity, ensuring road safety, and preventing costly structural damage. However, challenges such as varying illumination conditions, noise, irregular crack patterns, and complex background textures often hinder reliable detection. To address these issues, a novel Fuzzy-Powered Multi-Scale Optimization (FMSO) model was proposed, integrating adaptive fuzzy operators, multi-scale level set evolution, Dynamic Graph Energy Minimization (GEM), and Hybrid Swarm Optimization (HSO). The FMSO model employs multi-resolution segmentation, entropy-based fuzzy weighting, and adaptive optimization strategies to enhance detection accuracy, while adaptive fuzzy operators mitigate the impact of illumination variations. Multi-scale level set evolution refines crack boundaries with high precision, and GEM effectively separates cracks from intricate backgrounds. Furthermore, HSO dynamically optimizes segmentation parameters, ensuring improved accuracy. The model was rigorously evaluated using multiple benchmark datasets, with performance metrics including accuracy, precision, recall, and F1-score. Experimental results demonstrate that the FMSO model surpasses existing methods, achieving superior accuracy, enhanced precision, and higher recall. Notably, the model effectively reduces false positives while maintaining sensitivity to fine crack details. The integration of fuzzy logic and multi-scale optimization techniques renders the FMSO model highly adaptable to varying road conditions and imaging environments, making it a robust solution for infrastructure maintenance. This approach not only advances the field of road crack detection but also provides a scalable framework for addressing similar challenges in other domains of image analysis and pattern recognition. ]]&gt;</content:encoded>
    <dc:title>Intelligent Road Crack Detection Using Fuzzy Logic and Multi-Scale Optimization</dc:title>
    <dc:creator>Rifaqat Ali</dc:creator>
    <dc:identifier>doi:10.56578/ida040101</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2025-02-09</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2025-02-09</prism:publicationDate>
    <prism:year>2025</prism:year>
    <prism:volume>4</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>1</prism:startingPage>
    <prism:doi>10.56578/ida040101</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2025_4_1/ida040101</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2024_3_4/ida030405">
    <title>Information Dynamics and Applications, 2024, Volume 3, Issue 4: FEGAO: A Revolutionary Method for Enhancing Defective Fuzzy Images with Non-Linear Refinement</title>
    <link>https://www.acadlore.com/article/IDA/2024_3_4/ida030405</link>
    <description>This study presents a novel image restoration method, designed to enhance defective fuzzy images, by utilizing the Fuzzy Einstein Geometric Aggregation Operator (FEGAO). The method addresses the challenges posed by non-linearity, uncertainty, and complex degradation in defective images. Traditional image enhancement approaches often struggle with the imprecision inherent in defect detection. In contrast, FEGAO employs the Einstein t-norm and t-conorm for non-linear aggregation, which refines pixel coordinates and improves the accuracy of feature extraction. The proposed approach integrates several techniques, including pixel coordinate extraction, regional intensity refinement, multi-scale Gaussian correction, and a layered enhancement framework, thereby ensuring superior preservation of details and minimization of artifacts. Experimental evaluations demonstrate that FEGAO outperforms conventional methods in terms of image resolution, edge clarity, and noise robustness, while maintaining computational efficiency. Comparative analysis further underscores the method’s ability to preserve fine details and reduce uncertainty in defective images. This work offers significant advancements in image restoration by providing an adaptive, efficient solution for defect detection, machine vision, and multimedia applications, establishing a foundation for future research in fuzzy logic-based image processing under degraded conditions.</description>
    <pubDate>2024-12-30</pubDate>
    <content:encoded>&lt;![CDATA[ This study presents a novel image restoration method, designed to enhance defective fuzzy images, by utilizing the Fuzzy Einstein Geometric Aggregation Operator (FEGAO). The method addresses the challenges posed by non-linearity, uncertainty, and complex degradation in defective images. Traditional image enhancement approaches often struggle with the imprecision inherent in defect detection. In contrast, FEGAO employs the Einstein t-norm and t-conorm for non-linear aggregation, which refines pixel coordinates and improves the accuracy of feature extraction. The proposed approach integrates several techniques, including pixel coordinate extraction, regional intensity refinement, multi-scale Gaussian correction, and a layered enhancement framework, thereby ensuring superior preservation of details and minimization of artifacts. Experimental evaluations demonstrate that FEGAO outperforms conventional methods in terms of image resolution, edge clarity, and noise robustness, while maintaining computational efficiency. Comparative analysis further underscores the method’s ability to preserve fine details and reduce uncertainty in defective images. This work offers significant advancements in image restoration by providing an adaptive, efficient solution for defect detection, machine vision, and multimedia applications, establishing a foundation for future research in fuzzy logic-based image processing under degraded conditions. ]]&gt;</content:encoded>
    <dc:title>FEGAO: A Revolutionary Method for Enhancing Defective Fuzzy Images with Non-Linear Refinement</dc:title>
    <dc:creator>Ibrar Hussain</dc:creator>
    <dc:identifier>doi:10.56578/ida030405</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2024-12-30</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2024-12-30</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>4</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>258</prism:startingPage>
    <prism:doi>10.56578/ida030405</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2024_3_4/ida030405</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2024_3_4/ida030404">
    <title>Information Dynamics and Applications, 2024, Volume 3, Issue 4: Leveraging Artificial Intelligence for Blackhole Attack Detection in MANETs: A Comparative Study</title>
    <link>https://www.acadlore.com/article/IDA/2024_3_4/ida030404</link>
    <description>Blackhole attacks represent a significant threat to the security of communication networks, particularly in emerging network architectures such as Mobile Ad Hoc Networks (MANETs). These attacks, characterized by their ability to obscure malicious behavior, evade conventional detection methods due to their loosely defined signatures and their ability to bypass traditional filtering mechanisms. This study investigates the application of machine learning techniques, specifically Support Vector Machine (SVM), Convolutional Neural Network (CNN), and Decision Tree (DT), for the detection and mitigation of blackhole attacks in MANETs. Simulations conducted in MATLAB 2023a examined network configurations with node densities of 50, 100, 250, and 500 nodes to assess the performance of these classifiers in comparison to conventional detection approaches. The results demonstrated that both SVM and CNN achieved near-perfect detection accuracy of 100% across all network configurations, outperforming traditional methods. SVM was chosen due to its efficacy in handling high-dimensional data, CNN for its ability to learn complex, nonlinear hierarchical features, and DT for its interpretability. The findings underscore the potential of these machine learning models in enhancing the precision of blackhole attack detection, thereby improving network security. Future research is recommended to explore the scalability and training efficiency of these models, particularly through the integration of advanced techniques such as model fusion and deep learning architectures. This study contributes to the growing body of literature on radar wave radio (RWR)-based and machine learning-based attack detection and highlights the potential of artificial intelligence (AI) solutions in transforming traditional emitter identification methods, offering significant improvements to network protection systems.</description>
    <pubDate>2024-12-20</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Blackhole attacks represent a significant threat to the security of communication networks, particularly in emerging network architectures such as Mobile Ad Hoc Networks (MANETs). These attacks, characterized by their ability to obscure malicious behavior, evade conventional detection methods due to their loosely defined signatures and their ability to bypass traditional filtering mechanisms. This study investigates the application of machine learning techniques, specifically Support Vector Machine (SVM), Convolutional Neural Network (CNN), and Decision Tree (DT), for the detection and mitigation of blackhole attacks in MANETs. Simulations conducted in MATLAB 2023a examined network configurations with node densities of 50, 100, 250, and 500 nodes to assess the performance of these classifiers in comparison to conventional detection approaches. The results demonstrated that both SVM and CNN achieved near-perfect detection accuracy of 100% across all network configurations, outperforming traditional methods. SVM was chosen due to its efficacy in handling high-dimensional data, CNN for its ability to learn complex, nonlinear hierarchical features, and DT for its interpretability. The findings underscore the potential of these machine learning models in enhancing the precision of blackhole attack detection, thereby improving network security. Future research is recommended to explore the scalability and training efficiency of these models, particularly through the integration of advanced techniques such as model fusion and deep learning architectures. This study contributes to the growing body of literature on radar wave radio (RWR)-based and machine learning-based attack detection and highlights the potential of artificial intelligence (AI) solutions in transforming traditional emitter identification methods, offering significant improvements to network protection systems.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Leveraging Artificial Intelligence for Blackhole Attack Detection in MANETs: A Comparative Study</dc:title>
    <dc:creator>Zainab Bashar Ibrahim</dc:creator>
    <dc:creator>Mayada Faris Ghanim</dc:creator>
    <dc:identifier>doi:10.56578/ida030404</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2024-12-20</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2024-12-20</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>4</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>245</prism:startingPage>
    <prism:doi>10.56578/ida030404</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2024_3_4/ida030404</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2024_3_4/ida030403">
    <title>Information Dynamics and Applications, 2024, Volume 3, Issue 4: An Integrated BERT-XGBoost Framework for Open-Source Intelligence Classification in Aerospace Technology</title>
    <link>https://www.acadlore.com/article/IDA/2024_3_4/ida030403</link>
    <description>Open-source intelligence in aerospace technology often contains lengthy text and numerous technical terms, which can affect classification accuracy. To enhance the precision of classifying such intelligence, a classification algorithm integrating the Bidirectional Encoder Representations from Transformers (BERT) and Extreme Gradient Boosting (XGBoost) models was proposed. Initially, key features within the intelligence were extracted through the deep structure of the BERT model. Subsequently, the XGBoost model was utilised to replace the final output layer of BERT, applying the extracted features for classification. To verify the algorithm's effectiveness, comparative experiments were conducted against prominent language models such as Text Recurrent Convolutional Neural Network (TextRCNN) and Deep Pyramid Convolutional Neural Network (DPCNN). Experimental results demonstrate that, for open-source intelligence classification in aerospace technology, this algorithm achieved accuracy improvements of 1.9% and 2.2% over the TextRCNN and DPCNN models, respectively, confirming the algorithm's efficacy in relevant classification tasks.</description>
    <pubDate>2024-12-12</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Open-source intelligence in aerospace technology often contains lengthy text and numerous technical terms, which can affect classification accuracy. To enhance the precision of classifying such intelligence, a classification algorithm integrating the Bidirectional Encoder Representations from Transformers (BERT) and Extreme Gradient Boosting (XGBoost) models was proposed. Initially, key features within the intelligence were extracted through the deep structure of the BERT model. Subsequently, the XGBoost model was utilised to replace the final output layer of BERT, applying the extracted features for classification. To verify the algorithm's effectiveness, comparative experiments were conducted against prominent language models such as Text Recurrent Convolutional Neural Network (TextRCNN) and Deep Pyramid Convolutional Neural Network (DPCNN). Experimental results demonstrate that, for open-source intelligence classification in aerospace technology, this algorithm achieved accuracy improvements of 1.9% and 2.2% over the TextRCNN and DPCNN models, respectively, confirming the algorithm's efficacy in relevant classification tasks.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>An Integrated BERT-XGBoost Framework for Open-Source Intelligence Classification in Aerospace Technology</dc:title>
    <dc:creator>Suping Yu</dc:creator>
    <dc:creator>Weiwei Mao</dc:creator>
    <dc:identifier>doi:10.56578/ida030403</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2024-12-12</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2024-12-12</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>4</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>234</prism:startingPage>
    <prism:doi>10.56578/ida030403</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2024_3_4/ida030403</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2024_3_4/ida030402">
    <title>Information Dynamics and Applications, 2024, Volume 3, Issue 4: Extraction of Judgment Elements from Legal Instruments Using an Attention Mechanism-Based RCNN Fusion Model</title>
    <link>https://www.acadlore.com/article/IDA/2024_3_4/ida030402</link>
    <description>In the field of jurisprudence, judgment element extraction has become a crucial aspect of legal judgment prediction research. The introduction of pre-trained language models has provided significant momentum for the advancement of Natural Language Processing (NLP) technologies, with the Bidirectional Encoder Representations from Transformers (BERT) model being particularly notable for its ability to enhance semantic understanding in unsupervised learning. A fusion model combining BERT and an attention mechanism-based Recurrent Convolutional Neural Network (RCNN) was utilized in this study for multi-label classification tasks, aiming to further extract contextual features from legal texts. The dataset used in this research was derived from the "China Legal Research Cup" judgment element extraction competition, which includes three types of cases (divorce, labor, and lending disputes), with each case type divided into 20 label categories. Four comparative experiments were conducted to investigate the optimization of the model by placing the attention mechanism at different positions. At the same time, previous models were learned and studied and their advantages were analyzed. The results obtained from replicating and optimizing those previous models demonstrate promising legal instrument classification performance.</description>
    <pubDate>2024-12-03</pubDate>
    <content:encoded>&lt;![CDATA[ In the field of jurisprudence, judgment element extraction has become a crucial aspect of legal judgment prediction research. The introduction of pre-trained language models has provided significant momentum for the advancement of Natural Language Processing (NLP) technologies, with the Bidirectional Encoder Representations from Transformers (BERT) model being particularly notable for its ability to enhance semantic understanding in unsupervised learning. A fusion model combining BERT and an attention mechanism-based Recurrent Convolutional Neural Network (RCNN) was utilized in this study for multi-label classification tasks, aiming to further extract contextual features from legal texts. The dataset used in this research was derived from the "China Legal Research Cup" judgment element extraction competition, which includes three types of cases (divorce, labor, and lending disputes), with each case type divided into 20 label categories. Four comparative experiments were conducted to investigate the optimization of the model by placing the attention mechanism at different positions. At the same time, previous models were learned and studied and their advantages were analyzed. The results obtained from replicating and optimizing those previous models demonstrate promising legal instrument classification performance. ]]&gt;</content:encoded>
    <dc:title>Extraction of Judgment Elements from Legal Instruments Using an Attention Mechanism-Based RCNN Fusion Model</dc:title>
    <dc:creator>Jin Ren</dc:creator>
    <dc:identifier>doi:10.56578/ida030402</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2024-12-03</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2024-12-03</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>4</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>223</prism:startingPage>
    <prism:doi>10.56578/ida030402</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2024_3_4/ida030402</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2024_3_4/ida030401">
    <title>Information Dynamics and Applications, 2024, Volume 3, Issue 4: Enhanced Method for Monitoring Internet Abnormal Traffic Based on the Improved BiLSTM Network Algorithm</title>
    <link>https://www.acadlore.com/article/IDA/2024_3_4/ida030401</link>
    <description>The complexity and variability of Internet traffic data present significant challenges in feature extraction and selection, often resulting in ineffective abnormal traffic monitoring. To address these challenges, an improved Bidirectional Long Short-Term Memory (BiLSTM) network-based approach for Internet abnormal traffic monitoring was proposed. In this method, a constrained minimum collection node coverage strategy was first applied to optimize the selection of collection nodes, ensuring comprehensive data coverage across network nodes while minimizing resource consumption. The collected traffic dataset was then transformed to enhance data validity. To enable more robust feature extraction, a combined Convolutional Neural Network (CNN) and BiLSTM model was employed, allowing for a comprehensive analysis of data characteristics. Additionally, an attention mechanism was incorporated to weigh the significance of attribute features, further enhancing classification accuracy. The final traffic monitoring results were produced through a softmax classifier, demonstrating that the proposed method yields a high monitoring accuracy with a low false positive rate of 0.2, an Area Under the Curve (AUC) of 0.95, and an average monitoring latency of 5.7 milliseconds (ms). These results indicate that the method provides an efficient and rapid response to Internet traffic anomalies, with a marked improvement in monitoring performance and resource efficiency.</description>
    <pubDate>2024-11-24</pubDate>
    <content:encoded>&lt;![CDATA[ The complexity and variability of Internet traffic data present significant challenges in feature extraction and selection, often resulting in ineffective abnormal traffic monitoring. To address these challenges, an improved Bidirectional Long Short-Term Memory (BiLSTM) network-based approach for Internet abnormal traffic monitoring was proposed. In this method, a constrained minimum collection node coverage strategy was first applied to optimize the selection of collection nodes, ensuring comprehensive data coverage across network nodes while minimizing resource consumption. The collected traffic dataset was then transformed to enhance data validity. To enable more robust feature extraction, a combined Convolutional Neural Network (CNN) and BiLSTM model was employed, allowing for a comprehensive analysis of data characteristics. Additionally, an attention mechanism was incorporated to weigh the significance of attribute features, further enhancing classification accuracy. The final traffic monitoring results were produced through a softmax classifier, demonstrating that the proposed method yields a high monitoring accuracy with a low false positive rate of 0.2, an Area Under the Curve (AUC) of 0.95, and an average monitoring latency of 5.7 milliseconds (ms). These results indicate that the method provides an efficient and rapid response to Internet traffic anomalies, with a marked improvement in monitoring performance and resource efficiency. ]]&gt;</content:encoded>
    <dc:title>Enhanced Method for Monitoring Internet Abnormal Traffic Based on the Improved BiLSTM Network Algorithm</dc:title>
    <dc:creator>li yan</dc:creator>
    <dc:creator>hongzhang han</dc:creator>
    <dc:creator>zhong li</dc:creator>
    <dc:identifier>doi: 10.56578/ida030401</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>11-24-2024</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2024-11-24</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>4</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>211</prism:startingPage>
    <prism:doi>10.56578/ida030401</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2024_3_4/ida030401</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2024_3_3/ida030305">
    <title>Information Dynamics and Applications, 2024, Volume 3, Issue 3: K-Means Clustering Algorithm Based on Improved Differential Evolution</title>
    <link>https://www.acadlore.com/article/IDA/2024_3_3/ida030305</link>
    <description>The traditional K-means clustering algorithm has unstable clustering results and low efficiency due to the random selection of initial cluster centres. To address the limitations, an improved K-means clustering algorithm based on adaptive guided differential evolution (AGDE-KM) was proposed. First, adaptive operators were designed to enhance global search capability in the early stages and accelerate convergence in later stages. Second, a multi-mutation strategy with a weighted coefficient was introduced to leverage the advantages of different mutation strategies during various evolutionary phases, balancing global and local search capabilities and expediting convergence. Third, a Gaussian perturbation crossover operation was proposed based on the best individual in the current population, providing individuals with superior evolution directions while preserving population diversity across dimensions, thereby avoiding the local optima of the algorithm. The optimal solution output at the end of the algorithm implementation was used as the initial cluster centres, replacing the cluster centres randomly selected by the traditional K-means clustering algorithm. The proposed algorithm was evaluated on public datasets from the UCI repository, including Vowel, Iris, and Glass, as well as a synthetic dataset (Jcdx). The sum of squared errors (SSE) was reduced by 5.65%, 19.59%, 13.31%, and 6.1%, respectively, compared to traditional K-means. Additionally, clustering time was decreased by 83.03%, 81.33%, 77.47%, and 92.63%, respectively. Experimental results demonstrate that the proposed improved algorithm significantly enhances convergence speed and optimisation capability, significantly improving the clustering effectiveness, efficiency, and stability.</description>
    <pubDate>29 Sep 2024 00:00:00 GMT</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;The traditional K-means clustering algorithm has unstable clustering results and low efficiency due to the random selection of initial cluster centres. To address the limitations, an improved K-means clustering algorithm based on adaptive guided differential evolution (AGDE-KM) was proposed. First, adaptive operators were designed to enhance global search capability in the early stages and accelerate convergence in later stages. Second, a multi-mutation strategy with a weighted coefficient was introduced to leverage the advantages of different mutation strategies during various evolutionary phases, balancing global and local search capabilities and expediting convergence. Third, a Gaussian perturbation crossover operation was proposed based on the best individual in the current population, providing individuals with superior evolution directions while preserving population diversity across dimensions, thereby avoiding the local optima of the algorithm. The optimal solution output at the end of the algorithm implementation was used as the initial cluster centres, replacing the cluster centres randomly selected by the traditional K-means clustering algorithm. The proposed algorithm was evaluated on public datasets from the UCI repository, including Vowel, Iris, and Glass, as well as a synthetic dataset (Jcdx). The sum of squared errors (SSE) was reduced by 5.65%, 19.59%, 13.31%, and 6.1%, respectively, compared to traditional K-means. Additionally, clustering time was decreased by 83.03%, 81.33%, 77.47%, and 92.63%, respectively. Experimental results demonstrate that the proposed improved algorithm significantly enhances convergence speed and optimisation capability, significantly improving the clustering effectiveness, efficiency, and stability.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>K-Means Clustering Algorithm Based on Improved Differential Evolution</dc:title>
    <dc:creator>Lei An</dc:creator>
    <dc:creator>Xiaohua Sun</dc:creator>
    <dc:creator>Yan Wang</dc:creator>
    <dc:identifier>doi: 10.56578/ida030305</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2024-09-29</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2024-09-29</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>3</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>200</prism:startingPage>
    <prism:doi>10.56578/ida030305</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2024_3_3/ida030305</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2024_3_3/ida030304">
    <title>Information Dynamics and Applications, 2024, Volume 3, Issue 3: Detection of Fruit Ripeness and Defectiveness Using Convolutional Neural Networks</title>
    <link>https://www.acadlore.com/article/IDA/2024_3_3/ida030304</link>
    <description>The classification of fruit ripeness and detection of defects are critical processes in the agricultural industry to minimize losses during commercialization. This study evaluated the performance of three Convolutional Neural Network (CNN) architectures—Extreme Inception Network (XceptionNet), Wide Residual Network (Wide ResNet), and Inception Version 4 (Inception V4)—in predicting the ripeness and quality of tomatoes. A dataset comprising 2,589 images of beef tomatoes was assembled from Golden Fingers Farms and Ranches Limited, Abuja, Nigeria. The samples were categorized into six classes representing five progressive ripening stages and a defect class, based on the United States Department of Agriculture (USDA) colour chart. To enhance the dataset's size and diversity, image augmentation through geometric transformations was employed, increasing the dataset to 3,000 images. Fivefold cross-validation was conducted to ensure a robust evaluation of the models' performance. The Wide ResNet model demonstrated superior performance, achieving an average accuracy of 97.87%, surpassing the 96.85% and 96.23% achieved by XceptionNet and Inception V4, respectively. These findings underscore the potential of Wide ResNet as an effective tool for accurately detecting ripeness levels and defects in tomatoes. The comparative analysis highlights the effectiveness of deep learning (DL) techniques in addressing challenges in agricultural automation and quality assessment. The proposed methodology offers a scalable solution for implementing automated ripeness and defect detection systems, with significant implications for reducing waste and improving supply chain efficiency.</description>
    <pubDate>22 Sep 2024 00:00:00 GMT</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;The classification of fruit ripeness and detection of defects are critical processes in the agricultural industry to minimize losses during commercialization. This study evaluated the performance of three Convolutional Neural Network (CNN) architectures—Extreme Inception Network (XceptionNet), Wide Residual Network (Wide ResNet), and Inception Version 4 (Inception V4)—in predicting the ripeness and quality of tomatoes. A dataset comprising 2,589 images of beef tomatoes was assembled from Golden Fingers Farms and Ranches Limited, Abuja, Nigeria. The samples were categorized into six classes representing five progressive ripening stages and a defect class, based on the United States Department of Agriculture (USDA) colour chart. To enhance the dataset's size and diversity, image augmentation through geometric transformations was employed, increasing the dataset to 3,000 images. Fivefold cross-validation was conducted to ensure a robust evaluation of the models' performance. The Wide ResNet model demonstrated superior performance, achieving an average accuracy of 97.87%, surpassing the 96.85% and 96.23% achieved by XceptionNet and Inception V4, respectively. These findings underscore the potential of Wide ResNet as an effective tool for accurately detecting ripeness levels and defects in tomatoes. The comparative analysis highlights the effectiveness of deep learning (DL) techniques in addressing challenges in agricultural automation and quality assessment. The proposed methodology offers a scalable solution for implementing automated ripeness and defect detection systems, with significant implications for reducing waste and improving supply chain efficiency.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Detection of Fruit Ripeness and Defectiveness Using Convolutional Neural Networks</dc:title>
    <dc:creator>Joshua S. Mommoh</dc:creator>
    <dc:creator>James L. Obetta</dc:creator>
    <dc:creator>Samuel N. John</dc:creator>
    <dc:creator>Kennedy Okokpujie</dc:creator>
    <dc:creator>Osemwegie N. Omoruyi</dc:creator>
    <dc:creator>Ayokunle A. Awelewa</dc:creator>
    <dc:identifier>doi: 10.56578/ida030304</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2024-09-22</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2024-09-22</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>3</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>184</prism:startingPage>
    <prism:doi>10.56578/ida030304</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2024_3_3/ida030304</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2024_3_3/ida030303">
    <title>Information Dynamics and Applications, 2024, Volume 3, Issue 3: Multi-Channel Scheduling for Short-Range Wireless Communication Networks Using a Q-Learning Feedback Mechanism</title>
    <link>https://www.acadlore.com/article/IDA/2024_3_3/ida030303</link>
    <description>The traditional channel scheduling methods in short-range wireless communication networks are often constrained by fixed rules, resulting in inefficient channel resource utilization and unstable data communication. To address these limitations, a novel multi-channel scheduling approach, based on a Q-learning feedback mechanism, was proposed. The architecture of short-range wireless communication networks was analyzed, focusing on the core network system and wireless access network structures. The network channel nodes were optimized by deploying Dijkstra's algorithm in conjunction with an undirected graph representation of the communication nodes within the network. Multi-channel state characteristic parameters were computed, and a channel state prediction model was constructed to forecast the state of the network channels. The Q-learning feedback mechanism was employed to implement multi-channel scheduling, leveraging the algorithm’s reinforcement learning capabilities and framing the scheduling process as a Markov decision-making problem. Experimental results demonstrate that this method achieved a maximum average packet loss rate of 0.03 and a network throughput of up to 4.5 Mbps, indicating high channel resource utilization efficiency. Moreover, in low-traffic conditions, communication delay remained below 0.4 s, and in high-traffic scenarios, it varied between 0.26 and 0.4 s. These outcomes suggest that the proposed approach enables efficient and stable transmission of communication data, maintaining both low packet loss and high throughput.</description>
    <pubDate>11 Sep 2024 00:00:00 GMT</pubDate>
    <content:encoded>&lt;![CDATA[ The traditional channel scheduling methods in short-range wireless communication networks are often constrained by fixed rules, resulting in inefficient channel resource utilization and unstable data communication. To address these limitations, a novel multi-channel scheduling approach, based on a Q-learning feedback mechanism, was proposed. The architecture of short-range wireless communication networks was analyzed, focusing on the core network system and wireless access network structures. The network channel nodes were optimized by deploying Dijkstra's algorithm in conjunction with an undirected graph representation of the communication nodes within the network. Multi-channel state characteristic parameters were computed, and a channel state prediction model was constructed to forecast the state of the network channels. The Q-learning feedback mechanism was employed to implement multi-channel scheduling, leveraging the algorithm’s reinforcement learning capabilities and framing the scheduling process as a Markov decision-making problem. Experimental results demonstrate that this method achieved a maximum average packet loss rate of 0.03 and a network throughput of up to 4.5 Mbps, indicating high channel resource utilization efficiency. Moreover, in low-traffic conditions, communication delay remained below 0.4 s, and in high-traffic scenarios, it varied between 0.26 and 0.4 s. These outcomes suggest that the proposed approach enables efficient and stable transmission of communication data, maintaining both low packet loss and high throughput. ]]&gt;</content:encoded>
    <dc:title>Multi-Channel Scheduling for Short-Range Wireless Communication Networks Using a Q-Learning Feedback Mechanism</dc:title>
    <dc:creator>Li Yan</dc:creator>
    <dc:creator>Hongzhang Han</dc:creator>
    <dc:identifier>doi: 10.56578/ida030303</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2024-09-11</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2024-09-11</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>3</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>171</prism:startingPage>
    <prism:doi>10.56578/ida030303</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2024_3_3/ida030303</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2024_3_3/ida030302">
    <title>Information Dynamics and Applications, 2024, Volume 3, Issue 3: Enhanced Defect Detection in Insulator Iron Caps Using Improved YOLOv8n</title>
    <link>https://www.acadlore.com/article/IDA/2024_3_3/ida030302</link>
    <description>To address the challenges in detecting surface defects on insulator iron caps, particularly due to the complex backgrounds that hinder accurate identification, an improved defect detection algorithm based on YOLOv8n, whose full name is You Only Look Once version 8 nano, was proposed. The C2f convolutional layers in both the backbone and neck networks were replaced by the C2f-Spatial and Channel Reconstruction Convolution (SCConv) convolutional network, which strengthens the model's capacity to extract detailed surface defect features. Additionally, a Convolutional Block Attention Module (CBAM) was incorporated after the Spatial Pyramid Pooling - Fast (SPPF) layer, enhancing the extraction of deep feature information. Furthermore, the original feature fusion method in YOLOv8n was replaced with a Bidirectional Feature Pyramid Network (BiFPN), significantly improving the detection accuracy. Extensive experiments conducted on a self-constructed dataset demonstrated the effectiveness of this approach, with improvements of 2.7% and 2.9% in mAP@0.5 and mAP@0.95, respectively. The results confirm that the proposed algorithm exhibits strong robustness and superior performance in detecting insulator iron cap defects under varied conditions.</description>
    <pubDate>04 Sep 2024 00:00:00 GMT</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;To address the challenges in detecting surface defects on insulator iron caps, particularly due to the complex backgrounds that hinder accurate identification, an improved defect detection algorithm based on YOLOv8n, whose full name is You Only Look Once version 8 nano, was proposed. The C2f convolutional layers in both the backbone and neck networks were replaced by the C2f-Spatial and Channel Reconstruction Convolution (SCConv) convolutional network, which strengthens the model's capacity to extract detailed surface defect features. Additionally, a Convolutional Block Attention Module (CBAM) was incorporated after the Spatial Pyramid Pooling - Fast (SPPF) layer, enhancing the extraction of deep feature information. Furthermore, the original feature fusion method in YOLOv8n was replaced with a Bidirectional Feature Pyramid Network (BiFPN), significantly improving the detection accuracy. Extensive experiments conducted on a self-constructed dataset demonstrated the effectiveness of this approach, with improvements of 2.7% and 2.9% in mAP@0.5 and mAP@0.95, respectively. The results confirm that the proposed algorithm exhibits strong robustness and superior performance in detecting insulator iron cap defects under varied conditions.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Enhanced Defect Detection in Insulator Iron Caps Using Improved YOLOv8n</dc:title>
    <dc:creator>Qiming Zhang</dc:creator>
    <dc:creator>Ying Liu</dc:creator>
    <dc:creator>Song Tang</dc:creator>
    <dc:creator>Kui Kang</dc:creator>
    <dc:identifier>doi: 10.56578/ida030302</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2024-09-04</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2024-09-04</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>3</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>162</prism:startingPage>
    <prism:doi>10.56578/ida030302</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2024_3_3/ida030302</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2024_3_3/ida030301">
    <title>Information Dynamics and Applications, 2024, Volume 3, Issue 3: Optimizing Energy Storage and Hybrid Inverter Performance in Smart Grids Through Machine Learning</title>
    <link>https://www.acadlore.com/article/IDA/2024_3_3/ida030301</link>
    <description>The effective integration of renewable energy sources (RES), such as solar and wind power, into smart grids is essential for advancing sustainable energy management. Hybrid inverters play a pivotal role in the conversion and distribution of this energy, but conventional approaches, including Static Resource Allocation (SRA) and Fixed Threshold Inverter Control (FTIC), frequently encounter inefficiencies, particularly in managing fluctuating renewable energy inputs and adapting to variable load demands. These inefficiencies lead to increased energy loss and a reduction in overall system performance. In response to these challenges, the Optimized Energy Storage and Hybrid Inverter Management Algorithm (OESHIMA) has been developed, employing machine learning for real-time data analysis and decision-making. By continuously monitoring energy production, storage capacity, and consumption patterns, OESHIMA dynamically optimizes energy allocation and inverter operations. Comparative analysis demonstrates that OESHIMA enhances energy efficiency by 0.25% and reduces energy loss by 0.20% when benchmarked against conventional methods. Furthermore, the algorithm extends the lifespan of energy storage systems by 0.15%, contributing to both sustainable and cost-efficient energy management within smart grids. These findings underscore the potential of OESHIMA in addressing the limitations of traditional energy management systems (EMSs) while improving hybrid inverter performance in the context of renewable energy integration.</description>
    <pubDate>24 Aug 2024 00:00:00 GMT</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;The effective integration of renewable energy sources (RES), such as solar and wind power, into smart grids is essential for advancing sustainable energy management. Hybrid inverters play a pivotal role in the conversion and distribution of this energy, but conventional approaches, including Static Resource Allocation (SRA) and Fixed Threshold Inverter Control (FTIC), frequently encounter inefficiencies, particularly in managing fluctuating renewable energy inputs and adapting to variable load demands. These inefficiencies lead to increased energy loss and a reduction in overall system performance. In response to these challenges, the Optimized Energy Storage and Hybrid Inverter Management Algorithm (OESHIMA) has been developed, employing machine learning for real-time data analysis and decision-making. By continuously monitoring energy production, storage capacity, and consumption patterns, OESHIMA dynamically optimizes energy allocation and inverter operations. Comparative analysis demonstrates that OESHIMA enhances energy efficiency by 0.25% and reduces energy loss by 0.20% when benchmarked against conventional methods. Furthermore, the algorithm extends the lifespan of energy storage systems by 0.15%, contributing to both sustainable and cost-efficient energy management within smart grids. These findings underscore the potential of OESHIMA in addressing the limitations of traditional energy management systems (EMSs) while improving hybrid inverter performance in the context of renewable energy integration.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Optimizing Energy Storage and Hybrid Inverter Performance in Smart Grids Through Machine Learning</dc:title>
    <dc:creator>Kavitha Hosakote Shankara</dc:creator>
    <dc:creator>Mallikarjunaswamy Srikantaswamy</dc:creator>
    <dc:creator>Sharmila Nagaraju</dc:creator>
    <dc:identifier>doi: 10.56578/ida030301</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2024-08-24</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2024-08-24</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>3</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>146</prism:startingPage>
    <prism:doi>10.56578/ida030301</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2024_3_3/ida030301</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2024_3_2/ida030205">
    <title>Information Dynamics and Applications, 2024, Volume 3, Issue 2: DV-Hop Positioning Method Based on Multi-Strategy Improved Sparrow Search Algorithm</title>
    <link>https://www.acadlore.com/article/IDA/2024_3_2/ida030205</link>
    <description>In order to address the problem of large positioning errors in non-ranging positioning algorithms for wireless sensor networks (WSN), this study proposes a Distance Vector-Hop (DV-Hop) positioning method based on the multi-strategy improved sparrow search algorithm (SSA). The method first introduces circle chaotic mapping, adaptive weighting factor, Gaussian variation and an inverse learning strategy to improve the iteration speed and optimization accuracy of the sparrow algorithm, and then uses the improved SSA to estimate the position of the unknown node. Experimental results show that, compared with the original method, the improved DV-Hop algorithm has significantly improved the positioning accuracy.</description>
    <pubDate>29 Jun 2024 00:00:00 GMT</pubDate>
    <content:encoded>&lt;![CDATA[ In order to address the problem of large positioning errors in non-ranging positioning algorithms for wireless sensor networks (WSN), this study proposes a Distance Vector-Hop (DV-Hop) positioning method based on the multi-strategy improved sparrow search algorithm (SSA). The method first introduces circle chaotic mapping, adaptive weighting factor, Gaussian variation and an inverse learning strategy to improve the iteration speed and optimization accuracy of the sparrow algorithm, and then uses the improved SSA to estimate the position of the unknown node. Experimental results show that, compared with the original method, the improved DV-Hop algorithm has significantly improved the positioning accuracy. ]]&gt;</content:encoded>
    <dc:title>DV-Hop Positioning Method Based on Multi-Strategy Improved Sparrow Search Algorithm</dc:title>
    <dc:creator>Wenli Lei</dc:creator>
    <dc:creator>Jinping Han</dc:creator>
    <dc:creator>Jiawei Bao</dc:creator>
    <dc:creator>Kun Jia</dc:creator>
    <dc:identifier>doi: 10.56578/ida030205</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2024-06-29</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2024-06-29</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>138</prism:startingPage>
    <prism:doi>10.56578/ida030205</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2024_3_2/ida030205</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2024_3_2/ida030204">
    <title>Information Dynamics and Applications, 2024, Volume 3, Issue 2: Optimizing Software Vulnerability Detection with MDSADNet: A Multi-Scale Convolutional Approach Enhanced by Mantis-Inspired Optimization</title>
    <link>https://www.acadlore.com/article/IDA/2024_3_2/ida030204</link>
    <description>The persistent emergence of software vulnerabilities necessitates the development of effective detection methodologies. Machine learning (ML) and deep learning (DL) offer promising avenues for automating feature extraction; however, their efficacy in vulnerability detection remains insufficiently explored. This study introduces the Multi-Deep Software Automation Detection Network (MDSADNet) to enhance binary and multi-class software classification. Unlike traditional one-dimensional Convolutional Neural Networks (CNNs), MDSADNet employs a novel two-dimensional multi-scale convolutional process to capture both intra-data and inter-data $n$-gram features. Experimental evaluations conducted on binary and multi-class datasets demonstrate MDSADNet's superior performance in software automation classification. Furthermore, the Mantis Search Algorithm (MSA), inspired by the foraging and mating behaviors of mantises, was incorporated to optimize MDSADNet’s hyperparameters. This optimization process was structured into three distinct stages: sexual cannibalism, prey pursuit, and prey assault. The model's validation involved performance metrics such as F1-score, recall, accuracy, and precision. Comparative analyses with state-of-the-art DL and ML models highlight MDSADNet's enhanced classification capabilities. The results indicate that MDSADNet significantly outperforms existing models, achieving higher accuracy and robustness in detecting software vulnerabilities.</description>
    <pubDate>23 Jun 2024 00:00:00 GMT</pubDate>
    <content:encoded>&lt;![CDATA[ The persistent emergence of software vulnerabilities necessitates the development of effective detection methodologies. Machine learning (ML) and deep learning (DL) offer promising avenues for automating feature extraction; however, their efficacy in vulnerability detection remains insufficiently explored. This study introduces the Multi-Deep Software Automation Detection Network (MDSADNet) to enhance binary and multi-class software classification. Unlike traditional one-dimensional Convolutional Neural Networks (CNNs), MDSADNet employs a novel two-dimensional multi-scale convolutional process to capture both intra-data and inter-data $n$-gram features. Experimental evaluations conducted on binary and multi-class datasets demonstrate MDSADNet's superior performance in software automation classification. Furthermore, the Mantis Search Algorithm (MSA), inspired by the foraging and mating behaviors of mantises, was incorporated to optimize MDSADNet’s hyperparameters. This optimization process was structured into three distinct stages: sexual cannibalism, prey pursuit, and prey assault. The model's validation involved performance metrics such as F1-score, recall, accuracy, and precision. Comparative analyses with state-of-the-art DL and ML models highlight MDSADNet's enhanced classification capabilities. The results indicate that MDSADNet significantly outperforms existing models, achieving higher accuracy and robustness in detecting software vulnerabilities. ]]&gt;</content:encoded>
    <dc:title>Optimizing Software Vulnerability Detection with MDSADNet: A Multi-Scale Convolutional Approach Enhanced by Mantis-Inspired Optimization</dc:title>
    <dc:creator>Srinivasa Rao Vemula</dc:creator>
    <dc:creator>Maruthi Vemula</dc:creator>
    <dc:creator>Ramesh Vatambeti</dc:creator>
    <dc:identifier>doi: 10.56578/ida030204</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2024-06-23</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2024-06-23</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>125</prism:startingPage>
    <prism:doi>10.56578/ida030204</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2024_3_2/ida030204</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2024_3_2/ida030203">
    <title>Information Dynamics and Applications, 2024, Volume 3, Issue 2: Enhancing Pneumonia Diagnosis with Transfer Learning: A Deep Learning Approach</title>
    <link>https://www.acadlore.com/article/IDA/2024_3_2/ida030203</link>
    <description>The significant impact of pneumonia on public health, particularly among vulnerable populations, underscores the critical need for early detection and treatment. This research leverages the National Institutes of Health (NIH) chest X-ray dataset, employing a comprehensive exploratory data analysis (EDA) to examine patient demographics, X-ray perspectives, and pixel-level evaluations. A pre-trained Visual Geometry Group (VGG) 16 model is integrated into the proposed architecture, emphasizing the synergy between robust machine learning techniques and EDA insights to enhance diagnostic accuracy. Rigorous data preparation methods are utilized to ensure dataset reliability, addressing missing data and sanitizing demographic information. The study not only provides valuable insights into pneumonia-related trends but also establishes a foundation for future advancements in medical diagnostics. Detailed results are presented, including disease distribution, model performance metrics, and clinical implications, highlighting the potential of machine learning models to support accurate and timely clinical decision-making. This integration of advanced technologies into traditional healthcare practices is expected to improve patient outcomes. Future directions include enhancing model sensitivity, incorporating diverse datasets, and collaborating with medical professionals to validate and implement the system in clinical settings. These efforts are anticipated to revolutionize pneumonia diagnosis and broader medical diagnostics. This work offers comprehensive code for developing and optimizing deep learning (DL) models for medical image classification, focusing on pneumonia detection in X-ray images. The code outlines the construction of the model using pre-trained architectures such as VGG16, detailing essential preparation steps including image augmentation and metadata parsing. 
Tools for data separation, generator creation, and callback training for monitoring are provided. Additionally, the code facilitates performance assessment through various metrics, including the receiver operating characteristic (ROC) curve and F1-score. By providing a systematic framework, this research aims to accelerate the development process for researchers in medical image processing and expedite the creation of accurate diagnostic tools.</description>
    <pubDate>2024-06-16</pubDate>
    <content:encoded>&lt;![CDATA[ The significant impact of pneumonia on public health, particularly among vulnerable populations, underscores the critical need for early detection and treatment. This research leverages the National Institutes of Health (NIH) chest X-ray dataset, employing a comprehensive exploratory data analysis (EDA) to examine patient demographics, X-ray perspectives, and pixel-level evaluations. A pre-trained Visual Geometry Group (VGG) 16 model is integrated into the proposed architecture, emphasizing the synergy between robust machine learning techniques and EDA insights to enhance diagnostic accuracy. Rigorous data preparation methods are utilized to ensure dataset reliability, addressing missing data and sanitizing demographic information. The study not only provides valuable insights into pneumonia-related trends but also establishes a foundation for future advancements in medical diagnostics. Detailed results are presented, including disease distribution, model performance metrics, and clinical implications, highlighting the potential of machine learning models to support accurate and timely clinical decision-making. This integration of advanced technologies into traditional healthcare practices is expected to improve patient outcomes. Future directions include enhancing model sensitivity, incorporating diverse datasets, and collaborating with medical professionals to validate and implement the system in clinical settings. These efforts are anticipated to revolutionize pneumonia diagnosis and broader medical diagnostics. This work offers comprehensive code for developing and optimizing deep learning (DL) models for medical image classification, focusing on pneumonia detection in X-ray images. The code outlines the construction of the model using pre-trained architectures such as VGG16, detailing essential preparation steps including image augmentation and metadata parsing. 
Tools for data separation, generator creation, and callback training for monitoring are provided. Additionally, the code facilitates performance assessment through various metrics, including the receiver operating characteristic (ROC) curve and F1-score. By providing a systematic framework, this research aims to accelerate the development process for researchers in medical image processing and expedite the creation of accurate diagnostic tools. ]]&gt;</content:encoded>
    <dc:title>Enhancing Pneumonia Diagnosis with Transfer Learning: A Deep Learning Approach</dc:title>
    <dc:creator>Rashmi Ashtagi</dc:creator>
    <dc:creator>Nitin Khanapurkar</dc:creator>
    <dc:creator>Abhijeet R. Patil</dc:creator>
    <dc:creator>Vinaya Sarmalkar</dc:creator>
    <dc:creator>Balaji Chaugule</dc:creator>
    <dc:creator>H. M. Naveen</dc:creator>
    <dc:identifier>doi:10.56578/ida030203</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2024-06-16</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2024-06-16</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>104</prism:startingPage>
    <prism:doi>10.56578/ida030203</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2024_3_2/ida030203</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2024_3_2/ida030202">
    <title>Information Dynamics and Applications, 2024, Volume 3, Issue 2: Advancements in Image Recognition: A Siamese Network Approach</title>
    <link>https://www.acadlore.com/article/IDA/2024_3_2/ida030202</link>
    <description> In the realm of computer vision, image recognition serves as a pivotal task with extensive applications in intelligent security, autonomous driving, and robotics. Traditional methodologies for image recognition often grapple with computational inefficiencies and diminished accuracy in complex scenarios and extensive datasets. To address these challenges, an algorithm utilizing a siamese network architecture has been developed. This architecture leverages dual interconnected neural network submodules for the efficient extraction and comparison of image features. The effectiveness of this siamese network-based algorithm is demonstrated through its application to various benchmark datasets, where it consistently outperforms conventional approaches in terms of accuracy and processing speed. By employing weight-sharing techniques and optimizing neural network pathways, the proposed algorithm enhances the robustness and efficiency of image recognition tasks. The advancements presented in this study not only contribute to the theoretical understanding but also offer practical solutions, underscoring the significant potential and applicability of siamese networks in advancing image recognition technologies.</description>
    <pubDate>2024-06-13</pubDate>
    <content:encoded>&lt;![CDATA[  In the realm of computer vision, image recognition serves as a pivotal task with extensive applications in intelligent security, autonomous driving, and robotics. Traditional methodologies for image recognition often grapple with computational inefficiencies and diminished accuracy in complex scenarios and extensive datasets. To address these challenges, an algorithm utilizing a siamese network architecture has been developed. This architecture leverages dual interconnected neural network submodules for the efficient extraction and comparison of image features. The effectiveness of this siamese network-based algorithm is demonstrated through its application to various benchmark datasets, where it consistently outperforms conventional approaches in terms of accuracy and processing speed. By employing weight-sharing techniques and optimizing neural network pathways, the proposed algorithm enhances the robustness and efficiency of image recognition tasks. The advancements presented in this study not only contribute to the theoretical understanding but also offer practical solutions, underscoring the significant potential and applicability of siamese networks in advancing image recognition technologies. ]]&gt;</content:encoded>
    <dc:title>Advancements in Image Recognition: A Siamese Network Approach</dc:title>
    <dc:creator>Jiaqi Du</dc:creator>
    <dc:creator>Wanshu Fu</dc:creator>
    <dc:creator>Yi Zhang</dc:creator>
    <dc:creator>Ziqi Wang</dc:creator>
    <dc:identifier>doi:10.56578/ida030202</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2024-06-13</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2024-06-13</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>89</prism:startingPage>
    <prism:doi>10.56578/ida030202</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2024_3_2/ida030202</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2024_3_2/ida030201">
    <title>Information Dynamics and Applications, 2024, Volume 3, Issue 2: An Improved TextRank Keyword Extraction Method Based on the Watts-Strogatz Model</title>
    <link>https://www.acadlore.com/article/IDA/2024_3_2/ida030201</link>
    <description>Traditional methods for keyword extraction predominantly rely on statistical relationships between words, neglecting the cohesive structure of the extracted keyword set. This study introduces an enhanced method for keyword extraction, utilizing the Watts-Strogatz model to construct a word network graph from candidate words within the text. By leveraging the characteristics of small-world networks (SWNs), i.e., short average path lengths and high clustering coefficients, the method ascertains the relevance between words and their impact on sentence cohesion. A comprehensive weight for each word is calculated through a linear weighting of features including part of speech, position, and Term Frequency-Inverse Document Frequency (TF-IDF), subsequently improving the impact factors of the TextRank algorithm for obtaining the final weight of candidate words. This approach facilitates the extraction of keywords based on the final weight outcomes. Through uncovering the deep hidden structures of feature words, the method effectively reveals the connectivity within the word network graph. Experiments demonstrate superiority over existing methods in terms of precision, recall, and F1-measure.</description>
    <pubDate>2024-05-05</pubDate>
    <content:encoded>&lt;![CDATA[ Traditional methods for keyword extraction predominantly rely on statistical relationships between words, neglecting the cohesive structure of the extracted keyword set. This study introduces an enhanced method for keyword extraction, utilizing the Watts-Strogatz model to construct a word network graph from candidate words within the text. By leveraging the characteristics of small-world networks (SWNs), i.e., short average path lengths and high clustering coefficients, the method ascertains the relevance between words and their impact on sentence cohesion. A comprehensive weight for each word is calculated through a linear weighting of features including part of speech, position, and Term Frequency-Inverse Document Frequency (TF-IDF), subsequently improving the impact factors of the TextRank algorithm for obtaining the final weight of candidate words. This approach facilitates the extraction of keywords based on the final weight outcomes. Through uncovering the deep hidden structures of feature words, the method effectively reveals the connectivity within the word network graph. Experiments demonstrate superiority over existing methods in terms of precision, recall, and F1-measure. ]]&gt;</content:encoded>
    <dc:title>An Improved TextRank Keyword Extraction Method Based on the Watts-Strogatz Model</dc:title>
    <dc:creator>Aofan Li</dc:creator>
    <dc:creator>Lin Zhang</dc:creator>
    <dc:creator>Ashim Khadka</dc:creator>
    <dc:identifier>doi:10.56578/ida030201</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2024-05-05</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2024-05-05</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>77</prism:startingPage>
    <prism:doi>10.56578/ida030201</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2024_3_2/ida030201</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2024_3_1/ida030105">
    <title>Information Dynamics and Applications, 2024, Volume 3, Issue 1: Comparative Analysis of Machine Learning Algorithms for Daily Cryptocurrency Price Prediction</title>
    <link>https://www.acadlore.com/article/IDA/2024_3_1/ida030105</link>
    <description>The decentralised nature of cryptocurrency, coupled with its potential for significant financial returns, has elevated its status as a sought-after investment opportunity on a global scale. Nonetheless, the inherent unpredictability and volatility of the cryptocurrency market present considerable challenges for investors aiming to forecast price movements and secure profitable investments. In response to this challenge, the current investigation was conducted to assess the efficacy of three Machine Learning (ML) algorithms, namely, Gradient Boosting (GB), Random Forest (RF), and Bagging, in predicting the daily closing prices of six major cryptocurrencies, namely, Binance, Bitcoin, Ethereum, Solana, USD, and XRP. The study utilised historical price data spanning from January 1, 2015 to January 26, 2024 for Bitcoin, from January 1, 2018 to January 26, 2024 for Ethereum and XRP, from January 1, 2021 to January 26, 2024 for Solana, and from January 1, 2019 to January 26, 2024 for USD. A novel approach was adopted wherein the lagging prices of the cryptocurrencies were employed as features for prediction, as opposed to the conventional method of using opening, high, and low prices, which are not predictive in nature. The data set was divided into a training set (80%) and a testing set (20%) for the evaluation of the algorithms. The performance of these ML algorithms was systematically compared using a suite of metrics, including R2, adjusted R2, Mean Square Error (MSE), Root Mean Square Error (RMSE), and Mean Absolute Error (MAE). The findings revealed that the GB algorithm exhibited superior performance in predicting the prices of Bitcoin and Solana, whereas the RF algorithm demonstrated greater efficacy for Ethereum, USD, and XRP. This comparative analysis underscores the relative advantages of RF over GB and Bagging algorithms in the context of cryptocurrency price prediction. 
The outcomes of this study not only contribute to the existing body of knowledge on the application of ML algorithms in financial markets but also provide actionable insights for investors navigating the volatile cryptocurrency market.</description>
    <pubDate>2024-03-29</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;The decentralised nature of cryptocurrency, coupled with its potential for significant financial returns, has elevated its status as a sought-after investment opportunity on a global scale. Nonetheless, the inherent unpredictability and volatility of the cryptocurrency market present considerable challenges for investors aiming to forecast price movements and secure profitable investments. In response to this challenge, the current investigation was conducted to assess the efficacy of three Machine Learning (ML) algorithms, namely, Gradient Boosting (GB), Random Forest (RF), and Bagging, in predicting the daily closing prices of six major cryptocurrencies, namely, Binance, Bitcoin, Ethereum, Solana, USD, and XRP. The study utilised historical price data spanning from January 1, 2015 to January 26, 2024 for Bitcoin, from January 1, 2018 to January 26, 2024 for Ethereum and XRP, from January 1, 2021 to January 26, 2024 for Solana, and from January 1, 2019 to January 26, 2024 for USD. A novel approach was adopted wherein the lagging prices of the cryptocurrencies were employed as features for prediction, as opposed to the conventional method of using opening, high, and low prices, which are not predictive in nature. The data set was divided into a training set (80%) and a testing set (20%) for the evaluation of the algorithms. The performance of these ML algorithms was systematically compared using a suite of metrics, including R2, adjusted R2, Mean Square Error (MSE), Root Mean Square Error (RMSE), and Mean Absolute Error (MAE). The findings revealed that the GB algorithm exhibited superior performance in predicting the prices of Bitcoin and Solana, whereas the RF algorithm demonstrated greater efficacy for Ethereum, USD, and XRP. This comparative analysis underscores the relative advantages of RF over GB and Bagging algorithms in the context of cryptocurrency price prediction. 
The outcomes of this study not only contribute to the existing body of knowledge on the application of ML algorithms in financial markets but also provide actionable insights for investors navigating the volatile cryptocurrency market.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Comparative Analysis of Machine Learning Algorithms for Daily Cryptocurrency Price Prediction</dc:title>
    <dc:creator>Timothy Kayode Samson</dc:creator>
    <dc:identifier>doi:10.56578/ida030105</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2024-03-29</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2024-03-29</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>64</prism:startingPage>
    <prism:doi>10.56578/ida030105</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2024_3_1/ida030105</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2024_3_1/ida030104">
    <title>Information Dynamics and Applications, 2024, Volume 3, Issue 1: Enhancing 5G LTE Communications: A Novel LDPC Decoder for Next-Generation Systems</title>
    <link>https://www.acadlore.com/article/IDA/2024_3_1/ida030104</link>
    <description>The advent of fifth-generation (5G) long-term evolution (LTE) technology represents a critical leap forward in telecommunications, enabling unprecedented high-speed data transfer essential for today’s digital society. Despite the advantages, the transition introduces significant challenges, including elevated bit error rate (BER), diminished signal-to-noise ratio (SNR), and the risk of jitter, undermining network reliability and efficiency. In response, a novel low-density parity check (LDPC) decoder optimized for 5G LTE applications has been developed. This decoder is tailored to significantly reduce BER and improve SNR, thereby enhancing the performance and reliability of 5G communications networks. Its design accommodates advanced switching and parallel processing capabilities, crucial for handling complex data flows inherent in contemporary telecommunications systems. A distinctive feature of this decoder is its dynamic adaptability in adjusting message sizes and code rates, coupled with the augmentation of throughput via reconfigurable switching operations. These innovations allow for a versatile approach to optimizing 5G networks. Comparative analyses demonstrate the decoder’s superior performance relative to the quasi-cyclic low-density check code (QCLDC) method, evidencing marked improvements in communication quality and system efficiency. The introduction of this LDPC decoder thus marks a significant contribution to the evolution of 5G networks, offering a robust solution to the pressing challenges faced by next-generation communication systems and establishing a new standard for high-speed wireless connectivity.</description>
    <pubDate>2024-03-21</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;The advent of fifth-generation (5G) long-term evolution (LTE) technology represents a critical leap forward in telecommunications, enabling unprecedented high-speed data transfer essential for today’s digital society. Despite the advantages, the transition introduces significant challenges, including elevated bit error rate (BER), diminished signal-to-noise ratio (SNR), and the risk of jitter, undermining network reliability and efficiency. In response, a novel low-density parity check (LDPC) decoder optimized for 5G LTE applications has been developed. This decoder is tailored to significantly reduce BER and improve SNR, thereby enhancing the performance and reliability of 5G communications networks. Its design accommodates advanced switching and parallel processing capabilities, crucial for handling complex data flows inherent in contemporary telecommunications systems. A distinctive feature of this decoder is its dynamic adaptability in adjusting message sizes and code rates, coupled with the augmentation of throughput via reconfigurable switching operations. These innovations allow for a versatile approach to optimizing 5G networks. Comparative analyses demonstrate the decoder’s superior performance relative to the quasi-cyclic low-density check code (QCLDC) method, evidencing marked improvements in communication quality and system efficiency. The introduction of this LDPC decoder thus marks a significant contribution to the evolution of 5G networks, offering a robust solution to the pressing challenges faced by next-generation communication systems and establishing a new standard for high-speed wireless connectivity.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Enhancing 5G LTE Communications: A Novel LDPC Decoder for Next-Generation Systems</dc:title>
    <dc:creator>Divyashree Yamadur Venkatesh</dc:creator>
    <dc:creator>Komala Mallikarjunaiah</dc:creator>
    <dc:creator>Mallikarjunaswamy Srikantaswamy</dc:creator>
    <dc:creator>Ke Huang</dc:creator>
    <dc:identifier>doi:10.56578/ida030104</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2024-03-21</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2024-03-21</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>47</prism:startingPage>
    <prism:doi>10.56578/ida030104</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2024_3_1/ida030104</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2024_3_1/ida030103">
    <title>Information Dynamics and Applications, 2024, Volume 3, Issue 1: A Comparative Review of Internet of Things Model Workload Distribution Techniques in Fog Computing Networks</title>
    <link>https://www.acadlore.com/article/IDA/2024_3_1/ida030103</link>
    <description>In the realm of fog computing (FC), a vast array of intelligent devices collaborates within an intricate network, a synergy that, while promising, has not been without its challenges. These challenges, including data loss, difficulties in workload distribution, a lack of parallel processing capabilities, and security vulnerabilities, have necessitated the exploration and deployment of a variety of solutions. Among these, software-defined networks (SDN), double-Q learning algorithms, service function chains (SFC), virtual network functions (VNF) stand out as significant. An exhaustive survey has been conducted to explore workload distribution methodologies within Internet of Things (IoT) architectures in FC networks. This investigation is anchored in a parameter-centric analysis, aiming to enhance the efficiency of data transmission across such networks. It delves into the architectural framework, pivotal pathways, and applications, aiming to identify bottlenecks and forge the most effective communication channels for IoT devices under substantial workload conditions. The findings of this research are anticipated to guide the selection of superior simulation tools, validate datasets, and refine strategies for data propagation. This, in turn, is expected to facilitate optimal power consumption and enhance outcomes in data transmission and propagation across multiple dimensions. The rigorous exploration detailed herein not only illuminates the complexities of workload distribution in FC networks but also charts a course towards more resilient and efficient IoT ecosystems.</description>
    <pubDate>2024-03-17</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;In the realm of fog computing (FC), a vast array of intelligent devices collaborates within an intricate network, a synergy that, while promising, has not been without its challenges. These challenges, including data loss, difficulties in workload distribution, a lack of parallel processing capabilities, and security vulnerabilities, have necessitated the exploration and deployment of a variety of solutions. Among these, software-defined networks (SDN), double-Q learning algorithms, service function chains (SFC), virtual network functions (VNF) stand out as significant. An exhaustive survey has been conducted to explore workload distribution methodologies within Internet of Things (IoT) architectures in FC networks. This investigation is anchored in a parameter-centric analysis, aiming to enhance the efficiency of data transmission across such networks. It delves into the architectural framework, pivotal pathways, and applications, aiming to identify bottlenecks and forge the most effective communication channels for IoT devices under substantial workload conditions. The findings of this research are anticipated to guide the selection of superior simulation tools, validate datasets, and refine strategies for data propagation. This, in turn, is expected to facilitate optimal power consumption and enhance outcomes in data transmission and propagation across multiple dimensions. The rigorous exploration detailed herein not only illuminates the complexities of workload distribution in FC networks but also charts a course towards more resilient and efficient IoT ecosystems.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>A Comparative Review of Internet of Things Model Workload Distribution Techniques in Fog Computing Networks</dc:title>
    <dc:creator>Nandini Gowda Puttaswamy</dc:creator>
    <dc:creator>Anitha Narasimha Murthy</dc:creator>
    <dc:creator>Houssem Degha</dc:creator>
    <dc:identifier>doi:10.56578/ida030103</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2024-03-17</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2024-03-17</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>21</prism:startingPage>
    <prism:doi>10.56578/ida030103</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2024_3_1/ida030103</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2024_3_1/ida030102">
    <title>Information Dynamics and Applications, 2024, Volume 3, Issue 1: Enhancing Image Captioning and Auto-Tagging Through a FCLN with Faster R-CNN Integration</title>
    <link>https://www.acadlore.com/article/IDA/2024_3_1/ida030102</link>
    <description>In the realm of automated image captioning, which entails generating descriptive text for images, the fusion of Natural Language Processing (NLP) and computer vision techniques is paramount. This study introduces the Fully Convolutional Localization Network (FCLN), a novel approach that concurrently addresses localization and description tasks within a singular forward pass. It maintains spatial information and avoids detail loss, streamlining the training process with consistent optimization. The foundation of FCLN is laid by a Convolutional Neural Network (CNN), adept at extracting salient image features. Central to this architecture is a Localization Layer, pivotal in precise object detection and caption generation. The FCLN architecture amalgamates a region detection network, reminiscent of Faster Region-CNN (R-CNN), with a captioning network. This synergy enables the production of contextually meaningful image captions. The incorporation of the Faster R-CNN framework facilitates region-based object detection, offering precise contextual understanding and inter-object relationships. Concurrently, a Long Short-Term Memory (LSTM) network is employed for generating captions. This integration yields superior performance in caption accuracy, particularly in complex scenes. Evaluations conducted on the Microsoft Common Objects in Context (MS COCO) test server affirm the model's superiority over existing benchmarks, underscoring its efficacy in generating precise and context-rich image captions.</description>
    <pubDate>2024-02-02</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;In the realm of automated image captioning, which entails generating descriptive text for images, the fusion of Natural Language Processing (NLP) and computer vision techniques is paramount. This study introduces the Fully Convolutional Localization Network (FCLN), a novel approach that concurrently addresses localization and description tasks within a singular forward pass. It maintains spatial information and avoids detail loss, streamlining the training process with consistent optimization. The foundation of FCLN is laid by a Convolutional Neural Network (CNN), adept at extracting salient image features. Central to this architecture is a Localization Layer, pivotal in precise object detection and caption generation. The FCLN architecture amalgamates a region detection network, reminiscent of Faster Region-CNN (R-CNN), with a captioning network. This synergy enables the production of contextually meaningful image captions. The incorporation of the Faster R-CNN framework facilitates region-based object detection, offering precise contextual understanding and inter-object relationships. Concurrently, a Long Short-Term Memory (LSTM) network is employed for generating captions. This integration yields superior performance in caption accuracy, particularly in complex scenes. Evaluations conducted on the Microsoft Common Objects in Context (MS COCO) test server affirm the model's superiority over existing benchmarks, underscoring its efficacy in generating precise and context-rich image captions.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Enhancing Image Captioning and Auto-Tagging Through a FCLN with Faster R-CNN Integration</dc:title>
    <dc:creator>Shalaka Prasad Deore</dc:creator>
    <dc:creator>Taibah Sohail Bagwan</dc:creator>
    <dc:creator>Prachiti Sunil Bhukan</dc:creator>
    <dc:creator>Harsheen Tejindersingh Rajpal</dc:creator>
    <dc:creator>Shantanu Bharat Gade</dc:creator>
    <dc:identifier>doi:10.56578/ida030102</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2024-02-02</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2024-02-02</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>12</prism:startingPage>
    <prism:doi>10.56578/ida030102</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2024_3_1/ida030102</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2024_3_1/ida030101">
    <title>Information Dynamics and Applications, 2024, Volume 3, Issue 1: Optimizing Misinformation Control: A Cloud-Enhanced Machine Learning Approach</title>
    <link>https://www.acadlore.com/article/IDA/2024_3_1/ida030101</link>
    <description>The digital age has witnessed the rampant spread of misinformation, significantly impacting the medical and financial sectors. This phenomenon, fueled by various sources, contributes to public distress and information warfare, necessitating robust countermeasures. In response, a novel model has been developed, integrating cloud computing with advanced machine learning techniques. This model prioritizes the identification and mitigation of false information through optimized classification strategies. Utilizing diverse datasets for predictive analysis, the model employs state-of-the-art algorithms, including K-Nearest Neighbors (KNN) and Random Forest (RF), to enhance accuracy and efficiency. A distinctive feature of this approach is the implementation of cloud-empowered transfer learning, providing a scalable and optimized solution to address the challenges posed by the vast, yet often unreliable, information available online. By harnessing the potential of cloud computing and machine learning, this model offers a strategic approach to combating the prevalent issue of misinformation in the digital world.</description>
    <pubDate>01-24-2024</pubDate>
    <content:encoded>&lt;![CDATA[ The digital age has witnessed the rampant spread of misinformation, significantly impacting the medical and financial sectors. This phenomenon, fueled by various sources, contributes to public distress and information warfare, necessitating robust countermeasures. In response, a novel model has been developed, integrating cloud computing with advanced machine learning techniques. This model prioritizes the identification and mitigation of false information through optimized classification strategies. Utilizing diverse datasets for predictive analysis, the model employs state-of-the-art algorithms, including K-Nearest Neighbors (KNN) and Random Forest (RF), to enhance accuracy and efficiency. A distinctive feature of this approach is the implementation of cloud-empowered transfer learning, providing a scalable and optimized solution to address the challenges posed by the vast, yet often unreliable, information available online. By harnessing the potential of cloud computing and machine learning, this model offers a strategic approach to combating the prevalent issue of misinformation in the digital world. ]]&gt;</content:encoded>
    <dc:title>Optimizing Misinformation Control: A Cloud-Enhanced Machine Learning Approach</dc:title>
    <dc:creator>Muhammad Daniyal Baig</dc:creator>
    <dc:creator>Waseem Akram</dc:creator>
    <dc:creator>Hafiz Burhan Ul Haq</dc:creator>
    <dc:creator>Hassan Zahoor Rajput</dc:creator>
    <dc:creator>Muhammad Imran</dc:creator>
    <dc:identifier>doi: 10.56578/ida030101</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>01-24-2024</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>01-24-2024</prism:publicationDate>
    <prism:year>2024</prism:year>
    <prism:volume>3</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>1</prism:startingPage>
    <prism:doi>10.56578/ida030101</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2024_3_1/ida030101</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2023_2_4/ida020405">
    <title>Information Dynamics and Applications, 2023, Volume 2, Issue 4: Critical Factors Influencing Cloud Security Posture of Enterprises: An Empirical Analysis</title>
    <link>https://www.acadlore.com/article/IDA/2023_2_4/ida020405</link>
    <description>This study examines the aspects that can impact an organization's cloud security posture and the consequences for their cloud adoption strategies. Based on a thorough examination of existing literature, a conceptual framework is developed that includes several aspects such as organisational, technical, regulatory, operational, and human elements. The cloud security readiness is influenced by these five types of characteristics. A research instrument is utilised to evaluate the hypotheses pertaining to those aspects. The pilot survey showcases the research tool within the framework of a representative sample of organisations. In addition to conducting instrument testing, the initial responses also validate the importance of several elements that impact cloud security. The prominence of technical capabilities as a key factor underscores their vital contribution to bolstering cybersecurity readiness. Regulatory factors have a significant role in emphasising the necessity of compliance in cloud security. Organisational elements, such as managerial support, training, budget allocation, policy adherence, and governance, have a moderate impact. The presence of human elements also appears to contribute to and emphasise the necessity of promoting security awareness and alertness. This study enhances the existing body of knowledge on cloud security by offering insights into the various complex issues involved. The results can provide guidance to professionals seeking to enhance the cloud security of enterprises and scholars studying the changing cloud environment.</description>
    <pubDate>12-30-2023</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;This study examines the aspects that can impact an organization's cloud security posture and the consequences for their cloud adoption strategies. Based on a thorough examination of existing literature, a conceptual framework is developed that includes several aspects such as organisational, technical, regulatory, operational, and human elements. The cloud security readiness is influenced by these five types of characteristics. A research instrument is utilised to evaluate the hypotheses pertaining to those aspects. The pilot survey showcases the research tool within the framework of a representative sample of organisations. In addition to conducting instrument testing, the initial responses also validate the importance of several elements that impact cloud security. The prominence of technical capabilities as a key factor underscores their vital contribution to bolstering cybersecurity readiness. Regulatory factors have a significant role in emphasising the necessity of compliance in cloud security. Organisational elements, such as managerial support, training, budget allocation, policy adherence, and governance, have a moderate impact. The presence of human elements also appears to contribute to and emphasise the necessity of promoting security awareness and alertness. This study enhances the existing body of knowledge on cloud security by offering insights into the various complex issues involved. The results can provide guidance to professionals seeking to enhance the cloud security of enterprises and scholars studying the changing cloud environment.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Critical Factors Influencing Cloud Security Posture of Enterprises: An Empirical Analysis</dc:title>
    <dc:creator>Vidura Jayasinghe</dc:creator>
    <dc:creator>Emre Erturk</dc:creator>
    <dc:creator>Zhe Li</dc:creator>
    <dc:identifier>doi: 10.56578/ida020405</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>12-30-2023</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>12-30-2023</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>4</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>210</prism:startingPage>
    <prism:doi>10.56578/ida020405</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2023_2_4/ida020405</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2023_2_4/ida020404">
    <title>Information Dynamics and Applications, 2023, Volume 2, Issue 4: Advancements in Cow Health Monitoring: A Systematic Literature Review of IoT Applications</title>
    <link>https://www.acadlore.com/article/IDA/2023_2_4/ida020404</link>
    <description>The landscape of livestock farming is undergoing a significant transformation, primarily influenced by the integration of the Internet of Things (IoT) technology. This systematic literature review (SLR) critically examines the role of IoT in enhancing cow health monitoring, a burgeoning field of research drawing considerable attention in recent years. Spanning articles published from 2017 to 2023 in eminent academic forums, this study meticulously selected and analyzed thirty publications. These were chosen through a structured process, evaluating each for its relevance based on title and abstract. The review encapsulates a thorough investigation of the applications, sensors, and devices underpinning IoT-based cow health monitoring systems. It is observed that the current research landscape is dynamically evolving, marked by emerging trends and noticeable gaps in technology and application. This synthesis of existing literature offers an insightful overview of the potential and limitations inherent in current IoT solutions, highlighting their efficacy in real-world scenarios. Furthermore, this review delineates the challenges faced and posits future research directions to address unresolved issues in cow health monitoring. The primary objective of this systematic analysis is to consolidate research findings, thereby advancing the understanding of IoT's impact in this field. It also aims to foster a comprehensive dialogue on the technological advancements and their implications for future research endeavors in livestock farming.</description>
    <pubDate>12-18-2023</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;The landscape of livestock farming is undergoing a significant transformation, primarily influenced by the integration of the Internet of Things (IoT) technology. This systematic literature review (SLR) critically examines the role of IoT in enhancing cow health monitoring, a burgeoning field of research drawing considerable attention in recent years. Spanning articles published from 2017 to 2023 in eminent academic forums, this study meticulously selected and analyzed thirty publications. These were chosen through a structured process, evaluating each for its relevance based on title and abstract. The review encapsulates a thorough investigation of the applications, sensors, and devices underpinning IoT-based cow health monitoring systems. It is observed that the current research landscape is dynamically evolving, marked by emerging trends and noticeable gaps in technology and application. This synthesis of existing literature offers an insightful overview of the potential and limitations inherent in current IoT solutions, highlighting their efficacy in real-world scenarios. Furthermore, this review delineates the challenges faced and posits future research directions to address unresolved issues in cow health monitoring. The primary objective of this systematic analysis is to consolidate research findings, thereby advancing the understanding of IoT's impact in this field. It also aims to foster a comprehensive dialogue on the technological advancements and their implications for future research endeavors in livestock farming.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Advancements in Cow Health Monitoring: A Systematic Literature Review of IoT Applications</dc:title>
    <dc:creator>Muhammad Hassaan</dc:creator>
    <dc:identifier>doi: 10.56578/ida020404</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>12-18-2023</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>12-18-2023</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>4</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>199</prism:startingPage>
    <prism:doi>10.56578/ida020404</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2023_2_4/ida020404</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2023_2_4/ida020403">
    <title>Information Dynamics and Applications, 2023, Volume 2, Issue 4: Enhanced Detection of COVID-19 in Chest X-ray Images: A Comparative Analysis of CNNs and the DL+ Ensemble Technique</title>
    <link>https://www.acadlore.com/article/IDA/2023_2_4/ida020403</link>
    <description>The swift global spread of Corona Virus Disease 2019 (COVID-19), identified merely four months prior, necessitates rapid and precise diagnostic methods. Currently, the diagnosis largely depends on computed tomography (CT) image interpretation by medical professionals, a process susceptible to human error. This research delves into the utility of Convolutional Neural Networks (CNNs) in automating the classification of COVID-19 from medical images. An exhaustive evaluation and comparison of prominent CNN architectures, namely Visual Geometry Group (VGG), Residual Network (ResNet), MobileNet, Inception, and Xception, are conducted. Furthermore, the study investigates ensemble approaches to harness the combined strengths of these models. Findings demonstrate the distinct advantage of ensemble models, with the novel deep learning (DL)+ ensemble technique notably surpassing the accuracy, precision, recall, and F-score of individual CNNs, achieving an exceptional rate of 99.5%. This remarkable performance accentuates the transformative potential of CNNs in COVID-19 diagnostics. The significance of this advancement lies not only in its reliability and automated nature, surpassing traditional, subjective human interpretation but also in its contribution to accelerating the diagnostic process. This acceleration is pivotal for the effective implementation of containment and mitigation strategies against the pandemic. The abstract delineates the methodological choices, highlights the unparalleled efficacy of the DL+ ensemble technique, and underscores the far-reaching implications of employing CNNs for COVID-19 detection.</description>
    <pubDate>11-30-2023</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;The swift global spread of Corona Virus Disease 2019 (COVID-19), identified merely four months prior, necessitates rapid and precise diagnostic methods. Currently, the diagnosis largely depends on computed tomography (CT) image interpretation by medical professionals, a process susceptible to human error. This research delves into the utility of Convolutional Neural Networks (CNNs) in automating the classification of COVID-19 from medical images. An exhaustive evaluation and comparison of prominent CNN architectures, namely Visual Geometry Group (VGG), Residual Network (ResNet), MobileNet, Inception, and Xception, are conducted. Furthermore, the study investigates ensemble approaches to harness the combined strengths of these models. Findings demonstrate the distinct advantage of ensemble models, with the novel deep learning (DL)+ ensemble technique notably surpassing the accuracy, precision, recall, and F-score of individual CNNs, achieving an exceptional rate of 99.5%. This remarkable performance accentuates the transformative potential of CNNs in COVID-19 diagnostics. The significance of this advancement lies not only in its reliability and automated nature, surpassing traditional, subjective human interpretation but also in its contribution to accelerating the diagnostic process. This acceleration is pivotal for the effective implementation of containment and mitigation strategies against the pandemic. The abstract delineates the methodological choices, highlights the unparalleled efficacy of the DL+ ensemble technique, and underscores the far-reaching implications of employing CNNs for COVID-19 detection.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Enhanced Detection of COVID-19 in Chest X-ray Images: A Comparative Analysis of CNNs and the DL+ Ensemble Technique</dc:title>
    <dc:creator>Bwanali Haji Ntaibu Jereni</dc:creator>
    <dc:creator>Iota Sundire</dc:creator>
    <dc:identifier>doi: 10.56578/ida020403</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>11-30-2023</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>11-30-2023</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>4</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>186</prism:startingPage>
    <prism:doi>10.56578/ida020403</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2023_2_4/ida020403</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2023_2_4/ida020402">
    <title>Information Dynamics and Applications, 2023, Volume 2, Issue 4: Enhancing Healthcare Data Security in IoT Environments Using Blockchain and DCGRU with Twofish Encryption</title>
    <link>https://www.acadlore.com/article/IDA/2023_2_4/ida020402</link>
    <description>In the rapidly evolving landscape of digital healthcare, the integration of cloud computing, Internet of Things (IoT), and advanced computational methodologies such as machine learning and artificial intelligence (AI) has significantly enhanced early disease detection, accessibility, and diagnostic scope. However, this progression has concurrently elevated concerns regarding the safeguarding of sensitive patient data. Addressing this challenge, a novel secure healthcare system employing a blockchain-based IoT framework, augmented by deep learning and biomimetic algorithms, is presented. The initial phase encompasses a blockchain-facilitated mechanism for secure data storage, authentication of users, and prognostication of health status. Subsequently, the modified Jellyfish Search Optimization (JSO) algorithm is employed for optimal feature selection from datasets. A unique health status prediction model is introduced, leveraging a Deep Convolutional Gated Recurrent Unit (DCGRU) approach. This model ingeniously combines Convolutional Neural Network (CNN) and Gated Recurrent Unit (GRU) processes, where the GRU network extracts pivotal directional characteristics, and the CNN architecture discerns complex interrelationships within the data. Security of the data management system is fortified through the implementation of the twofish encryption algorithm. The efficacy of the proposed model is rigorously evaluated using standard medical datasets, including Diabetes and EEG Eyestate, employing diverse performance metrics. Experimental results demonstrate the model's superiority over existing best practices, achieving a notable accuracy of 0.884. Furthermore, comparative analyses with the Advanced Encryption Standard (AES) and Elliptic Curve Cryptography (ECC) models reveal enhanced performance metrics, with the proposed model achieving a processing time and throughput of 40 and 45.42, respectively.</description>
    <pubDate>11-30-2023</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;In the rapidly evolving landscape of digital healthcare, the integration of cloud computing, Internet of Things (IoT), and advanced computational methodologies such as machine learning and artificial intelligence (AI) has significantly enhanced early disease detection, accessibility, and diagnostic scope. However, this progression has concurrently elevated concerns regarding the safeguarding of sensitive patient data. Addressing this challenge, a novel secure healthcare system employing a blockchain-based IoT framework, augmented by deep learning and biomimetic algorithms, is presented. The initial phase encompasses a blockchain-facilitated mechanism for secure data storage, authentication of users, and prognostication of health status. Subsequently, the modified Jellyfish Search Optimization (JSO) algorithm is employed for optimal feature selection from datasets. A unique health status prediction model is introduced, leveraging a Deep Convolutional Gated Recurrent Unit (DCGRU) approach. This model ingeniously combines Convolutional Neural Network (CNN) and Gated Recurrent Unit (GRU) processes, where the GRU network extracts pivotal directional characteristics, and the CNN architecture discerns complex interrelationships within the data. Security of the data management system is fortified through the implementation of the twofish encryption algorithm. The efficacy of the proposed model is rigorously evaluated using standard medical datasets, including Diabetes and EEG Eyestate, employing diverse performance metrics. Experimental results demonstrate the model's superiority over existing best practices, achieving a notable accuracy of 0.884. 
Furthermore, comparative analyses with the Advanced Encryption Standard (AES) and Elliptic Curve Cryptography (ECC) models reveal enhanced performance metrics, with the proposed model achieving a processing time and throughput of 40 and 45.42, respectively.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Enhancing Healthcare Data Security in IoT Environments Using Blockchain and DCGRU with Twofish Encryption</dc:title>
    <dc:creator>Kumar Raja Depa Ramachandraiah</dc:creator>
    <dc:creator>Naga Jagadesh Bommagani</dc:creator>
    <dc:creator>Praveen Kumar Jayapal</dc:creator>
    <dc:identifier>doi: 10.56578/ida020402</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>11-30-2023</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>11-30-2023</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>4</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>173</prism:startingPage>
    <prism:doi>10.56578/ida020402</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2023_2_4/ida020402</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2023_2_4/ida020401">
    <title>Information Dynamics and Applications, 2023, Volume 2, Issue 4: Comparative Analysis of Seizure Manifestations in Alzheimer’s and Glioma Patients via Magnetic Resonance Imaging</title>
    <link>https://www.acadlore.com/article/IDA/2023_2_4/ida020401</link>
    <description>A notable association between Alzheimer's Disease and Epilepsy, two divergent neurological conditions, has been established through previous research, illustrating an elevated seizure development risk in individuals diagnosed with Alzheimer’s Disease (AD). The hippocampus, fundamental in both seizure and tumour pathology, is intricately investigated herein. The subsequent aberrant electrical activity within this brain region, frequently implicated in seizure onset and propagation, underpins a complex relationship between degenerative cerebral changes and seizure incidence. Symptomatic manifestations in hippocampal glioma include, but are not limited to, seizures, memory deficits, and language difficulties, contingent upon the tumour's location and size. Thus, the cruciality of proficient seizure detection and analysis is underscored. Employing canny edge detection and thresholding to delineate contours and boundaries within images, an analysis was conducted by transmuting grayscale or colour images into a binary format. The input dataset, utilised for the training and testing of machine and deep-learning models, comprised images of seizures. These models were subsequently trained to discern patterns and features within the images, facilitating the differentiation between two predefined classes. Resultantly, the models predicted, with a defined accuracy level, the presence or absence of a seizure within a new image. The Support Vector Machine (SVM) and Convolutional Neural Network (CNN) models demonstrated classification accuracies of 96% and 95%, respectively. By analysing performance metrics on a per-slice basis, the localization of seizure activity within the brain could be visualised, offering valuable insights into regions affected by this activity. 
The amalgamation of edge detection, feature extraction, and classification models proficiently discriminated between seizure and non-seizure activities, providing pivotal insights for the diagnosis and therapeutic strategies for epilepsy. Further, studying these neurological alterations can illuminate the progression and severity of cognitive and emotional deficits within affected individuals, whilst advancements in diagnostic methodologies, such as Magnetic Resonance Imaging (MRI), facilitate an enriched comparative analysis.</description>
    <pubDate>10-24-2023</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;A notable association between Alzheimer's Disease and Epilepsy, two divergent neurological conditions, has been established through previous research, illustrating an elevated seizure development risk in individuals diagnosed with Alzheimer’s Disease (AD). The hippocampus, fundamental in both seizure and tumour pathology, is intricately investigated herein. The subsequent aberrant electrical activity within this brain region, frequently implicated in seizure onset and propagation, underpins a complex relationship between degenerative cerebral changes and seizure incidence. Symptomatic manifestations in hippocampal glioma include, but are not limited to, seizures, memory deficits, and language difficulties, contingent upon the tumour's location and size. Thus, the cruciality of proficient seizure detection and analysis is underscored. Employing canny edge detection and thresholding to delineate contours and boundaries within images, an analysis was conducted by transmuting grayscale or colour images into a binary format. The input dataset, utilised for the training and testing of machine and deep-learning models, comprised images of seizures. These models were subsequently trained to discern patterns and features within the images, facilitating the differentiation between two predefined classes. Resultantly, the models predicted, with a defined accuracy level, the presence or absence of a seizure within a new image. The Support Vector Machine (SVM) and Convolutional Neural Network (CNN) models demonstrated classification accuracies of 96% and 95%, respectively. By analysing performance metrics on a per-slice basis, the localization of seizure activity within the brain could be visualised, offering valuable insights into regions affected by this activity. 
The amalgamation of edge detection, feature extraction, and classification models proficiently discriminated between seizure and non-seizure activities, providing pivotal insights for the diagnosis and therapeutic strategies for epilepsy. Further, studying these neurological alterations can illuminate the progression and severity of cognitive and emotional deficits within affected individuals, whilst advancements in diagnostic methodologies, such as Magnetic Resonance Imaging (MRI), facilitate an enriched comparative analysis.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Comparative Analysis of Seizure Manifestations in Alzheimer’s and Glioma Patients via Magnetic Resonance Imaging</dc:title>
    <dc:creator>Jayanthi Vajiram</dc:creator>
    <dc:creator>Sivakumar Shanmugasundaram</dc:creator>
    <dc:creator>Rajeswaran Rangasami</dc:creator>
    <dc:creator>Utkarsh Maurya</dc:creator>
    <dc:identifier>doi: 10.56578/ida020401</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>10-24-2023</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>10-24-2023</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>4</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>162</prism:startingPage>
    <prism:doi>10.56578/ida020401</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2023_2_4/ida020401</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2023_2_3/ida020305">
    <title>Information Dynamics and Applications, 2023, Volume 2, Issue 3: Classification of Cyclin Proteins Using Amino Acid Composition and an SVM Approach: An In-Depth Analysis</title>
    <link>https://www.acadlore.com/article/IDA/2023_2_3/ida020305</link>
    <description>Cyclins, commonly referred to as co-enzymes, are a pivotal family of proteins that modulate cellular growth by activating cell-cycle mediators, proving essential for the cell cycle. Due to the marked dissimilarity in their sequences, effective differentiation among cyclins remains a challenging endeavour. In this study, an innovative methodology was proposed, wherein the amino acid composition was utilized to inform an SVM-based classification approach. SVMs, being supervised machine learning algorithms, are typically employed for classification and regression tasks. From the data analyzed, eighteen (18) feature labels were extracted, culminating in an extensive set of thirteen thousand one hundred and fifty-one (13,151) discernible features. Employing the jackknife cross-validation technique revealed that this SVM-informed approach facilitated the identification of cyclins with an accuracy rate of 91.9%, a notable improvement from prior studies. Such advancements underscore the potential for more accurate and efficient differentiation of cyclins in future endeavours.</description>
    <pubDate>09-27-2023</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Cyclins, commonly referred to as co-enzymes, are a pivotal family of proteins that modulate cellular growth by activating cell-cycle mediators, proving essential for the cell cycle. Due to the marked dissimilarity in their sequences, effective differentiation among cyclins remains a challenging endeavour. In this study, an innovative methodology was proposed, wherein the amino acid composition was utilized to inform an SVM-based classification approach. SVMs, being supervised machine learning algorithms, are typically employed for classification and regression tasks. From the data analyzed, eighteen (18) feature labels were extracted, culminating in an extensive set of thirteen thousand one hundred and fifty-one (13,151) discernible features. Employing the jackknife cross-validation technique revealed that this SVM-informed approach facilitated the identification of cyclins with an accuracy rate of 91.9%, a notable improvement from prior studies. Such advancements underscore the potential for more accurate and efficient differentiation of cyclins in future endeavours.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Classification of Cyclin Proteins Using Amino Acid Composition and an SVM Approach: An In-Depth Analysis</dc:title>
    <dc:creator>Muhammad Hassaan</dc:creator>
    <dc:identifier>doi: 10.56578/ida020305</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>09-27-2023</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>09-27-2023</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>3</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>153</prism:startingPage>
    <prism:doi>10.56578/ida020305</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2023_2_3/ida020305</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2023_2_3/ida020304">
    <title>Information Dynamics and Applications, 2023, Volume 2, Issue 3: MR Image Feature Analysis for Alzheimer’s Disease Detection Using Machine Learning Approaches</title>
    <link>https://www.acadlore.com/article/IDA/2023_2_3/ida020304</link>
    <description>Alzheimer’s disease (AD), a progressive neurological disorder, predominantly impacts cognitive functions, manifesting as memory loss and deteriorating thinking abilities. Recognized as the primary form of dementia, this affliction subtly commences within brain cells and gradually aggravates over time. In 2023, dementia's financial burden for elderly adults aged 65 and older was projected to reach $345 billion, encompassing health care, long-term care, and hospice services. Alarmingly, Alzheimer's disease claims one in three seniors, outnumbering combined fatalities from breast and prostate cancer. Currently, the diagnostic landscape for Alzheimer's lacks definitive tests, and diagnoses based purely on biological definitions have been observed to possess low predictive accuracy. In the presented study, a diagnostic methodology has been proposed using machine learning models that harness image features derived from brain MRI scans. Specifically, nine salient image features, grounded in color, texture, shape, and orientation, were extracted for the study. Four classifiers — Naïve-Bayes, Logistic regression, XGBoost, and AdaBoost — were employed, as the challenge presented a binary classification scenario. A grid search parameter optimization technique was employed to fine-tune model configurations, ensuring optimal predictive outcomes. Conducted experiments utilizing the Kaggle dataset, and for each model, parameters were rigorously optimized. The XGBoost classifier demonstrated superior performance, achieving a test accuracy of 92%, while Naïve Bayes, Logistic Regression, and AdaBoost registered accuracies of 63%, 70%, and 72%, respectively. Relative to contemporary methods, the proposed diagnostic approach exhibits commendable accuracy in predicting AD. If AI-based predictive diagnostics for AD are realized using the strategies delineated in this study, significant benefits may be anticipated for healthcare practitioners.</description>
    <pubDate>Tue, 26 Sep 2023 00:00:00 GMT</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Alzheimer’s disease (AD), a progressive neurological disorder, predominantly impacts cognitive functions, manifesting as memory loss and deteriorating thinking abilities. Recognized as the primary form of dementia, this affliction subtly commences within brain cells and gradually aggravates over time. In 2023, dementia's financial burden for elderly adults aged 65 and older was projected to reach $345 billion, encompassing health care, long-term care, and hospice services. Alarmingly, Alzheimer's disease claims one in three seniors, outnumbering combined fatalities from breast and prostate cancer. Currently, the diagnostic landscape for Alzheimer's lacks definitive tests, and diagnoses based purely on biological definitions have been observed to possess low predictive accuracy. In the presented study, a diagnostic methodology has been proposed using machine learning models that harness image features derived from brain MRI scans. Specifically, nine salient image features, grounded in color, texture, shape, and orientation, were extracted for the study. Four classifiers — Naïve-Bayes, Logistic regression, XGBoost, and AdaBoost — were employed, as the challenge presented a binary classification scenario. A grid search parameter optimization technique was employed to fine-tune model configurations, ensuring optimal predictive outcomes. Conducted experiments utilizing the Kaggle dataset, and for each model, parameters were rigorously optimized. The XGBoost classifier demonstrated superior performance, achieving a test accuracy of 92%, while Naïve Bayes, Logistic Regression, and AdaBoost registered accuracies of 63%, 70%, and 72%, respectively. Relative to contemporary methods, the proposed diagnostic approach exhibits commendable accuracy in predicting AD. If AI-based predictive diagnostics for AD are realized using the strategies delineated in this study, significant benefits may be anticipated for healthcare practitioners.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>MR Image Feature Analysis for Alzheimer’s Disease Detection Using Machine Learning Approaches</dc:title>
    <dc:creator>D. S. A. Aashiqur Reza</dc:creator>
    <dc:creator>Sadia Afrin</dc:creator>
    <dc:creator>Md. Ahsan Ullah</dc:creator>
    <dc:creator>Sourav Kumar Kha</dc:creator>
    <dc:creator>Sadia Chowdhury Toma</dc:creator>
    <dc:creator>Raju Roy</dc:creator>
    <dc:creator>Lasker Ershad Ali</dc:creator>
    <dc:identifier>doi: 10.56578/ida020304</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2023-09-26</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2023-09-26</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>3</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>143</prism:startingPage>
    <prism:doi>10.56578/ida020304</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2023_2_3/ida020304</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:about="https://www.acadlore.com/article/IDA/2023_2_3/ida020303">
    <title>Information Dynamics and Applications, 2023, Volume 2, Issue 3: Enhanced Channel Estimation in Multiple-Input Multiple-Output Systems: A Dual Quadratic Decomposition Algorithm Approach for Interference Cancellation</title>
    <link>https://www.acadlore.com/article/IDA/2023_2_3/ida020303</link>
    <description>In Multiple-Input Multiple-Output (MIMO) systems, a considerable number of antennas are deployed at each base station, utilizing Time-shifted pilot contamination strategies. It was observed that Time-shifted pilot contamination can mitigate the adverse effects of pilot contamination, subsequently reducing Inter-group interference. However, constraints are introduced in the channel estimation process when pilots are time-shifted. To address the challenge of increasing mean square channel estimation errors in finite antenna massive MIMO systems, a novel approach using a Dual Quadratic Decomposition Algorithm for Interference Cancellation (DQDA-IC) is introduced. Through this methodology, data interference gets effectively canceled when base stations collaborate. Furthermore, compressive sensing techniques are employed, resulting in enhanced channel compensation and reduced pilot contamination in massive MIMO systems. Comparative experimental analysis, conducted using the MATLAB tool, pitted this method against two conventional techniques: Integer Linear Programming (ILP) and Q-Learning based Interference Control (QLIC). Results indicated that the DQDA-IC model surpassed its counterparts by achieving a 63% improvement in Signal to Noise Ratio (SNR), a 56% reduction in Bit Error Rate (BER), and a 92% enhancement in spectral efficiency, all within a 40 ms computational timeframe.</description>
    <pubDate>Wed, 20 Sep 2023 00:00:00 GMT</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;In Multiple-Input Multiple-Output (MIMO) systems, a considerable number of antennas are deployed at each base station, utilizing Time-shifted pilot contamination strategies. It was observed that Time-shifted pilot contamination can mitigate the adverse effects of pilot contamination, subsequently reducing Inter-group interference. However, constraints are introduced in the channel estimation process when pilots are time-shifted. To address the challenge of increasing mean square channel estimation errors in finite antenna massive MIMO systems, a novel approach using a Dual Quadratic Decomposition Algorithm for Interference Cancellation (DQDA-IC) is introduced. Through this methodology, data interference gets effectively canceled when base stations collaborate. Furthermore, compressive sensing techniques are employed, resulting in enhanced channel compensation and reduced pilot contamination in massive MIMO systems. Comparative experimental analysis, conducted using the MATLAB tool, pitted this method against two conventional techniques: Integer Linear Programming (ILP) and Q-Learning based Interference Control (QLIC). Results indicated that the DQDA-IC model surpassed its counterparts by achieving a 63% improvement in Signal to Noise Ratio (SNR), a 56% reduction in Bit Error Rate (BER), and a 92% enhancement in spectral efficiency, all within a 40 ms computational timeframe.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Enhanced Channel Estimation in Multiple-Input Multiple-Output Systems: A Dual Quadratic Decomposition Algorithm Approach for Interference Cancellation</dc:title>
    <dc:creator>Sakkaravarthi Ramanathan</dc:creator>
    <dc:creator>Tirupathaiah Kanaparthi</dc:creator>
    <dc:creator>Ravi Sekhar Yarrabothu</dc:creator>
    <dc:creator>Ramesh Sundar</dc:creator>
    <dc:identifier>doi: 10.56578/ida020303</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2023-09-20</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2023-09-20</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>3</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>135</prism:startingPage>
    <prism:doi>10.56578/ida020303</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2023_2_3/ida020303</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:about="https://www.acadlore.com/article/IDA/2023_2_3/ida020302">
    <title>Information Dynamics and Applications, 2023, Volume 2, Issue 3: Cryptocurrency Investigations in Digital Forensics: Contemporary Challenges and Methodological Advances</title>
    <link>https://www.acadlore.com/article/IDA/2023_2_3/ida020302</link>
    <description>Digital forensics, a crucial subset of cybersecurity, encompasses sophisticated tools and methodologies for the interpretation, analysis, and investigation of digital evidence, facilitating the identification and mitigation of cybercrimes and security breaches. With the advent of cryptocurrencies, an array of unique challenges has emerged in the domain of digital forensic investigations. This review elucidates the prevailing state of digital forensic practices vis-à-vis cryptocurrencies, emphasizing the obstacles and limitations inherent in probing decentralized and intricate technologies. Notable deficiencies in extant investigative practices were observed. Solutions proffered encompass the formulation of novel software applications tailored for cryptocurrency analyses, the integration of machine learning and artificial intelligence capabilities, and the employment of advanced analytics to discern patterns and irregularities within blockchain transactions. Furthermore, a pioneering methodology, merging traditional digital forensic strategies with blockchain-specific techniques, is posited for efficacious cryptocurrency inquiries. The analysis underscores the imperative for a renewed paradigm in digital forensic examinations to surmount the challenges integral to cryptocurrency probes. By forging novel methodologies and standardizing investigative procedures, support for legal enforcement endeavors can be enhanced, facilitating the efficacious detection and prosecution of cryptocurrency-associated misdemeanors.</description>
    <pubDate>Thu, 07 Sep 2023 00:00:00 GMT</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Digital forensics, a crucial subset of cybersecurity, encompasses sophisticated tools and methodologies for the interpretation, analysis, and investigation of digital evidence, facilitating the identification and mitigation of cybercrimes and security breaches. With the advent of cryptocurrencies, an array of unique challenges has emerged in the domain of digital forensic investigations. This review elucidates the prevailing state of digital forensic practices vis-à-vis cryptocurrencies, emphasizing the obstacles and limitations inherent in probing decentralized and intricate technologies. Notable deficiencies in extant investigative practices were observed. Solutions proffered encompass the formulation of novel software applications tailored for cryptocurrency analyses, the integration of machine learning and artificial intelligence capabilities, and the employment of advanced analytics to discern patterns and irregularities within blockchain transactions. Furthermore, a pioneering methodology, merging traditional digital forensic strategies with blockchain-specific techniques, is posited for efficacious cryptocurrency inquiries. The analysis underscores the imperative for a renewed paradigm in digital forensic examinations to surmount the challenges integral to cryptocurrency probes. By forging novel methodologies and standardizing investigative procedures, support for legal enforcement endeavors can be enhanced, facilitating the efficacious detection and prosecution of cryptocurrency-associated misdemeanors.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Cryptocurrency Investigations in Digital Forensics: Contemporary Challenges and Methodological Advances</dc:title>
    <dc:creator>Syed Atir Raza</dc:creator>
    <dc:creator>Mehwish Shaikh</dc:creator>
    <dc:creator>Khadija Tahira</dc:creator>
    <dc:identifier>doi: 10.56578/ida020302</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2023-09-07</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2023-09-07</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>3</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>126</prism:startingPage>
    <prism:doi>10.56578/ida020302</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2023_2_3/ida020302</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:about="https://www.acadlore.com/article/IDA/2023_2_3/ida020301">
    <title>Information Dynamics and Applications, 2023, Volume 2, Issue 3: An Optimized Algorithm for Peak to Average Power Ratio Reduction in Orthogonal Frequency Division Multiplexing Communication Systems: An Integrated Approach</title>
    <link>https://www.acadlore.com/article/IDA/2023_2_3/ida020301</link>
    <description>The impact of the peak to Average Power Ratio (PAPR) on the efficiency of an Orthogonal Frequency Division Multiplexing (OFDM) communication system is significantly mitigated through an innovative Reconfigurable Integrated Algorithm (RIA). In this study, the RIA combines the advantages of Partial Transmit Sequence (PTS) and Companding Transformation (CT) techniques, enhancing the overall efficiency while reducing the signal distortion inherent in linear transformation methods. A unique reconfiguration process enables integration of PTS and CT to minimize PAPR. This process considers key parameters including multi-channel inputs and delay attenuation factors. Comparison of the RIA with conventional methods such as PTS, CT, selective mapping (SLM), and Tone Reservation (TR) reveals superior performance, as evidenced by the Complementary Cumulative Distribution Function (CCDFs) curve. Implementations of the algorithm using MATLAB R2022a demonstrate significant improvements in PAPR performance, showing gains of 0.55dB and 0.656dB compared to the PTS and CT methods respectively. Moreover, the novel RIA methodology exhibits enhanced transmission rates and lower Bit Error Rates (BER) relative to conventional methods. In conclusion, the proposed RIA offers a promising approach for optimizing OFDM system performance through efficient PAPR reduction. Its implementation can drive the advancement of telecommunications technologies and further understanding of OFDM communication systems.</description>
    <pubDate>Tue, 05 Sep 2023 00:00:00 GMT</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;The impact of the peak to Average Power Ratio (PAPR) on the efficiency of an Orthogonal Frequency Division Multiplexing (OFDM) communication system is significantly mitigated through an innovative Reconfigurable Integrated Algorithm (RIA). In this study, the RIA combines the advantages of Partial Transmit Sequence (PTS) and Companding Transformation (CT) techniques, enhancing the overall efficiency while reducing the signal distortion inherent in linear transformation methods. A unique reconfiguration process enables integration of PTS and CT to minimize PAPR. This process considers key parameters including multi-channel inputs and delay attenuation factors. Comparison of the RIA with conventional methods such as PTS, CT, selective mapping (SLM), and Tone Reservation (TR) reveals superior performance, as evidenced by the Complementary Cumulative Distribution Function (CCDFs) curve. Implementations of the algorithm using MATLAB R2022a demonstrate significant improvements in PAPR performance, showing gains of 0.55dB and 0.656dB compared to the PTS and CT methods respectively. Moreover, the novel RIA methodology exhibits enhanced transmission rates and lower Bit Error Rates (BER) relative to conventional methods. In conclusion, the proposed RIA offers a promising approach for optimizing OFDM system performance through efficient PAPR reduction. Its implementation can drive the advancement of telecommunications technologies and further understanding of OFDM communication systems.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>An Optimized Algorithm for Peak to Average Power Ratio Reduction in Orthogonal Frequency Division Multiplexing Communication Systems: An Integrated Approach</dc:title>
    <dc:creator>Rathod Shivaji</dc:creator>
    <dc:creator>Nataraj Kanathur Ramaswamy</dc:creator>
    <dc:creator>Mallikarjunaswamy Srikantaswamy</dc:creator>
    <dc:creator>Rekha Kanathur Ramaswamy</dc:creator>
    <dc:identifier>doi: 10.56578/ida020301</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2023-09-05</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2023-09-05</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>3</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>115</prism:startingPage>
    <prism:doi>10.56578/ida020301</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2023_2_3/ida020301</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:about="https://www.acadlore.com/article/IDA/2023_2_2/ida020205">
    <title>Information Dynamics and Applications, 2023, Volume 2, Issue 2: Examining Public Perceptions of UK Rail Strikes: A Text Analytics Approach Using Twitter Data</title>
    <link>https://www.acadlore.com/article/IDA/2023_2_2/ida020205</link>
    <description>Social media, particularly Twitter, has emerged as a vital platform for understanding public opinion on contemporary issues. This study investigates public attitudes towards UK rail strikes by analyzing Twitter data and provides a framework to assist policymakers in the RMT Union and the government in managing social media information. A dataset comprising tweets related to rail strikes from 25 June 2022 to 7 October 2022 was collected and multidimensional scaling and sentiment analysis techniques were employed to examine public opinions and sentiments. The analysis revealed that the predominant trends in tweets were dissatisfaction and negativity, with users expressing inconvenience caused by the rail strikes. Interestingly, the public also questioned the government's capabilities, with some suggesting that rail strikes were politically motivated events orchestrated by the government. Sentiment analysis results indicated that approximately 85% of tweets displayed negative sentiment towards the rail strikes. This research contributes to the understanding of public attitudes derived from tweet mining and offers valuable insights for academics and policymakers in interpreting public reactions to current events. Based on the findings, recommendations for the RMT Union are proposed through the lenses of stakeholder orientation theory and signaling theory. For instance, fostering public engagement can help reduce information asymmetry between the RMT Union and the public, enabling the union to better comprehend public sentiment towards rail strikes. The approach amalgamates these two theories, presenting a novel theoretical perspective for such investigations and extending their applicability, while also providing clear and in-depth recommendations for the RMT Union.</description>
    <pubDate>Sun, 25 Jun 2023 00:00:00 GMT</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;Social media, particularly Twitter, has emerged as a vital platform for understanding public opinion on contemporary issues. This study investigates public attitudes towards UK rail strikes by analyzing Twitter data and provides a framework to assist policymakers in the RMT Union and the government in managing social media information. A dataset comprising tweets related to rail strikes from 25 June 2022 to 7 October 2022 was collected and multidimensional scaling and sentiment analysis techniques were employed to examine public opinions and sentiments. The analysis revealed that the predominant trends in tweets were dissatisfaction and negativity, with users expressing inconvenience caused by the rail strikes. Interestingly, the public also questioned the government's capabilities, with some suggesting that rail strikes were politically motivated events orchestrated by the government. Sentiment analysis results indicated that approximately 85% of tweets displayed negative sentiment towards the rail strikes. This research contributes to the understanding of public attitudes derived from tweet mining and offers valuable insights for academics and policymakers in interpreting public reactions to current events. Based on the findings, recommendations for the RMT Union are proposed through the lenses of stakeholder orientation theory and signaling theory. For instance, fostering public engagement can help reduce information asymmetry between the RMT Union and the public, enabling the union to better comprehend public sentiment towards rail strikes. The approach amalgamates these two theories, presenting a novel theoretical perspective for such investigations and extending their applicability, while also providing clear and in-depth recommendations for the RMT Union.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Examining Public Perceptions of UK Rail Strikes: A Text Analytics Approach Using Twitter Data</dc:title>
    <dc:creator>Kyra Dong</dc:creator>
    <dc:creator>Ying Kei Tse</dc:creator>
    <dc:identifier>doi: 10.56578/ida020205</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2023-06-25</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2023-06-25</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>101</prism:startingPage>
    <prism:doi>10.56578/ida020205</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2023_2_2/ida020205</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:about="https://www.acadlore.com/article/IDA/2023_2_2/ida020204">
    <title>Information Dynamics and Applications, 2023, Volume 2, Issue 2: An IoT-Based Multimodal Real-Time Home Control System for the Physically Challenged: Design and Implementation</title>
    <link>https://www.acadlore.com/article/IDA/2023_2_2/ida020204</link>
    <description>Physical impairments affect a significant proportion of the global populace, emphasizing the need for assistive technologies to increase the ability of these individuals to perform daily activities autonomously. This study discusses the development and implementation of a multimodal home control system, designed to afford physically challenged individuals greater control over their home environments. This system utilizes the Internet of Things (IoT) for its functionality. The system is primarily based on the utilization of the Amazon Alexa Echo Dot, which facilitates speech-based control, and a sequential clap recognition system, both made possible through an internet connection. These methods are further supplemented by an additional manual switching option, thereby ensuring a diverse range of control methods. The processing core of this system consists of an Arduino Uno and an ESP32 Devkit module. In conjunction with these, a sound detector is employed to discern and process a variety of clap patterns, which is set to function at a predefined threshold. The Amazon Alexa Echo Dot serves as the primary interface for voice commands and real-time information retrieval. Furthermore, an Android smartphone, equipped with the Alexa application, provides alternate interfaces for appliance control, through both soft buttons and voice commands. Based on an analysis of this system, it is suggested that it is not only viable but also effective. Key attributes of the system include rapid response times, aesthetic appeal, secure operation, low energy consumption, and most importantly, increased accessibility for physically disabled individuals.</description>
    <pubDate>Thu, 15 Jun 2023 00:00:00 GMT</pubDate>
    <content:encoded>&lt;![CDATA[ Physical impairments affect a significant proportion of the global populace, emphasizing the need for assistive technologies to increase the ability of these individuals to perform daily activities autonomously. This study discusses the development and implementation of a multimodal home control system, designed to afford physically challenged individuals greater control over their home environments. This system utilizes the Internet of Things (IoT) for its functionality. The system is primarily based on the utilization of the Amazon Alexa Echo Dot, which facilitates speech-based control, and a sequential clap recognition system, both made possible through an internet connection. These methods are further supplemented by an additional manual switching option, thereby ensuring a diverse range of control methods. The processing core of this system consists of an Arduino Uno and an ESP32 Devkit module. In conjunction with these, a sound detector is employed to discern and process a variety of clap patterns, which is set to function at a predefined threshold. The Amazon Alexa Echo Dot serves as the primary interface for voice commands and real-time information retrieval. Furthermore, an Android smartphone, equipped with the Alexa application, provides alternate interfaces for appliance control, through both soft buttons and voice commands. Based on an analysis of this system, it is suggested that it is not only viable but also effective. Key attributes of the system include rapid response times, aesthetic appeal, secure operation, low energy consumption, and most importantly, increased accessibility for physically disabled individuals. ]]&gt;</content:encoded>
    <dc:title>An IoT-Based Multimodal Real-Time Home Control System for the Physically Challenged: Design and Implementation</dc:title>
    <dc:creator>Kennedy Okokpujie</dc:creator>
    <dc:creator>David Jacinth</dc:creator>
    <dc:creator>Gabriel Ameh James</dc:creator>
    <dc:creator>Imhade P. Okokpujie</dc:creator>
    <dc:creator>Akingunsoye Adenugba Vincent</dc:creator>
    <dc:identifier>doi: 10.56578/ida020204</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2023-06-15</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2023-06-15</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>90</prism:startingPage>
    <prism:doi>10.56578/ida020204</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2023_2_2/ida020204</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:about="https://www.acadlore.com/article/IDA/2023_2_2/ida020203">
    <title>Information Dynamics and Applications, 2023, Volume 2, Issue 2: A Cervical Lesion Recognition Method Based on ShuffleNetV2-CA</title>
    <link>https://www.acadlore.com/article/IDA/2023_2_2/ida020203</link>
    <description>Cervical cancer is the second most common cancer among women globally. Colposcopy plays a vital role in assessing cervical intraepithelial neoplasia (CIN) and screening for cervical cancer. However, existing colposcopy methods mainly rely on physician experience, leading to misdiagnosis and limited medical resources. This study proposes a cervical lesion recognition method based on ShuffleNetV2-CA. A dataset of 6,996 cervical images was created from Hebei University Affiliated Hospital, including normal, cervical cancer, low-grade squamous intraepithelial lesions (LSIL, CIN 1), high-grade squamous intraepithelial lesions (HSIL, CIN 2/CIN 3), and cervical tumor data. Images were preprocessed using data augmentation, and the dataset was divided into training and validation sets at a 9:1 ratio during the training phase. This study introduces a coordinate attention mechanism (CA) to the original ShuffleNetV2 model, enabling the model to focus on larger areas during the image feature extraction process. Experimental results show that compared to other classic networks, the ShuffleNetV2-CA network achieves higher recognition accuracy with smaller model parameters and computation, making it suitable for resource-limited embedded devices such as mobile terminals and offering high clinical applicability.</description>
    <pubDate>Wed, 24 May 2023 00:00:00 GMT</pubDate>
    <content:encoded>&lt;![CDATA[ Cervical cancer is the second most common cancer among women globally. Colposcopy plays a vital role in assessing cervical intraepithelial neoplasia (CIN) and screening for cervical cancer. However, existing colposcopy methods mainly rely on physician experience, leading to misdiagnosis and limited medical resources. This study proposes a cervical lesion recognition method based on ShuffleNetV2-CA. A dataset of 6,996 cervical images was created from Hebei University Affiliated Hospital, including normal, cervical cancer, low-grade squamous intraepithelial lesions (LSIL, CIN 1), high-grade squamous intraepithelial lesions (HSIL, CIN 2/CIN 3), and cervical tumor data. Images were preprocessed using data augmentation, and the dataset was divided into training and validation sets at a 9:1 ratio during the training phase. This study introduces a coordinate attention mechanism (CA) to the original ShuffleNetV2 model, enabling the model to focus on larger areas during the image feature extraction process. Experimental results show that compared to other classic networks, the ShuffleNetV2-CA network achieves higher recognition accuracy with smaller model parameters and computation, making it suitable for resource-limited embedded devices such as mobile terminals and offering high clinical applicability. ]]&gt;</content:encoded>
    <dc:title>A Cervical Lesion Recognition Method Based on ShuffleNetV2-CA</dc:title>
    <dc:creator>Chunhui Liu</dc:creator>
    <dc:creator>Jiahui Yang</dc:creator>
    <dc:creator>Ying Liu</dc:creator>
    <dc:creator>Ying Zhang</dc:creator>
    <dc:creator>Shuang Liu</dc:creator>
    <dc:creator>Tetiana Chaikovska</dc:creator>
    <dc:creator>Chan Liu</dc:creator>
    <dc:identifier>doi: 10.56578/ida020203</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2023-05-24</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2023-05-24</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>77</prism:startingPage>
    <prism:doi>10.56578/ida020203</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2023_2_2/ida020203</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:about="https://www.acadlore.com/article/IDA/2023_2_2/ida020202">
    <title>Information Dynamics and Applications, 2023, Volume 2, Issue 2: Enhancing Data Storage and Access in CSN Labs with Raspberry Pi 3B+ and Open Media Vault NAS</title>
    <link>https://www.acadlore.com/article/IDA/2023_2_2/ida020202</link>
    <description>The purpose of this study was to devise a more efficient system for data storage and exchange in the Computer System and Network (CSN) Laboratory at Ibn Khaldun Bogor University. Open Media Vault (OMV) software and Raspberry Pi 3B+ were employed to establish a Network Attached Storage (NAS) system. The performance and file transfer speeds of the Raspberry Pi were evaluated in the context of this implementation. The implementation of the NAS system was intended to offer students of the CSN laboratory swifter and more efficient access to data, thereby reducing dependence on USB media. The findings of this study could hold substantial implications for enhancing the efficiency and effectiveness of data storage and exchange in educational environments.</description>
    <pubDate>Tue, 23 May 2023 00:00:00 GMT</pubDate>
    <content:encoded>&lt;![CDATA[ The purpose of this study was to devise a more efficient system for data storage and exchange in the Computer System and Network (CSN) Laboratory at Ibn Khaldun Bogor University. Open Media Vault (OMV) software and Raspberry Pi 3B+ were employed to establish a Network Attached Storage (NAS) system. The performance and file transfer speeds of the Raspberry Pi were evaluated in the context of this implementation. The implementation of the NAS system was intended to offer students of the CSN laboratory swifter and more efficient access to data, thereby reducing dependence on USB media. The findings of this study could hold substantial implications for enhancing the efficiency and effectiveness of data storage and exchange in educational environments. ]]&gt;</content:encoded>
    <dc:title>Enhancing Data Storage and Access in CSN Labs with Raspberry Pi 3B+ and Open Media Vault NAS</dc:title>
    <dc:creator>ritzkal ritzkal</dc:creator>
    <dc:creator>kodarsyah kodarsyah</dc:creator>
    <dc:creator>asep ramdan sopyan nudin</dc:creator>
    <dc:creator>ibnu hanafi setiadi</dc:creator>
    <dc:creator>freza riana</dc:creator>
    <dc:creator>berlina wulandari</dc:creator>
    <dc:identifier>doi: 10.56578/ida020202</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2023-05-23</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2023-05-23</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>63</prism:startingPage>
    <prism:doi>10.56578/ida020202</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2023_2_2/ida020202</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2023_2_2/ida020201">
    <title>Information Dynamics and Applications, 2023, Volume 2, Issue 2: An Enhanced QoS-Aware Multipath Routing Protocol for Real-Time IoT Applications in MANETs</title>
    <link>https://www.acadlore.com/article/IDA/2023_2_2/ida020201</link>
    <description>Recent transmission of large volumes of data through mobile ad hoc networks (MANETs) has resulted in degraded Quality of Service (QoS) due to factors such as packet loss, delay, and packet drop in multipath routing. To address this issue, a traffic-aware Enhanced QoS-Aware Multipath Routing Protocol (EQMRP) has been proposed for real-time IoT applications in MANETs. EQMRP efficiently switches between multiple paths and monitors traffic conditions to maintain an optimal data transmission rate. The proposed method considers different delay sensitivity levels and link expiration time (LTE) to maintain QoS in each path. Through IoT application data analysis, EQMRP maintains QoS in each path more efficiently than conventional methods. The proposed method has been simulated and validated using MATLAB, and the performance analysis shows that EQMRP achieves a higher packet delivery ratio, lower delay, and reduced packet drop compared to conventional methods. In conclusion, the traffic-aware EQMRP protocol offers a significant improvement in QoS for real-time IoT applications in MANETs.</description>
    <pubDate>Tue, 16 May 2023 00:00:00 GMT</pubDate>
    <content:encoded><![CDATA[ Recent transmission of large volumes of data through mobile ad hoc networks (MANETs) has resulted in degraded Quality of Service (QoS) due to factors such as packet loss, delay, and packet drop in multipath routing. To address this issue, a traffic-aware Enhanced QoS-Aware Multipath Routing Protocol (EQMRP) has been proposed for real-time IoT applications in MANETs. EQMRP efficiently switches between multiple paths and monitors traffic conditions to maintain an optimal data transmission rate. The proposed method considers different delay sensitivity levels and link expiration time (LTE) to maintain QoS in each path. Through IoT application data analysis, EQMRP maintains QoS in each path more efficiently than conventional methods. The proposed method has been simulated and validated using MATLAB, and the performance analysis shows that EQMRP achieves a higher packet delivery ratio, lower delay, and reduced packet drop compared to conventional methods. In conclusion, the traffic-aware EQMRP protocol offers a significant improvement in QoS for real-time IoT applications in MANETs. ]]></content:encoded>
    <dc:title>An Enhanced QoS-Aware Multipath Routing Protocol for Real-Time IoT Applications in MANETs</dc:title>
    <dc:creator>venkata reddy pathapalli srinivasappa</dc:creator>
    <dc:creator>nandini prasad kanakapura shivaprasad</dc:creator>
    <dc:creator>puttamadappa chaluvegowda</dc:creator>
    <dc:identifier>doi: 10.56578/ida020201</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2023-05-16</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2023-05-16</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>2</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>51</prism:startingPage>
    <prism:doi>10.56578/ida020201</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2023_2_2/ida020201</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2023_2_1/ida020105">
    <title>Information Dynamics and Applications, 2023, Volume 2, Issue 1: A Deep Convolutional Neural Network Framework for Enhancing Brain Tumor Diagnosis on MRI Scans</title>
    <link>https://www.acadlore.com/article/IDA/2023_2_1/ida020105</link>
    <description>Brain tumors are a critical public health concern, often resulting in limited life expectancy for patients. Accurate diagnosis of brain tumors is crucial to develop effective treatment strategies and improve patients' quality of life. Computer-aided diagnosis (CAD) systems that accurately classify tumor images have been challenging to develop. Deep convolutional neural network (DCNN) models have shown significant potential for tumor detection, and outperform traditional deep neural network models. In this study, a novel framework based on two pre-trained deep convolutional architectures (VGG16 and EfficientNetB0) is proposed for classifying different types of brain tumors, including meningioma, glioma, and pituitary tumors. Features are extracted from MR images using each architecture and merged before feeding them into machine learning algorithms for tumor classification. The proposed approach achieves a training accuracy of 98% and a test accuracy of 99% on the brain-tumor-classification-mri dataset available on Kaggle and btc_navoneel. The model shows promise to improve the accuracy and generalizability of medical image classification for better clinical decision support, ultimately leading to improved patient outcomes.</description>
    <pubDate>Thu, 30 Mar 2023 00:00:00 GMT</pubDate>
    <content:encoded><![CDATA[ <p>Brain tumors are a critical public health concern, often resulting in limited life expectancy for patients. Accurate diagnosis of brain tumors is crucial to develop effective treatment strategies and improve patients' quality of life. Computer-aided diagnosis (CAD) systems that accurately classify tumor images have been challenging to develop. Deep convolutional neural network (DCNN) models have shown significant potential for tumor detection, and outperform traditional deep neural network models. In this study, a novel framework based on two pre-trained deep convolutional architectures (VGG16 and EfficientNetB0) is proposed for classifying different types of brain tumors, including meningioma, glioma, and pituitary tumors. Features are extracted from MR images using each architecture and merged before feeding them into machine learning algorithms for tumor classification. The proposed approach achieves a training accuracy of 98% and a test accuracy of 99% on the brain-tumor-classification-mri dataset available on Kaggle and btc_navoneel. The model shows promise to improve the accuracy and generalizability of medical image classification for better clinical decision support, ultimately leading to improved patient outcomes.</p> ]]></content:encoded>
    <dc:title>A Deep Convolutional Neural Network Framework for Enhancing Brain Tumor Diagnosis on MRI Scans</dc:title>
    <dc:creator>jyostna devi bodapati</dc:creator>
    <dc:creator>shaik feroz ahmed</dc:creator>
    <dc:creator>yarra yashwant chowdary</dc:creator>
    <dc:creator>konda raja sekhar</dc:creator>
    <dc:identifier>doi: 10.56578/ida020105</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2023-03-30</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2023-03-30</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>42</prism:startingPage>
    <prism:doi>10.56578/ida020105</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2023_2_1/ida020105</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2023_2_1/ida020104">
    <title>Information Dynamics and Applications, 2023, Volume 2, Issue 1: Routing Attack Detection Using Ensemble Deep Learning Model for IIoT</title>
    <link>https://www.acadlore.com/article/IDA/2023_2_1/ida020104</link>
    <description>Smart cities, ITS, supply chains, and smart industries may all be developed with minimal human interaction thanks to the increasing prevalence of automation enabled by machine-type communication (MTC). Yet, MTC has substantial security difficulties because of diverse data, public network access, and an insufficient security mechanism. In this study, we develop a novel IIOT attack detection basis by joining the following four main steps: (a) data collection, (b) pre-processing, (c) attack detection, and (d) optimisation for high classification accuracy. At the initial stage of processing, known as "pre-processing," the collected raw data (input) is normalised. Attack detection requires the creation of an intelligent security architecture for IIoT networks. In this work, we present a learning model that can recognise previously unrecognised attacks on an IIoT network without the use of a labelled training set. An IoT network intrusion detection system-generated labelled dataset. The study also introduces a hybrid optimisation algorithm for pinpointing the optimal LSTM weight when it comes to intrusion detection. When trained on the labelled dataset provided by the proposed method, the improved LSTM outperforms the other models with a finding accuracy of 95%, as exposed in the research.</description>
    <pubDate>Thu, 30 Mar 2023 00:00:00 GMT</pubDate>
    <content:encoded><![CDATA[ <p>Smart cities, ITS, supply chains, and smart industries may all be developed with minimal human interaction thanks to the increasing prevalence of automation enabled by machine-type communication (MTC). Yet, MTC has substantial security difficulties because of diverse data, public network access, and an insufficient security mechanism. In this study, we develop a novel IIOT attack detection basis by joining the following four main steps: (a) data collection, (b) pre-processing, (c) attack detection, and (d) optimisation for high classification accuracy. At the initial stage of processing, known as "pre-processing," the collected raw data (input) is normalised. Attack detection requires the creation of an intelligent security architecture for IIoT networks. In this work, we present a learning model that can recognise previously unrecognised attacks on an IIoT network without the use of a labelled training set. An IoT network intrusion detection system-generated labelled dataset. The study also introduces a hybrid optimisation algorithm for pinpointing the optimal LSTM weight when it comes to intrusion detection. When trained on the labelled dataset provided by the proposed method, the improved LSTM outperforms the other models with a finding accuracy of 95%, as exposed in the research.</p> ]]></content:encoded>
    <dc:title>Routing Attack Detection Using Ensemble Deep Learning Model for IIoT</dc:title>
    <dc:creator>ramesh vatambeti</dc:creator>
    <dc:creator>gowtham mamidisetti</dc:creator>
    <dc:identifier>doi: 10.56578/ida020104</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2023-03-30</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2023-03-30</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>31</prism:startingPage>
    <prism:doi>10.56578/ida020104</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2023_2_1/ida020104</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2023_2_1/ida020103">
    <title>Information Dynamics and Applications, 2023, Volume 2, Issue 1: The Need to Improve DNS Security Architecture: An Adaptive Security Approach</title>
    <link>https://www.acadlore.com/article/IDA/2023_2_1/ida020103</link>
    <description>The Domain Name System (DNS) is an essential component of the internet infrastructure. Due to its importance, securing DNS becomes a necessity for current and future networks. Various DNS security architecture have been developed in order to provide security services; such as DNS over HTTPS (DoH), DNS over TLS (DoT), and DNS over QUIC (DoQ). Unfortunately, these security architectures, especially DoT, are limited and are open to a number of performance issues. In this paper, we evaluate the present state of DNS security architecture, and we would see clearly that existing DNS security architectures are insufficient to secure DNS data transiting over the network; considering the growing cybersecurity landscape. On this note, we propose the need and adoption of a security architecture named Adaptive Security Architecture. Adaptive Security Architecture is devised to guard against identified threats, and anticipate unidentified threats in a manner similar to the immune-response system of human. Basically, mimicking nature’s biodiversity as the fundamental means of effective attack responses. Finally, we conclude by an analysis to prove the need to improve DNS security architecture.</description>
    <pubDate>Thu, 30 Mar 2023 00:00:00 GMT</pubDate>
    <content:encoded><![CDATA[ <p>The Do<span>main Name System (DNS) is an essential component of the internet infrastructure. Due to its importance, securing DNS becomes a necessity for current and future networks. Various DNS security architecture have been developed in order to provide security services; such as DNS over HTTPS (DoH), DNS over TLS (DoT), and DNS over QUIC (DoQ). Unfortunately, these security architectures, especially DoT, are limited and are open to a number of performance issues. In this paper, we evaluate the present state of DNS security architecture, and we would see clearly that existing DNS security architectures are insufficient to secure DNS data transiting over the network; considering the growing cybersecurity landscape. On this note, we propose the need and adoption of a security architecture named Adaptive Security Architecture. Adaptive Security Architecture is devised to guard against identified threats, and anticipate unidentified threats in a manner similar to the immune-response system of human. Basically, mimicking nature’s biodiversity as the fundamental means of effective attack responses. Finally, we conclude by an analysis to prove the need to improve DNS security architecture</span>.</p> ]]></content:encoded>
    <dc:title>The Need to Improve DNS Security Architecture: An Adaptive Security Approach</dc:title>
    <dc:creator>daniel o. alao</dc:creator>
    <dc:creator>folasade y. ayankoya</dc:creator>
    <dc:creator>oluwabukola f. ajayi</dc:creator>
    <dc:creator>onome b. ohwo</dc:creator>
    <dc:identifier>doi: 10.56578/ida020103</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2023-03-30</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2023-03-30</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>19</prism:startingPage>
    <prism:doi>10.56578/ida020103</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2023_2_1/ida020103</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2023_2_1/ida020102">
    <title>Information Dynamics and Applications, 2023, Volume 2, Issue 1: An Enhanced Convolutional Neural Network for Accurate Classification of Grape Leaf Diseases</title>
    <link>https://www.acadlore.com/article/IDA/2023_2_1/ida020102</link>
    <description>Grape leaf diseases can significantly reduce grape yield and quality, making accurate and efficient identification of these diseases crucial for improving grape production. This study proposes a novel classification method for grape leaf disease images using an improved convolutional neural network. The Xception network serves as the base model, with the original ReLU activation function replaced by Mish to improve classification accuracy. An improved channel attention mechanism is integrated into the network, enabling it to automatically learn important information from each channel, and the fully connected layer is redesigned for optimal classification performance. Experimental results demonstrate that the proposed model (MS-Xception) achieves high accuracy with fewer parameters, achieving a recognition accuracy of 98.61% for grape leaf disease images. Compared to other state-of-the-art models such as ResNet50 and Swim-Transformer, the proposed model shows superior classification performance, providing an efficient method for intelligent diagnosis of grape leaf diseases. The proposed method significantly improves the accuracy and efficiency of grape leaf disease diagnosis and has potential for practical application in the field of grape production.</description>
    <pubDate>Thu, 30 Mar 2023 00:00:00 GMT</pubDate>
    <content:encoded><![CDATA[ Grape leaf diseases can significantly reduce grape yield and quality, making accurate and efficient identification of these diseases crucial for improving grape production. This study proposes a novel classification method for grape leaf disease images using an improved convolutional neural network. The Xception network serves as the base model, with the original ReLU activation function replaced by Mish to improve classification accuracy. An improved channel attention mechanism is integrated into the network, enabling it to automatically learn important information from each channel, and the fully connected layer is redesigned for optimal classification performance. Experimental results demonstrate that the proposed model (MS-Xception) achieves high accuracy with fewer parameters, achieving a recognition accuracy of 98.61% for grape leaf disease images. Compared to other state-of-the-art models such as ResNet50 and Swim-Transformer, the proposed model shows superior classification performance, providing an efficient method for intelligent diagnosis of grape leaf diseases. The proposed method significantly improves the accuracy and efficiency of grape leaf disease diagnosis and has potential for practical application in the field of grape production. ]]></content:encoded>
    <dc:title>An Enhanced Convolutional Neural Network for Accurate Classification of Grape Leaf Diseases</dc:title>
    <dc:creator>yinglai huang</dc:creator>
    <dc:creator>ning li</dc:creator>
    <dc:creator>zhenbo liu</dc:creator>
    <dc:identifier>doi: 10.56578/ida020102</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2023-03-30</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2023-03-30</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>8</prism:startingPage>
    <prism:doi>10.56578/ida020102</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2023_2_1/ida020102</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2023_2_1/ida020101">
    <title>Information Dynamics and Applications, 2023, Volume 2, Issue 1: ECO-LEACH: A Blockchain-Based Distributed Routing Protocol for Energy-Efficient Wireless Sensor Networks</title>
    <link>https://www.acadlore.com/article/IDA/2023_2_1/ida020101</link>
    <description>This paper proposes a novel architecture based on blockchain technology to enhance the dependability and safety of wireless sensor networks (WSN) by authenticating WSN nodes. In a WSN, sensor nodes collect and transmit data to cluster heads (CHs) for further processing. The proposed model employs the distance and residual energy-based low-energy adaptive clustering hierarchy (ECO-LEACH) protocol to replace CHs with ordinary nodes and the Interplanetary File System (IPFS) for storing data. In addition, consensus based on proof of authority (PoA) is used to validate transactions, reducing the computational cost associated with proof of work. The proposed system was evaluated using simulations with 300 sensor nodes and compared with other protocols, including LEACH, DDR-LEACH, PEGASIS, and LEACH-PSO. The simulation results showed that the proposed ECO-LEACH outperformed the other protocols in terms of energy consumption, throughput achieved, and network lifetime improvement. Specifically, the proposed system consumed 23.5J for 300 sensor nodes, achieved 687.5 kbps, and improved the network's lifetime by 4.12 seconds for 50 rounds. Overall, this paper provides a reliable and secure solution for authenticating WSN nodes, enhancing data transfer safety, and dependability. The proposed architecture offers a promising approach for addressing the challenges of WSN design using blockchain technology and PoA consensus. The comparative analysis shows that the proposed ECO-LEACH protocol outperforms other protocols in terms of energy consumption, throughput achieved, and network lifetime improvement for 300 sensor nodes.</description>
    <pubDate>Thu, 30 Mar 2023 00:00:00 GMT</pubDate>
    <content:encoded><![CDATA[ <p>This paper proposes a novel architecture based on blockchain technology to enhance the dependability and safety of wireless sensor networks (WSN) by authenticating WSN nodes. In a WSN, sensor nodes collect and transmit data to cluster heads (CHs) for further processing. The proposed model employs the distance and residual energy-based low-energy adaptive clustering hierarchy (ECO-LEACH) protocol to replace CHs with ordinary nodes and the Interplanetary File System (IPFS) for storing data. In addition, consensus based on proof of authority (PoA) is used to validate transactions, reducing the computational cost associated with proof of work. The proposed system was evaluated using simulations with 300 sensor nodes and compared with other protocols, including LEACH, DDR-LEACH, PEGASIS, and LEACH-PSO. The simulation results showed that the proposed ECO-LEACH outperformed the other protocols in terms of energy consumption, throughput achieved, and network lifetime improvement. Specifically, the proposed system consumed 23.5J for 300 sensor nodes, achieved 687.5 kbps, and improved the network's lifetime by 4.12 seconds for 50 rounds. Overall, this paper provides a reliable and secure solution for authenticating WSN nodes, enhancing data transfer safety, and dependability. The proposed architecture offers a promising approach for addressing the challenges of WSN design using blockchain technology and PoA consensus. The comparative analysis shows that the proposed ECO-LEACH protocol outperforms other protocols in terms of energy consumption, throughput achieved, and network lifetime improvement for 300 sensor nodes.</p> ]]></content:encoded>
    <dc:title>ECO-LEACH: A Blockchain-Based Distributed Routing Protocol for Energy-Efficient Wireless Sensor Networks</dc:title>
    <dc:creator>feroz khan a.b</dc:creator>
    <dc:identifier>doi: 10.56578/ida020101</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2023-03-30</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2023-03-30</prism:publicationDate>
    <prism:year>2023</prism:year>
    <prism:volume>2</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>1</prism:startingPage>
    <prism:doi>10.56578/ida020101</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2023_2_1/ida020101</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2022_1_1/ida010106">
    <title>Information Dynamics and Applications, 2022, Volume 1, Issue 1: Ensemble Learning Applications in Multiple Industries: A Review</title>
    <link>https://www.acadlore.com/article/IDA/2022_1_1/ida010106</link>
    <description>This study proposes a systematic review of the application of Ensemble learning (EL) in multiple industries. This study aims to review prevailing application in multiple industries to guide for the future landing application. This study also proposes a research method based on Systematic Literature Review (SLR) to address EL literature and help advance our understanding of EL for future optimization. The literature is divided three categories by the National Bureau of Statistics of China (NBSC): the primary industry, the secondary industry and the tertiary industry. Among existing problems in industrial management systems, the frequently discussed are quality control, prediction, detection, efficiency and satisfaction. In addition, given the huge potential in various fields, the gap and further directions are also suggested. This study is essential to industry managers and cross-disciplinary scholars to lead a guideline to solve the issues in practical work, as it provided a panorama of application domains and current problems. This is the first review of the application of EL in multiple industries in the literature. The paper has potential values to broaden the application area of EL, and to proposed a novel research method based SLR to sort out literature.</description>
    <pubDate>Mon, 26 Dec 2022 00:00:00 GMT</pubDate>
    <content:encoded><![CDATA[ <p>This study proposes a systematic review of the application of Ensemble learning (EL) in multiple industries. This study aims to review prevailing application in multiple industries to guide for the future landing application. This study also proposes a research method based on Systematic Literature Review (SLR) to address EL literature and help advance our understanding of EL for future optimization. The literature is divided three categories by the National Bureau of Statistics of China (NBSC): the primary industry, the secondary industry and the tertiary industry. Among existing problems in industrial management systems, the frequently discussed are quality control, prediction, detection, efficiency and satisfaction. In addition, given the huge potential in various fields, the gap and further directions are also suggested. This study is essential to industry managers and cross-disciplinary scholars to lead a guideline to solve the issues in practical work, as it provided a panorama of application domains and current problems. This is the first review of the application of EL in multiple industries in the literature. The paper has potential values to broaden the application area of EL, and to proposed a novel research method based SLR to sort out literature.</p> ]]></content:encoded>
    <dc:title>Ensemble Learning Applications in Multiple Industries: A Review</dc:title>
    <dc:creator>kuo-yi lin</dc:creator>
    <dc:creator>chancy huang</dc:creator>
    <dc:identifier>doi: 10.56578/ida010106</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2022-12-26</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2022-12-26</prism:publicationDate>
    <prism:year>2022</prism:year>
    <prism:volume>1</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>44</prism:startingPage>
    <prism:doi>10.56578/ida010106</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2022_1_1/ida010106</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2022_1_1/ida010105">
    <title>Information Dynamics and Applications, 2022, Volume 1, Issue 1: A Data-Driven Innovation Model of Big Data Digital Learning and Its Empirical Study</title>
    <link>https://www.acadlore.com/article/IDA/2022_1_1/ida010105</link>
    <description>Digital learning is the use of telecommunication technology to deliver information for education and training. As the increased acceleration of the propagation speed of the web, a lot of data collected by automated or semi-automated way. The 4s (Volume, Velocity, Variety and Veracity) of big data increase the challenge to extract useful value via systemic framework. This study aims to construct the data model of big data digital learning. Based on the digital learning data, data-driven innovation framework was proposed to identify data form and collect data. Bayesian network was proposed to capture learning model to extract user experience of students to enhance learning efficiency. Empirical study was conducted on a university to validate the proposed approach. The results have been implemented to support the strategies to improve student learning outcomes and competitiveness.</description>
    <pubDate>Mon, 26 Dec 2022 00:00:00 GMT</pubDate>
    <content:encoded><![CDATA[ <p>Digital learning is the use of telecommunication technology to deliver information for education and training. As the increased acceleration of the propagation speed of the web, a lot of data collected by automated or semi-automated way. The 4s (Volume, Velocity, Variety and Veracity) of big data increase the challenge to extract useful value via systemic framework. This study aims to construct the data model of big data digital learning. Based on the digital learning data, data-driven innovation framework was proposed to identify data form and collect data. Bayesian network was proposed to capture learning model to extract user experience of students to enhance learning efficiency. Empirical study was conducted on a university to validate the proposed approach. The results have been implemented to support the strategies to improve student learning outcomes and competitiveness.</p> ]]></content:encoded>
    <dc:title>A Data-Driven Innovation Model of Big Data Digital Learning and Its Empirical Study</dc:title>
    <dc:creator>jin he</dc:creator>
    <dc:creator>kuo-yi lin</dc:creator>
    <dc:creator>ya dai</dc:creator>
    <dc:identifier>doi: 10.56578/ida010105</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2022-12-26</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2022-12-26</prism:publicationDate>
    <prism:year>2022</prism:year>
    <prism:volume>1</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>35</prism:startingPage>
    <prism:doi>10.56578/ida010105</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2022_1_1/ida010105</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2022_1_1/ida010104">
    <title>Information Dynamics and Applications, 2022, Volume 1, Issue 1, Page 26: A Scalable Framework to Analyze Data from Heterogeneous Sources at Different Levels of Granularity</title>
    <link>https://www.acadlore.com/article/IDA/2022_1_1/ida010104</link>
    <description>There is an enormous amount of data present in many different formats, including databases (MsSql, MySQL, etc.), data repositories (.txt, html, pdf, etc.), and MongoDB (NoSQL, etc.). The processing, storing, and management of the data are complicated by the varied locations in which the data is stored. If combined, this data from several sites can yield a lot of important information. Many researchers have suggested different methods to extract, examine, and integrate the data. To manage heterogeneous data, researchers propose data warehouse and big data as solutions. However, when it comes to handling a variety of data, each of these methods has limitations. It is necessary to comprehend and use this information, as well as to evaluate the massive quantities that are increasing day by day. We propose a solution that facilitates data extraction from a variety of sources. It involves two steps: first, it extracts the pertinent data, and second, it identifies the machine learning algorithm to analyze the data. This paper proposes a system for retrieving data from many sources, such as databases, data sources, and NoSQL. Later, the framework was put to the test on a variety of datasets to extract and integrate data from diverse sources, and it was found that the integrated dataset performed better than the individual datasets in terms of accuracy, management, storage, and other factors. Thus, our prototype scales and functions effectively as the number of heterogeneous data sources increases.</description>
    <pubDate>2022-12-26</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;There is an enormous amount of data present in many different formats, including databases (MsSql, MySQL, etc.), data repositories (.txt, html, pdf, etc.), and MongoDB (NoSQL, etc.). The processing, storing, and management of the data are complicated by the varied locations in which the data is stored. If combined, this data from several sites can yield a lot of important information. Many researchers have suggested different methods to extract, examine, and integrate the data. To manage heterogeneous data, researchers propose data warehouse and big data as solutions. However, when it comes to handling a variety of data, each of these methods has limitations. It is necessary to comprehend and use this information, as well as to evaluate the massive quantities that are increasing day by day. We propose a solution that facilitates data extraction from a variety of sources. It involves two steps: first, it extracts the pertinent data, and second, it identifies the machine learning algorithm to analyze the data. This paper proposes a system for retrieving data from many sources, such as databases, data sources, and NoSQL. Later, the framework was put to the test on a variety of datasets to extract and integrate data from diverse sources, and it was found that the integrated dataset performed better than the individual datasets in terms of accuracy, management, storage, and other factors. Thus, our prototype scales and functions effectively as the number of heterogeneous data sources increases.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>A Scalable Framework to Analyze Data from Heterogeneous Sources at Different Levels of Granularity</dc:title>
    <dc:creator>Iqbal Hasan</dc:creator>
    <dc:creator>S.A.M. Rizvi</dc:creator>
    <dc:creator>Majid Zaman</dc:creator>
    <dc:creator>Waseem Jeelani Bakshi</dc:creator>
    <dc:creator>Sheikh Amir Fayaz</dc:creator>
    <dc:identifier>doi:10.56578/ida010104</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2022-12-26</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2022-12-26</prism:publicationDate>
    <prism:year>2022</prism:year>
    <prism:volume>1</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>26</prism:startingPage>
    <prism:doi>10.56578/ida010104</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2022_1_1/ida010104</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2022_1_1/ida010103">
    <title>Information Dynamics and Applications, 2022, Volume 1, Issue 1, Page 14: A Comprehensive Review of Geographic Routing Protocols in Wireless Sensor Network</title>
    <link>https://www.acadlore.com/article/IDA/2022_1_1/ida010103</link>
    <description>To analyze the impact of high mobility, dynamic topologies, scalability and routing due to the more dynamic changes in network. To enhance mobile Ad-hoc network (MANET) self-organization capabilities by geographical routing algorithm during mobility. In this paper, a survey has been carried out on geographic routing protocols, such as hybrid routing, Greedy Routing, face-2 Algorithm, Perimeter Routing, quasi random deployment (QRD) techniques and time of arrival (TOA). An optimized multipath routing in wireless sensor network (WSN), energy utilization, detection of anonymous routing, node mobility prediction, data packet distribution strategies in WSN is analyzed. Geographic routing offers previous data packet information such as physical locations, packet elimination dependencies, storage capacity of topology, Associate costs and also identifies the dynamic behavior of nodes with respect to packets frequencies.</description>
    <pubDate>2022-12-26</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;To analyze the impact of high mobility, dynamic topologies, scalability and routing due to the more dynamic changes in network. To enhance mobile Ad-hoc network (MANET) self-organization capabilities by geographical routing algorithm during mobility. In this paper, a survey has been carried out on geographic routing protocols, such as hybrid routing, Greedy Routing, face-2 Algorithm, Perimeter Routing, quasi random deployment (QRD) techniques and time of arrival (TOA). An optimized multipath routing in wireless sensor network (WSN), energy utilization, detection of anonymous routing, node mobility prediction, data packet distribution strategies in WSN is analyzed. Geographic routing offers previous data packet information such as physical locations, packet elimination dependencies, storage capacity of topology, Associate costs and also identifies the dynamic behavior of nodes with respect to packets frequencies.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>A Comprehensive Review of Geographic Routing Protocols in Wireless Sensor Network</dc:title>
    <dc:creator>Mamtha M. Pandith</dc:creator>
    <dc:creator>Nataraj Kanathur Ramaswamy</dc:creator>
    <dc:creator>Mallikarjunaswamy Srikantaswamy</dc:creator>
    <dc:creator>Rekha Kanathur Ramaswamy</dc:creator>
    <dc:identifier>doi:10.56578/ida010103</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2022-12-26</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2022-12-26</prism:publicationDate>
    <prism:year>2022</prism:year>
    <prism:volume>1</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>14</prism:startingPage>
    <prism:doi>10.56578/ida010103</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2022_1_1/ida010103</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2022_1_1/ida010102">
    <title>Information Dynamics and Applications, 2022, Volume 1, Issue 1, Page 2: Integration of Ontology Transformation into Hidden Markov Model</title>
    <link>https://www.acadlore.com/article/IDA/2022_1_1/ida010102</link>
    <description>The goal of this study is to suggest a method for turning an ontology into a hidden Markov model (HMM). Ontology properties (relationships between classes) and ontology classes are taken as HMM symbols and states, respectively. Knowledge is represented in many different fields using the central element of the Semantic Web dubbed ontology. The authors employed machine learning technologies like HMM to add knowledge to these ontologies or to extract knowledge from within them. The meaning obtained from ontologies is not described during this task. The ontology triples that were extracted using SPARQL queries are used in this paper to transform the ontology into an HMM in order to handle this semantic. The Pizza ontology has been used to implement this method, which is based on lightweight ontologies.</description>
    <pubDate>2022-12-26</pubDate>
    <content:encoded>&lt;![CDATA[ &lt;p&gt;The goal of this study is to suggest a method for turning an ontology into a hidden Markov model (HMM). Ontology properties (relationships between classes) and ontology classes are taken as HMM symbols and states, respectively. Knowledge is represented in many different fields using the central element of the Semantic Web dubbed ontology. The authors employed machine learning technologies like HMM to add knowledge to these ontologies or to extract knowledge from within them. The meaning obtained from ontologies is not described during this task. The ontology triples that were extracted using SPARQL queries are used in this paper to transform the ontology into an HMM in order to handle this semantic. The Pizza ontology has been used to implement this method, which is based on lightweight ontologies.&lt;/p&gt; ]]&gt;</content:encoded>
    <dc:title>Integration of Ontology Transformation into Hidden Markov Model</dc:title>
    <dc:creator>Lazarre Warda</dc:creator>
    <dc:creator>Guidedi Kaladzavi</dc:creator>
    <dc:creator>Amaria Samdalle</dc:creator>
    <dc:creator>Kolyang</dc:creator>
    <dc:identifier>doi:10.56578/ida010102</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2022-12-26</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2022-12-26</prism:publicationDate>
    <prism:year>2022</prism:year>
    <prism:volume>1</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>2</prism:startingPage>
    <prism:doi>10.56578/ida010102</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2022_1_1/ida010102</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <item rdf:resource="https://www.acadlore.com/article/IDA/2022_1_1/ida010101">
    <title>Information Dynamics and Applications, 2022, Volume 1, Issue 1, Page 1: Editorial to the Inaugural Issue</title>
    <link>https://www.acadlore.com/article/IDA/2022_1_1/ida010101</link>
    <description/>
    <pubDate>2022-12-26</pubDate>
    <content:encoded>&lt;![CDATA[  ]]&gt;</content:encoded>
    <dc:title>Editorial to the Inaugural Issue</dc:title>
    <dc:creator>Kuo-Yi Lin</dc:creator>
    <dc:identifier>doi:10.56578/ida010101</dc:identifier>
    <dc:source>Information Dynamics and Applications</dc:source>
    <dc:date>2022-12-26</dc:date>
    <prism:publicationName>Information Dynamics and Applications</prism:publicationName>
    <prism:publicationDate>2022-12-26</prism:publicationDate>
    <prism:year>2022</prism:year>
    <prism:volume>1</prism:volume>
    <prism:number>1</prism:number>
    <prism:section>Article</prism:section>
    <prism:startingPage>1</prism:startingPage>
    <prism:doi>10.56578/ida010101</prism:doi>
    <prism:url>https://www.acadlore.com/article/IDA/2022_1_1/ida010101</prism:url>
    <cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
  </item>
  <cc:License rdf:about="https://creativecommons.org/licenses/by/4.0/">
    <cc:permits rdf:resource="http://creativecommons.org/ns#Reproduction"/>
    <cc:permits rdf:resource="http://creativecommons.org/ns#Distribution"/>
    <cc:permits rdf:resource="http://creativecommons.org/ns#DerivativeWorks"/>
  </cc:License>
</rdf:RDF>