<?xml version='1.0' encoding='UTF-8'?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Publishing DTD v1.1d1 20130915//EN" "JATS-journalpublishing1.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink">
  <front>
    <journal-meta id="journal-meta-1">
      <journal-id journal-id-type="nlm-ta">Biomedical Research and Therapy</journal-id>
      <journal-id journal-id-type="publisher-id">Biomedical Research and Therapy</journal-id>
      <journal-id journal-id-type="journal_submission_guidelines">http://www.bmrat.org/</journal-id>
      <journal-title-group>
        <journal-title>Biomedical Research and Therapy</journal-title>
      </journal-title-group>
      <issn publication-format="print"/>
    </journal-meta>
    <article-meta id="article-meta-1">
      <article-id pub-id-type="doi">10.15419/bmrat.v10i9.832</article-id>
      <title-group>
        <article-title id="at-0882641ff80d">Early Alzheimer’s disease diagnosis using an XG-Boost model applied to MRI images</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="author">
          <contrib-id contrib-id-type="orcid"/>
          <name id="n-12bb9c571709">
            <surname>Nguyen</surname>
            <given-names>Khoi</given-names>
          </name>
          <xref id="x-2685652fd544" rid="a-3a0b5ca5df42" ref-type="aff">1</xref>
          <xref id="x-11089404a814" rid="a-4865cca37dd6" ref-type="aff">2</xref>
        </contrib>
        <contrib contrib-type="author">
          <contrib-id contrib-id-type="orcid">0000-0002-6016-5929</contrib-id>
          <name id="n-820f1ce1f2ee">
            <surname>Nguyen</surname>
            <given-names>My</given-names>
          </name>
          <xref id="x-2702c828734a" rid="a-773c23407ae3" ref-type="aff">3</xref>
          <xref id="x-5635a9924aea" rid="a-4865cca37dd6" ref-type="aff">2</xref>
        </contrib>
        <contrib contrib-type="author">
          <contrib-id contrib-id-type="orcid"/>
          <name id="n-6f0b3aa36d37">
            <surname>Dang</surname>
            <given-names>Khiet</given-names>
          </name>
          <xref id="x-1a52ac4ee807" rid="a-3a0b5ca5df42" ref-type="aff">1</xref>
          <xref id="x-1dd88d7fbbee" rid="a-4865cca37dd6" ref-type="aff">2</xref>
        </contrib>
        <contrib contrib-type="author">
          <contrib-id contrib-id-type="orcid"/>
          <name id="n-1f4fcc018d85">
            <surname>Pham</surname>
            <given-names>Bao</given-names>
          </name>
          <xref id="x-cbae40cb608b" rid="a-3a0b5ca5df42" ref-type="aff">1</xref>
          <xref id="x-704ed30567f2" rid="a-4865cca37dd6" ref-type="aff">2</xref>
        </contrib>
        <contrib contrib-type="author">
          <contrib-id contrib-id-type="orcid"/>
          <name id="n-afd32a6aef1b">
            <surname>Huynh</surname>
            <given-names>Vy</given-names>
          </name>
          <xref id="x-1d9b8ab2cb22" rid="a-4865cca37dd6" ref-type="aff">2</xref>
          <xref id="x-f3e21282555a" rid="a-773c23407ae3" ref-type="aff">3</xref>
        </contrib>
        <contrib contrib-type="author">
          <contrib-id contrib-id-type="orcid"/>
          <name id="n-4ebcb41f5815">
            <surname>Vo</surname>
            <given-names>Toi</given-names>
          </name>
          <xref id="x-06f91878d903" rid="a-3a0b5ca5df42" ref-type="aff">1</xref>
          <xref id="x-540ad78cc653" rid="a-4865cca37dd6" ref-type="aff">2</xref>
        </contrib>
        <contrib contrib-type="author">
          <contrib-id contrib-id-type="orcid"/>
          <name id="n-efccedbd582a">
            <surname>Ngo</surname>
            <given-names>Lua</given-names>
          </name>
          <xref id="x-15411d7fc2e0" rid="a-3a0b5ca5df42" ref-type="aff">1</xref>
          <xref id="x-76f4ef7fa3e9" rid="a-4865cca37dd6" ref-type="aff">2</xref>
        </contrib>
        <contrib contrib-type="author" corresp="yes">
          <contrib-id contrib-id-type="orcid"/>
          <name id="n-f9d10b537de7">
            <surname>Ha</surname>
            <given-names>Huong</given-names>
          </name>
          <email>htthuong@hcmiu.edu.vn</email>
          <xref id="x-f290a17b39bf" rid="a-3a0b5ca5df42" ref-type="aff">1</xref>
          <xref id="x-3182077c6b49" rid="a-4865cca37dd6" ref-type="aff">2</xref>
        </contrib>
        <aff id="a-3a0b5ca5df42">
          <institution>School of Biomedical Engineering, International University, Viet Nam</institution>
        </aff>
        <aff id="a-4865cca37dd6">
          <institution>Vietnam National University Ho Chi Minh City, Ho Chi Minh City, Viet Nam</institution>
        </aff>
        <aff id="a-773c23407ae3">
          <institution>Faculty of Biology – Biotechnology, University of Science, Viet Nam</institution>
        </aff>
      </contrib-group>
      <volume>10</volume>
      <issue>9</issue>
      <fpage>5929</fpage>
      <lpage>5944</lpage>
      <permissions/>
      <abstract id="abstract-dfdbb203b1b2">
        <title id="abstract-title-1696c5124346">Abstract</title>
        <p id="paragraph-9d6e068793c2"><bold id="strong-1">Introduction:</bold> Early Alzheimer’s disease (AD) diagnosis is critical to improving the success of new treatments in clinical trials, especially at the early mild cognitive impairment (EMCI) stage. This study aimed to tackle this problem by developing an accurate classification model for early AD detection at the EMCI stage based on magnetic resonance imaging (MRI). <bold id="strong-2">Methods:</bold> This study developed the proposed classification model through a machine-learning pipeline with three main steps. First, features were extracted from MRI images using FreeSurfer. Second, the extracted features were filtered using principal component analysis (PCA), backward elimination (BE), and extreme gradient (XG)-Boost importance (XGBI), the efficiency of which was evaluated. Finally, the selected features were combined with cognitive scores (Mini Mental State Examination [MMSE] and Clinical Dementia Rating [CDR]) to create an XG-Boost three-class classifier: AD vs. EMCI vs. cognitively normal (CN). <bold id="strong-3">Results:</bold> The MMSE and CDR had the highest importance weights, followed by the thickness of the left superior temporal sulcus and banks of the superior temporal lobe. Without feature selection, the model had the lowest accuracy of 69.0%. After feature selection and the addition of cognitive scores, the accuracy of the PCA, BE, and XGBI approaches improved to 74.0%, 90.9%, and 91.5%, respectively. The BE with tuning parameters model was chosen as the final model since it had the highest accuracy of 92.0%. The area under the receiver operating characteristic curve for the CN, AD, and EMCI classes were 0.98, 0.94, and 0.88, respectively. <bold id="strong-4">Conclusion:</bold> Our proposed model shows promise in early AD diagnosis and can be fine-tuned in the future through testing on a multi-dataset.</p>
      </abstract>
      <kwd-group id="kwd-group-1">
        <title>Keywords</title>
        <kwd>Alzheimer’s disease</kwd>
        <kwd>Early mild cognitive impairment</kwd>
        <kwd>early diagnosis</kwd>
        <kwd>three-class classification</kwd>
        <kwd>XG-Boost</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec>
      <title id="t-23d457a82eb1">Introduction</title>
      <p id="p-2f55a78bb99e">Alzheimer’s disease (AD) is the most common neurodegenerative disorder that greatly reduces patients’ quality of life and makes them utterly dependent on their caregivers<bold id="s-92f5fc9ea02b"><xref rid="R208498129137412" ref-type="bibr">1</xref>, <xref rid="R208498129137413" ref-type="bibr">2</xref></bold>. Prolonged medical treatment and care exert a substantial economic strain on patients and their families, potentially costing &gt;1.1 trillion US dollars worldwide<bold id="s-a5d79953656c"><xref id="x-d7395a3f5c1d" rid="R208498129137412" ref-type="bibr">1</xref></bold>. Unfortunately, once cognitive symptoms manifest, current medications cannot reverse disease progression due to the continued loss of neurons without replacement by cell division<bold id="s-14fd32053658"><xref rid="R208498129137414" ref-type="bibr">3</xref>, <xref rid="R208498129137415" ref-type="bibr">4</xref></bold>. Therefore, identifying patients at the early mild cognitive impairment (EMCI) stage is critical to improving the success of new treatments or interventions in clinical trials.</p>
      <p id="p-4f026e758b35">Several breakthrough approaches have attempted to predict AD at its preclinical stage, which could allow the application of medications to halt AD development from its onset<bold id="s-7bfeca3aac34"><xref rid="R208498129137414" ref-type="bibr">3</xref>, <xref rid="R208498129137416" ref-type="bibr">5</xref>, <xref rid="R208498129137417" ref-type="bibr">6</xref>, <xref rid="R208498129137418" ref-type="bibr">7</xref>, <xref rid="R208498129137419" ref-type="bibr">8</xref></bold>. About 80% of patients diagnosed with mild cognitive impairment (MCI) convert to AD within six years<bold id="s-419095ec198b"><xref id="x-357e0ecf72e4" rid="R208498129137420" ref-type="bibr">9</xref></bold>. Recent studies have focused on this transitional phase to detect the preclinical AD stage, particularly EMCI<bold id="s-776bc972f7f1"><xref id="x-e8648806d40f" rid="R208498129137416" ref-type="bibr">5</xref></bold>. One promising approach to detect EMCI is identifying brain morphological changes through neuroimaging data, such as magnetic resonance imaging (MRI).</p>
      <p id="p-2c33dcf7907a">Early AD detection using brain MRI data remains clinically challenging since the subtle changes during its transitional period cannot be assessed manually<bold id="s-969eb13fb48f"><xref id="x-9b7c5dfd1a88" rid="R208498129137414" ref-type="bibr">3</xref></bold>. Automatic computation and artificial intelligence (AI) approaches such as deep learning (DL) or machine learning (ML) are required to identify brain structural features at the EMCI stage. Of numerous AI-assisted methods, DL has been broadly used because of its high performance, especially the convolutional neural network (CNN)<bold id="s-f153ac40923d"><xref rid="R208498129137416" ref-type="bibr">5</xref>, <xref rid="R208498129137421" ref-type="bibr">10</xref></bold>. Kang <italic id="e-c9d22bf2ecb7">et al</italic>. combined a 2D CNN with transfer learning to identify EMCI by processing a multi-modal dataset (MRI and diffusion tensor imaging data), achieving the highest accuracy of 94.2% for cognitively normal (CN) <italic id="e-13027a88993f">vs</italic>. EMCI patients<bold id="s-33a3d132668d"><xref id="x-a38142f75d70" rid="R208498129137416" ref-type="bibr">5</xref></bold>. In addition, Kolahkaj <italic id="e-6bcd3f4309de">et al</italic>. built a DL architecture based on the BrainNet CNN model to detect EMCI, achieving high accuracies for binary classification: 0.96, 0.98, and 0.95 for NC/EMCI, NC/MCI, and EMCI/MCI, respectively<bold id="s-7e9c08da5eb0"><xref id="x-035c1c73b130" rid="R208498129137422" ref-type="bibr">11</xref></bold>.</p>
      <p id="p-bbb4cd2193d5">Despite its significant results, DL has several limitations that could hinder clinical applications. Firstly, DL models are prone to encounter overfitting due to the many parameters considered<bold id="s-301ccc1225b4"><xref id="x-65c352ef257a" rid="R208498129137423" ref-type="bibr">12</xref></bold>. Secondly, analysts cannot provide a plausible explanation for the algorithm’s performance, which is called a black box. Therefore, to build an understandable prediction model, making the shift to ML for early AD detection is beneficial for neurologists and doctors.</p>
      <p id="p-19179cfb1038">While most ML studies have focused on binary classification, some have focused on multi-class classification. However, there is a growing need for a multi-class algorithm that can effectively distinguish the prodromal stage (EMCI) from the array of other stages (late MCI [LMCI], AD, and CN), enabling an early AD diagnosis. Moreover, it is important to note that existing multi-class ML models have low accuracies. In 2022, Techa<italic id="e-fd290a76b83b"> et al</italic>. showed that a new model based on three CNN architectures (DenseNet196, VGG16, and ResNet50) achieved 89% accuracy in discriminating normal, very mild dementia, mild dementia, moderate dementia, and AD<bold id="s-7871b144773c"><xref id="x-eea22469785f" rid="R208498129137424" ref-type="bibr">13</xref></bold>. Alorf <italic id="e-637848cebaeb">et al.</italic> implemented a Brain Connectivity-Based Convolutional Network in 2022, which provided 84.03% accuracy for six-class classification (AD, LMCI, MCI, EMCI, subjective memory complaints, and CN)<bold id="s-6191f5f28971"><xref id="x-2748ed68b39f" rid="R208498129137425" ref-type="bibr">14</xref></bold>. Another major difficulty when identifying the initial AD stages is the subtle structural change in subjects with EMCI. EMCI is elusive and cannot be recognized by the diagnostic criteria for AD<bold id="s-e9089ee96225"><xref id="x-5cd7f1ca1886" rid="R208498129137426" ref-type="bibr">15</xref></bold>. Furthermore, EMCI and MCI are highly heterogeneous since they can be easily mistaken for multiple pathological conditions, especially other neurodegenerative diseases<bold id="s-c2992d0f8199"><xref rid="R208498129137427" ref-type="bibr">16</xref>, <xref rid="R208498129137428" ref-type="bibr">17</xref></bold>. Therefore, EMCI classification requires further evaluation and approaches to optimize its efficiency.</p>
      <p id="p-42eacbe91932">One potential ML model to address the early AD detection challenge is extreme gradient boosting (XG-Boost). XG-Boost is a scalable tree-based ensemble learning implemented from the gradient boosting system. It introduces errors from the previous weak learner to the latter learner, improving its learning accuracy<bold id="s-66ce7028410a"><xref id="x-93913d421ed6" rid="R208498129137429" ref-type="bibr">18</xref></bold>. Since its results depend on many decision trees, XG-Boost shows high compatibility, competitive execution speed, and accuracy when applied to large data sets, making it suitable for clinical application<bold id="s-4cfa3ba12b24"><xref id="x-2ca78e0f1d80" rid="R208498129137430" ref-type="bibr">19</xref></bold>. While few studies have used XG-Boost for AD diagnosis, the preliminary results are promising. Ong <italic id="e-4c3e72bd0483">et al</italic>. proposed an XG-Boost model to classify AD and CN subjects using the FreeSurfer library to extract insight features from MRI, achieving an area under the receiver operative characteristic (ROC) curve (AUC) of 91%<bold id="s-b97a2380e56d"><xref id="x-0bc2ffbe48cd" rid="R208498129137431" ref-type="bibr">20</xref></bold>. Tuan <italic id="e-87ec44b6bc53">et al</italic>. presented an XG-Boost model to classify AD and normal subjects based on the tissues segmented by a CNN and Gaussian mixture model<bold id="s-7d6c8010ca24"><xref id="x-fcacc65a6c4c" rid="R208498129137432" ref-type="bibr">21</xref></bold>. Their highest accuracy was 89% when combined with a support vector machine (SVM) and CNN<bold id="s-c371e3a58abd"><xref id="x-160535dcb3fd" rid="R208498129137432" ref-type="bibr">21</xref></bold>. However, both models had several limitations, such as high computation cost and susceptibility to sample size and complexity. They also did not attempt to classify three classes. Therefore, future improvement is required to enhance the models’ accuracy and validity.</p>
      <p id="p-4e298ae5595b">This study used XG-Boost for three-class classification, primarily focusing on distinguishing CN, EMCI, and AD. It also evaluated and optimized three feature selection methods—backward elimination, XG-Boost importance (XGBI), and principal component analysis (PCA)—to identify the most suitable method for the XG-Boost model. When combined with the Mini Mental State Examination (MMSE) and Clinical Dementia Rating (CDR) scores, our model achieved the highest accuracy of 92% for distinguishing AD, EMCI, and CN. Only three features overlapped between the BE and XGBI feature selection methods: MMSE, CDR, and left hippocampus volume. While these results showed that the model still depends on the cognitive symptoms of AD rather than its brain structural changes, our model has great potential as an assistive tool for AD diagnosis with high performance, especially when considering its multi-class classification.</p>
    </sec>
    <sec>
      <title id="t-f1e3528afc9f">Methods</title>
      <sec>
        <title id="t-99e169cf55fc">
          <bold id="s-fc3ba8dfac16">Participants</bold>
        </title>
        <p id="p-c877d265b3a5">This study obtained its data from the Alzheimer’s Disease Neuroimaging Initiative (ADNI) database (http://adni.loni.usc.edu)<bold id="s-03dec8edad1d"><xref id="x-71dcfb1986a6" rid="R208498129137433" ref-type="bibr">22</xref></bold>. The ADNI was launched in 2003 as a public-private partnership led by Principal Investigator Michael W. Weiner, MD. Its primary goal has been to test whether serial MRI, positron emission tomography, biological markers, and clinical and neuropsychological assessments can be combined to measure MCI and EMCI progression<bold id="s-a56c2719f612"><xref id="x-50f06548a812" rid="R208498129137433" ref-type="bibr">22</xref></bold>.</p>
        <p id="p-3d0de62884f9">The data comprised 663 subjects who were equally grouped into three classes: CN, EMCI, and AD. Their demographic information is summarized in <bold id="s-11f2825ad267"><xref id="x-b55544e94b21" rid="tw-8e1f71bacff1" ref-type="table">Table 1</xref></bold>.</p>
        <p id="p-34e38d5f32f1"/>
        <table-wrap id="tw-8e1f71bacff1" orientation="portrait">
          <label>Table 1</label>
          <caption id="c-6b7a567342e2">
            <title id="t-a59e426c7288">
              <bold id="s-990352789b52">Demographic information of 663 recruited subjects from ADNI</bold>
            </title>
          </caption>
          <table id="table-1" rules="rows">
            <colgroup/>
            <thead id="table-section-header-aace14443637">
              <tr id="tr-4b1a03d5f4c9">
                <th id="tc-3ce490c08e80" align="left">
                  <p id="p-90653eabc238"/>
                </th>
                <th id="tc-e660aa86cc36" align="center">
                  <p id="p-d685d6f16137">CN </p>
                  <p id="p-eeea62d39f3a">(n = 221) </p>
                </th>
                <th id="tc-3587d09feb84" align="center">
                  <p id="p-5610e24e15a2">EMCI </p>
                  <p id="p-39e7da1ef8e7">(n = 221) </p>
                </th>
                <th id="tc-83d8f3551dbe" align="center">
                  <p id="p-b9081015ac94">AD </p>
                  <p id="p-530db23c050a">(n = 221) </p>
                </th>
                <th id="tc-159fd0f6ec81" align="center">
                  <p id="p-facf60952348">p</p>
                </th>
              </tr>
            </thead>
            <tbody id="table-section-1">
              <tr id="table-row-2">
                <td id="table-cell-6" align="left">
                  <p id="p-41986d0757d6">Age </p>
                </td>
                <td id="table-cell-7" align="center">
                  <p id="p-8249a22dbe32">75.28 ± 5.76 </p>
                </td>
                <td id="table-cell-8" align="center">
                  <p id="p-eb369f61641e">71.45 ± 7.23* </p>
                </td>
                <td id="table-cell-9" align="center">
                  <p id="p-976e1a1134e9">75.4 ± 7.702  </p>
                </td>
                <td id="table-cell-10" align="center">
                  <p id="p-dc78228d1217">&lt; 0.0001 </p>
                </td>
              </tr>
              <tr id="table-row-3">
                <td id="table-cell-11" align="left">
                  <p id="p-c8cc1c18c3f6">Sex (M/F) </p>
                </td>
                <td id="table-cell-12" align="center">
                  <p id="p-74c023fb0b32">120/101 </p>
                </td>
                <td id="table-cell-13" align="center">
                  <p id="p-37eb05f5fc0b">118/103 </p>
                </td>
                <td id="table-cell-14" align="center">
                  <p id="p-f9534f807105">120/101 </p>
                </td>
                <td id="table-cell-15" align="center">
                  <p id="p-603f7140474f">0.9760 </p>
                </td>
              </tr>
              <tr id="table-row-4">
                <td id="table-cell-16" align="left">
                  <p id="p-f4f14661eaf5">MMSE Score </p>
                </td>
                <td id="table-cell-17" align="center">
                  <p id="p-b35c50179c57">29.06 ± 1.1 </p>
                </td>
                <td id="table-cell-18" align="center">
                  <p id="p-f970bbe66691">28.12 ± 1.66* </p>
                </td>
                <td id="table-cell-19" align="center">
                  <p id="p-7dccd477b701">22.8 ± 2.63* </p>
                </td>
                <td id="table-cell-20" align="center">
                  <p id="paragraph-23">&lt; 0.0001 </p>
                </td>
              </tr>
              <tr id="table-row-5">
                <td id="table-cell-21" align="left">
                  <p id="paragraph-24">CDR Score </p>
                </td>
                <td id="table-cell-22" align="center">
                  <p id="p-1f7ba95e4051">0.03 ± 0.11 </p>
                </td>
                <td id="table-cell-23" align="center">
                  <p id="p-6a9c60e8b81a">0.47 ± 0.16* </p>
                </td>
                <td id="table-cell-24" align="center">
                  <p id="p-b3c661effdc5">0.81 ± 0.32* </p>
                </td>
                <td id="table-cell-25" align="center">
                  <p id="p-27f0d9143727">&lt; 0.0001 </p>
                </td>
              </tr>
              <tr id="table-row-6">
                <td id="table-cell-26" align="left">
                  <p id="p-1c1a5074acbf">Education (Years)</p>
                </td>
                <td id="table-cell-27" align="center">
                  <p id="p-2699cfca4687">16.18 ± 3.88</p>
                </td>
                <td id="table-cell-28" align="center">
                  <p id="p-3568dcbe6bfc">16.09 ± 2.65 </p>
                </td>
                <td id="table-cell-29" align="center">
                  <p id="p-c7c705262c5f">14.65 ± 4.35*</p>
                </td>
                <td id="table-cell-30" align="center">
                  <p id="p-741012d40529">&lt; 0.0001</p>
                </td>
              </tr>
              <tr id="table-row-7">
                <td id="table-cell-31" align="left">
                  <p id="p-76457892d894">ApoE4 (+/-)</p>
                </td>
                <td id="table-cell-32" align="center">
                  <p id="p-19db1012b83e">157/64 </p>
                </td>
                <td id="table-cell-33" align="center">
                  <p id="p-30eeea8ae420">82/139 </p>
                </td>
                <td id="table-cell-34" align="center">
                  <p id="paragraph-37">58/163 </p>
                </td>
                <td id="table-cell-35" align="center">
                  <p id="paragraph-38">&lt; 0.0001</p>
                </td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn-group>
              <fn id="f-65c31cc2c174">
                <p id="p-919dbfb522ba">Chi-square test was conducted for gender and genotype. ANOVA one-way was conducted for age, MMSE, CDR, and educational years. * p &lt; 0.0001, compared to CN group</p>
              </fn>
              <fn id="f-65ab5bb852ab">
                <p id="p-8c779da2c23a"><bold id="s-321466bfdf7f">Abbreviations</bold>: <bold id="s-c3f7e96fa18b">ns</bold>: not significant, compared to CN group (p = 0.1234) (Bonferroni’s comparison). Data are illustrated as mean ± standard deviation or number/number. <bold id="s-b9a7da616668">CN</bold>: Cognitively Normal; <bold id="s-8775a6898f99">EMCI</bold>: Early Mild Cognitive Impairment; <bold id="s-7bfc75b41e30">AD</bold>: Alzheimer’s disease; <bold id="s-8ccbaeca7f87">MMSE</bold>: Mini-Mental State Examination; <bold id="s-43eeec588869">CDR</bold>: Clinical Dementia Rating; (+) positive; (-) negative where available.</p>
              </fn>
            </fn-group>
          </table-wrap-foot>
        </table-wrap>
        <p id="p-e2fa87b97b9e"/>
        <p id="p-11a33fa2f305"/>
        <fig id="f-b5dbb6622b89" orientation="portrait" fig-type="graphic" position="anchor">
          <label>Figure 1 </label>
          <caption id="c-e0b8285b326e">
            <title id="t-b63b9338829f"><bold id="s-f44824122f67">A study framework of AD detection, which includes three main steps</bold>. T1-weighted MRI data were collected from the ADNI database (step 1) and preprocessed through FreeSurfer software to obtain brain structure features. Sequentially, they were combined with two cognitive scores and tuned by three selection methods to construct six approaches for input features (step 2). Finally, generated inputs passed through the XG-Boost model to create the decision tree for AD status (ternary classification), which are CN, EMCI, and AD in step 3. The outcome also showed the accuracies of the respective input. </title>
            <p id="p-0d166da8fa52"><bold id="s-c32972ce8034">Abbreviations</bold>: <bold id="s-af9b529a633f">CN</bold>: Cognitively Normal; <bold id="s-2536b1d921ba">EMCI</bold>: Early Mild Cognitive Impairment; <bold id="s-5f8066b90436">AD</bold>: Alzheimer’s disease; <bold id="s-4deb151da417">PCA</bold>: Principal Component Analysis; <bold id="s-9ed9e0f0b9cb">XG-Boost</bold>: Extreme Gradient Boosting. </p>
          </caption>
          <graphic id="g-62c2adaa5142" xlink:href="https://typeset-prod-media-server.s3.amazonaws.com/article_uploads/b355182f-1342-40cb-aae1-acd9b044ab34/image/8afe4153-a023-4e7a-94ea-793acceee1ea-uimage.png"/>
        </fig>
        <p id="p-79fcdb70870c"/>
        <p id="p-67ddd59bf894"/>
        <p id="p-1fdcd9ff2b2c"/>
        <fig id="f-508527275f6f" orientation="portrait" fig-type="graphic" position="anchor">
          <label>Figure 2 </label>
          <caption id="c-94c60b941a13">
            <title id="t-a370a8855356"><bold id="s-5e385fd92c76">The process of FreeSurfer in feature extraction</bold>. MRI Preprocess: Including image registration, skull stripping and intensity normalization. Cortical reconstruction and subcortical segmentation: (<bold id="s-90b9a3cc4f22">1</bold>) Convert a three-dimensional anatomical volume into a two-dimensional surface; (<bold id="s-c6afb89a48f4">2</bold>) Segment gray matter and white matter to create the brain mask file for later visualization. Region determination and brain parameters analysis: (<bold id="s-8047120a5043">1</bold>) Inflate the surfaces into a sphere and map cortical parcellations back onto individual subjects using two atlases (Killiany and Destrieux atlas) (<bold id="s-fc3bed300e9d">2</bold>) Establish the boundary between white matter and cortex and compute gray matter thickness.</title>
          </caption>
          <graphic id="g-b3d2a2ab291a" xlink:href="https://typeset-prod-media-server.s3.amazonaws.com/article_uploads/b355182f-1342-40cb-aae1-acd9b044ab34/image/606df864-435a-42bf-9692-b1fbf1cf96f5-uimage.png"/>
        </fig>
        <p id="p-d86f66d280b9"/>
        <p id="p-4608e7ae632d"/>
        <table-wrap id="tw-72ff92f741fe" orientation="portrait">
          <label>Table 2</label>
          <caption id="c-c1dd20e80d1e">
            <title id="t-42c50cd7674f">
              <bold id="s-59c1c1362228">358 features extracted by FreeSurfer from 663 ADNI subjects, with particular dimensions for each brain region</bold>
            </title>
          </caption>
          <table id="t-c5c11c0d67a3" rules="rows">
            <colgroup>
              <col width="6.750000000000001"/>
              <col width="13.040000000000003"/>
              <col width="16.21"/>
              <col width="13.24"/>
              <col width="13.24"/>
              <col width="15.639999999999999"/>
              <col width="5.880000000000001"/>
              <col width="16"/>
            </colgroup>
            <thead id="table-section-header-217ec5a4585e">
              <tr id="tr-81dc7360ae3b">
                <th id="tc-ae9a731ac52d" align="left">
                  <p id="p-511120f72747">No.</p>
                </th>
                <th id="tc-98f8f48fc5b7" align="center">
                  <p id="p-8737c1be30a6">Subject ID</p>
                </th>
                <th id="tc-e011f4a0f3cf" align="center">
                  <p id="p-4392e0e389e4">Brain Segmentation Volume Without Ventricles</p>
                </th>
                <th id="tc-015e0d2cdd41" align="center">
                  <p id="p-01f78d741eb3">Left Entorhinal Cortex (temporal lobe)</p>
                </th>
                <th id="tc-eef9735bf234" align="center">
                  <p id="p-e6bed7ece6cf">White Surface Total Area in the left hemisphere</p>
                </th>
                <th id="tc-1875b750b993" align="center">
                  <p id="p-815a0d98f17f">Banks of Superior Temporal Sulcus in the left hemisphere</p>
                </th>
                <th id="tc-94155129e3e9" align="center">
                  <p id="p-b01b5e8c9deb">...</p>
                </th>
                <th id="tc-335188105de1" align="center">
                  <p id="p-bd00fe18d0a1">Number of Defect Holes in right hemispherical Surface Prior to fixing</p>
                </th>
              </tr>
            </thead>
            <tbody id="ts-9f8af3ee5091">
              <tr id="tr-32917ff0fe19">
                <td id="tc-a3c9afca1dcc" align="left">
                  <p id="p-49709a59564e">1</p>
                </td>
                <td id="tc-1251bcf4c460" align="center">
                  <p id="p-ea44e6882902">135_S_4598</p>
                </td>
                <td id="tc-b57570077692" align="center">
                  <p id="p-5858f5572d1c">1076438.0</p>
                </td>
                <td id="tc-74504bd54f1b" align="center">
                  <p id="p-b68159673234">285.0</p>
                </td>
                <td id="tc-8cdc33e4ac74" align="center">
                  <p id="p-c81dc10c25ab">84644.5</p>
                </td>
                <td id="tc-bfbce66ce27f" align="center">
                  <p id="p-2f33f61e53d1">996.0</p>
                </td>
                <td id="tc-9fa3344ea154" align="center">
                  <p id="p-d4e26f8c5d87">...</p>
                </td>
                <td id="tc-8950f306155f" align="center">
                  <p id="p-67b1c062925f">17.0</p>
                </td>
              </tr>
              <tr id="tr-2ead638e2969">
                <td id="tc-c4cd47072866" align="left">
                  <p id="p-6aa6e41d3c32">2</p>
                </td>
                <td id="tc-9aa708596d8a" align="center">
                  <p id="p-2370585cc949">099_S_4480</p>
                </td>
                <td id="tc-ef20e92ab70b" align="center">
                  <p id="p-d423ba0f24bf">945976.0</p>
                </td>
                <td id="tc-2b657a75c0aa" align="center">
                  <p id="p-f2df3f69e049">310.0</p>
                </td>
                <td id="tc-2dafdfba286c" align="center">
                  <p id="p-0269790a8536">76032.8</p>
                </td>
                <td id="tc-ef720ddb7096" align="center">
                  <p id="p-95f83cb6f036">744.0</p>
                </td>
                <td id="tc-9bf0cfb2a1e8" align="center">
                  <p id="p-f606ec3724bc">...</p>
                </td>
                <td id="tc-59943ab17cdc" align="center">
                  <p id="p-004c1eaa9975">33.0</p>
                </td>
              </tr>
              <tr id="tr-d816ac0b03ec">
                <td id="tc-0dc52a608c7b" align="left">
                  <p id="p-6b0b8074baa0">3</p>
                </td>
                <td id="tc-4d377028ed2b" align="center">
                  <p id="p-c4c70a6e3d14">099_S_2146</p>
                </td>
                <td id="tc-d8ebc1f03c07" align="center">
                  <p id="p-73edb1c1761d">1138086.0</p>
                </td>
                <td id="tc-9e5995cb5a71" align="center">
                  <p id="p-c40e6285ae38">453.0</p>
                </td>
                <td id="tc-ac77e1feb8b3" align="center">
                  <p id="p-9ddca77f4926">88770.5</p>
                </td>
                <td id="tc-c1229852162e" align="center">
                  <p id="p-3a2b9f8e9214">1118.0</p>
                </td>
                <td id="tc-9e1832a9abaf" align="center">
                  <p id="p-cec2a90ebd96">...</p>
                </td>
                <td id="tc-bf9c8eeddc02" align="center">
                  <p id="p-25ff563f90e1">46.0</p>
                </td>
              </tr>
              <tr id="tr-3647eeffd16e">
                <td id="tc-1a3b7e8f34d2" align="left">
                  <p id="p-115f3be3ac55">...</p>
                </td>
                <td id="tc-6b43cfecd48b" align="center">
                  <p id="p-7912169fc12e">...</p>
                </td>
                <td id="tc-b7fb49519263" align="center">
                  <p id="p-92f8d9c781fa">...</p>
                </td>
                <td id="table-cell-36" align="center">
                  <p id="p-d541608dbd23">...</p>
                </td>
                <td id="table-cell-37" align="center">
                  <p id="p-74215b156f1b">...</p>
                </td>
                <td id="table-cell-38" align="center">
                  <p id="p-37da86024fcf">...</p>
                </td>
                <td id="table-cell-39" align="center">
                  <p id="paragraph-39">...</p>
                </td>
                <td id="table-cell-40" align="center">
                  <p id="paragraph-40">...</p>
                </td>
              </tr>
              <tr id="tr-f1c7c5766d75">
                <td id="table-cell-41" align="left">
                  <p id="paragraph-41">662</p>
                </td>
                <td id="table-cell-42" align="center">
                  <p id="paragraph-42">082_S_1079</p>
                </td>
                <td id="table-cell-43" align="center">
                  <p id="paragraph-43">1131880.0</p>
                </td>
                <td id="table-cell-44" align="center">
                  <p id="paragraph-44">446.0</p>
                </td>
                <td id="table-cell-45" align="center">
                  <p id="paragraph-45">94008.6</p>
                </td>
                <td id="table-cell-46" align="center">
                  <p id="paragraph-46">1244.0</p>
                </td>
                <td id="table-cell-47" align="center">
                  <p id="paragraph-47">...</p>
                </td>
                <td id="table-cell-48" align="center">
                  <p id="paragraph-48">73.0</p>
                </td>
              </tr>
              <tr id="tr-479205415aef">
                <td id="table-cell-49" align="left">
                  <p id="paragraph-49">663</p>
                </td>
                <td id="table-cell-50" align="center">
                  <p id="paragraph-50">130_S_5059</p>
                </td>
                <td id="table-cell-51" align="center">
                  <p id="paragraph-51">1160101.0</p>
                </td>
                <td id="table-cell-52" align="center">
                  <p id="paragraph-52">601.0</p>
                </td>
                <td id="table-cell-53" align="center">
                  <p id="paragraph-53">85947.9</p>
                </td>
                <td id="table-cell-54" align="center">
                  <p id="paragraph-54">862.0</p>
                </td>
                <td id="table-cell-55" align="center">
                  <p id="paragraph-55">...</p>
                </td>
                <td id="table-cell-56" align="center">
                  <p id="paragraph-56">49.0</p>
                </td>
              </tr>
              <tr id="table-row-8">
                <td id="table-cell-57" colspan="8" align="left">
                  <p id="paragraph-57">* Where area in mm<sup id="s-39806a6b4abf">2</sup>, volume in mm<sup id="s-8f78ac170809">3</sup> </p>
                </td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
        <p id="p-9e2ce785ecad"/>
        <p id="p-faaf625ca34b"/>
        <fig id="f-81997c1ebe71" orientation="portrait" fig-type="graphic" position="anchor">
          <label>Figure 3 </label>
          <caption id="c-a877109c322b">
            <title id="t-ecfe9e3c1ae6"><bold id="s-48fd06b5d6fe">Density plots showing the distribution among three classes (AD, EMCI, CN) of two cognitive scores and several MRI features</bold>. (<bold id="s-4df99b739164">A</bold>) Global CDR Scores, (<bold id="s-b030d8fa35e0">B</bold>) MMSE Scores, (<bold id="s-082ae0b694e1">C</bold>) Left hemisphere bankssts thickness, (<bold id="s-b5182862bf5c">D</bold>) Right hemisphere fusiform volume, (<bold id="s-ec054112e950">E</bold>) eTIV, (<bold id="s-3f58649a3cd1">F</bold>) Left Hippocampus volume. Blue: AD, orange: EMCI, green: CN.</title>
            <p id="p-62df791fa838"><bold id="s-34b3638a0cd0">Abbreviations</bold>: <bold id="s-2159ccc0e618">CN</bold>: Cognitively Normal; <bold id="s-5bd0b4e26a4b">EMCI</bold>: Early Mild Cognitive Impairment; <bold id="s-06cd76643433">AD</bold>: Alzheimer’s disease; <bold id="s-ee8b165c01fa">PCA</bold>: Principal Component Analysis</p>
          </caption>
          <graphic id="g-be465ada2c47" xlink:href="https://typeset-prod-media-server.s3.amazonaws.com/article_uploads/b355182f-1342-40cb-aae1-acd9b044ab34/image/99dbb7d2-189d-434c-9c68-d6f1ce1d3f82-uimage.png"/>
        </fig>
        <p id="p-38a62b164da3"/>
        <p id="p-f346da314dff"/>
        <fig id="f-c4908520505b" orientation="portrait" fig-type="graphic" position="anchor">
          <label>Figure 4 </label>
          <caption id="c-5a6e91a70c51">
            <title id="t-8e196e4cff9c">
              <bold id="s-6dbdc813aba0">Venn diagram showing the total number from overlapping features between two different selection methods.</bold>
            </title>
          </caption>
          <graphic id="g-d28f4195dd28" xlink:href="https://typeset-prod-media-server.s3.amazonaws.com/article_uploads/b355182f-1342-40cb-aae1-acd9b044ab34/image/655bd312-c827-44c0-8d66-2012175162c7-uimage.png"/>
        </fig>
        <p id="p-a43e20a6dff1"/>
        <p id="p-fd421484c15b"/>
        <table-wrap id="tw-432e2555a671" orientation="portrait">
          <label>Table 3</label>
          <caption id="c-26e70d36ebaf">
            <title id="t-531536a03234">
              <bold id="s-46124b4d6b76">The results of feature selection by Approach 3, Approach 4, and Approach 5</bold>
            </title>
          </caption>
          <table id="t-6af6db1c16c9" rules="rows">
            <colgroup>
              <col width="25"/>
              <col width="27.490000000000002"/>
              <col width="26.14"/>
              <col width="21.369999999999997"/>
            </colgroup>
            <thead id="table-section-header-271796f7d549">
              <tr id="tr-53ae8e26366d">
                <th id="tc-54b8de96978a" align="left">
                  <p id="p-e432b312bbad">Method </p>
                </th>
                <th id="tc-d5c06173035d" align="center">
                  <p id="p-1c04b43b0954">Backward Elimination (Approach 3) </p>
                </th>
                <th id="tc-66b3d4a33930" align="center">
                  <p id="p-a7c354326876">XGBoost Importance (Approach 4) </p>
                </th>
                <th id="tc-183c799f9588" align="center">
                  <p id="p-20eca5e6422e">PCA (Approach 5)</p>
                </th>
              </tr>
            </thead>
            <tbody id="ts-6cfe1f9a18fb">
              <tr id="tr-655a21568519">
                <td id="tc-abec013eacdb" align="left">
                  <p id="p-36413b2232ae">Number of features after selection </p>
                </td>
                <td id="tc-243ffcc98ea9" align="center">
                  <p id="p-94612d573d2d">29 </p>
                </td>
                <td id="tc-e0c285fc6f08" align="center">
                  <p id="p-d5eae0b3c213">228 </p>
                </td>
                <td id="tc-ea6a95e3f97b" align="center">
                  <p id="p-5d5b681e3eb2">71 </p>
                </td>
              </tr>
              <tr id="tr-928b38a326eb">
                <td id="tc-92765f9331ac" align="left">
                  <p id="p-9880dc8fe919">Type of features </p>
                </td>
                <td id="tc-3e9c2c04d891" align="center">
                  <p id="p-d3e4c6e35fb2">Brain features and cognitive scores </p>
                </td>
                <td id="tc-dda6c646d27f" align="center">
                  <p id="p-575c25810f68">Brain features and cognitive scores </p>
                </td>
                <td id="tc-b82f31c1ae0c" align="center">
                  <p id="p-6ef595ec2282">PCA features </p>
                </td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn-group>
              <fn id="f-003f6719dcb2">
                <p id="p-786d8d97bdd4"><bold id="s-1e15663f351d">Abbreviations</bold>: <bold id="s-30e4cf8c757e">PCA</bold>: Principal Component Analysis; <bold id="s-fe3a28388c7f">XG-Boost</bold>: Extreme Gradient Boosting. </p>
              </fn>
            </fn-group>
          </table-wrap-foot>
        </table-wrap>
        <p id="p-e40cdb45aa2f"/>
        <fig id="f-6f84e200eaaa" orientation="portrait" fig-type="graphic" position="anchor">
          <label>Figure 5 </label>
          <caption id="c-e681fb919428">
            <title id="t-f71f60882d42"><bold id="s-15db09631460">Feature weights after backward elimination and trained by XGBoost</bold>.</title>
            <p id="p-24c16da50c86"><bold id="s-cde70fd299c2">Abbreviations</bold>: <bold id="s-e9bfe2df6ece">XG-Boost</bold>: Extreme Gradient Boosting. </p>
          </caption>
          <graphic id="g-0b772219a073" xlink:href="https://typeset-prod-media-server.s3.amazonaws.com/article_uploads/b355182f-1342-40cb-aae1-acd9b044ab34/image/ab604e34-c33a-48d6-abd6-62f7252ce6e9-uimage.png"/>
        </fig>
        <p id="p-fc4bc52be844"/>
        <p id="p-3911f184f3fe"/>
        <fig id="f-594fe7c11047" orientation="portrait" fig-type="graphic" position="anchor">
          <label>Figure 6 </label>
          <caption id="c-6e7e46cb01d6">
            <title id="t-e295b0b664b1"><bold id="s-aeba4cc99b4b">Accuracy of six approaches with 10-fold cross-validation</bold>. Approach 1: Brain structure features, Approach 2: Brain structural features and two cognitive scores, Approach 3: XG-Boost Importance and two cognitive scores, Approach 4: Backward Elimination and two cognitive scores, Approach 5: PCA features, Approach 6: Backward Elimination and two cognitive scores with tuning parameters.</title>
            <p id="p-f53605d17a6f"><bold id="s-f96f12140655">Abbreviations</bold>: <bold id="s-4bf5a6ad09cf">PCA</bold>: Principal Component Analysis; <bold id="s-7bbbd1804b74">XG-Boost</bold>: Extreme Gradient Boosting. </p>
          </caption>
          <graphic id="g-da61bd77daea" xlink:href="https://typeset-prod-media-server.s3.amazonaws.com/article_uploads/b355182f-1342-40cb-aae1-acd9b044ab34/image/559efc87-a6b7-4aac-b750-8dd4bd18199e-uimage.png"/>
        </fig>
        <p id="p-65eeaff3bdd9"/>
        <p id="p-150691775330"/>
        <table-wrap id="tw-c2334e1d3e26" orientation="portrait">
          <label>Table 4</label>
          <caption id="c-f79943581b3e">
            <title id="t-464cb0f72a86">
              <bold id="s-327603577736">The performance results of six approaches for three-class classification</bold>
            </title>
          </caption>
          <table id="t-d6bb6ac842c0" rules="rows">
            <colgroup>
              <col width="11.79"/>
              <col width="18.04"/>
              <col width="14.589999999999996"/>
              <col width="19.580000000000002"/>
              <col width="16"/>
              <col width="20"/>
            </colgroup>
            <thead id="table-section-header-aacbffc4e98e">
              <tr id="tr-da16b246f657">
                <th id="tc-eba463654caf" align="left">
                  <p id="p-33c5a1d496f2">Approach</p>
                </th>
                <th id="tc-ce457ed33b39" align="center">
                  <p id="p-a1fa5af59d4d">Class </p>
                </th>
                <th id="tc-85af64dde3b4" align="center">
                  <p id="p-c628c1f915ab">Accuracy </p>
                </th>
                <th id="tc-8bdd76363459" align="center">
                  <p id="p-980b99d55ac7">Precision</p>
                </th>
                <th id="tc-e7a406cd8213" align="center">
                  <p id="p-a0b67f71d2b6">Recall</p>
                </th>
                <th id="tc-87edb0bf9f6c" align="center">
                  <p id="p-f30be0c93be3">F1 score</p>
                </th>
              </tr>
            </thead>
            <tbody id="ts-1761b3632dcc">
              <tr id="tr-d020f11f47ba">
                <td id="tc-aee786c4ac0f" rowspan="3" align="left">
                  <p id="p-e02889c5043b">1</p>
                </td>
                <td id="tc-7128fbdbe6a0" align="center">
                  <p id="p-e2e80212e13f">CN</p>
                </td>
                <td id="tc-2084ec0f5f83" rowspan="3" align="center">
                  <p id="p-8c9c511f36e2">68.8 %</p>
                </td>
                <td id="tc-6290bff87884" align="center">
                  <p id="p-54154d82bd7b">64 %</p>
                </td>
                <td id="tc-e1a7a803168a" align="center">
                  <p id="p-c77cec18c554">56 %</p>
                </td>
                <td id="tc-41e6134cffce" align="center">
                  <p id="p-c081a0392b7b">60 %</p>
                </td>
              </tr>
              <tr id="tr-2db001070dfe">
                <td id="tc-2fde9ea8de51" align="center">
                  <p id="p-1bab5150c3c5">EMCI</p>
                </td>
                <td id="tc-82fc6760a47d" align="center">
                  <p id="p-20a52040b440">64 %</p>
                </td>
                <td id="tc-2837b4113db7" align="center">
                  <p id="p-724167b1cdf7">75 %</p>
                </td>
                <td id="tc-0fce254ce1f5" align="center">
                  <p id="p-1af4b9f2e564">69 %</p>
                </td>
              </tr>
              <tr id="tr-384f90d270f5">
                <td id="tc-a456b0412c03" align="center">
                  <p id="p-6f9048e90347">AD</p>
                </td>
                <td id="tc-d3b9a9d28e4a" align="center">
                  <p id="p-3366573a5716">79 %</p>
                </td>
                <td id="tc-05c445e5a07a" align="center">
                  <p id="p-7b344305cfb7">74 %</p>
                </td>
                <td id="tc-840e2f730f87" align="center">
                  <p id="p-2de418b631b3">77 %</p>
                </td>
              </tr>
              <tr id="tr-19555dbfeb3c">
                <td id="tc-a43ca5500cef" rowspan="3" align="left">
                  <p id="p-058f1a89127d">2</p>
                </td>
                <td id="tc-2b04af564ec9" align="center">
                  <p id="p-3a16a7e92b1b">CN</p>
                </td>
                <td id="tc-2b20e7bdcd19" rowspan="3" align="center">
                  <p id="p-eb409498c40d">86 % </p>
                </td>
                <td id="tc-99df7d27f353" align="center">
                  <p id="p-3016bd62c7c1">80 % </p>
                </td>
                <td id="tc-1f7f436c7e03" align="center">
                  <p id="p-8acbc6e1e5e6">97 % </p>
                </td>
                <td id="tc-0f8af3e11336" align="center">
                  <p id="p-e7bac276104f">88 % </p>
                </td>
              </tr>
              <tr id="tr-d73fd8c0e7d4">
                <td id="tc-ba3080995fbe" align="center">
                  <p id="p-1a3d24c7aaa2">EMCI</p>
                </td>
                <td id="tc-a8d8be76214f" align="center">
                  <p id="p-7a3f41485617">97 % </p>
                </td>
                <td id="tc-afaa9118e938" align="center">
                  <p id="p-09ead93cced9">71 % </p>
                </td>
                <td id="tc-965c608e7c96" align="center">
                  <p id="p-c5e834c3d048">82 % </p>
                </td>
              </tr>
              <tr id="tr-6d9cdfad4b29">
                <td id="tc-99999119050d" align="center">
                  <p id="p-afada2dcd625">AD</p>
                </td>
                <td id="tc-69261bc78c1c" align="center">
                  <p id="p-66440e04f86d">83 % </p>
                </td>
                <td id="tc-d31958ccc02e" align="center">
                  <p id="p-9688d671d47d">98 % </p>
                </td>
                <td id="tc-4ccd8ef19dbe" align="center">
                  <p id="p-0bfc8881246c">90 % </p>
                </td>
              </tr>
              <tr id="tr-8417efc07594">
                <td id="tc-53803534f387" rowspan="3" align="left">
                  <p id="p-d79fa6dfb636">3 </p>
                </td>
                <td id="tc-eac89967da40" align="center">
                  <p id="p-7486a07dedf6">CN</p>
                </td>
                <td id="tc-dc19c5b9d593" rowspan="3" align="center">
                  <p id="p-ce9a56ac8481">91.05 %</p>
                </td>
                <td id="tc-58c78b257dd7" align="center">
                  <p id="p-967524c611ae">89 %</p>
                </td>
                <td id="tc-1953235f4a03" align="center">
                  <p id="p-03fe04333fc9">98 %</p>
                </td>
                <td id="tc-547f63c19650" align="center">
                  <p id="p-bda7a68277f7">93 %</p>
                </td>
              </tr>
              <tr id="table-row-9">
                <td id="tc-52dbd5c8419a" align="center">
                  <p id="p-16db87c68217">EMCI</p>
                </td>
                <td id="tc-a05cb3d81129" align="center">
                  <p id="p-315718741ba4">95 %</p>
                </td>
                <td id="tc-b6113ff47c06" align="center">
                  <p id="p-b79105b08472">83 %</p>
                </td>
                <td id="tc-4dd51b026cdf" align="center">
                  <p id="p-60eb887a9b38">89 %</p>
                </td>
              </tr>
              <tr id="table-row-10">
                <td id="tc-660a55e57feb" align="center">
                  <p id="p-2b07d9ee090a">AD</p>
                </td>
                <td id="tc-287e93859875" align="center">
                  <p id="p-7fa9469b8b51">91 %</p>
                </td>
                <td id="tc-3c19352dfb93" align="center">
                  <p id="p-4313f027bf96">95 %</p>
                </td>
                <td id="tc-8d18139d67e7" align="center">
                  <p id="p-e3a9f8eeac4f">93 %</p>
                </td>
              </tr>
              <tr id="table-row-11">
                <td id="tc-feaa1e686a67" rowspan="3" align="left">
                  <p id="p-51d039455cbd">4 </p>
                </td>
                <td id="tc-9fbbbcef6f9d" align="center">
                  <p id="p-f9f08e19f39f">CN</p>
                </td>
                <td id="tc-5736fc5a2171" rowspan="3" align="center">
                  <p id="p-ba40ca7c2828">90.9 %</p>
                </td>
                <td id="tc-f6722976873c" align="center">
                  <p id="p-ea9070822966">91 %</p>
                </td>
                <td id="tc-91020f887e59" align="center">
                  <p id="p-6c0d88124f60">98 %</p>
                </td>
                <td id="tc-09ff12c452fd" align="center">
                  <p id="p-6a2d5b9971f2">95 %</p>
                </td>
              </tr>
              <tr id="table-row-12">
                <td id="tc-0382b1523933" align="center">
                  <p id="p-a26e7a889fa8">EMCI</p>
                </td>
                <td id="tc-b262ee7cc47b" align="center">
                  <p id="p-5486d16147d6">92 %</p>
                </td>
                <td id="tc-2883afca7a4b" align="center">
                  <p id="p-a9f238e146e7">79 %</p>
                </td>
                <td id="table-cell-58" align="center">
                  <p id="paragraph-58">85 %</p>
                </td>
              </tr>
              <tr id="table-row-13">
                <td id="table-cell-59" align="center">
                  <p id="paragraph-59">AD</p>
                </td>
                <td id="table-cell-60" align="center">
                  <p id="paragraph-60">90 %</p>
                </td>
                <td id="table-cell-61" align="center">
                  <p id="paragraph-61">96 %</p>
                </td>
                <td id="table-cell-62" align="center">
                  <p id="paragraph-62">93 %</p>
                </td>
              </tr>
              <tr id="table-row-14">
                <td id="table-cell-63" rowspan="3" align="left">
                  <p id="paragraph-63">5 </p>
                </td>
                <td id="table-cell-64" align="center">
                  <p id="paragraph-64">CN</p>
                </td>
                <td id="table-cell-65" rowspan="3" align="center">
                  <p id="paragraph-65">74 %</p>
                </td>
                <td id="table-cell-66" align="center">
                  <p id="paragraph-66">68 %</p>
                </td>
                <td id="table-cell-67" align="center">
                  <p id="paragraph-67">59 %</p>
                </td>
                <td id="table-cell-68" align="center">
                  <p id="paragraph-68">63 %</p>
                </td>
              </tr>
              <tr id="table-row-15">
                <td id="table-cell-69" align="center">
                  <p id="paragraph-69">EMCI</p>
                </td>
                <td id="table-cell-70" align="center">
                  <p id="paragraph-70">75 %</p>
                </td>
                <td id="table-cell-71" align="center">
                  <p id="paragraph-71">77 %</p>
                </td>
                <td id="table-cell-72" align="center">
                  <p id="paragraph-72">76 %</p>
                </td>
              </tr>
              <tr id="table-row-16">
                <td id="table-cell-73" align="center">
                  <p id="paragraph-73">AD</p>
                </td>
                <td id="table-cell-74" align="center">
                  <p id="paragraph-74">78 %</p>
                </td>
                <td id="table-cell-75" align="center">
                  <p id="paragraph-75">86 %</p>
                </td>
                <td id="table-cell-76" align="center">
                  <p id="paragraph-76">82 %</p>
                </td>
              </tr>
              <tr id="table-row-17">
                <td id="table-cell-77" rowspan="3" align="left">
                  <p id="paragraph-77">6 </p>
                </td>
                <td id="table-cell-78" align="center">
                  <p id="paragraph-78">CN</p>
                </td>
                <td id="table-cell-79" rowspan="3" align="center">
                  <p id="paragraph-79">92 % </p>
                </td>
                <td id="table-cell-80" align="center">
                  <p id="paragraph-80">88 % </p>
                </td>
                <td id="table-cell-81" align="center">
                  <p id="paragraph-81">97 % </p>
                </td>
                <td id="table-cell-82" align="center">
                  <p id="paragraph-82">93 % </p>
                </td>
              </tr>
              <tr id="table-row-18">
                <td id="table-cell-83" align="center">
                  <p id="paragraph-83">EMCI</p>
                </td>
                <td id="table-cell-84" align="center">
                  <p id="paragraph-84">91 % </p>
                </td>
                <td id="table-cell-85" align="center">
                  <p id="paragraph-85">85 % </p>
                </td>
                <td id="table-cell-86" align="center">
                  <p id="paragraph-86">88 % </p>
                </td>
              </tr>
              <tr id="table-row-19">
                <td id="table-cell-87" align="center">
                  <p id="paragraph-87">AD</p>
                </td>
                <td id="table-cell-88" align="center">
                  <p id="paragraph-88">96 % </p>
                </td>
                <td id="table-cell-89" align="center">
                  <p id="paragraph-89">94 % </p>
                </td>
                <td id="table-cell-90" align="center">
                  <p id="paragraph-90">95 % </p>
                </td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn-group>
              <fn id="f-dd12d3a0f8c6">
                <p id="p-2df5e3cd5698">The data included sMRI from 663 subjects, which were divided into 80 % for training and 20 % for testing.</p>
              </fn>
            </fn-group>
          </table-wrap-foot>
        </table-wrap>
        <p id="p-9bd3b44ff6cb"/>
        <p id="p-5bdfcf2927e2"/>
        <fig id="f-4128e68b8a91" orientation="portrait" fig-type="graphic" position="anchor">
          <label>Figure 7 </label>
          <caption id="c-c0e97a179855">
            <title id="t-5ff1c0d11d3d"><bold id="s-ab26bf28154f">Receiver Operating Characteristic (ROC) curves of Approach 1 and Approach 6 for three-class classification</bold>. The green line corresponds to AD, the blue line represents EMCI, and the red line shows CN.</title>
            <p id="p-8e2e417b7baa"><bold id="s-e5277fc0ac0b">Abbreviations</bold>: <bold id="s-dccf37b64288">CN</bold>: Cognitively Normal; <bold id="s-a30ce6d7055d">EMCI</bold>: Early Mild Cognitive Impairment; <bold id="s-65acf1572daa">AD</bold>: Alzheimer’s disease; <bold id="s-7ecec6b6e428">PCA</bold>: Principal Component Analysis</p>
          </caption>
          <graphic id="g-fabbe5029691" xlink:href="https://typeset-prod-media-server.s3.amazonaws.com/article_uploads/b355182f-1342-40cb-aae1-acd9b044ab34/image/ece9e9e3-05a2-43c7-b3c0-027b15f0a5ff-uimage.png"/>
        </fig>
        <p id="p-fe3747bc07d0"/>
        <p id="p-5237cb905667"/>
        <fig id="f-5387e71a6702" orientation="portrait" fig-type="graphic" position="anchor">
          <label>Figure 8 </label>
          <caption id="c-7f9868eea61a">
            <title id="t-1dc9848fdc1c"><bold id="s-6cc1eda396b4">Visualization results for the ground truths and the corresponding predictions in three classes (CN, EMCI, AD)</bold>. The first and second columns illustrate the correctly-predicted examples, while the last column shows the wrongly-predicted ones. <bold id="s-df6775ed34d7">Abbreviations</bold>: <bold id="s-f82b2b5073be">CN</bold>: Cognitively Normal; <bold id="s-262d916d97f5">EMCI</bold>: Early Mild Cognitive Impairment; <bold id="s-68926994d390">AD</bold>: Alzheimer’s disease; <bold id="s-8ef1f1a56171">PCA</bold>: Principal Component Analysis</title>
          </caption>
          <graphic id="g-0b9f0e2775ab" xlink:href="https://typeset-prod-media-server.s3.amazonaws.com/article_uploads/b355182f-1342-40cb-aae1-acd9b044ab34/image/a9183f7f-dafc-416f-82a5-859394f60673-uimage.png"/>
        </fig>
        <p id="p-29ef0b9e7ed2"/>
      </sec>
      <sec>
        <title id="t-c867e9553edc">
          <bold id="s-90aeaf53fd1b">Structural MRI data</bold>
        </title>
        <p id="p-72882b69f713">The structural MRI scans used in this study were the T1-weighted magnetization prepared-rapid gradient echo scans from ADNI 1 and ADNI GO/2. Various MRI scanner models were used for MRI acquisition; details of the acquisition protocol for the MRI data can be found on the ADNI website (http://adni.loni.usc.edu)<bold id="s-6cea921e7f7f"><xref id="x-de7876dff1a0" rid="R208498129137433" ref-type="bibr">22</xref></bold>.</p>
      </sec>
      <sec>
        <title id="t-d59e9118c1a1">
          <bold id="s-1590dd33cc89">Study design</bold>
        </title>
        <p id="p-4fbb54ae00ef">An overview of the study design is shown in <bold id="s-802517017b52"><xref id="x-2af97d5b1a62" rid="f-b5dbb6622b89" ref-type="fig">Figure 1</xref></bold>. Firstly, the MRI images were preprocessed with FreeSurfer to extract 358 features, including volumetric and thickness measurements. Three feature selection methods were used, and their efficiencies were compared. This step determined the optimal features from the 360 elements (FreeSurfer features, MMSE score, and CDR score). The data were divided into two sets with a ratio of 80% training to 20% testing using Python’s Scikit-learn library. Finally, the proposed models were evaluated using the performance metrics of accuracy, precision, recall, F1-score, and ROC curves with AUCs to identify the most efficient classification algorithm.</p>
      </sec>
      <sec>
        <title id="t-d5559f039ad6">
          <bold id="strong-6">Feature extraction</bold>
        </title>
        <p id="p-1ba3acb15da0">Six hundred sixty-three MRI images were reconstructed and segmented using FreeSurfer (version 5.3; http://surfer.nmr.mgh.harvard.edu). This open-source software measures and visualizes the human brain’s functional, connective, and structural characteristics to extract brain structural features<bold id="s-0d01b3cdc9d5"><xref id="x-039a0a4e095a" rid="R208498129137434" ref-type="bibr">23</xref></bold>. This software’s processing operations have two major stages (<bold id="s-cd341c9b1ded"><xref id="x-3aa4f4a2b641" rid="f-508527275f6f" ref-type="fig">Figure 2</xref></bold>).</p>
      </sec>
      <sec>
        <title id="t-d0a4c49c52fc">
          <bold id="strong-8">Feature selection</bold>
        </title>
        <p id="p-72ab8044282c">Feature selection plays a significant role in ML and pattern recognition. Pearson’s product-moment correlation coefficient (<italic id="emphasis-1">r</italic>) was first applied to remove all linearly related features with a <italic id="emphasis-2">r</italic> &gt; 0.9. The reason for using this method is that several features extracted by Freesurfer are sub-regions or different measurements of the same brain region. Therefore, including highly relevant features in a particular brain-diagnosed area is redundant from a neuroscience perspective. Moreover, highly correlated features may lead to overfitting, impacting model performance. Therefore, applying non-linear feature selection can improve model performance and reduce training time efficiently. The next step was performed with three feature selection methods to compare their efficiency.</p>
        <sec>
          <title id="t-146c8b013841">
            <italic id="emphasis-3">
              <bold id="strong-9">PCA</bold>
            </italic>
          </title>
          <p id="paragraph-13">PCA is a multivariate exploratory analysis approach that reduces the complexity of multidimensional data while preserving trends and key patterns<bold id="s-285d9f2bf382"><xref rid="R208498129137435" ref-type="bibr">24</xref>, <xref rid="R208498129137436" ref-type="bibr">25</xref></bold>. PCA was applied using Python’s Scikit-learn library with different numbers of principal components (PCs; 1–321) to determine the optimal set of features for the classification model. Then, in each model, the PCs were incrementally included in 10 PC increments to observe changes in accuracy with Python’s Matplotlib library.</p>
        </sec>
        <sec>
          <title id="t-c5acc6d0037e">
            <italic id="emphasis-4">
              <bold id="strong-10">BE</bold>
            </italic>
          </title>
          <p id="paragraph-15">BE is a feature selection strategy that excludes characteristics strongly associated with the exposure without significantly influencing dependent variables or predicted outputs<bold id="s-b2d045c0f607"><xref rid="R208498129137437" ref-type="bibr">26</xref>, <xref rid="R208498129137438" ref-type="bibr">27</xref></bold>. BE was applied in five main steps: (i) select a significance level (SL) that is suitable for the model (SL = 0.05), (ii) calculate ordinary least squares with Python’s Statsmodels library before determining the <italic id="e-34390b935f18">p</italic>-values of all features, (iii) compare the calculated <italic id="e-709cd8baae0d">p</italic>-value with the SL, (iv) remove features and predictors with a <italic id="e-d0dd37d639ce">p</italic>-value greater than the SL, and (v) refit the model with the remaining variables.</p>
        </sec>
        <sec>
          <title id="t-480308e87d5b">
            <italic id="emphasis-8">
              <bold id="strong-11">XGBI</bold>
            </italic>
          </title>
          <p id="paragraph-17">XG-Boost has the advantage of extracting importance scores for each feature in the predictive problem, enabling the determination of the highest importance score. The next step removes all unusable features with zero importance coefficients depending on their ranking. This action is repeatedly performed until stable accuracy and non-zero importance coefficients are achieved.</p>
        </sec>
        <sec>
          <title id="t-11ff3552378c">
            <italic id="emphasis-9">
              <bold id="strong-12">Six feature selection approaches</bold>
            </italic>
          </title>
          <p id="paragraph-19">This study investigated six approaches for feature selection. Feature selection was not applied in the first and second approaches. The first approach used all 358 features extracted by FreeSurfer to train the model. The second approach added the two cognitive scores to the 358 FreeSurfer features. The third approach used XGBI to filter the FreeSurfer features and included the two cognitive scores when training the model. The fourth and sixth approaches used BE for feature selection and included the two cognitive scores; however, the sixth approach also applied parameter tuning. Finally, the fifth approach used PCA for feature selection.</p>
        </sec>
      </sec>
      <sec>
        <title id="t-2b85204e1a86"><bold id="strong-13">Classification</bold> </title>
        <p id="paragraph-21">XG-Boost is a scalable and efficient gradient-boosting framework used to combine a series of weak base learners (small decision trees) into a single powerful learner (a big tree)<bold id="s-c3e119ba9142"><xref rid="R208498129137439" ref-type="bibr">28</xref>, <xref rid="R208498129137440" ref-type="bibr">29</xref></bold>. The enhanced performance of XG-Boost has been shown in several major areas. Firstly, XG-Boost introduces a regularization component into the objective function, making the model less prone to overfitting. Secondly, it conducts a second-order rather than first-order Taylor expansion on the objective function, enabling it to specify the loss function more accurately. Thirdly, XG-Boost has a fast training speed due to data compression, multithreading, and GPU acceleration<bold id="s-3b6270089b8b"><xref rid="R208498129137441" ref-type="bibr">30</xref>, <xref rid="R208498129137442" ref-type="bibr">31</xref></bold>.</p>
        <p id="paragraph-22">The objective function is defined as:</p>
        <disp-formula-group id="dfg-c3090eefa011"> <disp-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML"><mml:mi>O</mml:mi><mml:mi>b</mml:mi><mml:msup><mml:mi>j</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:msup><mml:mo>=</mml:mo><mml:munderover><mml:mo>∑</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>n</mml:mi></mml:munderover><mml:mi>L</mml:mi><mml:mo>(</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>,</mml:mo><mml:msubsup><mml:mover><mml:mi>y</mml:mi><mml:mo>∧</mml:mo></mml:mover><mml:mi>i</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:msubsup><mml:mo>)</mml:mo><mml:mo>+</mml:mo><mml:munderover><mml:mo>∑</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>n</mml:mi></mml:munderover><mml:mi>Ω</mml:mi><mml:mo>(</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mo>)</mml:mo></mml:math></disp-formula></disp-formula-group>
        <p id="p-973412a87cab">where <inline-formula id="if-159d0691830a"> <mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML"><mml:msubsup><mml:mover><mml:mi>y</mml:mi><mml:mo>∧</mml:mo></mml:mover><mml:mi>i</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:msubsup></mml:math></inline-formula> represents the prediction for the <italic id="emphasis-10">t</italic><sup id="superscript-1">th</sup> round, <italic id="e-c6cfbdfcae55">f<sub id="s-d1e4d3b85e89">t</sub></italic> represents the structure of a decision tree, and <inline-formula id="if-be1ea90eeaf7"> <mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML"><mml:mi>Ω</mml:mi><mml:mo>(</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mo>)</mml:mo></mml:math></inline-formula> represents the regularization component. <inline-formula id="if-ab88a4432162"> <mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML"><mml:mi>Ω</mml:mi><mml:mo>(</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mo>)</mml:mo></mml:math></inline-formula> is given by:</p>
        <p id="paragraph-25"> </p>
        <p id="p-062b59015398"> </p>
        <disp-formula-group id="dfg-5d00243fd96b"> <disp-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML"><mml:mi>Ω</mml:mi><mml:mo>(</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mo>)</mml:mo><mml:mo>=</mml:mo><mml:mi>γ</mml:mi><mml:mi>T</mml:mi><mml:mo>+</mml:mo><mml:mstyle displaystyle="false"><mml:mfrac><mml:mn>1</mml:mn><mml:mn>2</mml:mn></mml:mfrac></mml:mstyle><mml:mi>λ</mml:mi><mml:munderover><mml:mo>∑</mml:mo><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>T</mml:mi></mml:munderover><mml:msubsup><mml:mi>ω</mml:mi><mml:mi>j</mml:mi><mml:mn>2</mml:mn></mml:msubsup></mml:math></disp-formula></disp-formula-group>
        <p id="p-2e3c4c97ef2a"/>
        <p id="p-07dfa3f26bda"/>
        <p id="paragraph-26">where <inline-formula id="if-427932f5bff7"> <mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML"><mml:mi>λ</mml:mi></mml:math></inline-formula> represents the penalty coefficient and <inline-formula id="if-6ea9e9ab1f7d"> <mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML"><mml:mstyle displaystyle="false"><mml:mfrac><mml:mn>1</mml:mn><mml:mn>2</mml:mn></mml:mfrac><mml:mi>λ</mml:mi><mml:msubsup><mml:mo>∑</mml:mo><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>T</mml:mi></mml:msubsup><mml:msubsup><mml:mi>ω</mml:mi><mml:mi>j</mml:mi><mml:mn>2</mml:mn></mml:msubsup></mml:mstyle></mml:math></inline-formula> represents the L2 norm of leaf scores. After <italic id="emphasis-11">t</italic> iterations, the model’s function is added to a new decision tree:</p>
        <p id="paragraph-27"> </p>
        <disp-formula-group id="dfg-f995caad92f2"> <disp-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML"><mml:msubsup><mml:mover><mml:mi>y</mml:mi><mml:mo>∧</mml:mo></mml:mover><mml:mi>i</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:msubsup><mml:mover><mml:mi>y</mml:mi><mml:mo>∧</mml:mo></mml:mover><mml:mi>i</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow></mml:msubsup><mml:mo>+</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mo>(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>)</mml:mo></mml:math></disp-formula></disp-formula-group>
        <p id="p-094ceb61073e"/>
        <p id="paragraph-28">and the objective function is updated:</p>
        <p id="paragraph-29"> </p>
        <disp-formula-group id="dfg-7611e645b3e5"> <disp-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML"><mml:mi>O</mml:mi><mml:mi>b</mml:mi><mml:msup><mml:mi>j</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:msup><mml:mo>=</mml:mo><mml:munderover><mml:mo>∑</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>n</mml:mi></mml:munderover><mml:mi>L</mml:mi><mml:mo>(</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>,</mml:mo><mml:msubsup><mml:mover><mml:mi>y</mml:mi><mml:mo>∧</mml:mo></mml:mover><mml:mi>i</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow></mml:msubsup><mml:mo>+</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mo>(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>)</mml:mo><mml:mo>)</mml:mo><mml:mo>+</mml:mo><mml:munderover><mml:mo>∑</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>n</mml:mi></mml:munderover><mml:mi>Ω</mml:mi><mml:mo>(</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mo>)</mml:mo></mml:math></disp-formula></disp-formula-group>
        <p id="p-656e02d9f007"/>
        <p id="paragraph-30">with the Taylor expansion specification:</p>
        <p id="paragraph-31"> </p>
        <disp-formula-group id="dfg-55b0c87619e1"> <disp-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML"><mml:mi>O</mml:mi><mml:mi>b</mml:mi><mml:msup><mml:mi>j</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:msup><mml:mo>≃</mml:mo><mml:munderover><mml:mo>∑</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mi>n</mml:mi></mml:munderover><mml:mfenced close="]" open="["><mml:mrow><mml:mi>L</mml:mi><mml:mo>(</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>,</mml:mo><mml:msubsup><mml:mover><mml:mi>y</mml:mi><mml:mo>∧</mml:mo></mml:mover><mml:mi>i</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow></mml:msubsup><mml:mo>)</mml:mo><mml:mo>+</mml:mo><mml:msub><mml:mi>g</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:msub><mml:mi>f</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mo>(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>)</mml:mo><mml:mo>+</mml:mo><mml:mstyle displaystyle="false"><mml:mfrac><mml:mn>1</mml:mn><mml:mn>2</mml:mn></mml:mfrac></mml:mstyle><mml:msub><mml:mi>h</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:msubsup><mml:mi>f</mml:mi><mml:mi>t</mml:mi><mml:mn>2</mml:mn></mml:msubsup><mml:mo>(</mml:mo><mml:msub><mml:mi>x</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>)</mml:mo></mml:mrow></mml:mfenced><mml:mo>+</mml:mo><mml:mi>Ω</mml:mi><mml:mo>(</mml:mo><mml:msub><mml:mi>f</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mo>)</mml:mo></mml:math></disp-formula></disp-formula-group>
        <p id="p-ef54fbe70aa8"/>
        <p id="paragraph-32">where <inline-formula id="if-c715ef798612"> <mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML"><mml:msub><mml:mi>g</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:math></inline-formula> represents the first derivative and <inline-formula id="if-34bf13296cd0"> <mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML"><mml:msub><mml:mi>h</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:math></inline-formula> represents the second derivative of the loss function. <inline-formula id="if-1d8bf8563348"> <mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML"><mml:msub><mml:mi>g</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:math></inline-formula> and <inline-formula id="if-9b54b818ed78"> <mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML"><mml:msub><mml:mi>h</mml:mi><mml:mi>i</mml:mi></mml:msub></mml:math></inline-formula> are given by<bold id="s-57ee57af04f4"><xref id="x-5cfeebb3c6f9" rid="R208498129137442" ref-type="bibr">31</xref>:</bold></p>
        <p id="paragraph-33"> </p>
        <disp-formula-group id="dfg-c8bde91b6334"> <disp-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML"><mml:msub><mml:mi>g</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mfrac><mml:mi>ϑ</mml:mi><mml:mrow><mml:mi>ϑ</mml:mi><mml:msubsup><mml:mstyle displaystyle="true"><mml:mover><mml:mi>y</mml:mi><mml:mo>∧</mml:mo></mml:mover></mml:mstyle><mml:mi>i</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow></mml:msubsup></mml:mrow></mml:mfrac><mml:mo>(</mml:mo><mml:mi>L</mml:mi><mml:mo>(</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>,</mml:mo><mml:msubsup><mml:mover><mml:mi>y</mml:mi><mml:mo>∧</mml:mo></mml:mover><mml:mi>i</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow></mml:msubsup><mml:mo>)</mml:mo><mml:mo>)</mml:mo></mml:math></disp-formula></disp-formula-group>
        <p id="p-92e144dba32c"/>
        <disp-formula-group id="dfg-4d9d3b5ba1f3"> <disp-formula><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML"><mml:msub><mml:mi>h</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mfrac><mml:msup><mml:mi>ϑ</mml:mi><mml:mn>2</mml:mn></mml:msup><mml:mrow><mml:mi>ϑ</mml:mi><mml:msubsup><mml:mstyle displaystyle="true"><mml:mover><mml:mi>y</mml:mi><mml:mo>∧</mml:mo></mml:mover></mml:mstyle><mml:mi>i</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow></mml:msubsup></mml:mrow></mml:mfrac><mml:mo>(</mml:mo><mml:mi>L</mml:mi><mml:mo>(</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mi>i</mml:mi></mml:msub><mml:mo>,</mml:mo><mml:msubsup><mml:mover><mml:mi>y</mml:mi><mml:mo>∧</mml:mo></mml:mover><mml:mi>i</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mi>t</mml:mi><mml:mo>-</mml:mo><mml:mn>1</mml:mn><mml:mo>)</mml:mo></mml:mrow></mml:msubsup><mml:mo>)</mml:mo><mml:mo>)</mml:mo></mml:math></disp-formula></disp-formula-group>
        <p id="p-2ca939021e17"/>
        <p id="paragraph-34">This study applied the model from the open-source XG-Boost library. The algorithm also applies the softmax parameter and the cross-entropy function. After fitting the data, the Matplotlib library visualizes the fitting process and stops the process early to prevent overfitting.</p>
      </sec>
      <sec>
        <title id="t-c649b83c5601">
          <bold id="strong-14">Tenfold cross-validation<xref id="x-09f1094a9850" rid="R208498129137443" ref-type="bibr">32</xref></bold>
        </title>
        <p id="paragraph-36">Grid Search cross-validation (GridSearchCV) is an object provided by Python’s Scikit-learn library that generates a set of hyperparameters for tenfold cross-validation to achieve a maximally accurate model (estimator). GridSearch evaluates the grid of indicated parameters based on the estimator during the call to fit, including predicting, scoring, or transforming methods. Then, it returns the best-performing combination of hyperparameters with a maximum score (the scoring strategy of the basic estimator). Any other estimator can be applied to this object in this manner. Lastly, all modifiers and an estimator are assembled by a pipeline, resulting in a combined estimator that can implement several reductions afterward, such as tuning dimensions before fitting.</p>
      </sec>
    </sec>
    <sec>
      <title id="t-0a32b5fbbca2">Results</title>
      <sec>
        <title id="t-3b677a63c6ff">
          <bold id="s-45b32f25e776">Feature extraction</bold>
        </title>
        <p id="p-abb32d1e7ac1">After preprocessing and extraction, 358 features were exported. <bold id="s-32f49385f9a6"><xref id="x-9fa67409fb2f" rid="tw-72ff92f741fe" ref-type="table">Table 2</xref></bold> shows a portion of the extraction results. From the extraction results, we assessed the discriminative power of several features and two additional cognitive scores (CDR and MMSE) using the point distributions between three classes: AD, CN, and EMCI (<bold id="s-5af86675fb2a"><xref id="x-5d31292cc2ce" rid="f-81997c1ebe71" ref-type="fig">Figure 3</xref></bold>). We selected the top four weighted features according to XGBI and BE: left hemisphere banks of superior temporal sulcus thickness, right hemisphere fusiform volume, left hemisphere estimated total intracranial volume (eTIV), and left hippocampus volume. The two scores of the dementia tests (CDR and MMSE) showed a distinctive distribution in the density plots between the three classes (<bold id="s-67287c492b3e"><xref id="x-f16c55f7e5dc" rid="f-81997c1ebe71" ref-type="fig">Figure 3</xref></bold> <bold id="s-ec0aaabef32a">A, B</bold>). In contrast, a significant overlap existed between classes in the eTIV distribution (<bold id="s-68e40256e63b"><xref id="x-94116c6b4de8" rid="f-81997c1ebe71" ref-type="fig">Figure 3</xref></bold> <bold id="s-0195dfcf4778">E</bold>). Nevertheless, the AD group separated relatively well from the CN and EMCI groups in the distributions of the other three FreeSurfer features, especially the left hippocampus volume (<bold id="s-30f01ce29ecd"><xref id="x-9603df50c3a0" rid="f-81997c1ebe71" ref-type="fig">Figure 3</xref></bold> <bold id="s-95e55feefddf">F</bold>). Overall, the density plots in <bold id="s-b7bbcba24e32"><xref id="x-ff7c5e432648" rid="f-81997c1ebe71" ref-type="fig">Figure 3</xref></bold> showed the great potential of CDR and MMSE to enhance model accuracy when combined with the extracted features. These plots also highlight the challenges in distinguishing the CN and EMCI groups.</p>
      </sec>
      <sec>
        <title id="t-6a4498bb3c3b">
          <bold id="s-20ffdd648ccd">Feature selection</bold>
        </title>
        <p id="p-9b00eb83abcf">Several primary factors, such as redundancy (feature-feature) and relevance (feature-class), must be considered during feature selection<bold id="s-ddc235a69991"><xref id="x-cdca2fd1e4dc" rid="R208498129137444" ref-type="bibr">33</xref></bold>. For redundancy minimization, this study used Pearson’s product-moment correlation coefficient to measure the association between features and remove all linearly related features<bold id="s-543c4920c524"><xref id="x-c3638cfba48a" rid="R208498129137445" ref-type="bibr">34</xref></bold>. This phase reduced the features from 360 to 324. Next, PCA, a popular feature selection method, was used to reduce dimensionality and identify highly effective and minimally redundant features. PCA created 33 feature sets; the first contained one feature, the second 11 features, and so on until the final set contained 321 features. Then, the performance of these feature sets was compared to investigate the efficiency of the PCA method.</p>
        <p id="p-5f046f03c81c">Besides PCA, <bold id="s-633c91196ebe"><xref id="x-bbee44a79d54" rid="tw-432e2555a671" ref-type="table">Table 3</xref></bold>  and <bold id="s-5cfd277bc9ca"><xref id="x-f4cd5fea735b" rid="f-c4908520505b" ref-type="fig">Figure 4</xref></bold>  summarize the results with the other two feature selection methods (XGBI and BE) to maximize relevance. The XG-Boost library identified several features with unimportant values during the training process. Consequently, Approach 4 selected 228 features with non-zero importance coefficients to ensure that every feature benefits the training model. In addition, BE was applied for its speed and simplicity in removing irrelevant features with <italic id="e-7f34126fbfcd">p</italic>-values &gt; 0.05. Interestingly, it only identified 29 features, of which 15 were shared with XGBI, including the two cognitive scores and 13 brain structure features (<bold id="s-031fb775a7fb"><xref id="x-e1c80fae4184" rid="f-c4908520505b" ref-type="fig">Figure 4</xref></bold>).</p>
        <p id="p-fd0393d4d560">After selection, XG-Boost continued to train on the features, resulting in the best performance with Approach 4 (see the <bold id="s-dd50ad11f21d">Classification results </bold>section). <bold id="s-c3a3d928dd03"><xref id="x-3600ae71b341" rid="f-6f84e200eaaa" ref-type="fig">Figure 5</xref></bold>  shows the weights of top-ranked features with Approach 4. The two cognitive scores were most influential in the prediction since their weights are approximately sixfold higher than those of the brain structure features (0.263 and 0.257, respectively). Moreover, the thickness of the left superior temporal sulcus was the most informative brain structure feature. The temporal lobe was also the most informative brain region because several features extracted from it had high weights, including the superior temporal sulcus, fusiform gyrus, transverse temporal gyrus, middle temporal gyrus, the temporal pole from the right hemisphere, and hippocampus from the left hemisphere. In conclusion, the temporal lobe shows the most significant changes in patients with AD.</p>
      </sec>
      <sec>
        <title id="t-9e5c530c7966">
          <bold id="s-25fa527bc1f3">Classification</bold>
        </title>
        <p id="p-0e2f11ba4b9a">The accuracies of all approaches and the details of each approach are summarized in <bold id="s-67ab2720aa53"><xref rid="f-b5dbb6622b89" ref-type="fig">Figure 1</xref>, <xref rid="f-594fe7c11047" ref-type="fig">Figure 6</xref></bold>. The accuracies of these three-class classification models were assessed by the proportion of correct expected observations to all actual class observations with tenfold cross-validation. Approach 1, using 358 brain features, had the lowest accuracy (69.00% ± 3.00%). The accuracy improved with Approach 2, which added the two cognitive scores to the feature set (86.00% ± 2.00%). The accuracy improved again with Approach 3, which used XGBI to select the features (91.05% ± 3.34%). However, the accuracy decreased with Approaches 4 (90.90% ± 3.35%) and 5 (74.00%). In Approach 5, the accuracies ranged from 63% to 74%, corresponding to 1 to 321 PCA features; the highest accuracy is shown in <bold id="s-2210e1f84b65"><xref id="x-3d83ace63748" rid="f-594fe7c11047" ref-type="fig">Figure 6</xref></bold>. Approach 6, using BE for feature selection and tuning model parameters with grid search, achieved 92.00% accuracy.</p>
        <p id="p-a8887651bea1">The performance of the six approaches is summarized in <bold id="s-05bdc91a9989"><xref id="x-a9cf394ccf33" rid="tw-c2334e1d3e26" ref-type="table">Table 4</xref></bold>. In Approach 1, the AD class had the highest precision (79%), recall (74%), and F1 score (77%), while the CN class had the lowest precision, recall, and F1 score. In Approach 6, the AD class also achieved the highest precision (96%) and F1 score (95%). However, the CN class had the highest recall (97%) and a higher F1 score (93%) than the EMCI class (88%). </p>
        <p id="p-be3ac8796670"><bold id="s-f9ba121da2f0"><xref id="x-eb7747f25c7d" rid="f-4128e68b8a91" ref-type="fig">Figure 7</xref></bold> presents ROC curves showing the classification performance of Approaches 1 and 6. The ROC curve for Approach 1 showed that the model had poor performance in classifying CN and EMCI subjects (<bold id="s-51f9014147be"><xref id="x-c8fec7145685" rid="f-4128e68b8a91" ref-type="fig">Figure 7</xref></bold> <bold id="strong-19">A</bold>). The AUC of the EMCI class (0.83) was slightly higher than that of the CN class (0.82). However, Approach 1 performed well in identifying the AD class (AUC = 0.92). The ROC curve for Approach 6 showed that the final model classified the EMCI class less accurately than the CN and AD classes (AUC = 0.88; <bold id="s-f97d85ff75d6"><xref id="x-7bc35596f4f4" rid="f-4128e68b8a91" ref-type="fig">Figure 7</xref></bold> <bold id="strong-20">B</bold>). Nevertheless, the ROC curves of all three classes were significantly improved with Approach 6 compared to Approach 1. The ROC curves for the CN (AUC = 0.94) and AD (AUC = 0.98) classes demonstrated excellent performance. The ground truths and their corresponding predictions in three classes are illustrated in <bold id="s-b86b30bdefac"><xref id="x-def07054ecb9" rid="f-5387e71a6702" ref-type="fig">Figure 8</xref></bold>.</p>
      </sec>
    </sec>
    <sec>
      <title id="t-a264291dbd49">Discussion</title>
      <p id="p-1b9891dc6a5b">This study’s primary aim was to implement the XG-Boost algorithm in early AD detection at the EMCI stage. The model performance significantly improved from 68.8% to 92.0% after adding two cognitive scores (MMSE and CDR) and selecting features (<bold id="s-69fc5523513f"><xref id="x-1e3c4f5dfb32" rid="f-594fe7c11047" ref-type="fig">Figure 6</xref></bold> and <bold id="s-8a2d722d09ff"><xref id="x-f50df94c5aab" rid="tw-c2334e1d3e26" ref-type="table">Table 4</xref></bold>). The final model achieved the highest accuracy of 92% by combining Pearson’s correlations with BE for feature selection, reducing the number of features from 360 to 29 (<bold id="s-dd1ba7183e26"><xref id="x-0b86a6158615" rid="f-c4908520505b" ref-type="fig">Figure 4</xref></bold> and <bold id="s-4d347b847371"><xref id="x-d8f9cbaac72d" rid="tw-432e2555a671" ref-type="table">Table 3</xref></bold>). In addition, BE was explicitly recognized as the most suitable selection method (<bold id="s-4bc44664a51b"><xref id="x-64a3eab455fe" rid="f-594fe7c11047" ref-type="fig">Figure 6</xref></bold> and <bold id="s-1ba4d949fe6a"><xref id="x-3feecdb283bc" rid="tw-c2334e1d3e26" ref-type="table">Table 4</xref></bold>). The ROC curve illustrated excellent performance for Approach 6 (<bold id="s-ced92a2c07c3"><xref id="x-3513e3d535e7" rid="f-4128e68b8a91" ref-type="fig">Figure 7</xref></bold> <bold id="s-e871abb25d31">B</bold>), with the AD class having the highest AUC (0.98), followed by the CN class (0.94) and the EMCI class (0.88).</p>
      <sec>
        <title id="t-0b8335491a25">
          <bold id="s-9c225c5d63e1">Feature weights</bold>
        </title>
        <p id="p-1fbc18f77dd2">The BE method in Approach 4 showed that the hippocampus and temporal lobe features were the most important. This result is expected since structural changes in these regions are considered early indicators of MCI and AD<bold id="s-7a32166d7857"><xref id="x-5520b502ab8c" rid="R208498129137446" ref-type="bibr">35</xref></bold>. During the earliest stages of AD, brain atrophy typically follows the hippocampal pathway (entorhinal cortex, hippocampus, and posterior cingulate cortex) and is associated with early memory deficits<bold id="s-b653c5ad2701"><xref id="x-3863b20ca392" rid="R208498129137447" ref-type="bibr">36</xref></bold>. Furthermore, the variations in structural measures, including hippocampus and temporal lobe volumes, sulcus width and thickness, and subcortical nuclei volume, correlate with cognitive performance<bold id="s-8b756bb7f7fd"><xref rid="R208498129137448" ref-type="bibr">37</xref>, <xref rid="R208498129137449" ref-type="bibr">38</xref>, <xref rid="R208498129137450" ref-type="bibr">39</xref>, <xref rid="R208498129137451" ref-type="bibr">40</xref></bold>.</p>
        <p id="p-3adf305e5bfa">Our study found that the two cognitive scores (MMSE and CDR) had substantially higher weights than the brain features. We conclude that the ML architecture designed in this study remains insufficiently effective. Clinically, these two scores are used as parts of the preferred standard diagnosis procedure for AD. Moreover, MMSE and CDR mainly depend on general cognitive and behavioral states rather than the underlying biological changes in the nervous system<bold id="s-011a2da7b223"><xref rid="R208498129137452" ref-type="bibr">41</xref>, <xref rid="R208498129137453" ref-type="bibr">42</xref></bold>. Consequently, while the final model still shows considerable performance, it remains too dependent on symptom testing rather than brain structure changes. </p>
      </sec>
      <sec>
        <title id="t-09ac83b56854">
          <bold id="s-6ccc5d008868">Roles of cognitive scores and feature selection</bold>
        </title>
        <p id="p-2d138307c095">Performance differed significantly between the first approach excluding the cognitive scores and the other approaches including them. Specifically, after adding MMSE and CDR to the feature set, the accuracy increased drastically by nearly 20%, from 69% ± 3% to 86% ± 2%. We suggest that future model development should minimize the influences of the two scores in the prediction to make applying the model in the clinical setting less dependent on the availability of well-trained neurologists to conduct such cognitive tests. There has been a recent increase in the number of studies completing this task. For example, Liu <italic id="e-d42a7655f04b">et al</italic>. reported a multi-model DL framework with accuracies of 88.9% for classifying AD and CN and 76.2% for classifying MCI and CN<bold id="s-73218a6929ab"><xref id="x-8c22512d24a0" rid="R208498129137454" ref-type="bibr">43</xref></bold>. Farooq <italic id="e-89b49c53b9da">et al</italic>. compared GoogLeNet, ResNet-18, and ResNet-152, reporting accuracies of 98% for all three models<bold id="s-a5b0bc35ca7f"><xref id="x-38f685272d51" rid="R208498129137455" ref-type="bibr">44</xref></bold>. However, most recent studies only used a DL approach, which could hinder technology acceptance by medical doctors<bold id="s-d03d04378d4a"><xref id="x-d77ef324f97f" rid="R208498129137456" ref-type="bibr">45</xref></bold>.</p>
        <p id="p-53c81f37a92e">Our study also illustrated that feature selection, especially BE and XGBI, plays a crucial role in the classification model. Both methods led to significant increases in model performance, which surpassed the results of other approaches. The reason is that, from a biological perspective, not all brain features contribute to AD pathology<bold id="s-d5cf6cd86b48"><xref rid="R208498129137457" ref-type="bibr">46</xref>, <xref rid="R208498129137458" ref-type="bibr">47</xref>, <xref rid="R208498129137459" ref-type="bibr">48</xref></bold>. Several studies suggest that several brain regions are affected by AD-related atrophy, including the frontal, temporal, and parietal lobes or cerebellum brain regions<bold id="s-5b3e4857a249"><xref rid="R208498129137457" ref-type="bibr">46</xref>, <xref rid="R208498129137458" ref-type="bibr">47</xref>, <xref rid="R208498129137459" ref-type="bibr">48</xref></bold>. Other feature selection methods also showed outstanding accuracy. For example, Fang <italic id="e-1553229ae122">et al.</italic> proposed several ML algorithms combined with goal-directed conceptual aggregation to demonstrate the effectiveness of this method compared to other approaches (PCA, least absolute shrinkage and selection operator, and univariate feature selection). They achieved 79.25 % accuracy in classifying CN <italic id="e-2746522dc145">vs</italic>. EMCI and 83.33% in classifying CN <italic id="e-4d3f87112219">vs</italic>. LMCI<bold id="s-c985782b353e"><xref id="x-bd8b594cc67c" rid="R208498129137460" ref-type="bibr">49</xref></bold>. Khagi <italic id="e-3c4e9ab67e61">et al</italic>. combined SVM and K-nearest neighbors with one of four feature selection methods (ReliefF, Laplacian, UDFS, and Mutinffs), reporting accuracies of approximately 99% for AD classification<bold id="s-a4974d96a1af"><xref id="x-fe112df3b524" rid="R208498129137461" ref-type="bibr">50</xref></bold>.</p>
      </sec>
      <sec>
        <title id="t-54eafdb9c376">
          <bold id="s-ed216378db83">Model selection and comparison</bold>
        </title>
        <p id="p-f4e93f3d7141">While the models in Approaches 3, 4, and 6 performed relatively similarly, Approach 6 was chosen to be the final model. Firstly, this approach achieved the highest accuracy (92%). Secondly, this model had a shorter training time (45.5 seconds) than Approach 3 (242.6 seconds). Moreover, in the feature selection step, Approach 6 selected features automatically, while Approach 4 required manual feature selection. In addition, by running GridSearch, Approach 6 could obtain optimal parameters compared to Approach 4 (without GridSearch).</p>
        <p id="p-a2109569ee85">Approaches 1 and 6 had greater difficulty classifying EMCI than the other classes. The AUC for the CN class was the lowest in Approach 1 (0.82) but increased significantly in Approach 6 (0.94). This increase indicates that feature selection may eliminate misleading features, which remained significant for CN classification<bold id="s-5707489b4764"><xref id="x-350dee00fac3" rid="R208498129137462" ref-type="bibr">51</xref></bold>. However, the AUC of the EMCI class increased slightly from 0.83 to 0.88; therefore, EMCI is the most challenging class for the model to identify. Brain structural changes in patients with EMCI are likely not prominent enough for the model to recognize easily. Moreover, the EMCI classification remains challenging, and this class often showed low accuracy in previous studies. For example, Goryawala <italic id="e-17e06107563e">et al</italic>. only achieved an accuracy of 0.616 for distinguishing CN and EMCI and 0.814 for distinguishing EMCI and AD<bold id="s-278525e425c6"><xref id="x-81cfe98182f6" rid="R208498129137463" ref-type="bibr">52</xref></bold>.</p>
        <p id="p-de9af090e5fa">Overall, three-way classification in the AD diagnosis model still performs poorly. The proposed model is compared to current models in <bold id="s-fcd1e93e6d7f"><xref id="x-bf4256aa9947" rid="tw-2b56fb8d4526" ref-type="table">Table 5</xref></bold>. However, most current models using three-way classification focus on the MCI class, while the EMCI class is more important in facilitating early AD diagnosis. This oversight underscores the distinctiveness of this study, which introduces novelty by addressing three-class classification involving EMCI, AD, and CN categories. Therefore, the proposed method shows substantial promise in its performance compared to other methods. Compared with state-of-the-art models for three-way classification, the method proposed in this study achieves promising performance with 92% accuracy. However, Ahmed <italic id="e-083d1289efc9">et al</italic>. developed a multi-class deep CNN framework for early AD diagnosis, achieving 93.86% accuracy for three-way AD/MCI/CN classification<bold id="s-b274dc8535fd"><xref id="x-dd25180dcd5a" rid="R208498129137464" ref-type="bibr">53</xref></bold>. It is important to note that their focus was on MCI, whereas our study focuses on the more challenging EMCI classification. Consequently, our model offers a more sophisticated approach and, therefore, has a competitive advantage.</p>
        <p id="p-30c6e5b40fc6"/>
        <table-wrap id="tw-2b56fb8d4526" orientation="portrait">
          <label>Table 5</label>
          <caption id="c-01c48f3b81ab">
            <title id="t-a160eb0ce00b">
              <bold id="s-03fc28ff8ed4">Model performance of three-way classification in early diagnosis of Alzheimer</bold>
            </title>
          </caption>
          <table id="t-8b12d4c8e7e7" rules="rows">
            <colgroup>
              <col width="17.169999999999998"/>
              <col width="22.700000000000003"/>
              <col width="39.72"/>
              <col width="20.41"/>
            </colgroup>
            <thead id="table-section-header-3eaa129f2eac">
              <tr id="tr-c39849fe29c0">
                <th id="tc-53fca70f9412" align="left">
                  <p id="p-4bb3338d0fa2">Study</p>
                </th>
                <th id="tc-e46ee581736f" align="left">
                  <p id="p-6f11ad3cbb0b">Sample size</p>
                </th>
                <th id="tc-5172698e1ad8" align="left">
                  <p id="p-f2d2fcfdce19">Method</p>
                </th>
                <th id="tc-d4efae2ab37f" align="center">
                  <p id="p-c5423c3a34ef">Model performance</p>
                </th>
              </tr>
            </thead>
            <tbody id="ts-6b705044dbf5">
              <tr id="tr-449154ee3a35">
                <td id="table-cell-5" align="left">
                  <p id="p-296d6a9915f5"> <bold id="s-a2d6916ba4f1"><xref id="x-c7efe1402735" rid="R208498129137465" ref-type="bibr">54</xref></bold> </p>
                </td>
                <td id="tc-d2f535381a03" align="left">
                  <p id="p-e87de25f975b">224 CN, 133 MCI, 85 AD </p>
                </td>
                <td id="tc-a835b664d805" align="left">
                  <p id="p-b8c0773c8bba">Modified TResNet </p>
                </td>
                <td id="tc-95aea1220423" align="center">
                  <p id="p-01c804587839">63.2 % </p>
                </td>
              </tr>
              <tr id="tr-9b15fe3d1102">
                <td id="tc-23c3687eac1f" align="left">
                  <p id="p-cb1b8aeee02b"><bold id="s-c25a626a2a50"><xref id="x-b7a1e5f00c56" rid="R208498129137466" ref-type="bibr">55</xref></bold> </p>
                </td>
                <td id="tc-63bbd11a55d1" align="left">
                  <p id="p-985e41a26a58">200 CN, 441 MCI, 105 AD </p>
                </td>
                <td id="tc-3742dd9cf274" align="left">
                  <p id="p-485e649811ba">Decision tree with linear discriminant analysis </p>
                </td>
                <td id="tc-cfea90637b90" align="center">
                  <p id="paragraph-12">66.7 % </p>
                </td>
              </tr>
              <tr id="tr-5070294dbe75">
                <td id="tc-e56184643584" align="left">
                  <p id="p-2f7003fa3707"> <bold id="s-f818620907ae"><xref id="x-b50765b501d8" rid="R208498129137467" ref-type="bibr">56</xref></bold> </p>
                </td>
                <td id="tc-cc7f983109f9" align="left">
                  <p id="paragraph-14">197 CN, 330 MCI, 279 AD</p>
                </td>
                <td id="tc-5cfdf81ee369" align="left">
                  <p id="p-95a5bb467344">3D CNN with 8 instance normalization layers</p>
                </td>
                <td id="tc-8d413b2a6142" align="center">
                  <p id="paragraph-16">66.9 %</p>
                </td>
              </tr>
              <tr id="tr-df03edda364f">
                <td id="tc-79a5fee2f7c4" align="left">
                  <p id="p-17a87adab32e"><bold id="s-051e47f94719"><xref id="x-20dd153a8259" rid="R208498129137468" ref-type="bibr">57</xref></bold> </p>
                </td>
                <td id="tc-b2e49d5b44ae" align="left">
                  <p id="paragraph-18">CN vs. MCI vs. AD </p>
                </td>
                <td id="tc-ee48c3628e74" align="left">
                  <p id="p-5f2fddf39742">XG-Boost </p>
                </td>
                <td id="tc-199b16ba3bf1" align="center">
                  <p id="paragraph-20">66.8 % </p>
                </td>
              </tr>
              <tr id="tr-0e3cf0f00eca">
                <td id="tc-445f00975737" align="left">
                  <p id="p-f01a608500a1"><bold id="s-bce627059247"><xref id="x-bd1f8c62bfc6" rid="R208498129137469" ref-type="bibr">58</xref></bold> </p>
                </td>
                <td id="tc-55a677bd8925" align="left">
                  <p id="p-1ad179a548f7">229 CN, 398 MCI, 192 AD</p>
                </td>
                <td id="tc-7c18e772fb31" align="left">
                  <p id="p-db2b1c971b0a">VGG-16 (Visual Geometry Group 16)</p>
                </td>
                <td id="tc-a5c5e4d4e674" align="center">
                  <p id="p-189ef7c6949d">80.66 %</p>
                </td>
              </tr>
              <tr id="tr-3f084296410c">
                <td id="tc-754457678619" align="left">
                  <p id="p-3e1a685968c0"><bold id="s-fb4482c61832"><xref id="x-44e7816fdfe1" rid="R208498129137470" ref-type="bibr">59</xref></bold> </p>
                </td>
                <td id="tc-5271fe201abd" align="left">
                  <p id="p-f9f5f476fc74">115 CN, 133 MCI, 58 AD</p>
                </td>
                <td id="tc-9da32aa92980" align="left">
                  <p id="p-ebb7c8e05c25">ResNet-18 with Weighted Loss and Transfer Learning and Mish Activation</p>
                </td>
                <td id="tc-aed46436ffc5" align="center">
                  <p id="p-ca51ac237010">88.3 %</p>
                </td>
              </tr>
              <tr id="tr-3629a6196bae">
                <td id="tc-c0ed5be24930" align="left">
                  <p id="p-90af8b7286fb"><bold id="s-27bc3ceefb03"><xref id="x-a251bf041a80" rid="R208498129137471" ref-type="bibr">60</xref></bold> </p>
                </td>
                <td id="tc-f76a9ab99826" align="left">
                  <p id="p-06ca2a43b6f1">229 CN, 382 MCI, 187 AD </p>
                </td>
                <td id="tc-ffa8d757bdc2" align="left">
                  <p id="p-528ac63d1e17">Combined Graph convolutional networks and CNN </p>
                </td>
                <td id="tc-439a6ea8e186" align="center">
                  <p id="p-21c76014bc78">89.4 %</p>
                </td>
              </tr>
              <tr id="tr-8207bb020f8c">
                <td id="tc-d3ced94b4ee1" align="left">
                  <p id="p-7dfbddddf42b">Proposed method</p>
                </td>
                <td id="tc-79fad9182d04" align="left">
                  <p id="p-b9feb16bb040">221 CN, 221 MCI, 221 AD</p>
                </td>
                <td id="tc-161d34679a2c" align="left">
                  <p id="paragraph-35">XG-Boost and BE </p>
                </td>
                <td id="tc-d2d8a19451f8" align="center">
                  <p id="p-edc811171c7d">92 %</p>
                </td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
        <p id="p-47ce6102736a"/>
      </sec>
    </sec>
    <sec>
      <title id="t-20ac2d96e678">Conclusions</title>
      <p id="p-8bc0a02b3344">This study developed an ML model for early AD diagnosis based on structural MRI scans using XG-Boost to classify three classes: CN, EMCI, and AD. We also evaluated three feature selection methods (BE, XGBI, and PCA) to identify the optimal method for our model. The final model using BE with tuning parameters achieved the highest accuracy of 92%. The AUCs for the AD, CN, and EMCI classes were 0.98, 0.94, and 0.88, respectively. Compared to previous three-class classification methods, the proposed method appears promising for early AD detection.</p>
      <p id="p-ffe4cb8e0d33">While the XG-Boost model attained high accuracy with the aid of BE, several technical issues remain unsolved. Firstly, the AUC was lower for the EMCI class than for the CN and AD classes. Therefore, additional interventions in fitting parameters to enhance EMCI classification accuracy are essential. In addition, the model should be modified to reduce its dependence on MMSE and CDR scores. Finally, the model should be tested on multiple datasets to optimize its performance.</p>
    </sec>
    <sec>
      <title id="t-8f80a5f110d2">Abbreviations</title>
      <p id="p-4a69375fba42"><bold id="s-79e93a222971">ADNI</bold>: Alzheimer’s Disease Neuroimaging Initiative; <bold id="s-13165b4acb07">AD</bold>: Alzheimer's disease; <bold id="s-97e598d4e813">AI</bold>: Artificial Intelligence; <bold id="s-a5203b89ec57">BE</bold>: Backward Elimination; <bold id="s-c90dab233978">CAD</bold>: Computer-Aided Diagnosis; <bold id="s-820e4edfb256">CDR</bold>: Clinical Dementia Rating; <bold id="s-bd71be8425c6">CN</bold>: Cognitive Normal; <bold id="s-462ed208258d">CNN</bold>: Convolutional Neural Network; <bold id="s-2ab3edd27046">DL</bold>: Deep Learning; <bold id="s-8e49b1314487">eTIV</bold>: estimated Total Intracranial Volume; <bold id="s-b8ab20a0ceaf">EMCI</bold>: Early MCI; <bold id="s-550f67dbda8e">GMM</bold>: Gaussian Mixture Model; <bold id="s-03fb8029a691">GridSearchCV</bold>: Grid Search cross-validation; <bold id="s-c28b75070b2e">GDCA</bold>: Goal-Directed Conceptual Aggregation; <bold id="s-6c3da7b820b1">GLCM</bold>: Gray Level Co-occurrence Matrix; <bold id="s-b9cbc24f11f7">KNN</bold>: K Nearest Neighbor; <bold id="s-e05846de71bf">LMCI</bold>: Late MCI; <bold id="s-da364064e4a4">ML</bold>: Machine Learning; <bold id="s-2a1db269b9aa">MCI</bold>: Mild Cognitive Impairment; <bold id="s-a40e9f660555">MMSE</bold>: Mini-Mental State Examination; <bold id="s-9109e1ba977b">MRI</bold>: Magnetic Resonance Imaging; <bold id="s-089d00ce1e49">OLS</bold>: Ordinary Least Squares; <bold id="s-7c134e00089b">RELM</bold>: Rough Extreme Learning Machine; <bold id="s-8fc2ae3d5b62">ROC-AUC</bold>: Area Under The ROC Curve; <bold id="s-2d0b032810cc">PET</bold>: Positron Emission Tomography; <bold id="s-4f7892f404a2">PCA</bold>: Principal Component Analysis; <bold id="s-8183fe692830">PC</bold>: Principal Components; <bold id="s-937e84c631e8">sMRI</bold>: structural MRI; <bold id="s-319215889a9f">SVM</bold>: Support Vector Machine; <bold id="s-4773a5e84c8c">SL</bold>: Significance Level; <bold id="s-b6d79196c256">XGBI</bold>: XG-Boost 
Importance.</p>
    </sec>
    <sec>
      <title id="t-9ae26adfca58">Acknowledgments </title>
      <p id="p-cb9c6bf603ae">Data used in preparation of this article were obtained from the Alzheimer's Disease Neuroimaging Initiative (ADNI) database (adni.loni.usc.edu) and the Alzheimer's Disease Metabolomics Consortium (ADMC). As such, the investigators within the ADNI contributed to the design and implementation of ADNI and/or provided data but did not participate in analysis or writing of this report. A complete listing of ADNI investigators can be found at: http://adni.loni.usc.edu/wp-content/uploads/how_to_apply/ADNI_Acknowledgement_List.pdf and https://sites.duke.edu/adnimetab/team </p>
    </sec>
    <sec>
      <title id="t-d4d82801a32b">Author’s contributions</title>
      <p id="p-246318ce1f4b">All authors contributed to the ideas, designed the study, and performed the experiments. All authors read and approved the final manuscript. </p>
    </sec>
    <sec>
      <title id="t-24bbce28c25c">Funding</title>
      <p id="p-7f66feb8cef1">This research is funded by Vietnam National University Ho Chi Minh City (VNU-HCM) under grant number NCM2020-28-01.</p>
    </sec>
    <sec>
      <title id="t-7cbc55d9fef4">Availability of data and materials</title>
      <p id="p-7a754565b970">The data that support the findings of this study are available in ADNI at http://adni.loni.usc.edu/data-samples/access-data/ </p>
    </sec>
    <sec>
      <title id="t-a5f536b67c32">Ethics approval and consent to participate</title>
      <p id="p-1647766adea1">Not applicable. </p>
    </sec>
    <sec>
      <title id="t-c80a8cbeffb9">Consent for publication</title>
      <p id="p-db73d4da8a3b">Not applicable. </p>
    </sec>
    <sec>
      <title id="t-7fa02d63ff7c">Competing interests</title>
      <p id="p-ecc0a536f982">The authors declare that they have no competing interests.</p>
    </sec>
  </body>
  <back>
    <ref-list>
      <title>References</title>
      <ref id="R208498129137412">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Wong</surname>
              <given-names>W.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Economic burden of Alzheimer disease and managed care considerations</article-title>
          <source>The American Journal of Managed Care</source>
          <year>2020</year>
          <volume>26</volume>
          <issue>8</issue>
          <fpage>177</fpage>
          <lpage>83</lpage>
          <issn>1936-2692</issn>
          <pub-id pub-id-type="pmid">32840331</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137413">
        <element-citation publication-type="misc">
          <person-group person-group-type="author">
            <name>
              <surname>Kumar</surname>
              <given-names>A.</given-names>
            </name>
            <collab/>
            <etal/>
          </person-group>
          <article-title>Alzheimer Disease. 2021 Aug 11. StatPearls. Treasure Island (FL): StatPearls Publishing, 2022</article-title>
          <year>2022</year>
        </element-citation>
      </ref>
      <ref id="R208498129137414">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Bi</surname>
              <given-names>X.A.</given-names>
            </name>
            <name>
              <surname>Xu</surname>
              <given-names>Q.</given-names>
            </name>
            <name>
              <surname>Luo</surname>
              <given-names>X.</given-names>
            </name>
            <name>
              <surname>Sun</surname>
              <given-names>Q.</given-names>
            </name>
            <name>
              <surname>Wang</surname>
              <given-names>Z.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Analysis of progression toward Alzheimer's disease based on evolutionary weighted random support vector machine cluster</article-title>
          <source>Frontiers in Neuroscience</source>
          <year>2018</year>
          <volume>12</volume>
          <fpage>716</fpage>
          <issn>1662-4548</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.3389/fnins.2018.00716</pub-id>
          <pub-id pub-id-type="pmid">30349454</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137415">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Tatiparti</surname>
              <given-names>K.</given-names>
            </name>
            <name>
              <surname>Sau</surname>
              <given-names>S.</given-names>
            </name>
            <name>
              <surname>Rauf</surname>
              <given-names>M.A.</given-names>
            </name>
            <name>
              <surname>Iyer</surname>
              <given-names>A.K.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Smart treatment strategies for alleviating tauopathy and neuroinflammation to improve clinical outcome in Alzheimer's disease</article-title>
          <source>Drug Discovery Today</source>
          <year>2020</year>
          <volume>25</volume>
          <issue>12</issue>
          <fpage>2110</fpage>
          <lpage>29</lpage>
          <issn>1878-5832</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.1016/j.drudis.2020.09.025</pub-id>
          <pub-id pub-id-type="pmid">33011341</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137416">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Kang</surname>
              <given-names>L.</given-names>
            </name>
            <name>
              <surname>Jiang</surname>
              <given-names>J.</given-names>
            </name>
            <name>
              <surname>Huang</surname>
              <given-names>J.</given-names>
            </name>
            <name>
              <surname>Zhang</surname>
              <given-names>T.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Identifying early mild cognitive impairment by multi-modality mri-based deep learning</article-title>
          <source>Frontiers in Aging Neuroscience</source>
          <year>2020</year>
          <volume>12</volume>
          <fpage>206</fpage>
          <issn>1663-4365</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.3389/fnagi.2020.00206</pub-id>
          <pub-id pub-id-type="pmid">33101003</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137417">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Zhang</surname>
              <given-names>F.</given-names>
            </name>
            <name>
              <surname>Pan</surname>
              <given-names>B.</given-names>
            </name>
            <name>
              <surname>Shao</surname>
              <given-names>P.</given-names>
            </name>
            <name>
              <surname>Liu</surname>
              <given-names>P.</given-names>
            </name>
            <name>
              <surname>Shen</surname>
              <given-names>S.</given-names>
            </name>
            <name>
              <surname>Yao</surname>
              <given-names>P.</given-names>
            </name>
            <name>
              <surname>Alzheimer's Disease Neuroimaging Initiative</surname>
              <given-names/>
            </name>
            <name>
              <surname>Australian Imaging Biomarkers Lifestyle flagship study of ageing</surname>
              <given-names/>
            </name>
            <collab/>
          </person-group>
          <article-title>A single model deep learning approach for Alzheimer's disease diagnosis</article-title>
          <source>Neuroscience</source>
          <year>2022</year>
          <volume>491</volume>
          <fpage>200</fpage>
          <lpage>14</lpage>
          <issn>1873-7544</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.1016/j.neuroscience.2022.03.026</pub-id>
          <pub-id pub-id-type="pmid">35398507</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137418">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Xing</surname>
              <given-names>X.</given-names>
            </name>
            <name>
              <surname/>
              <given-names>G. Liang</given-names>
            </name>
            <name>
              <surname/>
              <given-names>Y. Zhang</given-names>
            </name>
            <name>
              <surname/>
              <given-names>S. Khanal</given-names>
            </name>
            <name>
              <surname/>
              <given-names>A.L. Lin</given-names>
            </name>
            <name>
              <surname/>
              <given-names>N. Jacobs</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Advit: Vision transformer on multi-modality pet images for alzheimer disease diagnosis</article-title>
          <source>In2022 IEEE 19th International Symposium on Biomedical Imaging (ISBI)</source>
          <year>2022</year>
          <volume>2022</volume>
          <fpage>1</fpage>
          <lpage>4</lpage>
          <pub-id pub-id-type="doi">https://doi.org/10.1109/ISBI52829.2022.9761584</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137419">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Diogo</surname>
              <given-names>V.S.</given-names>
            </name>
            <name>
              <surname>Ferreira</surname>
              <given-names>H.A.</given-names>
            </name>
            <name>
              <surname>Prata</surname>
              <given-names>D.</given-names>
            </name>
            <name>
              <surname>Initiative</surname>
              <given-names>Alzheimer's Disease Neuroimaging</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Early diagnosis of Alzheimer's disease using machine learning: a multi-diagnostic, generalizable approach</article-title>
          <source>Alzheimer's Research &amp; Therapy</source>
          <year>2022</year>
          <volume>14</volume>
          <issue>1</issue>
          <fpage>107</fpage>
          <issn>1758-9193</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.1186/s13195-022-01047-y</pub-id>
          <pub-id pub-id-type="pmid">35922851</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137420">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Tábuas-Pereira</surname>
              <given-names>M.</given-names>
            </name>
            <name>
              <surname>Baldeiras</surname>
              <given-names>I.</given-names>
            </name>
            <name>
              <surname>Duro</surname>
              <given-names>D.</given-names>
            </name>
            <name>
              <surname>Santiago</surname>
              <given-names>B.</given-names>
            </name>
            <name>
              <surname>Ribeiro</surname>
              <given-names>M.H.</given-names>
            </name>
            <name>
              <surname>Leitão</surname>
              <given-names>M.J.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Prognosis of early-onset vs. late-onset mild cognitive impairment: comparison of conversion rates and its predictors</article-title>
          <source>Geriatrics (Basel, Switzerland)</source>
          <year>2016</year>
          <volume>1</volume>
          <issue>2</issue>
          <fpage>11</fpage>
          <issn>2308-3417</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.3390/geriatrics1020011</pub-id>
          <pub-id pub-id-type="pmid">31022805</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137421">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Mirzaei</surname>
              <given-names>G.</given-names>
            </name>
            <name>
              <surname>Adeli</surname>
              <given-names>H.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Machine learning techniques for diagnosis of alzheimer disease, mild cognitive disorder, and other types of dementia</article-title>
          <source>Biomedical Signal Processing and Control</source>
          <year>2022</year>
          <volume>72</volume>
          <fpage>103293</fpage>
          <issn>1746-8094</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.1016/j.bspc.2021.103293</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137422">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Kolahkaj</surname>
              <given-names>S.</given-names>
            </name>
            <name>
              <surname>Zare</surname>
              <given-names>H.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>A connectome-based deep learning approach for Early MCI and MCI detection using structural brain networks</article-title>
          <source>Neuroscience Informatics (Online)</source>
          <year>2023</year>
          <volume>3</volume>
          <issue>1</issue>
          <fpage>100118</fpage>
          <issn>2772-5286</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.1016/j.neuri.2023.100118</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137423">
        <element-citation publication-type="inproceedings">
          <person-group person-group-type="author">
            <name>
              <surname>Rice</surname>
              <given-names>L.</given-names>
            </name>
            <name>
              <surname>Wong</surname>
              <given-names>E.</given-names>
            </name>
            <name>
              <surname>Kolter</surname>
              <given-names>Z.</given-names>
            </name>
            <collab/>
          </person-group>
          <person-group person-group-type="editor"/>
          <article-title>Overfitting in adversarially robust deep learning</article-title>
          <source>International Conference on Machine Learning</source>
          <year>2020</year>
          <fpage>8093</fpage>
          <lpage>8104</lpage>
          <uri>http://proceedings.mlr.press/v119/rice20a</uri>
        </element-citation>
      </ref>
      <ref id="R208498129137424">
        <element-citation publication-type="inproceedings">
          <person-group person-group-type="author">
            <name>
              <surname>Techa</surname>
              <given-names>C.</given-names>
            </name>
            <collab/>
            <etal/>
          </person-group>
          <person-group person-group-type="editor">
            <etal/>
          </person-group>
          <article-title>Alzheimer's disease multi-class classification model based on CNN and StackNet using brain MRI data.</article-title>
          <source>International Conference on Advanced Intelligent Systems and Informatics</source>
          <year>2022</year>
          <publisher-name>Springer.</publisher-name>
          <pub-id pub-id-type="doi">https://doi.org/10.1007/978-3-031-20601-6_23</pub-id>
          <fpage>248</fpage>
          <lpage>259</lpage>
        </element-citation>
      </ref>
      <ref id="R208498129137425">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Alorf</surname>
              <given-names>A.</given-names>
            </name>
            <name>
              <surname>Khan</surname>
              <given-names>M.U.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Multi-label classification of Alzheimer's disease stages from resting-state fMRI-based correlation connectivity data and deep learning</article-title>
          <source>Computers in Biology and Medicine</source>
          <year>2022</year>
          <volume>151</volume>
          <fpage>106240</fpage>
          <issn>1879-0534</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.1016/j.compbiomed.2022.106240</pub-id>
          <pub-id pub-id-type="pmid">36423532</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137426">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Alfalahi</surname>
              <given-names>H.</given-names>
            </name>
            <name>
              <surname>Dias</surname>
              <given-names>S.B.</given-names>
            </name>
            <name>
              <surname>Khandoker</surname>
              <given-names>A.H.</given-names>
            </name>
            <name>
              <surname>Chaudhuri</surname>
              <given-names>K.R.</given-names>
            </name>
            <name>
              <surname>Hadjileontiadis</surname>
              <given-names>L.J.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>A scoping review of neurodegenerative manifestations in explainable digital phenotyping</article-title>
          <source>NPJ Parkinson's Disease</source>
          <year>2023</year>
          <volume>9</volume>
          <issue>1</issue>
          <fpage>49</fpage>
          <issn>2373-8057</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.1038/s41531-023-00494-0</pub-id>
          <pub-id pub-id-type="pmid">36997573</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137427">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Garre-Olmo</surname>
              <given-names>J.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>[Epidemiology of Alzheimer's disease and other dementias]</article-title>
          <source>Revista de neurologia</source>
          <year>2018</year>
          <volume>66</volume>
          <issue>11</issue>
          <fpage>377</fpage>
          <lpage>86</lpage>
          <issn>1576-6578</issn>
          <pub-id pub-id-type="pmid">29790571</pub-id>
          <uri>https://europepmc.org/article/med/29790571</uri>
        </element-citation>
      </ref>
      <ref id="R208498129137428">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Riek</surname>
              <given-names>H.C.</given-names>
            </name>
            <name>
              <surname>Brien</surname>
              <given-names>D.C.</given-names>
            </name>
            <name>
              <surname>Coe</surname>
              <given-names>B.C.</given-names>
            </name>
            <name>
              <surname>Huang</surname>
              <given-names>J.</given-names>
            </name>
            <name>
              <surname>Perkins</surname>
              <given-names>J.E.</given-names>
            </name>
            <name>
              <surname>Yep</surname>
              <given-names>R.</given-names>
            </name>
            <collab>ONDRI Investigators</collab>
            <collab/>
          </person-group>
          <article-title>Cognitive correlates of antisaccade behaviour across multiple neurodegenerative diseases</article-title>
          <source>Brain Communications</source>
          <year>2023</year>
          <volume>5</volume>
          <issue>2</issue>
          <issn>2632-1297</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.1093/braincomms/fcad049</pub-id>
          <pub-id pub-id-type="pmid">36970045</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137429">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Jayasudha</surname>
              <given-names>M.</given-names>
            </name>
            <name>
              <surname>Elangovan</surname>
              <given-names>M.</given-names>
            </name>
            <name>
              <surname>Mahdal</surname>
              <given-names>M.</given-names>
            </name>
            <name>
              <surname>Priyadarshini</surname>
              <given-names>J.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Accurate estimation of tensile strength of 3D printed parts using machine learning algorithms</article-title>
          <source>Processes (Basel, Switzerland)</source>
          <year>2022</year>
          <volume>10</volume>
          <issue>6</issue>
          <fpage>1158</fpage>
          <issn>2227-9717</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.3390/pr10061158</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137430">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Sun</surname>
              <given-names>X.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Application and Comparison of Artificial Neural Networks and XGBoost on Alzheimer's Disease</article-title>
          <source>Proceedings of the 2021 International Conference on Bioinformatics and Intelligent Computing</source>
          <year>2021</year>
          <volume>2021</volume>
          <fpage>101</fpage>
          <lpage>105</lpage>
          <pub-id pub-id-type="doi">https://doi.org/10.1145/3448748.3448765</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137431">
        <element-citation publication-type="inproceedings">
          <person-group person-group-type="author">
            <name>
              <surname>Ong</surname>
              <given-names>H.</given-names>
            </name>
            <collab/>
            <etal/>
          </person-group>
          <person-group person-group-type="editor">
            <etal/>
          </person-group>
          <article-title>A Machine Learning Framework Based on Extreme Gradient Boosting for Intelligent Alzheimer's Disease Diagnosis Using Structure MRI</article-title>
          <source>International Conference on the Development of Biomedical Engineering in Vietnam</source>
          <year>2020</year>
          <volume>2020</volume>
          <publisher-name>Springer.</publisher-name>
          <fpage>815</fpage>
          <lpage>827</lpage>
        </element-citation>
      </ref>
      <ref id="R208498129137432">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Tuan</surname>
              <given-names>T.A.</given-names>
            </name>
            <collab/>
            <etal/>
          </person-group>
          <article-title>Alzheimer's diagnosis using deep learning in segmenting and classifying 3D brain MR images</article-title>
          <source>The International Journal of Neuroscience</source>
          <year>2020</year>
          <volume>132</volume>
          <issue>7</issue>
          <fpage>689</fpage>
          <lpage>98</lpage>
          <issn>0020-7454</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.1080/00207454.2020.1835900</pub-id>
          <pub-id pub-id-type="pmid">33045895</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137433">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Tuan</surname>
              <given-names>T.A.</given-names>
            </name>
            <name>
              <surname>Pham</surname>
              <given-names>T.B.</given-names>
            </name>
            <name>
              <surname>Kim</surname>
              <given-names>J.Y.</given-names>
            </name>
            <name>
              <surname>Tavares</surname>
              <given-names>J.M.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Alzheimer’s diagnosis using deep learning in segmenting and classifying 3D brain MR images</article-title>
          <source>International Journal of Neuroscience</source>
          <year>2021</year>
          <volume>132</volume>
          <issue>7</issue>
          <fpage>689</fpage>
          <lpage>98</lpage>
          <pub-id pub-id-type="doi">https://doi.org/10.1080/00207454.2020.1835900</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137434">
        <element-citation publication-type="misc">
          <person-group person-group-type="author">
            <collab/>
          </person-group>
          <article-title>FreeSurfer</article-title>
          <uri>https://surfer.nmr.mgh.harvard.edu</uri>
          <year>2021</year>
        </element-citation>
      </ref>
      <ref id="R208498129137435">
        <element-citation publication-type="misc">
          <person-group person-group-type="author">
            <name>
              <surname>Geladi</surname>
              <given-names>P.</given-names>
            </name>
            <name>
              <surname>Linderholm</surname>
              <given-names>J.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Principal Component Analysis</article-title>
          <year>2020</year>
          <volume>2020</volume>
          <pub-id pub-id-type="doi">https://doi.org/10.1016/B978-0-12-409547-2.14892-9</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137436">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Lever</surname>
              <given-names>J.</given-names>
            </name>
            <name>
              <surname>Krzywinski</surname>
              <given-names>M.</given-names>
            </name>
            <name>
              <surname>Altman</surname>
              <given-names>N.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Principal component analysis</article-title>
          <source>Nature Methods</source>
          <year>2017</year>
          <volume>14</volume>
          <issue>7</issue>
          <fpage>641</fpage>
          <lpage>2</lpage>
          <issn>1548-7091</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.1038/nmeth.4346</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137437">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Dunkler</surname>
              <given-names>D.</given-names>
            </name>
            <name>
              <surname>Plischke</surname>
              <given-names>M.</given-names>
            </name>
            <name>
              <surname>Leffondré</surname>
              <given-names>K.</given-names>
            </name>
            <name>
              <surname>Heinze</surname>
              <given-names>G.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Augmented backward elimination: a pragmatic and purposeful way to develop statistical models</article-title>
          <source>PLoS One</source>
          <year>2014</year>
          <volume>9</volume>
          <issue>11</issue>
          <fpage>e113677</fpage>
          <issn>1932-6203</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.1371/journal.pone.0113677</pub-id>
          <pub-id pub-id-type="pmid">25415265</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137438">
        <element-citation publication-type="book">
          <person-group person-group-type="author">
            <name>
              <surname>Royston</surname>
              <given-names>P.</given-names>
            </name>
            <name>
              <surname>Sauerbrei</surname>
              <given-names>W.</given-names>
            </name>
            <collab/>
          </person-group>
          <person-group person-group-type="editor"/>
          <source>Multivariable model-building: a pragmatic approach to regression analysis based on fractional polynomials for modelling continuous variables</source>
          <volume>777</volume>
          <publisher-name>John Wiley &amp; Sons</publisher-name>
          <year>2008</year>
          <pub-id pub-id-type="doi">https://doi.org/10.1002/9780470770771</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137439">
        <element-citation publication-type="misc">
          <person-group person-group-type="author">
            <name>
              <surname>Chen</surname>
              <given-names>T.</given-names>
            </name>
            <collab/>
            <etal/>
          </person-group>
          <article-title>Xgboost: extreme gradient boosting. R package version 0.4-2, 2015. 1(4): p. 1-4.</article-title>
          <year>2015</year>
        </element-citation>
      </ref>
      <ref id="R208498129137440">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Liu</surname>
              <given-names>Y.</given-names>
            </name>
            <name>
              <surname>Liu</surname>
              <given-names>L.</given-names>
            </name>
            <name>
              <surname>Yang</surname>
              <given-names>L.</given-names>
            </name>
            <name>
              <surname>Hao</surname>
              <given-names>L.</given-names>
            </name>
            <name>
              <surname>Bao</surname>
              <given-names>Y.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Measuring distance using ultra-wideband radio technology enhanced by extreme gradient boosting decision tree (XGBoost)</article-title>
          <source>Automation in Construction</source>
          <year>2021</year>
          <volume>126</volume>
          <fpage>103678</fpage>
          <issn>0926-5805</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.1016/j.autcon.2021.103678</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137441">
        <element-citation publication-type="misc">
          <person-group person-group-type="author">
            <name>
              <surname>Mitchell</surname>
              <given-names>R.</given-names>
            </name>
            <collab/>
            <etal/>
          </person-group>
          <article-title>Xgboost: Scalable GPU accelerated learning. arXiv preprint arXiv:1806.11248, 2018</article-title>
        </element-citation>
      </ref>
      <ref id="R208498129137442">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Guo</surname>
              <given-names>J.</given-names>
            </name>
            <name>
              <surname>Yang</surname>
              <given-names>L.</given-names>
            </name>
            <name>
              <surname>Bie</surname>
              <given-names>R.</given-names>
            </name>
            <name>
              <surname>Yu</surname>
              <given-names>J.</given-names>
            </name>
            <name>
              <surname>Gao</surname>
              <given-names>Y.</given-names>
            </name>
            <name>
              <surname>Shen</surname>
              <given-names>Y.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>An XGBoost-based physical fitness evaluation model using advanced feature selection and Bayesian hyper-parameter optimization for wearable running monitoring</article-title>
          <source>Computer Networks</source>
          <year>2019</year>
          <volume>151</volume>
          <fpage>166</fpage>
          <lpage>80</lpage>
          <issn>1389-1286</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.1016/j.comnet.2019.01.026</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137443">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Pedregosa</surname>
              <given-names>F.</given-names>
            </name>
            <collab/>
            <etal/>
          </person-group>
          <article-title>Scikit-learn: Machine learning in Python</article-title>
          <source>Journal of Machine Learning Research</source>
          <year>2011</year>
          <volume>12</volume>
          <fpage>2825</fpage>
          <lpage>2830</lpage>
          <uri>https://www.jmlr.org/papers/volume12/pedregosa11a/pedregosa11a.pdf</uri>
        </element-citation>
      </ref>
      <ref id="R208498129137444">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Cai</surname>
              <given-names>J.</given-names>
            </name>
            <name>
              <surname>Luo</surname>
              <given-names>J.</given-names>
            </name>
            <name>
              <surname>Wang</surname>
              <given-names>S.</given-names>
            </name>
            <name>
              <surname>Yang</surname>
              <given-names>S.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Feature selection in machine learning: A new perspective</article-title>
          <source>Neurocomputing</source>
          <year>2018</year>
          <volume>300</volume>
          <fpage>70</fpage>
          <lpage>9</lpage>
          <issn>0925-2312</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.1016/j.neucom.2017.11.077</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137445">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Liu</surname>
              <given-names>J.</given-names>
            </name>
            <name>
              <surname>Li</surname>
              <given-names>R.</given-names>
            </name>
            <name>
              <surname>Wu</surname>
              <given-names>R.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Feature selection for varying coefficient models with ultrahigh-dimensional covariates</article-title>
          <source>Journal of the American Statistical Association</source>
          <year>2014</year>
          <volume>109</volume>
          <issue>505</issue>
          <fpage>266</fpage>
          <lpage>74</lpage>
          <issn>0162-1459</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.1080/01621459.2013.850086</pub-id>
          <pub-id pub-id-type="pmid">24678135</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137446">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Wisse</surname>
              <given-names>L.E.</given-names>
            </name>
            <name>
              <surname>Biessels</surname>
              <given-names>G.J.</given-names>
            </name>
            <name>
              <surname>Heringa</surname>
              <given-names>S.M.</given-names>
            </name>
            <name>
              <surname>Kuijf</surname>
              <given-names>H.J.</given-names>
            </name>
            <name>
              <surname>Koek</surname>
              <given-names>D.H.</given-names>
            </name>
            <name>
              <surname>Luijten</surname>
              <given-names>P.R.</given-names>
            </name>
            <collab>Utrecht Vascular Cognitive Impairment (VCI) Study Group</collab>
            <collab/>
          </person-group>
          <article-title>Hippocampal subfield volumes at 7T in early Alzheimer's disease and normal aging</article-title>
          <source>Neurobiology of Aging</source>
          <year>2014</year>
          <volume>35</volume>
          <issue>9</issue>
          <fpage>2039</fpage>
          <lpage>45</lpage>
          <issn>1558-1497</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.1016/j.neurobiolaging.2014.02.021</pub-id>
          <pub-id pub-id-type="pmid">24684788</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137447">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Scahill</surname>
              <given-names>R.I.</given-names>
            </name>
            <name>
              <surname>Schott</surname>
              <given-names>J.M.</given-names>
            </name>
            <name>
              <surname>Stevens</surname>
              <given-names>J.M.</given-names>
            </name>
            <name>
              <surname>Rossor</surname>
              <given-names>M.N.</given-names>
            </name>
            <name>
              <surname>Fox</surname>
              <given-names>N.C.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Mapping the evolution of regional atrophy in Alzheimer's disease: unbiased analysis of fluid-registered serial MRI</article-title>
          <source>Proceedings of the National Academy of Sciences of the United States of America</source>
          <year>2002</year>
          <volume>99</volume>
          <issue>7</issue>
          <fpage>4703</fpage>
          <lpage>7</lpage>
          <issn>0027-8424</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.1073/pnas.052587399</pub-id>
          <pub-id pub-id-type="pmid">11930016</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137448">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Ridha</surname>
              <given-names>B.H.</given-names>
            </name>
            <name>
              <surname>Anderson</surname>
              <given-names>V.M.</given-names>
            </name>
            <name>
              <surname>Barnes</surname>
              <given-names>J.</given-names>
            </name>
            <name>
              <surname>Boyes</surname>
              <given-names>R.G.</given-names>
            </name>
            <name>
              <surname>Price</surname>
              <given-names>S.L.</given-names>
            </name>
            <name>
              <surname>Rossor</surname>
              <given-names>M.N.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Volumetric MRI and cognitive measures in Alzheimer disease : comparison of markers of progression</article-title>
          <source>Journal of Neurology</source>
          <year>2008</year>
          <volume>255</volume>
          <issue>4</issue>
          <fpage>567</fpage>
          <lpage>74</lpage>
          <issn>0340-5354</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.1007/s00415-008-0750-9</pub-id>
          <pub-id pub-id-type="pmid">18274807</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137449">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Hua</surname>
              <given-names>X.</given-names>
            </name>
            <name>
              <surname>Lee</surname>
              <given-names>S.</given-names>
            </name>
            <name>
              <surname>Yanovsky</surname>
              <given-names>I.</given-names>
            </name>
            <name>
              <surname>Leow</surname>
              <given-names>A.D.</given-names>
            </name>
            <name>
              <surname>Chou</surname>
              <given-names>Y.Y.</given-names>
            </name>
            <name>
              <surname>Ho</surname>
              <given-names>A.J.</given-names>
            </name>
            <collab>Alzheimer's Disease Neuroimaging Initiative</collab>
            <collab/>
          </person-group>
          <article-title>Optimizing power to track brain degeneration in Alzheimer's disease and mild cognitive impairment with tensor-based morphometry: an ADNI study of 515 subjects</article-title>
          <source>NeuroImage</source>
          <year>2009</year>
          <volume>48</volume>
          <issue>4</issue>
          <fpage>668</fpage>
          <lpage>81</lpage>
          <issn>1095-9572</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.1016/j.neuroimage.2009.07.011</pub-id>
          <pub-id pub-id-type="pmid">19615450</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137450">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Visser</surname>
              <given-names>P.J.</given-names>
            </name>
            <name>
              <surname>Scheltens</surname>
              <given-names>P.</given-names>
            </name>
            <name>
              <surname>Verhey</surname>
              <given-names>F.R.</given-names>
            </name>
            <name>
              <surname>Schmand</surname>
              <given-names>B.</given-names>
            </name>
            <name>
              <surname>Launer</surname>
              <given-names>L.J.</given-names>
            </name>
            <name>
              <surname>Jolles</surname>
              <given-names>J.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Medial temporal lobe atrophy and memory dysfunction as predictors for dementia in subjects with mild cognitive impairment</article-title>
          <source>Journal of Neurology</source>
          <year>1999</year>
          <volume>246</volume>
          <issue>6</issue>
          <fpage>477</fpage>
          <lpage>85</lpage>
          <issn>0340-5354</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.1007/s004150050387</pub-id>
          <pub-id pub-id-type="pmid">10431775</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137451">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Dickerson</surname>
              <given-names>B.C.</given-names>
            </name>
            <name>
              <surname>Bakkour</surname>
              <given-names>A.</given-names>
            </name>
            <name>
              <surname>Salat</surname>
              <given-names>D.H.</given-names>
            </name>
            <name>
              <surname>Feczko</surname>
              <given-names>E.</given-names>
            </name>
            <name>
              <surname>Pacheco</surname>
              <given-names>J.</given-names>
            </name>
            <name>
              <surname>Greve</surname>
              <given-names>D.N.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>The cortical signature of Alzheimer's disease: regionally specific cortical thinning relates to symptom severity in very mild to mild AD dementia and is detectable in asymptomatic amyloid-positive individuals</article-title>
          <source>Cerebral Cortex (New York, N.Y.)</source>
          <year>2009</year>
          <volume>19</volume>
          <issue>3</issue>
          <fpage>497</fpage>
          <lpage>510</lpage>
          <issn>1460-2199</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.1093/cercor/bhn113</pub-id>
          <pub-id pub-id-type="pmid">18632739</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137452">
        <element-citation publication-type="misc">
          <person-group person-group-type="author">
            <name>
              <surname>Brossard</surname>
              <given-names>B.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Publicizing Dementia: the Use of the Mini-Mental State Exam in Medical Research and Practice, in Measuring Mental Disorders. 2018, Elsevier. p. 127-154</article-title>
          <year>2018</year>
          <pub-id pub-id-type="doi">https://doi.org/10.1016/B978-1-78548-305-9.50004-3</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137453">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Sinha</surname>
              <given-names>A.</given-names>
            </name>
            <name>
              <surname>Sinha</surname>
              <given-names>A.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Mild Cognitive Impairment and its Diagnosis to Progression to Dementia with Several Screening Measures</article-title>
          <source>The Open Psychology Journal</source>
          <year>2018</year>
          <volume>11</volume>
          <issue>1</issue>
          <fpage>142</fpage>
          <lpage>7</lpage>
          <issn>1874-3501</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.2174/1874350101811010142</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137454">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Liu</surname>
              <given-names>M.</given-names>
            </name>
            <name>
              <surname>Li</surname>
              <given-names>F.</given-names>
            </name>
            <name>
              <surname>Yan</surname>
              <given-names>H.</given-names>
            </name>
            <name>
              <surname>Wang</surname>
              <given-names>K.</given-names>
            </name>
            <name>
              <surname>Ma</surname>
              <given-names>Y.</given-names>
            </name>
            <name>
              <surname>Shen</surname>
              <given-names>L.</given-names>
            </name>
            <collab>Alzheimer's Disease Neuroimaging Initiative</collab>
            <collab/>
          </person-group>
          <article-title>A multi-model deep convolutional neural network for automatic hippocampus segmentation and classification in Alzheimer's disease</article-title>
          <source>NeuroImage</source>
          <year>2020</year>
          <volume>208</volume>
          <fpage>116459</fpage>
          <issn>1095-9572</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.1016/j.neuroimage.2019.116459</pub-id>
          <pub-id pub-id-type="pmid">31837471</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137455">
        <element-citation publication-type="inproceedings">
          <person-group person-group-type="author">
            <name>
              <surname>Farooq</surname>
              <given-names>A.</given-names>
            </name>
            <collab/>
            <etal/>
          </person-group>
          <person-group person-group-type="editor">
            <etal/>
          </person-group>
          <article-title>A deep CNN based multi-class classification of Alzheimer's disease using MRI</article-title>
          <source>2017 IEEE International Conference on Imaging systems and techniques (IST)</source>
          <year>2017</year>
          <publisher-name>IEEE</publisher-name>
          <pub-id pub-id-type="doi">https://doi.org/10.1109/IST.2017.8261460</pub-id>
          <fpage>1</fpage>
          <lpage>6</lpage>
        </element-citation>
      </ref>
      <ref id="R208498129137456">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Ahuja</surname>
              <given-names>A.S.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>The impact of artificial intelligence in medicine on the future role of the physician</article-title>
          <source>PeerJ</source>
          <year>2019</year>
          <volume>7</volume>
          <fpage>e7702</fpage>
          <issn>2167-8359</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.7717/peerj.7702</pub-id>
          <pub-id pub-id-type="pmid">31592346</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137457">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Patel</surname>
              <given-names>H.</given-names>
            </name>
            <name>
              <surname>Dobson</surname>
              <given-names>R.J.</given-names>
            </name>
            <name>
              <surname>Newhouse</surname>
              <given-names>S.J.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>A meta-analysis of Alzheimer's disease brain transcriptomic data</article-title>
          <source>Journal of Alzheimer's Disease</source>
          <year>2019</year>
          <volume>68</volume>
          <issue>4</issue>
          <fpage>1635</fpage>
          <lpage>56</lpage>
          <issn>1875-8908</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.3233/JAD-181085</pub-id>
          <pub-id pub-id-type="pmid">30909231</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137458">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Jagust</surname>
              <given-names>W.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Imaging the evolution and pathophysiology of Alzheimer disease</article-title>
          <source>Nature Reviews. Neuroscience</source>
          <year>2018</year>
          <volume>19</volume>
          <issue>11</issue>
          <fpage>687</fpage>
          <lpage>700</lpage>
          <issn>1471-0048</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.1038/s41583-018-0067-3</pub-id>
          <pub-id pub-id-type="pmid">30266970</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137459">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Gautam</surname>
              <given-names>P.</given-names>
            </name>
            <name>
              <surname>Cherbuin</surname>
              <given-names>N.</given-names>
            </name>
            <name>
              <surname>Sachdev</surname>
              <given-names>P.S.</given-names>
            </name>
            <name>
              <surname>Wen</surname>
              <given-names>W.</given-names>
            </name>
            <name>
              <surname>Anstey</surname>
              <given-names>K.J.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Relationships between cognitive function and frontal grey matter volumes and thickness in middle aged and early old-aged adults: the PATH Through Life Study</article-title>
          <source>NeuroImage</source>
          <year>2011</year>
          <volume>55</volume>
          <issue>3</issue>
          <fpage>845</fpage>
          <lpage>55</lpage>
          <issn>1095-9572</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.1016/j.neuroimage.2011.01.015</pub-id>
          <pub-id pub-id-type="pmid">21255657</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137460">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Fang</surname>
              <given-names>C.</given-names>
            </name>
            <name>
              <surname>Li</surname>
              <given-names>C.</given-names>
            </name>
            <name>
              <surname>Forouzannezhad</surname>
              <given-names>P.</given-names>
            </name>
            <name>
              <surname>Cabrerizo</surname>
              <given-names>M.</given-names>
            </name>
            <name>
              <surname>Curiel</surname>
              <given-names>R.E.</given-names>
            </name>
            <name>
              <surname>Loewenstein</surname>
              <given-names>D.</given-names>
            </name>
            <collab>Alzheimer's Disease Neuroimaging Initiative</collab>
          </person-group>
          <article-title>Gaussian discriminative component analysis for early detection of Alzheimer's disease: A supervised dimensionality reduction algorithm</article-title>
          <source>Journal of Neuroscience Methods</source>
          <year>2020</year>
          <volume>344</volume>
          <fpage>108856</fpage>
          <issn>1872-678X</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.1016/j.jneumeth.2020.108856</pub-id>
          <pub-id pub-id-type="pmid">32663548</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137461">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Khagi</surname>
              <given-names>B.</given-names>
            </name>
            <name>
              <surname>Kwon</surname>
              <given-names>G.R.</given-names>
            </name>
            <name>
              <surname>Lama</surname>
              <given-names>R.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Comparative analysis of Alzheimer's disease classification by CDR level using CNN, feature selection, and machine-learning techniques</article-title>
          <source>International Journal of Imaging Systems and Technology</source>
          <year>2019</year>
          <volume>29</volume>
          <issue>3</issue>
          <fpage>297</fpage>
          <lpage>310</lpage>
          <issn>0899-9457</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.1002/ima.22316</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137462">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Khaire</surname>
              <given-names>U.M.</given-names>
            </name>
            <name>
              <surname>Dhanalakshmi</surname>
              <given-names>R.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Stability of feature selection algorithm: A review</article-title>
          <source>Journal of King Saud University. Computer and Information Sciences</source>
          <year>2019</year>
          <volume>34</volume>
          <issue>4</issue>
          <fpage>1060</fpage>
          <lpage>73</lpage>
          <issn>1319-1578</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.1016/j.jksuci.2019.06.012</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137463">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Goryawala</surname>
              <given-names>M.</given-names>
            </name>
            <collab/>
            <etal/>
          </person-group>
          <article-title>Inclusion of neuropsychological scores in atrophy models improves diagnostic classification of Alzheimer’s disease and mild cognitive impairment</article-title>
          <source>Computational intelligence and neuroscience</source>
          <year>2015</year>
          <volume>2015</volume>
          <fpage>865265</fpage>
          <pub-id pub-id-type="doi">https://doi.org/10.1155/2015/865265</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137464">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Ahmed</surname>
              <given-names>H.M.</given-names>
            </name>
            <name>
              <surname>Elsharkawy</surname>
              <given-names>Z.F.</given-names>
            </name>
            <name>
              <surname>Elkorany</surname>
              <given-names>A.S.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Alzheimer disease diagnosis for magnetic resonance brain images using deep learning neural networks</article-title>
          <source>Multimedia Tools and Applications</source>
          <year>2023</year>
          <volume>82</volume>
          <issue>12</issue>
          <fpage>17963</fpage>
          <lpage>77</lpage>
          <issn>1380-7501</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.1007/s11042-022-14203-1</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137470">
        <element-citation publication-type="misc">
          <person-group person-group-type="author">
            <name>
              <surname>Oktavian</surname>
              <given-names>M.W.</given-names>
            </name>
            <name>
              <surname>Yudistira</surname>
              <given-names>N.</given-names>
            </name>
            <name>
              <surname>Ridok</surname>
              <given-names>A.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Classification of Alzheimer's Disease Using the Convolutional Neural Network (CNN) with Transfer Learning and Weighted Loss</article-title>
          <source>arXiv preprint arXiv:2207.01584</source>
          <year>2022</year>
        </element-citation>
      </ref>
      <ref id="R208498129137469">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Lim</surname>
              <given-names>B.Y.</given-names>
            </name>
            <name>
              <surname>Lai</surname>
              <given-names>K.W.</given-names>
            </name>
            <name>
              <surname>Haiskin</surname>
              <given-names>K.</given-names>
            </name>
            <name>
              <surname>Kulathilake</surname>
              <given-names>K.A.</given-names>
            </name>
            <name>
              <surname>Ong</surname>
              <given-names>Z.C.</given-names>
            </name>
            <name>
              <surname>Hum</surname>
              <given-names>Y.C.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Deep learning model for prediction of progressive mild cognitive impairment to Alzheimer's disease using structural MRI</article-title>
          <source>Frontiers in Aging Neuroscience</source>
          <year>2022</year>
          <volume>14</volume>
          <fpage>876202</fpage>
          <issn>1663-4365</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.3389/fnagi.2022.876202</pub-id>
          <pub-id pub-id-type="pmid">35721012</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137468">
        <element-citation publication-type="misc">
          <person-group person-group-type="author">
            <name>
              <surname>Stubblefield</surname>
              <given-names>J.</given-names>
            </name>
            <collab/>
            <etal/>
          </person-group>
          <article-title>Study the combination of brain MRI imaging and other datatypes to improve Alzheimer's disease diagnosis</article-title>
          <source>medRxiv</source>
          <year>2022</year>
        </element-citation>
      </ref>
      <ref id="R208498129137471">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Lin</surname>
              <given-names>L.</given-names>
            </name>
            <name>
              <surname>Xiong</surname>
              <given-names>M.</given-names>
            </name>
            <name>
              <surname>Zhang</surname>
              <given-names>G.</given-names>
            </name>
            <name>
              <surname>Kang</surname>
              <given-names>W.</given-names>
            </name>
            <name>
              <surname>Sun</surname>
              <given-names>S.</given-names>
            </name>
            <name>
              <surname>Wu</surname>
              <given-names>S.</given-names>
            </name>
            <collab>Alzheimer's Disease Neuroimaging Initiative</collab>
          </person-group>
          <article-title>A Convolutional Neural Network and Graph Convolutional Network Based Framework for AD Classification</article-title>
          <source>Sensors (Basel)</source>
          <year>2023</year>
          <volume>23</volume>
          <issue>4</issue>
          <fpage>1914</fpage>
          <issn>1424-8220</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.3390/s23041914</pub-id>
          <pub-id pub-id-type="pmid">36850510</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137466">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Lin</surname>
              <given-names>W.</given-names>
            </name>
            <name>
              <surname>Gao</surname>
              <given-names>Q.</given-names>
            </name>
            <name>
              <surname>Du</surname>
              <given-names>M.</given-names>
            </name>
            <name>
              <surname>Chen</surname>
              <given-names>W.</given-names>
            </name>
            <name>
              <surname>Tong</surname>
              <given-names>T.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Multiclass diagnosis of stages of Alzheimer's disease using linear discriminant analysis scoring for multimodal data</article-title>
          <source>Computers in Biology and Medicine</source>
          <year>2021</year>
          <volume>134</volume>
          <fpage>104478</fpage>
          <issn>1879-0534</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.1016/j.compbiomed.2021.104478</pub-id>
          <pub-id pub-id-type="pmid">34000523</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137465">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Xu</surname>
              <given-names>Z.</given-names>
            </name>
            <name>
              <surname>Deng</surname>
              <given-names>H.</given-names>
            </name>
            <name>
              <surname>Liu</surname>
              <given-names>J.</given-names>
            </name>
            <name>
              <surname>Yang</surname>
              <given-names>Y.</given-names>
            </name>
            <collab/>
          </person-group>
          <article-title>Diagnosis of Alzheimer's Disease Based on the Modified Tresnet</article-title>
          <source>Electronics (Basel)</source>
          <year>2021</year>
          <volume>10</volume>
          <issue>16</issue>
          <fpage>1908</fpage>
          <issn>2079-9292</issn>
          <pub-id pub-id-type="doi">https://doi.org/10.3390/electronics10161908</pub-id>
        </element-citation>
      </ref>
      <ref id="R208498129137467">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Liu</surname>
              <given-names>S.</given-names>
            </name>
            <name>
              <surname>Yadav</surname>
              <given-names>C.</given-names>
            </name>
            <name>
              <surname>Fernandez-Granda</surname>
              <given-names>C.</given-names>
            </name>
            <name>
              <surname>Razavian</surname>
              <given-names>N.</given-names>
            </name>
            <collab/>
            <etal/>
          </person-group>
          <article-title>On the design of convolutional neural networks for automatic detection of Alzheimer’s disease</article-title>
          <source>Machine Learning for Health Workshop</source>
          <year>2020</year>
          <volume>2020</volume>
          <fpage>184</fpage>
          <lpage>201</lpage>
          <uri>http://proceedings.mlr.press/v116/liu20a</uri>
        </element-citation>
      </ref>
    </ref-list>
  </back>
</article>
