<?xml version="1.0" encoding="UTF-8"?><xml><records><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Hans Clausdorff Fiedler</style></author><author><style face="normal" font="default" size="100%">Ross Prager</style></author><author><style face="normal" font="default" size="100%">Delaney Smith</style></author><author><style face="normal" font="default" size="100%">Derek Wu</style></author><author><style face="normal" font="default" size="100%">Chintan Dave</style></author><author><style face="normal" font="default" size="100%">Jared Tschirhart</style></author><author><style face="normal" font="default" size="100%">Ben Wu</style></author><author><style face="normal" font="default" size="100%">Blake VanBerlo</style></author><author><style face="normal" font="default" size="100%">Richard Malthaner</style></author><author><style face="normal" font="default" size="100%">Robert Arntfield</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Automated real-time detection of lung sliding using artificial intelligence: a prospective diagnostic accuracy study</style></title><secondary-title><style face="normal" font="default" size="100%">Chest</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2025</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.sciencedirect.com/science/article/abs/pii/S0012369224001570</style></url></web-urls></urls><volume><style face="normal" font="default" size="100%">166</style></volume><pages><style face="normal" font="default" size="100%">362-370</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><issue><style face="normal" font="default" size="100%">2</style></issue></record><record><source-app name="Biblio" 
version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Rushil Chaudhary</style></author><author><style face="normal" font="default" size="100%">Jordan Ho</style></author><author><style face="normal" font="default" size="100%">Delaney Smith</style></author><author><style face="normal" font="default" size="100%">Saad Hossain</style></author><author><style face="normal" font="default" size="100%">Jaswin Hargun</style></author><author><style face="normal" font="default" size="100%">Blake VanBerlo</style></author><author><style face="normal" font="default" size="100%">Niall Murphy</style></author><author><style face="normal" font="default" size="100%">Ross Prager</style></author><author><style face="normal" font="default" size="100%">Kiran Rikhraj</style></author><author><style face="normal" font="default" size="100%">Jared Tschirhart</style></author><author><style face="normal" font="default" size="100%">Robert Arntfield</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Diagnostic accuracy of an automated classifier for the detection of pleural effusions in patients undergoing lung ultrasound</style></title><secondary-title><style face="normal" font="default" size="100%">The American Journal of Emergency Medicine</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2025</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.sciencedirect.com/science/article/abs/pii/S0735675725000476</style></url></web-urls></urls><volume><style face="normal" font="default" size="100%">90</style></volume><pages><style face="normal" font="default" size="100%">142-150</style></pages><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" 
version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Hans Clausdorff Fiedler</style></author><author><style face="normal" font="default" size="100%">Ross Prager</style></author><author><style face="normal" font="default" size="100%">Delaney Smith</style></author><author><style face="normal" font="default" size="100%">Derek Wu</style></author><author><style face="normal" font="default" size="100%">Chintan Dave</style></author><author><style face="normal" font="default" size="100%">Jared Tschirhart</style></author><author><style face="normal" font="default" size="100%">Ben Wu</style></author><author><style face="normal" font="default" size="100%">Blake VanBerlo</style></author><author><style face="normal" font="default" size="100%">Richard Malthaner</style></author><author><style face="normal" font="default" size="100%">Robert Arntfield</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Automated Real-Time Detection of Lung Sliding Using Artificial Intelligence: A Prospective Diagnostic Accuracy Study</style></title><secondary-title><style face="normal" font="default" size="100%">Chest</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2024</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.sciencedirect.com/science/article/pii/S0012369224001570?casa_token=g13WQ_x28TkAAAAA:D-mzIIM96w9NQSQkviivCleNzgdj9l5lRWrlvoOBy5MBXPKNf6Kuj-8OyqATsj_47pxnjfqVnlo</style></url></web-urls></urls><pages><style face="normal" font="default" size="100%">S0012-3692</style></pages><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Derek 
Wu</style></author><author><style face="normal" font="default" size="100%">Delaney Smith</style></author><author><style face="normal" font="default" size="100%">Blake VanBerlo</style></author><author><style face="normal" font="default" size="100%">Amir Roshankar</style></author><author><style face="normal" font="default" size="100%">Hoseok Lee</style></author><author><style face="normal" font="default" size="100%">Brian Li</style></author><author><style face="normal" font="default" size="100%">Faraz Ali</style></author><author><style face="normal" font="default" size="100%">Marwan Rahman</style></author><author><style face="normal" font="default" size="100%">John Basmaji</style></author><author><style face="normal" font="default" size="100%">Jared Tschirhart</style></author><author><style face="normal" font="default" size="100%">Alex Ford</style></author><author><style face="normal" font="default" size="100%">Bennett VanBerlo</style></author><author><style face="normal" font="default" size="100%">Ashritha Durvasula</style></author><author><style face="normal" font="default" size="100%">Claire Vannelli</style></author><author><style face="normal" font="default" size="100%">Chintan Dave</style></author><author><style face="normal" font="default" size="100%">Jason Deglint</style></author><author><style face="normal" font="default" size="100%">Jordan Ho</style></author><author><style face="normal" font="default" size="100%">Rushil Chaudhary</style></author><author><style face="normal" font="default" size="100%">Hans Clausdorff</style></author><author><style face="normal" font="default" size="100%">Ross Prager</style></author><author><style face="normal" font="default" size="100%">Scott Millington</style></author><author><style face="normal" font="default" size="100%">Samveg Shah</style></author><author><style face="normal" font="default" size="100%">Brian Buchanan</style></author><author><style face="normal" font="default" size="100%">Robert 
Arntfield</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Improving the Generalizability and Performance of an Ultrasound Deep Learning Model Using Limited Multicenter Data for Lung Sliding Artifact Identification</style></title><secondary-title><style face="normal" font="default" size="100%">Diagnostics</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2024</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.mdpi.com/2075-4418/14/11/1081</style></url></web-urls></urls><volume><style face="normal" font="default" size="100%">14</style></volume><pages><style face="normal" font="default" size="100%">1081</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><issue><style face="normal" font="default" size="100%">11</style></issue></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Blake VanBerlo</style></author><author><style face="normal" font="default" size="100%">Alexander Wong</style></author><author><style face="normal" font="default" size="100%">Jesse Hoey</style></author><author><style face="normal" font="default" size="100%">Robert Arntfield</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Intra-video positive pairs in self-supervised learning for ultrasound</style></title><secondary-title><style face="normal" font="default" size="100%">Frontiers in Imaging</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2024</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.frontiersin.org/articles/10.3389/fimag.2024.1416114/full</style></url></web-urls></urls><volume><style face="normal" font="default" 
size="100%">3</style></volume><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;
	&lt;strong&gt;Introduction:&lt;/strong&gt;&amp;nbsp;Self-supervised learning (SSL) is a strategy for addressing the paucity of labelled data in medical imaging by learning representations from unlabelled images. Contrastive and non-contrastive SSL methods produce learned representations that are similar for pairs of related images. Such pairs are commonly constructed by randomly distorting the same image twice. The videographic nature of ultrasound offers flexibility for defining the similarity relationship between pairs of images.
&lt;/p&gt;

&lt;p&gt;
	&lt;strong&gt;Methods:&lt;/strong&gt;&amp;nbsp;We investigated the effect of utilizing proximal, distinct images from the same B-mode ultrasound video as pairs for SSL. Additionally, we introduced a sample weighting scheme that increases the weight of closer image pairs and demonstrated how it can be integrated into SSL objectives.
&lt;/p&gt;

&lt;p&gt;
	&lt;strong&gt;Results:&lt;/strong&gt;&amp;nbsp;Named&amp;nbsp;&lt;i&gt;Intra-Video Positive Pairs&lt;/i&gt;&amp;nbsp;(IVPP), the method surpassed previous ultrasound-specific contrastive learning methods' average test accuracy on COVID-19 classification with the POCUS dataset by ≥ 1.3%. Detailed investigations of IVPP's hyperparameters revealed that some combinations of IVPP hyperparameters can lead to improved or worsened performance, depending on the downstream task.
&lt;/p&gt;

&lt;p&gt;
	&lt;strong&gt;Discussion:&lt;/strong&gt;&amp;nbsp;Guidelines for practitioners were synthesized based on the results, such as the merit of IVPP with task-specific hyperparameters, and the improved performance of contrastive methods for ultrasound compared to non-contrastive counterparts.
&lt;/p&gt;
</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Blake VanBerlo</style></author><author><style face="normal" font="default" size="100%">Jesse Hoey</style></author><author><style face="normal" font="default" size="100%">Alexander Wong</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">A Survey of the Impact of Self-Supervised Pretraining for Diagnostic Tasks in Medical X-ray, CT, MRI, and Ultrasound</style></title><secondary-title><style face="normal" font="default" size="100%">BMC Medical Imaging</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2024</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://link.springer.com/article/10.1186/s12880-024-01253-0</style></url></web-urls></urls><volume><style face="normal" font="default" size="100%">24</style></volume><pages><style face="normal" font="default" size="100%">79</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><issue><style face="normal" font="default" size="100%">1</style></issue></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>47</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">VanBerlo, Blake</style></author><author><style face="normal" font="default" size="100%">Li, Brian</style></author><author><style face="normal" font="default" size="100%">Wong, Alexander</style></author><author><style face="normal" font="default" size="100%">Hoey, Jesse</style></author><author><style face="normal" font="default" size="100%">Arntfield, Robert</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Exploring the Utility of Self-Supervised Pretraining 
Strategies for the Detection of Absent Lung Sliding in M-Mode Lung Ultrasound</style></title><secondary-title><style face="normal" font="default" size="100%">Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2023</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://openaccess.thecvf.com/content/CVPR2023W/DL-UIA/html/VanBerlo_Exploring_the_Utility_of_Self-Supervised_Pretraining_Strategies_for_the_Detection_CVPRW_2023_paper.html</style></url></web-urls></urls><pages><style face="normal" font="default" size="100%">3076–3085</style></pages><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Dave, Chintan</style></author><author><style face="normal" font="default" size="100%">Wu, Derek</style></author><author><style face="normal" font="default" size="100%">Tschirhart, Jared</style></author><author><style face="normal" font="default" size="100%">Smith, Delaney</style></author><author><style face="normal" font="default" size="100%">VanBerlo, Blake</style></author><author><style face="normal" font="default" size="100%">Deglint, Jason</style></author><author><style face="normal" font="default" size="100%">Ali, Faraz</style></author><author><style face="normal" font="default" size="100%">Chaudhary, Rushil</style></author><author><style face="normal" font="default" size="100%">VanBerlo, Bennett</style></author><author><style face="normal" font="default" size="100%">Ford, Alex</style></author><author><style face="normal" font="default" size="100%">others</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Prospective Real-Time Validation of a 
Lung Ultrasound Deep Learning Model in the ICU</style></title><secondary-title><style face="normal" font="default" size="100%">Critical Care Medicine</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2023</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.ingentaconnect.com/content/wk/ccm/2023/00000051/00000002/art00025</style></url></web-urls></urls><number><style face="normal" font="default" size="100%">2</style></number><publisher><style face="normal" font="default" size="100%">Wolters Kluwer</style></publisher><volume><style face="normal" font="default" size="100%">51</style></volume><pages><style face="normal" font="default" size="100%">301–309</style></pages><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Blake VanBerlo</style></author><author><style face="normal" font="default" size="100%">Brian Li</style></author><author><style face="normal" font="default" size="100%">Jesse Hoey</style></author><author><style face="normal" font="default" size="100%">Alexander Wong</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Self-Supervised Pretraining Improves Performance and Inference Efficiency in Multiple Lung Ultrasound Interpretation Tasks</style></title><secondary-title><style face="normal" font="default" size="100%">IEEE Access</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2023</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://ieeexplore.ieee.org/document/10332196</style></url></web-urls></urls><volume><style face="normal" font="default" size="100%">11</style></volume><pages><style face="normal" 
font="default" size="100%">135696-135707</style></pages><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Blake VanBerlo</style></author><author><style face="normal" font="default" size="100%">Derek Wu</style></author><author><style face="normal" font="default" size="100%">Brian Li</style></author><author><style face="normal" font="default" size="100%">Marwan A. Rahman</style></author><author><style face="normal" font="default" size="100%">Gregory Hogg</style></author><author><style face="normal" font="default" size="100%">Bennett VanBerlo</style></author><author><style face="normal" font="default" size="100%">Jared Tschirhart</style></author><author><style face="normal" font="default" size="100%">Alex Ford</style></author><author><style face="normal" font="default" size="100%">Jordan Ho</style></author><author><style face="normal" font="default" size="100%">Joseph McCauley</style></author><author><style face="normal" font="default" size="100%">Benjamin Wu</style></author><author><style face="normal" font="default" size="100%">Jason Deglint</style></author><author><style face="normal" font="default" size="100%">Jaswin Hargun</style></author><author><style face="normal" font="default" size="100%">Rushil Chaudhary</style></author><author><style face="normal" font="default" size="100%">Chintan Dave</style></author><author><style face="normal" font="default" size="100%">Robert Arntfield</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Accurate assessment of the lung sliding artefact on lung ultrasonography using a deep learning approach</style></title><secondary-title><style face="normal" font="default" size="100%">Computers in Biology and Medicine</style></secondary-title></titles><dates><year><style  
face="normal" font="default" size="100%">2022</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://doi.org/10.1016/j.compbiomed.2022.105953</style></url></web-urls></urls><volume><style face="normal" font="default" size="100%">148</style></volume><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Pneumothorax is a potentially life-threatening condition that can be rapidly and accurately assessed via the lung sliding artefact generated using lung ultrasound (LUS). Access to LUS is challenged by user dependence and shortage of training. Image classification using deep learning methods can automate interpretation in LUS and has not been thoroughly studied for lung sliding. Using a labelled LUS dataset from 2 academic hospitals, clinical B-mode (also known as brightness or two-dimensional mode) videos featuring both presence and absence of lung sliding were transformed into motion (M) mode images. These images were subsequently used to train a deep neural network binary classifier that was evaluated using a holdout set comprising 15% of the total data. Grad-CAM explanations were examined. Our binary classifier using the EfficientNetB0 architecture was trained using 2535 LUS clips from 614 patients. When evaluated on a test set of data uninvolved in training (540 clips from 124 patients), the model performed with a sensitivity of 93.5%, specificity of 87.3% and an area under the receiver operating characteristic curve (AUC) of 0.973. Grad-CAM explanations confirmed the model’s focus on relevant regions on M-mode images. 
Our solution accurately distinguishes between the presence and absence of lung sliding artefacts on LUS.</style></abstract><issue><style face="normal" font="default" size="100%">0010-4825</style></issue></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Bennett VanBerlo</style></author><author><style face="normal" font="default" size="100%">Delaney Smith</style></author><author><style face="normal" font="default" size="100%">Jared Tschirhart</style></author><author><style face="normal" font="default" size="100%">Blake VanBerlo</style></author><author><style face="normal" font="default" size="100%">Derek Wu</style></author><author><style face="normal" font="default" size="100%">Alex Ford</style></author><author><style face="normal" font="default" size="100%">Joseph McCauley</style></author><author><style face="normal" font="default" size="100%">Benjamin Wu</style></author><author><style face="normal" font="default" size="100%">Rushil Chaudhary</style></author><author><style face="normal" font="default" size="100%">Chintan Dave</style></author><author><style face="normal" font="default" size="100%">Jordan Ho</style></author><author><style face="normal" font="default" size="100%">Jason Deglint</style></author><author><style face="normal" font="default" size="100%">Brian Li</style></author><author><style face="normal" font="default" size="100%">Robert Arntfield</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Enhancing Annotation Efficiency with Machine Learning: Automated Partitioning of a Lung Ultrasound Dataset by View</style></title><secondary-title><style face="normal" font="default" size="100%">Diagnostics</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2022</style></year></dates><urls><web-urls><url><style face="normal" 
font="default" size="100%">https://www.mdpi.com/2075-4418/12/10/2351</style></url></web-urls></urls><volume><style face="normal" font="default" size="100%">12</style></volume><pages><style face="normal" font="default" size="100%">2351</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Background: Annotating large medical imaging datasets is an arduous and expensive task, especially when the datasets in question are not organized according to deep learning goals. Here, we propose a method that exploits the hierarchical organization of annotating tasks to optimize efficiency. Methods: We trained a machine learning model to accurately distinguish between one of two classes of lung ultrasound (LUS) views using 2908 clips from a larger dataset. Partitioning the remaining dataset by view would reduce downstream labelling efforts by enabling annotators to focus on annotating pathological features specific to each view. Results: In a sample view-specific annotation task, we found that automatically partitioning a 780-clip dataset by view saved 42 min of manual annotation time and resulted in&amp;nbsp;55±6&amp;nbsp;additional relevant labels per hour. Conclusions: Automatic partitioning of a LUS dataset by view significantly increases annotator efficiency, resulting in higher throughput relevant to the annotating task at hand. 
The strategy described in this work can be applied to other hierarchical annotation schemes.</style></abstract><issue><style face="normal" font="default" size="100%">10</style></issue></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Arntfield, Robert</style></author><author><style face="normal" font="default" size="100%">Wu, Derek</style></author><author><style face="normal" font="default" size="100%">Tschirhart, Jared</style></author><author><style face="normal" font="default" size="100%">VanBerlo, Blake</style></author><author><style face="normal" font="default" size="100%">Ford, Alex</style></author><author><style face="normal" font="default" size="100%">Ho, Jordan</style></author><author><style face="normal" font="default" size="100%">McCauley, Joseph</style></author><author><style face="normal" font="default" size="100%">Wu, Benjamin</style></author><author><style face="normal" font="default" size="100%">Deglint, Jason</style></author><author><style face="normal" font="default" size="100%">Chaudhary, Rushil</style></author><author><style face="normal" font="default" size="100%">Dave, Chintan</style></author><author><style face="normal" font="default" size="100%">VanBerlo, Bennett</style></author><author><style face="normal" font="default" size="100%">Basmaji, John</style></author><author><style face="normal" font="default" size="100%">Millington, Scott</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Automation of Lung Ultrasound Interpretation via Deep Learning for the Classification of Normal versus Abnormal Lung Parenchyma: A Multicenter Study</style></title><secondary-title><style face="normal" font="default" size="100%">Diagnostics</style></secondary-title></titles><dates><year><style  face="normal" font="default" 
size="100%">2021</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://www.mdpi.com/2075-4418/11/11/2049</style></url></web-urls></urls><number><style face="normal" font="default" size="100%">11</style></number><volume><style face="normal" font="default" size="100%">11</style></volume><pages><style face="normal" font="default" size="100%">2049</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Lung ultrasound (LUS) is an accurate thoracic imaging technique distinguished by its handheld size, low-cost, and lack of radiation. User dependence and poor access to training have limited the impact and dissemination of LUS outside of acute care hospital environments. Automated interpretation of LUS using deep learning can overcome these barriers by increasing accuracy while allowing point-of-care use by non-experts. In this multicenter study, we seek to automate the clinically vital distinction between A line (normal parenchyma) and B line (abnormal parenchyma) on LUS by training a customized neural network using 272,891 labelled LUS images. After external validation on 23,393 frames, pragmatic clinical application at the clip level was performed on 1162 videos. The trained classifier demonstrated an area under the receiver operating curve (AUC) of 0.96 (±0.02) through 10-fold cross-validation on local frames and an AUC of 0.93 on the external validation dataset. Clip-level inference yielded sensitivities and specificities of 90% and 92% (local) and 83% and 82% (external), respectively, for detecting the B line pattern. 
This study demonstrates accurate deep-learning-enabled LUS interpretation between normal and abnormal lung parenchyma on ultrasound frames while rendering diagnostically important sensitivity and specificity at the video clip level.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Arntfield, Robert</style></author><author><style face="normal" font="default" size="100%">VanBerlo, Blake</style></author><author><style face="normal" font="default" size="100%">Alaifan, Thamer</style></author><author><style face="normal" font="default" size="100%">Phelps, Nathan</style></author><author><style face="normal" font="default" size="100%">White, Matthew</style></author><author><style face="normal" font="default" size="100%">Chaudhary, Rushil</style></author><author><style face="normal" font="default" size="100%">Ho, Jordan</style></author><author><style face="normal" font="default" size="100%">Wu, Derek</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Development of a convolutional neural network to differentiate among the etiology of similar appearing pathological B lines on lung ultrasound: a deep learning study</style></title><secondary-title><style face="normal" font="default" size="100%">BMJ Open</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2021</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://bmjopen.bmj.com/content/11/3/e045120</style></url></web-urls></urls><number><style face="normal" font="default" size="100%">3</style></number><publisher><style face="normal" font="default" size="100%">British Medical Journal Publishing Group</style></publisher><volume><style face="normal" font="default" size="100%">11</style></volume><language><style face="normal" font="default" 
size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Objectives Lung ultrasound (LUS) is a portable, low-cost respiratory imaging tool but is challenged by user dependence and lack of diagnostic specificity. It is unknown whether the advantages of LUS implementation could be paired with deep learning (DL) techniques to match or exceed human-level, diagnostic specificity among similar appearing, pathological LUS images.Design A convolutional neural network (CNN) was trained on LUS images with B lines of different aetiologies. CNN diagnostic performance, as validated using a 10% data holdback set, was compared with surveyed LUS-competent physicians.Setting Two tertiary Canadian hospitals.Participants 612 LUS videos (121 381 frames) of B lines from 243 distinct patients with either (1) COVID-19 (COVID), non-COVID acute respiratory distress syndrome (NCOVID) or (3) hydrostatic pulmonary edema (HPE).Results The trained CNN performance on the independent dataset showed an ability to discriminate between COVID (area under the receiver operating characteristic curve (AUC) 1.0), NCOVID (AUC 0.934) and HPE (AUC 1.0) pathologies. This was significantly better than physician ability (AUCs of 0.697, 0.704, 0.967 for the COVID, NCOVID and HPE classes, respectively), p&amp;lt;0.01.Conclusions A DL model can distinguish similar appearing LUS pathology, including COVID-19, that cannot be distinguished by humans. 
The performance gap between humans and the model suggests that subvisible biomarkers within ultrasound images could exist and multicentre research is merited.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Groves, Leah A.</style></author><author><style face="normal" font="default" size="100%">VanBerlo, Blake</style></author><author><style face="normal" font="default" size="100%">Veinberg, Natan</style></author><author><style face="normal" font="default" size="100%">Alboog, Abdulrahman</style></author><author><style face="normal" font="default" size="100%">Peters, Terry M.</style></author><author><style face="normal" font="default" size="100%">Chen, Elvis C.S.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Automatic segmentation of the carotid artery and internal jugular vein from 2D ultrasound images for 3D vascular reconstruction</style></title><secondary-title><style face="normal" font="default" size="100%">International Journal of Computer Assisted Radiology and Surgery</style></secondary-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">Automatic segmentation</style></keyword><keyword><style  face="normal" font="default" size="100%">deep learning</style></keyword><keyword><style  face="normal" font="default" size="100%">Surface reconstruction</style></keyword><keyword><style  face="normal" font="default" size="100%">Surgical guidance</style></keyword><keyword><style  face="normal" font="default" size="100%">US</style></keyword><keyword><style  face="normal" font="default" size="100%">Vasculature</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2020</style></year></dates><urls><web-urls><url><style face="normal" font="default" 
size="100%">https://doi.org/10.1007/s11548-020-02248-2</style></url></web-urls></urls><number><style face="normal" font="default" size="100%">Cvc</style></number><publisher><style face="normal" font="default" size="100%">Springer International Publishing</style></publisher><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">Purpose: In the context of analyzing neck vascular morphology, this work formulates and compares Mask R-CNN and U-Net-based algorithms to automatically segment the carotid artery (CA) and internal jugular vein (IJV) from transverse neck ultrasound (US). Methods: US scans of the neck vasculature were collected to produce a dataset of 2439 images and their respective manual segmentations. Fourfold cross-validation was employed to train and evaluate Mask R-CNN and U-Net models. The U-Net algorithm includes a post-processing step that selects the largest connected segmentation for each class. A Mask R-CNN-based vascular reconstruction pipeline was validated by performing a surface-to-surface distance comparison between US and CT reconstructions from the same patient. Results: The average CA and IJV Dice scores produced by the Mask R-CNN across the evaluation data from all four sets were 0.90 ± 0.08 and 0.88 ± 0.14. The average Dice scores produced by the post-processed U-Net were 0.81 ± 0.21 and 0.71 ± 0.23, for the CA and IJV, respectively. The reconstruction algorithm utilizing the Mask R-CNN was capable of producing accurate 3D reconstructions with the majority of US reconstruction surface points being within 2 mm of the CT equivalent. Conclusions: On average, the Mask R-CNN produced more accurate vascular segmentations compared to U-Net. The Mask R-CNN models were used to produce 3D reconstructed vasculature with a similar accuracy to that of a manually segmented CT scan. 
This implementation of the Mask R-CNN network enables automatic analysis of the neck vasculature and facilitates 3D vascular reconstruction.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Leah A. Groves</style></author><author><style face="normal" font="default" size="100%">Natalie Li</style></author><author><style face="normal" font="default" size="100%">Blake VanBerlo</style></author><author><style face="normal" font="default" size="100%">Natan Veinberg</style></author><author><style face="normal" font="default" size="100%">Terry M. Peters</style></author><author><style face="normal" font="default" size="100%">Elvis C. S. Chen</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Improving central line needle insertions using in-situ vascular reconstructions</style></title><secondary-title><style face="normal" font="default" size="100%">Computer Methods in Biomechanics and Biomedical Engineering: Imaging &amp; Visualization</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2020</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">https://doi.org/10.1080/21681163.2020.1835542</style></url></web-urls></urls><publisher><style face="normal" font="default" size="100%">Taylor &amp; Francis</style></publisher><pages><style face="normal" font="default" size="100%">1-7</style></pages><language><style face="normal" font="default" size="100%">eng</style></language></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">VanBerlo, Blake</style></author><author><style face="normal" font="default" size="100%">Ross, Matthew A. 
S.</style></author><author><style face="normal" font="default" size="100%">Rivard, Jonathan</style></author><author><style face="normal" font="default" size="100%">Booker, Ryan</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Interpretable Machine Learning Approaches to Prediction of Chronic Homelessness</style></title></titles><dates><year><style  face="normal" font="default" size="100%">2020</style></year></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://arxiv.org/abs/2009.09072</style></url></web-urls></urls><pages><style face="normal" font="default" size="100%">1–14</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">We introduce a machine learning approach to predict chronic homelessness from de-identified client shelter records drawn from a commonly used Canadian homelessness management information system. Using a 30-day time step, a dataset for 6521 individuals was generated. Our model, HIFIS-RNN-MLP, incorporates both static and dynamic features of a client's history to forecast chronic homelessness 6 months into the client's future. The training method was fine-tuned to achieve a high F1-score, giving a desired balance between high recall and precision. Mean recall and precision across 10-fold cross validation were 0.921 and 0.651 respectively. An interpretability method was applied to explain individual predictions and gain insight into the overall factors contributing to chronic homelessness among the population studied. 
The model achieves state-of-the-art performance and improved stakeholder trust of what is usually a &quot;black box&quot; neural network model through interpretable AI.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Groves, Leah A.</style></author><author><style face="normal" font="default" size="100%">VanBerlo, Blake</style></author><author><style face="normal" font="default" size="100%">Peters, Terry M.</style></author><author><style face="normal" font="default" size="100%">Chen, Elvis C.S.</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Deep learning approach for automatic out-of-plane needle localisation for semi-automatic ultrasound probe calibration</style></title><secondary-title><style face="normal" font="default" size="100%">Healthcare Technology Letters</style></secondary-title></titles><dates><year><style  face="normal" font="default" size="100%">2019</style></year></dates><number><style face="normal" font="default" size="100%">6</style></number><volume><style face="normal" font="default" size="100%">6</style></volume><pages><style face="normal" font="default" size="100%">204–209</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">The authors present a deep learning algorithm for the automatic centroid localisation of out-of-plane US needle reflections to produce a semi-automatic ultrasound (US) probe calibration algorithm. A convolutional neural network was trained on a dataset of 3825 images at a 6 cm imaging depth to predict the position of the centroid of a needle reflection. 
Applying the automatic centroid localisation algorithm to a test set of 614 annotated images produced a root mean squared error of 0.62 and 0.74 mm (6.08 and 7.62 pixels) in the axial and lateral directions, respectively. The mean absolute errors associated with the test set were 0.50 ± 0.40 mm and 0.51 ± 0.54 mm (4.9 ± 3.96 pixels and 5.24 ± 5.52 pixels) for the axial and lateral directions, respectively. The trained model was able to produce visually validated US probe calibrations at imaging depths in the range of 4–8 cm, despite being solely trained at 6 cm. This work has automated the pixel localisation required for the guided-US calibration algorithm producing a semi-automatic implementation available open-source through 3D Slicer. The automatic needle centroid localisation improves the usability of the algorithm and has the potential to decrease the fiducial localisation and target registration errors associated with the guided-US calibration method.</style></abstract></record></records></xml>