<?xml version="1.0" encoding="UTF-8"?><lom xmlns="http://ltsc.ieee.org/xsd/LOM" xmlns:lomfr="http://www.lom-fr.fr/xsd/LOMFR" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://ltsc.ieee.org/xsd/LOM http://www.lom-fr.fr/xsd/lomfrv1.0/std/lomfr.xsd">
<general>
<identifier>
<catalog>Canal-U_Ocms</catalog>
<entry>33229</entry>
</identifier>
<title><string language="eng"><![CDATA[Auditory scene analysis]]></string></title>
<language>eng</language>
<description>
<string language="eng"><![CDATA[Part 1 : Introduction to Robot Hearing
1.1. Why do robots need to hear?      
1.2. Human-robot interaction      
1.3. Auditory scene analysis      
1.4. Audio signal processing in brief      
1.5. Audio processing in the ear      
1.6. Audio processing in the midbrain      
1.7. Audio processing in the brain]]></string></description>
<keyword><string language="eng"><![CDATA[machine learning]]></string></keyword><keyword><string language="eng"><![CDATA[robotics]]></string></keyword><keyword><string language="eng"><![CDATA[robot hearing]]></string></keyword><keyword><string language="eng"><![CDATA[human-robot interaction]]></string></keyword><keyword><string language="eng"><![CDATA[binaural hearing]]></string></keyword>
<lomfr:documentType>
<lomfr:source>LOMFRv1.0</lomfr:source>
<lomfr:value>image en mouvement</lomfr:value>
</lomfr:documentType>
</general><lifeCycle>
<contribute>
<role>
<source>LOMv1.0</source>
<value>author</value>
</role>
<entity><![CDATA[BEGIN:VCARD
VERSION:3.0
CLASS:PUBLIC
REV:2021-09-16 17:40:59
FN:Radu HORAUD
N:HORAUD;Radu;;;
URL;TYPE=work:https://team.inria.fr/perception/team-members/radu-patrice-horaud/
ROLE:author
NOTE:Radu Patrice Horaud holds a position of research director at INRIA Grenoble Rhône-Alpes. He is the founder and leader of the PERCEPTION team. Radu’s research interests cover computational vision, audio signal processing, audio-visual scene analysis, machine learning, and robotics. He has authored over 160 scientific publications. Radu has pioneered work in computer vision using range data (or depth images) and has developed a number of principles and methods at the cross-roads of computer vision and robotics. In 2006, he started to develop audio-visual fusion and recognition techniques in conjunction with human-robot interaction. Radu Horaud was the scientific coordinator of the European Marie Curie network VISIONTRAIN (2005-2009), STREP projects POP (2006-2008) and HUMAVIPS (2010-2013), and the principal investigator of a collaborative project between INRIA and Samsung’s Advanced Institute of Technology (SAIT) on computer vision algorithms for 3D television (2010-2013). In 2013 he was awarded an ERC Advanced Grant for his five year project VHIA (2014-2019). 
TZ:+0200
END:VCARD
]]></entity>
<date><dateTime>2015-03-16</dateTime></date>
</contribute>
</lifeCycle>
<metaMetadata>
<metadataSchema>LOMv1.0</metadataSchema>
<metadataSchema>LOMFRv1.0</metadataSchema>
</metaMetadata>
<technical>
<format>video/mp4</format>
<location><![CDATA[https://www.canal-u.tv/video/inria/auditory_scene_analysis.33229]]></location>
<location><![CDATA[https://streaming-canal-u.fmsh.fr/vod/media/canalu/videos/fuscia/auditory.scene.analysis_33229/c011rh.w1.s3.en.mp4]]></location>
<size>39568579</size>
<duration><duration>PT0H4M43S</duration></duration>
</technical>
<educational>
<learningResourceType>
<source>LOMv1.0</source>
<value>lecture</value>
</learningResourceType>
<context>
<source>LOMFRv1.0</source>
<value>master</value>
</context>
<context>
<source>LOMFRv1.0</source>
<value>doctorat</value>
</context>
<context>
<source>LOMFRv1.0</source>
<value>master</value>
</context>
</educational>
<rights>
<cost>
<source>LOMv1.0</source>
<value>no</value>
</cost>
<copyrightAndOtherRestrictions>
<source>LOMv1.0</source>
<value>no</value>
</copyrightAndOtherRestrictions>
<description>
<string language="fre"><![CDATA[Droits réservés à l'éditeur et aux auteurs. 
Licence Creative Commons BY-NC-ND : the name of the author should always be mentioned. The user may not use the material for commercial purposes. The user can exploit the work except in a commercial context and he cannot make changes in the original work.]]></string>
</description>
</rights>
<relation>
<kind>
<source>LOMv1.0</source>
<value>ispartof</value>
</kind>
<resource>
<identifier>
<catalog>URI</catalog>
<entry>https://www.canal-u.tv/producteurs/inria/cours_en_ligne/binaural_hearing_for_robots</entry>
</identifier>
<description>
<string language="fre"><![CDATA[Binaural Hearing for Robots]]></string>
</description>
</resource>
</relation>
<relation>
<kind>
<source>LOMv1.0</source>
<value>ispartof</value>
</kind>
<resource>
<identifier>
<catalog>URI</catalog>
<entry>https://www.canal-u.tv/producteurs/inria/cours_en_ligne/binaural_hearing_for_robots/1_introduction_to_robot_hearing</entry>
</identifier>
<description>
<string language="fre"><![CDATA[1: Introduction to Robot Hearing]]></string>
</description>
</resource>
</relation>
<classification>
<purpose>
<source>LOMv1.0</source>
<value>discipline</value>
</purpose>
<taxonPath>
<source>
<string language="fre"><![CDATA[Universités Numériques Thématiques 2009 http://www.universites-numeriques.fr]]></string>
</source>
<taxon>
<id/>
<entry>
<string language="fre"/>
</entry>
</taxon>
</taxonPath>
</classification>
<classification>
<purpose>
<source>LOMv1.0</source>
<value>discipline</value>
</purpose>
<taxonPath>
<source>
<string language="fre">CDD 22e éd.</string>
<string language="eng">DDC 22nd ed.</string>
</source>
<taxon>
<id>629.892</id>
<entry>
<string language="fre"><![CDATA[Robots]]></string>
</entry>
</taxon>
</taxonPath>
</classification> </lom>