<?xml version="1.0" encoding="UTF-8" ?>
<!-- OAI-DC record: namespace declarations restored (prefixes were undeclared)
     and schemaLocation correctly qualified with the xsi: prefix. -->
<oai_dc:dc xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/"
           xmlns:dc="http://purl.org/dc/elements/1.1/"
           xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
           xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/ http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
<dc:title>3D facial merging for virtual human reconstruction</dc:title>
<dc:creator>Pagés Scasso, Rafael</dc:creator>
<dc:creator>Morán Burgos, Francisco</dc:creator>
<dc:subject>Telecomunicaciones</dc:subject>
<dc:subject>Robótica e Informática Industrial</dc:subject>
<dc:description>There is an increasing need of easy and affordable technologies to automatically generate virtual 3D models from their real counterparts. In particular, 3D human reconstruction has driven the creation of many clever techniques, most of them based on the visual hull (VH) concept. Such techniques do not require expensive hardware; however, they tend to yield 3D humanoids with realistic bodies but mediocre faces, since VH cannot handle concavities. On the other hand, structured light projectors allow to capture very accurate depth data, and thus to reconstruct realistic faces, but they are too expensive to use several of them. We have developed a technique to merge a VH-based 3D mesh of a reconstructed humanoid and the depth data of its face, captured by a single structured light projector. By combining the advantages of both systems in a simple setting, we are able to reconstruct realistic 3D human models with believable faces.</dc:description>
<dc:publisher>E.T.S.I. Telecomunicación (UPM)</dc:publisher>
<dc:rights>https://creativecommons.org/licenses/by-nc-nd/3.0/es/</dc:rights>
<dc:date>2012</dc:date>
<dc:type>info:eu-repo/semantics/conferenceObject</dc:type>
<dc:type>Ponencia en Congreso o Jornada</dc:type>
<dc:source>3DTV-Conference: The True Vision - Capture, Transmission and Display of 3D Video (3DTV-CON) | 3DTV-Conference: The True Vision - Capture, Transmission and Display of 3D Video (3DTV-CON) | 15/10/2012 - 17/10/2012 | Zurich, Switzerland</dc:source>
<dc:type>PeerReviewed</dc:type>
<dc:format>application/pdf</dc:format>
<dc:language>eng</dc:language>
<dc:rights>info:eu-repo/semantics/openAccess</dc:rights>
<dc:relation>info:eu-repo/semantics/altIdentifier/doi/10.1109/3DTV.2012.6365448</dc:relation>
<dc:identifier>https://oa.upm.es/30500/</dc:identifier>
</oai_dc:dc>
<?xml version="1.0" encoding="UTF-8" ?>
<!-- DataCite kernel-4 record: default namespace and xsi declarations restored;
     schemaLocation correctly qualified with the xsi: prefix.
     NOTE(review): <editors> is not an element of the DataCite kernel-4 schema,
     and the element order here does not follow the kernel-4 sequence
     (creators/titles/publisher/publicationYear precede relatedIdentifiers) —
     content preserved as exported; confirm against the harvesting source. -->
<resource xmlns="http://datacite.org/schema/kernel-4"
          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
          xsi:schemaLocation="http://datacite.org/schema/kernel-4 http://schema.datacite.org/meta/kernel-4.4/metadata.xsd">
<identifier identifierType="DOI">10.1109/3DTV.2012.6365448</identifier>
<relatedIdentifiers>
<relatedIdentifier relatedIdentifierType="URL" relationType="IsIdenticalTo">https://oa.upm.es/30500/1/INVE_MEM_2012_173560.pdf</relatedIdentifier>
</relatedIdentifiers>
<titles>
<title>3D facial merging for virtual human reconstruction</title>
</titles>
<creators>
<creator>
<creatorName nameType="Personal">Pagés Scasso, Rafael</creatorName>
<givenName>Rafael</givenName>
<familyName>Pagés Scasso</familyName>
</creator>
<creator>
<creatorName nameType="Personal">Morán Burgos, Francisco</creatorName>
<givenName>Francisco</givenName>
<familyName>Morán Burgos</familyName>
<nameIdentifier nameIdentifierScheme="ORCID" schemeURI="http://orcid.org/">0000-0003-3837-692X</nameIdentifier>
</creator>
</creators>
<contributors>
<contributor>
<contributorName nameType="Personal">none supplied</contributorName>
</contributor>
</contributors>
<editors>
<editorName nameType="Personal">none supplied</editorName>
</editors>
<descriptions>
<description descriptionType="Abstract">There is an increasing need of easy and affordable technologies to automatically generate virtual 3D models from their real counterparts. In particular, 3D human reconstruction has driven the creation of many clever techniques, most of them based on the visual hull (VH) concept. Such techniques do not require expensive hardware; however, they tend to yield 3D humanoids with realistic bodies but mediocre faces, since VH cannot handle concavities. On the other hand, structured light projectors allow to capture very accurate depth data, and thus to reconstruct realistic faces, but they are too expensive to use several of them. We have developed a technique to merge a VH-based 3D mesh of a reconstructed humanoid and the depth data of its face, captured by a single structured light projector. By combining the advantages of both systems in a simple setting, we are able to reconstruct realistic 3D human models with believable faces.</description>
</descriptions>
<subjects>
<subject>Telecomunicaciones</subject>
<subject>Robótica e Informática Industrial</subject>
</subjects>
<publicationYear>2012</publicationYear>
<publisher>Universidad Politécnica de Madrid</publisher>
</resource>
<?xml version="1.0" encoding="UTF-8" ?>
<!-- MPEG-21 DIDL record: didl/dii/dip/dcterms/xsi namespace declarations
     restored on the root, and oai_dc/dc on the embedded descriptive record;
     both bare schemaLocation attributes qualified with the xsi: prefix. -->
<didl:DIDL DIDLDocumentId="https://oa.upm.es/id/eprint/30500"
           xmlns:didl="urn:mpeg:mpeg21:2002:02-DIDL-NS"
           xmlns:dii="urn:mpeg:mpeg21:2002:01-DII-NS"
           xmlns:dip="urn:mpeg:mpeg21:2005:01-DIP-NS"
           xmlns:dcterms="http://purl.org/dc/terms/"
           xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
           xsi:schemaLocation="urn:mpeg:mpeg21:2002:02-DIDL-NS http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-21_schema_files/did/didl.xsd urn:mpeg:mpeg21:2002:01-DII-NS http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-21_schema_files/dii/dii.xsd urn:mpeg:mpeg21:2005:01-DIP-NS http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-21_schema_files/dip/dip.xsd">
<didl:Item>
<didl:Descriptor>
<didl:Statement mimeType="application/xml">
<dii:Identifier>https://oa.upm.es/id/eprint/30500</dii:Identifier>
</didl:Statement>
</didl:Descriptor>
<didl:Descriptor>
<didl:Statement mimeType="application/xml">
<dcterms:modified>2024-03-19T08:27:40Z</dcterms:modified>
</didl:Statement>
</didl:Descriptor>
<didl:Component>
</didl:Component>
<didl:Item>
<didl:Descriptor>
<didl:Statement mimeType="application/xml">
<dip:ObjectType>info:eu-repo/semantics/descriptiveMetadata</dip:ObjectType>
</didl:Statement>
</didl:Descriptor>
<didl:Component>
<didl:Resource mimeType="application/xml">
<oai_dc:dc xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/"
           xmlns:dc="http://purl.org/dc/elements/1.1/"
           xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/ http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
<dc:title>3D facial merging for virtual human reconstruction</dc:title>
<dc:creator>Pagés Scasso, Rafael</dc:creator>
<dc:creator>Morán Burgos, Francisco</dc:creator>
<dc:subject>Telecomunicaciones</dc:subject>
<dc:subject>Robótica e Informática Industrial</dc:subject>
<dc:description>There is an increasing need of easy and affordable technologies to automatically generate virtual 3D models from their real counterparts. In particular, 3D human reconstruction has driven the creation of many clever techniques, most of them based on the visual hull (VH) concept. Such techniques do not require expensive hardware; however, they tend to yield 3D humanoids with realistic bodies but mediocre faces, since VH cannot handle concavities. On the other hand, structured light projectors allow to capture very accurate depth data, and thus to reconstruct realistic faces, but they are too expensive to use several of them. We have developed a technique to merge a VH-based 3D mesh of a reconstructed humanoid and the depth data of its face, captured by a single structured light projector. By combining the advantages of both systems in a simple setting, we are able to reconstruct realistic 3D human models with believable faces.</dc:description>
<dc:publisher>E.T.S.I. Telecomunicación (UPM)</dc:publisher>
<dc:rights>https://creativecommons.org/licenses/by-nc-nd/3.0/es/</dc:rights>
<dc:date>2012</dc:date>
<dc:type>info:eu-repo/semantics/conferenceObject</dc:type>
<dc:type>Ponencia en Congreso o Jornada</dc:type>
<dc:source>3DTV-Conference: The True Vision - Capture, Transmission and Display of 3D Video (3DTV-CON) | 3DTV-Conference: The True Vision - Capture, Transmission and Display of 3D Video (3DTV-CON) | 15/10/2012 - 17/10/2012 | Zurich, Switzerland</dc:source>
<dc:type>PeerReviewed</dc:type>
<dc:format>application/pdf</dc:format>
<dc:language>eng</dc:language>
<dc:rights>info:eu-repo/semantics/openAccess</dc:rights>
<dc:relation>info:eu-repo/semantics/altIdentifier/doi/10.1109/3DTV.2012.6365448</dc:relation>
<dc:identifier>https://oa.upm.es/30500/</dc:identifier>
</oai_dc:dc>
</didl:Resource>
</didl:Component>
</didl:Item>
<didl:Item>
<didl:Descriptor>
<didl:Statement mimeType="application/xml">
<dip:ObjectType>info:eu-repo/semantics/objectFile</dip:ObjectType>
</didl:Statement>
</didl:Descriptor>
<didl:Component>
</didl:Component>
</didl:Item>
<didl:Item>
<didl:Descriptor>
<didl:Statement mimeType="application/xml">
<dip:ObjectType>info:eu-repo/semantics/humanStartPage</dip:ObjectType>
</didl:Statement>
</didl:Descriptor>
<didl:Component>
</didl:Component>
</didl:Item>
</didl:Item>
</didl:DIDL>
<?xml version="1.0" encoding="UTF-8" ?>
<!-- OAI-DC record (variant with ORCID appended to the creator string):
     namespace declarations restored and schemaLocation qualified with xsi:. -->
<oai_dc:dc xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/"
           xmlns:dc="http://purl.org/dc/elements/1.1/"
           xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
           xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/ http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
<dc:title>3D facial merging for virtual human reconstruction</dc:title>
<dc:creator>Pagés Scasso, Rafael</dc:creator>
<dc:creator>Morán Burgos, Francisco ; orcid:0000-0003-3837-692X</dc:creator>
<dc:subject>Telecomunicaciones</dc:subject>
<dc:subject>Robótica e Informática Industrial</dc:subject>
<dc:description>There is an increasing need of easy and affordable technologies to automatically generate virtual 3D models from their real counterparts. In particular, 3D human reconstruction has driven the creation of many clever techniques, most of them based on the visual hull (VH) concept. Such techniques do not require expensive hardware; however, they tend to yield 3D humanoids with realistic bodies but mediocre faces, since VH cannot handle concavities. On the other hand, structured light projectors allow to capture very accurate depth data, and thus to reconstruct realistic faces, but they are too expensive to use several of them. We have developed a technique to merge a VH-based 3D mesh of a reconstructed humanoid and the depth data of its face, captured by a single structured light projector. By combining the advantages of both systems in a simple setting, we are able to reconstruct realistic 3D human models with believable faces.</dc:description>
<dc:publisher>E.T.S.I. Telecomunicación (UPM)</dc:publisher>
<dc:rights>https://creativecommons.org/licenses/by-nc-nd/3.0/es/</dc:rights>
<dc:date>2012</dc:date>
<dc:type>info:eu-repo/semantics/conferenceObject</dc:type>
<dc:type>Ponencia en Congreso o Jornada</dc:type>
<dc:source>3DTV-Conference: The True Vision - Capture, Transmission and Display of 3D Video (3DTV-CON) | 3DTV-Conference: The True Vision - Capture, Transmission and Display of 3D Video (3DTV-CON) | 15/10/2012 - 17/10/2012 | Zurich, Switzerland</dc:source>
<dc:type>PeerReviewed</dc:type>
<dc:format>application/pdf</dc:format>
<dc:language>eng</dc:language>
<dc:rights>info:eu-repo/semantics/openAccess</dc:rights>
<dc:relation>info:eu-repo/semantics/altIdentifier/doi/10.1109/3DTV.2012.6365448</dc:relation>
<dc:identifier>https://oa.upm.es/30500/</dc:identifier>
</oai_dc:dc>
<?xml version="1.0" encoding="UTF-8" ?>
<!-- METS record with MODS descriptive metadata: mets/mods/xsi namespace
     declarations restored; schemaLocation qualified with the xsi: prefix.
     Also repaired a mojibake/extraction artifact in the rights text
     ("ningú n" -> "ningún"). The unprefixed <p>/<strong>/<a> elements inside
     mods:useAndReproduction are embedded HTML in no namespace, kept as exported. -->
<mets:mets LABEL="Eprints Item" OBJID="eprint_30500"
           xmlns:mets="http://www.loc.gov/METS/"
           xmlns:mods="http://www.loc.gov/mods/v3"
           xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
           xsi:schemaLocation="http://www.loc.gov/METS/ http://www.loc.gov/standards/mets/mets.xsd http://www.loc.gov/mods/v3 http://www.loc.gov/standards/mods/v3/mods-3-3.xsd">
<mets:metsHdr CREATEDATE="2024-04-12T14:24:27Z">
<mets:agent ROLE="CUSTODIAN" TYPE="ORGANIZATION">
<mets:name>Archivo Digital UPM</mets:name>
</mets:agent>
</mets:metsHdr>
<mets:dmdSec ID="DMD_eprint_30500_mods">
<mets:mdWrap MDTYPE="MODS">
<mets:xmlData>
<mods:titleInfo>
<mods:title>3D facial merging for virtual human reconstruction</mods:title>
</mods:titleInfo>
<mods:name type="personal">
<mods:namePart type="given">Rafael</mods:namePart>
<mods:namePart type="family">Pagés Scasso</mods:namePart>
<mods:role>
<mods:roleTerm type="text">author</mods:roleTerm>
</mods:role>
</mods:name>
<mods:name type="personal">
<mods:namePart type="given">Francisco</mods:namePart>
<mods:namePart type="family">Morán Burgos</mods:namePart>
<mods:role>
<mods:roleTerm type="text">author</mods:roleTerm>
</mods:role>
</mods:name>
<mods:abstract>There is an increasing need of easy and affordable technologies to automatically generate virtual 3D models from their real counterparts. In particular, 3D human reconstruction has driven the creation of many clever techniques, most of them based on the visual hull (VH) concept. Such techniques do not require expensive hardware; however, they tend to yield 3D humanoids with realistic bodies but mediocre faces, since VH cannot handle concavities. On the other hand, structured light projectors allow to capture very accurate depth data, and thus to reconstruct realistic faces, but they are too expensive to use several of them. We have developed a technique to merge a VH-based 3D mesh of a reconstructed humanoid and the depth data of its face, captured by a single structured light projector. By combining the advantages of both systems in a simple setting, we are able to reconstruct realistic 3D human models with believable faces.</mods:abstract>
<mods:classification authority="lcc">Telecomunicaciones</mods:classification>
<mods:classification authority="lcc">Robótica e Informática Industrial</mods:classification>
<mods:originInfo>
<mods:dateIssued encoding="iso8601">2012</mods:dateIssued>
</mods:originInfo>
<mods:originInfo>
<mods:publisher>IEEE</mods:publisher>
</mods:originInfo>
<mods:genre>Ponencia en Congreso o Jornada</mods:genre>
</mets:xmlData>
</mets:mdWrap>
</mets:dmdSec>
<mets:amdSec ID="TMD_eprint_30500">
<mets:rightsMD ID="rights_eprint_30500_mods">
<mets:mdWrap MDTYPE="MODS">
<mets:xmlData>
<mods:useAndReproduction>
<p>
<strong>
<a href="mailto:archivo.digital@upm.es">archivo.digital@upm.es</a>
</strong>
</p>
<p>El autor declara que es el titular de los derechos de propiedad intelectual, objeto de la presente cesión, en relación con la obra que autoarchiva, que ésta es una obra original, y que ostenta la condición de autor de esta obra.</p>
<p>En caso de ser cotitular de tales derechos, el autor declara que cuenta con la autorización de los restantes titulares para hacer la presente cesión, y que los nombres de todos los coautores aparecen mencionados en la obra.</p>
<p>En caso de previa cesión a terceros de derechos de explotación de la obra, el autor declara que tiene la oportuna reserva o autorización de dichos titulares de derechos a los fines de esta cesión.</p>
<p>Con el fin de dar la máxima difusión a esta obra a través de internet, el autor cede a la Universidad Politécnica de Madrid, de forma gratuita y no exclusiva, por el máximo plazo legal y con ámbito universal, los derechos de reproducción, de distribución, de comunicación pública, incluido el derecho de puesta a disposición electrónica, para que pueda ser utilizada de forma libre y gratuita por todos los usuarios de internet, siempre que se cite su autoría y su uso se ajuste a las licencia Creative Commons elegida. Se entienden autorizados todos los actos necesarios para el registro de la obra, su seguridad y su conservación.</p>
<p>El autor garantiza que el compromiso que aquí adquiere no infringe ningún derecho de propiedad industrial, intelectual, derecho al honor, intimidad, o imagen, o cualquier otro derecho de terceros.</p>
<p>El autor asume toda reclamación que pudiera ejercitarse contra la Universidad por terceros que vieran infringidos sus derechos a causa de la cesión.</p>
<p>El autor renuncia a cualquier reclamación frente a la Universidad por las formas no ajustadas a la legislación vigente en que los usuarios hagan uso de las obras.</p>
<p>
<a href="mailto:archivo.digital@upm.es">archivo.digital@upm.es</a>
</p>
<p>
<strong>Para trabajos depositados por otros que no sean su autor:</strong>
<strong>UPM</strong>
</p>
<p>Pulsar en el botón de depósitar E-print Ahora indica su aceptación de estos términos.</p>
</mods:useAndReproduction>
</mets:xmlData>
</mets:mdWrap>
</mets:rightsMD>
</mets:amdSec>
<mets:fileSec>
<mets:fileGrp USE="reference">
<mets:file ID="eprint_30500_30539_1" MIMETYPE="application/pdf" OWNERID="https://oa.upm.es/30500/1/INVE_MEM_2012_173560.pdf" SIZE="1195497">
</mets:file>
</mets:fileGrp>
</mets:fileSec>
<mets:structMap>
<mets:div ADMID="TMD_eprint_30500" DMDID="DMD_eprint_30500_mods">
</mets:div>
</mets:structMap>
</mets:mets>
<?xml version="1.0" encoding="UTF-8" ?>
<?xml version="1.0" encoding="UTF-8" ?>
<!-- RDF/XML record: all namespace declarations restored on the root, and the
     RDF/XML-required attribute qualifications reinstated: bare "about" ->
     rdf:about, "datatype" -> rdf:datatype, "lang" -> xml:lang. -->
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
         xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#"
         xmlns:dc="http://purl.org/dc/elements/1.1/"
         xmlns:dct="http://purl.org/dc/terms/"
         xmlns:bibo="http://purl.org/ontology/bibo/"
         xmlns:skos="http://www.w3.org/2004/02/skos/core#"
         xmlns:foaf="http://xmlns.com/foaf/0.1/">
<rdf:Description rdf:about="">
<rdfs:comment rdf:datatype="http://www.w3.org/2001/XMLSchema#string">The repository administrator has not yet configured an RDF license.</rdfs:comment>
</rdf:Description>
<rdf:Description rdf:about="https://oa.upm.es/30500/1/INVE_MEM_2012_173560.pdf">
<rdfs:label rdf:datatype="http://www.w3.org/2001/XMLSchema#string">INVE_MEM_2012_173560.pdf</rdfs:label>
</rdf:Description>
<rdf:Description rdf:about="https://oa.upm.es/30500/3/indexcodes.txt">
<rdfs:label rdf:datatype="http://www.w3.org/2001/XMLSchema#string">indexcodes.txt</rdfs:label>
</rdf:Description>
<rdf:Description rdf:about="https://oa.upm.es/30500/4/lightbox.jpg">
<rdfs:label rdf:datatype="http://www.w3.org/2001/XMLSchema#string">lightbox.jpg</rdfs:label>
</rdf:Description>
<rdf:Description rdf:about="https://oa.upm.es/30500/5/preview.jpg">
<rdfs:label rdf:datatype="http://www.w3.org/2001/XMLSchema#string">preview.jpg</rdfs:label>
</rdf:Description>
<rdf:Description rdf:about="https://oa.upm.es/30500/6/medium.jpg">
<rdfs:label rdf:datatype="http://www.w3.org/2001/XMLSchema#string">medium.jpg</rdfs:label>
</rdf:Description>
<rdf:Description rdf:about="https://oa.upm.es/30500/7/small.jpg">
<rdfs:label rdf:datatype="http://www.w3.org/2001/XMLSchema#string">small.jpg</rdfs:label>
</rdf:Description>
<rdf:Description rdf:about="https://oa.upm.es/30500/">
<dc:format>text/html</dc:format>
<dc:title>HTML Summary of #30500 3D facial merging for virtual human reconstruction</dc:title>
</rdf:Description>
<rdf:Description rdf:about="https://oa.upm.es/id/document/30539">
<rdfs:label rdf:datatype="http://www.w3.org/2001/XMLSchema#string">3D facial merging for virtual human reconstruction (PDF (Portable Document Format))</rdfs:label>
</rdf:Description>
<rdf:Description rdf:about="https://oa.upm.es/id/document/69422">
<rdfs:label rdf:datatype="http://www.w3.org/2001/XMLSchema#string">3D facial merging for virtual human reconstruction (Otro)</rdfs:label>
</rdf:Description>
<rdf:Description rdf:about="https://oa.upm.es/id/document/283260">
<rdfs:label rdf:datatype="http://www.w3.org/2001/XMLSchema#string">3D facial merging for virtual human reconstruction (Otro)</rdfs:label>
</rdf:Description>
<rdf:Description rdf:about="https://oa.upm.es/id/document/283262">
<rdfs:label rdf:datatype="http://www.w3.org/2001/XMLSchema#string">3D facial merging for virtual human reconstruction (Otro)</rdfs:label>
</rdf:Description>
<rdf:Description rdf:about="https://oa.upm.es/id/document/283265">
<rdfs:label rdf:datatype="http://www.w3.org/2001/XMLSchema#string">3D facial merging for virtual human reconstruction (Otro)</rdfs:label>
</rdf:Description>
<rdf:Description rdf:about="https://oa.upm.es/id/document/283266">
<rdfs:label rdf:datatype="http://www.w3.org/2001/XMLSchema#string">3D facial merging for virtual human reconstruction (Otro)</rdfs:label>
</rdf:Description>
<rdf:Description rdf:about="https://oa.upm.es/id/eprint/30500#authors">
</rdf:Description>
<rdf:Description rdf:about="https://oa.upm.es/id/eprint/30500">
<bibo:abstract rdf:datatype="http://www.w3.org/2001/XMLSchema#string">There is an increasing need of easy and affordable technologies to automatically generate virtual 3D models from their real counterparts. In particular, 3D human reconstruction has driven the creation of many clever techniques, most of them based on the visual hull (VH) concept. Such techniques do not require expensive hardware; however, they tend to yield 3D humanoids with realistic bodies but mediocre faces, since VH cannot handle concavities. On the other hand, structured light projectors allow to capture very accurate depth data, and thus to reconstruct realistic faces, but they are too expensive to use several of them. We have developed a technique to merge a VH-based 3D mesh of a reconstructed humanoid and the depth data of its face, captured by a single structured light projector. By combining the advantages of both systems in a simple setting, we are able to reconstruct realistic 3D human models with believable faces.</bibo:abstract>
<dct:date>2012</dct:date>
<dct:title rdf:datatype="http://www.w3.org/2001/XMLSchema#string">3D facial merging for virtual human reconstruction</dct:title>
</rdf:Description>
<rdf:Description rdf:about="https://oa.upm.es/id/subject/robotica">
<skos:prefLabel xml:lang="en">Robotics and Industrial Computing</skos:prefLabel>
<skos:prefLabel xml:lang="es">Robótica e Informática Industrial</skos:prefLabel>
</rdf:Description>
<rdf:Description rdf:about="https://oa.upm.es/id/subject/telecomunicaciones">
<skos:prefLabel xml:lang="en">Telecommunications</skos:prefLabel>
<skos:prefLabel xml:lang="es">Telecomunicaciones</skos:prefLabel>
</rdf:Description>
<rdf:Description rdf:about="https://oa.upm.es/id/event/ext-d9f110ff395fcbe77267eba1af130f81">
<dct:title rdf:datatype="http://www.w3.org/2001/XMLSchema#string">3DTV-Conference: The True Vision - Capture, Transmission and Display of 3D Video (3DTV-CON)</dct:title>
</rdf:Description>
<rdf:Description rdf:about="https://oa.upm.es/id/location/ext-1059df5addd86c69941107b4ecfba289">
<rdfs:label rdf:datatype="http://www.w3.org/2001/XMLSchema#string">Zurich, Switzerland</rdfs:label>
</rdf:Description>
<rdf:Description rdf:about="https://oa.upm.es/id/org/ext-af0a9a5baed87c407844a3f5db44597c">
<foaf:name rdf:datatype="http://www.w3.org/2001/XMLSchema#string">IEEE</foaf:name>
</rdf:Description>
<rdf:Description rdf:about="https://oa.upm.es/id/person/ext-1fb079b70faa502f0f2772cd8ecb7706">
<foaf:familyName rdf:datatype="http://www.w3.org/2001/XMLSchema#string">Morán Burgos</foaf:familyName>
<foaf:givenName rdf:datatype="http://www.w3.org/2001/XMLSchema#string">Francisco</foaf:givenName>
<foaf:name rdf:datatype="http://www.w3.org/2001/XMLSchema#string">Francisco Morán Burgos</foaf:name>
</rdf:Description>
<rdf:Description rdf:about="https://oa.upm.es/id/person/ext-476aefcbe15e5a22ff4e31c7eaeb4e19">
<foaf:familyName rdf:datatype="http://www.w3.org/2001/XMLSchema#string">Pagés Scasso</foaf:familyName>
<foaf:givenName rdf:datatype="http://www.w3.org/2001/XMLSchema#string">Rafael</foaf:givenName>
<foaf:name rdf:datatype="http://www.w3.org/2001/XMLSchema#string">Rafael Pagés Scasso</foaf:name>
</rdf:Description>
</rdf:RDF>
<?xml version="1.0" encoding="UTF-8" ?>