We describe a comparative evaluation of different movement generation systems capable of computing articulatory trajectories from phonetic input. The articulatory trajectories here pilot the facial deformation of a 3D clone of a human female speaker. We test the adequacy of the predicted trajectories in accompanying the production of natural utterances. The performance of these predictions is compared to that of natural articulatory trajectories produced by the speaker and estimated by an original video-based motion capture technique. The test uses the point-light technique (Rosenblum, L.D. and Saldana, H.M., 1996; 1998).
Please log in to take part in the discussion (add own reviews or comments).
Cite this publication
More citation styles
- please select -
%0 Conference Paper
%1 Bailly2002
%A Bailly, Gérard
%A Gibert, Guillaume
%A Odisio, Matthias
%B Proceedings of the 2002 IEEE Workshop on Speech Synthesis
%C Santa Monica, CA, USA
%D 2002
%K animation animation;Humans;Natural animation;motion capture clone;articulatory clone;motion computer control;Speech;System deformation;human estimation;natural evaluation;natural evaluation;speech generation input;point-light interfaces;software language languages;Shape models;Facial performance processing;3D processing;video production;facial signal speaker speech system systems;facial technique;Cloning;Deformable technique;movement testing;Trajectory;Videos trajectories;audiovisual utterances;phonetic
%P 27-30
%R 10.1109/WSS.2002.1224365
%T Evaluation of movement generation systems using the point-light technique
%X We describe a comparative evaluation of different movement generation systems capable of computing articulatory trajectories from phonetic input. The articulatory trajectories here pilot the facial deformation of a 3D clone of a human female speaker. We test the adequacy of the predicted trajectories in accompanying the production of natural utterances. The performance of these predictions are compared to the ones of natural articulatory trajectories produced by the speaker and estimated by an original video-based motion capture technique. The test uses the point-light technique (Rosenblum, L.D. and Saldana, H.M., 1996; 1998).
@inproceedings{Bailly2002,
  abstract  = {We describe a comparative evaluation of different movement generation systems capable of computing articulatory trajectories from phonetic input. The articulatory trajectories here pilot the facial deformation of a 3D clone of a human female speaker. We test the adequacy of the predicted trajectories in accompanying the production of natural utterances. The performance of these predictions are compared to the ones of natural articulatory trajectories produced by the speaker and estimated by an original video-based motion capture technique. The test uses the point-light technique (Rosenblum, L.D. and Saldana, H.M., 1996; 1998).},
  added-at  = {2021-02-01T10:51:23.000+0100},
  address   = {Santa Monica, CA, USA},
  author    = {Bailly, Gérard and Gibert, Guillaume and Odisio, Matthias},
  biburl    = {https://www.bibsonomy.org/bibtex/24bde0e45313fa51acbcd3cf0903dbf80/m-toman},
  booktitle = {Proceedings of the 2002 IEEE Workshop on Speech Synthesis},
  doi       = {10.1109/WSS.2002.1224365},
  file      = {:pdfs/bailly_ssw_2002.pdf:PDF},
  interhash = {2fbc5132ce172fe36429224db2135a90},
  intrahash = {4bde0e45313fa51acbcd3cf0903dbf80},
  keywords  = {animation animation;Humans;Natural animation;motion capture clone;articulatory clone;motion computer control;Speech;System deformation;human estimation;natural evaluation;natural evaluation;speech generation input;point-light interfaces;software language languages;Shape models;Facial performance processing;3D processing;video production;facial signal speaker speech system systems;facial technique;Cloning;Deformable technique;movement testing;Trajectory;Videos trajectories;audiovisual utterances;phonetic},
  month     = sep,
  owner     = {schabus},
  pages     = {27--30},
  publisher = {IEEE},
  timestamp = {2021-02-01T10:51:23.000+0100},
  title     = {Evaluation of movement generation systems using the point-light technique},
  year      = {2002},
}