DISCLAIMER: To ensure timely dissemination of our research work, we provide preprints (PDFs) of most of our publications together with the corresponding BibTeX files. The copyright for these publications is retained by the members of our lab or by other copyright holders. All persons copying this information are expected to adhere to the terms and constraints invoked by each copyright. Our publications may not be reposted without the explicit permission of the copyright holder.
2016
Jaka Kravanja; Mario Žganec; Jerneja Žganec-Gros; Simon Dobrišek; Vitomir Štruc
Robust Depth Image Acquisition Using Modulated Pattern Projection and Probabilistic Graphical Models (Journal Article)
In: Sensors, 16(10), pp. 1740, Multidisciplinary Digital Publishing Institute, 2016.
DOI: 10.3390/s16101740
PDF: http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/11/sensors-16-01740-1.pdf
Abstract: Depth image acquisition with structured light approaches in outdoor environments is a challenging problem due to external factors, such as ambient sunlight, which commonly affect the acquisition procedure. This paper presents a novel structured light sensor designed specifically for operation in outdoor environments. The sensor exploits a modulated sequence of structured light projected onto the target scene to counteract environmental factors and estimate a spatial distortion map in a robust manner. The correspondence between the projected pattern and the estimated distortion map is then established using a probabilistic framework based on graphical models. Finally, the depth image of the target scene is reconstructed using a number of reference frames recorded during the calibration process. We evaluate the proposed sensor on experimental data in indoor and outdoor environments and present comparative experiments with other existing methods, as well as commercial sensors.
Walter Scheirer; Patrick Flynn; Changxing Ding; Guodong Guo; Vitomir Štruc; Mohamad Al Jazaery; Simon Dobrišek; Klemen Grm; Dacheng Tao; Yu Zhu; Joel Brogan; Sandipan Banerjee; Aparna Bharati; Brandon Richard Webster
Report on the BTAS 2016 Video Person Recognition Evaluation (Conference)
In: Proceedings of the IEEE International Conference on Biometrics: Theory, Applications and Systems (BTAS), IEEE, 2016.
Abstract: This report presents results from the Video Person Recognition Evaluation held in conjunction with the 8th IEEE International Conference on Biometrics: Theory, Applications, and Systems (BTAS). Two experiments required algorithms to recognize people in videos from the Point-and-Shoot Face Recognition Challenge Problem (PaSC). The first consisted of videos from a tripod-mounted, high-quality video camera. The second contained videos acquired from 5 different handheld video cameras. There were 1,401 videos in each experiment of 265 subjects. The subjects, the scenes, and the actions carried out by the people are the same in both experiments. An additional experiment required algorithms to recognize people in videos from the Video Database of Moving Faces and People (VDMFP). There were 958 videos in this experiment of 297 subjects. Four groups from around the world participated in the evaluation. The top verification rate for PaSC from this evaluation is 0.98 at a false accept rate of 0.01, a remarkable advancement in performance from the competition held at FG 2015.
Janez Križaj; Simon Dobrišek; France Mihelič; Vitomir Štruc
Facial Landmark Localization from 3D Images (Inproceedings)
In: Proceedings of the Electrotechnical and Computer Science Conference (ERK), Portorož, Slovenia, 2016.
Abstract: A novel method for automatic facial landmark localization is presented. The method builds on the supervised descent framework, which was shown to successfully localize landmarks in the presence of large expression variations and mild occlusions, but struggles when localizing landmarks on faces with large pose variations. We propose an extension of the supervised descent framework which trains multiple descent maps and results in increased robustness to pose variations. The performance of the proposed method is demonstrated on the Bosphorus database for the problem of facial landmark localization from 3D data. Our experimental results show that the proposed method exhibits increased robustness to pose variations, while retaining high performance in the case of expression and occlusion variations.
Sebastjan Fabijan; Vitomir Štruc
Vpliv registracije obraznih področij na učinkovitost samodejnega razpoznavanja obrazov: študija z OpenBR (Inproceedings)
In: Proceedings of the Electrotechnical and Computer Science Conference (ERK), 2016.
PDF: http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/erk_2016_08_22.pdf
Abstract (translated from Slovenian): In recent years, face recognition has become one of the most successful areas of automatic, computer-based image analysis, with a variety of practical applications. One of the key steps for successful recognition is the alignment of the facial regions in the images. Alignment aims to make recognition independent of the changes in viewing angle at image-capture time, which introduce a high degree of variability into the image data. In this paper, we present three face-alignment procedures from the literature and study their impact on the recognition performance of the methods implemented in the open-source framework Open Source Biometric Recognition (OpenBR). All experiments are performed on the Labeled Faces in the Wild (LFW) dataset.
Žiga Stržinar; Klemen Grm; Vitomir Štruc
Učenje podobnosti v globokih nevronskih omrežjih za razpoznavanje obrazov (Inproceedings)
In: Proceedings of the Electrotechnical and Computer Science Conference (ERK), Portorož, Slovenia, 2016.
PDF: http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/erk_ziga_Vziga.pdf
Abstract (translated from Slovenian): Learning the similarity between pairs of input images is one of the most popular approaches to recognition in the field of deep learning. In this approach, a deep neural network receives a pair of (face) images at its input and outputs a measure of similarity between the two images, which can be used for recognition. The similarity computation can be implemented entirely by the deep network, or the network can be used only to compute a representation of the input image pair, with the mapping from this representation to a similarity measure carried out by another, potentially more suitable model. In this paper, we evaluate five different models for the mapping between the computed representation and the similarity measure, using a neural network of our own design for the experiments. The results of our face-recognition experiments show the importance of choosing a suitable model, as the differences in recognition performance between the models are considerable.
Simon Dobrišek; David Čefarin; Vitomir Štruc; France Mihelič
Assessment of the Google Speech Application Programming Interface for Automatic Slovenian Speech Recognition (Inproceedings)
In: Jezikovne Tehnologije in Digitalna Humanistika, 2016.
PDF: http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/jtdh16-ulfe-luks-sd-final-pdfa.pdf
Abstract: Automatic speech recognizers are slowly maturing into technologies that enable humans to communicate more naturally and effectively with a variety of smart devices and information-communication systems. Large global companies such as Google, Microsoft, Apple, IBM and Baidu compete in developing the most reliable speech recognizers, supporting as many of the main world languages as possible. Due to the relatively small number of speakers, support for the Slovenian spoken language is lagging behind, and among the major global companies only Google has recently added support for our spoken language. The paper presents the results of our independent assessment of the Google speech application programming interface for automatic Slovenian speech recognition. For the experiments, we used speech databases that are otherwise used for the development and assessment of Slovenian speech recognizers.
Metod Ribič; Žiga Emeršič; Vitomir Štruc; Peter Peer
Influence of Alignment on Ear Recognition: Case Study on the AWE Dataset (Inproceedings)
In: Proceedings of the Electrotechnical and Computer Science Conference (ERK), pp. 131-134, Portorož, Slovenia, 2016.
PDF: http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/Influence_of_Alignment_on_Ear_Recognitio.pdf
Abstract: The ear as a biometric modality presents a viable source for automatic human recognition. In recent years, local description methods have been gaining in popularity due to their invariance to illumination and occlusion. However, these methods require that images are well aligned and preprocessed as well as possible. This gives rise to one of the greatest challenges of ear recognition: sensitivity to pose variations. Recently, we presented the Annotated Web Ears (AWE) dataset, which opens new challenges in ear recognition. In this paper, we test the influence of alignment on recognition performance and show that, although alignment improves the recognition rate, the dataset remains very challenging even with alignment. We also show that more sophisticated alignment methods are needed to address the AWE dataset efficiently.
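For illustration, a minimal sketch of one common alignment approach, estimating a similarity transform from detected landmarks to a fixed template with OpenCV, is given below; the function name, template layout and output size are illustrative assumptions, not the procedure used in the paper:

```python
import cv2
import numpy as np

def align_to_template(image, landmarks, template, out_size=(96, 160)):
    """Warp `image` so its detected landmarks match a fixed template layout.
    `landmarks` and `template` are (N, 2) arrays of (x, y) coordinates."""
    # Estimate a similarity transform (rotation + uniform scale + translation)
    # mapping the detected landmarks onto the template.
    M, _ = cv2.estimateAffinePartial2D(
        np.asarray(landmarks, dtype=np.float32),
        np.asarray(template, dtype=np.float32))
    # Resample the image into the aligned, fixed-size coordinate frame.
    return cv2.warpAffine(image, M, out_size)
```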
Jaka Kravanja; Mario Žganec; Jerneja Žganec-Gros; Simon Dobrišek; Vitomir Štruc
Exploiting Spatio-Temporal Information for Light-Plane Labeling in Depth-Image Sensors Using Probabilistic Graphical Models (Journal Article)
In: Informatica, 27(1), pp. 67-84, Vilnius University Institute of Mathematics and Informatics, 2016.
PDF: http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/11/jaka_informatica_camera.pdf
Abstract: This paper proposes a novel approach to light-plane labeling in depth-image sensors relying on "uncoded" structured light. The proposed approach adopts probabilistic graphical models (PGMs) to solve the correspondence problem between the projected and the detected light patterns. The procedure for solving the correspondence problem is designed to take the spatial relations between the parts of the projected pattern and prior knowledge about the structure of the pattern into account, but it also exploits temporal information to achieve reliable light-plane labeling. The procedure is assessed on a database of light patterns detected with a specially developed imaging sensor that, unlike most existing solutions on the market, was shown to work reliably in outdoor environments as well as in the presence of other identical (active) sensors directed at the same scene. The results of our experiments show that the proposed approach is able to reliably solve the correspondence problem and assign light-plane labels to the detected pattern with a high accuracy, even when large spatial discontinuities are present in the observed scene.
Klemen Grm; Simon Dobrišek; Vitomir Štruc
Deep pair-wise similarity learning for face recognition (Conference)
In: 4th International Workshop on Biometrics and Forensics (IWBF), pp. 1-6, IEEE, 2016.
PDF: http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/IWBF_2016.pdf
Abstract: Recent advances in deep learning made it possible to build deep hierarchical models capable of delivering state-of-the-art performance in various vision tasks, such as object recognition, detection or tracking. For recognition tasks the most common approach when using deep models is to learn object representations (or features) directly from raw image input and then feed the learned features to a suitable classifier. Deep models used in this pipeline are typically heavily parameterized and require enormous amounts of training data to deliver competitive recognition performance. Despite the use of data augmentation techniques, many application domains, predefined experimental protocols or specifics of the recognition problem limit the amount of available training data and make training an effective deep hierarchical model a difficult task. In this paper, we present a novel, deep pair-wise similarity learning (DPSL) strategy for deep models, developed specifically to overcome the problem of insufficient training data, and demonstrate its usage on the task of face recognition. Unlike existing (deep) learning strategies, DPSL operates on image pairs and tries to learn pair-wise image similarities that can be used for recognition purposes directly, instead of feature representations that need to be fed to appropriate classification techniques, as with traditional deep learning pipelines. Since our DPSL strategy assumes an image pair as the input to the learning procedure, the amount of training data available to train deep models is quadratic in the number of available training images, which is of paramount importance for models with a large number of parameters. We demonstrate the efficacy of the proposed learning strategy by developing a deep model for pose-invariant face recognition, called the Pose-Invariant Similarity Index (PISI), and presenting comparative experimental results on the FERET and IJB-A datasets.
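The quadratic growth in training data that DPSL exploits comes simply from enumerating image pairs; the sketch below illustrates only that bookkeeping step (the pairing scheme and labels are illustrative, and the PISI model itself is not reproduced here):

```python
import itertools
import numpy as np

def make_training_pairs(images, labels):
    """Expand N labeled images into ~N^2/2 (image pair, same-identity) examples,
    the data amplification a pair-wise similarity learner trains on."""
    pairs, targets = [], []
    for i, j in itertools.combinations(range(len(images)), 2):
        pairs.append((images[i], images[j]))
        targets.append(1 if labels[i] == labels[j] else 0)  # 1 = same identity
    return pairs, np.asarray(targets)

# Example: 4 images already yield 6 training pairs.
pairs, targets = make_training_pairs(["a1", "a2", "b1", "b2"], [0, 0, 1, 1])
print(len(pairs), targets)  # 6 [1 0 0 0 0 1]
```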
2015
Klemen Grm; Simon Dobrišek; Vitomir Štruc
The pose-invariant similarity index for face recognition (Incollection)
In: Proceedings of the Electrotechnical and Computer Science Conference (ERK), Portorož, Slovenia, 2015.
Vitomir Štruc; Janez Križaj; Simon Dobrišek
Modest face recognition (Conference)
In: Proceedings of the International Workshop on Biometrics and Forensics (IWBF), pp. 1-6, IEEE, 2015.
PDF: http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/IWBF2015.pdf
Abstract: The facial imagery usually at the disposal of forensic investigations is commonly of poor quality due to the unconstrained settings in which it was acquired. The captured faces are typically non-frontal, partially occluded and of low resolution, which makes the recognition task extremely difficult. In this paper we try to address this problem by presenting a novel framework for face recognition that combines diverse feature sets (Gabor features, local binary patterns, local phase quantization features and pixel intensities), probabilistic linear discriminant analysis (PLDA) and data fusion based on linear logistic regression. With the proposed framework, a matching score for the given pair of probe and target images is produced by applying PLDA on each of the four feature sets independently, producing a (partial) matching score for each of the PLDA-based feature vectors, and then combining the partial matching results at the score level to generate a single matching score for recognition. We make two main contributions in the paper: i) we introduce a novel framework for face recognition that relies on probabilistic MOdels of Diverse fEature SeTs (MODEST) to facilitate the recognition process, and ii) we benchmark it against the existing state-of-the-art. We demonstrate the feasibility of our MODEST framework on the FRGCv2 and PaSC databases and present comparative results with state-of-the-art recognition techniques, which demonstrate the efficacy of our framework.
Ross Beveridge; Hao Zhang; Bruce A. Draper; Patrick J. Flynn; Zhenhua Feng; Patrik Huber; Josef Kittler; Zhiwu Huang; Shaoxin Li; Yan Li; Vitomir Štruc; Janez Križaj; others
Report on the FG 2015 video person recognition evaluation (Conference)
In: 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (IEEE FG), vol. 1, pp. 1-8, IEEE, 2015.
PDF: http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/fg2015videoEvalPreprint.pdf
Abstract: This report presents results from the Video Person Recognition Evaluation held in conjunction with the 11th IEEE International Conference on Automatic Face and Gesture Recognition. Two experiments required algorithms to recognize people in videos from the Point-and-Shoot Face Recognition Challenge Problem (PaSC). The first consisted of videos from a tripod-mounted, high-quality video camera. The second contained videos acquired from 5 different handheld video cameras. There were 1,401 videos in each experiment of 265 subjects. The subjects, the scenes, and the actions carried out by the people are the same in both experiments. Five groups from around the world participated in the evaluation. The video handheld experiment was included in the International Joint Conference on Biometrics (IJCB) 2014 Handheld Video Face and Person Recognition Competition. The top verification rate from this evaluation is double that of the top performer in the IJCB competition. Analysis shows that the factor most affecting algorithm performance is the combination of location and action: where the video was acquired and what the person was doing.
Tadej Justin; Vitomir Štruc; Simon Dobrišek; Boštjan Vesnicer; Ivo Ipšić; France Mihelič
Speaker de-identification using diphone recognition and speech synthesis (Conference)
In: 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (IEEE FG): DeID 2015, vol. 4, pp. 1-7, IEEE, 2015.
PDF: http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/Deid2015.pdf
Abstract: The paper addresses the problem of speaker (or voice) de-identification by presenting a novel approach for concealing the identity of speakers in their speech. The proposed technique first recognizes the input speech with a diphone recognition system and then transforms the obtained phonetic transcription into the speech of another speaker with a speech synthesis system. Because a Diphone RecOgnition step and a sPeech SYnthesis step are used during the de-identification, we refer to the developed technique as DROPSY. With this approach, the acoustical models of the recognition and synthesis modules are completely independent from each other, which ensures the highest level of input-speaker de-identification. The proposed DROPSY-based de-identification approach is language-dependent, text-independent and capable of running in real time due to the relatively simple computing methods used. When designing speaker de-identification technology, two requirements are typically imposed on the de-identification techniques: i) it should not be possible to establish the identity of the speakers based on the de-identified speech, and ii) the processed speech should still sound natural and be intelligible. This paper, therefore, implements the proposed DROPSY-based approach with two different speech synthesis techniques (i.e., with the HMM-based and the diphone TD-PSOLA-based technique). The obtained de-identified speech is evaluated for intelligibility and assessed in speaker verification experiments with a state-of-the-art (i-vector/PLDA) speaker recognition system. The comparison of both speech synthesis modules integrated in the proposed method reveals that both can efficiently de-identify the input speakers while still producing intelligible speech.
Simon Dobrišek; Vitomir Štruc; Janez Križaj; France Mihelič
Face recognition in the wild with the Probabilistic Gabor-Fisher Classifier (Conference)
In: 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (IEEE FG): BWild 2015, vol. 2, pp. 1-6, IEEE, 2015.
PDF: http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/Bwild2015.pdf
Abstract: The paper addresses the problem of face recognition in the wild. It introduces a novel approach to unconstrained face recognition that exploits Gabor magnitude features and a simplified version of probabilistic linear discriminant analysis (PLDA). The novel approach, named the Probabilistic Gabor-Fisher Classifier (PGFC), first extracts a vector of Gabor magnitude features from the given input image using a battery of Gabor filters, then reduces the dimensionality of the extracted feature vector by projecting it into a low-dimensional subspace, and finally produces a representation suitable for identity inference by applying PLDA to the projected feature vector. The proposed approach extends the popular Gabor-Fisher Classifier (GFC) to a probabilistic setting and thus improves on the generalization capabilities of the GFC method. The PGFC technique is assessed in face verification experiments on the Point and Shoot Face Recognition Challenge (PaSC) database, which features real-world videos of subjects performing everyday tasks. Experimental results on this challenging database show the feasibility of the proposed approach, which improves on the best results on this database reported in the literature at the time of writing.
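A minimal sketch of the Gabor-magnitude feature extraction stage described in the abstract is given below, using OpenCV's Gabor kernels; the filter-bank parameters and the crude downsampling are illustrative assumptions, and the subspace projection and PLDA stages are not reproduced:

```python
import cv2
import numpy as np

def gabor_magnitude_features(gray, scales=5, orientations=8, ksize=31):
    """Battery of Gabor filters; per-filter magnitude responses are
    downsampled and concatenated into one feature vector (cf. GFC/PGFC)."""
    img = gray.astype(np.float32)
    feats = []
    for s in range(scales):
        lambd = 4.0 * (2 ** (s / 2.0))  # wavelength per scale (illustrative)
        for o in range(orientations):
            theta = o * np.pi / orientations
            # Real and imaginary responses via a 90-degree phase offset.
            k_re = cv2.getGaborKernel((ksize, ksize), 4.0, theta, lambd, 0.5, 0)
            k_im = cv2.getGaborKernel((ksize, ksize), 4.0, theta, lambd, 0.5, np.pi / 2)
            re = cv2.filter2D(img, cv2.CV_32F, k_re)
            im = cv2.filter2D(img, cv2.CV_32F, k_im)
            mag = cv2.magnitude(re, im)
            feats.append(cv2.resize(mag, (16, 16)).ravel())  # downsample + flatten
    return np.concatenate(feats)
```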
Tadej Justin; Vitomir Štruc; Janez Žibert; France Mihelič
Development and Evaluation of the Emotional Slovenian Speech Database - EmoLUKS (Conference)
In: Proceedings of the International Conference on Text, Speech, and Dialogue (TSD), pp. 351-359, Springer, 2015.
PDF: http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/tsd2015.pdf
Abstract: This paper describes a speech database built from 17 Slovenian radio dramas. The dramas were obtained from the national radio-and-television station (RTV Slovenia) and were placed at the university's disposal under an academic license for processing and annotating the audio material. The utterances of one male and one female speaker were transcribed, segmented and then annotated with the emotional states of the speakers. The annotation of the emotional states was conducted in two stages with our own web-based application for crowdsourcing. The final (emotional) speech database consists of 1385 recordings of one male (975 recordings) and one female (410 recordings) speaker and contains labeled emotional speech with a total duration of around 1 hour and 15 minutes. The paper presents the two-stage annotation process used to label the data and demonstrates the usefulness of the employed annotation methodology. Baseline emotion recognition experiments are also presented. The reported results are given as unweighted as well as weighted average recalls and precisions for the 2-class and 7-class recognition experiments.
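The unweighted and weighted average recall and precision reported above correspond to scikit-learn's macro- and support-weighted averages; a small sketch, with made-up labels purely for illustration:

```python
from sklearn.metrics import recall_score, precision_score

# y_true, y_pred: emotion labels, e.g. from a 7-class recognition experiment.
y_true = ["anger", "joy", "neutral", "joy", "sadness", "neutral", "fear"]
y_pred = ["anger", "neutral", "neutral", "joy", "neutral", "neutral", "fear"]

uar = recall_score(y_true, y_pred, average="macro")     # unweighted average recall
war = recall_score(y_true, y_pred, average="weighted")  # weighted by class support
uap = precision_score(y_true, y_pred, average="macro", zero_division=0)
print(f"UAR={uar:.3f} WAR={war:.3f} UAP={uap:.3f}")
```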
Necati Cihan Camgoz; Vitomir Štruc; Berk Gokberk; Lale Akarun; Ahmet Alp Kindiroglu
Facial Landmark Localization in Depth Images using Supervised Ridge Descent (Conference)
In: Proceedings of the IEEE International Conference on Computer Vision Workshops (ICCVW): ChaLearn, pp. 136-141, 2015.
PDF: http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/Camgoz_Facial_Landmark_Localization_ICCV_2015_paper.pdf
Abstract: The Supervised Descent Method (SDM) has proven successful in many computer vision applications such as face alignment, tracking and camera calibration. Recent studies which used SDM achieved state-of-the-art performance on facial landmark localization in depth images [4]. In this study, we propose to use ridge regression instead of least-squares regression for learning the SDM, and to change feature sizes in each iteration, effectively turning the landmark search into a coarse-to-fine process. We apply the proposed method to facial landmark localization on the Bosphorus 3D Face Database, using frontal depth images with no occlusion. Experimental results confirm that both ridge regression and adaptive feature sizes improve the localization accuracy considerably.
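To make the proposed change concrete, the generic SDM update and the ridge-regularised learning step can be sketched as follows; the notation is ours, not the paper's: x_k is the current landmark estimate, phi(I, x_k) the features extracted around it, and lambda the ridge penalty:

```latex
% One SDM iteration refines the landmark estimate with a learned descent map R_k:
x_{k+1} = x_k + R_k\,\phi(I, x_k) + b_k .
% Plain SDM fits (R_k, b_k) by least squares over training pairs (\phi_i, \Delta x_i);
% supervised ridge descent adds an \ell_2 penalty:
\min_{R_k,\, b_k} \; \sum_i \left\| \Delta x_i - R_k \phi_i - b_k \right\|_2^2
  \;+\; \lambda \left\| R_k \right\|_F^2 ,
% with the closed-form solution (bias folded in via \tilde{\phi} = [\phi;\, 1]):
\tilde{R}_k = \Delta X\, \tilde{\Phi}^{\top}
  \left( \tilde{\Phi}\tilde{\Phi}^{\top} + \lambda I \right)^{-1} .
```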
2014
Peter Peer; Žiga Emeršič; Jernej Bule; Jerneja Žganec-Gros; Vitomir Štruc
Strategies for exploiting independent cloud implementations of biometric experts in multibiometric scenarios (Journal Article)
In: Mathematical Problems in Engineering, vol. 2014, Hindawi Publishing Corporation, 2014.
DOI: 10.1155/2014/585139
PDF: http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/585139-1.pdf
Abstract: Cloud computing represents one of the fastest growing areas of technology and offers a new computing model for various applications and services. This model is particularly interesting for the area of biometric recognition, where scalability, processing power, and storage requirements are becoming a bigger and bigger issue with each new generation of recognition technology. Next to the availability of computing resources, another important aspect of cloud computing with respect to biometrics is accessibility. Since biometric cloud services are easily accessible, it is possible to combine different existing implementations and design new multibiometric services that, next to almost unlimited resources, also offer superior recognition performance and, consequently, ensure improved security to their client applications. Unfortunately, the literature on the best strategies for combining existing implementations of cloud-based biometric experts into a multibiometric service is virtually nonexistent. In this paper, we try to close this gap and evaluate different strategies for combining existing biometric experts into a multibiometric cloud service. We analyze the (fusion) strategies from different perspectives, such as performance gains, training complexity, or resource consumption, and present results and findings important to software developers and other researchers working in the areas of biometrics and cloud computing. The analysis is conducted based on two biometric cloud services, which are also presented in the paper.
Vitomir Štruc; Jerneja Žganec-Gros; Boštjan Vesnicer; Nikola Pavešić
Beyond parametric score normalisation in biometric verification systems (Journal Article)
In: IET Biometrics, 3(2), pp. 62-74, IET, 2014.
DOI: 10.1049/iet-bmt.2013.0076
PDF: http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/IET_Vito.pdf
Abstract: Similarity scores represent the basis for identity inference in biometric verification systems. However, because of the so-called mismatched conditions across enrollment and probe samples and identity-dependent factors, these scores typically exhibit statistical variations that affect the verification performance of biometric systems. To mitigate these variations, score-normalisation techniques, such as the z-norm, the t-norm or the zt-norm, are commonly adopted. In this study, the authors study the problem of score normalisation in the scope of biometric verification and introduce a new class of non-parametric normalisation techniques, which make no assumptions regarding the shape of the distribution from which the scores are drawn (as the parametric techniques do). Instead, they estimate the shape of the score distribution and use the estimate to map the initial distribution to a common (predefined) distribution. Based on the new class of normalisation techniques, they also develop a hybrid normalisation scheme that combines non-parametric and parametric techniques into hybrid two-step procedures. They evaluate the performance of the non-parametric and hybrid techniques in face-verification experiments on the FRGCv2 and SCFace databases and show that the non-parametric techniques outperform their parametric counterparts and that the hybrid procedure is not only feasible, but also retains some desirable characteristics from both the non-parametric and the parametric techniques.
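One plausible instance of such a non-parametric mapping, offered only as an illustrative sketch rather than the paper's exact procedure, sends scores through their empirical CDF and then through the inverse CDF of a predefined target distribution (here a standard normal):

```python
import numpy as np
from scipy.stats import norm, rankdata

def nonparametric_normalise(scores):
    """Map raw similarity scores onto a standard-normal target distribution
    via their empirical CDF (rank-based), making no shape assumptions."""
    n = len(scores)
    ecdf = rankdata(scores) / (n + 1.0)  # empirical CDF values in (0, 1)
    return norm.ppf(ecdf)                # inverse CDF of the target N(0, 1)

raw = np.array([0.12, 0.80, 0.55, 0.91, 0.33])  # illustrative raw scores
print(nonparametric_normalise(raw))
```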
Žiga Emeršič; Jernej Bule; Jerneja Žganec-Gros; Vitomir Štruc; Peter Peer
A case study on multi-modal biometrics in the cloud (Journal Article)
In: Electrotechnical Review (Elektrotehniški vestnik), 81(3), pp. 74, 2014.
PDF: http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/Emersic.pdf
Abstract: Cloud computing is particularly interesting for the area of biometric recognition, where scalability, availability and accessibility are important aspects. In this paper we evaluate different strategies for combining existing uni-modal (cloud-based) biometric experts into a multi-biometric cloud service. We analyze several fusion strategies from the perspective of performance gains, training complexity and resource consumption, and discuss the results of our analysis. The experimental evaluation is conducted with two biometric cloud services developed in the scope of the Competence Centre CLASS, a face recognition service and a fingerprint recognition service, which are also briefly described in the paper. The presented results are important to researchers and developers working in the area of biometric services for the cloud who are looking for easy solutions for improving the quality of their services.
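The fusion strategies compared in the two cloud papers above operate at the score level; a minimal sketch of two common variants, a fixed sum rule and a trained logistic-regression fuser, is shown below (the scores and labels are made up for illustration):

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

# Matching scores from two independent experts (e.g., face and fingerprint)
# for the same comparison trials; y = 1 for genuine, 0 for impostor pairs.
face = np.array([0.91, 0.15, 0.72, 0.30, 0.88, 0.25])
finger = np.array([0.85, 0.20, 0.40, 0.10, 0.95, 0.55])
y = np.array([1, 0, 1, 0, 1, 0])

# Fixed-rule fusion: a simple (weighted) sum of the per-expert scores.
sum_fused = 0.5 * face + 0.5 * finger

# Trained fusion: logistic regression learns the expert weights from data.
X = np.column_stack([face, finger])
fuser = LogisticRegression().fit(X, y)
prob_fused = fuser.predict_proba(X)[:, 1]
print(sum_fused, prob_fused)
```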
Janez Križaj; Vitomir Štruc; France Mihelič A Feasibility Study on the Use of Binary Keypoint Descriptors for 3D Face Recognition Conference Proceedings of the Mexican Conference on Pattern Recognition (MCPR), Springer 2014. @conference{krivzaj2014feasibility, title = {A Feasibility Study on the Use of Binary Keypoint Descriptors for 3D Face Recognition}, author = { Janez Kri\v{z}aj and Vitomir \v{S}truc and France Miheli\v{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/MCPR2014.pdf}, year = {2014}, date = {2014-01-01}, booktitle = {Proceedings of the Mexican Conference on Pattern Recognition (MCPR)}, pages = {142--151}, organization = {Springer}, abstract = {Despite the progress made in the area of local image descriptors in recent years, virtually no literature is available on the use of more recent descriptors for the problem of 3D face recognition, such as BRIEF, ORB, BRISK or FREAK, which are binary in nature and, therefore, tend to be faster to compute and match, while requiring significantly less memory for storage than, for example, SIFT or SURF. In this paper, we try to close this gap and present a feasibility study on the use of these descriptors for 3D face recognition. Descriptors are evaluated on three challenging 3D face image datasets, namely, the FRGC, UMB and CASIA. Our experiments show that the binary descriptors ensure slightly lower verification rates than SIFT, comparable to those of the SURF descriptor, while being an order of magnitude faster than SIFT. The results suggest that the use of binary descriptors represents a viable alternative to the established descriptors.}, keywords = {}, pubstate = {published}, tppubtype = {conference} } Despite the progress made in the area of local image descriptors in recent years, virtually no literature is available on the use of more recent descriptors for the problem of 3D face recognition, such as BRIEF, ORB, BRISK or FREAK, which are binary in nature and, therefore, tend to be faster to compute and match, while requiring significantly less memory for storage than, for example, SIFT or SURF. In this paper, we try to close this gap and present a feasibility study on the use of these descriptors for 3D face recognition. Descriptors are evaluated on three challenging 3D face image datasets, namely, the FRGC, UMB and CASIA. Our experiments show that the binary descriptors ensure slightly lower verification rates than SIFT, comparable to those of the SURF descriptor, while being an order of magnitude faster than SIFT. The results suggest that the use of binary descriptors represents a viable alternative to the established descriptors. |
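The speed and memory advantages of binary descriptors noted in this abstract come from matching with the Hamming distance. The sketch below illustrates that property with OpenCV, using ORB as a representative binary descriptor; the paper works on 3D face data, so the 2D grayscale images, the placeholder file names and the mean-distance similarity are purely illustrative assumptions, not the authors' pipeline.

```python
import cv2

# Placeholder file names; any pair of grayscale face images would do.
img1 = cv2.imread("face1.png", cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread("face2.png", cv2.IMREAD_GRAYSCALE)

orb = cv2.ORB_create(nfeatures=500)
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)

# Binary descriptors are compared with the Hamming distance, which is why
# they are fast to match and cheap to store compared to SIFT or SURF.
matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = sorted(matcher.match(des1, des2), key=lambda m: m.distance)

# A simple (illustrative) similarity: mean distance over the best matches.
best = matches[:50]
score = sum(m.distance for m in best) / len(best)
print(f"mean Hamming distance over best matches: {score:.1f}")
```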
Janez Križaj; Vitomir Štruc; Simon Dobrišek; Darijan Marčetić; Slobodan Ribarić SIFT vs. FREAK: Assessing the usefulness of two keypoint descriptors for 3D face verification Inproceedings In: 37th International Convention on Information and Communication Technology, Electronics and Microelectronics (MIPRO) , pp. 1336–1341, Mipro Opatija, Croatia, 2014. @inproceedings{krivzaj2014sift, title = {SIFT vs. FREAK: Assessing the usefulness of two keypoint descriptors for 3D face verification}, author = { Janez Kri\v{z}aj and Vitomir \v{S}truc and Simon Dobri\v{s}ek and Darijan Mar\v{c}eti\'{c} and Slobodan Ribari\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/MIPRO2014a.pdf}, year = {2014}, date = {2014-01-01}, booktitle = {37th International Convention on Information and Communication Technology, Electronics and Microelectronics (MIPRO) }, pages = {1336--1341}, address = {Opatija, Croatia}, organization = {Mipro}, abstract = {Many techniques in the area of 3D face recognition rely on local descriptors to characterize the surface-shape information around points of interest (or keypoints) in the 3D images. Despite the fact that a lot of advancements have been made in the area of keypoint descriptors over the last years, the literature on 3D-face recognition for the most part still focuses on established descriptors, such as SIFT and SURF, and largely neglects more recent descriptors, such as the FREAK descriptor. In this paper we try to bridge this gap and assess the usefulness of the FREAK descriptor for the task of 3D face recognition. Of particular interest to us is a direct comparison of the FREAK and SIFT descriptors within a simple verification framework. To evaluate our framework with the two descriptors, we conduct 3D face recognition experiments on the challenging FRGCv2 and UMBDB databases and show that the FREAK descriptor ensures a very competitive verification performance when compared to the SIFT descriptor, but at a fraction of the computational cost. Our results indicate that the FREAK descriptor is a viable alternative to the SIFT descriptor for the problem of 3D face verification and, due to its binary nature, is particularly useful for real-time recognition systems and verification techniques for low-resource devices such as mobile phones, tablets and the like.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } Many techniques in the area of 3D face recognition rely on local descriptors to characterize the surface-shape information around points of interest (or keypoints) in the 3D images. Despite the fact that a lot of advancements have been made in the area of keypoint descriptors over the last years, the literature on 3D-face recognition for the most part still focuses on established descriptors, such as SIFT and SURF, and largely neglects more recent descriptors, such as the FREAK descriptor. In this paper we try to bridge this gap and assess the usefulness of the FREAK descriptor for the task of 3D face recognition. Of particular interest to us is a direct comparison of the FREAK and SIFT descriptors within a simple verification framework. To evaluate our framework with the two descriptors, we conduct 3D face recognition experiments on the challenging FRGCv2 and UMBDB databases and show that the FREAK descriptor ensures a very competitive verification performance when compared to the SIFT descriptor, but at a fraction of the computational cost. 
Our results indicate that the FREAK descriptor is a viable alternative to the SIFT descriptor for the problem of 3D face verification and, due to its binary nature, is particularly useful for real-time recognition systems and verification techniques for low-resource devices such as mobile phones, tablets and the like. |
Darijan Marčetić; Slobodan Ribarić; Vitomir Štruc; Nikola Pavešić An experimental tattoo de-identification system for privacy protection in still images Inproceedings In: 37th International Convention on Information and Communication Technology, Electronics and Microelectronics (MIPRO), pp. 1288–1293, Mipro IEEE, 2014. @inproceedings{marcetic2014experimental, title = {An experimental tattoo de-identification system for privacy protection in still images}, author = { Darijan Mar\v{c}eti\'{c} and Slobodan Ribari\'{c} and Vitomir \v{S}truc and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/mipro_tatoo.pdf}, year = {2014}, date = {2014-01-01}, booktitle = {37th International Convention on Information and Communication Technology, Electronics and Microelectronics (MIPRO)}, pages = {1288--1293}, publisher = {IEEE}, organization = {Mipro}, abstract = {An experimental tattoo de-identification system for privacy protection in still images is described in the paper. The system consists of the following modules: skin detection, region of interest detection, feature extraction, tattoo database, matching, tattoo detection, skin swapping, and quality evaluation. Two methods for tattoo localization are presented. The first is a simple ad-hoc method based only on skin colour. The second is based on skin colour, texture and SIFT features. The appearance of each tattoo area is de-identified in such a way that its skin colour and skin texture are similar to the surrounding skin area. Experimental results for still images in which tattoo location, distance, size, illumination, and motion blur have large variability are presented. The system is subjectively evaluated based on the results of tattoo localization, the level of privacy protection and the naturalness of the de-identified still images. The level of privacy protection is estimated based on the quality of the removal of the tattoo appearance and the concealment of its location. }, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } An experimental tattoo de-identification system for privacy protection in still images is described in the paper. The system consists of the following modules: skin detection, region of interest detection, feature extraction, tattoo database, matching, tattoo detection, skin swapping, and quality evaluation. Two methods for tattoo localization are presented. The first is a simple ad-hoc method based only on skin colour. The second is based on skin colour, texture and SIFT features. The appearance of each tattoo area is de-identified in such a way that its skin colour and skin texture are similar to the surrounding skin area. Experimental results for still images in which tattoo location, distance, size, illumination, and motion blur have large variability are presented. The system is subjectively evaluated based on the results of tattoo localization, the level of privacy protection and the naturalness of the de-identified still images. The level of privacy protection is estimated based on the quality of the removal of the tattoo appearance and the concealment of its location. |
Boštjan Vesnicer; Jerneja Žganec-Gros; Simon Dobrišek; Vitomir Štruc Incorporating Duration Information into I-Vector-Based Speaker-Recognition Systems Conference Proceedings of Odyssey: The Speaker and Language Recognition Workshop, 2014. @conference{vesnicer2014incorporating, title = {Incorporating Duration Information into I-Vector-Based Speaker-Recognition Systems}, author = { Bo\v{s}tjan Vesnicer and Jerneja \v{Z}ganec-Gros and Simon Dobri\v{s}ek and Vitomir \v{S}truc}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/Odyssey.pdf}, year = {2014}, date = {2014-01-01}, booktitle = {Proceedings of Odyssey: The Speaker and Language Recognition Workshop}, pages = {241--248}, abstract = {Most of the existing literature on i-vector-based speaker recognition focuses on recognition problems, where i-vectors are extracted from speech recordings of sufficient length. The majority of modeling/recognition techniques therefore simply ignores the fact that the i-vectors are most likely estimated unreliably when short recordings are used for their computation. Only recently were a number of solutions proposed in the literature to address the problem of duration variability, all treating the i-vector as a random variable whose posterior distribution can be parameterized by the posterior mean and the posterior covariance. In this setting the covariance matrix serves as a measure of uncertainty that is related to the length of the available recording. In contrast to these solutions, we address the problem of duration variability through weighted statistics. We demonstrate in the paper how established feature transformation techniques regularly used in the area of speaker recognition, such as PCA or WCCN, can be modified to take duration into account. We evaluate our weighting scheme in the scope of the i-vector challenge organized as part of the Odyssey, Speaker and Language Recognition Workshop 2014 and achieve a minimal DCF of 0.280, which at the time of writing puts our approach in third place among all the participating institutions.}, keywords = {}, pubstate = {published}, tppubtype = {conference} } Most of the existing literature on i-vector-based speaker recognition focuses on recognition problems, where i-vectors are extracted from speech recordings of sufficient length. The majority of modeling/recognition techniques therefore simply ignores the fact that the i-vectors are most likely estimated unreliably when short recordings are used for their computation. Only recently were a number of solutions proposed in the literature to address the problem of duration variability, all treating the i-vector as a random variable whose posterior distribution can be parameterized by the posterior mean and the posterior covariance. In this setting the covariance matrix serves as a measure of uncertainty that is related to the length of the available recording. In contrast to these solutions, we address the problem of duration variability through weighted statistics. We demonstrate in the paper how established feature transformation techniques regularly used in the area of speaker recognition, such as PCA or WCCN, can be modified to take duration into account. We evaluate our weighting scheme in the scope of the i-vector challenge organized as part of the Odyssey, Speaker and Language Recognition Workshop 2014 and achieve a minimal DCF of 0.280, which at the time of writing puts our approach in third place among all the participating institutions. |
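The duration-weighted statistics mentioned in this abstract amount to letting each i-vector contribute to the estimated mean and covariance in proportion to how much speech it was computed from. Below is a minimal sketch of a duration-weighted PCA in that spirit; the exact weighting scheme, the toy dimensions and the function names are assumptions for illustration, not the authors' implementation.

```python
import numpy as np

def duration_weighted_pca(ivectors, durations, n_components):
    """Sketch of PCA computed from duration-weighted statistics.

    `ivectors` is an (N, D) matrix and `durations` an (N,) vector of
    recording lengths used as weights, so that i-vectors estimated from
    short (unreliable) recordings contribute less to the statistics.
    """
    w = np.asarray(durations, dtype=float)
    w = w / w.sum()
    mean = w @ ivectors                               # weighted mean
    centred = ivectors - mean
    cov = (centred * w[:, None]).T @ centred          # weighted covariance
    eigvals, eigvecs = np.linalg.eigh(cov)
    order = np.argsort(eigvals)[::-1][:n_components]
    return mean, eigvecs[:, order]

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 400))                       # toy i-vectors
dur = rng.uniform(1.0, 60.0, size=100)                # seconds of speech
mean, W = duration_weighted_pca(X, dur, 50)
projected = (X - mean) @ W                            # transformed i-vectors
```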
Ross Beveridge; Hao Zhang; Patrick Flynn; Yooyoung Lee; Venice Erin Liong; Jiwen Lu; Marcus de Assis Angeloni; Tiago de Freitas Pereira; Haoxiang Li; Gang Hua; Vitomir Štruc; Janez Križaj; Jonathon Phillips The IJCB 2014 PaSC video face and person recognition competition Conference Proceedings of the IEEE International Joint Conference on Biometrics (IJCB), IEEE 2014. @conference{beveridge2014ijcb, title = {The IJCB 2014 PaSC video face and person recognition competition}, author = {Ross Beveridge and Hao Zhang and Patrick Flynn and Yooyoung Lee and Venice Erin Liong and Jiwen Lu and Marcus de Assis Angeloni and Tiago de Freitas Pereira and Haoxiang Li and Gang Hua and Vitomir \v{S}truc and Janez Kri\v{z}aj and Jonathon Phillips}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/IJCB2014.pdf}, year = {2014}, date = {2014-01-01}, booktitle = {Proceedings of the IEEE International Joint Conference on Biometrics (IJCB)}, pages = {1--8}, organization = {IEEE}, abstract = {The Point-and-Shoot Face Recognition Challenge (PaSC) is a performance evaluation challenge including 1401 videos of 265 people acquired with handheld cameras and depicting people engaged in activities with non-frontal head pose. This report summarizes the results from a competition using this challenge problem. In the Video-to-video Experiment a person in a query video is recognized by comparing the query video to a set of target videos. Both target and query videos are drawn from the same pool of 1401 videos. In the Still-to-video Experiment the person in a query video is to be recognized by comparing the query video to a larger target set consisting of still images. Algorithm performance is characterized by verification rate at a false accept rate of 0.01 and associated receiver operating characteristic (ROC) curves. Participants were provided eye coordinates for video frames. Results were submitted by 4 institutions: (i) Advanced Digital Science Center, Singapore; (ii) CPqD, Brazil; (iii) Stevens Institute of Technology, USA; and (iv) University of Ljubljana, Slovenia. Most competitors demonstrated video face recognition performance superior to the baseline provided with PaSC. The results represent the best performance to date on the handheld video portion of the PaSC.}, keywords = {}, pubstate = {published}, tppubtype = {conference} } The Point-and-Shoot Face Recognition Challenge (PaSC) is a performance evaluation challenge including 1401 videos of 265 people acquired with handheld cameras and depicting people engaged in activities with non-frontal head pose. This report summarizes the results from a competition using this challenge problem. In the Video-to-video Experiment a person in a query video is recognized by comparing the query video to a set of target videos. Both target and query videos are drawn from the same pool of 1401 videos. In the Still-to-video Experiment the person in a query video is to be recognized by comparing the query video to a larger target set consisting of still images. Algorithm performance is characterized by verification rate at a false accept rate of 0.01 and associated receiver operating characteristic (ROC) curves. Participants were provided eye coordinates for video frames. Results were submitted by 4 institutions: (i) Advanced Digital Science Center, Singapore; (ii) CPqD, Brazil; (iii) Stevens Institute of Technology, USA; and (iv) University of Ljubljana, Slovenia. 
Most competitors demonstrated video face recognition performance superior to the baseline provided with PaSC. The results represent the best performance to date on the handheld video portion of the PaSC. |
2013 |
Janez Križaj; Simon Dobrišek; Vitomir Štruc; Nikola Pavešić Robust 3D face recognition using adapted statistical models Inproceedings In: Proceedings of the Electrotechnical and Computer Science Conference (ERK'13), 2013. @inproceedings{krizajrobust, title = {Robust 3D face recognition using adapted statistical models}, author = {Janez Kri\v{z}aj and Simon Dobri\v{s}ek and Vitomir \v{S}truc and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/ERK2013b.pdf}, year = {2013}, date = {2013-09-20}, booktitle = {Proceedings of the Electrotechnical and Computer Science Conference (ERK'13)}, abstract = {The paper presents a novel framework to 3D face recognition that exploits region covariance matrices (RCMs), Gaussian mixture models (GMMs) and support vector machine (SVM) classifiers. The proposed framework first combines several 3D face representations at the feature level using RCM descriptors and then derives low-dimensional feature vectors from the computed descriptors with the unscented transform. By doing so, it enables computations in Euclidean space, and makes Gaussian mixture modeling feasible. Finally, a support vector classifier is used for identity inference. As demonstrated by our experimental results on the FRGCv2 and UMB databases, the proposed framework is highly robust and exhibits desirable characteristics such as an inherent mechanism for data fusion (through the RCMs), the ability to examine local as well as global structures of the face with the same descriptor, the ability to integrate domain-specific prior knowledge into the modeling procedure and consequently to handle missing or unreliable data. }, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } The paper presents a novel framework to 3D face recognition that exploits region covariance matrices (RCMs), Gaussian mixture models (GMMs) and support vector machine (SVM) classifiers. The proposed framework first combines several 3D face representations at the feature level using RCM descriptors and then derives low-dimensional feature vectors from the computed descriptors with the unscented transform. By doing so, it enables computations in Euclidean space, and makes Gaussian mixture modeling feasible. Finally, a support vector classifier is used for identity inference. As demonstrated by our experimental results on the FRGCv2 and UMB databases, the proposed framework is highly robust and exhibits desirable characteristics such as an inherent mechanism for data fusion (through the RCMs), the ability to examine local as well as global structures of the face with the same descriptor, the ability to integrate domain-specific prior knowledge into the modeling procedure and consequently to handle missing or unreliable data. |
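The region covariance matrix at the heart of this framework is simply the covariance of per-pixel feature vectors over an image region, which is what lets it fuse several 3D face representations in one descriptor. A minimal sketch follows; the choice and number of feature maps (e.g. depth, normals, curvature) are assumptions for illustration.

```python
import numpy as np

def region_covariance(feature_maps, top, left, height, width):
    """Compute a region covariance matrix (RCM) descriptor.

    `feature_maps` is a (d, H, W) stack of per-pixel features (for 3D faces
    these might be depth, surface normals and curvatures; the exact feature
    set here is an assumption). The RCM of a region is the d x d covariance
    of the feature vectors of its pixels, fusing all d representations into
    a single descriptor.
    """
    region = feature_maps[:, top:top + height, left:left + width]
    vectors = region.reshape(feature_maps.shape[0], -1)   # (d, h*w)
    return np.cov(vectors)                                # (d, d)

d, H, W = 5, 128, 128
maps = np.random.default_rng(1).normal(size=(d, H, W))    # toy feature maps
rcm = region_covariance(maps, 32, 32, 64, 64)
print(rcm.shape)  # (5, 5)
```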
Vitomir Štruc; Jerneja Žganec-Gros; Nikola Pavešić; Simon Dobrišek Zlivanje informacij za zanesljivo in robustno razpoznavanje obrazov Journal Article In: Electrotechnical Review, 80 (3), pp. 1-12, 2013. @article{EV_Struc_2013, title = {Zlivanje informacij za zanesljivo in robustno razpoznavanje obrazov}, author = {Vitomir \v{S}truc and Jerneja \v{Z}ganec-Gros and Nikola Pave\v{s}i\'{c} and Simon Dobri\v{s}ek}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/StrucEV2013.pdf}, year = {2013}, date = {2013-09-01}, journal = {Electrotechnical Review}, volume = {80}, number = {3}, pages = {1-12}, abstract = {The existing face recognition technology has reached a performance level where it is possible to deploy it in various applications provided they are capable of ensuring controlled conditions for the image acquisition procedure. However, the technology still struggles with its recognition performance when deployed in uncontrolled and unconstrained conditions. In this paper, we present a novel approach to face recognition designed specifically for these challenging conditions. The proposed approach exploits information fusion to achieve robustness. In the first step, the approach crops the facial region from each input image in three different ways. It then maps each of the three crops into one of four color representations and finally extracts several feature types from each of the twelve facial representations. The described procedure results in a total of thirty facial representations that are combined at the matching score level using a fusion approach based on linear logistic regression (LLR) to arrive at a robust decision regarding the identity of the subject depicted in the input face image. The presented approach was submitted as a representative of the University of Ljubljana and Alpineon d.o.o. to the 2013 face-recognition competition that was held in conjunction with the IAPR International Conference on Biometrics and achieved the best overall recognition results among all competition participants. Here, we describe the basic characteristics of the approach, elaborate on the results of the competition and, most importantly, present some interesting findings made during our development work that are also of relevance to the research community working in the field of face recognition. }, keywords = {}, pubstate = {published}, tppubtype = {article} } The existing face recognition technology has reached a performance level where it is possible to deploy it in various applications provided they are capable of ensuring controlled conditions for the image acquisition procedure. However, the technology still struggles with its recognition performance when deployed in uncontrolled and unconstrained conditions. In this paper, we present a novel approach to face recognition designed specifically for these challenging conditions. The proposed approach exploits information fusion to achieve robustness. In the first step, the approach crops the facial region from each input image in three different ways. It then maps each of the three crops into one of four color representations and finally extracts several feature types from each of the twelve facial representations. The described procedure results in a total of thirty facial representations that are combined at the matching score level using a fusion approach based on linear logistic regression (LLR) to arrive at a robust decision regarding the identity of the subject depicted in the input face image. 
The presented approach was submitted as a representative of the University of Ljubljana and Alpineon d.o.o. to the 2013 face-recognition competition that was held in conjunction with the IAPR International Conference on Biometrics and achieved the best overall recognition results among all competition participants. Here, we describe the basic characteristics of the approach, elaborate on the results of the competition and, most importantly, present some interesting findings made during our development work that are also of relevance to the research community working in the field of face recognition. |
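Score-level fusion with linear logistic regression, as used above to combine the thirty facial representations, can be sketched compactly: each representation yields one similarity score per comparison, and a logistic-regression model learns how to weight and combine them. The sketch below uses scikit-learn and toy data in place of real matcher outputs; it illustrates the LLR fusion principle, not the authors' trained fuser.

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
n_trials, n_representations = 1000, 30
scores = rng.normal(size=(n_trials, n_representations))   # per-matcher scores
labels = rng.integers(0, 2, size=n_trials)                # 1 = genuine pair

# Linear logistic regression learns one weight per representation plus a
# bias, producing a single calibrated fused score per comparison.
fuser = LogisticRegression()
fuser.fit(scores, labels)

fused = fuser.decision_function(scores)   # fused log-odds of a genuine pair
decision = fused > 0.0                    # accept / reject at threshold 0
```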
Vitomir Štruc; Jerneja Žganec Gros; Simon Dobrišek; Nikola Pavešić Exploiting representation plurality for robust and efficient face recognition Inproceedings In: Proceedings of the 22nd International Electrotechnical and Computer Science Conference (ERK'13), pp. 121–124, Portorož, Slovenia, 2013. @inproceedings{ERK2013_Struc, title = {Exploiting representation plurality for robust and efficient face recognition}, author = {Vitomir \v{S}truc and Jerneja \v{Z}ganec Gros and Simon Dobri\v{s}ek and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/ERK2013a.pdf}, year = {2013}, date = {2013-09-01}, booktitle = {Proceedings of the 22nd International Electrotechnical and Computer Science Conference (ERK'13)}, volume = {vol. B}, pages = {121--124}, address = {Portoro\v{z}, Slovenia}, abstract = {The paper introduces a novel approach to face recognition that exploits plurality of representation to achieve robust face recognition. The proposed approach was submitted as a representative of the University of Ljubljana and Alpineon d.o.o. to the 2013 face recognition competition that was held in conjunction with the IAPR International Conference on Biometrics and achieved the best overall recognition results among all competition participants. Here, we describe the basic characteristics of the submitted approach, elaborate on the results of the competition and, most importantly, present some general findings made during our development work that are of relevance to the broader (face recognition) research community.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } The paper introduces a novel approach to face recognition that exploits plurality of representation to achieve robust face recognition. The proposed approach was submitted as a representative of the University of Ljubljana and Alpineon d.o.o. to the 2013 face recognition competition that was held in conjunction with the IAPR International Conference on Biometrics and achieved the best overall recognition results among all competition participants. Here, we describe the basic characteristics of the submitted approach, elaborate on the results of the competition and, most importantly, present some general findings made during our development work that are of relevance to the broader (face recognition) research community. |
Janez Križaj; Vitomir Štruc; Simon Dobrišek Combining 3D face representations using region covariance descriptors and statistical models Conference Proceedings of the IEEE International Conference on Automatic Face and Gesture Recognition and Workshops (IEEE FG), Workshop on 3D Face Biometrics, IEEE, Shanghai, China, 2013. @conference{FG2013, title = {Combining 3D face representations using region covariance descriptors and statistical models}, author = {Janez Kri\v{z}aj and Vitomir \v{S}truc and Simon Dobri\v{s}ek}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/FG2013.pdf}, year = {2013}, date = {2013-05-01}, booktitle = {Proceedings of the IEEE International Conference on Automatic Face and Gesture Recognition and Workshops (IEEE FG), Workshop on 3D Face Biometrics}, publisher = {IEEE}, address = {Shanghai, China}, abstract = {The paper introduces a novel framework for 3D face recognition that capitalizes on region covariance descriptors and Gaussian mixture models. The framework presents an elegant and coherent way of combining multiple facial representations, while simultaneously examining all computed representations at various levels of locality. The framework first computes a number of region covariance matrices/descriptors from different sized regions of several image representations and then adopts the unscented transform to derive low-dimensional feature vectors from the computed descriptors. By doing so, it enables computations in the Euclidean space, and makes Gaussian mixture modeling feasible. In the last step a support vector machine classification scheme is used to make a decision regarding the identity of the modeled input 3D face image. The proposed framework exhibits several desirable characteristics, such as an inherent mechanism for data fusion/integration (through the region covariance matrices), the ability to examine the facial images at different levels of locality, and the ability to integrate domain-specific prior knowledge into the modeling procedure. We assess the feasibility of the proposed framework on the Face Recognition Grand Challenge version 2 (FRGCv2) database with highly encouraging results.}, keywords = {}, pubstate = {published}, tppubtype = {conference} } The paper introduces a novel framework for 3D face recognition that capitalizes on region covariance descriptors and Gaussian mixture models. The framework presents an elegant and coherent way of combining multiple facial representations, while simultaneously examining all computed representations at various levels of locality. The framework first computes a number of region covariance matrices/descriptors from different sized regions of several image representations and then adopts the unscented transform to derive low-dimensional feature vectors from the computed descriptors. By doing so, it enables computations in the Euclidean space, and makes Gaussian mixture modeling feasible. In the last step a support vector machine classification scheme is used to make a decision regarding the identity of the modeled input 3D face image. The proposed framework exhibits several desirable characteristics, such as an inherent mechanism for data fusion/integration (through the region covariance matrices), the ability to examine the facial images at different levels of locality, and the ability to integrate domain-specific prior knowledge into the modeling procedure. 
We assess the feasibility of the proposed framework on the Face Recognition Grand Challenge version 2 (FRGCv2) database with highly encouraging results. |
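The unscented transform mentioned in this abstract is what turns a region covariance descriptor, which lives on the manifold of symmetric positive-definite matrices, into an ordinary Euclidean vector that a Gaussian mixture model can handle. A minimal sketch follows; the scaling parameter `kappa` and the sigma-point layout are assumptions made for illustration.

```python
import numpy as np

def covariance_to_sigma_points(mean, cov, kappa=0.0):
    """Vectorize a covariance descriptor via the unscented transform.

    Sigma points mu and mu +/- the columns of sqrt((d + kappa) * cov) are
    stacked into a single vector, enabling computations in Euclidean space
    and making Gaussian mixture modeling feasible.
    """
    d = mean.shape[0]
    sqrt_cov = np.linalg.cholesky((d + kappa) * cov)   # matrix square root
    points = [mean]
    for i in range(d):
        points.append(mean + sqrt_cov[:, i])
        points.append(mean - sqrt_cov[:, i])
    return np.concatenate(points)                      # length d * (2d + 1)

mu = np.zeros(5)
C = np.eye(5)                                          # toy RCM
print(covariance_to_sigma_points(mu, C, kappa=1.0).shape)  # (55,)
```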
Simon Dobrišek; Rok Gajšek; France Mihelič; Nikola Pavešić; Vitomir Štruc Towards efficient multi-modal emotion recognition Journal Article In: International Journal of Advanced Robotic Systems, 10 (53), 2013. @article{dobrivsek2013towards, title = {Towards efficient multi-modal emotion recognition}, author = { Simon Dobri\v{s}ek and Rok Gaj\v{s}ek and France Miheli\v{c} and Nikola Pave\v{s}i\'{c} and Vitomir \v{S}truc}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/multimodel-emotion.pdf}, doi = {10.5772/54002}, year = {2013}, date = {2013-01-01}, journal = {International Journal of Advanced Robotic Systems}, volume = {10}, number = {53}, abstract = {The paper presents a multi-modal emotion recognition system exploiting audio and video (i.e., facial expression) information. The system first processes both sources of information individually to produce corresponding matching scores and then combines the computed matching scores to obtain a classification decision. For the video part of the system, a novel approach to emotion recognition, relying on image-set matching, is developed. The proposed approach avoids the need for detecting and tracking specific facial landmarks throughout the given video sequence, which represents a common source of error in video-based emotion recognition systems, and, therefore, adds robustness to the video processing chain. The audio part of the system, on the other hand, relies on utterance-specific Gaussian Mixture Models (GMMs) adapted from a Universal Background Model (UBM) via the maximum a posteriori probability (MAP) estimation. It improves upon the standard UBM-MAP procedure by exploiting gender information when building the utterance-specific GMMs, thus ensuring enhanced emotion recognition performance. Both the uni-modal parts as well as the combined system are assessed on the challenging multi-modal eNTERFACE'05 corpus with highly encouraging results. The developed system represents a feasible solution to emotion recognition that can easily be integrated into various systems, such as humanoid robots, smart surveillance systems and the like.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The paper presents a multi-modal emotion recognition system exploiting audio and video (i.e., facial expression) information. The system first processes both sources of information individually to produce corresponding matching scores and then combines the computed matching scores to obtain a classification decision. For the video part of the system, a novel approach to emotion recognition, relying on image-set matching, is developed. The proposed approach avoids the need for detecting and tracking specific facial landmarks throughout the given video sequence, which represents a common source of error in video-based emotion recognition systems, and, therefore, adds robustness to the video processing chain. The audio part of the system, on the other hand, relies on utterance-specific Gaussian Mixture Models (GMMs) adapted from a Universal Background Model (UBM) via the maximum a posteriori probability (MAP) estimation. It improves upon the standard UBM-MAP procedure by exploiting gender information when building the utterance-specific GMMs, thus ensuring enhanced emotion recognition performance. Both the uni-modal parts as well as the combined system are assessed on the challenging multi-modal eNTERFACE'05 corpus with highly encouraging results. 
The developed system represents a feasible solution to emotion recognition that can easily be integrated into various systems, such as humanoid robots, smart surveillance systems and the like. |
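The UBM-MAP procedure used in the audio part of the system can be sketched as mean-only MAP adaptation: component means of a background GMM are pulled towards the utterance data in proportion to how much data each component explains. The sketch below uses scikit-learn's GaussianMixture as the UBM; the relevance factor of 16 is a common default in the speaker-recognition literature, not necessarily the authors' choice, and mean-only adaptation is an assumption.

```python
import numpy as np
from sklearn.mixture import GaussianMixture

def map_adapt_means(ubm, features, relevance=16.0):
    """Mean-only MAP adaptation of a UBM to one utterance (a sketch).

    `ubm` is a fitted GaussianMixture acting as the Universal Background
    Model; `features` are the utterance's feature vectors, e.g. MFCCs.
    """
    gamma = ubm.predict_proba(features)             # (T, K) responsibilities
    n_k = gamma.sum(axis=0)                         # soft counts per component
    e_k = gamma.T @ features / np.maximum(n_k[:, None], 1e-10)
    alpha = n_k / (n_k + relevance)                 # adaptation coefficients
    return alpha[:, None] * e_k + (1 - alpha)[:, None] * ubm.means_

rng = np.random.default_rng(0)
ubm = GaussianMixture(n_components=8, random_state=0)
ubm.fit(rng.normal(size=(2000, 13)))                # toy "background" features
utterance = rng.normal(size=(300, 13))              # toy utterance features
adapted_means = map_adapt_means(ubm, utterance)     # utterance-specific GMM means
```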
Peter Peer; Jernej Bule; Jerneja Žganec Gros; Vitomir Štruc Building cloud-based biometric services Journal Article In: Informatica, 37 (2), pp. 115, 2013. @article{peer2013building, title = {Building cloud-based biometric services}, author = { Peter Peer and Jernej Bule and Jerneja \v{Z}ganec Gros and Vitomir \v{S}truc}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/Peercloud.pdf}, year = {2013}, date = {2013-01-01}, journal = {Informatica}, volume = {37}, number = {2}, pages = {115}, publisher = {Slovenian Society Informatika/Slovensko drustvo Informatika}, abstract = {Over the next few years the amount of biometric data at the disposal of various agencies and authentication service providers is expected to grow significantly. Such quantities of data require not only enormous amounts of storage but unprecedented processing power as well. To be able to face these future challenges, more and more people are looking towards cloud computing, which can address these challenges quite effectively with its seemingly unlimited storage capacity, rapid data distribution and parallel processing capabilities. Since the available literature on how to implement cloud-based biometric services is extremely scarce, this paper capitalizes on the most important challenges encountered during the development work on biometric services, presents the most important standards and recommendations pertaining to biometric services in the cloud and ultimately, elaborates on the potential value of cloud-based biometric solutions by presenting a few existing (commercial) examples. In the final part of the paper, a case study on fingerprint recognition in the cloud and its integration into the e-learning environment Moodle is presented. }, keywords = {}, pubstate = {published}, tppubtype = {article} } Over the next few years the amount of biometric data at the disposal of various agencies and authentication service providers is expected to grow significantly. Such quantities of data require not only enormous amounts of storage but unprecedented processing power as well. To be able to face these future challenges, more and more people are looking towards cloud computing, which can address these challenges quite effectively with its seemingly unlimited storage capacity, rapid data distribution and parallel processing capabilities. Since the available literature on how to implement cloud-based biometric services is extremely scarce, this paper capitalizes on the most important challenges encountered during the development work on biometric services, presents the most important standards and recommendations pertaining to biometric services in the cloud and ultimately, elaborates on the potential value of cloud-based biometric solutions by presenting a few existing (commercial) examples. In the final part of the paper, a case study on fingerprint recognition in the cloud and its integration into the e-learning environment Moodle is presented. |
Vildana Sulič Kenk; Janez Križaj; Vitomir Štruc; Simon Dobrišek Smart surveillance technologies in border control Journal Article In: European Journal of Law and Technology, 4 (2), 2013. @article{kenk2013smart, title = {Smart surveillance technologies in border control}, author = { Vildana Suli\v{c} Kenk and Janez Kri\v{z}aj and Vitomir \v{S}truc and Simon Dobri\v{s}ek}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/Kenk.pdf}, year = {2013}, date = {2013-01-01}, journal = {European Journal of Law and Technology}, volume = {4}, number = {2}, abstract = {The paper addresses the technical and legal aspects of the existing and forthcoming intelligent ('smart') surveillance technologies that are (or are considered to be) employed in the border control application area. Such technologies provide a computerized decision-making support to border control authorities, and are intended to increase the reliability and efficiency of border control measures. However, the question that arises is how effective these technologies are, as well as at what price, economically, socially, and in terms of citizens' rights. The paper provides a brief overview of smart surveillance technologies in border control applications, especially those used for controlling cross-border traffic, discusses possible proportionality issues and privacy risks raised by the increasingly widespread use of such technologies, as well as good/best practices developed in this area. In a broader context, the paper presents the results of the research carried out as part of the SMART (Scalable Measures for Automated Recognition Technologies) project.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The paper addresses the technical and legal aspects of the existing and forthcoming intelligent ('smart') surveillance technologies that are (or are considered to be) employed in the border control application area. Such technologies provide a computerized decision-making support to border control authorities, and are intended to increase the reliability and efficiency of border control measures. However, the question that arises is how effective these technologies are, as well as at what price, economically, socially, and in terms of citizens' rights. The paper provides a brief overview of smart surveillance technologies in border control applications, especially those used for controlling cross-border traffic, discusses possible proportionality issues and privacy risks raised by the increasingly widespread use of such technologies, as well as good/best practices developed in this area. In a broader context, the paper presents the results of the research carried out as part of the SMART (Scalable Measures for Automated Recognition Technologies) project. |
Vitomir Štruc; Nikola Pavešić; Jerneja Žganec-Gros; Boštjan Vesnicer Patch-wise low-dimensional probabilistic linear discriminant analysis for Face Recognition Conference 2013 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), IEEE 2013. @conference{vstruc2013patch, title = {Patch-wise low-dimensional probabilistic linear discriminant analysis for Face Recognition}, author = { Vitomir \v{S}truc and Nikola Pave\v{s}i\'{c} and Jerneja \v{Z}ganec-Gros and Bo\v{s}tjan Vesnicer}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/ICASSP2013.pdf}, doi = {10.1109/ICASSP.2013.6638075}, year = {2013}, date = {2013-01-01}, booktitle = {2013 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)}, pages = {2352--2356}, organization = {IEEE}, abstract = {The paper introduces a novel approach to face recognition based on the recently proposed low-dimensional probabilistic linear discriminant analysis (LD-PLDA). The proposed approach is specifically designed for complex recognition tasks, where highly nonlinear face variations are typically encountered. Such data variations are commonly induced by changes in the external illumination conditions, viewpoint changes or expression variations and represent quite a challenge even for state-of-the-art techniques, such as LD-PLDA. To overcome this problem, we propose here a patch-wise form of the LD-PLDA technique (i.e., PLD-PLDA), which relies on local image patches rather than the entire image to make inferences about the identity of the input images. The basic idea here is to decompose the complex face recognition problem into simpler problems, for which the linear nature of the LD-PLDA technique may be better suited. By doing so, several similarity scores are derived from one facial image, which are combined at the final stage using a simple sum-rule fusion scheme to arrive at a single score that can be employed for identity inference. We evaluate the proposed technique on experiment 4 of the Face Recognition Grand Challenge (FRGCv2) database with highly promising results.}, keywords = {}, pubstate = {published}, tppubtype = {conference} } The paper introduces a novel approach to face recognition based on the recently proposed low-dimensional probabilistic linear discriminant analysis (LD-PLDA). The proposed approach is specifically designed for complex recognition tasks, where highly nonlinear face variations are typically encountered. Such data variations are commonly induced by changes in the external illumination conditions, viewpoint changes or expression variations and represent quite a challenge even for state-of-the-art techniques, such as LD-PLDA. To overcome this problem, we propose here a patch-wise form of the LD-PLDA technique (i.e., PLD-PLDA), which relies on local image patches rather than the entire image to make inferences about the identity of the input images. The basic idea here is to decompose the complex face recognition problem into simpler problems, for which the linear nature of the LD-PLDA technique may be better suited. By doing so, several similarity scores are derived from one facial image, which are combined at the final stage using a simple sum-rule fusion scheme to arrive at a single score that can be employed for identity inference. We evaluate the proposed technique on experiment 4 of the Face Recognition Grand Challenge (FRGCv2) database with highly promising results. |
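The patch-wise decomposition with sum-rule fusion described above can be illustrated in a few lines: one score per local patch, summed into a single similarity. In this sketch, cosine similarity stands in for the LD-PLDA likelihood ratio purely for illustration; the patch size and stride are likewise assumptions.

```python
import numpy as np

def patch_scores(img_a, img_b, patch=32, step=32):
    """Sum-rule fusion of per-patch similarity scores (a sketch).

    Each aligned patch pair contributes one similarity score; the sum rule
    combines them into a single score for identity inference. Cosine
    similarity is a stand-in for the paper's LD-PLDA scoring.
    """
    total = 0.0
    h, w = img_a.shape
    for y in range(0, h - patch + 1, step):
        for x in range(0, w - patch + 1, step):
            a = img_a[y:y + patch, x:x + patch].ravel()
            b = img_b[y:y + patch, x:x + patch].ravel()
            total += a @ b / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-10)
    return total

rng = np.random.default_rng(0)
probe, gallery = rng.normal(size=(128, 128)), rng.normal(size=(128, 128))
print(patch_scores(probe, gallery))   # fused similarity of the toy pair
```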
Manuel Günther; Artur Costa-Pazo; Changxing Ding; Elhocine Boutellaa; Giovani Chiachia; Honglei Zhang; Marcus de Assis Angeloni; Vitomir Štruc; Elie Khoury; Esteban Vazquez-Fernandez; others The 2013 face recognition evaluation in mobile environment Conference Proceedings of the IAPR International Conference on Biometrics (ICB), IAPR 2013. @conference{gunther20132013, title = {The 2013 face recognition evaluation in mobile environment}, author = {Manuel G\"{u}nther and Artur Costa-Pazo and Changxing Ding and Elhocine Boutellaa and Giovani Chiachia and Honglei Zhang and Marcus de Assis Angeloni and Vitomir \v{S}truc and Elie Khoury and Esteban Vazquez-Fernandez and others}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/Gunther_ICB2013_2013.pdf}, year = {2013}, date = {2013-01-01}, booktitle = {Proceedings of the IAPR International Conference on Biometrics (ICB)}, pages = {1--7}, organization = {IAPR}, abstract = {Automatic face recognition in unconstrained environments is a challenging task. To test current trends in face recognition algorithms, we organized an evaluation on face recognition in mobile environment. This paper presents the results of 8 different participants using two verification metrics. Most submitted algorithms rely on one or more of three types of features: local binary patterns, Gabor wavelet responses including Gabor phases, and color information. The best results are obtained from UNILJ-ALP, which fused several image representations and feature types, and UCHU, which learns optimal features with a convolutional neural network. Additionally, we assess the usability of the algorithms in mobile devices with limited resources.}, keywords = {}, pubstate = {published}, tppubtype = {conference} } Automatic face recognition in unconstrained environments is a challenging task. To test current trends in face recognition algorithms, we organized an evaluation on face recognition in mobile environment. This paper presents the results of 8 different participants using two verification metrics. Most submitted algorithms rely on one or more of three types of features: local binary patterns, Gabor wavelet responses including Gabor phases, and color information. The best results are obtained from UNILJ-ALP, which fused several image representations and feature types, and UCHU, which learns optimal features with a convolutional neural network. Additionally, we assess the usability of the algorithms in mobile devices with limited resources. |
2012 |
Janez Križaj; Vitomir Štruc; Simon Dobrišek Robust 3D Face Recognition Journal Article In: Electrotechnical Review, 79 (1-2), pp. 1-6, 2012. @article{Kri\v{z}aj-EV-2012, title = {Robust 3D Face Recognition}, author = {Janez Kri\v{z}aj and Vitomir \v{S}truc and Simon Dobri\v{s}ek}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/KrizajEV.pdf}, year = {2012}, date = {2012-06-01}, journal = {Electrotechnical Review}, volume = {79}, number = {1-2}, pages = {1-6}, abstract = {Face recognition in uncontrolled environments is hindered by variations in illumination, pose, expression and occlusions of faces. Many practical face-recognition systems are affected by these variations. One way to increase the robustness to illumination and pose variations is to use 3D facial images. In this paper 3D face-recognition systems are presented. Their structure and operation are described. The robustness of such systems to variations in uncontrolled environments is emphasized. We present some preliminary results of a system developed in our laboratory.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Face recognition in uncontrolled environments is hindered by variations in illumination, pose, expression and occlusions of faces. Many practical face-recognition systems are affected by these variations. One way to increase the robustness to illumination and pose variations is to use 3D facial images. In this paper 3D face-recognition systems are presented. Their structure and operation are described. The robustness of such systems to variations in uncontrolled environments is emphasized. We present some preliminary results of a system developed in our laboratory. |
Janez Križaj; Vitomir Štruc; Simon Dobrišek Towards robust 3D face verification using Gaussian mixture models Journal Article In: International Journal of Advanced Robotic Systems, 9 , 2012. @article{krizaj2012towards, title = {Towards robust 3D face verification using Gaussian mixture models}, author = { Janez Kri\v{z}aj and Vitomir \v{S}truc and Simon Dobri\v{s}ek}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/IntechJanez-1.pdf}, doi = {10.5772/52200}, year = {2012}, date = {2012-01-01}, journal = {International Journal of Advanced Robotic Systems}, volume = {9}, publisher = {InTech}, abstract = {This paper focuses on the use of Gaussian Mixture models (GMM) for 3D face verification. A special interest is taken in practical aspects of 3D face verification systems, where all steps of the verification procedure need to be automated and no meta-data, such as pre-annotated eye/nose/mouth positions, is available to the system. In such settings the performance of the verification system correlates heavily with the performance of the employed alignment (i.e., geometric normalization) procedure. We show that popular holistic as well as local recognition techniques, such as principal component analysis (PCA), or Scale-invariant feature transform (SIFT)-based methods considerably deteriorate in their performance when an “imperfect” geometric normalization procedure is used to align the 3D face scans and that in these situations GMMs should be preferred. Moreover, several possibilities to improve the performance and robustness of the classical GMM framework are presented and evaluated: i) explicit inclusion of spatial information, during the GMM construction procedure, ii) implicit inclusion of spatial information during the GMM construction procedure and iii) on-line evaluation and possible rejection of local feature vectors based on their likelihood. We successfully demonstrate the feasibility of the proposed modifications on the Face Recognition Grand Challenge data set.}, keywords = {}, pubstate = {published}, tppubtype = {article} } This paper focuses on the use of Gaussian Mixture models (GMM) for 3D face verification. A special interest is taken in practical aspects of 3D face verification systems, where all steps of the verification procedure need to be automated and no meta-data, such as pre-annotated eye/nose/mouth positions, is available to the system. In such settings the performance of the verification system correlates heavily with the performance of the employed alignment (i.e., geometric normalization) procedure. We show that popular holistic as well as local recognition techniques, such as principal component analysis (PCA), or Scale-invariant feature transform (SIFT)-based methods considerably deteriorate in their performance when an “imperfect” geometric normalization procedure is used to align the 3D face scans and that in these situations GMMs should be preferred. Moreover, several possibilities to improve the performance and robustness of the classical GMM framework are presented and evaluated: i) explicit inclusion of spatial information, during the GMM construction procedure, ii) implicit inclusion of spatial information during the GMM construction procedure and iii) on-line evaluation and possible rejection of local feature vectors based on their likelihood. We successfully demonstrate the feasibility of the proposed modifications on the Face Recognition Grand Challenge data set. |
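Of the three modifications listed in this abstract, the explicit inclusion of spatial information (option i) is the easiest to sketch: each local feature vector is extended with the normalised position of its patch before the GMM is built, so the model also captures where on the face a feature occurs. Raw patch values stand in for real local features here, and the patch size and mixture settings are assumptions for illustration.

```python
import numpy as np
from sklearn.mixture import GaussianMixture

def local_features_with_coordinates(depth_image, patch=8):
    """Extract local features augmented with normalised (y, x) coordinates,
    a sketch of the explicit spatial-information variant of the GMM
    framework. Raw patch intensities stand in for real local features.
    """
    h, w = depth_image.shape
    feats = []
    for y in range(0, h - patch + 1, patch):
        for x in range(0, w - patch + 1, patch):
            vec = depth_image[y:y + patch, x:x + patch].ravel()
            feats.append(np.concatenate([vec, [y / h, x / w]]))
    return np.array(feats)

scan = np.random.default_rng(0).normal(size=(96, 96))   # toy range image
X = local_features_with_coordinates(scan)
client_model = GaussianMixture(n_components=16, covariance_type="diag").fit(X)
score = client_model.score(X)   # average log-likelihood, used as a match score
```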
Boštjan Vesnicer; Jerneja Žganec Gros; Nikola Pavešić; Vitomir Štruc Face recognition using simplified probabilistic linear discriminant analysis Journal Article In: International Journal of Advanced Robotic Systems, 9 , 2012. @article{vesnicer2012face, title = {Face recognition using simplified probabilistic linear discriminant analysis}, author = { Bo\v{s}tjan Vesnicer and Jerneja \v{Z}ganec Gros and Nikola Pave\v{s}i\'{c} and Vitomir \v{S}truc}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/InTech-Face_recognition_using_simplified_probabilistic_linear_discriminant_analysis-1.pdf}, doi = {10.5772/52258}, year = {2012}, date = {2012-01-01}, journal = {International Journal of Advanced Robotic Systems}, volume = {9}, publisher = {InTech}, abstract = {Face recognition in uncontrolled environments remains an open problem that has not been satisfactorily solved by existing recognition techniques. In this paper, we tackle this problem using a variant of the recently proposed Probabilistic Linear Discriminant Analysis (PLDA). We show that simplified versions of the PLDA model, which are regularly used in the field of speaker recognition, rely on certain assumptions that not only result in a simpler PLDA model, but also reduce the computational load of the technique and - as indicated by our experimental assessments - improve recognition performance. Moreover, we show that, contrary to the general belief that PLDA-based methods produce well calibrated verification scores, score normalization techniques can still deliver significant performance gains, but only if non-parametric score normalization techniques are employed. Last but not least, we demonstrate the competitiveness of the simplified PLDA model for face recognition by comparing our results with the state-of-the-art results from the literature obtained on the second version of the large-scale Face Recognition Grand Challenge (FRGC) database.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Face recognition in uncontrolled environments remains an open problem that has not been satisfactorily solved by existing recognition techniques. In this paper, we tackle this problem using a variant of the recently proposed Probabilistic Linear Discriminant Analysis (PLDA). We show that simplified versions of the PLDA model, which are regularly used in the field of speaker recognition, rely on certain assumptions that not only result in a simpler PLDA model, but also reduce the computational load of the technique and - as indicated by our experimental assessments - improve recognition performance. Moreover, we show that, contrary to the general belief that PLDA-based methods produce well calibrated verification scores, score normalization techniques can still deliver significant performance gains, but only if non-parametric score normalization techniques are employed. Last but not least, we demonstrate the competitiveness of the simplified PLDA model for face recognition by comparing our results with the state-of-the-art results from the literature obtained on the second version of the large-scale Face Recognition Grand Challenge (FRGC) database. |
2011 |
Vitomir Štruc; Nikola Pavešić Photometric normalization techniques for illumination invariance Book Chapter In: Zhang, Yu-Jin (Ed.): Advances in Face Image Analysis: Techniques and Technologies, pp. 279-300, IGI-Global, 2011. @inbook{IGI2011, title = {Photometric normalization techniques for illumination invariance}, author = {Vitomir \v{S}truc and Nikola Pave\v{s}i\'{c}}, editor = {Yu-Jin Zhang}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/LUKSreport.pdf}, doi = {10.4018/978-1-61520-991-0.ch015}, year = {2011}, date = {2011-01-01}, booktitle = {Advances in Face Image Analysis: Techniques and Technologies}, pages = {279-300}, publisher = {IGI-Global}, abstract = {Face recognition technology has come a long way since its beginnings in the previous century. Due to its countless application possibilities, it has attracted the interest of research groups from universities and companies around the world. Thanks to this enormous research effort, the recognition rates achievable with the state-of-the-art face recognition technology are steadily growing, even though some issues still pose major challenges to the technology. Amongst these challenges, coping with illumination-induced appearance variations is one of the biggest and still not satisfactorily solved. A number of techniques have been proposed in the literature to cope with the impact of illumination ranging from simple image enhancement techniques, such as histogram equalization, to more elaborate methods, such as anisotropic smoothing or the logarithmic total variation model. This chapter presents an overview of the most popular and efficient normalization techniques that try to solve the illumination variation problem at the preprocessing level. It assesses the techniques on the YaleB and XM2VTS databases and explores their strengths and weaknesses from the theoretical and implementation point of view.}, keywords = {}, pubstate = {published}, tppubtype = {inbook} } Face recognition technology has come a long way since its beginnings in the previous century. Due to its countless application possibilities, it has attracted the interest of research groups from universities and companies around the world. Thanks to this enormous research effort, the recognition rates achievable with the state-of-the-art face recognition technology are steadily growing, even though some issues still pose major challenges to the technology. Amongst these challenges, coping with illumination-induced appearance variations is one of the biggest and still not satisfactorily solved. A number of techniques have been proposed in the literature to cope with the impact of illumination ranging from simple image enhancement techniques, such as histogram equalization, to more elaborate methods, such as anisotropic smoothing or the logarithmic total variation model. This chapter presents an overview of the most popular and efficient normalization techniques that try to solve the illumination variation problem at the preprocessing level. It assesses the techniques on the YaleB and XM2VTS databases and explores their strengths and weaknesses from the theoretical and implementation point of view. |
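Histogram equalisation, named in this chapter as the simplest of the surveyed photometric normalization techniques, remaps pixel intensities through their empirical CDF so that the output histogram is roughly uniform, compensating for global illumination changes. A minimal NumPy sketch follows; the 8-bit grayscale input is an assumption.

```python
import numpy as np

def histogram_equalisation(image):
    """Histogram equalisation for an 8-bit grayscale image.

    Intensities are remapped through the normalised cumulative histogram
    (the empirical CDF), which spreads them over the full [0, 255] range.
    """
    hist = np.bincount(image.ravel(), minlength=256)
    cdf = hist.cumsum().astype(float)
    cdf = (cdf - cdf.min()) / (cdf.max() - cdf.min())   # normalise to [0, 1]
    lut = np.round(255 * cdf).astype(np.uint8)          # lookup table
    return lut[image]

# Toy face image with compressed dynamic range (values only up to 119).
face = np.random.default_rng(0).integers(0, 120, size=(128, 128), dtype=np.uint8)
normalised = histogram_equalisation(face)
print(face.max(), normalised.max())   # the equalised image spans up to 255
```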
Vitomir Štruc; Jerneja Žganec-Gros; Nikola Pavešić Principal directions of synthetic exact filters for robust real-time eye localization Conference Proceedings of the COST workshop on Biometrics and Identity Management (BioID), 6583/2011 , Lecture Notes on Computer Science Springer-Verlag, Berlin, Heidelberg, 2011. @conference{BioID_Struc_2011, title = {Principal directions of synthetic exact filters for robust real-time eye localization}, author = {Vitomir \v{S}truc and Jerneja \v{Z}ganec-Gros and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/BioID.pdf}, doi = {10.1007/978-3-642-19530-3_17}, year = {2011}, date = {2011-01-01}, booktitle = {Proceedings of the COST workshop on Biometrics and Identity Management (BioID)}, volume = {6583/2011}, pages = {180--192}, publisher = {Springer-Verlag}, address = {Berlin, Heidelberg}, series = {Lecture Notes on Computer Science}, abstract = {The alignment of the facial region with a predefined canonical form is one of the most crucial steps in a face recognition system. Most of the existing alignment techniques rely on the position of the eyes and, hence, require an efficient and reliable eye localization procedure. In this paper we propose a novel technique for this purpose, which exploits a new class of correlation filters called Principal directions of Synthetic Exact Filters (PSEFs). The proposed filters represent a generalization of the recently proposed Average of Synthetic Exact Filters (ASEFs) and exhibit desirable properties, such as relatively short training times, computational simplicity, high localization rates and real time capabilities. We present the theory of PSEF filter construction, elaborate on their characteristics and finally develop an efficient procedure for eye localization using several PSEF filters. We demonstrate the effectiveness of the proposed class of correlation filters for the task of eye localization on facial images from the FERET database and show that for the tested task they outperform the established Haar cascade object detector as well as the ASEF correlation filters.}, keywords = {}, pubstate = {published}, tppubtype = {conference} } The alignment of the facial region with a predefined canonical form is one of the most crucial steps in a face recognition system. Most of the existing alignment techniques rely on the position of the eyes and, hence, require an efficient and reliable eye localization procedure. In this paper we propose a novel technique for this purpose, which exploits a new class of correlation filters called Principal directions of Synthetic Exact Filters (PSEFs). The proposed filters represent a generalization of the recently proposed Average of Synthetic Exact Filters (ASEFs) and exhibit desirable properties, such as relatively short training times, computational simplicity, high localization rates and real time capabilities. We present the theory of PSEF filter construction, elaborate on their characteristics and finally develop an efficient procedure for eye localization using several PSEF filters. We demonstrate the effectiveness of the proposed class of correlation filters for the task of eye localization on facial images from the FERET database and show that for the tested task they outperform the established Haar cascade object detector as well as the ASEF correlation filters. |
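The ASEF filters that PSEFs generalise are built from per-image "exact" filters: for each training image, the Fourier-domain ratio of a desired correlation output (a narrow Gaussian peak at the annotated eye) to the image defines one exact filter, and averaging them gives ASEF. The sketch below shows that starting point only; PSEF would replace the average with principal directions of the exact filters (not shown). The regularisation constant and the toy data are assumptions.

```python
import numpy as np

def train_asef(images, targets, eps=1e-3):
    """Average of Synthetic Exact Filters, the starting point of PSEF.

    Each exact filter is the regularised Fourier-domain ratio of the desired
    correlation output (Gaussian peak on the eye) to its training image.
    """
    filters = []
    for img, target in zip(images, targets):
        F = np.fft.fft2(img)
        G = np.fft.fft2(target)
        filters.append(G * np.conj(F) / (F * np.conj(F) + eps))
    return np.mean(filters, axis=0)          # the averaged filter

def locate_eye(image, H):
    """Correlate the image with the trained filter and return the peak."""
    response = np.real(np.fft.ifft2(np.fft.fft2(image) * H))
    return np.unravel_index(response.argmax(), response.shape)

# Toy demo: random "images" with a shared Gaussian target peaked at (32, 40).
rng = np.random.default_rng(0)
imgs = rng.normal(size=(10, 64, 64))
ys, xs = np.mgrid[0:64, 0:64]
tgt = np.exp(-((ys - 32) ** 2 + (xs - 40) ** 2) / (2 * 2.0 ** 2))
H = train_asef(imgs, [tgt] * 10)
print(locate_eye(imgs[0], H))                # peak location of the response
```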
2010 |
Vitomir Štruc; Nikola Pavešić Face recognition from color images using sparse projection analysis Conference Proceedings of the 7th International Conference on Image Analysis and Recognition (ICIAR 2010), Povoa de Varzim, Portugal, 2010. @conference{ICIAR2010_Sparse, title = {Face recognition from color images using sparse projection analysis}, author = {Vitomir \v{S}truc and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/ICIAR2010_1.pdf}, year = {2010}, date = {2010-06-01}, booktitle = {Proceedings of the 7th International Conference on Image Analysis and Recognition (ICIAR 2010)}, pages = {445-453}, address = {Povoa de Varzim, Portugal}, abstract = {The paper presents a novel feature extraction technique for face recognition which uses sparse projection axes to compute a low-dimensional representation of face images. The proposed technique derives the sparse axes by first recasting the problem of face recognition as a regression problem and then solving the new (under-determined) regression problem by computing the solution with minimum L1 norm. The developed technique, named Sparse Projection Analysis (SPA), is applied to color as well as grey-scale images from the XM2VTS database and compared to popular subspace projection techniques (with sparse and dense projection axes) from the literature. The results of the experimental assessment show that the proposed technique ensures promising results on un-occluded as well as occluded images from the XM2VTS database.}, keywords = {}, pubstate = {published}, tppubtype = {conference} } The paper presents a novel feature extraction technique for face recognition which uses sparse projection axes to compute a low-dimensional representation of face images. The proposed technique derives the sparse axes by first recasting the problem of face recognition as a regression problem and then solving the new (under-determined) regression problem by computing the solution with minimum L1 norm. The developed technique, named Sparse Projection Analysis (SPA), is applied to color as well as grey-scale images from the XM2VTS database and compared to popular subspace projection techniques (with sparse and dense projection axes) from the literature. The results of the experimental assessment show that the proposed technique ensures promising results on un-occluded as well as occluded images from the XM2VTS database. |
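Editorial aside: the computational core the abstract refers to, finding the minimum-L1-norm solution of an under-determined linear system, can be posed as a linear program via the standard split x = u - v with u, v >= 0. A hedged scipy sketch of that generic step (my formulation, not the paper's solver):

    import numpy as np
    from scipy.optimize import linprog

    def min_l1_solution(A, b):
        # Solve min ||x||_1 subject to A x = b.
        m, n = A.shape
        c = np.ones(2 * n)                    # objective: sum(u) + sum(v)
        A_eq = np.hstack([A, -A])             # A u - A v = b
        res = linprog(c, A_eq=A_eq, b_eq=b, bounds=[(0, None)] * (2 * n))
        u, v = res.x[:n], res.x[n:]
        return u - v

    # Toy usage: 10 equations, 30 unknowns; the sparse solution is recovered.
    A = np.random.randn(10, 30)
    x_true = np.zeros(30); x_true[[3, 17]] = [1.0, -2.0]
    x_hat = min_l1_solution(A, A @ x_true)

In the paper, solutions of this kind supply the sparse projection axes; the sketch shows only the underlying L1 minimization.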
Janez Križaj; Vitomir Štruc; Nikola Pavešić Adaptation of SIFT Features for Robust Face Recognition Conference Proceedings of the 7th International Conference on Image Analysis and Recognition (ICIAR 2010), Povoa de Varzim, Portugal, 2010. @conference{ICIAR2010_Sift, title = {Adaptation of SIFT Features for Robust Face Recognition}, author = {Janez Kri\v{z}aj and Vitomir \v{S}truc and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/FSIFT.pdf}, year = {2010}, date = {2010-06-01}, booktitle = {Proceedings of the 7th International Conference on Image Analysis and Recognition (ICIAR 2010)}, pages = {394-404}, address = {Povoa de Varzim, Portugal}, abstract = {The Scale Invariant Feature Transform (SIFT) is an algorithm used to detect and describe scale-, translation- and rotation-invariant local features in images. The original SIFT algorithm has been successfully applied in general object detection and recognition tasks, panorama stitching and other applications. One of its more recent uses is face recognition, where it was shown to deliver encouraging results. SIFT-based face recognition techniques found in the literature rely heavily on the so-called keypoint detector, which locates interest points in the given image that are ultimately used to compute the SIFT descriptors. While these descriptors are known to be, among other things, (partially) invariant to illumination changes, the keypoint detector is not. Since varying illumination is one of the main issues affecting the performance of face recognition systems, the keypoint detector represents the main source of errors in face recognition systems relying on SIFT features. To overcome the presented shortcoming of SIFT-based methods, we present in this paper a novel face recognition technique that computes the SIFT descriptors at predefined (fixed) locations learned during the training stage. By doing so, it eliminates the need for keypoint detection on the test images and renders our approach more robust to illumination changes than related approaches from the literature. Experiments, performed on the Extended Yale B face database, show that the proposed technique compares favorably with several popular techniques from the literature in terms of performance.}, keywords = {}, pubstate = {published}, tppubtype = {conference} } The Scale Invariant Feature Transform (SIFT) is an algorithm used to detect and describe scale-, translation- and rotation-invariant local features in images. The original SIFT algorithm has been successfully applied in general object detection and recognition tasks, panorama stitching and other applications. One of its more recent uses is face recognition, where it was shown to deliver encouraging results. SIFT-based face recognition techniques found in the literature rely heavily on the so-called keypoint detector, which locates interest points in the given image that are ultimately used to compute the SIFT descriptors. While these descriptors are known to be, among other things, (partially) invariant to illumination changes, the keypoint detector is not. Since varying illumination is one of the main issues affecting the performance of face recognition systems, the keypoint detector represents the main source of errors in face recognition systems relying on SIFT features. To overcome the presented shortcoming of SIFT-based methods, we present in this paper a novel face recognition technique that computes the SIFT descriptors at predefined (fixed) locations learned during the training stage.
By doing so, it eliminates the need for keypoint detection on the test images and renders our approach more robust to illumination changes than related approaches from the literature. Experiments, performed on the Extended Yale B face database, show that the proposed technique compares favorably with several popular techniques from the literature in terms of performance. |
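Editorial aside: the paper's central trick, computing SIFT descriptors at fixed rather than detected locations, is easy to illustrate with OpenCV, whose SIFT implementation accepts user-supplied keypoints. A sketch under my own assumptions: a regular grid stands in for the locations the paper learns during training, and the file name is hypothetical.

    import cv2

    def fixed_location_sift(gray, locations, patch_size=16.0):
        # Describe the image at predefined (x, y) positions, skipping detection.
        keypoints = [cv2.KeyPoint(float(x), float(y), patch_size) for x, y in locations]
        _, descriptors = cv2.SIFT_create().compute(gray, keypoints)
        return descriptors            # one 128-D SIFT descriptor per location

    img = cv2.imread("face.png", cv2.IMREAD_GRAYSCALE)      # hypothetical input
    grid = [(x, y) for y in range(16, img.shape[0] - 16, 16)
                   for x in range(16, img.shape[1] - 16, 16)]
    features = fixed_location_sift(img, grid)

Because no keypoint detector runs on the test image, the illumination sensitivity of the detection stage is avoided, which is exactly the failure mode the abstract identifies.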
Vitomir Štruc; Boštjan Vesnicer; France Mihelič; Nikola Pavešić Removing Illumination Artifacts from Face Images using the Nuisance Attribute Projection Conference Proceedings of the IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP'10), IEEE, Dallas, Texas, USA, 2010. @conference{ICASSP2010, title = {Removing Illumination Artifacts from Face Images using the Nuisance Attribute Projection}, author = {Vitomir \v{S}truc and Bo\v{s}tjan Vesnicer and France Miheli\v{c} and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/ICASSP2010.pdf}, doi = {10.1109/ICASSP.2010.5495203}, year = {2010}, date = {2010-03-01}, booktitle = {Proceedings of the IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP'10)}, pages = {846-849}, publisher = {IEEE}, address = {Dallas, Texas, USA}, abstract = {Illumination-induced appearance changes represent one of the open challenges in automated face recognition systems, still significantly influencing their performance. Several techniques have been presented in the literature to cope with this problem; however, a universal solution remains to be found. In this paper we present a novel normalization scheme based on the nuisance attribute projection (NAP), which tries to remove the effects of illumination by projecting away multiple dimensions of a low-dimensional illumination subspace. The technique is assessed in face recognition experiments performed on the extended YaleB and XM2VTS databases. Comparative results with state-of-the-art techniques show the competitiveness of the proposed technique.}, keywords = {}, pubstate = {published}, tppubtype = {conference} } Illumination-induced appearance changes represent one of the open challenges in automated face recognition systems, still significantly influencing their performance. Several techniques have been presented in the literature to cope with this problem; however, a universal solution remains to be found. In this paper we present a novel normalization scheme based on the nuisance attribute projection (NAP), which tries to remove the effects of illumination by projecting away multiple dimensions of a low-dimensional illumination subspace. The technique is assessed in face recognition experiments performed on the extended YaleB and XM2VTS databases. Comparative results with state-of-the-art techniques show the competitiveness of the proposed technique. |
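Editorial aside: the NAP operation itself is compact: estimate a low-dimensional nuisance subspace and project feature vectors onto its orthogonal complement. A minimal numpy sketch of the generic step (how the illumination subspace is estimated in the paper is not reproduced here):

    import numpy as np

    def nap_projection(nuisance_vectors, k):
        # nuisance_vectors: columns span the illumination variation, e.g.
        # differences between images of the same face under different lighting.
        U, _, _ = np.linalg.svd(nuisance_vectors, full_matrices=False)
        Uk = U[:, :k]                         # top-k nuisance directions
        return np.eye(Uk.shape[0]) - Uk @ Uk.T

    # Applying P to an image vector removes k dimensions of lighting variation:
    # x_clean = nap_projection(D, k) @ x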
Vitomir Štruc; Nikola Pavešić From Gabor Magnitude to Gabor Phase Features: Tackling the Problem of Face Recognition under Severe Illumination Changes Book Chapter In: Oravec, Milos (Ed.): Face Recognition, pp. 215-238, In-Tech, Vienna, 2010. @inbook{InTech2010, title = {From Gabor Magnitude to Gabor Phase Features: Tackling the Problem of Face Recognition under Severe Illumination Changes}, author = {Vitomir \v{S}truc and Nikola Pave\v{s}i\'{c}}, editor = {Milos Oravec}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/InTech.pdf}, doi = {10.5772/8938}, year = {2010}, date = {2010-01-01}, booktitle = {Face Recognition}, pages = {215-238}, publisher = {In-Tech}, address = {Vienna}, keywords = {}, pubstate = {published}, tppubtype = {inbook} } |
Vitomir Štruc; Nikola Pavešić The Complete Gabor-Fisher Classifier for Robust Face Recognition Journal Article In: EURASIP Journal on Advances in Signal Processing, 2010, pp. 26, 2010. @article{CGF-Struc_2010, title = {The Complete Gabor-Fisher Classifier for Robust Face Recognition}, author = {Vitomir \v{S}truc and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/ASP2010.pdf}, doi = {10.1155/2010/847680}, year = {2010}, date = {2010-01-01}, journal = {EURASIP Journal on Advances in Signal Processing}, volume = {2010}, pages = {26}, abstract = {This paper develops a novel face recognition technique called Complete Gabor Fisher Classifier (CGFC). Different from existing techniques that use Gabor filters for deriving the Gabor face representation, the proposed approach does not rely solely on Gabor magnitude information but effectively uses features computed based on Gabor phase information as well. It represents one of the few successful attempts found in the literature of combining Gabor magnitude and phase information for robust face recognition. The novelty of the proposed CGFC technique comes from (1) the introduction of a Gabor phase-based face representation and (2) the combination of the recognition technique using the proposed representation with classical Gabor magnitude-based methods into a unified framework. The proposed face recognition framework is assessed in a series of face verification and identification experiments performed on the XM2VTS, Extended YaleB, FERET, and AR databases. The results of the assessment suggest that the proposed technique clearly outperforms state-of-the-art face recognition techniques from the literature and that its performance is almost unaffected by the presence of partial occlusions of the facial area, changes in facial expression, or severe illumination changes.}, keywords = {}, pubstate = {published}, tppubtype = {article} } This paper develops a novel face recognition technique called Complete Gabor Fisher Classifier (CGFC). Different from existing techniques that use Gabor filters for deriving the Gabor face representation, the proposed approach does not rely solely on Gabor magnitude information but effectively uses features computed based on Gabor phase information as well. It represents one of the few successful attempts found in the literature of combining Gabor magnitude and phase information for robust face recognition. The novelty of the proposed CGFC technique comes from (1) the introduction of a Gabor phase-based face representation and (2) the combination of the recognition technique using the proposed representation with classical Gabor magnitude-based methods into a unified framework. The proposed face recognition framework is assessed in a series of face verification and identification experiments performed on the XM2VTS, Extended YaleB, FERET, and AR databases. The results of the assessment suggest that the proposed technique clearly outperforms state-of-the-art face recognition techniques from the literature and that its performance is almost unaffected by the presence of partial occlusions of the facial area, changes in facial expression, or severe illumination changes. |
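Editorial aside, for readers unfamiliar with the two information sources the CGFC combines: filtering with the even (cosine) and odd (sine) parts of a Gabor kernel yields a complex response whose modulus is the magnitude feature and whose argument is the phase feature. An OpenCV/numpy sketch of the standard 5-scale, 8-orientation construction; all parameter values are my assumptions, not the paper's:

    import cv2
    import numpy as np

    def gabor_responses(gray, scales=5, orientations=8, ksize=31, sigma=4.0):
        magnitudes, phases = [], []
        img = gray.astype(np.float32)
        for s in range(scales):
            lambd = 4.0 * (2.0 ** (s / 2.0))      # wavelength per scale (assumed)
            for t in range(orientations):
                theta = np.pi * t / orientations
                # Even and odd kernels together give the complex response.
                k_re = cv2.getGaborKernel((ksize, ksize), sigma, theta, lambd, 0.5, 0)
                k_im = cv2.getGaborKernel((ksize, ksize), sigma, theta, lambd, 0.5, np.pi / 2)
                re, im = cv2.filter2D(img, -1, k_re), cv2.filter2D(img, -1, k_im)
                magnitudes.append(np.sqrt(re ** 2 + im ** 2))   # Gabor magnitude
                phases.append(np.arctan2(im, re))               # Gabor phase
        return magnitudes, phases     # 40 responses for the usual 5x8 bank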
Norman Poh; Chi Ho Chan; Josef Kittler; Sebastien Marcel; Christopher Mc Cool; Enrique Argones Rua; Jose Luis Alba Castro; Mauricio Villegas; Roberto Paredes; Vitomir Štruc; others An evaluation of video-to-video face verification Journal Article In: IEEE Transactions on Information Forensics and Security, 5 (4), pp. 781–801, 2010. @article{poh2010evaluation, title = {An evaluation of video-to-video face verification}, author = {Poh, Norman and Chan, Chi Ho and Kittler, Josef and Marcel, Sebastien and Mc Cool, Christopher and Rua, Enrique Argones and Castro, Jose Luis Alba and Villegas, Mauricio and Paredes, Roberto and Struc, Vitomir and others}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/TIFS.pdf}, doi = {10.1109/TIFS.2010.2077627}, year = {2010}, date = {2010-01-01}, journal = {IEEE Transactions on Information Forensics and Security}, volume = {5}, number = {4}, pages = {781--801}, publisher = {IEEE}, abstract = {Person recognition using facial features, e.g., mug-shot images, has long been used in identity documents. However, due to the widespread use of web-cams and mobile devices embedded with a camera, it is now possible to realize facial video recognition, rather than resorting to just still images. In fact, facial video recognition offers many advantages over still image recognition; these include the potential of boosting the system accuracy and deterring spoof attacks. This paper presents an evaluation of person identity verification using facial video data, organized in conjunction with the International Conference on Biometrics (ICB 2009). It involves 18 systems submitted by seven academic institutes. These systems provide for a diverse set of assumptions, including feature representation and preprocessing variations, allowing us to assess the effect of adverse conditions, usage of quality information, query selection, and template construction for video-to-video face authentication.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Person recognition using facial features, e.g., mug-shot images, has long been used in identity documents. However, due to the widespread use of web-cams and mobile devices embedded with a camera, it is now possible to realize facial video recognition, rather than resorting to just still images. In fact, facial video recognition offers many advantages over still image recognition; these include the potential of boosting the system accuracy and deterring spoof attacks. This paper presents an evaluation of person identity verification using facial video data, organized in conjunction with the International Conference on Biometrics (ICB 2009). It involves 18 systems submitted by seven academic institutes. These systems provide for a diverse set of assumptions, including feature representation and preprocessing variations, allowing us to assess the effect of adverse conditions, usage of quality information, query selection, and template construction for video-to-video face authentication. |
Vitomir Štruc; Simon Dobrišek; Nikola Pavešić Confidence Weighted Subspace Projection Techniques for Robust Face Recognition in the Presence of Partial Occlusions Conference Proceedings of the International Conference on Pattern Recognition (ICPR'10), Istanbul, Turkey, 2010. @conference{ICPR_Struc_2010, title = {Confidence Weighted Subspace Projection Techniques for Robust Face Recognition in the Presence of Partial Occlusions}, author = {Vitomir \v{S}truc and Simon Dobri\v{s}ek and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/ICPR2010_CW.pdf}, year = {2010}, date = {2010-01-01}, booktitle = {Proceedings of the International Conference on Pattern Recognition (ICPR'10)}, pages = {1334-1338}, address = {Istanbul, Turkey}, keywords = {}, pubstate = {published}, tppubtype = {conference} } |
Rok Gajšek; Vitomir Štruc; France Mihelič Multi-modal Emotion Recognition using Canonical Correlations and Acoustic Features Conference Proceedings of the International Conference on Pattern Recognition (ICPR), IAPR Istanbul, Turkey, 2010. @conference{ICPR_Gajsek_2010, title = {Multi-modal Emotion Recognition using Canonical Correlations and Acoustic Features}, author = {Rok Gaj\v{s}ek and Vitomir \v{S}truc and France Miheli\v{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/ICPR2010_Emo.pdf}, year = {2010}, date = {2010-01-01}, booktitle = {Proceedings of the International Conference on Pattern Recognition (ICPR)}, pages = {4133-4136}, address = {Istanbul, Turkey}, organization = {IAPR}, abstract = {Information about the psycho-physical state of the subject is becoming a valuable addition to modern audio or video recognition systems. As well as enabling a better user experience, it can also assist in superior recognition accuracy of the base system. In the article, we present our approach to a multi-modal (audio-video) emotion recognition system. For the audio sub-system, a feature set comprising prosodic, spectral and cepstral features is selected and a support vector classifier is used to produce the scores for each emotional category. For the video sub-system, a novel approach is presented which does not rely on the tracking of specific facial landmarks and thus eliminates the problems usually caused when the tracking algorithm fails to detect the correct area. The system is evaluated on the eNTERFACE database and the recognition accuracy of our audio-video fusion is compared to published results from the literature.}, keywords = {}, pubstate = {published}, tppubtype = {conference} } Information about the psycho-physical state of the subject is becoming a valuable addition to modern audio or video recognition systems. As well as enabling a better user experience, it can also assist in superior recognition accuracy of the base system. In the article, we present our approach to a multi-modal (audio-video) emotion recognition system. For the audio sub-system, a feature set comprising prosodic, spectral and cepstral features is selected and a support vector classifier is used to produce the scores for each emotional category. For the video sub-system, a novel approach is presented which does not rely on the tracking of specific facial landmarks and thus eliminates the problems usually caused when the tracking algorithm fails to detect the correct area. The system is evaluated on the eNTERFACE database and the recognition accuracy of our audio-video fusion is compared to published results from the literature. |
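Editorial aside: the final step of a system like the one above is typically score-level fusion of the two sub-systems, in its simplest form a weighted sum of normalized per-class scores. A hedged sketch; min-max normalization and equal weights are my choices, while the paper itself compares several fusion techniques:

    import numpy as np

    def fuse_scores(audio_scores, video_scores, w=0.5):
        # Weighted-sum, score-level fusion of the two modalities.
        a = (audio_scores - audio_scores.min()) / max(np.ptp(audio_scores), 1e-12)
        v = (video_scores - video_scores.min()) / max(np.ptp(video_scores), 1e-12)
        fused = w * a + (1.0 - w) * v
        return int(np.argmax(fused))          # index of the predicted emotion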
Rok Gajšek; Vitomir Štruc; France Mihelič Multi-modal Emotion Recognition based on the Decoupling of Emotion and Speaker Information Conference Proceedings of Text, Speech and Dialogue (TSD), 6231/2010, Lecture Notes in Computer Science Springer-Verlag, Berlin, Heidelberg, 2010. @conference{TSD_Emo_Gajsek, title = {Multi-modal Emotion Recognition based on the Decoupling of Emotion and Speaker Information}, author = {Rok Gaj\v{s}ek and Vitomir \v{S}truc and France Miheli\v{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/TSDEmo.pdf}, year = {2010}, date = {2010-01-01}, booktitle = {Proceedings of Text, Speech and Dialogue (TSD)}, volume = {6231/2010}, pages = {275-282}, publisher = {Springer-Verlag}, address = {Berlin, Heidelberg}, series = {Lecture Notes in Computer Science}, abstract = {The standard features used in emotion recognition carry, besides the emotion-related information, also cues about the speaker. This is expected, since the nature of emotionally colored speech is similar to the variations in the speech signal, caused by different speakers. Therefore, we present a gradient-descent-derived transformation for the decoupling of emotion and speaker information contained in the acoustic features. The Interspeech ’09 Emotion Challenge feature set is used as the baseline for the audio part. A similar procedure is employed on the video signal, where the nuisance attribute projection (NAP) is used to derive the transformation matrix, which contains information about the emotional state of the speaker. Ultimately, different NAP transformation matrices are compared using canonical correlations. The audio and video sub-systems are combined at the matching score level using different fusion techniques. The presented system is assessed on the publicly available eNTERFACE’05 database where significant improvements in the recognition performance are observed when compared to the state-of-the-art baseline.}, keywords = {}, pubstate = {published}, tppubtype = {conference} } The standard features used in emotion recognition carry, besides the emotion-related information, also cues about the speaker. This is expected, since the nature of emotionally colored speech is similar to the variations in the speech signal, caused by different speakers. Therefore, we present a gradient-descent-derived transformation for the decoupling of emotion and speaker information contained in the acoustic features. The Interspeech ’09 Emotion Challenge feature set is used as the baseline for the audio part. A similar procedure is employed on the video signal, where the nuisance attribute projection (NAP) is used to derive the transformation matrix, which contains information about the emotional state of the speaker. Ultimately, different NAP transformation matrices are compared using canonical correlations. The audio and video sub-systems are combined at the matching score level using different fusion techniques. The presented system is assessed on the publicly available eNTERFACE’05 database where significant improvements in the recognition performance are observed when compared to the state-of-the-art baseline. |
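Editorial aside: comparing transformation matrices "using canonical correlations", as the abstract puts it, amounts to computing the cosines of the principal angles between the column spaces of the two matrices. A short numpy sketch of that standard computation (illustrative, not the paper's code):

    import numpy as np

    def canonical_correlations(A, B):
        # Cosines of principal angles between the column spaces of A and B;
        # values near 1 mean the two subspaces nearly coincide.
        Qa, _ = np.linalg.qr(A)
        Qb, _ = np.linalg.qr(B)
        return np.linalg.svd(Qa.T @ Qb, compute_uv=False)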
Vitomir Štruc; Jerneja Žganec-Gros; Nikola Pavešić Eye Localization using correlation filters Inproceedings In: Proceedings of the International Conference DOGS, pp. 188-191, Novi Sad, Serbia, 2010. @inproceedings{DOGS_Struc_2010, title = {Eye Localization using correlation filters}, author = {Vitomir \v{S}truc and Jerneja \v{Z}ganec-Gros and Nikola Pave\v{s}i\'{c}}, year = {2010}, date = {2010-01-01}, booktitle = {Proceedings of the International Conference DOGS}, pages = {188-191}, address = {Novi Sad, Serbia}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } |
2009 |
Vitomir Štruc; Rok Gajšek; Nikola Pavešić Principal Gabor Filters for Face Recognition Conference Proceedings of the 3rd IEEE International Conference on Biometrics: Theory, Systems and Applications (BTAS'09), IEEE, Washington D.C., U.S.A., 2009. @conference{BTAS2009, title = {Principal Gabor Filters for Face Recognition}, author = {Vitomir \v{S}truc and Rok Gaj\v{s}ek and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/BTAS.pdf}, doi = {10.1109/BTAS.2009.5339020}, year = {2009}, date = {2009-09-01}, booktitle = {Proceedings of the 3rd IEEE International Conference on Biometrics: Theory, Systems and Applications (BTAS'09)}, pages = {1-6}, publisher = {IEEE}, address = {Washington D.C., U.S.A.}, abstract = {Gabor filters have proven themselves to be a powerful tool for facial feature extraction. An abundance of recognition techniques presented in the literature exploits these filters to achieve robust face recognition. However, while exhibiting desirable properties, such as orientational selectivity or spatial locality, Gabor filters have also some shortcomings which crucially affect the characteristics and size of the Gabor representation of a given face pattern. Amongst these shortcomings, the fact that the filters are not orthogonal to one another and are, hence, correlated is probably the most important. This makes the information contained in the Gabor face representation redundant and also affects the size of the representation. To overcome this problem we propose in this paper to employ orthonormal linear combinations of the original Gabor filters rather than the filters themselves for deriving the Gabor face representation. The filters, named principal Gabor filters because they are computed by means of principal component analysis, are assessed in face recognition experiments performed on the XM2VTS and YaleB databases, where encouraging results are achieved.}, keywords = {}, pubstate = {published}, tppubtype = {conference} } Gabor filters have proven themselves to be a powerful tool for facial feature extraction. An abundance of recognition techniques presented in the literature exploits these filters to achieve robust face recognition. However, while exhibiting desirable properties, such as orientational selectivity or spatial locality, Gabor filters have also some shortcomings which crucially affect the characteristics and size of the Gabor representation of a given face pattern. Amongst these shortcomings, the fact that the filters are not orthogonal to one another and are, hence, correlated is probably the most important. This makes the information contained in the Gabor face representation redundant and also affects the size of the representation. To overcome this problem we propose in this paper to employ orthonormal linear combinations of the original Gabor filters rather than the filters themselves for deriving the Gabor face representation. The filters, named principal Gabor filters because they are computed by means of principal component analysis, are assessed in face recognition experiments performed on the XM2VTS and YaleB databases, where encouraging results are achieved. |
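Editorial aside: the paper's remedy for the correlated filter bank, orthonormal linear combinations of the original Gabor kernels obtained by principal component analysis, can be sketched directly with an SVD over the vectorized kernels. Details such as mean-centring are my assumptions:

    import numpy as np

    def principal_gabor_filters(bank):
        # bank: (n_filters, k, k) array of Gabor kernels, with n_filters <= k*k.
        n, h, w = bank.shape
        X = bank.reshape(n, -1)
        X = X - X.mean(axis=0)                # centre before PCA (assumed)
        # Rows of Vt are orthonormal combinations of the (centred) kernels.
        _, _, Vt = np.linalg.svd(X, full_matrices=False)
        return Vt.reshape(n, h, w)

Convolving with these "principal" filters yields a representation of the same size but without the redundancy the abstract attributes to the non-orthogonal original bank.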
Rok Gajšek; Vitomir Štruc; Simon Dobrišek; France Mihelič Emotion recognition using linear transformations in combination with video Conference Speech and intelligence: proceedings of Interspeech 2009, Brighton, UK, 2009. @conference{InterSp2009, title = {Emotion recognition using linear transformations in combination with video}, author = {Rok Gaj\v{s}ek and Vitomir \v{S}truc and Simon Dobri\v{s}ek and France Miheli\v{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/InSP.pdf}, year = {2009}, date = {2009-09-01}, booktitle = {Speech and intelligence: proceedings of Interspeech 2009}, pages = {1967-1970}, address = {Brighton, UK}, abstract = {The paper discusses the use of linear transformations of Hidden Markov Models, normally employed for speaker and environment adaptation, as a way of extracting the emotional components from speech. A constrained version of Maximum Likelihood Linear Regression (CMLLR) transformation is used as a feature for classification of normal or aroused emotional state. We present a procedure of incrementally building a set of speaker independent acoustic models, that are used to estimate the CMLLR transformations for emotion classification. An audio-video database of spontaneous emotions (AvID) is briefly presented since it forms the basis for the evaluation of the proposed method. Emotion classification using the video part of the database is also described and the added value of combining the visual information with the audio features is shown.}, keywords = {}, pubstate = {published}, tppubtype = {conference} } The paper discusses the use of linear transformations of Hidden Markov Models, normally employed for speaker and environment adaptation, as a way of extracting the emotional components from speech. A constrained version of Maximum Likelihood Linear Regression (CMLLR) transformation is used as a feature for classification of normal or aroused emotional state. We present a procedure of incrementally building a set of speaker independent acoustic models, that are used to estimate the CMLLR transformations for emotion classification. An audio-video database of spontaneous emotions (AvID) is briefly presented since it forms the basis for the evaluation of the proposed method. Emotion classification using the video part of the database is also described and the added value of combining the visual information with the audio features is shown. |
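Editorial aside: the classification stage the abstract describes, using estimated CMLLR transforms as features, reduces to flattening each utterance's affine transform [A|b] into a vector and training a standard classifier on those vectors. A scikit-learn sketch of that stage only; the transform estimation against HMM acoustic models is outside its scope, and all names are mine:

    import numpy as np
    from sklearn.svm import SVC

    def cmllr_to_feature(A, b):
        # Stack the affine transform parameters into a single feature vector.
        return np.hstack([A.ravel(), b.ravel()])

    # X: one flattened transform per utterance; y: 0 = neutral, 1 = aroused.
    # clf = SVC(kernel="linear").fit(X, y); clf.predict(X_test)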