@article{kravanja2016robust, title = {Robust Depth Image Acquisition Using Modulated Pattern Projection and Probabilistic Graphical Models}, author = { Jaka Kravanja and Mario \v{Z}ganec and Jerneja \v{Z}ganec-Gros and Simon Dobri\v{s}ek and Vitomir \v{S}truc}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/11/sensors-16-01740-1.pdf}, doi = {10.3390/s16101740}, year = {2016}, date = {2016-10-20}, journal = {Sensors}, volume = {16}, number = {10}, pages = {1740}, publisher = {Multidisciplinary Digital Publishing Institute}, abstract = {Depth image acquisition with structured light approaches in outdoor environments is a challenging problem due to external factors, such as ambient sunlight, which commonly affect the acquisition procedure. This paper presents a novel structured light sensor designed specifically for operation in outdoor environments. The sensor exploits a modulated sequence of structured light projected onto the target scene to counteract environmental factors and estimate a spatial distortion map in a robust manner. The correspondence between the projected pattern and the estimated distortion map is then established using a probabilistic framework based on graphical models. Finally, the depth image of the target scene is reconstructed using a number of reference frames recorded during the calibration process. 
We evaluate the proposed sensor on experimental data in indoor and outdoor environments and present comparative experiments with other existing methods, as well as commercial sensors.}, keywords = {3d imaging, 3d sensor, depth imaging, depth sensor, graphical models, modulated pattern projection, outdoor deployment, robust operation, Sensors, structured light}, pubstate = {published}, tppubtype = {article} } @conference{BTAS2016, title = {Report on the BTAS 2016 Video Person Recognition Evaluation}, author = {Walter Scheirer and Patrick Flynn and Changxing Ding and Guodong Guo and Vitomir \v{S}truc and Mohamad Al Jazaery and Simon Dobri\v{s}ek and Klemen Grm and Dacheng Tao and Yu Zhu and Joel Brogan and Sandipan Banerjee and Aparna Bharati and Brandon Richard Webster}, year = {2016}, date = {2016-10-05}, booktitle = {Proceedings of the IEEE International Conference on Biometrics: Theory, Applications and Systems (BTAS)}, publisher = {IEEE}, abstract = {This report presents results from the Video Person Recognition Evaluation held in conjunction with the 8th IEEE International Conference on Biometrics: Theory, Applications, and Systems (BTAS). Two experiments required algorithms to recognize people in videos from the Point-and-Shoot Face Recognition Challenge Problem (PaSC). The first consisted of videos from a tripod mounted high quality video camera. The second contained videos acquired from 5 different handheld video cameras. There were 1,401 videos in each experiment of 265 subjects. The subjects, the scenes, and the actions carried out by the people are the same in both experiments. An additional experiment required algorithms to recognize people in videos from the Video Database of Moving Faces and People (VDMFP). There were 958 videos in this experiment of 297 subjects. Four groups from around the world participated in the evaluation. 
The top verification rate for PaSC from this evaluation is 0.98 at a false accept rate of 0.01 \textemdash a remarkable advancement in performance from the competition held at FG 2015.}, keywords = {biometrics, competition, face recognition, group evaluation, PaSC, performance evaluation}, pubstate = {published}, tppubtype = {conference} } @inproceedings{ERK2016Janez, title = {Facial Landmark Localization from 3D Images}, author = {Janez Kri\v{z}aj and Simon Dobri\v{s}ek and France Miheli\v{c} and Vitomir \v{S}truc}, year = {2016}, date = {2016-09-20}, booktitle = {Proceedings of the Electrotechnical and Computer Science Conference (ERK)}, address = {Portoro\v{z}, Slovenia}, abstract = {A novel method for automatic facial landmark localization is presented. The method builds on the supervised descent framework, which was shown to successfully localize landmarks in the presence of large expression variations and mild occlusions, but struggles when localizing landmarks on faces with large pose variations. We propose an extension of the supervised descent framework which trains multiple descent maps and results in increased robustness to pose variations. The performance of the proposed method is demonstrated on the Bosphorus database for the problem of facial landmark localization from 3D data. 
Our experimental results show that the proposed method exhibits increased robustness to pose variations, while retaining high performance in the case of expression and occlusion variations.}, keywords = {3D face data, 3d landmarking, Bosphorus, face alignment, face image processing, facial landmarking, SDM, supervised descent framework}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{ERK2016_Seba, title = {Vpliv registracije obraznih podro\v{c}ij na u\v{c}inkovitost samodejnega razpoznavanja obrazov: \v{s}tudija z OpenBR}, author = {Sebastjan Fabijan and Vitomir \v{S}truc}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/erk_2016_08_22.pdf}, year = {2016}, date = {2016-09-20}, booktitle = {Proceedings of the Electrotechnical and Computer Science Conference (ERK)}, abstract = {Razpoznavanje obrazov je v zadnjih letih postalo eno najuspe\v{s}nej\v{s}ih podro\v{c}ij samodejne, ra\v{c}unalni\v{s}ko podprte analize slik, ki se lahko pohvali z razli\v{c}nimi primeri uporabe v praksi. Enega klju\v{c}nih korakov za uspe\v{s}no razpoznavanje predstavlja poravnava obrazov na slikah. S poravnavo posku\v{s}amo zagotoviti neodvisnost razpoznavanja od sprememb zornih kotov pri zajemu slike, ki v slikovne podatke vna\v{s}ajo visoko stopnjo variabilnosti. V prispevku predstavimo tri postopke poravnavanja obrazov (iz literature) in prou\v{c}imo njihov vpliv na uspe\v{s}nost razpoznavanja s postopki, udejanjenimi v odprtokodnem programskem ogrodju Open Source Biometric Recognition (OpenBR). 
Vse poizkuse izvedemo na podatkovni zbirki Labeled Faces in the Wild (LFW).}, keywords = {4SF, biometrics, face alignment, face recognition, LFW, OpenBR, performance evaluation}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{ERK2016_sebastjan, title = {U\v{c}enje podobnosti v globokih nevronskih omre\v{z}jih za razpoznavanje obrazov}, author = {\v{Z}iga Str\v{z}inar and Klemen Grm and Vitomir \v{S}truc}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/erk_ziga_Vziga.pdf}, year = {2016}, date = {2016-09-20}, booktitle = {Proceedings of the Electrotechnical and Computer Science Conference (ERK)}, address = {Portoro\v{z}, Slovenia}, abstract = {U\v{c}enje podobnosti med pari vhodnih slik predstavlja enega najpopularnej\v{s}ih pristopov k razpoznavanju na podro\v{c}ju globokega u\v{c}enja. Pri tem pristopu globoko nevronsko omre\v{z}je na vhodu sprejme par slik (obrazov) in na izhodu vrne mero podobnosti med vhodnima slikama, ki jo je mo\v{c} uporabiti za razpoznavanje. Izra\v{c}un podobnosti je pri tem lahko v celoti udejanjen z globokim omre\v{z}jem, lahko pa se omre\v{z}je uporabi zgolj za izra\v{c}un predstavitve vhodnega para slik, preslikava iz izra\v{c}unane predstavitve v mero podobnosti pa se izvede z drugim, potencialno primernej\v{s}im modelom. V tem prispevku preizkusimo 5 razli\v{c}nih modelov za izvedbo preslikave med izra\v{c}unano predstavitvijo in mero podobnosti, pri \v{c}emer za poizkuse uporabimo lastno nevronsko omre\v{z}je. 
Rezultati na\v{s}ih eksperimentov na problemu razpoznavanja obrazov ka\v{z}ejo na pomembnost izbire primernega modela, saj so razlike med uspe\v{s}nostjo razpoznavanje od modela do modela precej\v{s}nje.}, keywords = {biometrics, CNN, deep learning, difference space, face verification, LFW, performance evaluation}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{SJDT, title = {Assessment of the Google Speech Application Programming Interface for Automatic Slovenian Speech Recognition}, author = {Simon Dobri\v{s}ek and David \v{C}efarin and Vitomir \v{S}truc and France Miheli\v{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/jtdh16-ulfe-luks-sd-final-pdfa.pdf}, year = {2016}, date = {2016-09-20}, booktitle = {Jezikovne Tehnologije in Digitalna Humanistika}, abstract = {Automatic speech recognizers are slowly maturing into technologies that enable humans to communicate more naturally and effectively with a variety of smart devices and information-communication systems. Large global companies such as Google, Microsoft, Apple, IBM and Baidu compete in developing the most reliable speech recognizers, supporting as many of the main world languages as possible. Due to the relatively small number of speakers, the support for the Slovenian spoken language is lagging behind, and among the major global companies only Google has recently supported our spoken language. The paper presents the results of our independent assessment of the Google speech-application programming interface for automatic Slovenian speech recognition. 
For the experiments, we used speech databases that are otherwise used for the development and assessment of Slovenian speech recognizers.}, keywords = {Google, performance evaluation, speech API, speech technologies}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{RibicERK2016, title = {Influence of alignment on ear recognition: case study on AWE Dataset}, author = {Metod Ribi\v{c} and \v{Z}iga Emer\v{s}i\v{c} and Vitomir \v{S}truc and Peter Peer }, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/Influence_of_Alignment_on_Ear_Recognitio.pdf}, year = {2016}, date = {2016-09-20}, booktitle = {Proceedings of the Electrotechnical and Computer Science Conference (ERK)}, pages = {131--134}, address = {Portoro\v{z}, Slovenia}, abstract = {Ear as a biometric modality presents a viable source for automatic human recognition. In recent years local description methods have been gaining on popularity due to their invariance to illumination and occlusion. However, these methods require that images are well aligned and preprocessed as good as possible. This causes one of the greatest challenges of ear recognition: sensitivity to pose variations. Recently, we presented Annotated Web Ears dataset that opens new challenges in ear recognition. In this paper we test the influence of alignment on recognition performance and prove that even with the alignment the database is still very challenging, even though the recognition rate is improved due to alignment. 
We also prove that more sophisticated alignment methods are needed to address the AWE dataset efficiently}, keywords = {AWE, AWE dataset, biometrics, ear alignment, ear recognition, image alignment, Ransac, SIFT}, pubstate = {published}, tppubtype = {inproceedings} } @article{kravanja2016exploiting, title = {Exploiting Spatio-Temporal Information for Light-Plane Labeling in Depth-Image Sensors Using Probabilistic Graphical Models}, author = { Jaka Kravanja and Mario \v{Z}ganec and Jerneja \v{Z}ganec-Gros and Simon Dobri\v{s}ek and Vitomir \v{S}truc}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/11/jaka_informatica_camera.pdf}, year = {2016}, date = {2016-03-30}, journal = {Informatica}, volume = {27}, number = {1}, pages = {67--84}, publisher = {Vilnius University Institute of Mathematics and Informatics}, abstract = {This paper proposes a novel approach to light plane labeling in depth-image sensors relying on “uncoded” structured light. The proposed approach adopts probabilistic graphical models (PGMs) to solve the correspondence problem between the projected and the detected light patterns. The procedure for solving the correspondence problem is designed to take the spatial relations between the parts of the projected pattern and prior knowledge about the structure of the pattern into account, but it also exploits temporal information to achieve reliable light-plane labeling. The procedure is assessed on a database of light patterns detected with a specially developed imaging sensor that, unlike most existing solutions on the market, was shown to work reliably in outdoor environments as well as in the presence of other identical (active) sensors directed at the same scene. 
The results of our experiments show that the proposed approach is able to reliably solve the correspondence problem and assign light-plane labels to the detected pattern with a high accuracy, even when large spatial discontinuities are present in the observed scene.}, keywords = {3d imaging, correspondence, depth imaging, depth sensing, depth sensor, graphical models, sensor, structured light}, pubstate = {published}, tppubtype = {article} } @conference{grm2016deep, title = {Deep pair-wise similarity learning for face recognition}, author = { Klemen Grm and Simon Dobri\v{s}ek and Vitomir \v{S}truc}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/IWBF_2016.pdf}, year = {2016}, date = {2016-01-01}, booktitle = {4th International Workshop on Biometrics and Forensics (IWBF)}, pages = {1--6}, organization = {IEEE}, abstract = {Recent advances in deep learning made it possible to build deep hierarchical models capable of delivering state-of-the-art performance in various vision tasks, such as object recognition, detection or tracking. For recognition tasks the most common approach when using deep models is to learn object representations (or features) directly from raw image-input and then feed the learned features to a suitable classifier. Deep models used in this pipeline are typically heavily parameterized and require enormous amounts of training data to deliver competitive recognition performance. Despite the use of data augmentation techniques, many application domains, predefined experimental protocols or specifics of the recognition problem limit the amount of available training data and make training an effective deep hierarchical model a difficult task. In this paper, we present a novel, deep pair-wise similarity learning (DPSL) strategy for deep models, developed specifically to overcome the problem of insufficient training data, and demonstrate its usage on the task of face recognition. 
Unlike existing (deep) learning strategies, DPSL operates on image-pairs and tries to learn pair-wise image similarities that can be used for recognition purposes directly instead of feature representations that need to be fed to appropriate classification techniques, as with traditional deep learning pipelines. Since our DPSL strategy assumes an image pair as the input to the learning procedure, the amount of training data available to train deep models is quadratic in the number of available training images, which is of paramount importance for models with a large number of parameters. We demonstrate the efficacy of the proposed learning strategy by developing a deep model for pose-invariant face recognition, called Pose-Invariant Similarity Index (PISI), and presenting comparative experimental results on the FERET and IJB-A datasets.}, keywords = {CNN, deep learning, face recognition, IJB-A, IWBF, performance evaluation, similarity learning}, pubstate = {published}, tppubtype = {conference} } @incollection{ERK2015Klemen, title = {The pose-invariant similarity index for face recognition}, author = {Klemen Grm and Simon Dobri\v{s}ek and Vitomir \v{S}truc }, year = {2015}, date = {2015-04-20}, booktitle = {Proceedings of the Electrotechnical and Computer Science Conference (ERK)}, address = {Portoro\v{z}, Slovenia}, keywords = {biometrics, CNN, deep learning, deep models, face verification, similarity learning}, pubstate = {published}, tppubtype = {incollection} } @conference{struc2015modest, title = {Modest face recognition}, author = { Vitomir \v{S}truc and Janez Kri\v{z}aj and Simon Dobri\v{s}ek}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/IWBF2015.pdf}, year = {2015}, date = {2015-01-01}, booktitle = {Proceedings of the International Workshop on Biometrics and Forensics (IWBF)}, pages = {1--6}, publisher = {IEEE}, abstract = {The facial imagery usually at the disposal for forensics investigations is commonly of a poor quality due to the 
unconstrained settings in which it was acquired. The captured faces are typically non-frontal, partially occluded and of a low resolution, which makes the recognition task extremely difficult. In this paper we try to address this problem by presenting a novel framework for face recognition that combines diverse features sets (Gabor features, local binary patterns, local phase quantization features and pixel intensities), probabilistic linear discriminant analysis (PLDA) and data fusion based on linear logistic regression. With the proposed framework a matching score for the given pair of probe and target images is produced by applying PLDA on each of the four feature sets independently - producing a (partial) matching score for each of the PLDA-based feature vectors - and then combining the partial matching results at the score level to generate a single matching score for recognition. We make two main contributions in the paper: i) we introduce a novel framework for face recognition that relies on probabilistic MOdels of Diverse fEature SeTs (MODEST) to facilitate the recognition process and ii) benchmark it against the existing state-of-the-art. 
We demonstrate the feasibility of our MODEST framework on the FRGCv2 and PaSC databases and present comparative results with the state-of-the-art recognition techniques, which demonstrate the efficacy of our framework.}, keywords = {biometrics, face verification, Gabor features, image descriptors, LBP, multi modality, PaSC, performance evaluation}, pubstate = {published}, tppubtype = {conference} } @conference{beveridge2015report, title = {Report on the FG 2015 video person recognition evaluation}, author = {Ross Beveridge and Hao Zhang and Bruce A Draper and Patrick J Flynn and Zhenhua Feng and Patrik Huber and Josef Kittler and Zhiwu Huang and Shaoxin Li and Yan Li and Vitomir \v{S}truc and Janez Kri\v{z}aj and others}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/fg2015videoEvalPreprint.pdf}, year = {2015}, date = {2015-01-01}, booktitle = {11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (IEEE FG)}, volume = {1}, pages = {1--8}, organization = {IEEE}, abstract = {This report presents results from the Video Person Recognition Evaluation held in conjunction with the 11th IEEE International Conference on Automatic Face and Gesture Recognition. Two experiments required algorithms to recognize people in videos from the Point-and-Shoot Face Recognition Challenge Problem (PaSC). The first consisted of videos from a tripod mounted high quality video camera. The second contained videos acquired from 5 different handheld video cameras. There were 1401 videos in each experiment of 265 subjects. The subjects, the scenes, and the actions carried out by the people are the same in both experiments. Five groups from around the world participated in the evaluation. The video handheld experiment was included in the International Joint Conference on Biometrics (IJCB) 2014 Handheld Video Face and Person Recognition Competition. 
The top verification rate from this evaluation is double that of the top performer in the IJCB competition. Analysis shows that the factor most affecting algorithm performance is the combination of location and action: where the video was acquired and what the person was doing.}, keywords = {biometrics, competition, face verification, FG, group evaluation, PaSC, performance evaluation}, pubstate = {published}, tppubtype = {conference} } @conference{justin2015speaker, title = {Speaker de-identification using diphone recognition and speech synthesis}, author = { Tadej Justin and Vitomir \v{S}truc and Simon Dobri\v{s}ek and Bo\v{s}tjan Vesnicer and Ivo Ip\v{s}i\'{c} and France Miheli\v{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/Deid2015.pdf}, year = {2015}, date = {2015-01-01}, booktitle = {11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (IEEE FG): DeID 2015}, volume = {4}, pages = {1--7}, organization = {IEEE}, abstract = {The paper addresses the problem of speaker (or voice) de-identification by presenting a novel approach for concealing the identity of speakers in their speech. The proposed technique first recognizes the input speech with a diphone recognition system and then transforms the obtained phonetic transcription into the speech of another speaker with a speech synthesis system. Due to the fact that a Diphone RecOgnition step and a sPeech SYnthesis step are used during the deidentification, we refer to the developed technique as DROPSY. With this approach the acoustical models of the recognition and synthesis modules are completely independent from each other, which ensures the highest level of input speaker deidentification. The proposed DROPSY-based de-identification approach is language dependent, text independent and capable of running in real-time due to the relatively simple computing methods used. 
When designing speaker de-identification technology two requirements are typically imposed on the deidentification techniques: i) it should not be possible to establish the identity of the speakers based on the de-identified speech, and ii) the processed speech should still sound natural and be intelligible. This paper, therefore, implements the proposed DROPSY-based approach with two different speech synthesis techniques (i.e., with the HMM-based and the diphone TD-PSOLA-based technique). The obtained de-identified speech is evaluated for intelligibility and evaluated in speaker verification experiments with a state-of-the-art (i-vector/PLDA) speaker recognition system. The comparison of both speech synthesis modules integrated in the proposed method reveals that both can efficiently de-identify the input speakers while still producing intelligible speech.}, keywords = {DEID, FG, speech deidentification, speech recognition, speech synthesis, speech technologies}, pubstate = {published}, tppubtype = {conference} } @conference{dobrivsek2015face, title = {Face recognition in the wild with the Probabilistic Gabor-Fisher Classifier}, author = { Simon Dobri\v{s}ek and Vitomir \v{S}truc and Janez Kri\v{z}aj and France Miheli\v{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/Bwild2015.pdf}, year = {2015}, date = {2015-01-01}, booktitle = {11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (IEEE FG): BWild 2015}, volume = {2}, pages = {1--6}, organization = {IEEE}, abstract = {The paper addresses the problem of face recognition in the wild. It introduces a novel approach to unconstrained face recognition that exploits Gabor magnitude features and a simplified version of the probabilistic linear discriminant analysis (PLDA). 
The novel approach, named Probabilistic Gabor-Fisher Classifier (PGFC), first extracts a vector of Gabor magnitude features from the given input image using a battery of Gabor filters, then reduces the dimensionality of the extracted feature vector by projecting it into a low-dimensional subspace and finally produces a representation suitable for identity inference by applying PLDA to the projected feature vector. The proposed approach extends the popular Gabor-Fisher Classifier (GFC) to a probabilistic setting and thus improves on the generalization capabilities of the GFC method. The PGFC technique is assessed in face verification experiments on the Point and Shoot Face Recognition Challenge (PaSC) database, which features real-world videos of subjects performing everyday tasks. Experimental results on this challenging database show the feasibility of the proposed approach, which improves on the best results on this database reported in the literature by the time of writing.}, keywords = {biometrics, BWild, FG, Gabor features, PaSC, plda, probabilistic Gabor Fisher classifier, probabilistic linear discriminant analysis}, pubstate = {published}, tppubtype = {conference} } @conference{justin2015development, title = {Development and Evaluation of the Emotional Slovenian Speech Database-EmoLUKS}, author = { Tadej Justin and Vitomir \v{S}truc and Janez \v{Z}ibert and France Miheli\v{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/tsd2015.pdf}, year = {2015}, date = {2015-01-01}, booktitle = {Proceedings of the International Conference on Text, Speech, and Dialogue (TSD)}, pages = {351--359}, organization = {Springer}, abstract = {This paper describes a speech database built from 17 Slovenian radio dramas. The dramas were obtained from the national radio-and-television station (RTV Slovenia) and were given at the university's disposal with an academic license for processing and annotating the audio material. 
The utterances of one male and one female speaker were transcribed, segmented and then annotated with emotional states of the speakers. The annotation of the emotional states was conducted in two stages with our own web-based application for crowd sourcing. The final (emotional) speech database consists of 1385 recordings of one male (975 recordings) and one female (410 recordings) speaker and contains labeled emotional speech with a total duration of around 1 hour and 15 minutes. The paper presents the two-stage annotation process used to label the data and demonstrates the usefulness of the employed annotation methodology. Baseline emotion recognition experiments are also presented. The reported results are presented with the un-weighted as well as weighted average recalls and precisions for 2-class and 7-class recognition experiments.}, keywords = {annotated data, dataset, dataset of emotional speech, EmoLUKS, emotional speech synthesis, speech synthesis, speech technologies, transcriptions}, pubstate = {published}, tppubtype = {conference} } @conference{cihan2015facial, title = {Facial Landmark Localization in Depth Images using Supervised Ridge Descent}, author = { Necati Cihan Camgoz and Vitomir \v{S}truc and Berk Gokberk and Lale Akarun and Ahmet Alp Kindiroglu}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/Camgoz_Facial_Landmark_Localization_ICCV_2015_paper.pdf}, year = {2015}, date = {2015-01-01}, booktitle = {Proceedings of the IEEE International Conference on Computer Vision Workshops (ICCVW): ChaLearn}, pages = {136--141}, abstract = {Supervised Descent Method (SDM) has proven successful in many computer vision applications such as face alignment, tracking and camera calibration. Recent studies which used SDM, achieved state-of-the-art performance on facial landmark localization in depth images [4]. 
In this study, we propose to use ridge regression instead of least squares regression for learning the SDM, and to change feature sizes in each iteration, effectively turning the landmark search into a coarse to fine process. We apply the proposed method to facial landmark localization on the Bosphorus 3D Face Database; using frontal depth images with no occlusion. Experimental results confirm that both ridge regression and using adaptive feature sizes improve the localization accuracy considerably.}, keywords = {3d landmarking, facial landmarking, landmark localization, landmarking, ridge regression, SDM}, pubstate = {published}, tppubtype = {conference} } @article{peer2014strategies, title = {Strategies for exploiting independent cloud implementations of biometric experts in multibiometric scenarios}, author = { Peter Peer and \v{Z}iga Emer\v{s}i\v{c} and Jernej Bule and Jerneja \v{Z}ganec-Gros and Vitomir \v{S}truc}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/585139-1.pdf}, doi = {10.1155/2014/585139}, year = {2014}, date = {2014-01-01}, journal = {Mathematical problems in engineering}, volume = {2014}, publisher = {Hindawi Publishing Corporation}, abstract = {Cloud computing represents one of the fastest growing areas of technology and offers a new computing model for various applications and services. This model is particularly interesting for the area of biometric recognition, where scalability, processing power, and storage requirements are becoming a bigger and bigger issue with each new generation of recognition technology. Next to the availability of computing resources, another important aspect of cloud computing with respect to biometrics is accessibility. 
Since biometric cloud services are easily accessible, it is possible to combine different existing implementations and design new multibiometric services that next to almost unlimited resources also offer superior recognition performance and, consequently, ensure improved security to its client applications. Unfortunately, the literature on the best strategies of how to combine existing implementations of cloud-based biometric experts into a multibiometric service is virtually nonexistent. In this paper, we try to close this gap and evaluate different strategies for combining existing biometric experts into a multibiometric cloud service. We analyze the (fusion) strategies from different perspectives such as performance gains, training complexity, or resource consumption and present results and findings important to software developers and other researchers working in the areas of biometrics and cloud computing. The analysis is conducted based on two biometric cloud services, which are also presented in the paper.}, keywords = {application, biometrics, cloud computing, face recognition, fingerprint recognition, fusion}, pubstate = {published}, tppubtype = {article} } @article{struc2014beyond, title = {Beyond parametric score normalisation in biometric verification systems}, author = { Vitomir \v{S}truc and Jerneja \v{Z}ganec-Gros and Bo\v{s}tjan Vesnicer and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/IET_Vito.pdf}, doi = {10.1049/iet-bmt.2013.0076}, year = {2014}, date = {2014-01-01}, journal = {IET Biometrics}, volume = {3}, number = {2}, pages = {62--74}, publisher = {IET}, abstract = {Similarity scores represent the basis for identity inference in biometric verification systems. However, because of the so-called miss-matched conditions across enrollment and probe samples and identity-dependent factors these scores typically exhibit statistical variations that affect the verification performance of biometric systems. 
To mitigate these variations, score normalisation techniques, such as the z-norm, the t-norm or the zt-norm, are commonly adopted. In this study, the authors study the problem of score normalisation in the scope of biometric verification and introduce a new class of non-parametric normalisation techniques, which make no assumptions regarding the shape of the distribution from which the scores are drawn (as the parametric techniques do). Instead, they estimate the shape of the score distribution and use the estimate to map the initial distribution to a common (predefined) distribution. Based on the new class of normalisation techniques they also develop a hybrid normalisation scheme that combines non-parametric and parametric techniques into hybrid two-step procedures. They evaluate the performance of the non-parametric and hybrid techniques in face-verification experiments on the FRGCv2 and SCFace databases and show that the non-parametric techniques outperform their parametric counterparts and that the hybrid procedure is not only feasible, but also retains some desirable characteristics from both the non-parametric and the parametric techniques.}, keywords = {biometrics, face verification, hybrid score normalization, score normalization, t-norm, tz-norm, z-norm, zt-norm}, pubstate = {published}, tppubtype = {article} } @article{emersic2014case, title = {A case study on multi-modal biometrics in the cloud}, author = { \v{Z}iga Emer\v{s}i\v{c} and Jernej Bule and Jerneja \v{Z}ganec-Gros and Vitomir \v{S}truc and Peter Peer}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/Emersic.pdf}, year = {2014}, date = {2014-01-01}, journal = {Electrotechnical Review}, volume = {81}, number = {3}, pages = {74}, publisher = {Elektrotehniski Vestnik}, abstract = {Cloud computing is particularly interesting for the area of biometric recognition, where scalability, availability and accessibility are important aspects. 
In this paper we try to evaluate different strategies for combining existing uni-modal (cloud-based) biometric experts into a multi-biometric cloud-service. We analyze several fusion strategies from the perspective of performance gains, training complexity and resource consumption and discuss the results of our analysis. The experimental evaluation is conducted based on two biometric cloud-services developed in the scope of the Competence Centere CLASS, a face recognition service and a fingerprint recognition service, which are also briefly described in the paper. The presented results are important to researchers and developers working in the area of biometric services for the cloud looking for easy solutions for improving the quality of their services. }, keywords = {cloud, cloud computing, face recognition, face verification, fingerprint verification, fingerprints, fusion}, pubstate = {published}, tppubtype = {article} } @conference{krivzaj2014feasibility, title = {A Feasibility Study on the Use of Binary Keypoint Descriptors for 3D Face Recognition}, author = { Janez Kri\v{z}aj and Vitomir \v{S}truc and France Miheli\v{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/MCPR2014.pdf}, year = {2014}, date = {2014-01-01}, booktitle = {Proceedings of the Mexican Conference on Pattern Recognition (MCPR)}, pages = {142--151}, organization = {Springer}, abstract = {Despite the progress made in the area of local image descriptors in recent years, virtually no literature is available on the use of more recent descriptors for the problem of 3D face recognition, such as BRIEF, ORB, BRISK or FREAK, which are binary in nature and, therefore, tend to be faster to compute and match, while requiring signi cantly less memory for storage than, for example, SIFT or SURF. In this paper, we try to close this gap and present a feasibility study on the use of these descriptors for 3D face recognition. 
Descriptors are evaluated on the three challenging 3D face image datasets, namely, the FRGC, UMB and CASIA. Our experiments show the binary descriptors ensure slightly lower veri cation rates than SIFT, comparable to those of the SURF descriptor, while being an order of magnitude faster than SIFT. The results suggest that the use of binary descriptors represents a viable alternative to the established descriptors.}, keywords = {3d face recognition, binary descriptors, biometrics, BRISK, CASIA, face verification, FREAK, FRGC, MCPR, ORB, performance evaluation, SIFT, SURF}, pubstate = {published}, tppubtype = {conference} } @inproceedings{krivzaj2014sift, title = {SIFT vs. FREAK: Assessing the usefulness of two keypoint descriptors for 3D face verification}, author = { Janez Kri\v{z}aj and Vitomir \v{S}truc and Simon Dobri\v{s}ek and Darijan Mar\v{c}eti\'{c} and Slobodan Ribari\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/MIPRO2014a.pdf}, year = {2014}, date = {2014-01-01}, booktitle = {37th International Convention on Information and Communication Technology, Electronics and Microelectronics (MIPRO) }, pages = {1336--1341}, address = {Opatija, Croatia}, organization = {Mipro}, abstract = {Many techniques in the area of 3D face recognition rely on local descriptors to characterize the surface-shape information around points of interest (or keypoints) in the 3D images. Despite the fact that a lot of advancements have been made in the area of keypoint descriptors over the last years, the literature on 3D-face recognition for the most part still focuses on established descriptors, such as SIFT and SURF, and largely neglects more recent descriptors, such as the FREAK descriptor. In this paper we try to bridge this gap and assess the usefulness of the FREAK descriptor for the task for 3D face recognition. Of particular interest to us is a direct comparison of the FREAK and SIFT descriptors within a simple verification framework. 
To evaluate our framework with the two descriptors, we conduct 3D face recognition experiments on the challenging FRGCv2 and UMBDB databases and show that the FREAK descriptor ensures a very competitive verification performance when compared to the SIFT descriptor, but at a fraction of the computational cost. Our results indicate that the FREAK descriptor is a viable alternative to the SIFT descriptor for the problem of 3D face verification and due to its binary nature is particularly useful for real-time recognition systems and verification techniques for low-resource devices such as mobile phones, tablets and alike.}, keywords = {3d face recognition, binary descriptors, face recognition, FREAK, performance comparison, performance evaluation, SIFT}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{marcetic2014experimental, title = {An experimental tattoo de-identification system for privacy protection in still images}, author = { Darijan Mar\v{c}eti\'{c} and Slobodan Ribari\'{c} and Vitomir \v{S}truc and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/mipro_tatoo.pdf}, year = {2014}, date = {2014-01-01}, booktitle = {37th International Convention on Information and Communication Technology, Electronics and Microelectronics (MIPRO)}, pages = {1288--1293}, publisher = {IEEE}, organization = {Mipro}, abstract = {An experimental tattoo de-identification system for privacy protection in still images is described in the paper. The system consists of the following modules: skin detection, region of interest detection, feature extraction, tattoo database, matching, tattoo detection, skin swapping, and quality evaluation. Two methods for tattoo localization are presented. The first is a simple ad-hoc method based only on skin colour. The second is based on skin colour, texture and SIFT features. 
The appearance of each tattoo area is de-identified in such a way that its skin colour and skin texture are similar to the surrounding skin area. Experimental results for still images in which tattoo location, distance, size, illumination, and motion blur have large variability are presented. The system is subjectively evaluated based on the results of tattoo localization, the level of privacy protection and the naturalness of the de-identified still images. The level of privacy protection is estimated based on the quality of the removal of the tattoo appearance and the concealment of its location. }, keywords = {computer vision, deidentification, MIPRO, privacy protection, tattoo deidentification}, pubstate = {published}, tppubtype = {inproceedings} } @conference{vesnicer2014incorporating, title = {Incorporating Duration Information into I-Vector-Based Speaker-Recognition Systems}, author = { Bo\v{s}tjan Vesnicer and Jerneja \v{Z}ganec-Gros and Simon Dobri\v{s}ek and Vitomir \v{S}truc}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/Odyssey.pdf}, year = {2014}, date = {2014-01-01}, booktitle = {Proceedings of Odyssey: The Speaker and Language Recognition Workshop}, pages = {241--248}, abstract = {Most of the existing literature on i-vector-based speaker recognition focuses on recognition problems, where i-vectors are extracted from speech recordings of sufficient length. The majority of modeling/recognition techniques therefore simply ignores the fact that the i-vectors are most likely estimated unreliably when short recordings are used for their computation. Only recently, were a number of solutions proposed in the literature to address the problem of duration variability, all treating the i-vector as a random variable whose posterior distribution can be parameterized by the posterior mean and the posterior covariance. 
In this setting the covariance matrix serves as a measure of uncertainty that is related to the length of the available recording. In contract to these solutions, we address the problem of duration variability through weighted statistics. We demonstrate in the paper how established feature transformation techniques regularly used in the area of speaker recognition, such as PCA or WCCN, can be modified to take duration into account. We evaluate our weighting scheme in the scope of the i-vector challenge organized as part of the Odyssey, Speaker and Language Recognition Workshop 2014 and achieve a minimal DCF of 0.280, which at the time of writing puts our approach in third place among all the participating institutions.}, keywords = {acustic features, biometrics, duration, duration modeling, i-vector, i-vector challenge, Odyssey, performance evaluation, speaker recognition, speech technologies}, pubstate = {published}, tppubtype = {conference} } @conference{beveridge2014ijcb, title = {The ijcb 2014 pasc video face and person recognition competition}, author = {Ross Beveridge and Hao Zhang and Patrick Flynn and Yooyoung Lee and Venice Erin Liong and Jiwen Lu and Marcus de Assis Angeloni and Tiago de Freitas Pereira and Haoxiang Li and Gang Hua and Vitomir \v{S}truc and Janez Kri\v{z}aj and Jonathon Phillips}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/IJCB2014.pdf}, year = {2014}, date = {2014-01-01}, booktitle = {Proceedings of the IEEE International Joint Conference on Biometrics (IJCB)}, pages = {1--8}, organization = {IEEE}, abstract = {The Point-and-Shoot Face Recognition Challenge (PaSC) is a performance evaluation challenge including 1401 videos of 265 people acquired with handheld cameras and depicting people engaged in activities with non-frontal head pose. This report summarizes the results from a competition using this challenge problem. 
In the Video-to-video Experiment a person in a query video is recognized by comparing the query video to a set of target videos. Both target and query videos are drawn from the same pool of 1401 videos. In the Still-to-video Experiment the person in a query video is to be recognized by comparing the query video to a larger target set consisting of still images. Algorithm performance is characterized by verification rate at a false accept rate of 0:01 and associated receiver operating characteristic (ROC) curves. Participants were provided eye coordinates for video frames. Results were submitted by 4 institutions: (i) Advanced Digital Science Center, Singapore; (ii) CPqD, Brasil; (iii) Stevens Institute of Technology, USA; and (iv) University of Ljubljana, Slovenia. Most competitors demonstrated video face recognition performance superior to the baseline provided with PaSC. The results represent the best performance to date on the handheld video portion of the PaSC.}, keywords = {biometrics, competition, face recognition, group evaluation, IJCB, PaSC, performance evaluation}, pubstate = {published}, tppubtype = {conference} } @inproceedings{krizajrobust, title = {Robust 3D face recognition using adapted statistical models}, author = {Janez Kri\v{z}aj and Simon Dobri\v{s}ek and Vitomir \v{S}truc and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/ERK2013b.pdf}, year = {2013}, date = {2013-09-20}, booktitle = {Proceedings of the Electrotechnical and Computer Science Conference (ERK'13)}, abstract = {The paper presents a novel framework to 3D face recognition that exploits region covariance matrices (RCMs), Gaussian mixture models (GMMs) and support vector machine (SVM) classifiers. The proposed framework first combines several 3D face representations at the feature level using RCM descriptors and then derives low-dimensional feature vectors from the computed descriptors with the unscented transform. 
By doing so, it enables computations in Euclidean space, and makes Gaussian mixture modeling feasible. Finally, a support vector classifier is used for identity inference. As demonstrated by our experimental results on the FRGCv2 and UMB databases, the proposed framework is highly robust and exhibits desirable characteristics such as an inherent mechanism for data fusion (through the RCMs), the ability to examine local as well as global structures of the face with the same descriptor, the ability to integrate domain-specific prior knowledge into the modeling procedure and consequently to handle missing or unreliable data. }, keywords = {3d face recognition, biometrics, covariance descriptor, face verification, FRGC, GMM, modeling, performance evaluation, region-covariance matrix}, pubstate = {published}, tppubtype = {inproceedings} } @article{EV_Struc_2013, title = {Zlivanje informacij za zanseljivo in robustno razpoznavanje obrazov}, author = {Vitomir \v{S}truc and Jerneja \v{Z}ganec-Gros and Nikola Pave\v{s}i\'{c} and Simon Dobri\v{s}ek}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/StrucEV2013.pdf}, year = {2013}, date = {2013-09-01}, journal = {Electrotechnical Review}, volume = {80}, number = {3}, pages = {1-12}, abstract = {The existing face recognition technology has reached a performance level where it is possible to deploy it in various applications providing they are capable of ensuring controlled conditions for the image acquisition procedure. However, the technology still struggles with its recognition performance when deployed in uncontrolled and unconstrained conditions. In this paper, we present a novel approach to face recognition designed specifically for these challenging conditions. The proposed approach exploits information fusion to achieve robustness. In the first step, the approach crops the facial region from each input image in three different ways. 
It then maps each of the three crops into one of four color representations and finally extracts several feature types from each of the twelve facial representations. The described procedure results in a total of thirty facial representations that are combined at the matching score level using a fusion approach based on linear logistic regression (LLR) to arrive at a robust decision regarding the identity of the subject depicted in the input face image. The presented approach was enlisted as a representative of the University of Ljubljana and Alpineon d.o.o. to the 2013 face-recognition competition that was held in conjunction with the IAPR International Conference on Biometrics and achieved the best overall recognition results among all competition participants. Here, we describe the basic characteristics of the approach, elaborate on the results of the competition and, most importantly, present some interesting findings made during our development work that are also of relevance to the research community working in the field of face recognition. }, keywords = {biometrics, face recognition, fusion, performance evaluation}, pubstate = {published}, tppubtype = {article} } @inproceedings{ERK2013_Struc, title = {Exploiting representation plurality for robust and efficient face recognition}, author = {Vitomir \v{S}truc and Jeneja \v{Z}ganec Gros and Simon Dobri\v{s}ek and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/ERK2013a.pdf}, year = {2013}, date = {2013-09-01}, booktitle = {Proceedings of the 22nd Intenational Electrotechnical and Computer Science Conference (ERK'13)}, volume = {vol. B}, pages = {121--124}, address = {Portoro\v{z}, Slovenia}, abstract = {The paper introduces a novel approach to face recognition that exploits plurality of representation to achieve robust face recognition. The proposed approach was submitted as a representative of the University of Ljubljana and Alpineon d.o.o. 
to the 2013 face recognition competition that was held in conjunction with the IAPR International Conference on Biometrics and achieved the best overall recognition results among all competition participants. Here, we describe the basic characteristics of the submitted approach, elaborate on the results of the competition and, most importantly, present some general findings made during our development work that are of relevance to the broader (face recognition) research community.}, keywords = {competition, erk, face recognition, face verification, group evaluation, ICB, mobile biometrics, MOBIO, performance evaluation}, pubstate = {published}, tppubtype = {inproceedings} } @conference{FG2013, title = {Combining 3D face representations using region covariance descriptors and statistical models}, author = {Janez Kri\v{z}aj and Vitomir \v{S}truc and Simon Dobri\v{s}ek}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/FG2013.pdf}, year = {2013}, date = {2013-05-01}, booktitle = {Proceedings of the IEEE International Conference on Automatic Face and Gesture Recognition and Workshops (IEEE FG), Workshop on 3D Face Biometrics}, publisher = {IEEE}, address = {Shanghai, China}, abstract = {The paper introduces a novel framework for 3D face recognition that capitalizes on region covariance descriptors and Gaussian mixture models. The framework presents an elegant and coherent way of combining multiple facial representations, while simultaneously examining all computed representations at various levels of locality. The framework first computes a number of region covariance matrices/descriptors from different sized regions of several image representations and then adopts the unscented transform to derive low-dimensional feature vectors from the computed descriptors. By doing so, it enables computations in the Euclidean space, and makes Gaussian mixture modeling feasible. 
In the last step a support vector machine classification scheme is used to make a decision regarding the identity of the modeled input 3D face image. The proposed framework exhibits several desirable characteristics, such as an inherent mechanism for data fusion/integration (through the region covariance matrices), the ability to examine the facial images at different levels of locality, and the ability to integrate domain-specific prior knowledge into the modeling procedure. We assess the feasibility of the proposed framework on the Face Recognition Grand Challenge version 2 (FRGCv2) database with highly encouraging results.}, keywords = {3d face recognition, biometrics, covariance descriptors, face recognition, face verification, FG, gaussian mixture models, GMM, unscented transform}, pubstate = {published}, tppubtype = {conference} } @article{dobrivsek2013towards, title = {Towards efficient multi-modal emotion recognition}, author = { Simon Dobri\v{s}ek and Rok Gaj\v{s}ek and France Miheli\v{c} and Nikola Pave\v{s}i\'{c} and Vitomir \v{S}truc}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/multimodel-emotion.pdf}, doi = {10.5772/54002}, year = {2013}, date = {2013-01-01}, journal = {International Journal of Advanced Robotic Systems}, volume = {10}, number = {53}, abstract = {The paper presents a multi-modal emotion recognition system exploiting audio and video (i.e., facial expression) information. The system first processes both sources of information individually to produce corresponding matching scores and then combines the computed matching scores to obtain a classification decision. For the video part of the system, a novel approach to emotion recognition, relying on image-set matching, is developed. 
The proposed approach avoids the need for detecting and tracking specific facial landmarks throughout the given video sequence, which represents a common source of error in video-based emotion recognition systems, and, therefore, adds robustness to the video processing chain. The audio part of the system, on the other hand, relies on utterance-specific Gaussian Mixture Models (GMMs) adapted from a Universal Background Model (UBM) via the maximum a posteriori probability (MAP) estimation. It improves upon the standard UBM-MAP procedure by exploiting gender information when building the utterance-specific GMMs, thus ensuring enhanced emotion recognition performance. Both the uni-modal parts as well as the combined system are assessed on the challenging multi-modal eNTERFACE'05 corpus with highly encouraging results. The developed system represents a feasible solution to emotion recognition that can easily be integrated into various systems, such as humanoid robots, smart surveillance systems and alike.}, keywords = {avid database, emotion recognition, facial expression recognition, multi modality, speech technologies}, pubstate = {published}, tppubtype = {article} } @article{peer2013building, title = {Building cloud-based biometric services}, author = { Peter Peer and Jernej Bule and Jerneja \v{Z}ganec Gros and Vitomir \v{S}truc}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/Peercloud.pdf}, year = {2013}, date = {2013-01-01}, journal = {Informatica}, volume = {37}, number = {2}, pages = {115}, publisher = {Slovenian Society Informatika/Slovensko drustvo Informatika}, abstract = {Over the next few years the amount of biometric data being at the disposal of various agencies and authentication service providers is expected to grow significantly. Such quantities of data require not only enormous amounts of storage but unprecedented processing power as well. 
To be able to face this future challenges more and more people are looking towards cloud computing, which can address these challenges quite effectively with its seemingly unlimited storage capacity, rapid data distribution and parallel processing capabilities. Since the available literature on how to implement cloud-based biometric services is extremely scarce, this paper capitalizes on the most important challenges encountered during the development work on biometric services, presents the most important standards and recommendations pertaining to biometric services in the cloud and ultimately, elaborates on the potential value of cloud-based biometric solutions by presenting a few existing (commercial) examples. In the final part of the paper, a case study on fingerprint recognition in the cloud and its integration into the e-learning environment Moodle is presented. }, keywords = {biometrics, cloud computing, development. SaaS, face recognition, fingerprint recognition}, pubstate = {published}, tppubtype = {article} } @article{kenk2013smart, title = {Smart surveillance technologies in border control}, author = { Vildana Suli\v{c} Kenk and Janez Kri\v{z}aj and Vitomir \v{S}truc and Simon Dobri\v{s}ek}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/Kenk.pdf}, year = {2013}, date = {2013-01-01}, journal = {European Journal of Law and Technology}, volume = {4}, number = {2}, abstract = {The paper addresses the technical and legal aspects of the existing and forthcoming intelligent ('smart') surveillance technologies that are (or are considered to be) employed in the border control application area. Such technologies provide a computerized decision-making support to border control authorities, and are intended to increase the reliability and efficiency of border control measures. However, the question that arises is how effective these technologies are, as well as at what price, economically, socially, and in terms of citizens' rights. 
The paper provides a brief overview of smart surveillance technologies in border control applications, especially those used for controlling cross-border traffic, discusses possible proportionality issues and privacy risks raised by the increasingly widespread use of such technologies, as well as good/best practises developed in this area. In a broader context, the paper presents the result of the research carried out as part of the SMART (Scalable Measures for Automated Recognition Technologies) project.}, keywords = {border control, proportionality, smart surveillance, surveillance, surveillance technology}, pubstate = {published}, tppubtype = {article} } @conference{vstruc2013patch, title = {Patch-wise low-dimensional probabilistic linear discriminant analysis for Face Recognition}, author = { Vitomir \v{S}truc and Nikola Pave\v{s}i\'{c} and Jerneja \v{Z}ganec-Gros and Bo\v{s}tjan Vesnicer}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/ICASSP2013.pdf}, doi = {10.1109/ICASSP.2013.6638075}, year = {2013}, date = {2013-01-01}, booktitle = {2013 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)}, pages = {2352--2356}, organization = {IEEE}, abstract = {The paper introduces a novel approach to face recognition based on the recently proposed low-dimensional probabilistic linear discriminant analysis (LD-PLDA). The proposed approach is specifically designed for complex recognition tasks, where highly nonlinear face variations are typically encountered. Such data variations are commonly induced by changes in the external illumination conditions, viewpoint changes or expression variations and represent quite a challenge even for state-of-the-art techniques, such as LD-PLDA. To overcome this problem, we propose here a patch-wise form of the LDPLDA technique (i.e., PLD-PLDA), which relies on local image patches rather than the entire image to make inferences about the identity of the input images. 
The basic idea here is to decompose the complex face recognition problem into simpler problems, for which the linear nature of the LD-PLDA technique may be better suited. By doing so, several similarity scores are derived from one facial image, which are combined at the final stage using a simple sum-rule fusion scheme to arrive at a single score that can be employed for identity inference. We evaluate the proposed technique on experiment 4 of the Face Recognition Grand Challenge (FRGCv2) database with highly promising results.}, keywords = {biometrics, face verification, FRGC, ICASSP, patch-wise approach, plda, probabilistic linear discriminant analysis}, pubstate = {published}, tppubtype = {conference} } @conference{gunther20132013, title = {The 2013 face recognition evaluation in mobile environment}, author = {Manuel G\"{u}nther and Artur Costa-Pazo and Changxing Ding and Elhocine Boutellaa and Giovani Chiachia and Honglei Zhang and Marcus de Assis Angeloni and Vitomir \v{S}truc and Elie Khoury and Esteban Vazquez-Fernandez and others}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/Gunther_ICB2013_2013.pdf}, year = {2013}, date = {2013-01-01}, booktitle = {Proceedings of the IAPR International Conference on Biometrics (ICB)}, pages = {1--7}, organization = {IAPR}, abstract = {Automatic face recognition in unconstrained environments is a challenging task. To test current trends in face recognition algorithms, we organized an evaluation on face recognition in mobile environment. This paper presents the results of 8 different participants using two verification metrics. Most submitted algorithms rely on one or more of three types of features: local binary patterns, Gabor wavelet responses including Gabor phases, and color information. The best results are obtained from UNILJ-ALP, which fused several image representations and feature types, and UCHU, which learns optimal features with a convolutional neural network. 
Additionally, we assess the usability of the algorithms in mobile devices with limited resources.}, keywords = {biometrics, competition, face recognition, face verification, group evaluation, mobile biometrics, MOBIO, performance evaluation}, pubstate = {published}, tppubtype = {conference} } @article{Kri\v{z}aj-EV-2012, title = {Robust 3D Face Recognition}, author = {Janez Kri\v{z}aj and Vitomir \v{S}truc and Simon Dobri\v{s}ek}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/KrizajEV.pdf}, year = {2012}, date = {2012-06-01}, journal = {Electrotechnical Review}, volume = {79}, number = {1-2}, pages = {1-6}, abstract = {Face recognition in uncontrolled environments is hindered by variations in illumination, pose, expression and occlusions of faces. Many practical face-recognition systems are affected by these variations. One way to increase the robustness to illumination and pose variations is to use 3D facial images. In this paper 3D face-recognition systems are presented. Their structure and operation are described. The robustness of such systems to variations in uncontrolled environments is emphasized. We present some preliminary results of a system developed in our laboratory.}, keywords = {3d face recognition, biometrics, gaussian mixture models, GMM, modeling}, pubstate = {published}, tppubtype = {article} } @article{krizaj2012towards, title = {Towards robust 3D face verification using Gaussian mixture models}, author = { Janez Kri\v{z}aj and Vitomir \v{S}truc and Simon Dobri\v{s}ek}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/IntechJanez-1.pdf}, doi = {10.5772/52200}, year = {2012}, date = {2012-01-01}, journal = {International Journal of Advanced Robotic Systems}, volume = {9}, publisher = {InTech}, abstract = {This paper focuses on the use of Gaussian Mixture models (GMM) for 3D face verification. 
A special interest is taken in practical aspects of 3D face verification systems, where all steps of the verification procedure need to be automated and no meta-data, such as pre-annotated eye/nose/mouth positions, is available to the system. In such settings the performance of the verification system correlates heavily with the performance of the employed alignment (i.e., geometric normalization) procedure. We show that popular holistic as well as local recognition techniques, such as principal component analysis (PCA), or Scale-invariant feature transform (SIFT)-based methods considerably deteriorate in their performance when an “imperfect” geometric normalization procedure is used to align the 3D face scans and that in these situations GMMs should be preferred. Moreover, several possibilities to improve the performance and robustness of the classical GMM framework are presented and evaluated: i) explicit inclusion of spatial information, during the GMM construction procedure, ii) implicit inclusion of spatial information during the GMM construction procedure and iii) on-line evaluation and possible rejection of local feature vectors based on their likelihood. 
We successfully demonstrate the feasibility of the proposed modifications on the Face Recognition Grand Challenge data set.}, keywords = {}, pubstate = {published}, tppubtype = {article} } @article{vesnicer2012face, title = {Face recognition using simplified probabilistic linear discriminant analysis}, author = { Bostjan Vesnicer and Jerneja \v{Z}ganec Gros and Nikola Pave\v{s}i\'{c} and Vitomir \v{S}truc}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/InTech-Face_recognition_using_simplified_probabilistic_linear_discriminant_analysis-1.pdf}, doi = {10.5772/52258}, year = {2012}, date = {2012-01-01}, journal = {International Journal of Advanced Robotic Systems}, volume = {9}, publisher = {InTech}, abstract = {Face recognition in uncontrolled environments remains an open problem that has not been satisfactorily solved by existing recognition techniques. In this paper, we tackle this problem using a variant of the recently proposed Probabilistic Linear Discriminant Analysis (PLDA). We show that simplified versions of the PLDA model, which are regularly used in the field of speaker recognition, rely on certain assumptions that not only result in a simpler PLDA model, but also reduce the computational load of the technique and - as indicated by our experimental assessments - improve recognition performance. Moreover, we show that, contrary to the general belief that PLDA-based methods produce well calibrated verification scores, score normalization techniques can still deliver significant performance gains, but only if non-parametric score normalization techniques are employed. 
Last but not least, we demonstrate the competitiveness of the simplified PLDA model for face recognition by comparing our results with the state-of-the-art results from the literature obtained on the second version of the large-scale Face Recognition Grand Challenge (FRGC) database.}, keywords = {biometrics, face recognition, plda, simplified PLDA}, pubstate = {published}, tppubtype = {article} } @inbook{IGI2011, title = {Photometric normalization techniques for illumination invariance}, author = {Vitomir \v{S}truc and Nikola Pave\v{s}i\'{c}}, editor = {Yu-Jin Zhang}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/LUKSreport.pdf}, doi = {10.4018/978-1-61520-991-0.ch015}, year = {2011}, date = {2011-01-01}, booktitle = {Advances in Face Image Analysis: Techniques and Technologies}, pages = {279-300}, publisher = {IGI-Global}, abstract = {Face recognition technology has come a long way since its beginnings in the previous century. Due to its countless application possibilities, it has attracted the interest of research groups from universities and companies around the world. Thanks to this enormous research effort, the recognition rates achievable with the state-of-the-art face recognition technology are steadily growing, even though some issues still pose major challenges to the technology. Amongst these challenges, coping with illumination-induced appearance variations is one of the biggest and still not satisfactorily solved. A number of techniques have been proposed in the literature to cope with the impact of illumination ranging from simple image enhancement techniques, such as histogram equalization, to more elaborate methods, such as anisotropic smoothing or the logarithmic total variation model. This chapter presents an overview of the most popular and efficient normalization techniques that try to solve the illumination variation problem at the preprocessing level. 
It assesses the techniques on the YaleB and XM2VTS databases and explores their strengths and weaknesses from the theoretical and implementation point of view.}, keywords = {biometrics, face recognition, illumination invariance, illumination normalization, photometric normalization}, pubstate = {published}, tppubtype = {inbook} } @conference{BioID_Struc_2011, title = {Principal directions of synthetic exact filters for robust real-time eye localization}, author = {Vitomir \v{S}truc and Jerneja \v{Z}ganec-Gros and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/BioID.pdf}, doi = {10.1007/978-3-642-19530-3_17}, year = {2011}, date = {2011-01-01}, booktitle = {Proceedings of the COST workshop on Biometrics and Identity Management (BioID)}, volume = {6583/2011}, pages = {180--192}, publisher = {Springer-Verlag}, address = {Berlin, Heidelberg}, series = {Lecture Notes in Computer Science}, abstract = {The alignment of the facial region with a predefined canonical form is one of the most crucial steps in a face recognition system. Most of the existing alignment techniques rely on the position of the eyes and, hence, require an efficient and reliable eye localization procedure. In this paper we propose a novel technique for this purpose, which exploits a new class of correlation filters called Principal directions of Synthetic Exact Filters (PSEFs). The proposed filters represent a generalization of the recently proposed Average of Synthetic Exact Filters (ASEFs) and exhibit desirable properties, such as relatively short training times, computational simplicity, high localization rates and real time capabilities. We present the theory of PSEF filter construction, elaborate on their characteristics and finally develop an efficient procedure for eye localization using several PSEF filters. 
We demonstrate the effectiveness of the proposed class of correlation filters for the task of eye localization on facial images from the FERET database and show that for the tested task they outperform the established Haar cascade object detector as well as the ASEF correlation filters.}, keywords = {ASEF, correlation filters, eye localization, face image processing, landmark localization, landmarking, PSEF}, pubstate = {published}, tppubtype = {conference} } @conference{ICIAR2010_Sparse, title = {Face recognition from color images using sparse projection analysis}, author = {Vitomir \v{S}truc and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/ICIAR2010_1.pdf}, year = {2010}, date = {2010-06-01}, booktitle = {Proceedings of the 7th International Conference on Image Analysis and Recognition (ICIAR 2010)}, pages = {445--453}, address = {Povoa de Varzim, Portugal}, abstract = {The paper presents a novel feature extraction technique for face recognition which uses sparse projection axes to compute a low-dimensional representation of face images. The proposed technique derives the sparse axes by first recasting the problem of face recognition as a regression problem and then solving the new (under-determined) regression problem by computing the solution with minimum L1 norm. The developed technique, named Sparse Projection Analysis (SPA), is applied to color as well as grey-scale images from the XM2VTS database and compared to popular subspace projection techniques (with sparse and dense projection axes) from the literature. 
The results of the experimental assessment show that the proposed technique ensures promising results on un-occluded as well as occluded images from the XM2VTS database.}, keywords = {biometrics, face verification, ICIAR, performance evaluation, sparse projection analysis}, pubstate = {published}, tppubtype = {conference} } @conference{ICIAR2010_Sift, title = {Adaptation of SIFT Features for Robust Face Recognition}, author = {Janez Kri\v{z}aj and Vitomir \v{S}truc and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/FSIFT.pdf}, year = {2010}, date = {2010-06-01}, booktitle = {Proceedings of the 7th International Conference on Image Analysis and Recognition (ICIAR 2010)}, pages = {394--404}, address = {Povoa de Varzim, Portugal}, abstract = {The Scale Invariant Feature Transform (SIFT) is an algorithm used to detect and describe scale-, translation- and rotation-invariant local features in images. The original SIFT algorithm has been successfully applied in general object detection and recognition tasks, panorama stitching and others. One of its more recent uses also includes face recognition, where it was shown to deliver encouraging results. SIFT-based face recognition techniques found in the literature rely heavily on the so-called keypoint detector, which locates interest points in the given image that are ultimately used to compute the SIFT descriptors. While these descriptors are known to be among others (partially) invariant to illumination changes, the keypoint detector is not. Since varying illumination is one of the main issues affecting the performance of face recognition systems, the keypoint detector represents the main source of errors in face recognition systems relying on SIFT features. To overcome the presented shortcoming of SIFT-based methods, we present in this paper a novel face recognition technique that computes the SIFT descriptors at predefined (fixed) locations learned during the training stage. 
By doing so, it eliminates the need for keypoint detection on the test images and renders our approach more robust to illumination changes than related approaches from the literature. Experiments, performed on the Extended Yale B face database, show that the proposed technique compares favorably with several popular techniques from the literature in terms of performance.}, keywords = {biometrics, dense SIFT, face recognition, performance evaluation, SIFT, SIFT features}, pubstate = {published}, tppubtype = {conference} } @conference{ICASSP2010, title = {Removing Illumination Artifacts from Face Images using the Nuisance Attribute Projection}, author = {Vitomir \v{S}truc and Bo\v{s}tjan Vesnicer and France Miheli\v{c} and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/ICASSP2010.pdf}, doi = {10.1109/ICASSP.2010.5495203}, year = {2010}, date = {2010-03-01}, booktitle = {Proceedings of the IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP'10)}, pages = {846-849}, publisher = {IEEE}, address = {Dallas, Texas, USA}, abstract = {Illumination induced appearance changes represent one of the open challenges in automated face recognition systems still significantly influencing their performance. Several techniques have been presented in the literature to cope with this problem; however, a universal solution remains to be found. In this paper we present a novel normalization scheme based on the nuisance attribute projection (NAP), which tries to remove the effects of illumination by projecting away multiple dimensions of a low dimensional illumination subspace. The technique is assessed in face recognition experiments performed on the extended YaleB and XM2VTS databases. 
Comparative results with state-of-the-art techniques show the competitiveness of the proposed technique.}, keywords = {biometrics, face recognition, face verification, illumination changes, illumination invariance, nuisance attribute projection, robust recognition}, pubstate = {published}, tppubtype = {conference} } @inbook{InTech2010, title = {From Gabor Magnitude to Gabor Phase Features: Tackling the Problem of Face Recognition under Severe Illumination Changes}, author = {Vitomir \v{S}truc and Nikola Pave\v{s}i\'{c}}, editor = {Milos Oravec}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/InTech.pdf}, doi = {10.5772/8938}, year = {2010}, date = {2010-01-01}, booktitle = {Face Recognition}, pages = {215-238}, publisher = {In-Tech}, address = {Vienna}, keywords = {biometrics, face recognition, feature extraction, Gabor features, Gabor filters, illumination changes, phase features}, pubstate = {published}, tppubtype = {inbook} } @article{CGF-Struc_2010, title = {The Complete Gabor-Fisher Classifier for Robust Face Recognition}, author = {Vitomir \v{S}truc and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/ASP2010.pdf}, doi = {10.1155/2010/847680}, year = {2010}, date = {2010-01-01}, journal = {EURASIP Advances in Signal Processing}, volume = {2010}, pages = {26}, abstract = {This paper develops a novel face recognition technique called Complete Gabor Fisher Classifier (CGFC). Different from existing techniques that use Gabor filters for deriving the Gabor face representation, the proposed approach does not rely solely on Gabor magnitude information but effectively uses features computed based on Gabor phase information as well. It represents one of the few successful attempts found in the literature of combining Gabor magnitude and phase information for robust face recognition. 
The novelty of the proposed CGFC technique comes from (1) the introduction of a Gabor phase-based face representation and (2) the combination of the recognition technique using the proposed representation with classical Gabor magnitude-based methods into a unified framework. The proposed face recognition framework is assessed in a series of face verification and identification experiments performed on the XM2VTS, Extended YaleB, FERET, and AR databases. The results of the assessment suggest that the proposed technique clearly outperforms state-of-the-art face recognition techniques from the literature and that its performance is almost unaffected by the presence of partial occlusions of the facial area, changes in facial expression, or severe illumination changes.}, keywords = {biometrics, combined model, face recognition, feature extraction, Gabor features, phase features}, pubstate = {published}, tppubtype = {article} } @article{poh2010evaluation, title = {An evaluation of video-to-video face verification}, author = {Poh, Norman and Chan, Chi Ho and Kittler, Josef and Marcel, Sebastien and Mc Cool, Christopher and Rua, Enrique Argones and Castro, Jose Luis Alba and Villegas, Mauricio and Paredes, Roberto and Struc, Vitomir and others}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/TIFS.pdf}, doi = {10.1109/TIFS.2010.2077627}, year = {2010}, date = {2010-01-01}, journal = {IEEE Transactions on Information Forensics and Security}, volume = {5}, number = {4}, pages = {781--801}, publisher = {IEEE}, abstract = {Person recognition using facial features, e.g., mug-shot images, has long been used in identity documents. However, due to the widespread use of web-cams and mobile devices embedded with a camera, it is now possible to realize facial video recognition, rather than resorting to just still images. 
In fact, facial video recognition offers many advantages over still image recognition; these include the potential of boosting the system accuracy and deterring spoof attacks. This paper presents an evaluation of person identity verification using facial video data, organized in conjunction with the International Conference on Biometrics (ICB 2009). It involves 18 systems submitted by seven academic institutes. These systems provide for a diverse set of assumptions, including feature representation and preprocessing variations, allowing us to assess the effect of adverse conditions, usage of quality information, query selection, and template construction for video-to-video face authentication.}, keywords = {biometrics, competition, face recognition, face verification, group evaluation, video}, pubstate = {published}, tppubtype = {article} } @conference{ICPR_Struc_2010, title = {Confidence Weighted Subspace Projection Techniques for Robust Face Recognition in the Presence of Partial Occlusions}, author = {Vitomir \v{S}truc and Simon Dobri\v{s}ek and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/ICPR2010_CW.pdf}, year = {2010}, date = {2010-01-01}, booktitle = {Proceedings of the International Conference on Pattern Recognition (ICPR'10)}, pages = {1334--1338}, address = {Istanbul, Turkey}, keywords = {biometrics, face recognition, face verification, ICPR, performance evaluation, subspace projection}, pubstate = {published}, tppubtype = {conference} } @conference{ICPR_Gajsek_2010, title = {Multi-modal Emotion Recognition using Canonical Correlations and Acoustic Features}, author = {Rok Gaj\v{s}ek and Vitomir \v{S}truc and France Miheli\v{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/ICPR2010_Emo.pdf}, year = {2010}, date = {2010-01-01}, booktitle = {Proceedings of the International Conference on Pattern Recognition (ICPR)}, pages = {4133--4136}, address = {Istanbul, Turkey}, organization = {IAPR}, 
abstract = {The information of the psycho-physical state of the subject is becoming a valuable addition to the modern audio or video recognition systems. As well as enabling a better user experience, it can also assist in superior recognition accuracy of the base system. In the article, we present our approach to a multi-modal (audio-video) emotion recognition system. For audio sub-system, a feature set comprised of prosodic, spectral and cepstrum features is selected and support vector classifier is used to produce the scores for each emotional category. For video sub-system a novel approach is presented, which does not rely on the tracking of specific facial landmarks and thus, eliminates the problems usually caused, if the tracking algorithm fails at detecting the correct area. The system is evaluated on the eNTERFACE database and the recognition accuracy of our audio-video fusion is compared to the published results in the literature.}, keywords = {acoustic features, canonical correlations, emotion recognition, facial expression recognition, multi modality, speech processing, speech technologies}, pubstate = {published}, tppubtype = {conference} } @conference{TSD_Emo_Gajsek, title = {Multi-modal Emotion Recognition based on the Decoupling of Emotion and Speaker Information}, author = {Rok Gaj\v{s}ek and Vitomir \v{S}truc and France Miheli\v{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/TSDEmo.pdf}, year = {2010}, date = {2010-01-01}, booktitle = {Proceedings of Text, Speech and Dialogue (TSD)}, volume = {6231/2010}, pages = {275--282}, publisher = {Springer-Verlag}, address = {Berlin, Heidelberg}, series = {Lecture Notes in Computer Science}, abstract = {The standard features used in emotion recognition carry, besides the emotion related information, also cues about the speaker. This is expected, since the nature of emotionally colored speech is similar to the variations in the speech signal, caused by different speakers. 
Therefore, we present a gradient descent derived transformation for the decoupling of emotion and speaker information contained in the acoustic features. The Interspeech ’09 Emotion Challenge feature set is used as the baseline for the audio part. A similar procedure is employed on the video signal, where the nuisance attribute projection (NAP) is used to derive the transformation matrix, which contains information about the emotional state of the speaker. Ultimately, different NAP transformation matrices are compared using canonical correlations. The audio and video sub-systems are combined at the matching score level using different fusion techniques. The presented system is assessed on the publicly available eNTERFACE’05 database where significant improvements in the recognition performance are observed when compared to the state-of-the-art baseline. }, keywords = {emotion recognition, facial expression recognition, multi modality, speech processing, speech technologies, spontaneous emotions, video processing}, pubstate = {published}, tppubtype = {conference} } @inproceedings{DOGS_Struc_2010, title = {Eye Localization using correlation filters}, author = {Vitomir \v{S}truc and Jerneja \v{Z}ganec-Gros and Nikola Pave\v{s}i\'{c}}, year = {2010}, date = {2010-01-01}, booktitle = {Proceedings of the International Conference DOGS}, pages = {188--191}, address = {Novi Sad, Serbia}, keywords = {ASEF, correlation filters, eye localization, face image processing, landmark localization, PSEF}, pubstate = {published}, tppubtype = {inproceedings} } @conference{BTAS2009, title = {Principal Gabor Filters for Face Recognition}, author = {Vitomir \v{S}truc and Rok Gaj\v{s}ek and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/BTAS.pdf}, doi = {10.1109/BTAS.2009.5339020}, year = {2009}, date = {2009-09-01}, booktitle = {Proceedings of the 3rd IEEE International Conference on Biometrics: Theory, Systems and Applications (BTAS'09)}, pages = 
{1-6}, publisher = {IEEE}, address = {Washington D.C., U.S.A.}, abstract = {Gabor filters have proven themselves to be a powerful tool for facial feature extraction. An abundance of recognition techniques presented in the literature exploits these filters to achieve robust face recognition. However, while exhibiting desirable properties, such as orientational selectivity or spatial locality, Gabor filters have also some shortcomings which crucially affect the characteristics and size of the Gabor representation of a given face pattern. Amongst these shortcomings the fact that the filters are not orthogonal one to another and are, hence, correlated is probably the most important. This makes the information contained in the Gabor face representation redundant and also affects the size of the representation. To overcome this problem we propose in this paper to employ orthonormal linear combinations of the original Gabor filters rather than the filters themselves for deriving the Gabor face representation. 
The filters, named principal Gabor filters for the fact that they are computed by means of principal component analysis, are assessed in face recognition experiments performed on the XM2VTS and YaleB databases, where encouraging results are achieved.}, keywords = {biometrics, face verification, feature extraction, Gabor features, performance evaluation, principal Gabor filters}, pubstate = {published}, tppubtype = {conference} } @conference{InterSp2009, title = {Emotion recognition using linear transformations in combination with video}, author = {Rok Gaj\v{s}ek and Vitomir \v{S}truc and Simon Dobri\v{s}ek and France Miheli\v{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/InSP.pdf}, year = {2009}, date = {2009-09-01}, booktitle = {Speech and intelligence: proceedings of Interspeech 2009}, pages = {1967--1970}, address = {Brighton, UK}, abstract = {The paper discusses the usage of linear transformations of Hidden Markov Models, normally employed for speaker and environment adaptation, as a way of extracting the emotional components from the speech. A constrained version of Maximum Likelihood Linear Regression (CMLLR) transformation is used as a feature for classification of normal or aroused emotional state. We present a procedure of incrementally building a set of speaker independent acoustic models, that are used to estimate the CMLLR transformations for emotion classification. An audio-video database of spontaneous emotions (AvID) is briefly presented since it forms the basis for the evaluation of the proposed method. 
Emotion classification using the video part of the database is also described and the added value of combining the visual information with the audio features is shown.}, keywords = {emotion recognition, facial expression recognition, interspeech, speech, speech technologies, spontaneous emotions}, pubstate = {published}, tppubtype = {conference} } @inproceedings{ERK2009N, title = {Nuisance Attribute Projection in the Logarithm Domain for Face Recognition under Severe Illumination Changes}, author = {Vitomir \v{S}truc and Zongmin Ma and Nikola Pave\v{s}i\'{c}}, year = {2009}, date = {2009-09-01}, booktitle = {Proceedings of the IEEE International Electrotechnical and Computer Science Conference (ERK'09)}, pages = {279-281}, address = {Portoro\v{z}, Slovenia}, keywords = {biometrics, face verification, illumination changes, illumination invariance, nuisance attribute projection, performance evaluation, robust recognition}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{ERK2009S, title = {Face Recognition using Sparse Projection Axes}, author = {Vitomir \v{S}truc and Zongmin Ma and Nikola Pave\v{s}i\'{c}}, year = {2009}, date = {2009-09-01}, booktitle = {Proceedings of the IEEE International Electrotechnical and Computer Science Conference (ERK'09)}, pages = {271-274}, address = {Portoro\v{z}, Slovenia}, keywords = {biometrics, erk, face recognition, face verification, performance evaluation, sparse projection analysis}, pubstate = {published}, tppubtype = {inproceedings} } @conference{FSKD208b, title = {A comparative assessment of appearance based feature extraction techniques and their susceptibility to image degradations in face recognition systems}, author = {Vitomir \v{S}truc and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/ICMLPR.pdf}, year = {2009}, date = {2009-06-01}, booktitle = {Proceedings of the International Conference on Machine Learning and Pattern Recognition (ICMLPR'09)}, volume = 
{54}, pages = {326--334}, address = {Paris, France}, abstract = {Over the past decades, automatic face recognition has become a highly active research area, mainly due to the countless application possibilities in both the private as well as the public sector. Numerous algorithms have been proposed in the literature to cope with the problem of face recognition, nevertheless, a group of methods commonly referred to as appearance based have emerged as the dominant solution to the face recognition problem. Many comparative studies concerned with the performance of appearance based methods have already been presented in the literature, not rarely with inconclusive and often with contradictory results. No consensus has been reached within the scientific community regarding the relative ranking of the efficiency of appearance based methods for the face recognition task, let alone regarding their susceptibility to appearance changes induced by various environmental factors. To tackle these open issues, this paper assesses the performance of the three dominant appearance based methods: principal component analysis, linear discriminant analysis and independent component analysis, and compares them on equal footing (i.e., with the same preprocessing procedure, with optimized parameters for the best possible performance, etc.) in face verification experiments on the publicly available XM2VTS database. In addition to the comparative analysis on the XM2VTS database, ten degraded versions of the database are also employed in the experiments to evaluate the susceptibility of the appearance based methods to various image degradations which can occur in ``real-life'' operating conditions. 
Our experimental results suggest that linear discriminant analysis ensures the most consistent verification rates across the tested databases.}, keywords = {biometrics, face recognition, face verification, image degradations, performance evaluation}, pubstate = {published}, tppubtype = {conference} } @conference{Mathmod09, title = {A comparison of feature normalization techniques for PCA-based palmprint recognition}, author = {Vitomir \v{S}truc and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/MATHMOD.pdf}, year = {2009}, date = {2009-02-01}, booktitle = {Proceedings of the International Conference on Mathematical Modeling (MATHMOD'09)}, pages = {2450--2453}, address = {Vienna, Austria}, abstract = {Computing user templates (or models) for biometric authentication systems is one of the most crucial steps towards efficient and accurate biometric recognition. The constructed templates should encode user specific information extracted from a sample of a given biometric modality, such as, for example, palmprints, and exhibit a sufficient level of dissimilarity with other templates stored in the system's database. Clearly, the characteristics of the user templates depend on the approach employed for the extraction of biometric features, as well as on the procedure used to normalize the extracted feature vectors. While feature-extraction methods are a well studied topic, for which a vast amount of comparative studies can be found in the literature, normalization techniques lack such studies and are only briefly mentioned in most cases. In this paper we, therefore, apply several normalization techniques to feature vectors extracted from palmprint images by means of principal component analysis (PCA) and perform a comparative analysis on the results. 
We show that the choice of an appropriate normalization technique greatly influences the performance of the palmprint-based authentication system and can result in error rate reductions of more than 30%. }, keywords = {biometrics, face verification, feature normalization, normalization, pca, performance evaluation}, pubstate = {published}, tppubtype = {conference} } @conference{ISPRA09, title = {Image normalization techniques for robust face recognition}, author = {Vitomir \v{S}truc and Nikola Pave\v{s}i\'{c}}, year = {2009}, date = {2009-02-01}, booktitle = {Proceedings of the International Conference on Signal Processing, Robotics and Automation (ISPRA'09)}, pages = {155-160}, address = {Cambridge, UK}, keywords = {biometrics, face recognition, face verification, histogram remapping, performance evaluation, preprocessing}, pubstate = {published}, tppubtype = {conference} } @inbook{Springer2009, title = {Hand-Geometry Device}, author = {Vitomir \v{S}truc and Nikola Pave\v{s}i\'{c}}, editor = {Stan Z. Li}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/ENCYCLO.pdf}, doi = {10.1007/978-0-387-73003-5_14}, year = {2009}, date = {2009-01-01}, booktitle = {Encyclopedia of biometrics}, pages = {693-698}, publisher = {Springer-Verlag}, address = {New York}, abstract = {Hand-geometry devices are specially designed biometric devices used for capturing the geometric characteristics (e.g., the length, width, thickness and curvature of the fingers, the palm size, and the distances between joints) of a human hand for hand-geometry-based identity verification. A typical hand-geometry device records images of the lateral and dorsal parts of the hand with a charge-coupled device (CCD) camera that is mounted above a flat surface on which the person presented to the device places his/her hand. The set of geometrical features extracted from these images is then matched against a pre-recorded template stored in the device’s database. 
Depending on the result of this matching procedure, the identity of the person presented to the device is either verified or not.}, keywords = {biometrics, device, encyclopedia, hand geometry, sensor}, pubstate = {published}, tppubtype = {inbook} } @article{Inform-Struc_2009, title = {Gabor-based kernel-partial-least-squares discrimination features for face recognition}, author = {Vitomir \v{S}truc and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/INFO744.pdf}, year = {2009}, date = {2009-01-01}, journal = {Informatica (Vilnius)}, volume = {20}, number = {1}, pages = {115-138}, abstract = {The paper presents a novel method for the extraction of facial features based on the Gabor-wavelet representation of face images and the kernel partial-least-squares discrimination (KPLSD) algorithm. The proposed feature-extraction method, called the Gabor-based kernel partial-least-squares discrimination (GKPLSD), is performed in two consecutive steps. In the first step a set of forty Gabor wavelets is used to extract discriminative and robust facial features, while in the second step the kernel partial-least-squares discrimination technique is used to reduce the dimensionality of the Gabor feature vector and to further enhance its discriminatory power. For optimal performance, the KPLSD-based transformation is implemented using the recently proposed fractional-power-polynomial models. The experimental results based on the XM2VTS and ORL databases show that the GKPLSD approach outperforms feature-extraction methods such as principal component analysis (PCA), linear discriminant analysis (LDA), kernel principal component analysis (KPCA) or generalized discriminant analysis (GDA) as well as combinations of these methods with Gabor representations of the face images. 
Furthermore, as the KPLSD algorithm is derived from the kernel partial-least-squares regression (KPLSR) model it does not suffer from the small-sample-size problem, which is regularly encountered in the field of face recognition.}, keywords = {biometrics, face recognition, kernel partial least squares, kpca, lda, pca}, pubstate = {published}, tppubtype = {article} } @article{WSEAS-Struc_2009, title = {Histogram remapping as a preprocessing step for robust face recognition}, author = {Vitomir \v{S}truc and Janez \v{Z}ibert and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/28-891.pdf}, year = {2009}, date = {2009-01-01}, journal = {WSEAS transactions on information science and applications}, volume = {6}, number = {3}, pages = {520--529}, abstract = {Image preprocessing techniques represent an essential part of a face recognition system, which has a great impact on the performance and robustness of the recognition procedure. Amongst the number of techniques already presented in the literature, histogram equalization has emerged as the dominant preprocessing technique and is regularly used for the task of face recognition. With the property of increasing the global contrast of the facial image while simultaneously compensating for the illumination conditions present at the image acquisition stage, it represents a useful preprocessing step, which can ensure enhanced and more robust recognition performance. Even though, more elaborate normalization techniques, such as the multiscale retinex technique, isotropic and anisotropic smoothing, have been introduced to the field of face recognition, they have been found to be more of a complement than a real substitute for histogram equalization. 
However, by closer examining the characteristics of histogram equalization, one can quickly discover that it represents only a specific case of a more general concept of histogram remapping techniques (which may have similar characteristics as histogram equalization does). While histogram equalization remaps the histogram of a given facial image to a uniform distribution, the target distribution could easily be replaced with an arbitrary one. As there is no theoretical justification of why the uniform distribution should be preferred to other target distributions, the question arises: how do other (non-uniform) target distributions influence the face recognition process and are they better suited for the recognition task? To tackle these issues, we present in this paper an empirical assessment of the concept of histogram remapping with the following target distributions: the uniform, the normal, the lognormal and the exponential distribution. We perform comparative experiments on the publicly available XM2VTS and YaleB databases and conclude that similar or even better recognition results than those ensured by histogram equalization can be achieved when other (non-uniform) target distributions are considered for the histogram remapping. 
This enhanced performance, however, comes at a price, as the nonuniform distributions rely on some parameters which have to be trained or selected appropriately to achieve the optimal performance.}, keywords = {biometrics, face recognition, histogram, histogram remapping, image processing, preprocessing}, pubstate = {published}, tppubtype = {article} } @article{Inform-Gajsek_2009, title = {Multi-modal emotional database: AvID}, author = {Rok Gaj\v{s}ek and Vitomir \v{S}truc and France Miheli\v{c} and Anja Podlesek and Luka Komidar and Gregor So\v{c}an and Bo\v{s}tjan Bajec}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/avid.pdf}, year = {2009}, date = {2009-01-01}, journal = {Informatica (Ljubljana)}, volume = {33}, number = {1}, pages = {101--106}, abstract = {This paper presents our work on recording a multi-modal database containing emotional audio and video recordings. In designing the recording strategies a special attention was paid to gather data involving spontaneous emotions and therefore obtain a more realistic training and testing conditions for experiments. With specially planned scenarios including playing computer games and conducting an adaptive intelligence test different levels of arousal were induced. This will enable us to both detect different emotional states as well as experiment in speaker identification/verification of people involved in communications. 
So far the multi-modal database has been recorded and basic evaluation of the data was processed.}, keywords = {avid, database, dataset, emotion recognition, facial expression recognition, speech, speech technologies, spontaneous emotions}, pubstate = {published}, tppubtype = {article} } @article{EV-Struc_2009, title = {Using regression techniques for coping with the one-sample-size problem of face recognition}, author = {Vitomir \v{S}truc and Rok Gaj\v{s}ek and France Miheli\v{c} and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/EV2008.pdf}, year = {2009}, date = {2009-01-01}, journal = {Electrotechnical Review}, volume = {76}, number = {1-2}, pages = {7-12}, abstract = {There is a number of face recognition paradigms which ensure good recognition rates with frontal face images. However, the majority of them require an extensive training set and degrade in their performance when an insufficient number of training images is available. This is especially true for applications where only one image per subject is at hand for training. To cope with this one-sample-size (OSS) problem, we propose to employ subspace projection based regression techniques rather than modifications of the established face recognition paradigms, such as the principal component or linear discriminant analysis, as it was done in the past. Experiments performed on the XM2VTS and ORL databases show the effectiveness of the proposed approach. Also presented is a comparative assessment of several regression techniques and some popular face recognition methods. 
}, keywords = {biometrics, face recognition, one sample size problem, regression techniques, small sample size}, pubstate = {published}, tppubtype = {article} } @article{IET-Struc_2009, title = {Phase-congruency features for palm-print verification}, author = {Vitomir \v{S}truc and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/IET.pdf}, doi = {10.1049/iet-spr.2008.0152}, year = {2009}, date = {2009-01-01}, journal = {IET Signal Processing}, volume = {3}, number = {4}, pages = {258-268}, keywords = {biometrics, feature extraction, palmprint verification, palmprints, phase congruency features, recognition}, pubstate = {published}, tppubtype = {article} } @article{EV_2009_palms, title = {Gaussianization of image patches for efficient palmprint recognition}, author = {Vitomir \v{S}truc and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/EV2009.pdf}, year = {2009}, date = {2009-01-01}, journal = {Electrotechnical Review}, volume = {76}, number = {5}, pages = {245-250}, abstract = {In this paper we present a comparison of the two dominant image preprocessing techniques for palmprint recognition, namely, histogram equalization and mean-variance normalization. We show that both techniques pursue a similar goal and that the difference in recognition efficiency stems from the fact that not all assumptions underlying the mean-variance normalization approach are always met. We present an alternative justification of why histogram equalization ensures enhanced verification performance, and, based on the findings, propose two novel preprocessing techniques: gaussianization of the palmprint images and gaussianization of image patches. 
We present comparative results obtained on the PolyU database and show that the patch-based normalization technique ensures state-of-the-art recognition results with a simple feature extraction method and the nearest neighbor classifier.}, keywords = {biometrics, gaussianization, histogram remapping, palmprint recognition, palmprints, preprocessing}, pubstate = {published}, tppubtype = {article} } @conference{ICB2009, title = {Face Video Competition}, author = {Norman Poh and Chi Ho Chan and Josef Kittler and Sebastien Marcel and Christopher McCool and Enrique Argones-Rua and Jose Luis Alba-Castro and Mauricio Villegas and Roberto Paredes and Vitomir \v{S}truc and Nikola Pave\v{s}i\'{c} and Albert Ali Salah and Hui Fang and Nicholas Costen}, editor = {Massimo Tistarelli and Mark Nixon}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/ICB2009.pdf}, year = {2009}, date = {2009-01-01}, booktitle = {Proceedings of the International Conference on Biometrics (ICB)}, volume = {5558}, pages = {715--724}, publisher = {Springer-Verlag}, address = {Berlin, Heidelberg}, series = {Lecture Notes on Computer Science}, abstract = {Person recognition using facial features, e.g., mug-shot images, has long been used in identity documents. However, due to the widespread use of web-cams and mobile devices embedded with a camera, it is now possible to realise facial video recognition, rather than resorting to just still images. In fact, facial video recognition offers many advantages over still image recognition; these include the potential of boosting the system accuracy and deterring spoof attacks. This paper presents the first known benchmarking effort of person identity verification using facial video data. 
The evaluation involves 18 systems submitted by seven academic institutes.}, keywords = {biometrics, competition, face recognition, face verification, ICB, performance evaluation}, pubstate = {published}, tppubtype = {conference} } @conference{BioID_Multi2009, title = {Illumination Invariant Face Recognition by Non-Local Smoothing}, author = {Vitomir \v{S}truc and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/BIOID09.pdf}, year = {2009}, date = {2009-01-01}, booktitle = {Biometric ID management and multimodal communication}, volume = {5707}, pages = {1--8}, publisher = {Springer-Verlag}, address = {Berlin, Heidelberg}, series = {Lecture Notes on Computer Science}, abstract = {Existing face recognition techniques struggle with their performance when identities have to be determined (recognized) based on image data captured under challenging illumination conditions. To overcome the susceptibility of the existing techniques to illumination variations numerous normalization techniques have been proposed in the literature. These normalization techniques, however, still exhibit some shortcomings and, thus, offer room for improvement. In this paper we identify the most important weaknesses of the commonly adopted illumination normalization techniques and present two novel approaches which make use of the recently proposed non-local means algorithm. 
We assess the performance of the proposed techniques on the YaleB face database and report preliminary results.}, keywords = {biometrics, face verification, illumination changes, illumination invariance, illumination normalization, pca, preprocessing}, pubstate = {published}, tppubtype = {conference} } @conference{BioID_Multi2009b, title = {Combining audio and video for detection of spontaneous emotions}, author = {Rok Gaj\v{s}ek and Vitomir \v{S}truc and Simon Dobri\v{s}ek and Janez \v{Z}ibert and France Miheli\v{c} and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/BioID_R.pdf}, year = {2009}, date = {2009-01-01}, booktitle = {Biometric ID management and multimodal communication}, volume = {5707}, pages = {114-121}, publisher = {Springer-Verlag}, address = {Berlin, Heidelberg}, series = {Lecture Notes on Computer Science}, abstract = {The paper presents our initial attempts in building an audio video emotion recognition system. Both, audio and video sub-systems are discussed, and description of the database of spontaneous emotions is given. The task of labelling the recordings from the database according to different emotions is discussed and the measured agreement between multiple annotators is presented. Instead of focusing on the prosody in audio emotion recognition, we evaluate the possibility of using linear transformations (CMLLR) as features. 
The classification results from audio and video sub-systems are combined using sum rule fusion and the increase in recognition results, when using both modalities, is presented.}, keywords = {emotion recognition, facial expression recognition, performance evaluation, speech processing, speech technologies}, pubstate = {published}, tppubtype = {conference} } @conference{TSD2009, title = {Analysis and assessment of AvID: multi-modal emotional database}, author = {Rok Gaj\v{s}ek and Vitomir \v{S}truc and Bo\v{s}tjan Vesnicer and Anja Podlesek and Luka Komidar and France Miheli\v{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/TSD.pdf}, year = {2009}, date = {2009-01-01}, booktitle = {Text, speech and dialogue / 12th International Conference}, volume = {5729}, pages = {266-273}, publisher = {Springer-Verlag}, address = {Berlin, Heidelberg}, series = {Lecture Notes on Computer Science}, abstract = {The paper deals with the recording and the evaluation of a multi modal (audio/video) database of spontaneous emotions. Firstly, motivation for this work is given and different recording strategies used are described. Special attention is given to the process of evaluating the emotional database. Different kappa statistics normally used in measuring the agreement between annotators are discussed. Following the problems of standard kappa coefficients, when used in emotional database assessment, a new time-weighted free-marginal kappa is presented. It differs from the other kappa statistics in that it weights each utterance's particular score of agreement based on the duration of the utterance. 
The new method is evaluated and the superiority over the standard kappa, when dealing with a database of spontaneous emotions, is demonstrated.}, keywords = {avid database, database, emotion recognition, multimodal database, speech, speech technologies}, pubstate = {published}, tppubtype = {conference} } @conference{ICSPCS08, title = {The phase-based Gabor Fisher classifier and its application to face recognition under varying illumination conditions}, author = {Vitomir \v{S}truc and Bo\v{s}tjan Vesnicer and Nikola Pave\v{s}i\'{c}}, doi = {10.1109/ICSPCS.2008.4813663}, isbn = {978-1-4244-4243-0}, year = {2008}, date = {2008-12-01}, booktitle = {Proceedings of the IEEE International Conference on Signal Processing and Communication Systems (ICSPCS'08)}, pages = {1-6}, publisher = {IEEE}, address = {Gold Coast, Australia}, abstract = {The paper introduces a feature extraction technique for face recognition called the phase-based Gabor Fisher classifier (PBGFC). The PBGFC method constructs an augmented feature vector which encompasses Gabor-phase information derived from a novel representation of face images - the oriented Gabor phase congruency image (OGPCI) - and then applies linear discriminant analysis to the augmented feature vector to reduce its dimensionality. The feasibility of the proposed method was assessed in a series of face verification experiments performed on the XM2VTS database. The experimental results show that the PBGFC method performs better than other popular feature extraction techniques such as principal component analysis (PCA), the Fisherface method or the DCT-mod2 approach, while it ensures similar verification performance as the established Gabor Fisher Classifier (GFC). 
The results also show that the proposed phase-based Gabor Fisher classifier performs the best among the tested methods when severe illumination changes are introduced to the face images.}, keywords = {biometrics, face verification, feature extraction, Gabor features, performance evaluation, phase congruency features, phase features}, pubstate = {published}, tppubtype = {conference} } @conference{FSKD208, title = {The corrected normalized correlation coefficient: a novel way of matching score calculation for LDA-based face verification}, author = {Vitomir \v{S}truc and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/FSKD.pdf}, doi = {10.1109/FSKD.2008.334}, isbn = {978-0-7695-3305-6}, year = {2008}, date = {2008-10-01}, booktitle = {Proceedings of the IEEE International Conference on Fuzzy Systems and Knowledge Discovery (FSKD'08)}, volume = {4}, pages = {110-115}, publisher = {IEEE}, address = {Jinan, China}, abstract = {The paper presents a novel way of matching score calculation for LDA-based face verification. Different from the classical matching schemes, where the decision regarding the identity of the user currently presented to the face verification system is made based on the similarity (or distance) between the "live" feature vector and the template of the claimed identity, we propose to employ a measure we named the corrected normalized correlation coefficient, which considers both the similarity with the template of the claimed identity as well as the similarity with all other templates stored in the database. 
The effectiveness of the proposed measure was assessed on the publicly available XM2VTS database where encouraging results were achieved.}, keywords = {biometrics, face verification, lda, matching score calculation}, pubstate = {published}, tppubtype = {conference} } @inproceedings{PHD2008, title = {Regression techniques versus discriminative methods for face recognition}, author = {Vitomir \v{S}truc and France Miheli\v{c} and Rok Gaj\v{s}ek and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/IZOLA.pdf}, year = {2008}, date = {2008-10-01}, booktitle = {Proceedings of the 9th international PhD Workshop on Systems and Control}, pages = {1-5}, address = {Izola, Slovenia}, abstract = {In the field of face recognition it is generally believed that ”state of the art” recognition rates can only be achieved when discriminative (e.g., linear or generalized discriminant analysis) rather than expressive (e.g., principal or kernel principal component analysis) methods are used for facial feature extraction. However, while being superior in terms of the recognition rates, the discriminative techniques still exhibit some shortcomings when compared to the expressive approaches. More specifically, they suffer from the so-called small sample size (SSS) problem which is regularly encountered in the field of face recognition and occurs when the sample dimensionality is larger than the number of available training samples per subject. In this type of problems, the discriminative techniques need modifications in order to be feasible, but even in their most elaborate forms require at least two training samples per subject. The expressive approaches, on the other hand, are not susceptible to the SSS problem and are thus applicable even in the most extreme case of the small sample size problem, i.e., when only one training sample per subject is available. 
Nevertheless, in this paper we will show that the recognition performance of the expressive methods can match (or in some cases surpass) that of the discriminative techniques if the expressive feature extraction approaches are used as multivariate regression techniques with a pre-designed response matrix that encodes the class membership of the training samples. The effectiveness of the regression techniques for face recognition is demonstrated in a series of experiments performed on the ORL database. Additionally a comparative assessment of the regression techniques and popular discriminative approaches is presented.}, keywords = {biometrics, face recognition, face verification, modeling, performance evaluation, regression techniques}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{ERK2008, title = {Combining experts for improved face verification performance}, author = {Vitomir \v{S}truc and France Miheli\v{c} and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/ERK2008.pdf}, year = {2008}, date = {2008-09-01}, booktitle = {Proceedings of the IEEE International Electrotechnical and Computer Science Conference (ERK'08)}, pages = {233-236}, address = {Portoro\v{z}, Slovenia}, abstract = {Samodejno razpoznavanje (avtentikacija/identifikacija) obrazov predstavlja eno najaktivnej\v{s}ih raziskovalnih podro\v{c}ij biometrije. Avtentikacija oz. identifikacija oseb z razpoznavanjem obrazov ponuja mo\v{z}en na\v{c}in pove\v{c}anja varnosti pri razli\v{c}nih dejavnostih, (npr. pri elektronskem poslovanju na medmre\v{z}ju, pri ban\v{c}nih storitvah ali pri vstopu v dolo\v{c}ene prostore, stavbe in dr\v{z}ave). Ponuja univerzalen in nevsiljiv na\v{c}in razpoznavanja oseb, ki pa trenutno \v{s}e ni dovolj zanesljiv. 
Kot mo\v{z}na re\v{s}itev problema zanesljivosti razpoznavanja se v literaturi vse pogosteje pojavljajo ve\v{c}modalni pristopi, v katerih se razpoznavanje izvede na podlagi ve\v{c}jega \v{s}tevila postopkov razpoznavanja obrazov. V skladu z opisanim trendom, bomo v \v{c}lanku ovrednotili zanesljivost delovanja razli\v{c}nih postopkov razpoznavanja obrazov, ki jih bomo na koncu zdru\v{z}ili \v{s}e v ve\v{c}modalni pristop. S pomo\v{c}jo eksperimentov na podatkovni zbirki XM2VTS bomo preverili zanesljivost delovanja ve\v{c}modalnega pristopa in jo primerjali z zanesljivostjo uveljavljenih postopkov razpoznavanja.}, keywords = {biometrics, erk, face recognition, face verification, fusion, performance evaluation}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{rosus08, title = {Palmprint recognition using the trace transform}, author = {Vitomir \v{S}truc and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/ROSUS.pdf}, year = {2008}, date = {2008-03-01}, booktitle = {Proceedings of the national conference ROSUS'08}, pages = {41-48}, address = {Maribor, Slovenia}, abstract = {Biometrija je znanstvena veda o metodah razpoznavanja ljudi na podlagi njihovih fiziolo\v{s}kih in/ali vedenjskih zna\v{c}ilnosti. Sistemi, ki uporabljajo te metode, slu\v{z}ijo kot varnostni mehanizmi za omejevanje dostopa do dolo\v{c}enih prostorov, zgradb ali storitev ter kot pomo\v{c} pri kriminalisti\v{c}nih preiskavah. V \v{c}lanku predstavljamo primer biometri\v{c}nega sistema, ki preveri identiteto uporabnika na podlagi slike njegove dlani. Sistem temelji na novem, hibridnem postopku izpeljave zna\v{c}ilk, ki na slikovnem podro\v{c}ju dlani najprej izvede Kadyrov-Petrouvo transformacijo, transformirane slike pa s postopkom linearne diskriminantne analize v nadaljevanju pretvori v kompaktne vektorje zna\v{c}ilk. 
Uspe\v{s}nost razpoznavanja s predlaganim sistemom smo preizkusili na obse\v{z}ni podatkovni zbirki, kjer smo dosegli zadovoljive rezultate.}, keywords = {biometrics, palmprints, trace transform}, pubstate = {published}, tppubtype = {inproceedings} } @article{JEI-Struc_2008, title = {Face authentication using a hybrid approach}, author = {Vitomir \v{S}truc and France Miheli\v{c} and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/JEI.pdf}, doi = {10.1117/1.2885149}, year = {2008}, date = {2008-01-01}, journal = {Journal of Electronic Imaging}, volume = {17}, number = {1}, pages = {1-11}, abstract = {This paper presents a hybrid approach to face-feature extraction based on the trace transform and the novel kernel partial-least-squares discriminant analysis (KPA). The hybrid approach, called trace kernel partial-least-squares discriminant analysis (TKPA) first uses a set of fifteen trace functionals to derive robust and discriminative facial features and then applies the KPA method to reduce their dimensionality. The feasibility of the proposed approach was successfully tested on the XM2VTS database, where a false rejection rate (FRR) of 1.25% and a false acceptance rate (FAR) of 2.11% were achieved in our best-performing face-authentication experiment. 
The experimental results also show that the proposed approach can outperform kernel methods such as generalized discriminant analysis (GDA), kernel fisher analysis (KFA) and complete kernel fisher discriminant analysis (CKFA) as well as combinations of these methods with features extracted using the trace transform.}, keywords = {biometrics, face recognition, hybrid approach, kernel partial least squares, trace transform}, pubstate = {published}, tppubtype = {article} } @conference{BioID2008, title = {A palmprint verification system based on phase congruency features}, author = {Vitomir \v{S}truc and Nikola Pave\v{s}i\'{c}}, editor = {Ben Schouten and Niels Christian Juul and Andrzej Drygajlo and Massimo Tistarelli}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/ROSKILDE.pdf}, doi = {10.1007/978-3-540-89991-4_12}, year = {2008}, date = {2008-01-01}, booktitle = {Biometrics and Identity Management}, volume = {5372}, pages = {110-119}, publisher = {Springer-Verlag}, address = {Berlin, Heidelberg}, series = {Lecture Notes on Computer Science}, abstract = {The paper presents a fully automatic palmprint verification system which uses 2D phase congruency to extract line features from a palmprint image and subsequently performs linear discriminant analysis on the computed line features to represent them in a more compact manner. The system was trained and tested on a database of 200 people (2000 hand images) and achieved a false acceptance rate (FAR) of 0.26% and a false rejection rate (FRR) of 1.39% in the best performing verification experiment. 
In a comparison, where in addition to the proposed system, three popular palmprint recognition techniques were tested for their verification accuracy, the proposed system performed the best.}, keywords = {feature extraction, palmprint recognition, palmprint verification, palmprints, performance evaluation}, pubstate = {published}, tppubtype = {conference} } @inproceedings{JJ2008, title = {AvID: audio-video emotional database}, author = {Rok Gaj\v{s}ek and Anja Podlesek and Luka Komidar and Gregor So\v{c}an and Bo\v{s}tjan Bajec and Vitomir \v{S}truc and Valentin Bucik and France Miheli\v{c}}, year = {2008}, date = {2008-01-01}, booktitle = {Proceedings of the 11th International Multi-conference Information Society (IS'08)}, volume = {C}, pages = {70--74}, address = {Ljubljana, Slovenia}, keywords = {database, dataset, emotion recognition, facial expression recognition, multimodal database, speech technology, spontaneous emotions}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{ERK2007, title = {Color spaces for face recognition}, author = {Vitomir \v{S}truc and France Miheli\v{c} and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2014/08/ERK2007.pdf}, year = {2007}, date = {2007-01-01}, booktitle = {Proceedings of the International Electrotechnical and Computer Science Conference (ERK'07)}, pages = {171--174}, address = {Portoro\v{z}, Slovenia}, abstract = {The paper investigates the impact that the face-image color space has on the verification performance of two popular face recognition procedures, i.e., the Fisherface approach and the Gabor-Fisher classifier - GFC. Experimental results on the XM2VTS database show that the Fisherface technique performs best when features are extracted from the Cr component of the YCbCr color space, while the performance of the Gabor-Fisher classifier is optimized when grey-scale intensity face-images are used for feature extraction. 
Based on these findings, a novel face recognition framework that combines the Fisherface and the GFC method is introduced in this paper and its feasibility demonstrated in a comparative study where, in addition to the proposed method, six widely used feature extraction techniques were tested for their face verification performance.}, key = {ERK2007}, keywords = {biometrics, color spaces, computer vision, erk, face recognition, local conference}, pubstate = {published}, tppubtype = {inproceedings} } @article{EV-Struc_2007, title = {Impact of image degradations on the face recognition accuracy}, author = {Vitomir \v{S}truc and Nikola Pave\v{s}i\'{c}}, url = {http://luks.fe.uni-lj.si/nluks/wp-content/uploads/2016/09/EV2007.pdf}, year = {2007}, date = {2007-01-01}, journal = {Electrotechnical Review}, volume = {74}, number = {3}, pages = {145-150}, abstract = {The accuracy of automatic face recognition systems depends on various factors among which robustness and accuracy of the face localization procedure, choice of an appropriate face-feature extraction procedure, as well as use of a suitable matching algorithm are the most important. Current systems perform relatively well whenever test images to be recognized are captured under conditions similar to those of the training images. However, they are not robust enough if there is a difference between test and training images. Changes in image characteristics such as noise, colour depth, background and compression all cause a drop in performance of even the best systems of today. At this point the main question is which image characteristics are the most important in terms of face recognition performance and how they affect the recognition accuracy. This paper addresses these issues and presents performance evaluation (Table 2.) of three popular subspace methods (PCA, LDA and ICA) using ten degraded versions of the XM2VTS face image database [10]. 
The presented experimental results show the effects of different changes in image characteristics on four score level fusion rules, namely, the maximum, minimum, sum and product rule. All of the feature extraction procedures as well as the fusion strategies are rather insensitive to the presence of noise, JPEG compression, colour depth reduction, and so forth, while on the other hand they all exhibit great sensitivity to degradations such as face occlusion and packet loss simulation}, keywords = {biometrics, face recognition, ica, image degradations, lda, pca}, pubstate = {published}, tppubtype = {article} }