@phdthesis {192, title = {Making Tabletops Useful with Applications, Frameworks and Multi-Tasking}, year = {2015}, month = {01/2015}, pages = {210}, school = {Universitat Pompeu Fabra}, type = {phd}, address = {Barcelona}, abstract = {

The growing availability of affordable tabletop technology and devices urges human-computer interaction researchers to provide the methods needed to make these devices as useful as possible to their users. Studies show that tabletops have distinctive characteristics that can be especially useful for solving certain types of problems, but this potential has arguably not yet translated into real-world applications. We theorize that the key components that can transform these systems into useful tools are application frameworks that take the devices' affordances into account, a third-party application ecosystem, and multi-application systems supporting concurrent multitasking. In this dissertation we address these key components. First, we explore the distinctive affordances of tabletops through two cases: TurTan, a tangible programming language in the education context, and SongExplorer, a music collection browser for large databases. Next, to address the difficulty of building applications that exploit these affordances, we focus on software frameworks that support the tabletop application-making process, with two different approaches: ofxTableGestures, targeting programmers, and MTCF, designed for music and sound artists. Finally, recognizing that making useful applications is only part of the problem, we focus on a fundamental issue of multi-application tabletop systems: the difficulty of supporting multi-user concurrent multitasking with third-party applications. After analyzing the possible approaches, we present GestureAgents, a content-based, distributed, application-centric disambiguation mechanism and its implementation, which solves this problem in a generic fashion and is also useful for other shareable interfaces, including uncoupled ones.

}, keywords = {Applications, Collaboration, Frameworks, HCI, interaction, Multi-Tasking, Shared interfaces, tabletop}, author = {Carles F. Juli{\`a}} }

@conference {124, title = {GestureAgents: An Agent-Based Framework for Concurrent Multi-Task Multi-User Interaction}, booktitle = {TEI 2013}, year = {2013}, month = {10/02/2013}, publisher = {ACM}, organization = {ACM}, abstract = {

While the HCI community has put considerable effort into creating physical interfaces for collaboration, studying multi-user interaction dynamics, and building specific applications to support (and test) these phenomena, it has not addressed the problem of having multiple applications share the same interactive space. An ecology of rich interactive programs sharing the same interface raises the question of how to handle interaction ambiguity across applications while still allowing different programmers the freedom to build rich, unconstrained interaction experiences. This paper describes GestureAgents, a framework demonstrating several techniques for coordinating different applications so that they support concurrent multi-user multi-tasking interaction while still dealing with gesture ambiguity across multiple applications.

}, keywords = {agent exclusivity, Concurrent interaction, gesture framework, multi-user}, url = {http://www.mtg.upf.edu/system/files/publications/2013\%20TEI13\%20GestureAgents.pdf}, author = {Carles F. Juli{\`a} and Sergi Jord{\`a} and Nicolas Earnshaw} }