@inproceedings{c9cd3a0305d743769411ede528f1d8f1,
  title         = {Deep Models for Ensemble Touch-Screen Improvisation},
  abstract      = {For many, the pursuit and enjoyment of musical performance goes hand-in-hand with collaborative creativity, whether in a choir, jazz combo, orchestra, or rock band. However, few musical interfaces use the affordances of computers to create or enhance ensemble musical experiences. One possibility for such a system would be to use an artificial neural network (ANN) to model the way other musicians respond to a single performer. Some forms of music have well-understood rules for interaction; however, this is not the case for free improvisation with new touch-screen instruments where styles of interaction may be discovered in each new performance. This paper describes an ANN model of ensemble interactions trained on a corpus of such ensemble touch-screen improvisations. The results show realistic ensemble interactions and the model has been used to implement a live performance system where a performer is accompanied by the predicted and sonified touch gestures of three virtual players.},
  keywords      = {Deep learning, Ensemble interaction, Mobile music, RNN, Touch screen performance},
  author        = {Martin, Charles P. and Ellefsen, Kai Olav and Torresen, Jim},
  note          = {Publisher Copyright: {\textcopyright} 2017 Copyright held by the owner/author(s).; 12th International Audio Mostly Conference, AM 2017 ; Conference date: 23-08-2017 Through 26-08-2017},
  year          = {2017},
  month         = aug,
  day           = {23},
  doi           = {10.1145/3123514.3123556},
  language      = {English},
  series        = {ACM International Conference Proceeding Series},
  publisher     = {Association for Computing Machinery (ACM)},
  address       = {New York, NY, United States},
  booktitle     = {Proceedings of the 12th International Audio Mostly Conference},
  internal-note = {Review: address set to ACM's publisher city (original export had only the country); author "Torresen" may carry a diacritic (T{\o}rresen) -- confirm against the DOI record.},
}