pubs2015.bib

@article{Robertson:2015,
  abstract = { This paper presents a Bayesian probabilistic framework for real-time alignment of a recording or score with a live performance using an event-based approach. Multitrack audio files are processed using existing onset detection and harmonic analysis algorithms to create a representation of a musical performance as a sequence of time-stamped events. We propose the use of distributions for the position and relative speed which are sequentially updated in real-time according to Bayes' theorem. We develop the methodology for this approach by describing its application in the case of matching a single MIDI track and then extend this to the case of multitrack recordings. An evaluation is presented that contrasts our multitrack alignment method with state-of-the-art alignment techniques. },
  author = {Robertson, A. and Plumbley, M. D.},
  doi = {10.1080/09298215.2015.1009839},
  journal = {Journal of New Music Research},
  number = {2},
  pages = {71-82},
  title = {Event-based Multitrack Alignment using a Probabilistic Framework},
  volume = {44},
  year = {2015},
  bdsk-url-1 = {http://dx.doi.org/10.1080/09298215.2015.1009839}
}
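
% Editor's note: a minimal sketch of the sequential Bayesian update described
% in the Robertson:2015 abstract above, assuming a discretised position grid,
% a fixed tempo, and a Gaussian onset-observation model; all values are
% illustrative assumptions, not the paper's implementation.

import numpy as np

positions = np.linspace(0.0, 8.0, 801)               # score positions, 0.01-beat grid
posterior = np.ones_like(positions) / positions.size # uniform prior
score_onsets = np.array([0.0, 1.0, 1.5, 2.0, 3.0])   # onset positions in the score
tempo = 2.0                                          # assumed beats per second
sigma = 0.1                                          # observation noise (beats)

def predict(posterior, dt):
    # Motion model: shift the whole distribution forward by tempo * dt.
    return np.roll(posterior, int(round(tempo * dt / 0.01)))

def update(posterior):
    # An onset was just detected: likelihood is high near any score onset.
    dists = np.min(np.abs(positions[:, None] - score_onsets[None, :]), axis=1)
    likelihood = np.exp(-0.5 * (dists / sigma) ** 2)
    unnorm = posterior * likelihood
    return unnorm / unnorm.sum()

t_prev = 0.0
for t_obs in (0.02, 0.49, 0.77, 1.01):               # detected onset times (s)
    posterior = predict(posterior, t_obs - t_prev)
    posterior = update(posterior)
    t_prev = t_obs
print(positions[np.argmax(posterior)])               # MAP score position
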
@inproceedings{Stowell:2015b,
  abstract = {Many current paradigms for acoustic event detection (AED) are not adapted to the organic variability of natural sounds, and/or they assume a limit on the number of simultaneous sources: often only one source, or one source of each type, may be active. These aspects are highly undesirable for applications such as bird population monitoring. We introduce a simple method modelling the onsets, durations and offsets of acoustic events to avoid intrinsic limits on polyphony or on inter-event temporal patterns. We evaluate the method in a case study with over 3000 zebra finch calls. In comparison against a HMM-based method we find it more accurate at recovering acoustic events, and more robust for estimating calling rates. },
  author = {Stowell, D. and Clayton, D.},
  booktitle = {Applications of Signal Processing to Audio and Acoustics (WASPAA), 2015 IEEE Workshop on},
  title = {Acoustic event detection for multiple overlapping similar sources},
  url = {http://arxiv.org/abs/1503.07150},
  year = {2015},
  bdsk-url-1 = {http://arxiv.org/abs/1503.07150}
}
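
% Editor's note: a generic illustration (not the Stowell:2015b model) of
% representing detections as explicit (onset, offset) events via hysteresis
% thresholding of a per-source activation envelope; because each source is
% processed independently, no intrinsic limit is placed on how many events
% may overlap. Threshold values are assumptions.

def extract_events(activation, fs, on_th=0.6, off_th=0.3):
    """Turn an activation envelope (one value per frame at rate fs) into a
    list of (onset_s, offset_s) events using two hysteresis thresholds."""
    events, active, start = [], False, 0
    for i, v in enumerate(activation):
        if not active and v >= on_th:
            active, start = True, i
        elif active and v < off_th:
            events.append((start / fs, i / fs))
            active = False
    if active:                        # close an event still open at the end
        events.append((start / fs, len(activation) / fs))
    return events
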
@article{Stowell:2015,
  abstract = {For intelligent systems to make best use of the audio modality, it is important that they can recognise not just speech and music, which have been researched as specific tasks, but also general sounds in everyday environments. To stimulate research in this field we conducted a public research challenge: the IEEE Audio and Acoustic Signal Processing Technical Committee challenge on Detection and Classification of Acoustic Scenes and Events (DCASE). In this paper we report on the state of the art in automatically classifying audio scenes, and automatically detecting and classifying audio events. We survey prior work as well as the state of the art represented by the submissions to the challenge from various research groups. We also provide detail on the organisation of the challenge, so that our experience as challenge hosts may be useful to those organising challenges in similar domains. We created new audio datasets and baseline systems for the challenge: these, as well as some submitted systems, are publicly available under open licenses, to serve as benchmark for further research in general-purpose machine listening.},
  author = {Stowell, D. and Giannoulis, D. and Benetos, E. and Lagrange, M. and Plumbley, M. D.},
  doi = {10.1109/TMM.2015.2428998},
  journal = {{IEEE} Transactions on Multimedia},
  month = {Oct},
  number = {10},
  pages = {1733--1746},
  title = {Detection and classification of acoustic scenes and events},
  volume = {17},
  year = {2015},
  bdsk-url-1 = {http://dx.doi.org/10.1109/TMM.2015.2428998}
}
@article{Kereliuk2015,
  author = {Kereliuk, C and Sturm, BL and Larsen, J},
  doi = {10.1109/TMM.2015.2478068},
  eissn = {1941-0077},
  issn = {1520-9210},
  issue = {11},
  journal = {IEEE Transactions on Multimedia},
  month = {Nov},
  owner = {dan},
  pages = {2059--2071},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {Deep Learning and Music Adversaries},
  volume = {17},
  year = {2015},
  bdsk-url-1 = {http://dx.doi.org/10.1109/TMM.2015.2478068}
}
@article{Rohrmeier2015,
  author = {Rohrmeier, M and Zuidema, W and Wiggins, GA and Scharff, C},
  day = {19},
  doi = {10.1098/rstb.2014.0097},
  issn = {0962-8436},
  issue = {1664},
  journal = {Philosophical Transactions of the Royal Society B: Biological Sciences},
  keyword = {music},
  month = {Mar},
  number = {20140097},
  owner = {dan},
  pages = {107--121},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {Principles of structure building in music, language and animal song},
  url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2\&SrcApp=PARTNER_APP\&SrcAuth=LinksAMR\&KeyUT=WOS:000350537300010\&DestLinkType=FullRecord\&DestApp=ALL_WOS\&UsrCustomerID=612ae0d773dcbdba3046f6df545e9f6a},
  volume = {370},
  year = {2015},
  bdsk-url-1 = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2%5C&SrcApp=PARTNER_APP%5C&SrcAuth=LinksAMR%5C&KeyUT=WOS:000350537300010%5C&DestLinkType=FullRecord%5C&DestApp=ALL_WOS%5C&UsrCustomerID=612ae0d773dcbdba3046f6df545e9f6a},
  bdsk-url-2 = {http://dx.doi.org/10.1098/rstb.2014.0097}
}
@inproceedings{Sturm,
  abstract = {The ``winning'' system in the 2013 MIREX Latin Genre Classification Task was a deep neural network trained with simple features. An explanation for its winning performance has yet to be found. In previous work, we built similar systems using the {\em BALLROOM} music dataset, and found their performances to be greatly affected by slightly changing the tempo of the music of a test recording. In the MIREX task, however, systems are trained and tested using the {\em Latin Music Dataset} (LMD), which is 4.5 times larger than {\em BALLROOM}, and which does not seem to show as strong a relationship between tempo and label as {\em BALLROOM}. In this paper, we reproduce the ``winning'' deep learning system using {\em LMD}, and measure the effects of time dilation on its performance. We find that tempo changes of at most $\pm 6$\% greatly diminish and improve its performance. Interpreted with the low-level nature of the input features, this supports the conclusion that the system is exploiting some low-level absolute time characteristics to reproduce ground truth in {\em LMD}.},
  author = {Sturm, BL and Kereliuk, C and Larsen, J},
  conference = {Mathematics and Computation in Music},
  doi = {10.1007/978-3-319-20603-5_34},
  filedday = {6},
  filedmonth = {Mar},
  filedyear = {2015},
  finishday = {25},
  finishmonth = {Jun},
  finishyear = {2015},
  isbn = {978-3-319-20602-8},
  organization = {QMUL},
  owner = {dan},
  publicationstatus = {published},
  startday = {22},
  startmonth = {Jun},
  startyear = {2015},
  timestamp = {2016.04.04},
  title = {El Caballo Viejo? Latin genre recognition with deep learning and spectral periodicity},
  url = {http://link.springer.com/chapter/10.1007/978-3-319-20603-5_34},
  year = {2015},
  bdsk-url-1 = {http://link.springer.com/chapter/10.1007/978-3-319-20603-5_34},
  bdsk-url-2 = {http://dx.doi.org/10.1007/978-3-319-20603-5_34}
}
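
% Editor's note: a sketch of the time-dilation probe the Sturm abstract above
% describes, assuming librosa for time stretching; `classify` is a hypothetical
% function mapping audio to a predicted genre label, standing in for the
% trained system under test.

import numpy as np
import librosa

def tempo_sensitivity(y, sr, classify, rates=np.linspace(0.94, 1.06, 7)):
    """Report the label the classifier assigns to each +/-6% time dilation
    of a single test recording."""
    results = {}
    for rate in rates:
        y_stretched = librosa.effects.time_stretch(y, rate=float(rate))
        results[round(float(rate), 2)] = classify(y_stretched, sr)
    return results
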
@inproceedings{Sturma,
  abstract = {We present the concept of {\em adversarial audio} in the context of deep neural networks (DNNs) for music content analysis. An adversary is an algorithm that makes minor perturbations to an input that cause major repercussions to the system response. In particular, we design an adversary for a DNN that takes as input short-time spectral magnitudes of recorded music and outputs a high-level music descriptor. We demonstrate how this adversary can make the DNN behave in any way with only extremely minor changes to the music recording signal. We show that the adversary cannot be neutralised by a simple filtering of the input. Finally, we discuss adversaries in the broader context of the evaluation of music content analysis systems.},
  author = {Sturm, BL and Kereliuk, C and Larsen, J},
  conference = {IEEE Workshop on Applications of Signal Processing to Audio and Acoustics},
  filedday = {26},
  filedmonth = {Jun},
  filedyear = {2015},
  finishday = {21},
  finishmonth = {Oct},
  finishyear = {2015},
  organization = {Mohonk, NY},
  owner = {dan},
  startday = {18},
  startmonth = {Oct},
  startyear = {2015},
  timestamp = {2016.04.04},
  title = {Deep Learning, Audio Adversaries, and Music Content Analysis},
  year = {2015}
}
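
% Editor's note: the Sturma paper designs its own adversary; as a generic
% stand-in, this sketch applies a gradient-sign perturbation to short-time
% spectral magnitudes. `grad_loss` is an assumed callable returning
% d(loss)/d(input) for the network under attack.

import numpy as np

def adversarial_perturb(X, grad_loss, eps=1e-3, steps=10):
    """Iteratively nudge spectral magnitudes X in the gradient-sign direction
    so the loss grows while the change to the input stays extremely small."""
    X_adv = X.copy()
    for _ in range(steps):
        X_adv = X_adv + eps * np.sign(grad_loss(X_adv))
        X_adv = np.maximum(X_adv, 0.0)       # magnitudes remain non-negative
    return X_adv
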
@inproceedings{Sturmb,
  abstract = {We discuss the problem of music content analysis within the formal framework of experimental design.},
  author = {Sturm, BLT and Maruri-Aguilar, H and Parker, B and Grossmann, H},
  conference = {International Conference on Machine Learning},
  finishday = {11},
  finishmonth = {Jul},
  finishyear = {2015},
  organization = {Lille, France},
  owner = {dan},
  startday = {6},
  startmonth = {Jul},
  startyear = {2015},
  timestamp = {2016.04.04},
  title = {The scientific evaluation of music content analysis systems: Valid empirical foundations for future real-world impact},
  year = {2015}
}
@incollection{Wiggins2015a,
  abstract = {We present progress towards a computational cognitive architecture, IDyOT (Information Dynamics of Thinking) that is intended to account for cer- tain aspects of human creativity and other forms of cognitive processing in terms of a pre-conscious predictive loop. The theory is motivated in terms of the evolutionary pressure to be efficient. It makes several predictions that may be tested by building computational implementations and studying their behaviour.},
  author = {Wiggins, GA and Forth, JC},
  booktitle = {Computational Creativity Research: Towards Creative Machines},
  doi = {10.2991/978-94-6239-085-0},
  editor = {Besold, TR and Schorlemmer, M and Smaill, A},
  isbn = {978-94-6239-084-3},
  number = {7},
  numberofpieces = {19},
  owner = {dan},
  pages = {127--148},
  publisher = {Atlantis Press},
  series = {Atlantis Thinking Machines},
  timestamp = {2016.04.04},
  title = {IDyOT: A Computational Theory of Creativity as Everyday Reasoning from Learned Information},
  url = {http://www.springer.com/gb/book/9789462390843},
  year = {2015},
  bdsk-url-1 = {http://www.springer.com/gb/book/9789462390843},
  bdsk-url-2 = {http://dx.doi.org/10.2991/978-94-6239-085-0}
}
@article{Wiggins2015,
  author = {Wiggins, GA and Tyack, P and Scharff, C and Rohrmeier, M},
  day = {19},
  doi = {10.1098/rstb.2014.0099},
  issn = {0962-8436},
  issue = {1664},
  journal = {PHILOSOPHICAL TRANSACTIONS OF THE ROYAL SOCIETY B-BIOLOGICAL SCIENCES},
  keyword = {creativity},
  month = {Mar},
  number = {ARTN 20140099},
  owner = {dan},
  pages = {129--137},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {The evolutionary roots of creativity: mechanisms and motivations},
  url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2\&SrcApp=PARTNER_APP\&SrcAuth=LinksAMR\&KeyUT=WOS:000350537300012\&DestLinkType=FullRecord\&DestApp=ALL_WOS\&UsrCustomerID=612ae0d773dcbdba3046f6df545e9f6a},
  volume = {370},
  year = {2015},
  bdsk-url-1 = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2%5C&SrcApp=PARTNER_APP%5C&SrcAuth=LinksAMR%5C&KeyUT=WOS:000350537300012%5C&DestLinkType=FullRecord%5C&DestApp=ALL_WOS%5C&UsrCustomerID=612ae0d773dcbdba3046f6df545e9f6a},
  bdsk-url-2 = {http://dx.doi.org/10.1098/rstb.2014.0099}
}
@inproceedings{Ewert2015,
  author = {Ewert, S and Plumbley, MD and Sandler, M},
  booktitle = {2015 IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP)},
  issn = {1520-6149},
  keyword = {Non-Negative Matrix Deconvolution},
  owner = {dan},
  pages = {569--573},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {A Dynamic Programming Variant of Non-Negative Matrix Deconvolution for the Transcription of Struck String Instruments},
  url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2\&SrcApp=PARTNER_APP\&SrcAuth=LinksAMR\&KeyUT=WOS:000368452400114\&DestLinkType=FullRecord\&DestApp=ALL_WOS\&UsrCustomerID=612ae0d773dcbdba3046f6df545e9f6a},
  year = {2015},
  bdsk-url-1 = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2%5C&SrcApp=PARTNER_APP%5C&SrcAuth=LinksAMR%5C&KeyUT=WOS:000368452400114%5C&DestLinkType=FullRecord%5C&DestApp=ALL_WOS%5C&UsrCustomerID=612ae0d773dcbdba3046f6df545e9f6a}
}
@inproceedings{Tian2015,
  abstract = {This paper presents a new set of audio features to describe music content based on tempo cues. Tempogram, a mid-level representation of tempo information, is constructed to characterize tempo variation and local pulse in the audio signal. We introduce a collection of novel tempogram-based features inspired by musicological hypotheses about the relation between music structure and its rhythmic components prominent at different metrical levels. The strength of these features is demonstrated in music structural segmentation, an important task in Music information retrieval (MIR), using several published popular music datasets. Results indicate that incorporating tempo information into audio segmentation is a promising new direction.},
  author = {Tian, M and Fazekas, G and Black, DAA and Sandler, M},
  booktitle = {ICASSP, IEEE International Conference on Acoustics, Speech and Signal Processing - Proceedings},
  day = {1},
  doi = {10.1109/ICASSP.2015.7178003},
  isbn = {9781467369978},
  issn = {1520-6149},
  month = {Jan},
  owner = {dan},
  pages = {419--423},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {On the use of the tempogram to describe audio content and its application to music structural segmentation},
  volume = {2015-August},
  year = {2015},
  bdsk-url-1 = {http://dx.doi.org/10.1109/ICASSP.2015.7178003}
}
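
% Editor's note: a minimal autocorrelation tempogram, one common construction
% consistent with the Tian2015 abstract's description (a mid-level map of
% local tempo content); window and hop sizes are illustrative assumptions.

import numpy as np

def autocorrelation_tempogram(onset_env, win_len=384, hop=64):
    """Local autocorrelation of an onset-strength envelope: each column
    characterises the periodicities (tempo cues) around one analysis frame."""
    cols = []
    for start in range(0, max(1, len(onset_env) - win_len), hop):
        seg = onset_env[start:start + win_len]
        seg = seg - seg.mean()
        ac = np.correlate(seg, seg, mode='full')[win_len - 1:]
        cols.append(ac / (ac[0] + 1e-12))    # normalise by lag-0 energy
    return np.array(cols).T                  # shape: (lag, time)
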
@article{Saari2015,
  author = {Saari, P and Fazekas, G and Eerola, T and Barthet, M and Lartillot, O and Sandler, M},
  doi = {10.1109/TAFFC.2015.2462841},
  issn = {1949-3045},
  journal = {IEEE Transactions on Affective Computing},
  owner = {dan},
  pages = {1--1},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {Genre-adaptive Semantic Computing and Audio-based Modelling for Music Mood Annotation},
  year = {2015},
  bdsk-url-1 = {http://dx.doi.org/10.1109/TAFFC.2015.2462841}
}
@inproceedings{FREEMAN2015,
  abstract = {How can we describe data when used as an art material? As the number of artists using data in their work increases, so too must our ability to describe the material in a way that is understood by both specialist and general audiences alike. In this paper we review existing vocabularies, glossaries, and taxonomies of data, and propose our own concise taxonomy. We present a number of examples of how existing data art works are described, and demonstrate our taxonomy by applying it to these works. To conclude we propose the adoption of this concise taxonomy by artists, critics, and curators, and suggest that on-going refinement of the taxonomy takes place through crowd-sourced knowledge sharing on the web.},
  author = {Freeman, J and Sandler, M and Wiggins, G and Starks, G},
  booktitle = {Proceedings of the IEEE VIS Arts Program (VISAP)},
  conference = {IEEE VIS},
  day = {26},
  finishday = {30},
  finishmonth = {Oct},
  finishyear = {2015},
  keyword = {data art},
  month = {Oct},
  organization = {Chicago, Illinois},
  owner = {dan},
  pages = {22--29},
  publicationstatus = {online-published},
  publisher = {IEEE},
  startday = {25},
  startmonth = {Oct},
  startyear = {2015},
  timestamp = {2016.04.04},
  title = {A concise taxonomy for describing data as an art material},
  url = {http://www.translatingdata.org/},
  year = {2015},
  bdsk-url-1 = {http://www.translatingdata.org/}
}
@inproceedings{Mycroft2015,
  author = {Mycroft, J and Reiss, JD and Stockman, T},
  conference = {International Conference on the Multimodal Experience of Music (ICMEM)},
  owner = {dan},
  organization = {Sheffield},
  timestamp = {2016.04.04},
  title = {The effect of differing user interface presentation styles on audio mixing},
  year = {2015}
}
@inproceedings{Ronan2015,
  author = {Ronan, DM and DeMan, B and Gunes, H and Reiss, JD},
  conference = {139th AES Convention},
  owner = {dan},
  organization = {New York},
  timestamp = {2016.04.04},
  title = {The impact of subgrouping practices on the perception of multitrack mixes},
  year = {2015}
}
@inproceedings{Jillings2015,
  author = {Jillings, Nicholas and De Man, Brecht and Moffat, David and Reiss, Joshua D.},
  booktitle = {12th Sound and Music Computing Conference},
  date-modified = {2016-04-07 15:10:14 +0000},
  owner = {dan},
  timestamp = {2016.04.04},
  title = {Web Audio Evaluation Tool: A Browser-based Listening Test Environment},
  url = {http://smcnetwork.org/system/files/SMC2015_submission_88.pdf},
  year = {2015}
}
@article{Zacharakis2015,
  abstract = {The current study expands our previous work on interlanguage musical timbre semantics by examining the relationship between semantics and perception of timbre. Following Zacharakis, Pastiadis, and Reiss (2014), a pairwise dissimilarity listening test involving participants from two separate linguistic groups (Greek and English) was conducted. Subsequent multidimensional scaling analysis produced a 3D perceptual timbre space for each language. The comparison between perceptual spaces suggested that timbre perception is unaffected by native language. Additionally, comparisons between semantic and perceptual spaces revealed substantial similarities which suggest that verbal descriptions can convey a considerable amount of perceptual information. The previously determined semantic labels "auditory texture" and "luminance" featured the highest associations with perceptual dimensions for both languages. "Auditory mass" failed to show any strong correlations. Acoustic analysis identified energy distribution of harmonic partials, spectral detail, temporal/spectrotemporal characteristics and the fundamental frequency as the most salient acoustic correlates of perceptual dimensions.},
  author = {Zacharakis, A and Pastiadis, K and Reiss, JD},
  day = {1},
  doi = {10.1525/MP.2015.32.4.394},
  eissn = {1533-8312},
  issn = {0730-7829},
  issue = {4},
  journal = {Music Perception},
  month = {Jan},
  owner = {dan},
  pages = {394--412},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {An interlanguage unification of musical timbre: Bridging semantic, perceptual, and acoustic dimensions},
  volume = {32},
  year = {2015},
  bdsk-url-1 = {http://dx.doi.org/10.1525/MP.2015.32.4.394}
}
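
% Editor's note: a sketch of the analysis pipeline the Zacharakis2015 abstract
% reports, using scikit-learn's metric MDS to embed a listener-averaged
% pairwise dissimilarity matrix into a 3-D perceptual timbre space; the matrix
% here is a random placeholder.

import numpy as np
from sklearn.manifold import MDS

rng = np.random.default_rng(0)
D = rng.random((10, 10))                  # pairwise dissimilarity ratings
D = (D + D.T) / 2.0                       # symmetrise
np.fill_diagonal(D, 0.0)                  # zero self-dissimilarity

mds = MDS(n_components=3, dissimilarity='precomputed', random_state=0)
space = mds.fit_transform(D)              # (n_stimuli, 3) timbre coordinates
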
@article{Hafezi2015,
  author = {Hafezi, S and Reiss, JD},
  issn = {1549-4950},
  issue = {5},
  journal = {Journal of the Audio Engineering Society},
  month = {May},
  owner = {dan},
  pages = {312--323},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {Autonomous Multitrack Equalization Based on Masking Reduction},
  url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2\&SrcApp=PARTNER_APP\&SrcAuth=LinksAMR\&KeyUT=WOS:000355777700001\&DestLinkType=FullRecord\&DestApp=ALL_WOS\&UsrCustomerID=612ae0d773dcbdba3046f6df545e9f6a},
  volume = {63},
  year = {2015},
  bdsk-url-1 = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2%5C&SrcApp=PARTNER_APP%5C&SrcAuth=LinksAMR%5C&KeyUT=WOS:000355777700001%5C&DestLinkType=FullRecord%5C&DestApp=ALL_WOS%5C&UsrCustomerID=612ae0d773dcbdba3046f6df545e9f6a}
}
@article{Hon2015,
  abstract = {We investigate the self-localization problem of an ad-hoc network of randomly distributed and independent devices in an open-space environment with low reverberation but heavy noise (e.g. smartphones recording videos of an outdoor event). Assuming a sufficient number of sound sources, we estimate the distance between a pair of devices from the extreme (minimum and maximum) time difference of arrivals (TDOAs) from the sources to the pair of devices without knowing the time offset. The obtained inter-device distances are then exploited to derive the geometrical configuration of the network. In particular, we propose a robust audio fingerprinting algorithm for noisy recordings and perform landmark matching to construct a histogram of the TDOAs of multiple sources. The extreme TDOAs can be estimated from this histogram. By using audio fingerprinting features, the proposed algorithm works robustly in very noisy environments. Experiments with free-field simulation and open-space recordings prove the effectiveness of the proposed algorithm.},
  author = {Hon, TK and Wang, L and Reiss, JD and Cavallaro, A},
  day = {1},
  doi = {10.1109/TASLP.2015.2442417},
  issn = {1558-7916},
  issue = {10},
  journal = {IEEE Transactions on Audio, Speech and Language Processing},
  month = {Oct},
  owner = {dan},
  pages = {1623--1636},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {Audio fingerprinting for multi-device self-localization},
  volume = {23},
  year = {2015},
  bdsk-url-1 = {http://dx.doi.org/10.1109/TASLP.2015.2442417}
}
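
% Editor's note: a worked version of the extreme-TDOA geometry in the Hon2015
% abstract. With sound sources on both sides of a device pair, the observed
% TDOAs (which include an unknown clock offset) span an interval of width
% 2d/c, so the offset cancels when the extremes are subtracted. Values below
% are illustrative.

SPEED_OF_SOUND = 343.0   # m/s

def pair_distance(tdoas):
    """Inter-device distance from the extreme TDOAs (seconds) observed
    across many sources: d = c * (tdoa_max - tdoa_min) / 2."""
    return SPEED_OF_SOUND * (max(tdoas) - min(tdoas)) / 2.0

tdoas = [0.0112, 0.0031, -0.0048, 0.0179, 0.0205]   # from landmark matching
print(round(pair_distance(tdoas), 2))               # 4.34 m for these values
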
@inproceedings{Mason2015,
  abstract = {Audio quality is very important to broadcasters' audiences, and unwanted loudness variations do compromise the quality of experience for the listener. Dynamic range control applied by the broadcaster can go some way to avoiding problems but can never take the individual environment of the listener into account. The listening conditions are a significant factor to be taken into account when dynamic range control is applied. The web audio API provided by HTML5 offers the possibility of performing dynamic range control under the control of the listener, tailoring it optimally for their individual situation. We have developed a system that demonstrates that this is achievable in a modern web browser. The implementation controls the compressor based on environmental noise level measured using the microphone present in most mobile device audio players.},
  author = {Mason, A and Jillings, N and Ma, Z and Reiss, JD and Melchior, F},
  booktitle = {Proceedings of the AES International Conference},
  day = {1},
  isbn = {9781942220022},
  month = {Jan},
  owner = {dan},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {Adaptive audio reproduction using personalised compression},
  volume = {2015-January},
  year = {2015}
}
@inproceedings{Moffat2015,
  author = {Moffat, David and Ronan, David and Reiss, Joshua D.},
  booktitle = {18th International Conference on Digital Audio Effects (DAFx-15)},
  date-modified = {2016-04-07 15:11:11 +0000},
  owner = {dan},
  timestamp = {2016.04.04},
  title = {An Evaluation of Audio Feature Extraction Toolboxes},
  note = {Honorable mention for Best Paper},
  year = {2015}
}
@article{Man2015,
  author = {De Man, B and Reiss, JD},
  journal = {Journal on the Art of Record Production},
  owner = {dan},
  timestamp = {2016.04.04},
  title = {Analysis of Peer Reviews in Music Production},
  volume = {10},
  year = {2015}
}
@inproceedings{Mycroft2015a,
  author = {Mycroft, J and Reiss, JD and Stockman, T},
  conference = {International Symposium on Computer Music Modeling and Retrieval (CMMR)},
  owner = {dan},
  timestamp = {2016.04.04},
  title = {Audio Mixing Displays: The Influence of Overviews on Information Search and Critical Listening},
  year = {2015}
}
@inproceedings{Ronan2015a,
  author = {Ronan, David and Moffat, D and Gunes, H and Reiss, Joshua D.},
  booktitle = {18th Int. Conference on Digital Audio Effects (DAFx-15)},
  date-modified = {2016-04-07 15:11:22 +0000},
  owner = {dan},
  organization = {Trondheim, Norway},
  timestamp = {2016.04.04},
  title = {Automatic subgrouping of multitrack audio},
  year = {2015}
}
@inproceedings{Pestana2015,
  author = {Pestana, P and Reiss, JD and Barbosa, A},
  conference = {138th Audio Engineering Society (AES) Convention},
  owner = {dan},
  timestamp = {2016.04.04},
  title = {Cross-Adaptive Polarity Switching Strategies for Optimization of Audio Mixes},
  year = {2015}
}
@inproceedings{Hon2015a,
  author = {Hon, TK and Wang, L and Reiss, JD and Cavallaro, A},
  conference = {23rd European Signal Processing Conference (EUSIPCO)},
  owner = {dan},
  pages = {1341--1345},
  organization = {Nice, France},
  timestamp = {2016.04.04},
  title = {Fine landmark-based synchronization of ad-hoc microphone arrays},
  year = {2015}
}
@inproceedings{Durr2015,
  author = {Durr, G and Peixoto, L and Souza, M and Tanoue, R and Reiss, JD},
  conference = {AES 56th International Conference},
  owner = {dan},
  organization = {London, UK},
  timestamp = {2016.04.04},
  title = {Implementation and evaluation of dynamic level of audio detail},
  year = {2015}
}
@article{Ma2015,
  abstract = {We present an intelligent approach to multitrack dynamic range compression where all parameters are configured automatically based on side-chain feature extraction from the input signals. A method of adjustment experiment to explore how audio engineers set the ratio and threshold is described. We use multiple linear regression to model the relationship between different features and the experimental results. Parameter automations incorporate control assumptions based on this experiment and those derived from mixing literature and analysis. Subjective evaluation of the intelligent system is provided in the form of a multiple stimulus listening test where the system is compared against a no-compression mix, two human mixes, and an alternative approach. Results showed that mixes devised by our system are able to compete with or outperform manual mixes by semi-professionals under a variety of subjective criteria.},
  author = {Ma, Z and De Man, B and Pestana, PDL and Black, DAA and Reiss, JD},
  day = {1},
  doi = {10.17743/jaes.2015.0053},
  issn = {1549-4950},
  issue = {6},
  journal = {Journal of the Audio Engineering Society},
  month = {Jan},
  owner = {dan},
  pages = {412--426},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {Intelligent multitrack dynamic range compression},
  volume = {63},
  year = {2015},
  bdsk-url-1 = {http://dx.doi.org/10.17743/jaes.2015.0053}
}
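
% Editor's note: a sketch of the modelling step the Ma2015 abstract describes,
% fitting a multiple linear regression from side-chain features to the ratio
% and threshold chosen by engineers; feature names and all numbers are
% placeholders, not the paper's data.

import numpy as np
from sklearn.linear_model import LinearRegression

X = np.array([[0.12, -18.0, 7.5],     # assumed features per track, e.g.
              [0.30, -12.0, 4.2],     # crest factor, loudness, spectral slope
              [0.22, -15.0, 5.9],
              [0.41,  -9.0, 3.1]])
y = np.array([[4.0, -24.0],           # targets: (ratio, threshold in dB)
              [2.5, -15.0],
              [3.2, -19.0],
              [2.0, -11.0]])

model = LinearRegression().fit(X, y)           # multi-output regression
print(model.predict([[0.25, -14.0, 5.0]]))     # automated (ratio, threshold)
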
@inproceedings{DeMan2015,
  abstract = {The relation of music production practices to preference is still poorly understood. Due to the highly complex process of mixing music, few studies have been able to reliably investigate mixing engineering, as investigating one process parameter or feature without considering the correlation with other parameters inevitably oversimplifies the problem. In this work, we present an experiment where different mixes of different songs, obtained with a representative set of audio engineering tools, are rated by experienced subjects. The relation between the perceived mix quality and sonic features extracted from the mixes is investigated, and we find that a number of features correlate with quality.},
  author = {De Man, B and Boerum, M and Leonard, B and King, R and Massenburg, G and Reiss, JD},
  booktitle = {138th Audio Engineering Society Convention 2015},
  day = {1},
  isbn = {9781510806597},
  month = {Jan},
  owner = {dan},
  pages = {129--136},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {Perceptual evaluation of music mixing practices},
  volume = {1},
  year = {2015}
}
@article{McGregor2015,
  author = {McGregor, S and Agres, K and Purver, M and Wiggins, GA},
  journal = {Journal of Artificial General Intelligence},
  number = {1},
  owner = {dan},
  pages = {55--86},
  timestamp = {2016.04.04},
  title = {From Distributional Semantics to Conceptual Spaces: A Novel Computational Method for Concept Creation},
  volume = {6},
  year = {2015}
}
@inproceedings{Griffiths2015,
  abstract = {We present computational experiments on language segmentation using a general information-theoretic cognitive model. We present a method which uses the statistical regularities of language to segment a continuous stream of symbols into "meaningful units" at a range of levels. Given a string of symbols (in the present approach, textual representations of phonemes), we attempt to find the syllables such as grea and sy (in the word greasy); words such as in, greasy, wash, and water; and phrases such as in greasy wash water. The approach is entirely information-theoretic, and requires no knowledge of the units themselves; it is thus assumed to require only general cognitive abilities, and has previously been applied to music. We tested our approach on two spoken language corpora, and we discuss our results in the context of learning as a statistical process.},
  author = {Griffiths, SS and McGinity, MM and Forth, J and Purver, M and Wiggins, GA},
  booktitle = {CEUR Workshop Proceedings},
  day = {1},
  issn = {1613-0073},
  month = {Jan},
  owner = {dan},
  pages = {54--67},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {Information-theoretic segmentation of natural language},
  volume = {1510},
  year = {2015}
}
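
% Editor's note: a small sketch of one standard information-theoretic boundary
% criterion consistent with the Griffiths2015 abstract: estimate the
% conditional entropy of the next symbol given a short context, and
% hypothesise unit boundaries where the entropy peaks. The paper's exact
% model may differ.

import math
from collections import defaultdict

def entropy_profile(symbols, order=2):
    """Conditional entropy of the next symbol given the preceding `order`
    symbols, estimated from the sequence itself."""
    ctx = defaultdict(lambda: defaultdict(int))
    for i in range(order, len(symbols)):
        ctx[tuple(symbols[i - order:i])][symbols[i]] += 1
    profile = []
    for i in range(order, len(symbols)):
        counts = ctx[tuple(symbols[i - order:i])]
        total = sum(counts.values())
        profile.append(-sum(c / total * math.log2(c / total)
                            for c in counts.values()))
    return profile

h = entropy_profile(list("ingreasywashwater"))
peaks = [i for i in range(1, len(h) - 1) if h[i - 1] < h[i] >= h[i + 1]]
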
@incollection{Yuan2015,
  abstract = {We describe an experiment into detecting emotions in texts on the Chinese microblog service Sina Weibo (www.weibo.com) using distant supervision via various author-supplied emotion labels (emoticons and smilies). Existing word segmentation tools proved unreliable; better accuracy was achieved using character-based features. Higher-order n-grams proved to be useful features. Accuracy varied according to label and emotion: while smilies are used more often, emoticons are more reliable. Happiness is the most accurately predicted emotion, with accuracies around 90\% on both distant and gold-standard labels. This approach works well and achieves high accuracies for happiness and anger, while it is less effective for sadness, surprise, disgust and fear, which are also difficult for human annotators to detect.},
  author = {Yuan, Z and Purver, M},
  day = {1},
  doi = {10.1007/978-3-319-18458-6_7},
  issn = {1860-949X},
  journal = {Studies in Computational Intelligence},
  month = {Jan},
  owner = {dan},
  pages = {129--149},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {Predicting emotion labels for Chinese microblog texts},
  volume = {602},
  year = {2015},
  bdsk-url-1 = {http://dx.doi.org/10.1007/978-3-319-18458-6_7}
}
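
% Editor's note: a sketch of the character-based n-gram approach the Yuan2015
% abstract reports, assuming scikit-learn; the texts and distant labels
% (derived from author-supplied emoticons) are tiny placeholders.

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline

texts = ["今天真开心", "气死我了", "太高兴了", "好烦啊"]
labels = ["happiness", "anger", "happiness", "anger"]      # from emoticons

clf = make_pipeline(
    CountVectorizer(analyzer='char', ngram_range=(1, 3)),  # char n-grams, no
    LogisticRegression(max_iter=1000),                     # word segmentation
)
clf.fit(texts, labels)
print(clf.predict(["真的好开心"]))
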
@inproceedings{Eshghi2015,
  address = {London, UK},
  author = {Eshghi, A and Howes, C and Gregoromichelaki, E and Hough, J and Purver, M},
  booktitle = {Proceedings of the 11th International Conference on Computational Semantics},
  isbn = {978-1-941643-33-4},
  month = {Apr},
  owner = {dan},
  pages = {261--271},
  publisher = {Association for Computational Linguistics},
  timestamp = {2016.04.04},
  title = {Feedback in Conversation as Incremental Semantic Update},
  url = {http://www.aclweb.org/anthology/W15-0130},
  year = {2015},
  bdsk-url-1 = {http://www.aclweb.org/anthology/W15-0130}
}
@inproceedings{Purver2015,
  address = {London, UK},
  author = {Purver, M and Sadrzadeh, M},
  booktitle = {Proceedings of the IWCS 2015 Workshop on Interactive Meaning Construction},
  month = {Apr},
  owner = {dan},
  pages = {21--22},
  timestamp = {2016.04.04},
  title = {From Distributional Semantics to Distributional Pragmatics?},
  url = {http://www.eecs.qmul.ac.uk/~mpurver/papers/purver-sadrzadeh15imc.pdf},
  year = {2015},
  bdsk-url-1 = {http://www.eecs.qmul.ac.uk/~mpurver/papers/purver-sadrzadeh15imc.pdf}
}
@inproceedings{Sadrzadeh2015,
  address = {London, UK},
  author = {Sadrzadeh, M and Purver, M},
  booktitle = {Proceedings of the IWCS 2015 Workshop on Advances in Distributional Semantics},
  month = {Apr},
  owner = {dan},
  timestamp = {2016.04.04},
  title = {Geometry of Meaning from Words to Dialogue Acts},
  url = {http://www.eecs.qmul.ac.uk/~mpurver/papers/sadrzadeh-purver15ads.pdf},
  year = {2015},
  bdsk-url-1 = {http://www.eecs.qmul.ac.uk/~mpurver/papers/sadrzadeh-purver15ads.pdf}
}
@inproceedings{McGregor2015a,
  abstract = {This paper seeks to situate the computational modelling of metaphor within the context of questions about the relationship between the meaning and use of language. The results of this pragmatic assessment are used as the theoretical basis for a proposed computational implementation that seeks metaphor in the geometry of a vector space model of distributional semantics. This statistical approach to the analysis and generation of metaphor is taken as a platform for a consideration of the fraught relationship between computational models of cognitive processes and the study of consciousness.},
  author = {McGregor, S and Purver, M and Wiggins, G},
  booktitle = {AISB Convention 2015},
  day = {1},
  month = {Jan},
  owner = {dan},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {Metaphor, meaning, computers and consciousness},
  year = {2015}
}
@inproceedings{Agres2015,
  address = {Park City, UT},
  author = {Agres, K and McGregor, S and Purver, M and Wiggins, G},
  booktitle = {Proceedings of the 6th International Conference on Computational Creativity (ICCC)},
  isbn = {978-0-8425-2970-9},
  month = {Jun},
  owner = {dan},
  pages = {118--125},
  timestamp = {2016.04.04},
  title = {Conceptualizing Creativity: From Distributional Semantics to Conceptual Spaces},
  url = {http://www.eecs.qmul.ac.uk/~mpurver/papers/agres-et-al15iccc.pdf},
  year = {2015},
  bdsk-url-1 = {http://www.eecs.qmul.ac.uk/~mpurver/papers/agres-et-al15iccc.pdf}
}
@inproceedings{McGregor2015b,
  address = {Newcastle-upon-Tyne},
  author = {McGregor, S and Purver, M and Wiggins, G},
  booktitle = {Proceedings of the 13th International Cognitive Linguistics Conference (ICLC)},
  month = {Jul},
  owner = {dan},
  timestamp = {2016.04.04},
  title = {An Emergent Model of Metaphors as Transformations of Vector Spaces},
  url = {http://www.eecs.qmul.ac.uk/~mpurver/papers/mcgregor-et-al15iclc.pdf},
  year = {2015},
  bdsk-url-1 = {http://www.eecs.qmul.ac.uk/~mpurver/papers/mcgregor-et-al15iclc.pdf}
}
@inproceedings{Mylonas2015,
  address = {Tokyo},
  author = {Mylonas, D and Purver, M and Sadrzadeh, M and MacDonald, L and Griffin, L},
  booktitle = {Proceedings of the 2015 Meeting of the International Colour Association (AIC)},
  month = {May},
  owner = {dan},
  timestamp = {2016.04.04},
  title = {The Use of English Colour Terms in Big Data},
  url = {http://www.eecs.qmul.ac.uk/~mpurver/papers/mylonas-et-al15aic.pdf},
  year = {2015},
  bdsk-url-1 = {http://www.eecs.qmul.ac.uk/~mpurver/papers/mylonas-et-al15aic.pdf}
}
@incollection{Kempson2015,
  author = {Kempson, R and Cann, R and Eshghi, A and Gregoromichelaki, E and Purver, M},
  booktitle = {Handbook of Contemporary Semantic Theory},
  edition = {2nd},
  editor = {Lappin, S and Fox, C},
  isbn = {978-0-470-67073-6},
  month = {Sep},
  number = {4},
  owner = {dan},
  publisher = {Wiley},
  timestamp = {2016.04.04},
  title = {Ellipsis},
  url = {http://eu.wiley.com/WileyCDA/WileyTitle/productCd-0470670738.html},
  year = {2015},
  bdsk-url-1 = {http://eu.wiley.com/WileyCDA/WileyTitle/productCd-0470670738.html}
}
@article{Sylwester2015,
  author = {Sylwester, K and Purver, M},
  day = {16},
  doi = {10.1371/journal.pone.0137422},
  editor = {Danforth, CM},
  eissn = {1932-6203},
  issue = {9},
  journal = {PLOS ONE},
  month = {Sep},
  owner = {dan},
  pages = {e0137422},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {Twitter Language Use Reflects Psychological Differences between Democrats and Republicans},
  volume = {10},
  year = {2015},
  bdsk-url-1 = {http://dx.doi.org/10.1371/journal.pone.0137422}
}
@article{Carey2015,
  abstract = {Performing musicians invest thousands of hours becoming experts in a range of perceptual, attentional, and cognitive skills. The duration and intensity of musicians' training - far greater than that of most educational or rehabilitation programs - provides a useful model to test the extent to which skills acquired in one particular context (music) generalize to different domains. Here, we asked whether the instrument-specific and more instrument-general skills acquired during professional violinists' and pianists' training would generalize to superior performance on a wide range of analogous (largely non-musical) skills, when compared to closely matched non-musicians. Violinists and pianists outperformed non-musicians on fine-grained auditory psychophysical measures, but surprisingly did not differ from each other, despite the different demands of their instruments. Musician groups did differ on a tuning system perception task: violinists showed clearest biases towards the tuning system specific to their instrument, suggesting that long-term experience leads to selective perceptual benefits given a training-relevant context. However, we found only weak evidence of group differences in non-musical skills, with musicians differing marginally in one measure of sustained auditory attention, but not significantly on auditory scene analysis or multi-modal sequencing measures. Further, regression analyses showed that this sustained auditory attention metric predicted more variance in one auditory psychophysical measure than did musical expertise. Our findings suggest that specific musical expertise may yield distinct perceptual outcomes within contexts close to the area of training. Generalization of expertise to relevant cognitive domains may be less clear, particularly where the task context is non-musical.},
  author = {Carey, D and Rosen, S and Krishnan, S and Pearce, MT and Shepherd, A and Aydelott, J and Dick, F},
  doi = {10.1016/j.cognition.2014.12.005},
  eissn = {1873-7838},
  issn = {0010-0277},
  journal = {Cognition},
  language = {eng},
  month = {Apr},
  owner = {dan},
  pages = {81--105},
  timestamp = {2016.04.04},
  title = {Generality and specificity in the effects of musical expertise on perception and cognition},
  volume = {137},
  year = {2015},
  bdsk-url-1 = {http://dx.doi.org/10.1016/j.cognition.2014.12.005}
}
@article{Pearce2015,
  author = {Pearce, MT and Halpern, AR},
  doi = {10.1037/a0039279},
  issn = {1931-3896},
  issue = {3},
  journal = {Psychology of Aesthetics, Creativity, and the Arts},
  keyword = {aging},
  month = {Aug},
  owner = {dan},
  pages = {248--253},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {Age-Related Patterns in Emotions Evoked by Music},
  url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2\&SrcApp=PARTNER_APP\&SrcAuth=LinksAMR\&KeyUT=WOS:000359379300007\&DestLinkType=FullRecord\&DestApp=ALL_WOS\&UsrCustomerID=612ae0d773dcbdba3046f6df545e9f6a},
  volume = {9},
  year = {2015},
  bdsk-url-1 = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2%5C&SrcApp=PARTNER_APP%5C&SrcAuth=LinksAMR%5C&KeyUT=WOS:000359379300007%5C&DestLinkType=FullRecord%5C&DestApp=ALL_WOS%5C&UsrCustomerID=612ae0d773dcbdba3046f6df545e9f6a},
  bdsk-url-2 = {http://dx.doi.org/10.1037/a0039279}
}
@inproceedings{Jack2015,
  author = {Jack, RH and McPherson, A and Stockman, T},
  conference = {International Conference on the Multimodal Experience of Music},
  organization = {Sheffield, UK},
  owner = {dan},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {The design of tactile musical devices for the deaf},
  year = {2015}
}
@article{MacRitchie2015,
  author = {MacRitchie, J and McPherson, AP},
  day = {2},
  doi = {10.3389/fpsyg.2015.00702},
  eissn = {1664-1078},
  journal = {Frontiers in Psychology},
  month = {Jun},
  owner = {dan},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {Integrating optical finger motion tracking with surface touch events},
  volume = {6},
  year = {2015},
  bdsk-url-1 = {http://dx.doi.org/10.3389/fpsyg.2015.00702}
}
@article{Pardue2015,
  author = {Pardue, LS and Harte, C and McPherson, AP},
  day = {2},
  doi = {10.1080/09298215.2015.1087575},
  issn = {0929-8215},
  issue = {4},
  journal = {Journal of New Music Research},
  keyword = {violin},
  month = {Oct},
  owner = {dan},
  pages = {305--323},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {A Low-Cost Real-Time Tracking System for Violin},
  url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2\&SrcApp=PARTNER_APP\&SrcAuth=LinksAMR\&KeyUT=WOS:000366320300002\&DestLinkType=FullRecord\&DestApp=ALL_WOS\&UsrCustomerID=612ae0d773dcbdba3046f6df545e9f6a},
  volume = {44},
  year = {2015},
  bdsk-url-1 = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2%5C&SrcApp=PARTNER_APP%5C&SrcAuth=LinksAMR%5C&KeyUT=WOS:000366320300002%5C&DestLinkType=FullRecord%5C&DestApp=ALL_WOS%5C&UsrCustomerID=612ae0d773dcbdba3046f6df545e9f6a},
  bdsk-url-2 = {http://dx.doi.org/10.1080/09298215.2015.1087575}
}
@inproceedings{Donovan2015,
  author = {Donovan, L and McPherson, A},
  conference = {138th Audio Engineering Society Convention},
  organization = {Warsaw, Poland},
  owner = {dan},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {Active control of a string instrument bridge using the Posicast technique},
  year = {2015}
}
@inproceedings{McPherson2015b,
  author = {McPherson, A and Zappi, V},
  conference = {138th Audio Engineering Society Convention},
  organization = {Warsaw, Poland},
  owner = {dan},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {An environment for submillisecond-latency audio and sensor processing on BeagleBone Black},
  year = {2015}
}
@article{McPherson2015,
  author = {McPherson, A},
  doi = {10.1162/COMJ_a_00297},
  issn = {0148-9267},
  issue = {2},
  journal = {Computer Music Journal},
  owner = {dan},
  pages = {28--46},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {Buttons, Handles, and Keys: Advances in Continuous-Control Keyboard Instruments},
  url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2\&SrcApp=PARTNER_APP\&SrcAuth=LinksAMR\&KeyUT=WOS:000355321900002\&DestLinkType=FullRecord\&DestApp=ALL_WOS\&UsrCustomerID=612ae0d773dcbdba3046f6df545e9f6a},
  volume = {39},
  year = {2015},
  bdsk-url-1 = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2%5C&SrcApp=PARTNER_APP%5C&SrcAuth=LinksAMR%5C&KeyUT=WOS:000355321900002%5C&DestLinkType=FullRecord%5C&DestApp=ALL_WOS%5C&UsrCustomerID=612ae0d773dcbdba3046f6df545e9f6a},
  bdsk-url-2 = {http://dx.doi.org/10.1162/COMJ_a_00297}
}
@inproceedings{Zappi2015,
  author = {Zappi, V and McPherson, A},
  conference = {New Interfaces for Musical Expression},
  organization = {Baton Rouge, USA},
  owner = {dan},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {Exposing the scaffolding of digital instruments with hardware-software feedback loops},
  year = {2015}
}
@inproceedings{Menzies2015,
  author = {Menzies, DWH and McPherson, A},
  conference = {New Interfaces for Musical Expression},
  organization = {Baton Rouge, USA},
  owner = {dan},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {Highland piping ornament recognition using Dynamic Time Warping},
  year = {2015}
}
@inproceedings{Concannon2015,
  address = {Gothenburg},
  author = {Concannon, S and Healey, P and Purver, M},
  booktitle = {Proceedings of the 19th SemDial Workshop on the Semantics and Pragmatics of Dialogue (goDIAL)},
  conference = {19th SemDial Workshop on the Semantics and Pragmatics of Dialogue (goDIAL)},
  finishday = {26},
  finishmonth = {Aug},
  finishyear = {2015},
  issn = {2308-2275},
  month = {Aug},
  organization = {Gothenburg, Sweden},
  owner = {dan},
  pages = {15--23},
  publicationstatus = {published},
  startday = {24},
  startmonth = {Aug},
  startyear = {2015},
  timestamp = {2016.04.04},
  title = {Shifting Opinions: Experiments on Agreement and Disagreement in Dialogue},
  url = {http://www.eecs.qmul.ac.uk/~mpurver/papers/concannon-et-al15semdialexp.pdf},
  year = {2015},
  bdsk-url-1 = {http://www.eecs.qmul.ac.uk/~mpurver/papers/concannon-et-al15semdialexp.pdf}
}
@inproceedings{Concannon2015a,
  address = {Gothenburg},
  author = {Concannon, S and Healey, PGT and Purver, M},
  booktitle = {Proceedings of the 19th SemDial Workshop on the Semantics and Pragmatics of Dialogue (goDIAL)},
  conference = {19th SemDial Workshop on the Semantics and Pragmatics of Dialogue (goDIAL)},
  finishday = {26},
  finishmonth = {Aug},
  finishyear = {2015},
  issn = {2308-2275},
  month = {Aug},
  organization = {Gothenburg, Sweden},
  owner = {dan},
  pages = {6--14},
  publicationstatus = {published},
  startday = {24},
  startmonth = {Aug},
  startyear = {2015},
  timestamp = {2016.04.04},
  title = {Taking a Stance: a Corpus Study of Reported Speech},
  url = {http://www.eecs.qmul.ac.uk/~mpurver/papers/concannon-et-al15semdialcorpus.pdf},
  year = {2015},
  bdsk-url-1 = {http://www.eecs.qmul.ac.uk/~mpurver/papers/concannon-et-al15semdialcorpus.pdf}
}
@article{Katevas2015,
  author = {Katevas, K and Healey, PGT and Harris, MT},
  day = {25},
  doi = {10.3389/fpsyg.2015.01253},
  issn = {1664-1078},
  journal = {Frontiers in Psychology},
  keyword = {human robot interaction},
  month = {Aug},
  number = {1253},
  owner = {dan},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {Robot Comedy Lab: experimenting with the social dynamics of live performance},
  url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2\&SrcApp=PARTNER_APP\&SrcAuth=LinksAMR\&KeyUT=WOS:000360045400001\&DestLinkType=FullRecord\&DestApp=ALL_WOS\&UsrCustomerID=612ae0d773dcbdba3046f6df545e9f6a},
  volume = {6},
  year = {2015},
  bdsk-url-1 = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2%5C&SrcApp=PARTNER_APP%5C&SrcAuth=LinksAMR%5C&KeyUT=WOS:000360045400001%5C&DestLinkType=FullRecord%5C&DestApp=ALL_WOS%5C&UsrCustomerID=612ae0d773dcbdba3046f6df545e9f6a},
  bdsk-url-2 = {http://dx.doi.org/10.3389/fpsyg.2015.01253}
}
@inproceedings{Tubb2015,
  author = {Tubb, R and Dixon, S},
  booktitle = {Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
  owner = {dan},
  publisher = {ACM},
  timestamp = {2016.04.04},
  title = {An Evaluation of Multidimensional Controllers for Sound Design Tasks},
  year = {2015}
}
@inproceedings{Dai2015,
  author = {Dai, J and Mauch, M and Dixon, S},
  booktitle = {16th International Society for Music Information Retrieval Conference},
  owner = {dan},
  pages = {420--426},
  timestamp = {2016.04.04},
  title = {Analysis of Intonation Trajectories in Solo Singing},
  year = {2015}
}
@inproceedings{Sigtia2015,
  author = {Sigtia, S and Boulanger-Lewandowski, N and Dixon, S},
  booktitle = {16th International Society for Music Information Retrieval Conference},
  editor = {M{\"u}ller, M and Wiering, F},
  isbn = {978-84-606-8853-2},
  owner = {dan},
  pages = {127--133},
  timestamp = {2016.04.04},
  title = {Audio Chord Recognition with a Hybrid Recurrent Neural Network},
  url = {http://www.informatik.uni-trier.de/~ley/db/conf/ismir/ismir2015.html},
  year = {2015},
  bdsk-url-1 = {http://www.informatik.uni-trier.de/~ley/db/conf/ismir/ismir2015.html}
}
@inproceedings{Wilmering2015,
  author = {Wilmering, T and Bechhofer, S and Dixon, S and Fazekas, G and Page, K},
  booktitle = {Third International Workshop on Linked Media (LiME 2015), Proceedings of the 24th International Conference on World Wide Web Companion},
  owner = {dan},
  pages = {737--738},
  timestamp = {2016.04.04},
  title = {Automating Annotation of Media with Linked Data Workflows},
  year = {2015}
}
@inproceedings{Osmalskyj2015,
  author = {Osmalskyj, J and Foster, P and Dixon, S and Embrechts, J-J},
  booktitle = {16th International Society for Music Information Retrieval Conference},
  owner = {dan},
  pages = {462--468},
  timestamp = {2016.04.04},
  title = {Combining Features for Cover Song Identification},
  year = {2015}
}
@inproceedings{Cheng2015,
  author = {Cheng, T and Dixon, S and Mauch, M},
  booktitle = {IEEE International Conference on Acoustics Speech and Signal Processing},
  conference = {IEEE International Conference on Acoustics Speech and Signal Processing},
  owner = {dan},
  pages = {594--598},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {Modelling the Decay of Piano Sounds},
  year = {2015}
}
@inproceedings{Wang2015,
  abstract = {The goal of score-performance synchronisation is to align a given musical score to an audio recording of a performance of the same piece. A major challenge in computing such alignments is to account for musical parameters including the local tempo or playing style. To increase the overall robustness, current methods assume that notes occurring simultaneously in the score are played concurrently in a performance. Musical voices such as the melody, however, are often played asynchronously to other voices, which can lead to significant local alignment errors. In this paper, we present a novel method that handles asynchronies between the melody and the accompaniment by treating the voices as separate timelines in a multi-dimensional variant of dynamic time warping (DTW). Constraining the alignment with information obtained via classical DTW, our method measurably improves the alignment accuracy for pieces with asynchronous voices and preserves the accuracy otherwise.},
  author = {Wang, S and Ewert, S and Dixon, S},
  booktitle = {ICASSP, IEEE International Conference on Acoustics, Speech and Signal Processing - Proceedings},
  day = {1},
  doi = {10.1109/ICASSP.2015.7178037},
  isbn = {9781467369978},
  issn = {1520-6149},
  month = {Jan},
  owner = {dan},
  pages = {589--593},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {Compensating for asynchronies between musical voices in score-performance alignment},
  volume = {2015-August},
  year = {2015},
  bdsk-url-1 = {http://dx.doi.org/10.1109/ICASSP.2015.7178037}
}
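
% Editor's note: the classic dynamic time warping recursion that the Wang2015
% paper extends to a multi-dimensional variant with separate voice timelines;
% this sketch computes only the standard two-sequence alignment cost.

import numpy as np

def dtw_cost(X, Y):
    """Alignment cost between feature sequences X (n, d) and Y (m, d);
    backtracking through D would recover the warping path."""
    n, m = len(X), len(Y)
    D = np.full((n + 1, m + 1), np.inf)
    D[0, 0] = 0.0
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            c = np.linalg.norm(X[i - 1] - Y[j - 1])
            D[i, j] = c + min(D[i - 1, j], D[i, j - 1], D[i - 1, j - 1])
    return D[n, m]
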
@inproceedings{Sigtia2015a,
  abstract = {We investigate the problem of incorporating higher-level symbolic score-like information into Automatic Music Transcription (AMT) systems to improve their performance. We use recurrent neural networks (RNNs) and their variants as music language models (MLMs) and present a generative architecture for combining these models with predictions from a frame level acoustic classifier. We also compare different neural network architectures for acoustic modeling. The proposed model computes a distribution over possible output sequences given the acoustic input signal and we present an algorithm for performing a global search for good candidate transcriptions. The performance of the proposed model is evaluated on piano music from the MAPS dataset and we observe that the proposed model consistently outperforms existing transcription methods.},
  author = {Sigtia, S and Benetos, E and Boulanger-Lewandowski, N and Weyde, T and D'Avila Garcez, AS and Dixon, S},
  booktitle = {ICASSP, IEEE International Conference on Acoustics, Speech and Signal Processing - Proceedings},
  day = {1},
  doi = {10.1109/ICASSP.2015.7178333},
  isbn = {9781467369978},
  issn = {1520-6149},
  month = {Jan},
  owner = {dan},
  pages = {2061--2065},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {A hybrid recurrent neural network for music transcription},
  volume = {2015-August},
  year = {2015},
  bdsk-url-1 = {http://dx.doi.org/10.1109/ICASSP.2015.7178333}
}
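
% Editor's note: a much-simplified decoder illustrating the combination the
% Sigtia2015a abstract describes: per-frame acoustic log-probabilities merged
% with a music-language-model transition score inside a beam search. The
% paper's generative architecture and global search are more elaborate;
% `alpha` and the matrix shapes are assumptions.

import numpy as np

def hybrid_beam_decode(acoustic_logp, lm_logp, alpha=0.5, beam=4):
    """acoustic_logp: (T, S) frame-level log-probs from the classifier;
    lm_logp: (S, S) log transition probabilities from the language model.
    Returns the best-scoring state sequence under the combined score."""
    T, S = acoustic_logp.shape
    hyps = [((), 0.0)]
    for t in range(T):
        scored = []
        for path, score in hyps:
            for s in range(S):
                lm = alpha * lm_logp[path[-1], s] if path else 0.0
                scored.append((path + (s,), score + acoustic_logp[t, s] + lm))
        scored.sort(key=lambda h: h[1], reverse=True)
        hyps = scored[:beam]                  # keep the `beam` best prefixes
    return hyps[0][0]
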
@inproceedings{Song2015,
  author = {Song, Y and Dixon, S},
  booktitle = {12th International Conference on Sound and Music Computing},
  owner = {dan},
  pages = {387--392},
  timestamp = {2016.04.04},
  title = {How Well Can a Music Emotion Recognition System Predict the Emotional Responses of Participants?},
  year = {2015}
}
@inproceedings{Mauch2015,
  author = {Mauch, M and Cannam, C and Bittner, R and Fazekas, G and Salamon, J and Dai, J and Bello, J and Dixon, S},
  booktitle = {First International Conference on Technologies for Music Notation and Representation (TENOR 2015)},
  owner = {dan},
  pages = {23--30},
  timestamp = {2016.04.04},
  title = {Computer-aided Melody Note Transcription Using the Tony Software: Accuracy and Efficiency},
  year = {2015}
}
@article{Foster2015,
  abstract = {This paper investigates methods for quantifying similarity between audio signals, specifically for the task of cover song detection. We consider an information-theoretic approach, where we compute pairwise measures of predictability between time series. We compare discrete-valued approaches operating on quantized audio features, to continuous-valued approaches. In the discrete case, we propose a method for computing the normalized compression distance, where we account for correlation between time series. In the continuous case, we propose to compute information-based measures of similarity as statistics of the prediction error between time series. We evaluate our methods on two cover song identification tasks using a data set comprised of 300 Jazz standards and using the Million Song Dataset. For both datasets, we observe that continuous-valued approaches outperform discrete-valued approaches. We consider approaches to estimating the normalized compression distance (NCD) based on string compression and prediction, where we observe that our proposed normalized compression distance with alignment (NCDA) improves average performance over NCD, for sequential compression algorithms. Finally, we demonstrate that continuous-valued distances may be combined to improve performance with respect to baseline approaches. Using a large-scale filter-and-refine approach, we demonstrate state-of-the-art performance for cover song identification using the Million Song Dataset.},
  author = {Foster, P and Dixon, S and Klapuri, A},
  day = {1},
  doi = {10.1109/TASLP.2015.2416655},
  issn = {1558-7916},
  issue = {6},
  journal = {IEEE Transactions on Audio, Speech and Language Processing},
  month = {Jun},
  owner = {dan},
  pages = {993--1005},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {Identifying Cover Songs Using Information-Theoretic Measures of Similarity},
  volume = {23},
  year = {2015},
  bdsk-url-1 = {http://dx.doi.org/10.1109/TASLP.2015.2416655}
}
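
% Editor's note: the standard normalized compression distance that Foster2015
% builds on, here with zlib as the compressor; the paper's NCDA variant
% additionally aligns the sequences before compression, which is omitted here.

import zlib

def ncd(x: bytes, y: bytes) -> float:
    """NCD(x, y) = (C(xy) - min(C(x), C(y))) / max(C(x), C(y)),
    applied to quantised audio feature sequences serialised as bytes."""
    cx, cy = len(zlib.compress(x)), len(zlib.compress(y))
    cxy = len(zlib.compress(x + y))
    return (cxy - min(cx, cy)) / max(cx, cy)

print(ncd(b"abcabcabc" * 20, b"abcabcabd" * 20))   # similar -> small distance
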
@inproceedings{Bengler2015,
  author = {Bengler, B and Bryan-Kinns, N},
  booktitle = {Creativity \& Cognition},
  doi = {10.1145/2757226.2764548},
  editor = {Maver, T and Do, EY-L},
  isbn = {978-1-4503-3598-0},
  owner = {dan},
  pages = {177--180},
  publisher = {ACM},
  timestamp = {2016.04.04},
  title = {"I could play here for hours.." (thinks the visitor and leaves): Why People Disengage from Public Interactives.},
  url = {http://dl.acm.org/citation.cfm?id=2757226},
  year = {2015},
  bdsk-url-1 = {http://dl.acm.org/citation.cfm?id=2757226},
  bdsk-url-2 = {http://dx.doi.org/10.1145/2757226.2764548}
}
@inproceedings{Morgan2015,
  abstract = {This paper describes the LuminUs, a device that we designed in order to explore how new technologies could influence the inter-personal aspects of co-present musical collaborations. The LuminUs uses eye-tracking headsets and small wireless accelerometers to measure the gaze and body motion of each musician. A small light display then provides visual feedback to each musician, based either on the gaze or the body motion of their co-performer. We carried out an experiment with 15 pairs of music students in order to investigate how the LuminUs would influence their musical interactions. Preliminary results suggest that visual feedback provided by the LuminUs led to significantly increased glancing between the two musicians, whilst motion-based feedback appeared to lead to a decrease in body motion for both participants.},
  author = {Morgan, E and Gunes, H and Bryan-Kinns, N},
  booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  day = {1},
  doi = {10.1007/978-3-319-22668-2_4},
  eissn = {1611-3349},
  isbn = {9783319226675},
  issn = {0302-9743},
  month = {Jan},
  owner = {dan},
  pages = {47--54},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {The LuminUs: Providing musicians with visual feedback on the gaze and body motion of their co-performers},
  volume = {9297},
  year = {2015},
  bdsk-url-1 = {http://dx.doi.org/10.1007/978-3-319-22668-2_4}
}
@article{Metatla2015,
  author = {Metatla, O and Bryan-Kinns, N and Stockman, T and Martin, F},
  doi = {10.7287/peerj.preprints.1376v1},
  journal = {PeerJ PrePrints},
  owner = {dan},
  pages = {e1376},
  timestamp = {2016.04.04},
  title = {Sonification of reference markers for auditory graphs: Effects on non-visual point estimation tasks},
  volume = {3},
  year = {2015},
  bdsk-url-1 = {http://dx.doi.org/10.7287/peerj.preprints.1376v1}
}
@article{Morgan2015a,
  abstract = {Our research considers the role that new technologies could play in supporting emotional and non-verbal interactions between musicians during co-present music making. To gain a better understanding of the underlying affective and communicative processes that occur during such interactions, we carried out an exploratory study where we collected self-report and continuous behavioural and physiological measures from pairs of improvising drummers. Our analyses revealed interesting relationships between creative decisions and changes in heart rate. Self-reported measures of creativity, engagement, and energy were correlated with body motion, whilst EEG beta-band activity was correlated with self-reported positivity and leadership. Regarding co-visibility, lack of visual contact between musicians had a negative influence on self-reported creativity. The number of glances between musicians was positively correlated with rhythmic synchrony, and the average length of glances was correlated with self-reported boredom. Our results indicate that ECG, motion, and glance measurements could be particularly suitable for the investigation of collaborative music making.},
  author = {Morgan, E and Gunes, H and Bryan-Kinns, N},
  day = {30},
  doi = {10.1016/j.ijhcs.2015.05.002},
  eissn = {1095-9300},
  issn = {1071-5819},
  journal = {International Journal of Human Computer Studies},
  month = {May},
  owner = {dan},
  pages = {31--47},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {Using affective and behavioural sensors to explore aspects of collaborative music making},
  volume = {82},
  year = {2015},
  bdsk-url-1 = {http://dx.doi.org/10.1016/j.ijhcs.2015.05.002}
}
@article{Metatla2015a,
  abstract = {Methods used to engage users in the design process often rely on visual techniques, such as paper prototypes, to facilitate the expression and communication of design ideas. The visual nature of these tools makes them inaccessible to people living with visual impairments. In addition, while using visual means to express ideas for designing graphical interfaces is appropriate, it is harder to use them to articulate the design of non-visual displays. In this article, we present an approach to conducting participatory design with people living with visual impairments, incorporating various techniques to help make the design process accessible. We reflect on the benefits and challenges that we encountered when employing these techniques in the context of designing cross-modal interactive tools.},
  author = {Metatla, O and Bryan-Kinns, N and Stockman, T and Martin, F},
  day = {1},
  doi = {10.1080/15710882.2015.1007877},
  eissn = {1745-3755},
  issn = {1571-0882},
  issue = {1},
  journal = {CoDesign},
  month = {Jan},
  owner = {dan},
  pages = {35--48},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {Designing with and for people living with visual impairments: audio-tactile mock-ups, audio diaries and participatory prototyping},
  volume = {11},
  year = {2015},
  bdsk-url-1 = {http://dx.doi.org/10.1080/15710882.2015.1007877}
}
@article{Mazzoni2015,
  author = {Mazzoni, A and Bryan-Kinns, N},
  doi = {10.4108/icst.intetain.2015.259625},
  journal = {ICST Trans. e-Education e-Learning},
  number = {8},
  owner = {dan},
  pages = {e2},
  timestamp = {2016.04.04},
  title = {How Does It Feel Like? An Exploratory Study of a Prototype System to Convey Emotion through Haptic Wearable Devices},
  volume = {2},
  year = {2015},
  bdsk-url-1 = {http://dx.doi.org/10.4108/icst.intetain.2015.259625}
}
@inproceedings{McDonald2015,
  author = {McDonald, S and Kirk, DS and Bryan-Kinns, N},
  booktitle = {Creativity \& Cognition},
  doi = {10.1145/2757226.2764547},
  editor = {Maver, T and Do, EY-L},
  isbn = {978-1-4503-3598-0},
  owner = {dan},
  pages = {173--176},
  publisher = {ACM},
  timestamp = {2016.04.04},
  title = {Nature Bot: Experiencing Nature in the Built Environment},
  url = {http://dl.acm.org/citation.cfm?id=2757226},
  year = {2015},
  bdsk-url-1 = {http://dl.acm.org/citation.cfm?id=2757226},
  bdsk-url-2 = {http://dx.doi.org/10.1145/2757226.2764547}
}
@inproceedings{Mazzoni2015a,
  abstract = {This paper reports on the design and implementation of a portable, hands-free, wearable haptic device that maps the emotions evoked by the music in a movie into vibrations, with the aim that hearing-impaired audiences can get a sense of the emotional content carried by the music in specific movie scenes, and therefore feel (hear) the music through the sense of touch. A study of the use of the technology is reported, which found that high arousal and high valence were reliably conveyed through haptic patterns with high intensity and high frequency, whereas haptic patterns with low intensity and low frequency conveyed low arousal and low valence.},
  author = {Mazzoni, A and Bryan-Kinns, N},
  booktitle = {Proceedings of the 2015 7th International Conference on Intelligent Technologies for Interactive Entertainment, INTETAIN 2015},
  day = {10},
  doi = {10.4108/icst.intetain.2015.259625},
  isbn = {9781631900617},
  month = {Nov},
  owner = {dan},
  pages = {64--68},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {How does it feel like? An exploratory study of a prototype system to convey emotion through haptic wearable devices},
  year = {2015},
  bdsk-url-1 = {http://dx.doi.org/10.4108/icst.intetain.2015.259625}
}
@inproceedings{Abdallah2015,
  author = {Abdallah, S and Alencar-Brayner, A and Benetos, E and Cottrell, S and Dykes, J and Gold, N and Kachkaev, A and Mahey, M and Tidhar, D and Tovell, A and Weyde, T and Wolff, D},
  booktitle = {Proceedings of the 5th International Workshop on Folk Music Analysis (FMA 2015)},
  url = {http://fma2015.sciencesconf.org/conference/fma2015/FMA2015_OfficialProceedings.pdf},
  conference = {5th International Workshop on Folk Music Analysis},
  day = {10},
  finishday = {12},
  finishmonth = {Jun},
  finishyear = {2015},
  isbn = {9791095209003},
  month = {Jun},
  organization = {Paris, France},
  owner = {dan},
  pages = {10--12},
  publicationstatus = {published},
  publisher = {Association Dirac},
  startday = {10},
  startmonth = {Jun},
  startyear = {2015},
  timestamp = {2016.04.04},
  title = {Automatic transcription and pitch analysis of the British Library World \& Traditional Music Collection},
  year = {2015}
}
@article{Benetos2015,
  abstract = {Automatic music transcription, a central topic in music signal analysis, is typically limited to equal-tempered music and evaluated on a quartertone tolerance level. A system is proposed to automatically transcribe microtonal and heterophonic music as applied to the makam music of Turkey. Specific traits of this music that deviate from properties targeted by current transcription tools are discussed, and a collection of instrumental and vocal recordings is compiled, along with aligned microtonal reference pitch annotations. An existing multi-pitch detection algorithm is adapted for transcribing music with 20 cent resolution, and a method for converting a multi-pitch heterophonic output into a single melodic line is proposed. Evaluation metrics for transcribing microtonal music are applied, which use various levels of tolerance for inaccuracies with respect to frequency and time. Results show that the system is able to transcribe microtonal instrumental music at 20 cent resolution with an F-measure of 56.7\%, outperforming state-of-the-art methods for the same task. Case studies on transcribed recordings are provided, to demonstrate the shortcomings and the strengths of the proposed method.},
  author = {Benetos, E and Holzapfel, A},
  day = {1},
  doi = {10.1121/1.4930187},
  issn = {0001-4966},
  issue = {4},
  journal = {Journal of the Acoustical Society of America},
  month = {Oct},
  owner = {dan},
  pages = {2118--2130},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {Automatic transcription of Turkish microtonal music},
  volume = {138},
  year = {2015},
  bdsk-url-1 = {http://dx.doi.org/10.1121/1.4930187}
}
@inproceedings{Rossignol2015,
  author = {Rossignol, M and Lagrange, M and Lafay, G and Benetos, E},
  booktitle = {EUSIPCO},
  doi = {10.1109/EUSIPCO.2015.7362739},
  isbn = {978-0-9928-6263-3},
  owner = {dan},
  pages = {2023--2027},
  publisher = {IEEE},
  timestamp = {2016.04.04},
  title = {Alternate level clustering for drum transcription},
  url = {http://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=7362053},
  year = {2015},
  bdsk-url-1 = {http://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=7362053},
  bdsk-url-2 = {http://dx.doi.org/10.1109/EUSIPCO.2015.7362739}
}
@inproceedings{Benetos2015a,
  author = {Benetos, E and Weyde, T},
  booktitle = {ISMIR},
  editor = {M{\"u}ller, M and Wiering, F},
  isbn = {978-84-606-8853-2},
  owner = {dan},
  pages = {701--707},
  timestamp = {2016.04.04},
  title = {An Efficient Temporally-Constrained Probabilistic Model for Multiple-Instrument Music Transcription},
  url = {http://www.informatik.uni-trier.de/~ley/db/conf/ismir/ismir2015.html},
  year = {2015},
  bdsk-url-1 = {http://www.informatik.uni-trier.de/~ley/db/conf/ismir/ismir2015.html}
}
@inproceedings{Sigtia2015b,
  author = {Sigtia, S and Benetos, E and Boulanger-Lewandowski, N and Weyde, T and d'Avila Garcez, AS and Dixon, S},
  booktitle = {IEEE International Conference on Acoustics, Speech and Signal Processing},
  conference = {2015 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  day = {19},
  doi = {10.1109/ICASSP.2015.7178333},
  finishday = {24},
  finishmonth = {Apr},
  finishyear = {2015},
  month = {Apr},
  organization = {Brisbane, Australia},
  owner = {dan},
  pages = {2061--2065},
  publicationstatus = {published},
  publisher = {IEEE},
  startday = {19},
  startmonth = {Apr},
  startyear = {2015},
  timestamp = {2016.04.04},
  title = {A Hybrid Recurrent Neural Network for Music Transcription},
  url = {http://www.ieee.org/},
  year = {2015},
  bdsk-url-1 = {http://www.ieee.org/},
  bdsk-url-2 = {http://dx.doi.org/10.1109/ICASSP.2015.7178333}
}
@article{balliauw2015amusic,
  abstract = {A piano fingering indicates which finger should play each note in a piece. Such a guideline is very helpful for both amateur and experienced players in order to play a piece fluently. In this paper, we propose a variable neighborhood search algorithm to generate piano fingerings for complex polyphonic music, a frequently encountered case that was ignored in previous research. The algorithm takes into account the biomechanical properties of the pianist's hand in order to generate a fingering that is user-specific and as easy to play as possible. An extensive statistical analysis was carried out in order to tune the parameters of the algorithm and evaluate its performance. The results of computational experiments show that the algorithm generates good fingerings that are very similar to those published in sheet music books.},
  author = {Balliauw, M and Herremans, D and Palhazi Cuervo, D and S{\"o}rensen, K},
  day = {1},
  doi = {10.1111/itor.12211},
  eissn = {1475-3995},
  issn = {0969-6016},
  journal = {International Transactions in Operational Research},
  month = {Jan},
  publicationstatus = {accepted},
  timestamp = {2016.04.04},
  title = {A variable neighborhood search algorithm to generate piano fingerings for polyphonic sheet music},
  year = {2015},
  bdsk-url-1 = {http://dx.doi.org/10.1111/itor.12211}
}
@article{herremans2015classificationsearch,
  abstract = {In this article, a number of musical features are extracted from a large musical database and subsequently used to build four composer-classification models. The first two models, an if-then rule set and a decision tree, result in an understanding of stylistic differences between Bach, Haydn, and Beethoven. The other two models, a logistic regression model and a support vector machine classifier, are more accurate. The probability of a piece being composed by a certain composer given by the logistic regression model is integrated into the objective function of a previously developed variable neighborhood search algorithm that can generate counterpoint. The result is a system that can generate an endless stream of contrapuntal music with composer-specific characteristics that sounds pleasing to the ear. This system is implemented as an Android app called FuX.},
  author = {Herremans, D and S{\"o}rensen, K and Martens, D},
  day = {1},
  doi = {10.1162/COMJ-a-00316},
  eissn = {1531-5169},
  issn = {0148-9267},
  issue = {3},
  journal = {Computer Music Journal},
  month = {Jan},
  pages = {71--91},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {Classification and Generation of Composer-Specific Music Using Global Feature Models and Variable Neighborhood Search},
  volume = {39},
  year = {2015},
  bdsk-url-1 = {http://dx.doi.org/10.1162/COMJ-a-00316}
}
@article{herremans2015composecompute,
  author = {Herremans, D},
  doi = {10.1007/s10288-015-0282-y},
  eissn = {1614-2411},
  issn = {1619-4500},
  issue = {3},
  journal = {4OR},
  month = {Sep},
  pages = {335--336},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {Compose {$\equiv$} compute},
  volume = {13},
  year = {2015},
  bdsk-url-1 = {http://dx.doi.org/10.1007/s10288-015-0282-y}
}
@inproceedings{balliauw2015generatingalgorithm,
  abstract = {A piano fingering is an indication of which finger is to be used to play each note in a piano composition. Good piano fingerings enable pianists to study, remember and play pieces in an optimal way. In this paper, we propose a tabu search algorithm to find a good piano fingering automatically and in a short amount of time. An innovative feature of the proposed algorithm is that it implements an objective function that takes into account the characteristics of the pianist's hand and that it can be used for complex polyphonic music.},
  author = {Balliauw, M and Herremans, D and Palhazi Cuervo, D and S{\"o}rensen, K},
  booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  day = {1},
  doi = {10.1007/978-3-319-20603-5_15},
  eissn = {1611-3349},
  isbn = {9783319206028},
  issn = {0302-9743},
  month = {Jan},
  pages = {149--160},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {Generating fingerings for polyphonic piano music with a tabu search algorithm},
  volume = {9110},
  year = {2015},
  bdsk-url-1 = {http://dx.doi.org/10.1007/978-3-319-20603-5_15}
}
@article{herremans2015generatingmodels,
  abstract = {In this research, a system is built that generates music for the bagana, a traditional lyre from Ethiopia, based on a first order Markov model. Due to the limited size of many datasets, it is often only possible to get rich and reliable statistics for low order models, yet these do not handle structure very well and their output is often very repetitive. A first contribution of this paper is to propose a method that allows the enforcement of structure and repetition within music, thus handling long term coherence with a first order model. The second goal of this research is to explain and propose different ways in which low order Markov models can be used to build quality assessment metrics for an optimization algorithm. These are then implemented in a variable neighborhood search algorithm that generates bagana music. The results are examined and thoroughly evaluated.},
  author = {Herremans, D and Weisser, S and S{\"o}rensen, K and Conklin, D},
  day = {23},
  doi = {10.1016/j.eswa.2015.05.043},
  issn = {0957-4174},
  issue = {21},
  journal = {Expert Systems with Applications},
  month = {Jun},
  pages = {7424--7435},
  publicationstatus = {published},
  timestamp = {2016.04.04},
  title = {Generating structured music for bagana using quality metrics based on Markov models},
  volume = {42},
  year = {2015},
  bdsk-url-1 = {http://dx.doi.org/10.1016/j.eswa.2015.05.043}
}

This file was generated by bibtex2html 1.98.