@comment{Review notes: four entries previously shared the duplicate key "9999",
which is a hard error for BibTeX/Biber ("repeated entry"); those four have been
given unique descriptive keys. All other keys are unchanged so existing \cite
commands keep working. Entries 374 and 192 look like duplicates of the same
working paper -- see the internal-note on entry 192.}

@article{prestopnik2017gamers,
  author      = {Prestopnik, Nathan and Crowston, Kevin and Wang, Jun},
  title       = {Gamers, citizen scientists, and data: Exploring participant contributions in two games with a purpose},
  journal     = {Computers in Human Behavior},
  volume      = {68},
  year        = {2017},
  pages       = {254--268},
  doi         = {10.1016/j.chb.2016.11.035},
  abstract    = {Two key problems for crowd-sourcing systems are motivating contributions from participants and ensuring the quality of these contributions. Games have been suggested as a motivational approach to encourage contribution, but attracting participation through game play rather than intrinsic interest raises concerns about the quality of the contributions provided. These concerns are particularly important in the context of citizen science projects, when the contributions are data to be used for scientific research. To assess the validity of concerns about the effects of gaming on data quality, we compare the quality of data obtained from two citizen science games, one a {\textquotedblleft}gamified{\textquotedblright} version of a species classification task and one a fantasy game that used the classification task only as a way to advance in the game play. Surprisingly, though we did observe cheating in the fantasy game, data quality (i.e., classification accuracy) from participants in the two games was not significantly different. As well, data from short-time contributors was also at a usable level of accuracy. Finally, learning did not seem to affect data quality in our context. These findings suggest that various approaches to gamification can be useful for motivating contributions to citizen science projects.},
  attachments = {https://citsci.syr.edu/sites/crowston.syr.edu/files/chb2016.pdf},
}

@proceedings{prestopnik2014quality,
  author        = {Prestopnik, Nathan and Crowston, Kevin and Wang, Jun},
  title         = {Exploring data quality in games with a purpose},
  year          = {2014},
  month         = mar,
  address       = {Berlin, Germany},
  doi           = {10.9776/14066},
  abstract      = {A key problem for crowd-sourcing systems is motivating contributions from participants and ensuring the quality of these contributions. Games have been suggested as a motivational approach to encourage contribution, but attracting participation through game play rather than scientific interest raises concerns about the quality of the data provided, which is particularly important when the data are to be used for scientific research. To assess whether these concerns are justified, we compare the quality of data obtained from two citizen science games, one a {\textquotedblleft}gamified{\textquotedblright} version of a species classification task and one a fantasy game that used the classification task only as a way to advance in the game play. Surprisingly, though we did observe cheating in the fantasy game, data quality (i.e., classification accuracy) from participants in the two games was not significantly different. As well, the quality of data from short-time contributors was at a usable level of accuracy. These findings suggest that various approaches to gamification can be useful for motivating contributions to citizen science projects.},
  attachments   = {https://citsci.syr.edu/sites/crowston.syr.edu/files/gamedataquality_cameraready_4.pdf},
  internal-note = {NOTE(review): this is a single paper (has an abstract and a paper-level DOI), not a whole proceedings volume -- probably @inproceedings; confirm the booktitle before changing the entry type},
}

@inproceedings{2013,
  author       = {Prestopnik, Nathan and Souid, Dania},
  title        = {{Forgotten Island}: A story-driven citizen science adventure},
  booktitle    = {CHI {\textquoteright}13 Extended Abstracts on Human Factors in Computing Systems},
  year         = {2013},
  month        = apr,
  pages        = {2643--2646},
  publisher    = {ACM Press},
  organization = {ACM Press},
  address      = {Paris, France},
  isbn         = {9781450319522},
  doi          = {10.1145/2468356.2479484},
  url          = {http://delivery.acm.org/10.1145/2480000/2479484/p2643-prestopnik.pdf},
  abstract     = {Forgotten Island, a citizen science video game, is part of an NSF-funded design science research project, Citizen Sort. It is a mechanism to help life scientists classify photographs of living things and a research tool to help HCI and information science scholars explore storytelling, engagement, and the quality of citizen-produced data in the context of citizen science.},
}

@inproceedings{Crowston:2013,
  author      = {Crowston, Kevin and Prestopnik, Nathan},
  title       = {Motivation and data quality in a citizen science game: A design science evaluation},
  booktitle   = {Forty-sixth Hawai{\textquoteright}i International Conference on System Sciences (HICSS-46)},
  year        = {2013},
  month       = jan,
  address     = {Wailea, HI},
  attachments = {https://citsci.syr.edu/sites/crowston.syr.edu/files/hicss2013citizensort_cameraready.pdf},
}

@proceedings{prestopnik2012assemblages,
  author        = {Prestopnik, Nathan and Crowston, Kevin},
  title         = {Citizen science system assemblages: Understanding the technologies that support crowdsourced science},
  year          = {2012},
  month         = feb,
  address       = {Toronto, Ontario},
  abstract      = {We explore the nature of technologies to support citizen science, a method of inquiry that leverages the power of crowds to collect and analyze scientific data. We evaluate these technologies as system assemblages, collections of interrelated functionalities that support specific activities in pursuit of overall project goals. The notion of system assemblages helps us to explain how different citizen science platforms may be comprised of widely varying functionalities, yet still support relatively similar goals. Related concepts of build vs. buy and web satisfiers vs. web motivators are used to explore how different citizen science functionalities may lead to successful project outcomes. Four detailed case studies of current citizen science projects encompassing a cross-section of varying project sizes, resource levels, technologies, and approaches to inquiry help us to answer the following research questions: 1) What do typical system assemblages for citizen science look like? 2) What factors influence the composition of a system assemblage for citizen science? 3) What effect does the assemblage composition have on scientific goals, participant support, motivation, and satisfaction? and 4) What are the design implications for the system assemblage perspective on citizen science technologies?},
  attachments   = {https://citsci.syr.edu/sites/crowston.syr.edu/files/citizensciencesystemassemblage.pdf},
  internal-note = {NOTE(review): this is a single paper, not a whole proceedings volume -- probably @inproceedings; confirm the booktitle before changing the entry type},
}

@inproceedings{Prestopnik:2012a,
  author      = {Prestopnik, Nathan and Crowston, Kevin},
  title       = {Purposeful gaming \& socio-computational systems: A citizen science design case},
  booktitle   = {Group {\textquoteright}12 Conference},
  year        = {2012},
  month       = oct,
  address     = {Sanibel Island, FL, USA},
  attachments = {https://citsci.syr.edu/sites/crowston.syr.edu/files/citizensort_cameraready.pdf},
}

@techreport{374,
  author      = {Prestopnik, Nathan and Crowston, Kevin},
  title       = {Citizen science system assemblages: Toward greater understanding of technologies to support crowdsourced science},
  institution = {Syracuse University School of Information Studies},
  type        = {Unpublished working paper},
  year        = {2011},
  month       = jun,
  abstract    = {We explore the nature of technologies to support citizen science, a method of inquiry that leverages the power of crowds to collect and analyze scientific data. We evaluate these technologies as system assemblages, collections of interrelated functionalities that support specific activities in pursuit of overall project goals. The notion of system assemblages helps us to explain how different citizen science platforms may be comprised of widely varying functionalities, yet still support relatively similar goals. Related concepts of build vs. buy, support for science vs. support for participants, and web satisfiers vs. web motivators are used to explore how different citizen science functionalities may lead to successful project outcomes. Four detailed case studies of current citizen science projects encompassing a cross-section of varying project sizes, resource levels, technologies, and approaches to inquiry help us to answer the following research questions: 1) What factors influence the composition of a system assemblage for citizen science? 2) What do typical system assemblages for citizen science look like? 3) What effect does the assemblage composition have on scientific goals, participant support, motivation, and satisfaction? and 4) What are the design implications for the system assemblage perspective on citizen science technologies?},
  attachments = {https://citsci.syr.edu/sites/crowston.syr.edu/files/system_assemblage.pdf},
}

@techreport{192,
  author        = {Prestopnik, Nathan and Crowston, Kevin},
  title         = {Citizen Science System Assemblages: Toward Greater Understanding of Technologies to Support Crowdsourced Science},
  institution   = {Syracuse University School of Information Studies},
  type          = {Unpublished working paper},
  year          = {2011},
  month         = jun,
  attachments   = {https://citsci.syr.edu/sites/crowston.syr.edu/files/system_assemblage_0.pdf},
  internal-note = {NOTE(review): appears to duplicate entry 374 (same title, year, authors, and attachment filename) -- confirm and merge; institution inferred from entry 374},
}

@unpublished{prestopnik2011games,
  author      = {Prestopnik, Nathan and Crowston, Kevin},
  title       = {Exploring Collective Intelligence Games With Design Science: A Citizen Science Design Case},
  year        = {2011},
  note        = {Unpublished manuscript},
  abstract    = {Citizen science is a form of collective intelligence where members of the public are recruited to contribute to scientific investigations. Citizen science projects often use web-based systems to support collaborative scientific activities, but finding ways to attract participants and confirm the veracity of the data produced by non-scientists are key research questions. We describe a series of web-based tools and games currently under development to support taxonomic classification of organisms in photographs collected by citizen science projects. In the design science tradition, the systems are purpose-built to test hypotheses about participant motivation and techniques for ensuring data quality. Findings from preliminary evaluation and the design process itself are discussed.},
  attachments = {https://citsci.syr.edu/sites/crowston.syr.edu/files/designing\%20citizen\%20science\%20games.pdf},
}