@article{Ternovski_Kalla_Aronow_2022,
  title     = {The Negative Consequences of Informing Voters about Deepfakes: Evidence from Two Survey Experiments},
  author    = {Ternovski, John and Kalla, Joshua and Aronow, Peter},
  journal   = {Journal of Online Trust and Safety},
  volume    = {1},
  number    = {2},
  year      = {2022},
  month     = {feb},
  url       = {https://tsjournal.org/index.php/jots/article/view/28},
  doi       = {10.54501/jots.v1i2.28},
  abstract  = {Advances in machine learning have made possible “deepfakes,” or realistic, computer-generated videos of public figures saying something they have not actually said. Policymakers have expressed concern that deepfakes could mislead voters, but prior research has found that such videos have minimal effects. There has nevertheless been extensive media coverage of the dangers of deepfakes, urging voters to be critical consumers of political videos. We explore whether these well-intentioned messages have an unintended consequence: if voters are warned about deepfakes, they may begin to distrust \emph{all} political videos. We conducted two online survey experiments, and found that informing participants about deepfakes did not enhance participants’ ability to successfully spot manipulated videos but consistently induced them to believe the videos they watched were fake, even when they were real. Our findings suggest that even if deepfakes are not themselves persuasive, information about deepfakes can nevertheless be weaponized to dismiss real political videos.}
}