@article{PREETI20232153,
  title = {A GAN-Based Model of Deepfake Detection in Social Media},
  journal = {Procedia Computer Science},
  volume = {218},
  pages = {2153--2162},
  year = {2023},
  note = {International Conference on Machine Learning and Data Engineering},
  issn = {1877-0509},
  doi = {10.1016/j.procs.2023.01.191},
  url = {https://www.sciencedirect.com/science/article/pii/S1877050923001916},
  author = {Preeti and Manoj Kumar and Hitesh Kumar Sharma},
  keywords = {Digital Forensics, Image Vision, Deep Learning, Generative adversarial network, Deep Fakes, Media Forensics, Face Manipulation, Face Recognition},
  abstract = {DeepFake uses Generative Adversarial Networks (GANs) to successfully switch the identities of two people. Large public databases and deep learning methods have become rapidly available because of the proliferation of easily accessible tools online. This has resulted in the emergence of highly realistic fake content that has had a harmful impact on society and poses challenges that are difficult to address. Pre-trained GANs that can flawlessly substitute one person's face in a video or image for that of another are proving supportive for implementing deepfakes. This paper primarily presents a study of the methods used to implement deepfakes. It also discusses the main deepfake manipulation and detection techniques, along with the implementation and detection of deepfakes using deep convolution-based GAN models. A comparative analysis of the proposed GAN with other existing GAN models using the Inception Score (IS) and Fréchet Inception Distance (FID) metrics is also included. Along with the above, the paper discusses open issues and future trends that should be considered to advance the field.}
}

@inproceedings{9142077,
  author = {Younus, Mohammed Akram and Hasan, Taha Mohammed},
  booktitle = {2020 International Conference on Computer Science and Software Engineering (CSASE)},
  title = {Effective and Fast DeepFake Detection Method Based on Haar Wavelet Transform},
  year = {2020},
  month = {April},
  pages = {186--190},
  doi = {10.1109/CSASE48920.2020.9142077},
  abstract = {DeepFake videos tampered with using Generative Adversarial Networks (GANs) pose a new challenge in today's life. With the inception of GANs, generating high-quality fake videos has become much easier and far more realistic. Therefore, the development of efficient tools that can automatically detect these fake videos is of paramount importance. The proposed DeepFake detection method takes advantage of the fact that current DeepFake generation algorithms cannot generate face images at varied resolutions: they can only generate new faces at a limited size and resolution, so further distortion and blur are needed to match and fit the fake face to the background and surrounding context in the source video. This transformation causes a distinctive blur inconsistency between the generated face and its background in the resulting DeepFake videos; in turn, these artifacts can be effectively spotted by examining the edge pixels in the wavelet domain of the faces in each frame compared to the rest of the frame. A blur inconsistency detection scheme that relies on the type of edge and the analysis of its sharpness using the Haar wavelet transform is presented in this paper; using this feature, the method can determine whether the face region in a video has been blurred and to what extent. This will lead to the detection of DeepFake videos.
The effectiveness of the proposed scheme is demonstrated in the experimental results, where the “UADFV” dataset was used for the evaluation; a detection rate of more than 90.5% was achieved.}
}

@article{DBLP:journals/corr/abs-2008-11363,
  author = {Umur Aybars Ciftci and Ilke Demir and Lijun Yin},
  title = {How Do the Hearts of Deep Fakes Beat? Deep Fake Source Detection via Interpreting Residuals with Biological Signals},
  journal = {CoRR},
  volume = {abs/2008.11363},
  year = {2020},
  url = {https://arxiv.org/abs/2008.11363},
  eprinttype = {arXiv},
  eprint = {2008.11363},
  biburl = {https://dblp.org/rec/journals/corr/abs-2008-11363.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  abstract = {Fake portrait video generation techniques have been posing a new threat to society, with photorealistic deep fakes used for political propaganda, celebrity imitation, forged evidence, and other identity-related manipulations. Following these generation techniques, some detection approaches have also proven useful due to their high classification accuracy. Nevertheless, almost no effort has been spent on tracking down the source of deep fakes. We propose an approach not only to separate deep fakes from real videos, but also to discover the specific generative model behind a deep fake. Some purely deep learning based approaches try to classify deep fakes using CNNs, where they actually learn the residuals of the generator. We believe that these residuals contain more information and that we can reveal these manipulation artifacts by disentangling them with biological signals. Our key observation is that the spatiotemporal patterns in biological signals can be conceived as a representative projection of residuals. To justify this observation, we extract PPG cells from real and fake videos and feed them to a state-of-the-art classification network for detecting the generative model per video. Our results indicate that our approach can detect fake videos with 97.29% accuracy, and the source model with 93.39% accuracy.}
}

@article{doi:10.1080/13600834.2020.1794615,
  author = {Tyrone Kirchengast},
  title = {Deepfakes and image manipulation: criminalisation and control},
  journal = {Information \& Communications Technology Law},
  volume = {29},
  number = {3},
  pages = {308--323},
  year = {2020},
  publisher = {Routledge},
  doi = {10.1080/13600834.2020.1794615},
  url = {https://doi.org/10.1080/13600834.2020.1794615},
  abstract = {Deepfakes are a form of human image synthesis in which an existing picture or image is superimposed into a video to change the identity of those depicted in the video. The technology relies on machine learning or artificial intelligence to map an existing image, usually a photo of a person's face, and transfer that image onto an existing video image. The technology emerged in the latter part of 2017, and has since given rise to apps and other programmes that allow users to create their own deepfakes. We already use filters and emojis to alter images by consent; however, deepfakes are particularly problematic because they allow for the production of videos that are highly convincing and taken to be a real video of the person depicted.
Deepfakes provide for the manipulation of all manner of video, but particular risks include videos produced to incite political deception, voter manipulation, commercial fraud, and ‘revenge porn’. The production of deepfake ‘revenge porn’ is especially insidious given the ability to transfer the face of any person onto an already existing pornographic video. Harm is exacerbated where that video is then disseminated via the internet or social media.}
}

@article{https://doi.org/10.1002/wmh3.487,
  author = {Neylan, Julian H. and Patel, Sonny S. and Erickson, Timothy B.},
  title = {Strategies to counter disinformation for healthcare practitioners and policymakers},
  journal = {World Medical \& Health Policy},
  volume = {14},
  number = {2},
  pages = {428--436},
  year = {2022},
  keywords = {disinformation, global health, health communication, health policy, misinformation},
  doi = {10.1002/wmh3.487},
  url = {https://onlinelibrary.wiley.com/doi/abs/10.1002/wmh3.487},
  abstract = {Medical disinformation has interfered with healthcare workers' ability to communicate with the general population in a wide variety of public health contexts globally. This has limited the effectiveness of evidence-based medicine and healthcare capacity. Disinformation campaigns often try to integrate or co-opt healthcare workers into their practices, which hinders effective health communication. We present a critical overview of issues health practitioners and communicators have experienced when dealing with medical disinformation online and offline, as well as best practices for overcoming these issues when disseminating health information. This article lists disinformation techniques that have yet to be used against the medical community but that need to be considered in future communication planning, as they may be highly effective. We also present broad policy recommendations and considerations designed to mitigate the effectiveness of medical disinformation campaigns.}
}

@article{sachs2020fake,
  title = {"Fake" Makeup Isn't So Pretty: Revising the Vicarious Liability Standard for Consumers Injured by Counterfeit Cosmetics},
  author = {Sachs, Rebecca},
  journal = {AIPLA Quarterly Journal},
  volume = {48},
  pages = {363},
  year = {2020},
  publisher = {HeinOnline}
}

@article{https://search.informit.org/doi/10.3316/informit.807638896756480,
  author = {Klein, David O. and Wueller, Joshua R.},
  title = {Fake News: A Legal Perspective},
  journal = {Australasian Policing},
  volume = {10},
  number = {2},
  pages = {11--15, 17},
  numpages = {7},
  year = {2018},
  publisher = {Australasian Institute of Policing},
  address = {Pascoe Vale South, VIC, Australia},
  url = {https://search.informit.org/doi/10.3316/informit.807638896756480},
  abstract = {The concept of “fake news” has garnered substantial attention in recent years, evolving from its satirical literary origins into a passionately criticized Internet phenomenon. Whether described as rumors, “counterknowledge,” misinformation, “post-truths,” “alternative facts” or just plain damned lies, these false statements of fact typically are published on Web sites and disseminated via social media for profit or social influence.}
}

@article{meskys2020regulating,
  title = {Regulating deep fakes: legal and ethical considerations},
  abstract = {Deep fakes have become a buzzword discussed widely among legal and technology experts.
The term ‘deep fakes’ refers to face-swapping technologies that enable the quick creation of fake images or videos that appear incredibly realistic. This paper explains the technologies behind the creation of deep fakes and offers four categories of deep fakes (deep fake porn, deep fakes in political campaigns, deep fakes for commercial uses, and creative deep fakes). The authors address the ethical and regulatory aspects of each of those four categories. Since the first deep fakes were used for malicious and socially detrimental purposes (e.g., revenge porn and political campaigns), the authors highlight that these deep fakes, like many other technologies in the past, were initially confronted with fear. Since deep fakes are likely to be more widely adopted in the future, the authors highlight various social and legal challenges that regulators and society will have to face. The paper also touches upon the potential role of online content dissemination platforms and governments in addressing deep fakes. The authors also offer three suggestions, one from a technology point of view and two from a regulatory angle, on how to curtail the advancement of deep fakes.},
  keywords = {Deep fakes, privacy, face-swapping, artificial intelligence, machine learning, law, copyright, regulation, free speech},
  author = {Meskys, Edvinas and Kalpokiene, Julija and Jurcys, Paulius and Liaudanskas, Aidas},
  journal = {Journal of Intellectual Property Law \& Practice},
  url = {https://ssrn.com/abstract=3497144},
  volume = {15},
  number = {1},
  pages = {24--31},
  year = {2020}
}

@article{Chesney2019,
  author = {Chesney, Bobby and Citron, Danielle},
  title = {Deep Fakes: A Looming Challenge for Privacy, Democracy, and National Security},
  journal = {California Law Review},
  year = {2019},
  volume = {107},
  number = {6},
  pages = {1753--1820},
  url = {https://heinonline.org/HOL/P?h=hein.journals/calr107&i=1789},
  language = {eng}
}