Ming-Chang Chiu, Tiantian Feng, Xiang Ren, and Shrikanth Narayanan. Screenplay Quality Assessment: Can We Predict Who Gets Nominated? In Proceedings of the First Joint Workshop on Narrative Understanding, Storylines, and Events, pp. 11–16, Association for Computational Linguistics, Online, July 2020.

Download

[PDF] 

Abstract

Deciding which scripts to turn into movies is a costly and time-consuming process for filmmakers. Thus, building a tool to aid script selection, an initial phase in movie production, can be very beneficial. Toward that goal, in this work, we present a method to evaluate the quality of a screenplay based on linguistic cues. We address this in a two-fold approach: (1) we define the task as predicting nominations of scripts at major film awards with the hypothesis that the peer-recognized scripts should have a greater chance to succeed. (2) based on industry opinions and narratology, we extract and integrate domain-specific features into common classification techniques. We face two challenges (1) scripts are much longer than other document datasets (2) nominated scripts are limited and thus difficult to collect. However, with narratology-inspired modeling and domain features, our approach offers clear improvements over strong baselines. Our work provides a new approach for future work in screenplay analysis.

BibTeX Entry

@inproceedings{chiu-etal-2020-screenplay,
 abstract  = {Deciding which scripts to turn into movies is a costly and time-consuming process for filmmakers. Thus, building a tool to aid script selection, an initial phase in movie production, can be very beneficial. Toward that goal, in this work, we present a method to evaluate the quality of a screenplay based on linguistic cues. We address this in a two-fold approach: (1) we define the task as predicting nominations of scripts at major film awards with the hypothesis that the peer-recognized scripts should have a greater chance to succeed. (2) based on industry opinions and narratology, we extract and integrate domain-specific features into common classification techniques. We face two challenges (1) scripts are much longer than other document datasets (2) nominated scripts are limited and thus difficult to collect. However, with narratology-inspired modeling and domain features, our approach offers clear improvements over strong baselines. Our work provides a new approach for future work in screenplay analysis.},
 address   = {Online},
 author    = {Chiu, Ming-Chang and Feng, Tiantian and Ren, Xiang and Narayanan, Shrikanth},
 booktitle = {Proceedings of the First Joint Workshop on Narrative Understanding, Storylines, and Events},
 doi       = {10.18653/v1/2020.nuse-1.2},
 link      = {http://sail.usc.edu/publications/files/Chiu-2020.pdf},
 month     = jul,
 pages     = {11--16},
 publisher = {Association for Computational Linguistics},
 title     = {Screenplay Quality Assessment: Can We Predict Who Gets Nominated?},
 url       = {https://www.aclweb.org/anthology/2020.nuse-1.2},
 year      = {2020}
}

Generated by bib2html.pl (written by Patrick Riley) on Mon Dec 02, 2024 08:34:32