@phdthesis{kabbach2024,
  abstract = {Do we really understand each other when we speak, sign, or use language in general? This (computational) linguistics thesis begins with a fundamental consideration: we, as individuals, are all singular beings who always make sense of language and the world around us through the privacy of our own minds and the potential idiosyncrasy of our mental content. From there, it asks: how could we possibly understand one another given the singularity and potential incommensurability of our respective subjectivities? It then makes three contributions. First, it argues that mutual understanding is actually a presupposition in our theories of language and communication—an assumption that we take for granted rather than one that unfolds naturally from careful empirical observation. Second, it introduces Subjective Coordination Theory—a new proposal for a theory of communication that does not rest on the presupposition of mutual understanding and, as such, dispenses with having to posit the existence of shared meaning, shared language, and objective communication success in general. Last, it contrasts the concept of subjectivity with that of normality so as to better make sense of language models and the field of artificial intelligence at large. This last contribution is itself threefold. First, it argues that language models such as ChatGPT are the product of a particular epistemology called the epistemology of normalism, which corresponds to a major shift in the philosophy of science in the nineteenth century made possible by the emergence of statistics. Second, it introduces normal language—the ontology of language behind language models (i.e. what it is that language models are actually models of)—and explains why, if language models speak normal language, nobody actually speaks normal language in practice. Last, it distinguishes intelligence from smartness to explain precisely why language models such as ChatGPT cannot be expected to ever pass the Turing test. Such models model ideal human behavior—ideally error-free—while passing the Turing test specifically requires machines to demonstrate real human behavior that deviates from this normative ideal. Indeed, in practice, real people "make mistakes"—they always deviate from whichever normative ideal they live by—and that is also precisely what makes them human. It concludes that such models are therefore models of artificial smartness rather than artificial intelligence per se, and that subjectivity, then characterized as the intrinsic and singular deviation from the norm of our respective individualities, can be understood as yet another fundamental expression of our humanity.},
  author = {Kabbach, Alexandre},
  doi = {10.13097/archive-ouverte/unige:182066},
  school = {University of Geneva},
  title = {{Language against Communication: a matter of subjectivity}},
  year = {2024},
  url = {https://doi.org/10.13097/archive-ouverte/unige:182066}
}