<?xml version="1.0" encoding="UTF-8"?>
 <rdf:RDF xmlns="http://purl.org/rss/1.0/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:cc="http://web.resource.org/cc/" xmlns:syn="http://purl.org/rss/1.0/modules/syndication/" xmlns:admin="http://webns.net/mvcb/">
  <channel rdf:about="http://pinboard.in">
    <title>Pinboard (cshalizi)</title>
    <link>https://pinboard.in/u:cshalizi/public/</link>
    <description>recent bookmarks from cshalizi</description>
    <items>
      <rdf:Seq>	<rdf:li rdf:resource="https://stackingthebricks.com/how-blogs-broke-the-web/"/>
	<rdf:li rdf:resource="https://arxiv.org/abs/2502.09192"/>
	<rdf:li rdf:resource="https://arxiv.org/abs/2504.09762"/>
	<rdf:li rdf:resource="https://direct.mit.edu/books/monograph/6123/Artificial-ReligionOn-AI-Myth-and-Power"/>
	<rdf:li rdf:resource="https://faculty.washington.edu/yenchic/short_note/note_MoM.pdf"/>
	<rdf:li rdf:resource="https://arxiv.org/abs/2604.19560"/>
	<rdf:li rdf:resource="https://www.cell.com/trends/cognitive-sciences/fulltext/S1364-6613(26)00052-5"/>
	<rdf:li rdf:resource="https://www.journals.uchicago.edu/doi/10.1086/724447"/>
	<rdf:li rdf:resource="https://www.journals.uchicago.edu/doi/10.1086/723623"/>
	<rdf:li rdf:resource="https://www.journals.uchicago.edu/doi/10.1086/724448"/>
	<rdf:li rdf:resource="https://www.journals.uchicago.edu/doi/10.1086/724449"/>
	<rdf:li rdf:resource="https://www.cambridge.org/core/elements/boolean-networks-as-predictive-models-of-emergent-biological-behaviors/0D2383F0D64543A77892CEBD5C6A964B"/>
	<rdf:li rdf:resource="https://doi.org/10.1017/9781009029346"/>
	<rdf:li rdf:resource="https://digressionsimpressions.substack.com/p/scientific-models-and-political-decision"/>
	<rdf:li rdf:resource="https://www.pnas.org/doi/abs/10.1073/pnas.2021865119"/>
	<rdf:li rdf:resource="https://henrich.fas.harvard.edu/sites/g/files/omnuum5811/files/henrich/files/hong_henrich_-_2021_-_the_cultural_evolution_of_epistemic_practices.pdf"/>
	<rdf:li rdf:resource="https://www.academia.edu/106616966/Landemore_Can_AI_bring_deliberation_to_the_masses"/>
	<rdf:li rdf:resource="https://theoryandpractice.org/2024/10/Yes,%20we%20did%20discover%20the%20Higgs!/"/>
	<rdf:li rdf:resource="https://arxiv.org/abs/2410.18858"/>
	<rdf:li rdf:resource="https://www.mariangoodman.com/exhibitions/agnieszka-kurant-recursion-new-york/"/>
	<rdf:li rdf:resource="https://direct.mit.edu/books/monograph/5848/AI-amp-IAn-Intellectual-History-of-Artificial"/>
	<rdf:li rdf:resource="https://www.cambridge.org/core/journals/philosophy-of-science/article/parsimony-and-overfitting/B6A58202B75D1099BB22A95EDE1F8F58?WT.mc_id=New%2520Cambridge%2520Alert%2520-%2520Articles"/>
	<rdf:li rdf:resource="https://direct.mit.edu/books/monograph/6116/The-Great-Energy-TransitionAmerica-from-1876-to"/>
	<rdf:li rdf:resource="https://www.cambridge.org/core/journals/philosophy-of-science/article/abs/carnapian-inductive-logic-for-exponential-smoothing/84DD31142459DFD0289CCE8915E79952?WT.mc_id=New%2520Cambridge%2520Alert%2520-%2520Articles"/>
	<rdf:li rdf:resource="https://academic.oup.com/qje/article/141/2/1581/8435315?nbd=45497357721"/>
	<rdf:li rdf:resource="https://github.com/xyz2tex/dot2tex"/>
	<rdf:li rdf:resource="https://taggart-tech.com/reckoning/"/>
	<rdf:li rdf:resource="https://link.springer.com/article/10.1007/s11023-025-09755-8"/>
	<rdf:li rdf:resource="https://link.springer.com/article/10.1007/s11023-026-09767-y"/>
	<rdf:li rdf:resource="https://www.aeaweb.org/articles?id=10.1257/aer.20240246"/>
	<rdf:li rdf:resource="https://www.aeaweb.org/articles?id=10.1257/aer.20241056"/>
	<rdf:li rdf:resource="https://www.ucs.org/resources/independent-science-initiative"/>
	<rdf:li rdf:resource="https://direct.mit.edu/books/oa-edited-volume/6112/Dennett-s-Real-Patterns-in-Science-and-Nature"/>
	<rdf:li rdf:resource="https://www.cambridge.org/core/journals/royal-institute-of-philosophy-supplements/article/abs/mind-as-a-control-system/501BF772FCAADCB00A1F576602E771F9"/>
	<rdf:li rdf:resource="https://arxiv.org/abs/2601.10825"/>
	<rdf:li rdf:resource="https://www.cambridge.org/core/journals/philosophy-of-science/article/measured-inference-scales-statistics-and-scientific-inference/527F6793A1C954F01A321E72F780E931?WT.mc_id=New%2520Cambridge%2520Alert%2520-%2520Issues"/>
	<rdf:li rdf:resource="https://www.cambridge.org/core/journals/philosophy-of-science/article/we-should-not-align-quantitative-measures-with-stakeholder-values/1C7DBA5E3D5904AB023268C97EACB0F2?WT.mc_id=New%2520Cambridge%2520Alert%2520-%2520Issues"/>
	<rdf:li rdf:resource="https://www.cambridge.org/core/journals/philosophy-of-science/article/science-without-trust/13A5FD15D5ADFF4C67DDC1DF8D8FEB6C?WT.mc_id=New%2520Cambridge%2520Alert%2520-%2520Issues"/>
	<rdf:li rdf:resource="https://arxiv.org/abs/math/0504472"/>
	<rdf:li rdf:resource="https://direct.mit.edu/books/monograph/6030/Strange-AttractorThe-Hallucinatory-Life-of-Terence"/>
	<rdf:li rdf:resource="https://ergosphere.blog/posts/the-machines-are-fine/"/>
	<rdf:li rdf:resource="https://www.aeaweb.org/articles?id=10.1257/aer.20240763"/>
	<rdf:li rdf:resource="https://arxiv.org/abs/2603.25568"/>
	<rdf:li rdf:resource="https://arxiv.org/abs/2603.21687"/>
	<rdf:li rdf:resource="https://www.tes.com/magazine/teaching-learning/general/uta-frith-interview-autism-not-spectrum"/>
	<rdf:li rdf:resource="https://www.sciencedirect.com/science/article/abs/pii/S0167268116301202?via%3Dihub"/>
	<rdf:li rdf:resource="https://jbgruber.github.io/rollama/"/>
	<rdf:li rdf:resource="https://www.chronicle.com/article/can-danielle-allen-save-academe-from-itself"/>
	<rdf:li rdf:resource="https://eighteenthelephant.com/2026/03/12/space-mirrors-solar-panels-fools-and-their-money/"/>
	<rdf:li rdf:resource="https://statmodeling.stat.columbia.edu/2026/03/22/space-mirrors-awesome-solution-to-our-energy-problems-or-ridiculous-public-relations-stunt/"/>
	<rdf:li rdf:resource="https://www.nytimes.com/2026/03/04/science/chimpanzees-crystals.html"/>
	<rdf:li rdf:resource="https://arxiv.org/abs/2603.12228"/>
	<rdf:li rdf:resource="https://www.tue.nl/en/our-university/library/library-news/24-02-2026-are-ai-generated-summaries-suitable-for-studying-and-research"/>
	<rdf:li rdf:resource="https://scalawagmagazine.org/2026/03/the-ai-myth-of-solomon-fairfax-the-grim-reaper-of-charleston/"/>
	<rdf:li rdf:resource="https://vigji.github.io/blog/xerox-machines-of-loving-grace/"/>
	<rdf:li rdf:resource="https://www.percepta.ai/blog/can-llms-be-computers"/>
	<rdf:li rdf:resource="https://sogdians.si.edu/the-sogdians-at-home/"/>
	<rdf:li rdf:resource="https://arxiv.org/abs/2109.08203"/>
	<rdf:li rdf:resource="https://academic.oup.com/poq/article-abstract/88/3/909/7833185"/>
	<rdf:li rdf:resource="https://www.nytimes.com/2026/03/12/magazine/ai-coding-programming-jobs-claude-chatgpt.html"/>
	<rdf:li rdf:resource="https://arxiv.org/abs/2603.10145"/>
	<rdf:li rdf:resource="https://arxiv.org/abs/2603.01220"/>
	<rdf:li rdf:resource="https://tkeskinturk.github.io/blog/publicopin/"/>
	<rdf:li rdf:resource="https://f.briatte.org/r/current-views-on-generative-ai"/>
	<rdf:li rdf:resource="https://openreview.net/forum?id=vnX1WHMNmz"/>
	<rdf:li rdf:resource="https://www.nature.com/articles/s41593-026-02217-z"/>
	<rdf:li rdf:resource="https://www.3-16am.co.uk/articles/a-turd-in-the-punchbowl-initial-thoughts-christoph-shuringa-s-a-social-history-of-analytic-philosophy-or-an-epigone-crashes-the-party"/>
	<rdf:li rdf:resource="https://arxiv.org/abs/2602.15902"/>
	<rdf:li rdf:resource="https://link.springer.com/book/10.1007/978-1-4684-8941-5"/>
	<rdf:li rdf:resource="https://asteriskmag.com/issues/08/the-myth-of-the-loneliness-epidemic"/>
      </rdf:Seq>
    </items>
  </channel><item rdf:about="https://stackingthebricks.com/how-blogs-broke-the-web/">
    <title>How the Blog Broke the Web - Stacking the Bricks</title>
    <dc:date>2026-05-04T14:22:35+00:00</dc:date>
    <link>https://stackingthebricks.com/how-blogs-broke-the-web/</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA[--- On the one hand, probably true.  On the other hand, how much of my life, cumulatively, has been spent typing '<a href="'?
]]></description>
<dc:subject>the_web_we_have_lost blogging we_shape_our_tools_and_our_tools_shape_us the_present_before_it_was_widely_distributed via:?</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:a4d5cd0b780a/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:the_web_we_have_lost"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:blogging"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:we_shape_our_tools_and_our_tools_shape_us"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:the_present_before_it_was_widely_distributed"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:via:?"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://arxiv.org/abs/2502.09192">
    <title>[2502.09192] Thinking beyond the anthropomorphic paradigm benefits LLM research</title>
    <dc:date>2026-05-01T13:43:24+00:00</dc:date>
    <link>https://arxiv.org/abs/2502.09192</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["Anthropomorphism, or the attribution of human traits to technology, is an automatic and unconscious response that occurs even in those with advanced technical expertise. In this position paper, we analyze hundreds of thousands of research articles to present empirical evidence of the prevalence and growth of anthropomorphic terminology in research on large language models (LLMs). We argue for challenging the deeper assumptions reflected in this terminology -- which, though often useful, may inadvertently constrain LLM development -- and broadening beyond them to open new pathways for understanding and improving LLMs. Specifically, we identify and examine five anthropomorphic assumptions that shape research across the LLM development lifecycle. For each assumption (e.g., that LLMs must use natural language for reasoning, or that they should be evaluated on benchmarks originally meant for humans), we demonstrate empirical, non-anthropomorphic alternatives that remain under-explored yet offer promising directions for LLM research and development."]]></description>
<dc:subject>to:NB via:henry_farrell large_language_models_(so_called) chain-of-thought_(so_called)</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:663d8f4e0529/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:via:henry_farrell"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:large_language_models_(so_called)"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:chain-of-thought_(so_called)"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://arxiv.org/abs/2504.09762">
    <title>[2504.09762] Position: Stop Anthropomorphizing Intermediate Tokens as Reasoning/Thinking Traces!</title>
    <dc:date>2026-05-01T13:41:17+00:00</dc:date>
    <link>https://arxiv.org/abs/2504.09762</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["Intermediate token generation (ITG), where a model produces output before the solution, has become a standard method to improve the performance of language models on reasoning tasks. These intermediate tokens have been called \say{reasoning traces} or even \say{thoughts} -- implicitly anthropomorphizing the traces, and implying that these traces resemble steps a human might take when solving a challenging problem, and as such can provide an interpretable window into the operation of the model's thinking process to the end user. In this position paper, we present evidence that this anthropomorphization isn't a harmless metaphor, and instead is quite dangerous -- it confuses the nature of these models and how to use them effectively, and leads to questionable research. We call on the community to avoid such anthropomorphization of intermediate tokens."]]></description>
<dc:subject>via:rvenkat to_read to:NB large_language_models_(so_called) chain-of-thought_(so_called)</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:ade957324966/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:via:rvenkat"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to_read"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:large_language_models_(so_called)"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:chain-of-thought_(so_called)"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://direct.mit.edu/books/monograph/6123/Artificial-ReligionOn-AI-Myth-and-Power">
    <title>Artificial Religion: On AI, Myth, and Power | Books Gateway | MIT Press</title>
    <dc:date>2026-04-30T19:58:33+00:00</dc:date>
    <link>https://direct.mit.edu/books/monograph/6123/Artificial-ReligionOn-AI-Myth-and-Power</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["How AI is shaped by Western religious culture and universal existential aspirations—and why we think we need it in the first place.
"Artificial Religion argues that to fully understand our puzzling relation to AI, we must first look at the religious and existential background of our thinking about machines. Mapping some surprising connections between our history of thought about machines and Western religious narratives to political issues and existential human needs and aspirations, Mark Coeckelbergh offers a better understanding of our relationship to machines and why we think we need them at all.
"The book is unique in not just discussing the myth of AI in terms of its technical limitations and the power of Big Tech but also revealing the deeper cultural “grammar” of AI—that is, the religious patterns of thinking and existential aspirations that are often not visible but still haunt Western thinking and shape its technological culture. Moreover, this is done in a way that sheds critical light on the power of AI."]]></description>
<dc:subject>to:NB books:noted artificial_intelligence books:suggest_to_library</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:59520017a464/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:books:noted"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:artificial_intelligence"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:books:suggest_to_library"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://faculty.washington.edu/yenchic/short_note/note_MoM.pdf">
    <title>A short note on the median-of-means estimator (Yen-Chi Chen, 2020)</title>
    <dc:date>2026-04-23T16:43:42+00:00</dc:date>
    <link>https://faculty.washington.edu/yenchic/short_note/note_MoM.pdf</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA[Very nice.]]></description>
<dc:subject>to:NB have_read statistics heavy_tails estimation empirical_processes</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:ee5135168ce3/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:have_read"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:statistics"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:heavy_tails"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:estimation"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:empirical_processes"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://arxiv.org/abs/2604.19560">
    <title>[2604.19560] Separating Geometry from Probability in the Analysis of Generalization</title>
    <dc:date>2026-04-22T20:22:34+00:00</dc:date>
    <link>https://arxiv.org/abs/2604.19560</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["The goal of machine learning is to find models that minimize prediction error on data that has not yet been seen. Its operational paradigm assumes access to a dataset S and articulates a scheme for evaluating how well a given model performs on an arbitrary sample. The sample can be S (in which case we speak of ``in-sample'' performance) or some entirely new S′ (in which case we speak of ``out-of-sample'' performance). Traditional analysis of generalization assumes that both in- and out-of-sample data are i.i.d.\ draws from an infinite population. However, these probabilistic assumptions cannot be verified even in principle. This paper presents an alternative view of generalization through the lens of sensitivity analysis of solutions of optimization problems to perturbations in the problem data. Under this framework, generalization bounds are obtained by purely deterministic means and take the form of variational principles that relate in-sample and out-of-sample evaluations through an error term that quantifies how close out-of-sample data are to in-sample data. Statistical assumptions can then be used \textit{ex post} to characterize the situations when this error term is small (either on average or with high probability)."]]></description>
<dc:subject>to:NB to_read recht.benjamin raginsky.maxim learning_theory optimization via:mraginsky to_teach:childs_garden_of_statistical_learning_theory straight_into_my_veins interpolation</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:963cc7aaa897/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to_read"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:recht.benjamin"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:raginsky.maxim"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:learning_theory"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:optimization"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:via:mraginsky"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to_teach:childs_garden_of_statistical_learning_theory"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:straight_into_my_veins"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:interpolation"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.cell.com/trends/cognitive-sciences/fulltext/S1364-6613(26)00052-5">
    <title>Dependency syntax as the simplest theory of grammar: Trends in Cognitive Sciences</title>
    <dc:date>2026-04-21T13:17:07+00:00</dc:date>
    <link>https://www.cell.com/trends/cognitive-sciences/fulltext/S1364-6613(26)00052-5</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["The syntax of human languages has long been argued to be complex and even unlearnable from the input alone. However, the success of large language models (LLMs) has challenged this idea. I argue for a simple view of syntax, where the syntax of a language is just the set of dependency rules, with no phrase structure or transformation rules—constructs central to Chomsky’s transformational grammar. This approach accounts for diverse phenomena in human language processing and explains crosslinguistic word order universals. Moreover, it better explains human data for cases that differentiate these accounts and eliminates the syntax learnability problem. I speculate that LLMs, similar to children, learn the dependency grammar from linguistic patterns, leading to their impressive syntactic competence."]]></description>
<dc:subject>to:NB linguistics grammar_induction via:rvenkat</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:6de20ecece1a/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:linguistics"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:grammar_induction"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:via:rvenkat"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.journals.uchicago.edu/doi/10.1086/724447">
    <title>On the Ecological and Internal Rationality of Bayesian Conditionalization and Other Belief Updating Strategies | The British Journal for the Philosophy of Science: Vol 77, No 1</title>
    <dc:date>2026-04-18T22:13:36+00:00</dc:date>
    <link>https://www.journals.uchicago.edu/doi/10.1086/724447</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["According to Bayesians, agents should respond to evidence by conditionalizing their prior degrees of belief on what they learn. A major aim of this article is to demonstrate that there are common scenarios where Bayesian conditionalization is less rational—from both an ecological and an internal perspective—than other theoretically well-motivated belief updating strategies, even in simple situations and even for an ‘ideal’ agent who is computationally unbounded. The examples also serve to demarcate the conditions under which Bayesian conditionalization may be expected to be ecologically optimal. A second aim of the article is to argue for a broader notion of rationality than what is typically assumed in formal epistemology. On this broader understanding of rationality, classical decision theoretic principles such as expected utility maximization play a less important role."]]></description>
<dc:subject>to:NB epistemology bayesianism rationality</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:c9a42dd78d1c/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:epistemology"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:bayesianism"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:rationality"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.journals.uchicago.edu/doi/10.1086/723623">
    <title>Cascade Versus Mechanism: The Diversity of Causal Structure in Science | The British Journal for the Philosophy of Science: Vol 77, No 1</title>
    <dc:date>2026-04-18T22:12:54+00:00</dc:date>
    <link>https://www.journals.uchicago.edu/doi/10.1086/723623</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["According to mainstream philosophical views causal explanation in biology and neuroscience is mechanistic. As the term ‘mechanism’ gets regular use in these fields it is unsurprising that philosophers consider it important to scientific explanation. What is surprising is that they consider it the only causal term of importance. This article provides an analysis of a new causal concept—it examines the cascade concept in science and the causal structure it refers to. I argue that this concept is importantly different from the notion of mechanism and that this difference matters for our understanding of causation and explanation in science."]]></description>
<dc:subject>to:NB philosophy_of_science explanation_by_mechanisms neuroscience</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:513d062dbd7b/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:philosophy_of_science"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:explanation_by_mechanisms"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:neuroscience"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.journals.uchicago.edu/doi/10.1086/724448">
    <title>When Is Similarity-Biased Social Learning Adaptive? | The British Journal for the Philosophy of Science: Vol 77, No 1</title>
    <dc:date>2026-04-18T22:12:08+00:00</dc:date>
    <link>https://www.journals.uchicago.edu/doi/10.1086/724448</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["Some cultural evolution theorists claim that humans tend to imitate the traits of people who are similar to themselves—men tend to imitate men, women tend to imitate women, and so on. These theorists further suggest that selection has shaped human psychology to attend to similarities and weigh them when learning from others. The argument typically works like this: If similar people face similar problems, then learning from those people can ensure humans learn the most relevant information to solve problems they will face. Little formal evolutionary modelling has explored the conditions under which this argument is valid. This article develops a series of models to answer this question. The general insight is that the viability of the evolutionary argument depends largely on what we assume the function of social roles to be. If, as is the default view in the cultural evolution literature, social roles facilitate coordination, then the model is not very robust with respect to the initial conditions, parameter settings, or population structure. However, if social roles facilitate the division of labour, then similarity-biased learning evolves under a wide range of conditions. These results can improve our understanding of the origins of inequality. Some philosophers have proposed evolutionary bargaining models as potential explanations for inequality. These models make frequent use of similarity-biased learning assumptions. I suggest some ways to improve the research programme on bargaining models in light of these results."]]></description>
<dc:subject>to:NB cultural_evolution inequality</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:9e68f53b9a9f/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:cultural_evolution"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:inequality"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.journals.uchicago.edu/doi/10.1086/724449">
    <title>In Defence of Science: Two Ways to Rehabilitate Reichenbach’s Vindication of Induction | The British Journal for the Philosophy of Science: Vol 77, No 1</title>
    <dc:date>2026-04-18T22:10:53+00:00</dc:date>
    <link>https://www.journals.uchicago.edu/doi/10.1086/724449</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["Confronted with the problem of induction, Reichenbach accepts that we cannot justify that induction is reliable. He tries to solve the problem by proving a weaker proposition: that induction is an optimal method of prediction, because it is guaranteed not to be worse and may be better than any alternative. Regarding the most serious objection to his approach, Reichenbach himself hints at an answer without spelling it out. In this article, I will argue that there are two workable strategies to rehabilitate Reichenbach’s account. The first leads to the widely discussed method of meta-induction, as proposed by Schurz. The second strategy has not been suggested thus far. I will develop the second strategy and argue for it being, in some respects, superior to the first and closer to Reichenbach’s own position. The strategy is based on Reichenbach’s idea that the inductive straight rule is not only applicable on the object but also on the method level. He does not spell out how exactly this insight is supposed to save his account. But he seems to assume that nothing more than the straight rule and the different levels of its application are needed for this purpose. The strategy introduced in this article illustrates that this assumption is correct."]]></description>
<dc:subject>to:NB philosophy_of_science induction reichenbach.hans</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:14188e6b7edb/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:philosophy_of_science"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:induction"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:reichenbach.hans"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.cambridge.org/core/elements/boolean-networks-as-predictive-models-of-emergent-biological-behaviors/0D2383F0D64543A77892CEBD5C6A964B">
    <title>Boolean Networks as Predictive Models of Emergent Biological Behaviors</title>
    <dc:date>2026-04-17T03:18:43+00:00</dc:date>
    <link>https://www.cambridge.org/core/elements/boolean-networks-as-predictive-models-of-emergent-biological-behaviors/0D2383F0D64543A77892CEBD5C6A964B</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["Interacting biological systems at all organizational levels display emergent behavior. Modeling these systems is made challenging by the number and variety of biological components and interactions – from molecules in gene regulatory networks to species in ecological networks – and the often-incomplete state of system knowledge, such as the unknown values of kinetic parameters for biochemical reactions. Boolean networks have emerged as a powerful tool for modeling these systems. This Element provides a methodological overview of Boolean network models of biological systems. After a brief introduction, the authors describe the process of building, analyzing, and validating a Boolean model. They then present the use of the model to make predictions about the system's response to perturbations and about how to control its behavior. The Element emphasizes the interplay between structural and dynamical properties of Boolean networks and illustrates them in three case studies from disparate levels of biological organization."]]></description>
<dc:subject>to:NB biochemical_networks of_course_its_really_a_spin_glass books:noted downloaded</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:98c166445d04/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:biochemical_networks"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:of_course_its_really_a_spin_glass"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:books:noted"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:downloaded"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://doi.org/10.1017/9781009029346">
    <title>Scientific Models and Decision Making</title>
    <dc:date>2026-04-17T02:56:41+00:00</dc:date>
    <link>https://doi.org/10.1017/9781009029346</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["This Element introduces the philosophical literature on models, with an emphasis on normative considerations relevant to models for decision-making. Chapter 1 gives an overview of core questions in the philosophy of modeling. Chapter 2 examines the concept of model adequacy for purpose, using three examples of models from the atmospheric sciences to describe how this sort of adequacy is determined in practice. Chapter 3 explores the significance of using models that are not adequate for purpose, including the purpose of informing public decisions. Chapter 4 provides a basic framework for values in modelling, using a case study to highlight the ethical challenges in building models for decision making. It concludes by establishing the need for strategies to manage value judgments in modelling, including the potential for public participation in the process."]]></description>
<dc:subject>to:NB books:noted philosophy_of_science modeling political_philosophy science_as_a_social_process</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:6cc19b65d04c/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:books:noted"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:philosophy_of_science"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:modeling"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:political_philosophy"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:science_as_a_social_process"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://digressionsimpressions.substack.com/p/scientific-models-and-political-decision">
    <title>Scientific Models and Political Decision-Making (on Winsberg &amp; Harvard)</title>
    <dc:date>2026-04-17T02:50:00+00:00</dc:date>
    <link>https://digressionsimpressions.substack.com/p/scientific-models-and-political-decision</link>
    <dc:creator>cshalizi</dc:creator><dc:subject>have_read book_reviews science_as_a_social_process tracked_down_references</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:5848fe2cc01a/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:have_read"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:book_reviews"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:science_as_a_social_process"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:tracked_down_references"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.pnas.org/doi/abs/10.1073/pnas.2021865119">
    <title>One model for the learning of language | PNAS</title>
    <dc:date>2026-04-17T02:05:27+00:00</dc:date>
    <link>https://www.pnas.org/doi/abs/10.1073/pnas.2021865119</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["A major goal of linguistics and cognitive science is to understand what class of learning systems can acquire natural language. Until recently, the computational requirements of language have been used to argue that learning is impossible without a highly constrained hypothesis space. Here, we describe a learning system that is maximally unconstrained, operating over the space of all computations, and is able to acquire many of the key structures present in natural language from positive evidence alone. We demonstrate this by providing the same learning model with data from 74 distinct formal languages which have been argued to capture key features of language, have been studied in experimental work, or come from an interesting complexity class. The model is able to successfully induce the latent system generating the observed strings from small amounts of evidence in almost all cases, including for regular (e.g., a^n, (xy)^n, and a^n b^m), context-free (e.g., a^n b^n, a^n b^(n+m), and x x^R), and context-sensitive (e.g., a^n b^n c^n, a^n b^m c^n d^m, and xx) languages, as well as for many languages studied in learning experiments. These results show that relatively small amounts of positive evidence can support learning of rich classes of generative computations over structures. The model provides an idealized learning setup upon which additional cognitive constraints and biases can be formalized."]]></description>
<dc:subject>to:NB grammar_induction color_me_skeptical</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:8107fdce20d9/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:grammar_induction"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:color_me_skeptical"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://henrich.fas.harvard.edu/sites/g/files/omnuum5811/files/henrich/files/hong_henrich_-_2021_-_the_cultural_evolution_of_epistemic_practices.pdfd">
    <title>The Cultural Evolution of Epistemic Practices: The case of Divination</title>
    <dc:date>2026-04-16T17:38:56+00:00</dc:date>
    <link>https://henrich.fas.harvard.edu/sites/g/files/omnuum5811/files/henrich/files/hong_henrich_-_2021_-_the_cultural_evolution_of_epistemic_practices.pdfd</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["Although a substantial literature in anthropology and comparative religion explores
divination across diverse societies and back into history, little research has integrated
the older ethnographic and historical work with recent insights on human learning,
cultural transmission, and cognitive science. Here we present evidence showing that
divination practices are often best viewed as an epistemic technology, and we formally model the scenarios under which individuals may overestimate the efficacy of
divination that contribute to its cultural omnipresence and historical persistence. We
found that strong prior belief, underreporting of negative evidence, and misinferring
belief from behavior can all contribute to biased and inaccurate beliefs about the
effectiveness of epistemic technologies. We finally suggest how scientific epistemology, as it emerged in Western societies over the past few centuries, has influenced
the importance and cultural centrality of divination practices."]]></description>
<dc:subject>to:NB divination superstition cultural_evolution epistemology via:?</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:55ac0f0a6d48/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:divination"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:superstition"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:cultural_evolution"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:epistemology"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:via:?"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.academia.edu/106616966/Landemore_Can_AI_bring_deliberation_to_the_masses">
    <title>Landemore: Can AI bring deliberation to the masses</title>
    <dc:date>2026-04-16T17:33:41+00:00</dc:date>
    <link>https://www.academia.edu/106616966/Landemore_Can_AI_bring_deliberation_to_the_masses</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["A core problem in deliberative democracy is the tension between two seemingly equally important conditions of democratic legitimacy: deliberation, on the one hand, and mass participation, on the other. Might artificial intelligence help bring quality deliberation to the masses? The answer is a qualified yes. The chapter first examines the conundrum in deliberative democracy around the trade-off between deliberation and mass participation by returning to the seminal debate between Joshua Cohen and Jürgen Habermas. It then turns to an analysis of the 2019 French Great National Debate, a low-tech attempt to involve millions of French citizens in a two-month-long structured exercise of collective deliberation. Building on the shortcomings of this process, the chapter then considers two different visions for an algorithm-powered form of mass deliberation-Mass Online Deliberation (MOD), on the one hand, and Many Rotating Mini-publics (MRMs), on the other-theorizing various ways artificial intelligence could play a role in them. To the extent that artificial intelligence makes the possibility of either vision more likely to come to fruition, it carries with it the promise of deliberation at the very large scale."

--- Can't find this anywhere except this ridiculous parasitic site...]]></description>
<dc:subject>to:NB democracy large_language_models_(so_called) deliberative_democracy landemore.helene via:?</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:046e2daa4d7c/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:democracy"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:large_language_models_(so_called)"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:deliberative_democracy"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:landemore.helene"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:via:?"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://theoryandpractice.org/2024/10/Yes,%20we%20did%20discover%20the%20Higgs!/">
    <title>Yes, we did discover the Higgs! - Theory And Practice</title>
    <dc:date>2026-04-16T17:30:22+00:00</dc:date>
    <link>https://theoryandpractice.org/2024/10/Yes,%20we%20did%20discover%20the%20Higgs!/</link>
    <dc:creator>cshalizi</dc:creator><dc:subject>cranmer.kyle particle_physics hypothesis_testing statistics philosophy_of_science via:? sociology_of_science science_as_a_social_process have_read to:blog</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:86fa85118401/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:cranmer.kyle"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:particle_physics"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:hypothesis_testing"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:statistics"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:philosophy_of_science"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:via:?"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:sociology_of_science"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:science_as_a_social_process"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:have_read"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:blog"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://arxiv.org/abs/2410.18858">
    <title>[2410.18858] Bilinear Sequence Regression: A Model for Learning from Long Sequences of High-dimensional Tokens</title>
    <dc:date>2026-04-16T17:13:09+00:00</dc:date>
    <link>https://arxiv.org/abs/2410.18858</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["Current progress in artificial intelligence is centered around so-called large language models that consist of neural networks processing long sequences of high-dimensional vectors called tokens. Statistical physics provides powerful tools to study the functioning of learning with neural networks and has played a recognized role in the development of modern machine learning. The statistical physics approach relies on simplified and analytically tractable models of data. However, simple tractable models for long sequences of high-dimensional tokens are largely underexplored. Inspired by the crucial role models such as the single-layer teacher-student perceptron (aka generalized linear regression) played in the theory of fully connected neural networks, in this paper, we introduce and study the bilinear sequence regression (BSR) as one of the most basic models for sequences of tokens. We note that modern architectures naturally subsume the BSR model due to the skip connections. Building on recent methodological progress, we compute the Bayes-optimal generalization error for the model in the limit of long sequences of high-dimensional tokens, and provide a message-passing algorithm that matches this performance. We quantify the improvement that optimal learning brings with respect to vectorizing the sequence of tokens and learning via simple linear regression. We also unveil surprising properties of the gradient descent algorithms in the BSR model."]]></description>
<dc:subject>to:NB large_language_models_(so_called) neural_networks of_course_its_really_a_spin_glass high-dimensional_statistics zeborova.lenka</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:01772ebb1064/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:large_language_models_(so_called)"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:neural_networks"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:of_course_its_really_a_spin_glass"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:high-dimensional_statistics"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:zeborova.lenka"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.mariangoodman.com/exhibitions/agnieszka-kurant-recursion-new-york/">
    <title>Agnieszka Kurant: Recursion, Marian Goodman Gallery New York</title>
    <dc:date>2026-04-16T16:52:33+00:00</dc:date>
    <link>https://www.mariangoodman.com/exhibitions/agnieszka-kurant-recursion-new-york/</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA[---- This actually looks cool.]]></description>
<dc:subject>art artificial_life artificial_intelligence via:mraginsky</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:b21da58d1da4/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:art"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:artificial_life"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:artificial_intelligence"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:via:mraginsky"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://direct.mit.edu/books/monograph/5848/AI-amp-IAn-Intellectual-History-of-Artificial">
    <title>AI &amp; I: An Intellectual History of Artificial Intelligence | Books Gateway | MIT Press</title>
    <dc:date>2026-04-16T16:45:59+00:00</dc:date>
    <link>https://direct.mit.edu/books/monograph/5848/AI-amp-IAn-Intellectual-History-of-Artificial</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["A concise and illuminating history of the field of artificial intelligence from one of its earliest and most respected pioneers.
"AI & I is an intellectual history of the field of artificial intelligence from the perspective of one of its first practitioners, Eugene Charniak. Charniak entered the field in 1967, roughly 12 years after AI's founding, and was involved in many of AI's formative milestones. In this book, he traces the trajectory of breakthroughs and disappointments of the discipline up to the current day, clearly and engagingly demystifying this oft revered and misunderstood technology. His argument is controversial but well supported: that classical AI has been almost uniformly unsuccessful and that the modern deep learning approach should be viewed as the foundation for all the exciting developments that are to come.
"Written for the scientifically educated layperson, this book chronicles the history of the field of AI, starting with its origin in 1956, as a topic for a small academic workshop held at Dartmouth University. From there, the author covers reasoning and knowledge representation, reasoning under uncertainty, chess, computer vision, speech recognition, language acquisition, deep learning, and learning writ large. Ultimately, Charniak takes issue with the controversy of AI—the fear that its invention means the end of jobs, creativity, and potentially even humans as a species—and explains why such concerns are unfounded. Instead, he believes that we should embrace the technology and all its potential to benefit society."

--- Charniak's statistical language processing book from the 1990s is great so I'm excited for this.]]></description>
<dc:subject>in_NB books:noted downloaded to_read artificial_intelligence machine_learning charniak.eugene</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:ae501489213b/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:in_NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:books:noted"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:downloaded"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to_read"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:artificial_intelligence"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:machine_learning"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:charniak.eugene"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.cambridge.org/core/journals/philosophy-of-science/article/parsimony-and-overfitting/B6A58202B75D1099BB22A95EDE1F8F58?WT.mc_id=New%2520Cambridge%2520Alert%2520-%2520Articles">
    <title>Parsimony and Overfitting | Philosophy of Science | Cambridge Core</title>
    <dc:date>2026-04-16T16:06:31+00:00</dc:date>
    <link>https://www.cambridge.org/core/journals/philosophy-of-science/article/parsimony-and-overfitting/B6A58202B75D1099BB22A95EDE1F8F58?WT.mc_id=New%2520Cambridge%2520Alert%2520-%2520Articles</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["Philosophers often defend appeals to parsimony by invoking its central role in science. I argue that this move fails once we distinguish between two uses of parsimony: non-ideal and ideal. Non-ideal parsimony enjoys strong inductive support in science, since complex models are prone to overfit to predictively irrelevant noise. But philosophical data aren’t significantly noisy in the relevant sense: when our intuitions are unreliable, their unreliability typically reflects systematic bias rather than noise, which parsimony doesn’t mitigate. Philosophers therefore need ideal parsimony, which finds only weak support from science. Thus, the scientific analogy cannot vindicate the philosopher’s use of parsimony."]]></description>
<dc:subject>occams_razor philosophy_of_science in_NB</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:b7f72d5e442f/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:occams_razor"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:philosophy_of_science"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:in_NB"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://direct.mit.edu/books/monograph/6116/The-Great-Energy-TransitionAmerica-from-1876-to">
    <title>The Great Energy Transition: America from 1876 to 1929 | Books Gateway | MIT Press</title>
    <dc:date>2026-04-16T13:16:40+00:00</dc:date>
    <link>https://direct.mit.edu/books/monograph/6116/The-Great-Energy-TransitionAmerica-from-1876-to</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["How new forms of energy transformed every aspect of American life in a span of 50 years, from 1876 to 1929—and how it seeded our current polarization.

"The era of reform. The Gilded Age. The Progressive Era. What historians often divide into discrete eras was one period of profound change: a massive, multipronged energy transition. Oil, gas, and electricity were woven into a culture that had to heal sectional differences after the Civil War, absorb an enormous influx of immigrants, shift from a rural to an urban society, and adopt a scientific understanding of nature.
"Every job, business, house, and street underwent a transformation so rapid and radical that Americans simply could not grasp the larger pattern. The concepts of “technology” and an “energy transition” had yet to emerge, and observers struggled to understand their experiences using inadequate terms such as “kaleidoscopic change,” “applied science,” and “the machine age.” In The Great Energy Transition, David Nye documents this transformation—and explains our failure to see it for what it was.
"In this disorienting transformation, Nye locates the roots of today’s cultural polarization. The great energy transition accelerated demographic and economic trends, including higher wages, increasing longevity, the commodification of experience, engineering nature, corporatism, urbanization, resistance to science, and racial segregation. At the same time, the book points to the innovations and institutions that held the country together, from national parks and monuments to mass consumption and newly invented media events."]]></description>
<dc:subject>to:NB books:noted to_read american_history 19th_century_history 20th_century_history great_transformation the_present_before_it_was_widely_distributed re:the_singularity_in_our_past_light-cone books:suggest_to_library</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:e461d24ea7cb/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:books:noted"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to_read"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:american_history"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:19th_century_history"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:20th_century_history"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:great_transformation"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:the_present_before_it_was_widely_distributed"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:re:the_singularity_in_our_past_light-cone"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:books:suggest_to_library"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.cambridge.org/core/journals/philosophy-of-science/article/abs/carnapian-inductive-logic-for-exponential-smoothing/84DD31142459DFD0289CCE8915E79952?WT.mc_id=New%2520Cambridge%2520Alert%2520-%2520Articles">
    <title>Carnapian Inductive Logic for Exponential Smoothing | Philosophy of Science | Cambridge Core</title>
    <dc:date>2026-04-15T13:16:02+00:00</dc:date>
    <link>https://www.cambridge.org/core/journals/philosophy-of-science/article/abs/carnapian-inductive-logic-for-exponential-smoothing/84DD31142459DFD0289CCE8915E79952?WT.mc_id=New%2520Cambridge%2520Alert%2520-%2520Articles</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["This paper explores the inductive logic associated with exponential smoothing, the most widely used predictive rule that manifests the idea that more recent observations have a stronger influence on predictive probabilities than more remote ones. The main result shows that exponential smoothing can be derived from a set of plausible qualitative invariance assumptions about conditional probabilities. I discuss various aspects of the resulting inductive logic, including its connections to exchangeable processes, to Bayesian predictive inference and kernel methods in machine learning, as well as the philosophy of probabilistic invariance conditions and symmetries."]]></description>
<dc:subject>to:NB prediction statistics non-stationarity induction to_read</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:0b80819ed32c/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:prediction"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:statistics"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:non-stationarity"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:induction"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to_read"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://academic.oup.com/qje/article/141/2/1581/8435315?nbd=45497357721">
    <title>How Do You Identify a Good Manager?* | The Quarterly Journal of Economics | Oxford Academic</title>
    <dc:date>2026-04-13T19:40:03+00:00</dc:date>
    <link>https://academic.oup.com/qje/article/141/2/1581/8435315?nbd=45497357721</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["We introduce and validate a novel approach to identifying good managers. In a preregistered lab experiment, we causally identify managerial contributions by randomly assigning managers to teams and controlling for individual skill. We find that manager contributions are crucial for team success, and that people who self-select into management roles perform worse than randomly assigned managers. Managerial performance is strongly predicted by economic decision-making skill but not by demographic characteristics. Two validation studies support our experimental results. Participants who succeed in the lab receive more real-world promotions and, in a separate study of retail store managers, skill measures strongly predict store sales. A one standard deviation increase in manager quality increases annual per store sales by US$4.1 million (25% increase). Selecting managers on skills rather than demographic characteristics or the desire to lead could substantially improve organizational performance."

--- I want to believe, so this needs to be treated with skepticism.]]></description>
<dc:subject>to:NB experimental_economics management economics to_read</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:a8ebab345ac7/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:experimental_economics"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:management"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:economics"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to_read"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://github.com/xyz2tex/dot2tex">
    <title>GitHub - xyz2tex/dot2tex: Convert graphs generated by Graphviz to LaTeX friendly formats · GitHub</title>
    <dc:date>2026-04-13T02:10:05+00:00</dc:date>
    <link>https://github.com/xyz2tex/dot2tex</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA[--- Works with modern python, unlike the version on CTAN.]]></description>
<dc:subject>latex</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:c9722d3e683e/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:latex"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://taggart-tech.com/reckoning/">
    <title>I used AI. It worked. I hated it.: Taggart Tech</title>
    <dc:date>2026-04-13T01:33:40+00:00</dc:date>
    <link>https://taggart-tech.com/reckoning/</link>
    <dc:creator>cshalizi</dc:creator><dc:subject>programming large_language_models_(so_called) via:absfac have_read</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:8b2ce7afbe9a/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:programming"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:large_language_models_(so_called)"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:via:absfac"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:have_read"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://link.springer.com/article/10.1007/s11023-025-09755-8">
    <title>Decreasing Disruption and Increasing Concentration of Artificial Intelligence | Minds and Machines | Springer Nature Link</title>
    <dc:date>2026-04-09T13:32:20+00:00</dc:date>
    <link>https://link.springer.com/article/10.1007/s11023-025-09755-8</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["This paper examines the development of artificial intelligence (AI) technologies from 1976 to 2020 and investigates the socio-economic factors driving its evolution. Using a large-scale dataset of AI patents and a novel measure called the pairwise disruption index (PDI), we trace the social drivers of AI disruption and investigate the underlying mechanisms. Our analysis focuses on three key dimensions of the knowledge base emphasized in innovation theories: government support, R&D capacity, and inventor human capital. Results reveal (1) a clear trend of AI technologies becoming concentrated within well-resourced institutions, consistent with the theory of intellectual monopoly capitalism; and (2) while both macro-level factors—such as government support and corporate R&D capabilities—and micro-level factors—such as R&D team size—contribute to this concentration, macro-level forces exert a stronger influence overall. Among them, government support has the most substantial impact, and organizational R&D capacity has become an increasingly dominant driver in recent years. This study provides a systematic assessment of the socio-economic forces shaping AI development, complements the intellectual monopoly theory, and highlights concerns over declining technological disruption and increasing concentration in the AI sector."

--- My skepticism starts with the fundamental measurement of "disruption" and goes on from there.  There is no reason this regressand should be linear in those regressor variables, and there is no comparison to other sectors / areas of technology.  Look for replication data and see if it might make a good problem set?]]></description>
<dc:subject>to:NB technological_change economics artificial_intelligence</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:1b73bcd59027/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:technological_change"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:economics"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:artificial_intelligence"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://link.springer.com/article/10.1007/s11023-026-09767-y">
    <title>Using LLMs to Enhance Democracy | Minds and Machines | Springer Nature Link</title>
    <dc:date>2026-04-09T13:18:09+00:00</dc:date>
    <link>https://link.springer.com/article/10.1007/s11023-026-09767-y</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["LLMs are among the most advanced tools ever devised for understanding and generating natural language. Democratic deliberation and decision-making involve, at several distinct stages, the production and comprehension of language. So it is natural to ask whether our best linguistic tools might prove instrumental to one of our most important linguistic tasks involving language. Researchers and practitioners have recently asked whether LLMs can support democratic deliberation by leveraging abilities to summarise content, to aggregate opinions over summarised content, and to represent voters by predicting their preferences over unseen choices. In this paper, we assess whether using LLMs to perform these and related functions really advances the democratic values behind these experiments. We suggest that the record is mixed. In the presence of background inequality of power and resources, as well as deep moral and political disagreement, we should not use LLMs to automate non-instrumentally valuable components of the democratic process, nor should we be tempted to supplant fair and transparent decision-making procedures that are practically necessary to reconcile competing interests and values. However, while LLMs should be kept well clear of formal democratic decision-making processes, we think they can instead strengthen the informal public sphere—the arena that mediates between democratic governments and the polities that they serve, in which political communities seek information, form civic publics, and hold their leaders to account."]]></description>
<dc:subject>large_language_models_(so_called) democracy re:ai_as_a_social_technology in_NB</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:d05ca1fd27a2/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:large_language_models_(so_called)"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:democracy"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:re:ai_as_a_social_technology"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:in_NB"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.aeaweb.org/articles?id=10.1257/aer.20240246">
    <title>Robust Misspecified Models - American Economic Association</title>
    <dc:date>2026-04-09T13:11:47+00:00</dc:date>
    <link>https://www.aeaweb.org/articles?id=10.1257/aer.20240246</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["This paper studies which misspecified models are likely to persist when decision-makers compare them with competing models. The main result characterizes such models based on two features that can be derived from primitives: The model's asymptotic accuracy in predicting the equilibrium distribution of observed outcomes and the "tightness" of the prior around such equilibria. Misspecified models can be robust, persisting against any arbitrary competing model—including the true model—despite decision-makers observing an infinite amount of data. Moreover, simple misspecified models equipped with entrenched priors can be more robust than complex correctly specified models."]]></description>
<dc:subject>decision_theory misspecification re:bayes_as_evol statistics in_NB</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:f59185400440/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:decision_theory"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:misspecification"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:re:bayes_as_evol"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:statistics"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:in_NB"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.aeaweb.org/articles?id=10.1257/aer.20241056">
    <title>Similarity of Information and Collective Action - American Economic Association</title>
    <dc:date>2026-04-09T13:03:28+00:00</dc:date>
    <link>https://www.aeaweb.org/articles?id=10.1257/aer.20241056</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["We study a canonical collective action game with incomplete information. Individuals attempt to coordinate to achieve a shared goal, while also facing a temptation to free-ride. More similar information can help them coordinate, but it can also exacerbate free-riding. Our main result shows that more similar information facilitates (impedes) achieving the common goal when it is sufficiently challenging (easy). We apply this insight to show why less powerful authoritarian governments may face larger protests if they restrict press freedom, when committee diversity is beneficial in costly voting, and when a more diverse community contributes more to public good provision."]]></description>
<dc:subject>collective_action collective_cognition re:democratic_cognition in_NB</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:ab7e24fb22fd/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:collective_action"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:collective_cognition"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:re:democratic_cognition"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:in_NB"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.ucs.org/resources/independent-science-initiative">
    <title>The Independent Science Initiative | Union of Concerned Scientists</title>
    <dc:date>2026-04-08T20:15:00+00:00</dc:date>
    <link>https://www.ucs.org/resources/independent-science-initiative</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA[Official-science in exile...]]></description>
<dc:subject>our_decrepit_institutions via:aeo</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:3acd8e2f05a6/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:our_decrepit_institutions"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:via:aeo"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://direct.mit.edu/books/oa-edited-volume/6112/Dennett-s-Real-Patterns-in-Science-and-Nature">
    <title>Dennett's Real Patterns in Science and Nature | Books Gateway | MIT Press</title>
    <dc:date>2026-04-08T17:36:05+00:00</dc:date>
    <link>https://direct.mit.edu/books/oa-edited-volume/6112/Dennett-s-Real-Patterns-in-Science-and-Nature</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["How the concept of a pattern, as understood in information science and applied in contemporary AI, can address deep questions in science and philosophy.
"The explosive growth of AI and machine learning in recent decades is predicated on the recognition and exploitation of patterns in data. Of course, scientists have engaged in their own—less automated—processes of pattern recognition since the birth of science itself, and biological organisms evolved their own neural networks for pattern recognition long before people and their technology came along.
"In his seminal work, “Real Patterns,” philosopher and cognitive scientist Daniel Dennett laid out a road map for connecting the idea of “patterns” as understood by information theory to the practices of scientists and to our own cognitive capacity to model and predict the world around us. In this book—the first dedicated to the topic of real patterns—Tyler Millhouse, Steve Petersen, and Don Ross follow this road map. They explore the relevance of patterns to important aspects of both science and nature, including the emergence of high-level structure in physics, the nature of biological species, the measurement of welfare in economics, the evaluation of causal models, and the possibility of understanding in large neural networks."]]></description>
<dc:subject>books:noted philosophy_of_science emergence dennett.daniel_c. to_read downloaded in_NB</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:3fd27bfc9cb4/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:books:noted"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:philosophy_of_science"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:emergence"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:dennett.daniel_c."/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to_read"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:downloaded"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:in_NB"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.cambridge.org/core/journals/royal-institute-of-philosophy-supplements/article/abs/mind-as-a-control-system/501BF772FCAADCB00A1F576602E771F9">
    <title>The Mind as a Control System* | Royal Institute of Philosophy Supplements | Cambridge Core</title>
    <dc:date>2026-04-08T17:16:49+00:00</dc:date>
    <link>https://www.cambridge.org/core/journals/royal-institute-of-philosophy-supplements/article/abs/mind-as-a-control-system/501BF772FCAADCB00A1F576602E771F9</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["This is not a scholarly research paper, but a ‘position paper’ outlining an approach to the study of mind which has been gradually evolving (at least in my mind) since about 1969 when I first become acquainted with work in Artificial Intelligence through Max Clowes. I shall try to show why it is more fruitful to construe the mind as a control system than as a computational system (although computation can play a role in control mechanisms)."

--- Preprint version [https://cogaffarchive.org/Aaron.Sloman_Mind.as.controlsystem/Aaron.Sloman_Mind.as.controlsystem.pdf] gives the following abstract:

"Many people who favour the design-based approach to the study of mind, including the author previously, have thought of the mind as a computational system, though they don’t all agree regarding the forms of computation required for mentality. Because of ambiguities in the notion of ’computation’ and also because it tends to be too closely linked to the concept of an algorithm, it is suggested in this paper that we should rather construe the mind (or an agent with a mind) as a control system involving many interacting control loops of various kinds, most of them implemented in high level virtual machines, and many of them hierarchically organised. (Some of the sub-processes are clearly computational in character, though not necessarily all.) A number of implications are drawn out, including the implication that there are many informational substates, some incorporating factual information, some control information, using diverse forms of representation. The notion of architecture, i.e. functional differentiation into interacting components, is explained, and the conjecture put forward that in order to account for the main characteristics of the human mind it is more important to get the architecture right than to get the mechanisms right (e.g. symbolic vs neural mechanisms). Architecture dominates mechanism"]]></description>
<dc:subject>philosophy_of_mind control_theory_and_control_engineering via:mraginsky in_NB cognitive_science</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:3e58021c932d/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:philosophy_of_mind"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:control_theory_and_control_engineering"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:via:mraginsky"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:in_NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:cognitive_science"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://arxiv.org/abs/2601.10825">
    <title>[2601.10825] Reasoning Models Generate Societies of Thought</title>
    <dc:date>2026-04-08T16:58:19+00:00</dc:date>
    <link>https://arxiv.org/abs/2601.10825</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["Large language models have achieved remarkable capabilities across domains, yet mechanisms underlying sophisticated reasoning remain elusive. Recent reasoning models outperform comparable instruction-tuned models on complex cognitive tasks, attributed to extended computation through longer chains of thought. Here we show that enhanced reasoning emerges not from extended computation alone, but from simulating multi-agent-like interactions -- a society of thought -- which enables diversification and debate among internal cognitive perspectives characterized by distinct personality traits and domain expertise. Through quantitative analysis and mechanistic interpretability methods applied to reasoning traces, we find that reasoning models like DeepSeek-R1 and QwQ-32B exhibit much greater perspective diversity than instruction-tuned models, activating broader conflict between heterogeneous personality- and expertise-related features during reasoning. This multi-agent structure manifests in conversational behaviors, including question-answering, perspective shifts, and the reconciliation of conflicting views, and in socio-emotional roles that characterize sharp back-and-forth conversations, together accounting for the accuracy advantage in reasoning tasks. Controlled reinforcement learning experiments reveal that base models increase conversational behaviors when rewarded solely for reasoning accuracy, and fine-tuning models with conversational scaffolding accelerates reasoning improvement over base models. These findings indicate that the social organization of thought enables effective exploration of solution spaces. We suggest that reasoning models establish a computational parallel to collective intelligence in human groups, where diversity enables superior problem-solving when systematically structured, which suggests new opportunities for agent organization to harness the wisdom of crowds."]]></description>
<dc:subject>to:NB to_read artificial_intelligence large_language_models_(so_called) ensemble_methods evans.james kith_and_kin</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:f2cf63b3474b/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to_read"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:artificial_intelligence"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:large_language_models_(so_called)"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:ensemble_methods"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:evans.james"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:kith_and_kin"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.cambridge.org/core/journals/philosophy-of-science/article/measured-inference-scales-statistics-and-scientific-inference/527F6793A1C954F01A321E72F780E931?WT.mc_id=New%2520Cambridge%2520Alert%2520-%2520Issues">
    <title>Measured Inference: Scales, Statistics, and Scientific Inference | Philosophy of Science | Cambridge Core</title>
    <dc:date>2026-04-08T16:56:58+00:00</dc:date>
    <link>https://www.cambridge.org/core/journals/philosophy-of-science/article/measured-inference-scales-statistics-and-scientific-inference/527F6793A1C954F01A321E72F780E931?WT.mc_id=New%2520Cambridge%2520Alert%2520-%2520Issues</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["Despite the recent “epistemic turn” in the philosophy of measurement, philosophers have ignored a nearly 80-year controversy about the relationship between statistical inference and measurement theory. Some scholars maintain that measurement theory places no constraints on statistics, whereas others argue that the measurement scale (e.g., ordinal or interval) of one’s data determines which statistical methods are “permissible.” I defend an intermediate position: Even if existing measurement theory were irrelevant to statistical inference, it would be critical for scientific inference, which requires connecting statistical hypotheses to broader research hypotheses."]]></description>
<dc:subject>to:NB measurement philosophy_of_science statistics mayo-wilson.conor to_read</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:cb7083aa4fd5/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:measurement"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:philosophy_of_science"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:statistics"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:mayo-wilson.conor"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to_read"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.cambridge.org/core/journals/philosophy-of-science/article/we-should-not-align-quantitative-measures-with-stakeholder-values/1C7DBA5E3D5904AB023268C97EACB0F2?WT.mc_id=New%2520Cambridge%2520Alert%2520-%2520Issues">
    <title>We Should not Align Quantitative Measures with Stakeholder Values | Philosophy of Science | Cambridge Core</title>
    <dc:date>2026-04-08T16:56:11+00:00</dc:date>
    <link>https://www.cambridge.org/core/journals/philosophy-of-science/article/we-should-not-align-quantitative-measures-with-stakeholder-values/1C7DBA5E3D5904AB023268C97EACB0F2?WT.mc_id=New%2520Cambridge%2520Alert%2520-%2520Issues</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["There is a growing consensus among philosophers that quantifying value-laden concepts can be epistemically successful and politically legitimate if all value-laden choices in the process of quantification are aligned with stakeholder values. I argue that proponents of this alignment approach have failed to argue for its basic premise: Successful quantification is sufficiently unconstrained to be achievable along multiple, stakeholder-specific pathways. I then challenge this premise by considering a rare example of successful value-laden quantification in seismology, in which stakeholder values had to be disregarded from measure design and testing. The example motivates my contention that value alignment is not a workable source of political legitimacy for successful programs of quantification."

]]></description>
<dc:subject>to:NB measurement science_as_a_social_process philosophy_of_science</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:0cab44853037/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:measurement"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:science_as_a_social_process"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:philosophy_of_science"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.cambridge.org/core/journals/philosophy-of-science/article/science-without-trust/13A5FD15D5ADFF4C67DDC1DF8D8FEB6C?WT.mc_id=New%2520Cambridge%2520Alert%2520-%2520Issues">
    <title>Science Without Trust | Philosophy of Science | Cambridge Core</title>
    <dc:date>2026-04-08T16:55:29+00:00</dc:date>
    <link>https://www.cambridge.org/core/journals/philosophy-of-science/article/science-without-trust/13A5FD15D5ADFF4C67DDC1DF8D8FEB6C?WT.mc_id=New%2520Cambridge%2520Alert%2520-%2520Issues</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["It is often said that successful scientific research must be built on trust. Focusing on the alleged necessity of trust for successful scientific communication and thus for scientific cooperation (which underlies much of contemporary science), I argue that science mustn’t be built on trust. Appearances to the contrary come from a failure to distinguish different attitudes toward scientists’ testimony, in particular, trusting and relying on other scientists. This article proposes an account of scientific reliance and explains how it differs from scientific trust; it also shows why this distinction matters for science."]]></description>
<dc:subject>to:NB philosophy_of_science science_as_a_social_process trust machery.edouard</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:baa0d82bef89/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:philosophy_of_science"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:science_as_a_social_process"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:trust"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:machery.edouard"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://arxiv.org/abs/math/0504472">
    <title>[math/0504472] Szemerédi's regularity lemma revisited</title>
    <dc:date>2026-04-08T02:30:02+00:00</dc:date>
    <link>https://arxiv.org/abs/math/0504472</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["Szemerédi's regularity lemma is a basic tool in graph theory, and also plays an important role in additive combinatorics, most notably in proving Szemerédi's theorem on arithmetic progressions . In this note we revisit this lemma from the perspective of probability theory and information theory instead of graph theory, and observe a variant of this lemma which introduces a new parameter F. This stronger version of the regularity lemma was iterated in a recent paper of the author to reprove the analogous regularity lemma for hypergraphs."

--- Re last tag, I ought to try to find time to think about this as a form of (approximate) statistical sufficiency, and/or the information bottleneck.]]></description>
<dc:subject>have_read tao.terence graph_theory information_theory probability coarse_graining sufficiency in_NB</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:b71042d4baec/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:have_read"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:tao.terence"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:graph_theory"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:information_theory"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:probability"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:coarse_graining"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:sufficiency"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:in_NB"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://direct.mit.edu/books/monograph/6030/Strange-AttractorThe-Hallucinatory-Life-of-Terence">
    <title>Strange Attractor: The Hallucinatory Life of Terence McKenna | Books Gateway | MIT Press</title>
    <dc:date>2026-04-04T03:34:28+00:00</dc:date>
    <link>https://direct.mit.edu/books/monograph/6030/Strange-AttractorThe-Hallucinatory-Life-of-Terence</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["An intellectual biography of one of the most celebrated and yet least understood figures of the late twentieth century, Terence McKenna.
"A stand-up philosopher who made a unique contribution to science, humanism, and the hidden arts, Terence McKenna (1946–2000) was the twentieth century’s psychedelic Renaissance man. Perfecting his rugged philosophy on the role of psychedelics in evolution, consciousness, and time, McKenna was a riotous charmer who stalked the shadows, but also sought the iridescence. More than twenty years since his untimely passing, McKenna has an enduring magnetism across the virtual pop stream, in pervasive digitization, and within social media networks. In Strange Attractor, the first biography of this enigmatic figure, Graham St John detects the signal behind the noise.
"This book is an engaging chronicle of the life, works, and legacy of this brazen adventurer of the inner and outer dimensions, whose weird intelligence has affected multitudes and whose spirit continues to haunt the present. It draws on original documents and letters, features fifty two rare photographs and artworks, and shares previously untold stories from over eighty people. Neither glorifying nor disparaging its subject, Strange Attractor will appeal to those interested in the evolution of a psychedelic intellectual, and to those for whom McKenna’s wisdom endures."]]></description>
<dc:subject>in_NB books:noted psychoceramica lives_of_the_scholars sort_of</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:70ad5101b1ce/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:in_NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:books:noted"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:psychoceramica"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:lives_of_the_scholars"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:sort_of"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://ergosphere.blog/posts/the-machines-are-fine/">
    <title>The machines are fine. I'm worried about us.</title>
    <dc:date>2026-04-03T07:45:55+00:00</dc:date>
    <link>https://ergosphere.blog/posts/the-machines-are-fine/</link>
    <dc:creator>cshalizi</dc:creator><dc:subject>large_language_models_(so_called) science_as_a_social_process have_read via:kjhealy re:the_singer_of_tales_and_the_house_of_intellect</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:0aa0e4be9ed7/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:large_language_models_(so_called)"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:science_as_a_social_process"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:have_read"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:via:kjhealy"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:re:the_singer_of_tales_and_the_house_of_intellect"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.aeaweb.org/articles?id=10.1257/aer.20240763">
    <title>Games on Multiplex Networks - American Economic Association</title>
    <dc:date>2026-03-31T18:08:27+00:00</dc:date>
    <link>https://www.aeaweb.org/articles?id=10.1257/aer.20240763</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["We develop a simple multilayer network model in which agents allocate effort across layers with heterogeneous structures, subject to an aggregate effort constraint. Incentives are shaped by agents' network positions within each layer, and equilibrium behavior reflects both within- and cross-layer interactions. We analyze how shocks propagate through the network and characterize optimal targeting interventions. Our results show that effective policy design must account for effort allocation across layers. We also demonstrate that predictions from monolayer models can diverge sharply from those of multilayer models, underscoring the importance of accounting for network complexity in both empirical and policy analyses."]]></description>
<dc:subject>in_NB game_theory social_networks</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:2be7c9d94d49/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:in_NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:game_theory"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:social_networks"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://arxiv.org/abs/2603.25568">
    <title>[2603.25568] Are LLMs Overkill for Databases?: A Study on the Finiteness of SQL</title>
    <dc:date>2026-03-31T13:10:48+00:00</dc:date>
    <link>https://arxiv.org/abs/2603.25568</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["Translating natural language to SQL for data retrieval has become more accessible thanks to code generation LLMs. But how hard is it to generate SQL code? While databases can become unbounded in complexity, the complexity of queries is bounded by real life utility and human needs. With a sample of 376 databases, we show that SQL queries, as translations of natural language questions are finite in practical complexity. There is no clear monotonic relationship between increases in database table count and increases in complexity of SQL queries. In their template forms, SQL queries follow a Power Law-like distribution of frequency where 70% of our tested queries can be covered with just 13% of all template types, indicating that the high majority of SQL queries are predictable. This suggests that while LLMs for code generation can be useful, in the domain of database access, they may be operating in a narrow, highly formulaic space where templates could be safer, cheaper, and auditable."]]></description>
<dc:subject>to:NB large_language_models_(so_called) databases mimno.david re:gopnikism</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:b8b2e634087e/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:large_language_models_(so_called)"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:databases"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:mimno.david"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:re:gopnikism"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://arxiv.org/abs/2603.21687">
    <title>[2603.21687] MIRAGE: The Illusion of Visual Understanding</title>
    <dc:date>2026-03-30T18:05:57+00:00</dc:date>
    <link>https://arxiv.org/abs/2603.21687</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["Multimodal AI systems have achieved remarkable performance across a broad range of real-world tasks, yet the mechanisms underlying visual-language reasoning remain surprisingly poorly understood. We report three findings that challenge prevailing assumptions about how these systems process and integrate visual information. First, Frontier models readily generate detailed image descriptions and elaborate reasoning traces, including pathology-biased clinical findings, for images never provided; we term this phenomenon mirage reasoning. Second, without any image input, models also attain strikingly high scores across general and medical multimodal benchmarks, bringing into question their utility and design. In the most extreme case, our model achieved the top rank on a standard chest X-ray question-answering benchmark without access to any images. Third, when models were explicitly instructed to guess answers without image access, rather than being implicitly prompted to assume images were present, performance declined markedly. Explicit guessing appears to engage a more conservative response regime, in contrast to the mirage regime in which models behave as though images have been provided. These findings expose fundamental vulnerabilities in how visual-language models reason and are evaluated, pointing to an urgent need for private benchmarks that eliminate textual cues enabling non-visual inference, particularly in medical contexts where miscalibrated AI carries the greatest consequence. We introduce B-Clean as a principled solution for fair, vision-grounded evaluation of multimodal AI systems."

!!!]]></description>
<dc:subject>to:NB large_language_models_(so_called) via:csantos chain-of-thought_(so_called)</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:6967b297f42e/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:large_language_models_(so_called)"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:via:csantos"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:chain-of-thought_(so_called)"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.tes.com/magazine/teaching-learning/general/uta-frith-interview-autism-not-spectrum">
    <title>Uta Frith interview: 'Autism is not a spectrum' | Tes</title>
    <dc:date>2026-03-30T01:56:50+00:00</dc:date>
    <link>https://www.tes.com/magazine/teaching-learning/general/uta-frith-interview-autism-not-spectrum</link>
    <dc:creator>cshalizi</dc:creator><dc:subject>frith.uta autism interview neuropsychology</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:258177d1aba9/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:frith.uta"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:autism"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:interview"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:neuropsychology"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.sciencedirect.com/science/article/abs/pii/S0167268116301202?via%3Dihub">
    <title>OPEC, the Seven Sisters, and oil market dominance: An evolutionary game theory and agent-based modeling approach - ScienceDirect</title>
    <dc:date>2026-03-27T19:56:07+00:00</dc:date>
    <link>https://www.sciencedirect.com/science/article/abs/pii/S0167268116301202?via%3Dihub</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["A methodological toolkit comprised of evolutionary game theory and agent-based modeling is used to study OPEC and the Seven Sisters as they struggled for control over global petroleum markets during the 1960s and 1970s. An evolutionary game theory model incorporates heterogeneous populations, energy-specific variables, and behavioral considerations to capture the fundamentals of the applied problem. An agent-based model is used to provide detailed results and demonstrate the importance of the natural resource to the outcome of the model."


]]></description>
<dc:subject>to:NB to_read economic_history agent-based_models evolutionary_game_theory via:aeo 20th_century_history</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:ceba46bcf38f/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to_read"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:economic_history"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:agent-based_models"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:evolutionary_game_theory"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:via:aeo"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:20th_century_history"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://jbgruber.github.io/rollama/">
    <title>Communicate with Ollama to Run Large Language Models Locally • rollama</title>
    <dc:date>2026-03-26T16:16:04+00:00</dc:date>
    <link>https://jbgruber.github.io/rollama/</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["The goal of rollama is to wrap the Ollama API, which allows you to run different LLMs locally and create an experience similar to ChatGPT/OpenAI’s API. Ollama is very easy to deploy and handles a huge number of models. Checkout the project here: https://github.com/ollama/ollama.
"While there are several R packages for working with LLMs, rollama takes an opinionated approach centred on local, open-weight models: prioritising privacy, reproducibility, and ease of use for research tasks. The package and its learning materials are particularly focused on annotating text and images — making it a natural fit for (social) scientists who want to use LLMs without relying on proprietary APIs or sending sensitive data to third-party servers. It also offers deep integration with the Ollama ecosystem beyond just chat, including model management features like creating, copying, and pushing custom models."

--- Where "to_read" means "to install and play around with".  (Of course my 2017-vintage machine may groan and smoke...)]]></description>
<dc:subject>to:NB to_read large_language_models_(so_called) to_teach:statistics_and_generative_ai via:phnk</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:04a78f4bc198/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to_read"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:large_language_models_(so_called)"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to_teach:statistics_and_generative_ai"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:via:phnk"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.chronicle.com/article/can-danielle-allen-save-academe-from-itself">
    <title>Can Danielle Allen Save Academe From Itself?</title>
    <dc:date>2026-03-25T04:29:26+00:00</dc:date>
    <link>https://www.chronicle.com/article/can-danielle-allen-save-academe-from-itself</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA[--- CMU-access link: [https://www-chronicle-com.cmu.idm.oclc.org/article/can-danielle-allen-save-academe-from-itself]]]></description>
<dc:subject>allen.danielle_s. have_read kith_and_kin academia</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:2c5192345ca6/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:allen.danielle_s."/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:have_read"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:kith_and_kin"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:academia"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://eighteenthelephant.com/2026/03/12/space-mirrors-solar-panels-fools-and-their-money/">
    <title>Space mirrors, solar panels, fools, and their money – The Eighteenth Elephant</title>
    <dc:date>2026-03-25T03:30:06+00:00</dc:date>
    <link>https://eighteenthelephant.com/2026/03/12/space-mirrors-solar-panels-fools-and-their-money/</link>
    <dc:creator>cshalizi</dc:creator><dc:subject>physics utter_stupidity why_oh_why_cant_we_have_a_better_press_corps</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:75789e9802a4/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:physics"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:utter_stupidity"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:why_oh_why_cant_we_have_a_better_press_corps"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://statmodeling.stat.columbia.edu/2026/03/22/space-mirrors-awesome-solution-to-our-energy-problems-or-ridiculous-public-relations-stunt/">
    <title>Space mirrors: Awesome solution to our energy problems, or ridiculous public relations stunt? | Statistical Modeling, Causal Inference, and Social Science</title>
    <dc:date>2026-03-25T03:26:51+00:00</dc:date>
    <link>https://statmodeling.stat.columbia.edu/2026/03/22/space-mirrors-awesome-solution-to-our-energy-problems-or-ridiculous-public-relations-stunt/</link>
    <dc:creator>cshalizi</dc:creator><dc:subject>physics utter_stupidity why_oh_why_cant_we_have_a_better_press_corps</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:27324c580ac0/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:physics"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:utter_stupidity"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:why_oh_why_cant_we_have_a_better_press_corps"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.nytimes.com/2026/03/04/science/chimpanzees-crystals.html">
    <title>Chimpanzees Are Really Into Crystals - The New York Times</title>
    <dc:date>2026-03-22T04:15:16+00:00</dc:date>
    <link>https://www.nytimes.com/2026/03/04/science/chimpanzees-crystals.html</link>
    <dc:creator>cshalizi</dc:creator><dc:subject>primates have_read</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:3791db78b222/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:primates"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:have_read"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://arxiv.org/abs/2603.12228">
    <title>[2603.12228] Neural Thickets: Diverse Task Experts Are Dense Around Pretrained Weights</title>
    <dc:date>2026-03-22T04:13:16+00:00</dc:date>
    <link>https://arxiv.org/abs/2603.12228</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["Pretraining produces a learned parameter vector that is typically treated as a starting point for further iterative adaptation. In this work, we instead view the outcome of pretraining as a distribution over parameter vectors, whose support already contains task-specific experts. We show that in small models such expert solutions occupy a negligible fraction of the volume of this distribution, making their discovery reliant on structured optimization methods such as gradient descent. In contrast, in large, well-pretrained models the density of task-experts increases dramatically, so that diverse, task-improving specialists populate a substantial fraction of the neighborhood around the pretrained weights. Motivated by this perspective, we explore a simple, fully parallel post-training method that samples N parameter perturbations at random, selects the top K, and ensembles predictions via majority vote. Despite its simplicity, this approach is competitive with standard post-training methods such as PPO, GRPO, and ES for contemporary large-scale models."]]></description>
<dc:subject>to:NB neural_networks large_language_models_(so_called) ensemble_methods</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:a9f750012e1c/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:neural_networks"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:large_language_models_(so_called)"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:ensemble_methods"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.tue.nl/en/our-university/library/library-news/24-02-2026-are-ai-generated-summaries-suitable-for-studying-and-research">
    <title>Are AI-generated summaries suitable for studying and research?</title>
    <dc:date>2026-03-22T04:09:06+00:00</dc:date>
    <link>https://www.tue.nl/en/our-university/library/library-news/24-02-2026-are-ai-generated-summaries-suitable-for-studying-and-research</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA[--- I had not seen the phrase "knowledge bleed" before, but have encountered the phenomenon]]></description>
<dc:subject>have_read large_language_models_(so_called) track_down_references via:bruces</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:3f808463a93a/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:have_read"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:large_language_models_(so_called)"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:track_down_references"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:via:bruces"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://scalawagmagazine.org/2026/03/the-ai-myth-of-solomon-fairfax-the-grim-reaper-of-charleston/">
    <title>The AI Myth of Solomon Fairfax, the “Grim Reaper” of Charleston – Scalawag</title>
    <dc:date>2026-03-22T04:06:59+00:00</dc:date>
    <link>https://scalawagmagazine.org/2026/03/the-ai-myth-of-solomon-fairfax-the-grim-reaper-of-charleston/</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA[--- The _cheapness_ of producing stuff like this is new, but the qualitative phenomenon is much older.]]></description>
<dc:subject>epidemiology_of_representations</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:6336dfc9fa73/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:epidemiology_of_representations"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://vigji.github.io/blog/xerox-machines-of-loving-grace/">
    <title>Xerox Machines of Loving Grace — Luigi Petrucco</title>
    <dc:date>2026-03-22T04:06:14+00:00</dc:date>
    <link>https://vigji.github.io/blog/xerox-machines-of-loving-grace/</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA[--- Somebody (by whom I mean W.T.) should write an essay about why this didn't all happen in the 1990s with genetic programming.  (I'd be prepared to hear about how it _did_ happen but people outside the field didn't pay attention.)
--- Heck, someone should compare people saying stuff like this to Ashby (1956) "Design for an Intelligence Amplifier" [https://www.jstor.org/stable/j.ctt1bgzb3s.13]: if you can specify enough effective tests, and have a big enough source of variety, you can find incredible things...]]></description>
<dc:subject>have_read large_language_models_(so_called) programming</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:784152b0d285/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:have_read"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:large_language_models_(so_called)"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:programming"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.percepta.ai/blog/can-llms-be-computers">
    <title>Can LLMs Be Computers? | Percepta</title>
    <dc:date>2026-03-22T04:02:11+00:00</dc:date>
    <link>https://www.percepta.ai/blog/can-llms-be-computers</link>
    <dc:creator>cshalizi</dc:creator><dc:subject>have_read large_language_models_(so_called) via:? slightly_mad</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:ecee8ee1f1a2/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:have_read"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:large_language_models_(so_called)"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:via:%3F"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:slightly_mad"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://sogdians.si.edu/the-sogdians-at-home/">
    <title>The Sogdians at Home | The Sogdians</title>
    <dc:date>2026-03-22T03:59:28+00:00</dc:date>
    <link>https://sogdians.si.edu/the-sogdians-at-home/</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA[--- Fig. 43 looks startlingly like LZS in his favorite red cap.]]></description>
<dc:subject>central_asia ancient_history art_history</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:077acba9477d/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:central_asia"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:ancient_history"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:art_history"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://arxiv.org/abs/2109.08203">
    <title>[2109.08203] Torch.manual_seed(3407) is all you need: On the influence of random seeds in deep learning architectures for computer vision</title>
    <dc:date>2026-03-22T03:57:16+00:00</dc:date>
    <link>https://arxiv.org/abs/2109.08203</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["In this paper I investigate the effect of random seed selection on the accuracy when using popular deep learning architectures for computer vision. I scan a large amount of seeds (up to 10^4) on CIFAR 10 and I also scan fewer seeds on Imagenet using pre-trained models to investigate large scale datasets. The conclusions are that even if the variance is not very large, it is surprisingly easy to find an outlier that performs much better or much worse than the average."]]></description>
<dc:subject>to:NB neural_networks optimization funny:laughing_instead_of_screaming</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:3feeb80e1ba4/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:neural_networks"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:optimization"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:funny:laughing_instead_of_screaming"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://academic.oup.com/poq/article-abstract/88/3/909/7833185">
    <title>Curious Case of Black “Conservatives”: Assessing the Validity of the Liberal-Conservative Scale among Black Americans | Public Opinion Quarterly | Oxford Academic</title>
    <dc:date>2026-03-22T03:56:12+00:00</dc:date>
    <link>https://academic.oup.com/poq/article-abstract/88/3/909/7833185</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["Many Black Americans identify as conservative on surveys, despite their allegiance to the Democratic Party. Prominent theoretical accounts claim this mismatch results from identity-based considerations. I revisit this long-standing puzzle and offer an alternative explanation with broad implications for studying political attitudes and behavior across the social sciences. Leveraging data from the American National Election Studies, I demonstrate that the terms “liberal” and “conservative” are unfamiliar to many Black Americans, compromising the usefulness and validity of the liberal-conservative scale for Black respondents. Respondents unfamiliar with these terms misapply them and choose ideological labels that fail to align with their partisan preferences. Consequently, scholars and political actors make incorrect and imprecise inferences about the contours of Black politics. Findings also raise new concerns about the generalizability of claims that rely on ideological self-identification measures, including popular claims about mass polarization and partisan-ideological sorting among the American public. Critically, this work suggests a need for caution when using concepts that vary in their meaningfulness across social groups."]]></description>
<dc:subject>to:NB public_opinion surveys social_measurement us_politics to_teach:statistics_of_inequality_and_discrimination</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:78775ff52ab6/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:public_opinion"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:surveys"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:social_measurement"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:us_politics"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to_teach:statistics_of_inequality_and_discrimination"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.nytimes.com/2026/03/12/magazine/ai-coding-programming-jobs-claude-chatgpt.html">
    <title>Coding After Coders: The End of Computer Programming as We Know It - The New York Times</title>
    <dc:date>2026-03-22T03:31:26+00:00</dc:date>
    <link>https://www.nytimes.com/2026/03/12/magazine/ai-coding-programming-jobs-claude-chatgpt.html</link>
    <dc:creator>cshalizi</dc:creator><dc:subject>to:NB have_read large_language_models_(so_called) thompson.clive programming</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:24d8fbb58eed/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:have_read"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:large_language_models_(so_called)"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:thompson.clive"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:programming"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://arxiv.org/abs/2603.10145">
    <title>[2603.10145] Lost in Backpropagation: The LM Head is a Gradient Bottleneck</title>
    <dc:date>2026-03-22T03:30:05+00:00</dc:date>
    <link>https://arxiv.org/abs/2603.10145</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["The last layer of neural language models (LMs) projects output features of dimension D to logits in dimension V, the size of the vocabulary, where usually D≪V. This mismatch is known to raise risks of limited expressivity in neural LMs, creating a so-called softmax bottleneck. We show the softmax bottleneck is not only an expressivity bottleneck but also an optimization bottleneck. Backpropagating V-dimensional gradients through a rank-D linear layer induces unavoidable compression, which alters the training feedback provided to the vast majority of the parameters. We present a theoretical analysis of this phenomenon and measure empirically that 95-99% of the gradient norm is suppressed by the output layer, resulting in vastly suboptimal update directions. We conduct controlled pretraining experiments showing that the gradient bottleneck makes trivial patterns unlearnable, and drastically affects the training dynamics of LLMs. We argue that this inherent flaw contributes to training inefficiencies at scale independently of the model architecture, and raises the need for new LM head designs."

--- I'm not persuaded (from the mere abstract) that there is really any _problem_ here.]]></description>
<dc:subject>to:NB neural_networks optimization large_language_models_(so_called)</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:8999e2ebb681/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:neural_networks"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:optimization"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:large_language_models_(so_called)"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://arxiv.org/abs/2603.01220">
    <title>[2603.01220] Generative AI &amp; Fictionality: How Novels Power Large Language Models</title>
    <dc:date>2026-03-22T03:21:44+00:00</dc:date>
    <link>https://arxiv.org/abs/2603.01220</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["Generative models, like the one in ChatGPT, are powered by their training data. The models are simply next-word predictors, based on patterns learned from vast amounts of pre-existing text. Since the first generation of GPT, it is striking that the most popular datasets have included substantial collections of novels. For the engineers and research scientists who build these models, there is a common belief that the language in fiction is rich enough to cover all manner of social and communicative phenomena, yet the belief has gone mostly unexamined. How does fiction shape the outputs of generative AI? Specifically, what are novels' effects relative to other forms of text, such as newspapers, Reddit, and Wikipedia? Since the 1970s, literature scholars such as Catherine Gallagher and James Phelan have developed robust and insightful accounts of how fiction operates as a form of discourse and language. Through our study of an influential open-source model (BERT), we find that LLMs leverage familiar attributes and affordances of fiction, while also fomenting new qualities and forms of social response. We argue that if contemporary culture is increasingly shaped by generative AI and machine learning, any analysis of today's various modes of cultural production must account for a relatively novel dimension: computational training data."]]></description>
<dc:subject>to:NB so.richard_jean narrative large_language_models_(so_called)</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:da734b816008/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:so.richard_jean"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:narrative"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:large_language_models_(so_called)"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://tkeskinturk.github.io/blog/publicopin/">
    <title>Visualizing the Dynamics of Opinion Change – Turgut Keskintürk</title>
    <dc:date>2026-03-18T13:26:14+00:00</dc:date>
    <link>https://tkeskinturk.github.io/blog/publicopin/</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA[--- I'm going to follow the references, but I will make two predictions just based on this.
1. It's pretending to solve the age-period-cohort non-identification problem by decreeing that one of those effects just doesn't exist.  (Advantages of theft over honest toil, etc.)
2. Having done so, it's the Kitagawa (-Oaxaca-Blinder) decomposition.  (Which is a cool thing I wish I had appreciated earlier, and no shame in having re-re-re-discovered.)

]]></description>
<dc:subject>have_read visual_display_of_quantitative_information social_measurement via:kjhealy public_opinion surveys cultural_evolution track_down_references</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:bd8814cc4a6b/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:have_read"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:visual_display_of_quantitative_information"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:social_measurement"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:via:kjhealy"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:public_opinion"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:surveys"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:cultural_evolution"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:track_down_references"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://f.briatte.org/r/current-views-on-generative-ai">
    <title>Current views on generative AI</title>
    <dc:date>2026-03-16T20:28:37+00:00</dc:date>
    <link>https://f.briatte.org/r/current-views-on-generative-ai</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA[--- Very, very much in line with my own thinking, for whatever that's worth.]]></description>
<dc:subject>teaching phnk via:phnk large_language_models_(so_called)</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:f97a49848413/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:teaching"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:phnk"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:via:phnk"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:large_language_models_(so_called)"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://openreview.net/forum?id=vnX1WHMNmz">
    <title>Large Language Model Reasoning Failures | OpenReview</title>
    <dc:date>2026-03-15T03:30:03+00:00</dc:date>
    <link>https://openreview.net/forum?id=vnX1WHMNmz</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["Large Language Models (LLMs) have exhibited remarkable reasoning capabilities, achieving impressive results across a wide range of tasks. Despite these advances, significant reasoning failures persist, occurring even in seemingly simple scenarios. To systematically understand and address these shortcomings, we present the first comprehensive survey dedicated to reasoning failures in LLMs. We introduce a novel categorization framework that distinguishes reasoning into embodied and non-embodied types, with the latter further subdivided into informal (intuitive) and formal (logical) reasoning. In parallel, we classify reasoning failures along a complementary axis into three types: fundamental failures intrinsic to LLM architectures that broadly affect downstream tasks; application-specific limitations that manifest in particular domains; and robustness issues characterized by inconsistent performance across minor variations. For each reasoning failure, we provide a clear definition, analyze existing studies, explore root causes, and present mitigation strategies. By unifying fragmented research efforts, our survey provides a structured perspective on systemic weaknesses in LLM reasoning, offering valuable insights and guiding future research towards building stronger, more reliable, and robust reasoning capabilities. "]]></description>
<dc:subject>to:NB large_language_models_(so_called) via:rvenkat</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:6b28e937b837/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:large_language_models_(so_called)"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:via:rvenkat"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.nature.com/articles/s41593-026-02217-z">
    <title>Orbitofrontal cortex drives predictive filtering of sensory responses | Nature Neuroscience</title>
    <dc:date>2026-03-11T14:12:29+00:00</dc:date>
    <link>https://www.nature.com/articles/s41593-026-02217-z</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["Habituation is a crucial sensory filtering mechanism whose dysregulation can lead to a continuously intense world in disorders with hypersensitivity. Although habituation is often termed the simplest form of learning, its circuit mechanisms remain elusive. Conventional peripheral explanations fail to fully account for long-term habituation in complex sensory environments, leading to theories proposing top-down regulation. Here we evaluated two competing top-down mechanistic explanations for habituation in mice (growth in predictive filtering and waning of novelty-driven amplification) and identified an unexpected role for the orbitofrontal cortex (OFC) in predictive filtering. After daily sound exposure, neural habituation in the primary auditory cortex (A1) was reversed by inactivating the OFC. Top-down projections from the OFC, but not other frontal areas, carried predictive signals that grew with daily sound experience and suppressed A1 via somatostatin-expressing inhibitory neurons. Thus, prediction signals from the OFC cancel out anticipated stimuli by generating their ‘negative images’ in sensory cortices."]]></description>
<dc:subject>to:NB neuroscience neural_coding_and_decoding prediction via:sifu_tweety</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:b2110fbc9876/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:neuroscience"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:neural_coding_and_decoding"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:prediction"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:via:sifu_tweety"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://www.3-16am.co.uk/articles/a-turd-in-the-punchbowl-initial-thoughts-christoph-shuringa-s-a-social-history-of-analytic-philosophy-or-an-epigone-crashes-the-party">
    <title>A Turd in the Punchbowl: Initial Thoughts On Christoph Shuringa’s A Social History of Analytic Philosophy Or: An epigone Crashes the Party</title>
    <dc:date>2026-03-11T14:10:04+00:00</dc:date>
    <link>https://www.3-16am.co.uk/articles/a-turd-in-the-punchbowl-initial-thoughts-christoph-shuringa-s-a-social-history-of-analytic-philosophy-or-an-epigone-crashes-the-party</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA[--- Too long, and self-indulgently written.  (I know, I know.)  But over-all sound, and cumulatively devastating.]]></description>
<dc:subject>have_read intellectual_history 20th_century_history philosophy analytical_philosophy book_reviews evisceration via:mraginsky</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:4c8b064d6fe1/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:have_read"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:intellectual_history"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:20th_century_history"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:philosophy"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:analytical_philosophy"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:book_reviews"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:evisceration"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:via:mraginsky"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://arxiv.org/abs/2602.15902">
    <title>[2602.15902] Doc-to-LoRA: Learning to Instantly Internalize Contexts</title>
    <dc:date>2026-03-11T14:08:14+00:00</dc:date>
    <link>https://arxiv.org/abs/2602.15902</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["Long input sequences are central to in-context learning, document understanding, and multi-step reasoning of Large Language Models (LLMs). However, the quadratic attention cost of Transformers makes inference memory-intensive and slow. While context distillation (CD) can transfer information into model parameters, per-prompt distillation is impractical due to training costs and latency. To address these limitations, we propose Doc-to-LoRA (D2L), a lightweight hypernetwork that meta-learns to perform approximate CD within a single forward pass. Given an unseen prompt, D2L generates a LoRA adapter for a target LLM, enabling subsequent queries to be answered without re-consuming the original context, reducing latency and KV-cache memory consumption during inference of the target LLM. On a long-context needle-in-a-haystack task, D2L successfully learns to map contexts into adapters that store the needle information, achieving near-perfect zero-shot accuracy at sequence lengths exceeding the target LLM's native context window by more than 4x. On real-world QA datasets with limited compute, D2L outperforms standard CD while significantly reducing peak memory consumption and update latency. We envision that D2L can facilitate rapid adaptation of LLMs, opening up the possibility of frequent knowledge updates and personalized chat behavior."

--- Prompting is conditioning, and so localizing to a particular part of the state-space, so this is, what, changing the transition probabilities so we automatically stay in that region?  (Obvious answer is to read the paper and _then_ think about this...)]]></description>
<dc:subject>to:NB large_language_models_(so_called) to_read</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:012f6f18ff71/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:large_language_models_(so_called)"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to_read"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://link.springer.com/book/10.1007/978-1-4684-8941-5">
    <title>Adaptive Control of Ill-Defined Systems [1984] | Springer Nature Link</title>
    <dc:date>2026-03-11T14:05:04+00:00</dc:date>
    <link>https://link.springer.com/book/10.1007/978-1-4684-8941-5</link>
    <dc:creator>cshalizi</dc:creator><description><![CDATA["There are some types of complex systems that are built like clockwork, with well-defined parts that interact in well-defined ways, so that the action of the whole can be precisely analyzed and anticipated with accuracy and precision. Some systems are not themselves so well-defined, but they can be modeled in ways that are like trained pilots in well-built planes, or electrolyte balance in healthy humans. But there are many systems for which that is not true; and among them are many whose understanding and control we would value. For example, the model for the trained pilot above fails exactly where the pilot is being most human; that is, where he is exercising the highest levels of judgment, or where he is learning and adapting to new conditions. Again, sometimes the kinds of complexity do not lead to easily analyzable models at all; here we might include most economic systems, in all forms of societies. There are several factors that seem to contribute to systems being hard to model, understand, or control. The human participants may act in ways that are so variable or so rich or so interactive that the only adequate model of the system would be the entire system itself, so to speak. This is probably the case in true long term systems involving people learning and growing up in a changing society."]]></description>
<dc:subject>to:NB books:noted control_theory_and_control_engineering complexity selfridge.oliver arbib.michael_a. via:mraginsky downloaded</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:3bbb4c3a975a/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to:NB"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:books:noted"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:control_theory_and_control_engineering"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:complexity"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:selfridge.oliver"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:arbib.michael_a."/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:via:mraginsky"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:downloaded"/>
</rdf:Bag></taxo:topics>
</item>
<item rdf:about="https://asteriskmag.com/issues/08/the-myth-of-the-loneliness-epidemic">
    <title>The Myth of the Loneliness Epidemic—Asterisk</title>
    <dc:date>2026-03-11T14:03:51+00:00</dc:date>
    <link>https://asteriskmag.com/issues/08/the-myth-of-the-loneliness-epidemic</link>
    <dc:creator>cshalizi</dc:creator><dc:subject>fischer.claude_s. loneliness social_networks social_measurement have_read to_teach:baby-nets</dc:subject>
<dc:source>https://pinboard.in/</dc:source>
<dc:identifier>https://pinboard.in/u:cshalizi/b:139206771372/</dc:identifier>
<taxo:topics><rdf:Bag>	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:fischer.claude_s."/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:loneliness"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:social_networks"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:social_measurement"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:have_read"/>
	<rdf:li rdf:resource="https://pinboard.in/u:cshalizi/t:to_teach:baby-nets"/>
</rdf:Bag></taxo:topics>
</item>
</rdf:RDF>