<?xml version="1.0" encoding="UTF-8" ?>
  <rss version="2.0">
    <channel>
      <title>Ai2 Blog</title>
      <link>https://allenai.org/blog</link>
      <description>Latest updates</description>
      
      <item>
        <guid isPermaLink="false">https://allenai.org/blog/evaluating-scientific-discovery-agents</guid>
        <title>Evaluating agents for scientific discovery</title>
        <link>https://allenai.org/blog/evaluating-scientific-discovery-agents</link>
        <pubDate>13 Apr 2026 00:00:00 -0800</pubDate>
        <description>
          <![CDATA[
            <div style="display: flex; gap: 12px">
              <div>Two benchmarks developed at Ai2 – ScienceWorld and DiscoveryWorld – reveal that even incredibly strong AI science agents struggle with problems human scientists solve routinely.</div>
              <img src="https://www.datocms-assets.com/64837/1776100925-1732561444-adobestock_695348945.avif?w=200" width="200" alt="Evaluating agents for scientific discovery" />
            </div>
          ]]>
        </description>
      </item>
      
      <item>
        <guid isPermaLink="false">https://allenai.org/blog/wilddet3d</guid>
        <title>Introducing WildDet3D: Open-world 3D detection from a single image</title>
        <link>https://allenai.org/blog/wilddet3d</link>
        <pubDate>07 Apr 2026 00:00:00 -0800</pubDate>
        <description>
          <![CDATA[
            <div style="display: flex; gap: 12px">
              <div>WildDet3D is an open model that predicts 3D bounding boxes from a single image. It generalizes across cameras and object categories, and folds in depth signals when available—alongside a new dataset of verified 3D annotations.</div>
              <img src="https://www.datocms-assets.com/64837/1775568828-wilddet3d.jpg?w=200" width="200" alt="Introducing WildDet3D: Open-world 3D detection from a single image" />
            </div>
          ]]>
        </description>
      </item>
      
      <item>
        <guid isPermaLink="false">https://allenai.org/blog/molmoweb</guid>
        <title>MolmoWeb: An open agent for automating web tasks</title>
        <link>https://allenai.org/blog/molmoweb</link>
        <pubDate>24 Mar 2026 00:00:00 -0800</pubDate>
        <description>
          <![CDATA[
            <div style="display: flex; gap: 12px">
              <div>Introducing MolmoWeb, an open visual web agent that navigates and completes tasks in a browser using screenshots alone, along with MolmoWebMix, the largest public dataset for training web agents.</div>
              <img src="https://www.datocms-assets.com/64837/1774295860-unnamed-2026-03-23t155727-900.png?w=200" width="200" alt="MolmoWeb: An open agent for automating web tasks" />
            </div>
          ]]>
        </description>
      </item>
      
      <item>
        <guid isPermaLink="false">https://allenai.org/blog/gtc2026</guid>
        <title>Highlights from Ai2 at NVIDIA GTC 2026</title>
        <link>https://allenai.org/blog/gtc2026</link>
        <pubDate>23 Mar 2026 00:00:00 -0800</pubDate>
        <description>
          <![CDATA[
            <div style="display: flex; gap: 12px">
              <div>A recap of Ai2's week at NVIDIA GTC 2026, covering panels on open models, live demos of Olmo Hybrid and Asta AutoDiscovery, and conversations on coding agents, hybrid architectures, and robotics.</div>
              <img src="https://www.datocms-assets.com/64837/1772840904-gtc-featuredimage-final.jpg?w=200" width="200" alt="Highlights from Ai2 at NVIDIA GTC 2026" />
            </div>
          ]]>
        </description>
      </item>
      
      <item>
        <guid isPermaLink="false">https://allenai.org/blog/molmopoint</guid>
        <title>MolmoPoint: Better pointing architecture for vision-language models</title>
        <link>https://allenai.org/blog/molmopoint</link>
        <pubDate>18 Mar 2026 00:00:00 -0800</pubDate>
        <description>
          <![CDATA[
            <div style="display: flex; gap: 12px">
              <div>MolmoPoint is a new vision-language model architecture that replaces text-based coordinate outputs with a more natural, token-based pointing mechanism that directly selects regions from visual features.</div>
              <img src="https://www.datocms-assets.com/64837/1773795311-molmopoint.jpg?w=200" width="200" alt="MolmoPoint: Better pointing architecture for vision-language models" />
            </div>
          ]]>
        </description>
      </item>
      
      <item>
        <guid isPermaLink="false">https://allenai.org/blog/molmobot-robot-manipulation</guid>
        <title>MolmoBot: Training robot manipulation entirely in simulation</title>
        <link>https://allenai.org/blog/molmobot-robot-manipulation</link>
        <pubDate>11 Mar 2026 00:00:00 -0800</pubDate>
        <description>
          <![CDATA[
            <div style="display: flex; gap: 12px">
              <div>MolmoBot is an open robotic manipulation model suite trained entirely in simulation—demonstrating zero-shot transfer to real-world robots without any real-world data collection or fine-tuning. </div>
              <img src="https://www.datocms-assets.com/64837/1773238621-molmobotheaderimage-1.jpg?w=200" width="200" alt="MolmoBot: Training robot manipulation entirely in simulation" />
            </div>
          ]]>
        </description>
      </item>
      
      <item>
        <guid isPermaLink="false">https://allenai.org/blog/molmobot</guid>
        <title>Ai2 introduces open, simulation-first stack for physical AI, achieving zero-shot transfer to real robots</title>
        <link>https://allenai.org/blog/molmobot</link>
        <pubDate>11 Mar 2026 00:00:00 -0800</pubDate>
        <description>
          <![CDATA[
            <div style="display: flex; gap: 12px">
              <div>Introducing MolmoBot and MolmoSpaces, an open foundation for training real-world robots to advance science.</div>
              <img src="https://www.datocms-assets.com/64837/1773238621-molmobotheaderimage-1.jpg?w=200" width="200" alt="Ai2 introduces open, simulation-first stack for physical AI, achieving zero-shot transfer to real robots" />
            </div>
          ]]>
        </description>
      </item>
      
      <item>
        <guid isPermaLink="false">https://allenai.org/blog/olmohybrid</guid>
        <title>Introducing Olmo Hybrid: Combining transformers and linear RNNs for superior scaling</title>
        <link>https://allenai.org/blog/olmohybrid</link>
        <pubDate>05 Mar 2026 00:00:00 -0800</pubDate>
        <description>
          <![CDATA[
            <div style="display: flex; gap: 12px">
              <div>Olmo Hybrid is a fully open 7B language model that combines transformer attention with linear RNN layers to achieve greater expressivity and significantly improved data and compute efficiency compared to pure transformer models.</div>
              <img src="https://www.datocms-assets.com/64837/1772724968-ai2-olmo-3-blog-post-graphic-development-v1-1.png?w=200" width="200" alt="Introducing Olmo Hybrid: Combining transformers and linear RNNs for superior scaling" />
            </div>
          ]]>
        </description>
      </item>
      
      <item>
        <guid isPermaLink="false">https://allenai.org/blog/asta-interaction-dataset</guid>
        <title>How do researchers actually use AI-powered science tools? Lessons from 250,000+ queries</title>
        <link>https://allenai.org/blog/asta-interaction-dataset</link>
        <pubDate>27 Feb 2026 00:00:00 -0800</pubDate>
        <description>
          <![CDATA[
            <div style="display: flex; gap: 12px">
              <div>The Asta Interaction Dataset (AID) contains real researcher queries revealing how scientists actually use AI-powered research tools, and where their habits diverge from what tool builders expect.</div>
              <img src="https://www.datocms-assets.com/64837/1772212556-ai2-asta-blog-post-graphic-development-v3-1.png?w=200" width="200" alt="How do researchers actually use AI-powered science tools? Lessons from 250,000+ queries" />
            </div>
          ]]>
        </description>
      </item>
      
      <item>
        <guid isPermaLink="false">https://allenai.org/blog/prescience</guid>
        <title>PreScience: Forecasting the future of science end-to-end</title>
        <link>https://allenai.org/blog/prescience</link>
        <pubDate>25 Feb 2026 00:00:00 -0800</pubDate>
        <description>
          <![CDATA[
            <div style="display: flex; gap: 12px">
              <div>PreScience is a new benchmark that evaluates whether AI can forecast how science unfolds end-to-end, from team formation through eventual impact.</div>
              <img src="https://www.datocms-assets.com/64837/1771986512-frame-1-7.png?w=200" width="200" alt="PreScience: Forecasting the future of science end-to-end" />
            </div>
          ]]>
        </description>
      </item>
      
      <item>
        <guid isPermaLink="false">https://allenai.org/blog/olmix</guid>
        <title>Olmix: A framework for data mixing throughout LM development</title>
        <link>https://allenai.org/blog/olmix</link>
        <pubDate>13 Feb 2026 00:00:00 -0800</pubDate>
        <description>
          <![CDATA[
            <div style="display: flex; gap: 12px">
              <div>Olmix is a framework for language model data mixing that provides empirically grounded defaults and efficient reuse techniques.</div>
              <img src="https://www.datocms-assets.com/64837/1770924963-unnamed-2026-02-12t142646-438.png?w=200" width="200" alt="Olmix: A framework for data mixing throughout LM development" />
            </div>
          ]]>
        </description>
      </item>
      
      <item>
        <guid isPermaLink="false">https://allenai.org/blog/autodiscovery</guid>
        <title>Introducing AutoDiscovery: Automated scientific discovery, now in AstaLabs</title>
        <link>https://allenai.org/blog/autodiscovery</link>
        <pubDate>12 Feb 2026 00:00:00 -0800</pubDate>
        <description>
          <![CDATA[
            <div style="display: flex; gap: 12px">
              <div>AutoDiscovery explores data autonomously, generating its own hypotheses to surface surprising findings that researchers might never have thought to look for.</div>
              <img src="https://www.datocms-assets.com/64837/1770856163-autodiscovery-thumbnail.jpg?w=200" width="200" alt="Introducing AutoDiscovery: Automated scientific discovery, now in AstaLabs" />
            </div>
          ]]>
        </description>
      </item>
      
      <item>
        <guid isPermaLink="false">https://allenai.org/blog/autodiscovery-impact</guid>
        <title>How researchers are using AutoDiscovery</title>
        <link>https://allenai.org/blog/autodiscovery-impact</link>
        <pubDate>12 Feb 2026 00:00:00 -0800</pubDate>
        <description>
          <![CDATA[
            <div style="display: flex; gap: 12px">
              <div>Learn about how researchers are using AutoDiscovery, our scientific discovery tool, to make transformative impact across their fields.</div>
              <img src="https://www.datocms-assets.com/64837/1770828892-autodiscovery_bg_desktop.jpg?w=200" width="200" alt="How researchers are using AutoDiscovery" />
            </div>
          ]]>
        </description>
      </item>
      
      <item>
        <guid isPermaLink="false">https://allenai.org/blog/molmospaces</guid>
        <title>MolmoSpaces, an open ecosystem for embodied AI</title>
        <link>https://allenai.org/blog/molmospaces</link>
        <pubDate>11 Feb 2026 00:00:00 -0800</pubDate>
        <description>
          <![CDATA[
            <div style="display: flex; gap: 12px">
              <div>MolmoSpaces is our new open platform for embodied AI that provides physics-grounded scenes, objects, and grasp annotations to train and evaluate generalist robotic policies.</div>
              <img src="https://www.datocms-assets.com/64837/1770871363-3-quarter-top-down-5x5-matrix-copy.png?w=200" width="200" alt="MolmoSpaces, an open ecosystem for embodied AI" />
            </div>
          ]]>
        </description>
      </item>
      
      <item>
        <guid isPermaLink="false">https://allenai.org/blog/how2everything</guid>
        <title>How2Everything: Mining the web to evaluate and improve LLMs on real-world procedures</title>
        <link>https://allenai.org/blog/how2everything</link>
        <pubDate>10 Feb 2026 00:00:00 -0800</pubDate>
        <description>
          <![CDATA[
            <div style="display: flex; gap: 12px">
              <div>How2Everything is an open framework for evaluating and improving how well LLMs generate step-by-step procedures.</div>
              <img src="https://www.datocms-assets.com/64837/1770685440-unnamed-2026-02-09t200346-687.png?w=200" width="200" alt="How2Everything: Mining the web to evaluate and improve LLMs on real-world procedures" />
            </div>
          ]]>
        </description>
      </item>
      
      <item>
        <guid isPermaLink="false">https://allenai.org/blog/nature-openscilm</guid>
        <title>Now in Nature: Synthesizing scientific literature with retrieval-augmented LMs</title>
        <link>https://allenai.org/blog/nature-openscilm</link>
        <pubDate>04 Feb 2026 00:00:00 -0800</pubDate>
        <description>
          <![CDATA[
            <div style="display: flex; gap: 12px">
              <div>We're excited to share that our paper “Synthesizing scientific literature with retrieval-augmented language models” has been accepted to Nature.
</div>
              <img src="https://www.datocms-assets.com/64837/1770163834-naturepaperthumbnail.jpg?w=200" width="200" alt="Now in Nature: Synthesizing scientific literature with retrieval-augmented LMs" />
            </div>
          ]]>
        </description>
      </item>
      
      <item>
        <guid isPermaLink="false">https://allenai.org/blog/theorizer</guid>
        <title>Theorizer: Turning thousands of papers into scientific laws</title>
        <link>https://allenai.org/blog/theorizer</link>
        <pubDate>28 Jan 2026 00:00:00 -0800</pubDate>
        <description>
          <![CDATA[
            <div style="display: flex; gap: 12px">
              <div>Theorizer is a system that automatically reads scientific literature and synthesizes structured, testable theories.</div>
              <img src="https://www.datocms-assets.com/64837/1769101042-ai2-theorizer-graphic-development-v2-1.png?w=200" width="200" alt="Theorizer: Turning thousands of papers into scientific laws" />
            </div>
          ]]>
        </description>
      </item>
      
      <item>
        <guid isPermaLink="false">https://allenai.org/blog/open-coding-agents</guid>
        <title>Open Coding Agents: Fast, accessible coding agents that adapt to any repo</title>
        <link>https://allenai.org/blog/open-coding-agents</link>
        <pubDate>27 Jan 2026 00:00:00 -0800</pubDate>
        <description>
          <![CDATA[
            <div style="display: flex; gap: 12px">
              <div>SERA is the first in our family of Open Coding Agents, achieving state-of-the-art performance at low cost.</div>
              <img src="https://www.datocms-assets.com/64837/1769473127-unnamed-46.jpg?w=200" width="200" alt="Open Coding Agents: Fast, accessible coding agents that adapt to any repo" />
            </div>
          ]]>
        </description>
      </item>
      
      <item>
        <guid isPermaLink="false">https://allenai.org/blog/hiro-ace</guid>
        <title>HiRO-ACE: An accessible solution for kilometer-scale climate simulation</title>
        <link>https://allenai.org/blog/hiro-ace</link>
        <pubDate>21 Jan 2026 00:00:00 -0800</pubDate>
        <description>
          <![CDATA[
            <div style="display: flex; gap: 12px">
              <div>HiRO-ACE is an AI framework that makes kilometer-scale climate simulation dramatically more accessible, generating decades of precipitation data for any region of the globe.</div>
              <img src="https://www.datocms-assets.com/64837/1768936808-screenshot-2026-01-16-at-11-58-13-2.png?w=200" width="200" alt="HiRO-ACE: An accessible solution for kilometer-scale climate simulation" />
            </div>
          ]]>
        </description>
      </item>
      
      <item>
        <guid isPermaLink="false">https://allenai.org/blog/bolmo</guid>
        <title>Introducing Bolmo: Byteifying the next generation of language models</title>
        <link>https://allenai.org/blog/bolmo</link>
        <pubDate>15 Dec 2025 00:00:00 -0800</pubDate>
        <description>
          <![CDATA[
            <div style="display: flex; gap: 12px">
              <div>Bolmo is a new byte-level family built by adapting Olmo 3 into a fast, flexible byte-based model with a short extra training run.</div>
              <img src="https://www.datocms-assets.com/64837/1765565350-ai2-bolmo-graphic-development-v1.png?w=200" width="200" alt="Introducing Bolmo: Byteifying the next generation of language models" />
            </div>
          ]]>
        </description>
      </item>
      
      <item>
        <guid isPermaLink="false">https://allenai.org/blog/neurodiscoverybench</guid>
        <title>NeuroDiscoveryBench: Benchmarking AI for neuroscience data analysis</title>
        <link>https://allenai.org/blog/neurodiscoverybench</link>
        <pubDate>12 Dec 2025 00:00:00 -0800</pubDate>
        <description>
          <![CDATA[
            <div style="display: flex; gap: 12px">
              <div>NeuroDiscoveryBench is a benchmark to test how well AI systems can answer questions grounded in real-world neuroscience data.</div>
              <img src="https://www.datocms-assets.com/64837/1765494808-ai2-neurodiscoverybench-graphic-development-v1-3.png?w=200" width="200" alt="NeuroDiscoveryBench: Benchmarking AI for neuroscience data analysis" />
            </div>
          ]]>
        </description>
      </item>
      
      <item>
        <guid isPermaLink="false">https://allenai.org/blog/molmo2</guid>
        <title>Molmo 2: State-of-the-art video understanding, pointing, and tracking</title>
        <link>https://allenai.org/blog/molmo2</link>
        <pubDate>11 Dec 2025 00:00:00 -0800</pubDate>
        <description>
          <![CDATA[
            <div style="display: flex; gap: 12px">
              <div>Molmo 2, a new suite of state-of-the-art vision-language models with open weights, training data, and training code, can analyze videos and multiple images at once.</div>
              <img src="https://www.datocms-assets.com/64837/1765856912-molmoheroimage-1.jpg?w=200" width="200" alt="Molmo 2: State-of-the-art video understanding, pointing, and tracking" />
            </div>
          ]]>
        </description>
      </item>
      
      <item>
        <guid isPermaLink="false">https://allenai.org/blog/olmo3</guid>
        <title>Olmo 3: Charting a path through the model flow to lead open-source AI </title>
        <link>https://allenai.org/blog/olmo3</link>
        <pubDate>20 Nov 2025 00:00:00 -0800</pubDate>
        <description>
          <![CDATA[
            <div style="display: flex; gap: 12px">
              <div>Our new flagship Olmo 3 model family empowers the open source community with not only state-of-the-art open models, but the entire model flow and full traceability back to training data.</div>
              <img src="https://www.datocms-assets.com/64837/1763593598-olmoblogthumbnail.jpg?w=200" width="200" alt="Olmo 3: Charting a path through the model flow to lead open-source AI " />
            </div>
          ]]>
        </description>
      </item>
      
      <item>
        <guid isPermaLink="false">https://allenai.org/blog/dr-tulu</guid>
        <title>DR Tulu: An open, end-to-end training recipe for long-form deep research</title>
        <link>https://allenai.org/blog/dr-tulu</link>
        <pubDate>18 Nov 2025 00:00:00 -0800</pubDate>
        <description>
          <![CDATA[
            <div style="display: flex; gap: 12px">
              <div>We introduce Deep Research Tulu (DR Tulu), an open post-training recipe and framework for long-form deep research agents.</div>
              <img src="https://www.datocms-assets.com/64837/1763149980-ai2-dr-tulu-graphic-development-v2-2.png?w=200" width="200" alt="DR Tulu: An open, end-to-end training recipe for long-form deep research" />
            </div>
          ]]>
        </description>
      </item>
      
      <item>
        <guid isPermaLink="false">https://allenai.org/blog/olmoearth</guid>
        <title>Introducing OlmoEarth Platform: Powerful open infrastructure for planetary insights</title>
        <link>https://allenai.org/blog/olmoearth</link>
        <pubDate>04 Nov 2025 00:00:00 -0800</pubDate>
        <description>
          <![CDATA[
            <div style="display: flex; gap: 12px">
              <div>Introducing OlmoEarth Platform, an open, scalable, end-to-end system that turns multi-sensor Earth data into constantly-updating, decision-ready insights.</div>
              <img src="https://www.datocms-assets.com/64837/1762205332-unnamed-2025-11-03t162841-112.png?w=200" width="200" alt="Introducing OlmoEarth Platform: Powerful open infrastructure for planetary insights" />
            </div>
          ]]>
        </description>
      </item>
      
    </channel>
  </rss>