<!DOCTYPE html><html lang="en" xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml" xmlns:o="urn:schemas-microsoft-com:office:office" style="font-size:16px;"><head></head><head><meta charset="utf-8"/><!--[if !mso]><!--><meta http-equiv="X-UA-Compatible" content="IE=edge"/><!--<![endif]--><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="x-apple-disable-message-reformatting"/><meta name="format-detection" content="telephone=no,address=no,email=no,date=no,url=no"/><meta name="color-scheme" content="light"/><meta name="supported-color-schemes" content="light"/><title>Beyond Semantics: The Unreasonable Effectiveness of Reasonless Intermediate Tokens</title><!--[if mso]><xml><o:OfficeDocumentSettings><o:AllowPNG/><o:PixelsPerInch>96</o:PixelsPerInch></o:OfficeDocumentSettings></xml><![endif]--><style> :root { color-scheme: light; supported-color-schemes: light; } body { margin: 0; padding: 0; min-width: 100%!important; -ms-text-size-adjust: 100% !important; -webkit-transform: scale(1) !important; -webkit-text-size-adjust: 100% !important; -webkit-font-smoothing: antialiased !important; } .body { word-wrap: normal; word-spacing:normal; } table.mso { width: 100%; border-collapse: collapse; padding: 0; table-layout: fixed; } img { border: 0; outline: none; } table { mso-table-lspace: 0px; mso-table-rspace: 0px; } td, a, span { mso-line-height-rule: exactly; } #root [x-apple-data-detectors=true], a[x-apple-data-detectors=true], #MessageViewBody a { color: inherit !important; text-decoration: inherit !important; font-size: inherit !important; font-family: inherit !important; font-weight: inherit !important; line-height: inherit !important; } span.MsoHyperlink { color: inherit !important; mso-style-priority: 99 !important; } span.MsoHyperlinkFollowed { color: inherit !important; mso-style-priority: 99 !important; } .a { background-color:#dedede; } .b { background-color:#2a2a2a; } .c { background-color:#ffffff; } .d { background-color:#fff0c8; } .d2 { background-color:#FFFFFF; } .d3 { background-color:#FFFFFF; } h1 a { text-decoration:none;color:#2C81E5;font-style:italic; } h2 a { text-decoration:none;color:#2C81E5;font-style:italic; } h3 a { text-decoration:none;color:#2C81E5;font-style:italic; } h4 a { text-decoration:none;color:#2C81E5;font-style:italic; } h5 a { text-decoration:none;color:#2C81E5;font-style:italic; } h6 a { text-decoration:none;color:#2C81E5;font-style:italic; } h1, h1 a, h2, h2 a, h3, h3 a, h4, h4 a, h5, h5 a, h6, h6 a, ul, li, ol, p, p a { margin: 0;padding: 0; } h1 { font-family:'Trebuchet MS','Lucida Grande',Tahoma,sans-serif;font-weight:700;font-size:28px;color:#2A2A2A;line-height:42px;padding-bottom:4px;padding-top:16px;mso-margin-top-alt:16px;mso-margin-bottom-alt:4px } h2 { font-family:'Trebuchet MS','Lucida Grande',Tahoma,sans-serif;font-weight:700;font-size:24px;color:#2A2A2A;line-height:36px;padding-bottom:4px;padding-top:16px;mso-margin-top-alt:16px;mso-margin-bottom-alt:4px } h3 { font-family:'Trebuchet MS','Lucida Grande',Tahoma,sans-serif;font-weight:400;font-size:20px;color:#2A2A2A;line-height:30px;padding-bottom:4px;padding-top:16px;mso-margin-top-alt:16px;mso-margin-bottom-alt:4px } h4 { font-family:'Trebuchet MS','Lucida Grande',Tahoma,sans-serif;font-weight:400;font-size:18px;color:#2A2A2A;line-height:27px;padding-bottom:4px;padding-top:16px;mso-margin-top-alt:16px;mso-margin-bottom-alt:4px } h5 { font-family:'Trebuchet MS','Lucida 
Grande',Tahoma,sans-serif;font-weight:400;font-size:16px;color:#2A2A2A;line-height:24px;padding-bottom:4px;padding-top:16px;mso-margin-top-alt:16px;mso-margin-bottom-alt:4px } h6 { font-family:'Trebuchet MS','Lucida Grande',Tahoma,sans-serif;font-weight:400;font-size:14px;color:#2A2A2A;line-height:21px;padding-bottom:4px;padding-top:16px;mso-margin-top-alt:16px;mso-margin-bottom-alt:4px } p { font-family:'Georgia','Times New Roman',serif;font-weight:400;color:#2D2D2D;font-size:16px;line-height:24px;padding-bottom:8px;padding-top:8px;mso-margin-top-alt:8px;mso-margin-bottom-alt:8px; } p a, .e a, ul a, li a, .h a, .h2 a, .h3 a { word-break:break-word;color:#2C81E5 !important;text-decoration:none;font-style:italic; } p a span, .e a span, ul a span, li a span { color: inherit } p .bold { font-weight:bold;color:#2D2D2D; } p span[style*="font-size"] { line-height: 1.6; } .f p { font-size:12px;line-height:15px;color:#2D2D2D;padding:0; } .f p a { color:#2D2D2D !important; } .g p { font-family:'Helvetica',Arial,sans-serif;font-size:14px;line-height:20px;font-weight:normal;margin:0; } .g p a { text-decoration: underline; } .i p { font-family:'Helvetica',Arial,sans-serif;line-height:23px;font-size:15px;color:#2D2D2D; } .i p a { color:#2D2D2D !important; } .i2 p { font-family:'Helvetica',Arial,sans-serif;line-height:23px;font-size:15px;color:#2D2D2D; } .i2 p a { color:#2D2D2D !important; } .i3 p { font-family:'Helvetica',Arial,sans-serif;line-height:43px;font-size:24px;color:#2D2D2D; } .i3 p a { color:#2D2D2D !important; } .h p a { color:#595959 !important; } .h2 p a { color:#595959 !important; } .h3 p a { color:#595959 !important; } .f p a, .i p a, .i2 p a, .i3 p a, .h p a, .h2 p a, .h3 p a { text-decoration:underline; } .j { border-top:3px solid #ffeb2d; } .k p { padding-left:15px;padding-bottom:0px;padding-top:6px;mso-margin-top-alt:6px;mso-margin-bottom-alt:0px;mso-margin-left-alt:15px; } .o { background-color:#FFFFFF;border:1px solid #F1F1F1;border-radius:5px; } .o p { font-family:'Helvetica',Arial,sans-serif;padding:0px;margin:0px; } .l p, .l p a { font-size:14px;line-height:20px;font-weight: bold;color:#2D2D2D;padding-bottom:6px;mso-margin-bottom-alt:6px;text-decoration:none; } .m p, .m p a { font-size:13px;line-height:18px;font-weight:400;color:#2D2D2D;padding-bottom:6px;mso-margin-bottom-alt:6px;text-decoration:none; } .n p, .n p a { font-size:12px;line-height:17px;font-weight:400;color:#2D2D2D;padding-bottom:6px;mso-margin-bottom-alt:6px;text-decoration:none; } .p { background-color:#FFFFFF;max-width:520px;border:1px solid #E1E8ED;border:1px solid rgba(80, 80, 80, 0.3);border-radius:5px; } .q { font-size:16px;font-family:Helvetica,Roboto,Calibri,sans-serif !important;border:1px solid #e1e8ed;border:1px solid rgba(80, 80, 80, 0.3);border-radius:10px;background-color:#FFFFFF; } .q p { font-size:16px;font-family:system-ui,Helvetica,Roboto,Calibri,sans-serif !important;color:#222222;padding:4px 0; } .r { border:1px solid #E1E8ED !important;border-radius:5px; } .s p { font-size: 14px; line-height: 17px; font-weight: 400; color: #697882; text-decoration: none; } .t p { font-family:'Helvetica',Arial,sans-serif;font-size:12px;line-height:18px;font-weight:400;color:#000000;font-style:italic;padding:4px 0px 0px; } .v { border-radius:10px;border:solid 0px #DFD150;background-color:#2C81E5;font-family:'Open Sans','Segoe UI','Apple SD Gothic Neo','Lucida Grande','Lucida Sans Unicode',sans-serif;color:#FFFFFF; } .v a { text-decoration:none;display:block;color:#FFFFFF; } .w p { 
font-size:12px;line-height:15px;font-weight:400;color:#FFFFFF; } .w p a { text-decoration: underline !important;color:#FFFFFF !important; } ul { font-family:'Helvetica',Arial,sans-serif;margin:0px 0px 0px 25px !important;padding:0px !important;color:#2D2D2D;line-height:24px;list-style:disc;font-size:16px; } ul > li { font-family:'Helvetica',Arial,sans-serif;margin:10px 0px 0px 0px !important;padding: 0px 0px 0px 0px !important; color: #2D2D2D; list-style:disc; } ol { font-family:'Helvetica',Arial,sans-serif;margin: 0px 0px 0px 25px !important;padding:0px !important;color:#2D2D2D;line-height:24px;list-style:decimal;font-size:16px; } ol > li { font-family:'Helvetica',Arial,sans-serif;margin:10px 0px 0px 0px !important;padding: 0px 0px 0px 0px !important; color: #2D2D2D; list-style:decimal; } .e h3, .e p, .e span { padding-bottom:0px;padding-top:0px;mso-margin-top-alt:0px;mso-margin-bottom-alt:0px; } .e span, .e li { font-family:'Helvetica',Arial,sans-serif;font-size:16px;color:#2D2D2D;line-height:24px; } .rec { font-family: ui-sans-serif, system-ui, -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji" !important; } .rec__button:hover { background-color: #f9fafb !important; } .copyright a {color: inherit !important; text-decoration: none !important; font-size: inherit !important; font-family: inherit !important; font-weight: inherit !important; line-height: inherit !important;} .txt_social p { padding: 0; word-break: break-all; } .table, .table-c, .table-h { border: 1px solid #C0C0C0; } .table-c { padding:5px; background-color:#FFFFFF; } .table-c p { color: #2D2D2D; font-family:'Helvetica',Arial,sans-serif !important;overflow-wrap: break-word; } .table-h { padding:5px; background-color:#F1F1F1; } .table-h p { color: #2A2A2A; font-family:'Trebuchet MS','Lucida Grande',Tahoma,sans-serif !important;overflow-wrap: break-word; } @media only screen and (max-width:667px) { .aa { width: 100% !important; } .bb img { width: 100% !important; height: auto !important; max-width: none !important; } .cc { padding: 0px 8px !important; } .ee { padding-top:10px !important;padding-bottom:10px !important; } .ff ul, .ff ol { margin: 0px 0px 0px 10px !important;padding: 0px !important; } .ff li { margin:10px 0px 0px 10px !important; } .r {height:140px !important;} .s p { font-size:13px !important;line-height:15px !important; } .mob-hide {display:none !important;} .mob-stack {display:block !important;width:100% !important;} .mob-w-full {width:100% !important;} .mob-block {display:block !important;} .embed-img {padding:0px 0px 12px 0px !important;} .socialShare {padding-top:15px !important;} .rec { padding-left:15px!important;padding-right:15px!important; } .bodyWrapper { padding:7px 4px 7px 4px !important; } .social-mobile {float:left !important;margin-top:10px !important;} } @media screen and (max-width: 480px) { u + .a .gg { width: 100% !important; width: 100vw !important; } .tok-heart { padding-top:75% !important; } .tok-play { padding-top: 250px !important; } } @media screen and (max-width: 320px) { .tok-heart { padding-top:65% !important; } } .u { border: 1px solid #CACACA !important; border-radius: 2px !important; background-color: #ffffff !important; padding: 0px 13px 0px 13px !important; font-family:ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans",sans-serif !important;font-size: 12px !important; color: #767676 
!important; } .u a { text-decoration: none; display: block !important; color: #767676 !important; margin: 0px !important; } .u span, .u img { color: #767676 !important;margin:0px !important; max-height:32px !important;background-color:#ffffff !important; } </style><!--[if mso]><style type="text/css"> sup { font-size: 100% !important;vertical-align: .5em !important;mso-text-raise: -1.5% !important;line-height: 0 !important; } ul { margin-left:0px !important; margin-right:10px !important; margin-top:20px !important; margin-bottom:20px !important; } ul li { margin-left: 0px !important; mso-special-format: decimal; } ol { margin-left:0px !important; margin-right:10px !important; margin-top:20px !important; margin-bottom:20px !important; } ol li { margin-left: 0px !important; mso-special-format: decimal; } li.listItem { margin-left:15px !important; margin-top:0px !important; } .paddingDesktop { padding: 10px 0 !important; } .edm_outlooklist { margin-left: -20px !important; } .embedImage { display:none !important; } </style><![endif]--><style> @font-face { font-family: 'Open Sans'; font-style: normal; font-weight: 700; font-display: swap; src: url('https://fonts.gstatic.com/s/opensans/v40/memSYaGs126MiZpBA-UvWbX2vVnXBbObj2OVZyOOSr4dVJWUgsg-1x4gaVIUwaEQbjA.woff2') format('woff2'); } @font-face { font-family: 'Open Sans'; font-style: italic; font-weight: 700; font-display: swap; src: url('https://fonts.googleapis.com/css2?family=Open+Sans:ital,wght@1,700&display=swap') format('woff2'); } </style></head><body class="a" style="margin:0px auto;padding:0px;word-wrap:normal;word-spacing:normal;background-color:#dedede;"><div role="article" aria-roledescription="email" aria-label="email_name" lang="en" style="font-size:1rem"><div style="display:none;max-height:0px;overflow:hidden;"> Plus more about Reinforcement Learning Finetunes Small Subnetworks in Large Language Models and MMaDA: Multimodal Large Diffusion Language Models  ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ ‌ </div><table role="none" width="100%" border="0" cellspacing="0" align="center" cellpadding="0" class="gg"><tr><td align="center" valign="top"><table role="none" width="670" border="0" cellspacing="0" cellpadding="0" class="aa" style="width:670px;table-layout:fixed;"><tr><td class="bodyWrapper" align="center" valign="top" style="padding:7px 7px 7px 7px;"><table role="none" width="100%" border="0" cellspacing="0" cellpadding="0" align="center"><tr><td align="center" valign="top" style="border-width:0px 0px 0px 0px;border-style: solid; border-color: #2a2a2a;border-radius:10px 10px 0px 0px;background-color:#ffffff;" class="c"><table role="none" width="100%" border="0" cellspacing="0" cellpadding="0" align="center"><tr id="header"><td style="padding:28px 28px 0px 28px;"><div style="padding-top:0px;padding-right:0px;padding-bottom:20px;padding-left:0px;"><table role="none" width="100%" border="0" cellspacing="0" cellpadding="0" align="center"><tr><td class="f" align="right" valign="top"><p> May 27, 2025 | <a 
href="https://elink4f7.mail.bycloud.ai/ss/c/u001.c6q0w4g5sodbtO4I1B_pxSdB5RCIH6yy1Fm1CYma3EwnGDuIqhrRgmiT4T0AaPbsZRHrZyhPjpLZkN12tajbBFojOUmnLe2FgyZ4szuA7Un8Ghw5YH3jBui9uDwMaUkUoA79p6kfJC1D-HabmYhh4qLVA23ebvdb5HVYAndzpkN2H5_RI09CkPAGCVaNJ1XTjnjbamebguGDz9Djuykz7h1A8qvOIGoD7gIX-Gpi7GCPn4jJt7rWWR0SrD5ggRUKY_0c6xgWGo186_YJ0Ehtewo-uLsqBOMY1ybpyYLYkcX6JLj8ZbOiZfq44LC6fm2H1gwH2f8LWzPKQN14nV4DdZBS4PvnhOftiuFIMEoO7cmeCnL0bq6N3QK72HlYGjE3QjRjxhnEAykzLQwQW58-H3aBkvsYdz7-oOM4Kfck3CPXIIuj3YbeAfAKqlhNo71ydrmeQpT678Ug4T0bKbZZeHeTLX8yj7Ux-fRpppvNcxhQYiBBtKtyPx0k_6mFvI7ppTayuBE377JHEDhNl0AluKv9yplLdMEBd-5xFqTX4CNJwIvh14X1cQvSnNjQaDwdPmNlqFC8D9oBje1h-7Uc_NPk6eU4KAtjnN7l7gUC5h-RSwMhsnicRq3pkp7NsVZuERSqRrHUtZ7k67mFwcYWPKxj-pdr32wTFTouW51hQLvbhjNVs_zBS5aLNzRpJ51lxoO4xqXua1irVVpkVn3TFNbV5gO5dHDbLpyxC7uJvzc/4gu/i3tAb-AfRv2ETmob543EHQ/h0/h001.XET0qbFmyGkP31MA-OahJ9BXFkWHggtPZivALsQrosU"><span class="translation_missing" title="translation missing: en.templates.posts.email.v3.header.read_online">Read Online</span></a></p></td></tr><tr><td class="dd" align="center" valign="top" style="padding:15px 0;"><table role="none" width="100%" border="0" cellspacing="0" cellpadding="0" align="center"><tr><td align="center" valign="top"><h1 style="text-align:left;font-family:'Open Sans','Segoe UI','Apple SD Gothic Neo','Lucida Grande','Lucida Sans Unicode',sans-serif;font-weight:Bold;font-size:32px;color:#2A2A2A;padding:2px 0;line-height:38px;"> Beyond Semantics: The Unreasonable Effectiveness of Reasonless Intermediate Tokens </h1><p style="text-align:left;font-family:'Helvetica',Arial,sans-serif;font-weight:normal;font-size:20px;color:#3E3E3E;padding:5px 0;line-height:24px;"> Plus more about Reinforcement Learning Finetunes Small Subnetworks in Large Language Models and MMaDA: Multimodal Large Diffusion Language Models </p></td></tr></table></td></tr><tr><td style="height:0px;width:0px;"><div style="height:1px;" data-open-tracking="true"> <img src="https://elink4f7.mail.bycloud.ai/ss/o/u001.3wmUuY8gEWd4_869a_eXcg/4gu/i3tAb-AfRv2ETmob543EHQ/ho.gif" alt="" width="1" height="1" border="0" style="height:1px !important;width:1px !important;border-width:0 !important;margin-top:0 !important;margin-bottom:0 !important;margin-right:0 !important;margin-left:0 !important;padding-top:0 !important;padding-bottom:0 !important;padding-right:0 !important;padding-left:0 !important;"/> </div></td></tr></table></div></td></tr><tr id="content-blocks"><td class="email-card-body" align="center" valign="top" style="padding-bottom:28px;"><table role="none" width="100%" border="0" cellspacing="0" cellpadding="0" align="center"><tr><td id="nov-18-th-nov-24-th-33-latest-ai-re" class="dd" align="left" valign="top" style="color:#2A2A2A;font-weight:normal;padding:0px 28px;text-align:left;"><h6 style="color:#2A2A2A;font-weight:normal;mso-line-height-alt:87.5%;"><i>May 19th ~ May 26th</i><br><i>#57 Latest AI Research Explained Simply</i></h6></td></tr><tr><td><table role="none" width="100%" border="0" cellspacing="0" cellpadding="0" style=""><tr><td bgcolor="#222222" style="background-color:#222222;padding:0.0px 0.0px 0.0px 0.0px;"><table role="none" width="100%" border="0" cellspacing="0" cellpadding="0"><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"></p></td></tr></table></td></tr></table></td></tr><tr><td id="industry-news-in-1-line" class="dd" align="left" valign="top" style="color:#2A2A2A;font-weight:Bold;padding:0px 28px;text-align:left;"><h2 
style="color:#2A2A2A;font-weight:Bold;mso-line-height-alt:150.0%;">🗞️ Industry News in 1 Line</h2></td></tr><tr><td style="padding-bottom:12px;padding-left:50px;padding-right:40px;padding-top:12px;" class="ee"><div style="margin-left:0px;" class="edm_outlooklist"><ol start="1" style="list-style-type:decimal;margin:0px 0px;padding:0px 0px 0px 0px;"><li class="listItem ultext"><p style="mso-line-height-alt:150.0%;padding:0px;text-align:left;word-break:break-word;"><span style="background-color:#e0e0e0;"><span style="color:rgb(255, 58, 58);font-size:0.6rem;">♥ 20k</span></span> Anthropic has released <a class="link" href="https://elink4f7.mail.bycloud.ai/ss/c/u001.DUiN96-Eq7pUHzwEhy5j2yo571wOI2ayM48Skcu9lJJ2zKuwcru1RJhnQ7cr6X5MnGoHtYx0OVb_T-ogPC0kGGKwreRrJk3Md3wPw9eJCzXMdyPkv91LCDazg7_B_Hbv4vLxAWnuhPsS7FW7YXxLI9Zi3f8OXjhJYYEvB2wSjgw/4gu/i3tAb-AfRv2ETmob543EHQ/h1/h001.inehowBP1ODy1eN78BTGdMxV0hC-KdRZB4nnDvvjvP4" target="_blank" rel="noopener noreferrer nofollow"><span>Claude Opus 4 and Claude Sonnet 4</span></a>, the next generation of its AI models featuring hybrid architecture with instant and extended reasoning modes. Claude Opus 4 is Anthropic’s biggest model and leads in software engineering benchmarks, while Claude Sonnet 4 provides enhanced coding and reasoning capabilities compared to its predecessor. </p><table role="none" border="0" cellspacing="0" cellpadding="0" style="margin:0 auto 0 auto;"><tr><td align="center" valign="top" style="width:480px;"><img src="https://media.beehiiv.com/cdn-cgi/image/fit=scale-down,format=auto,onerror=redirect,quality=80/uploads/asset/file/0aebc80e-b4bd-4cc5-aa7c-5bfc42dbce9e/image.png?t=1748368161" alt="" height="auto" width="480" style="display:block;width:100%;" border="0"/></td></tr></table></li><li class="listItem ultext"><p style="mso-line-height-alt:150.0%;padding:0px;text-align:left;word-break:break-word;"><span style="background-color:#e0e0e0;"><span style="color:rgb(255, 58, 58);font-size:0.6rem;">♥ 5.3k</span></span> Mistral has launched <a class="link" href="https://elink4f7.mail.bycloud.ai/ss/c/u001.c6q0w4g5sodbtO4I1B_pxfFOMT61RFABBNLSEd2-2FEBgyttG0Kt_aZOCvQUdhEFBLaNOBvcNUPAyDaA-uk6FF9P-ZJBXhTm34gY9oAwVcA2hPgdTIoNoNUJJW8NVYyvE--23GnA7RlaguuoJPAqDbo90qeBSWDSvByXE9_KgT4/4gu/i3tAb-AfRv2ETmob543EHQ/h2/h001.fBL3IIffIlMdilDAk9Z2q39sbP4dM6FmQhYy67cSg88" target="_blank" rel="noopener noreferrer nofollow"><span>Document AI</span></a>, which is an enterprise-grade document processing solution that uses advanced OCR technology and achieves 99%+ accuracy across multiple languages. The platform processes up to 2,000 pages per minute on a single GPU and offers structured data extraction capabilities for complex documents including tables, forms, and handwritten content. 
</p><table role="none" border="0" cellspacing="0" cellpadding="0" style="margin:0 auto 0 auto;"><tr><td align="center" valign="top" style="width:600px;"><img src="https://media.beehiiv.com/cdn-cgi/image/fit=scale-down,format=auto,onerror=redirect,quality=80/uploads/asset/file/2ab68462-4be6-4361-9e87-4f3e46f2b24b/image.png?t=1748368382" alt="" height="auto" width="600" style="display:block;width:100%;" border="0"/></td></tr></table></li></ol></div></td></tr><tr><td><table role="none" width="100%" border="0" cellspacing="0" cellpadding="0" style=""><tr><td bgcolor="#222222" style="background-color:#222222;padding:0.0px 0.0px 0.0px 0.0px;"><table role="none" width="100%" border="0" cellspacing="0" cellpadding="0"><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"></p></td></tr></table></td></tr></table></td></tr><tr><td><table role="none" width="100%" border="0" cellspacing="0" cellpadding="0" style=""><tr><td bgcolor="transparent" style="background-color:transparent;border-color:#2C81E5;border-style:solid;border-width:5px;padding:0.0px 0.0px 0.0px 0.0px;"><table role="none" width="100%" border="0" cellspacing="0" cellpadding="0"><tr><td class="dd" align="left" valign="top" style="color:#2A2A2A;font-weight:Bold;padding:0px 28px;text-align:left;"><h2 style="color:#2A2A2A;font-weight:Bold;mso-line-height-alt:150.0%;"><span style=""><a class="link" href="https://elink4f7.mail.bycloud.ai/ss/c/u001.DUiN96-Eq7pUHzwEhy5j29rweNtSHTMPdCBNsuu15hStNWn7Cp-_z7KAfyQIe6He4nQT_96lcVXX1db5XGEmA2fwDiqKQU_6H5ubd8zR_R7fdhqA0y5gF6aRHwtvIqQk/4gu/i3tAb-AfRv2ETmob543EHQ/h3/h001.tPkZq7IOi-qn3qCRAcxx7jXk_HUPP46byiuUXhEN6bM" target="_blank" rel="noopener noreferrer nofollow"><span>findmypapers.ai</span></a></span><span style=""> got a tiny new feature </span>👀</h2></td></tr><tr><td align="center" valign="top" style="padding-bottom:20px;padding-left:28px;padding-right:28px;padding-top:20px; " class="dd"><table role="none" border="0" cellspacing="0" cellpadding="0" style="margin:0 auto 0 auto;"><tr><td align="center" valign="top" style="width:600px;"><a href="https://elink4f7.mail.bycloud.ai/ss/c/u001.DUiN96-Eq7pUHzwEhy5j29rweNtSHTMPdCBNsuu15hStNWn7Cp-_z7KAfyQIe6HeOFOoVeJhHHgfKKiaK6i5-Ic350jZ21CxuRI2MkUHelTFWfhrsXZS1ZYo28G1EYDp/4gu/i3tAb-AfRv2ETmob543EHQ/h4/h001.19yu_qhr1dJDTVtLz11-Rgv7kjVVZFX1cKAdrLCPAfg" rel="noopener noreferrer nofollow" style="text-decoration:none;" target="_blank"><img src="https://media.beehiiv.com/cdn-cgi/image/fit=scale-down,format=auto,onerror=redirect,quality=80/uploads/asset/file/bfba51a0-ee8e-42d5-8b45-51ddfd5ebe33/image.png?t=1748369227" alt="" height="auto" width="600" style="display:block;width:100%;" border="0"/></a></td></tr><tr><td align="center" valign="top" class="t" style="width:600px; padding: 4px 0px 4px 0px;"><p><span style="">papers preview, go give it a spin!</span></p></td></tr></table></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"><span style="color:rgb(34, 34, 34);font-family:Georgia, "Times New Roman", serif;font-size:16px;">While we are improving the retrieval quality for finding AI research papers, we still want to make the search experience a bit less boring.</span></p></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"><span style="color:rgb(34, 34, 34);font-family:Georgia, "Times New Roman", 
serif;font-size:16px;">So now, we are able to display the papers that are being searched on! (truly a new tiny feature lol)</span></p></td></tr><tr><td align="center" valign="top" style="padding-bottom:20px;padding-left:28px;padding-right:28px;padding-top:20px; " class="dd"><table role="none" border="0" cellspacing="0" cellpadding="0" style="margin:0 auto 0 auto;"><tr><td align="center" valign="top" style="width:600px;"><a href="https://elink4f7.mail.bycloud.ai/ss/c/u001.DUiN96-Eq7pUHzwEhy5j29rweNtSHTMPdCBNsuu15hStNWn7Cp-_z7KAfyQIe6Heu4XfgVZCIYSF5UjwZDASpyWFcfhFkP2TFOOwHXWr6iDTPPq42Rx9e09rZOxXYfGf/4gu/i3tAb-AfRv2ETmob543EHQ/h5/h001.3LoRoXJGr4m0emA_3l6LIVaMdB3KRwrC6UzHjaqCuBE" rel="noopener noreferrer nofollow" style="text-decoration:none;" target="_blank"><img src="https://media.beehiiv.com/cdn-cgi/image/fit=scale-down,format=auto,onerror=redirect,quality=80/uploads/asset/file/b33452d0-4484-4fe0-aa6b-68f337e064b3/image.png?t=1748369486" alt="" height="auto" width="600" style="display:block;width:100%;" border="0"/></a></td></tr><tr><td align="center" valign="top" class="t" style="width:600px; padding: 4px 0px 4px 0px;"><p><span style="">and a nicer citation section</span></p></td></tr></table></td></tr><tr><td align="center" valign="top" style="padding-bottom:14px;padding-left:28px;padding-right:28px;padding-top:14px;text-align:center;width:100%;word-break:break-word;" class="dd"><table role="none" border="0" cellspacing="0" cellpadding="0" align="center" style="margin:14px auto 14px auto;"><tr><td align="center" valign="middle" height="44.75" style="height:44.75px;background-color:#2C81E5;border-color:#DFD150;border-radius:10px 10px 10px 10px;border-style:solid;border-width:0px 0px 0px 0px;color:#FFFFFF;font-family:'Open Sans','Segoe UI','Apple SD Gothic Neo','Lucida Grande','Lucida Sans Unicode',sans-serif;"><a href="https://elink4f7.mail.bycloud.ai/ss/c/u001.DUiN96-Eq7pUHzwEhy5j29rweNtSHTMPdCBNsuu15hStNWn7Cp-_z7KAfyQIe6He9SUNC4nbJ6LlgkqlU9UDXwVB3vJvizYgRJ7hNxQcy0ODHSr63C3LoSd725TYLfvP/4gu/i3tAb-AfRv2ETmob543EHQ/h6/h001.ykndB3lwP6Q5yvIF0RmtX5z-wNF5VD950dca-0Hkj64" target="_blank" rel="noopener noreferrer nofollow" style="color:#FFFFFF;display:block;font-size:16px;font-size:16px;font-weight:normal;padding:0px 14px;padding:14px 14px 14px 14px;text-decoration:none;"> Check Out FMP </a></td></tr></table></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"><span style="font-size:0.8rem;font-weight:500;"><a class="link" href="https://elink4f7.mail.bycloud.ai/ss/c/u001.tLfGW26lAwaS9gFg17HSoGymQ3NNPtd5dE5MV_8UgjIDFPVXngz8pvQBldSW42yhUe_Qiq6DgEPMEBuPL9yfRpXelTiuu2kS8pLFvsoem_XoZoy_n13sTKUhZIbl0VH6/4gu/i3tAb-AfRv2ETmob543EHQ/h7/h001.i8A0ucD4Gw298NkcGSAhaFt0RFHuAAtJtmMlivxpnwc" target="_blank" rel="noopener noreferrer nofollow"><span>Advertise with The AI Timeline! 
</span></a></span></p></td></tr></table></td></tr></table></td></tr><tr><td><table role="none" width="100%" border="0" cellspacing="0" cellpadding="0" style=""><tr><td bgcolor="#222222" style="background-color:#222222;padding:0.0px 0.0px 0.0px 0.0px;"><table role="none" width="100%" border="0" cellspacing="0" cellpadding="0"><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"></p></td></tr></table></td></tr></table></td></tr><tr><td id="beyond-semantics-the-unreasonable-e" class="dd" align="left" valign="top" style="color:#2A2A2A;font-weight:Bold;padding:0px 28px;text-align:left;"><h2 style="color:#2A2A2A;font-weight:Bold;mso-line-height-alt:150.0%;">Beyond Semantics: The Unreasonable Effectiveness of Reasonless Intermediate Tokens</h2></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"><span style=""><i>Stechly et al. [SCAI, Arizona State University]</i></span></p></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"><span style="background-color:#e0e0e0;"><span style="color:rgb(255, 58, 58);font-size:0.6rem;"> ♥ 569 </span></span><span style="color:rgb(44, 129, 229);font-size:0.6rem;"> </span><span style="background-color:#e0e0e0;"><span style="color:rgb(44, 129, 229);font-size:0.6rem;"> LLM Reasoning </span></span></p></td></tr><tr><td id="thinking-about-chain-of-thought" class="dd" align="left" valign="top" style="color:#2A2A2A;font-weight:normal;padding:0px 28px;text-align:left;"><h3 style="color:#2A2A2A;font-weight:normal;mso-line-height-alt:125.0%;"><span style="color:rgb(67, 67, 67);">Thinking about Chain of Thought</span></h3></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"> If you’ve followed recent advances in AI reasoning, you’ll know that it’s common for LLMs to “think” through problems by generating intermediate steps known as a Chain of Thought (CoT). These steps are meant to resemble human reasoning traces and are widely assumed to explain <span style=""><i>how</i></span> models arrive at answers. </p></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"> But are these intermediate tokens really necessary? This study argues that the semantic validity of CoT steps might matter far less than we think, and that models can excel even when their “thoughts” are nonsensical. Does training models to produce structured, algorithm-like reasoning traces actually teach them to reason algorithmically, or is the performance boost from CoT a byproduct of something else? </p></td></tr><tr><td id="tracing-the-path-between-noise-and-" class="dd" align="left" valign="top" style="color:#2A2A2A;font-weight:normal;padding:0px 28px;text-align:left;"><h3 style="color:#2A2A2A;font-weight:normal;mso-line-height-alt:125.0%;"><span style="color:rgb(67, 67, 67);">Tracing the Path Between Noise and Accuracy in Chain of Thought</span></h3></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"> To test the relationship between reasoning traces and model performance, the researchers designed a controlled environment using maze-solving tasks. 
Transformers were trained to generate both solutions (plans) and intermediate traces mimicking the <span style="color:rgb(24, 128, 56);">A*</span> search algorithm, a classic pathfinding method. Unlike prior work, this setup included a formal validator to check whether generated traces adhered to A*’s semantics (such as correctly updating node costs and prioritizing paths) while also verifying final solutions. </p></td></tr><tr><td align="center" valign="top" style="padding-bottom:20px;padding-left:28px;padding-right:28px;padding-top:20px; " class="dd"><table role="none" border="0" cellspacing="0" cellpadding="0" style="margin:0 auto 0 auto;"><tr><td align="center" valign="top" style="width:600px;"><img src="https://media.beehiiv.com/cdn-cgi/image/fit=scale-down,format=auto,onerror=redirect,quality=80/uploads/asset/file/62361f0a-99f3-4701-8b33-1a59086bc1be/image.png?t=1748357253" alt="" height="auto" width="600" style="display:block;width:100%;" border="0"/></td></tr><tr><td align="center" valign="top" class="t" style="width:600px; padding: 4px 0px 4px 0px;"><p>Examples of mazes. The left is generated by Wilson’s algorithm and is used for model training. The right is generated by the Drunkard’s Walk algorithm.</p></td></tr></table></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"> Surprisingly, models trained on <span style=""><i>correct</i></span> A* traces often produced valid solutions alongside <span style=""><i>invalid</i></span> intermediate steps. What’s even more surprising is that performance improved when models were trained on <span style=""><i>swapped traces</i></span>, where intermediate steps were randomly paired with unrelated mazes. These models ignored the noisy, irrelevant traces during inference but still achieved higher accuracy, particularly on out-of-distribution mazes. </p></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"> After this experiment, the researchers asked exactly the question you are probably asking right now: why would nonsensical traces help? The paper speculates that intermediate tokens act as a form of “prompt augmentation,” nudging the model toward better solutions without requiring semantically meaningful reasoning. Just as adversarial prompts subtly shift model behavior, CoT-like tokens might create an internal context that biases the model toward correct answers. 
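</p></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"> To make the “swapped traces” setup concrete, here is a minimal Python sketch of how such a training set could be constructed; the field names and helper are illustrative assumptions, not the paper’s actual code: </p></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><pre style="font-family:Consolas,Monaco,'Courier New',monospace;font-size:13px;line-height:18px;color:#2D2D2D;background-color:#F1F1F1;border:1px solid #C0C0C0;border-radius:5px;padding:12px;overflow-x:auto;">import random

def make_swapped_dataset(examples, seed=0):
    # Each example is assumed to be a dict with "maze" (the prompt),
    # "trace" (A*-style intermediate tokens), and "plan" (the solution path).
    rng = random.Random(seed)
    traces = [ex["trace"] for ex in examples]
    rng.shuffle(traces)  # re-pair every trace with an unrelated maze
    swapped = []
    for ex, foreign_trace in zip(examples, traces):
        swapped.append({
            "maze": ex["maze"],      # query maze is unchanged
            "trace": foreign_trace,  # intermediate tokens become unrelated noise
            "plan": ex["plan"],      # the correct final plan is still the target
        })
    return swapped</pre></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"> Training on maze-trace-plan sequences built this way is what decouples trace semantics from answer accuracy: supervision on the final plan stays intact while the “reasoning” becomes noise. 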
</p></td></tr><tr><td align="center" valign="top" style="padding-bottom:20px;padding-left:28px;padding-right:28px;padding-top:20px; " class="dd"><table role="none" border="0" cellspacing="0" cellpadding="0" style="margin:0 auto 0 auto;"><tr><td align="center" valign="top" style="width:600px;"><img src="https://media.beehiiv.com/cdn-cgi/image/fit=scale-down,format=auto,onerror=redirect,quality=80/uploads/asset/file/bff4e90f-e61d-46da-841d-ce2b471029fd/image.png?t=1748357294" alt="" height="auto" width="600" style="display:block;width:100%;" border="0"/></td></tr></table></td></tr><tr><td id="beyond-the-illusion-of-reasoning-vi" class="dd" align="left" valign="top" style="color:#2A2A2A;font-weight:normal;padding:0px 28px;text-align:left;"><h3 style="color:#2A2A2A;font-weight:normal;mso-line-height-alt:125.0%;"><span style="color:rgb(67, 67, 67);">Beyond the Illusion of Reasoning via Chain of Thought</span></h3></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"> As you might have guessed by now, the results were striking: models trained on swapped traces achieved 51.6% plan validity on Wilson-generated mazes (vs. 50.1% for correct traces) and outperformed baselines on out-of-distribution tasks. For example, on “Drunkard’s Walk” mazes, the swapped-trace model solved 26% of tasks, compared to just 2.5% for the A*-trace model. Meanwhile, trace validity plummeted to 0%, confirming that the model’s success had little to do with executing A* semantics. </p></td></tr><tr><td align="center" valign="top" style="padding-bottom:20px;padding-left:28px;padding-right:28px;padding-top:20px; " class="dd"><table role="none" border="0" cellspacing="0" cellpadding="0" style="margin:0 auto 0 auto;"><tr><td align="center" valign="top" style="width:600px;"><img src="https://media.beehiiv.com/cdn-cgi/image/fit=scale-down,format=auto,onerror=redirect,quality=80/uploads/asset/file/85ccb310-8ece-40da-8862-961e3a5099bf/image.png?t=1748357322" alt="" height="auto" width="600" style="display:block;width:100%;" border="0"/></td></tr></table></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"> These findings challenge two assumptions: </p></td></tr><tr><td style="padding-bottom:12px;padding-left:50px;padding-right:40px;padding-top:12px;" class="ee"><div style="margin-left:0px;" class="edm_outlooklist"><ol start="1" style="list-style-type:decimal;margin:0px 0px;padding:0px 0px 0px 0px;"><li class="listItem ultext"><p style="mso-line-height-alt:150.0%;padding:0px;text-align:left;word-break:break-word;"><span style="font-weight:700;"><b>Anthropomorphization of CoT</b></span>: Intermediate tokens are not reliable indicators of human-like reasoning. Models can generate correct answers despite flawed or irrelevant traces. </p></li><li class="listItem ultext"><p style="mso-line-height-alt:150.0%;padding:0px;text-align:left;word-break:break-word;"><span style="font-weight:700;"><b>Trace semantics as a training target</b></span>: Enforcing algorithmically valid reasoning steps may be unnecessary (or even counterproductive) for maximizing performance. 
</p></li></ol></div></td></tr><tr><td align="center" valign="top" style="padding-bottom:14px;padding-left:28px;padding-right:28px;padding-top:14px;text-align:center;width:100%;word-break:break-word;" class="dd"><table role="none" border="0" cellspacing="0" cellpadding="0" align="center" style="margin:14px auto 14px auto;"><tr><td align="center" valign="middle" height="44.75" style="height:44.75px;background-color:#2C81E5;border-color:#DFD150;border-radius:10px 10px 10px 10px;border-style:solid;border-width:0px 0px 0px 0px;color:#FFFFFF;font-family:'Open Sans','Segoe UI','Apple SD Gothic Neo','Lucida Grande','Lucida Sans Unicode',sans-serif;"><a href="https://elink4f7.mail.bycloud.ai/ss/c/u001.fUNb4GdFo9D3F8WuLArtoV5sElgytBlvJRzI9WtI92Z1eidHAzD9xspssQRkEf7t4nUCLQ6thhVa81JU2AVGBO4pHsQsLj3Qcr8isunqHdZ6RuSaNMu6ziUqnlrz09xo/4gu/i3tAb-AfRv2ETmob543EHQ/h8/h001.fMHcgEeUTtirLsgidKgNiY0zTeZwsMaXSCbHbGTeINc" target="_blank" rel="noopener noreferrer nofollow" style="color:#FFFFFF;display:block;font-size:16px;font-size:16px;font-weight:normal;padding:0px 14px;padding:14px 14px 14px 14px;text-decoration:none;"> Read Full Paper </a></td></tr></table></td></tr><tr><td><table role="none" width="100%" border="0" cellspacing="0" cellpadding="0" style=""><tr><td bgcolor="#222222" style="background-color:#222222;padding:0.0px 0.0px 0.0px 0.0px;"><table role="none" width="100%" border="0" cellspacing="0" cellpadding="0"><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"></p></td></tr></table></td></tr></table></td></tr><tr><td id="reinforcement-learning-finetunes-sm" class="dd" align="left" valign="top" style="color:#2A2A2A;font-weight:Bold;padding:0px 28px;text-align:left;"><h2 style="color:#2A2A2A;font-weight:Bold;mso-line-height-alt:150.0%;">Reinforcement Learning Finetunes Small Subnetworks in Large Language Models</h2></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"><span style=""><i>Mukherjee et al. [University of Illinois Urbana-Champaign]</i></span></p></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"><span style="background-color:#e0e0e0;"><span style="color:rgb(255, 58, 58);font-size:0.6rem;"> ♥ 741 </span></span><span style="color:rgb(44, 129, 229);font-size:0.6rem;"> </span><span style="background-color:#e0e0e0;"><span style="color:rgb(44, 129, 229);font-size:0.6rem;"> Sparse LLMs </span></span><span style="color:rgb(44, 129, 229);font-size:0.6rem;"> </span></p></td></tr><tr><td id="the-mystery-of-sparse-updates-in-rl" class="dd" align="left" valign="top" style="color:#2A2A2A;font-weight:normal;padding:0px 28px;text-align:left;"><h3 style="color:#2A2A2A;font-weight:normal;mso-line-height-alt:125.0%;"><span style="color:rgb(67, 67, 67);">The Mystery of Sparse Updates in RL</span></h3></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"> When we think about training AI models, the assumption is often that more computation and more parameter updates lead to better performance. But what if the opposite were true? This study argues that RL fine-tuning often updates only 5-30% of a model’s parameters but still achieves results nearly identical to full fine-tuning. 
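</p></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"> To make “update sparsity” concrete, here is a minimal PyTorch sketch that compares two checkpoints of the same architecture; the helper name and tolerance are our illustrative assumptions, not the paper’s code: </p></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><pre style="font-family:Consolas,Monaco,'Courier New',monospace;font-size:13px;line-height:18px;color:#2D2D2D;background-color:#F1F1F1;border:1px solid #C0C0C0;border-radius:5px;padding:12px;overflow-x:auto;">import torch

def update_sparsity(model_before, model_after, tol=1e-8):
    """Fraction of parameters left (nearly) untouched by fine-tuning."""
    after = dict(model_after.named_parameters())
    unchanged, total = 0, 0
    for name, p_before in model_before.named_parameters():
        delta = (after[name].detach() - p_before.detach()).abs()
        unchanged += delta.le(tol).sum().item()  # parameters that did not move
        total += delta.numel()
    return unchanged / total  # ~0.70-0.95 corresponds to a 5-30% update rate</pre></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"> The complementary boolean masks (positions where the delta exceeds the tolerance) define the subnetwork itself, which is what the consistency and overlap observations below are computed on. 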
</p></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"> Remarkably, this sparsity emerges <span style=""><i>without</i></span> explicit regularization or architectural constraints. The researchers found that RL fine-tuning consistently modifies a small subset of parameters across diverse algorithms (PPO, DPO, GRPO, etc.) and model families (Llama, Tulu, DeepSeek). What’s even more intriguing is that freezing the majority of parameters and training only this “subnetwork” reproduces the performance of a fully fine-tuned model. </p></td></tr><tr><td align="center" valign="top" style="padding-bottom:20px;padding-left:28px;padding-right:28px;padding-top:20px; " class="dd"><table role="none" border="0" cellspacing="0" cellpadding="0" style="margin:0 auto 0 auto;"><tr><td align="center" valign="top" style="width:600px;"><img src="https://media.beehiiv.com/cdn-cgi/image/fit=scale-down,format=auto,onerror=redirect,quality=80/uploads/asset/file/fa55f885-41d1-4d61-97c1-583108463b0c/image.png?t=1748357385" alt="" height="auto" width="600" style="display:block;width:100%;" border="0"/></td></tr></table></td></tr><tr><td id="how-rl-discovers-and-uses-sparse-su" class="dd" align="left" valign="top" style="color:#2A2A2A;font-weight:normal;padding:0px 28px;text-align:left;"><h3 style="color:#2A2A2A;font-weight:normal;mso-line-height-alt:125.0%;"><span style="color:rgb(67, 67, 67);">How RL Discovers and Uses Sparse Subnetworks</span></h3></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"> The study reveals three core insights about how RL interacts with LLM parameters: </p></td></tr><tr><td style="padding-bottom:12px;padding-left:50px;padding-right:40px;padding-top:12px;" class="ee"><div style="margin-left:0px;" class="edm_outlooklist"><ol start="1" style="list-style-type:decimal;margin:0px 0px;padding:0px 0px 0px 0px;"><li class="listItem ultext"><p style="mso-line-height-alt:150.0%;padding:0px;text-align:left;word-break:break-word;"><span style="font-weight:700;"><b>Sparsity is Universal and Intrinsic: </b></span>The updates occur on a small subnetwork (5-30% of parameters) across 10 models and 7 RL algorithms, while the rest remain nearly unchanged. This sparsity isn’t confined to specific layers or components, as nearly all parameter matrices receive sparse, scattered updates. </p></li></ol></div></td></tr><tr><td style="padding-bottom:12px;padding-left:50px;padding-right:40px;padding-top:12px;" class="ee"><div style="margin-left:0px;" class="edm_outlooklist"><ol start="2" style="list-style-type:decimal;margin:0px 0px;padding:0px 0px 0px 0px;"><li class="listItem ultext"><p style="mso-line-height-alt:150.0%;padding:0px;text-align:left;word-break:break-word;"><span style="font-weight:700;"><b>Subnetworks Are Consistent Across Training Conditions: </b></span>When training with different random seeds, datasets, or RL methods, the updated parameters overlap significantly more than chance. For example, subnetworks trained with DPO and PRIME (two distinct algorithms) share ~59% of their updated parameters. This suggests pretrained LLMs contain a latent, reusable structure that RL selectively activates, regardless of external variables. 
</p></li></ol></div></td></tr><tr><td style="padding-bottom:12px;padding-left:50px;padding-right:40px;padding-top:12px;" class="ee"><div style="margin-left:0px;" class="edm_outlooklist"><ol start="3" style="list-style-type:decimal;margin:0px 0px;padding:0px 0px 0px 0px;"><li class="listItem ultext"><p style="mso-line-height-alt:150.0%;padding:0px;text-align:left;word-break:break-word;"><span style="font-weight:700;"><b>Updates Are Sparse but Full-Rank: </b></span>Despite their sparsity, the parameter changes are <span style=""><i>full-rank</i></span>, meaning they span the entire subspace of possible updates for each matrix. This contrasts with methods like LoRA, which constrain updates to low-rank subspaces. In other words, RL fine-tuning isn’t just pruning redundant parameters; it’s identifying a compact set of weights that can represent nearly any adjustment the model needs. </p></li></ol></div></td></tr><tr><td align="center" valign="top" style="padding-bottom:20px;padding-left:28px;padding-right:28px;padding-top:20px; " class="dd"><table role="none" border="0" cellspacing="0" cellpadding="0" style="margin:0 auto 0 auto;"><tr><td align="center" valign="top" style="width:600px;"><img src="https://media.beehiiv.com/cdn-cgi/image/fit=scale-down,format=auto,onerror=redirect,quality=80/uploads/asset/file/3e9e833b-0c7f-40e7-b9a8-f7190dbe28a0/image.png?t=1748357412" alt="" height="auto" width="600" style="display:block;width:100%;" border="0"/></td></tr></table></td></tr><tr><td id="benchmark-performance-and-implicati" class="dd" align="left" valign="top" style="color:#2A2A2A;font-weight:normal;padding:0px 28px;text-align:left;"><h3 style="color:#2A2A2A;font-weight:normal;mso-line-height-alt:125.0%;"><span style="color:rgb(67, 67, 67);">Benchmark performance and Implications</span></h3></td></tr><tr><td style="padding-bottom:12px;padding-left:50px;padding-right:40px;padding-top:12px;" class="ee"><div style="margin-left:0px;" class="edm_outlooklist"><ul style="font-weight:normal;list-style-type:disc;margin-bottom:12px !important;margin-top:12px !important;padding:0px 0px 0px 0px;"><li class="listItem ultext"><p style="mso-line-height-alt:150.0%;padding:0px;text-align:left;word-break:break-word;"><span style="font-weight:700;"><b>Performance Parity</b></span>: Training only the identified subnetwork matches or slightly exceeds the accuracy of full fine-tuning. For example, on MATH500, a subnetwork-trained PRIME model outperformed its fully fine-tuned counterpart by +2.4% overall, with gains up to +5.2% on harder problems. </p></li><li class="listItem ultext"><p style="mso-line-height-alt:150.0%;padding:0px;text-align:left;word-break:break-word;"><span style="font-weight:700;"><b>Parameter Replication</b></span>: Subnetwork-trained models converge to nearly identical parameter values as their fully fine-tuned counterparts (94-100% similarity, depending on numerical tolerance). </p></li><li class="listItem ultext"><p style="mso-line-height-alt:150.0%;padding:0px;text-align:left;word-break:break-word;"><span style="font-weight:700;"><b>Efficiency Insights</b></span>: Sparsity correlates with training on “in-distribution” data (samples close to the model’s current policy). Techniques like KL regularization or gradient clipping—often thought to stabilize training—have minimal impact on sparsity. 
</p></li></ul></div></td></tr><tr><td align="center" valign="top" style="padding-bottom:20px;padding-left:28px;padding-right:28px;padding-top:20px; " class="dd"><table role="none" border="0" cellspacing="0" cellpadding="0" style="margin:0 auto 0 auto;"><tr><td align="center" valign="top" style="width:600px;"><img src="https://media.beehiiv.com/cdn-cgi/image/fit=scale-down,format=auto,onerror=redirect,quality=80/uploads/asset/file/5f2923b2-04ac-46a1-aca1-7b2a61b9f2f8/image.png?t=1748357455" alt="" height="auto" width="600" style="display:block;width:100%;" border="0"/></td></tr></table></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"> The implications of these findings: </p></td></tr><tr><td style="padding-bottom:12px;padding-left:50px;padding-right:40px;padding-top:12px;" class="ee"><div style="margin-left:0px;" class="edm_outlooklist"><ul style="font-weight:normal;list-style-type:disc;margin-bottom:12px !important;margin-top:12px !important;padding:0px 0px 0px 0px;"><li class="listItem ultext"><p style="mso-line-height-alt:150.0%;padding:0px;text-align:left;word-break:break-word;"><span style="font-weight:700;"><b>Reduced Compute Costs</b></span>: Early identification of subnetworks could drastically cut RL training time and memory usage. </p></li><li class="listItem ultext"><p style="mso-line-height-alt:150.0%;padding:0px;text-align:left;word-break:break-word;"><span style="font-weight:700;"><b>Preserved Pretrained Knowledge</b></span>: By updating fewer parameters, RL may avoid overwriting valuable pretrained capabilities, addressing a common concern in fine-tuning. </p></li></ul></div></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"><span style="font-weight:700;"><b>Algorithm Design</b></span>: Future RL methods could explicitly target sparse updates, combining the benefits of full-rank adjustments with parameter efficiency. 
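</p></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"> As a rough sketch of what explicitly targeting sparse updates could look like, the snippet below zeroes gradients outside a fixed boolean mask per parameter, so only the subnetwork moves; the hook-based approach is our illustration, not the paper’s implementation: </p></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><pre style="font-family:Consolas,Monaco,'Courier New',monospace;font-size:13px;line-height:18px;color:#2D2D2D;background-color:#F1F1F1;border:1px solid #C0C0C0;border-radius:5px;padding:12px;overflow-x:auto;">import torch

def train_only_subnetwork(model, masks):
    # masks maps parameter names to boolean tensors of the same shape,
    # e.g. derived by comparing checkpoints as in the sparsity sketch above.
    for name, param in model.named_parameters():
        keep = masks[name].to(device=param.device, dtype=param.dtype)
        # zero the gradient outside the subnetwork on every backward pass
        param.register_hook(lambda grad, keep=keep: grad * keep)</pre></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"> Assuming no weight decay, parameters outside the mask then stay exactly at their pretrained values, which is the behavior the paper exploits when a subnetwork-only run reproduces full fine-tuning. 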
</p></td></tr><tr><td align="center" valign="top" style="padding-bottom:14px;padding-left:28px;padding-right:28px;padding-top:14px;text-align:center;width:100%;word-break:break-word;" class="dd"><table role="none" border="0" cellspacing="0" cellpadding="0" align="center" style="margin:14px auto 14px auto;"><tr><td align="center" valign="middle" height="44.75" style="height:44.75px;background-color:#2C81E5;border-color:#DFD150;border-radius:10px 10px 10px 10px;border-style:solid;border-width:0px 0px 0px 0px;color:#FFFFFF;font-family:'Open Sans','Segoe UI','Apple SD Gothic Neo','Lucida Grande','Lucida Sans Unicode',sans-serif;"><a href="https://elink4f7.mail.bycloud.ai/ss/c/u001.fUNb4GdFo9D3F8WuLArtoV5sElgytBlvJRzI9WtI92YEO3thTykroC6uxNk9tR1k5HKJiKP_RV95qIKobUxBAAQfFFjfM0fbtZg5WDvxurSJKVpcdjuis7nQYemWSKkk/4gu/i3tAb-AfRv2ETmob543EHQ/h9/h001.m57Q3O4A9UYGdJU9UfQSujmlnp7H2MsYPf80DoboyU0" target="_blank" rel="noopener noreferrer nofollow" style="color:#FFFFFF;display:block;font-size:16px;font-size:16px;font-weight:normal;padding:0px 14px;padding:14px 14px 14px 14px;text-decoration:none;"> Read Full Paper </a></td></tr></table></td></tr><tr><td><table role="none" width="100%" border="0" cellspacing="0" cellpadding="0" style=""><tr><td bgcolor="#222222" style="background-color:#222222;padding:0.0px 0.0px 0.0px 0.0px;"><table role="none" width="100%" border="0" cellspacing="0" cellpadding="0"><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"></p></td></tr></table></td></tr></table></td></tr><tr><td id="m-ma-da-multimodal-large-diffusion-" class="dd" align="left" valign="top" style="color:#2A2A2A;font-weight:Bold;padding:0px 28px;text-align:left;"><h2 style="color:#2A2A2A;font-weight:Bold;mso-line-height-alt:150.0%;">MMaDA: Multimodal Large Diffusion Language Models</h2></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"><span style=""><i>Yang et al. 
[Princeton University, Peking University, Tsinghua University, ByteDance Seed]</i></span></p></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"><span style="background-color:#e0e0e0;"><span style="color:rgb(255, 58, 58);font-size:0.6rem;"> ♥ 393 </span></span><span style="color:rgb(44, 129, 229);font-size:0.6rem;"> </span><span style="background-color:#e0e0e0;"><span style="color:rgb(44, 129, 229);font-size:0.6rem;"> Multimodal Diffusion LM </span></span><span style="color:rgb(44, 129, 229);font-size:0.6rem;"> </span><span style="background-color:#e0e0e0;"><span style="color:rgb(44, 129, 229);font-size:0.6rem;"> bycloud’s pick </span></span><span style="color:rgb(44, 129, 229);font-size:0.6rem;"> </span></p></td></tr><tr><td align="center" valign="top" style="padding-bottom:20px;padding-left:28px;padding-right:28px;padding-top:20px; " class="dd"><table role="none" border="0" cellspacing="0" cellpadding="0" style="margin:0 auto 0 auto;"><tr><td align="center" valign="top" style="width:600px;"><img src="https://media.beehiiv.com/cdn-cgi/image/fit=scale-down,format=auto,onerror=redirect,quality=80/uploads/asset/file/8dfa7ed6-e6f4-466b-ab61-846a18006367/example_compare.png?t=1748357693" alt="" height="auto" width="600" style="display:block;width:100%;" border="0"/></td></tr></table></td></tr><tr><td id="introduction-to-m-ma-da" class="dd" align="left" valign="top" style="color:#2A2A2A;font-weight:normal;padding:0px 28px;text-align:left;"><h3 style="color:#2A2A2A;font-weight:normal;mso-line-height-alt:125.0%;"><span style="color:rgb(67, 67, 67);">Introduction to MMaDA</span></h3></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"> When we think of super strong AI, we might imagine a single model that can solve complex math problems, explain visual diagrams, and generate photorealistic images. This has become easier with the rise of multimodal foundation models, which aim to unify language, vision, and other modalities under one architecture. </p></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"> However, a critical gap remains: most existing models struggle to balance <span style=""><i>post-training refinement</i></span> (like reinforcement learning) with the demands of diverse tasks, leading to trade-offs between reasoning accuracy, factual consistency, and generative quality. </p></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"> This paper introduces <span style="font-weight:700;"><b>MMaDA</b></span> (Multimodal Masked Diffusion Architecture), a framework designed to harmonize these objectives by reimagining diffusion models as unified solvers for both discrete (text) and continuous (image) data. 
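</p></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"> To give a flavor of that unified objective, here is a toy PyTorch sketch of one masked-diffusion training step over discrete tokens (text or tokenized image patches alike); <span style=""><i>predictor</i></span> and <span style=""><i>mask_id</i></span> are placeholders, not MMaDA’s actual API: </p></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><pre style="font-family:Consolas,Monaco,'Courier New',monospace;font-size:13px;line-height:18px;color:#2D2D2D;background-color:#F1F1F1;border:1px solid #C0C0C0;border-radius:5px;padding:12px;overflow-x:auto;">import torch
import torch.nn.functional as F

def masked_diffusion_step(predictor, tokens, mask_id):
    # tokens: LongTensor of shape (batch, seq) holding text or image token ids
    ratio = torch.rand(())  # sample a masking ratio for this step
    hide = torch.rand(tokens.shape, device=tokens.device).lt(ratio)
    corrupted = torch.where(hide, torch.full_like(tokens, mask_id), tokens)
    logits = predictor(corrupted)  # (batch, seq, vocab)
    # unified cross-entropy: score only the masked positions, any modality
    return F.cross_entropy(logits[hide], tokens[hide])</pre></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"> Because the same corrupt-and-reconstruct loss applies to every modality, one set of weights learns textual logic and visual semantics jointly; UniGRPO, described below, reuses this masking mechanism at varied ratios to obtain reward signals across denoising depths. 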
</p></td></tr><tr><td align="center" valign="top" style="padding-bottom:20px;padding-left:28px;padding-right:28px;padding-top:20px; " class="dd"><table role="none" border="0" cellspacing="0" cellpadding="0" style="margin:0 auto 0 auto;"><tr><td align="center" valign="top" style="width:600px;"><img src="https://media.beehiiv.com/cdn-cgi/image/fit=scale-down,format=auto,onerror=redirect,quality=80/uploads/asset/file/5d42630d-8a9d-4613-8f32-52a89ff0dea5/image.png?t=1748357499" alt="" height="auto" width="600" style="display:block;width:100%;" border="0"/></td></tr><tr><td align="center" valign="top" class="t" style="width:600px; padding: 4px 0px 4px 0px;"><p>Specific design choices employed by different unified multimodal foundation model families.</p></td></tr></table></td></tr><tr><td id="how-m-ma-da-works" class="dd" align="left" valign="top" style="color:#2A2A2A;font-weight:normal;padding:0px 28px;text-align:left;"><h3 style="color:#2A2A2A;font-weight:normal;mso-line-height-alt:125.0%;"><span style="color:rgb(67, 67, 67);">How MMaDA Works</span></h3></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"> There are three main innovations which make the MMaDA model possible. </p></td></tr><tr><td style="padding-bottom:12px;padding-left:50px;padding-right:40px;padding-top:12px;" class="ee"><div style="margin-left:0px;" class="edm_outlooklist"><ol start="1" style="list-style-type:decimal;margin:0px 0px;padding:0px 0px 0px 0px;"><li class="listItem ultext"><p style="mso-line-height-alt:150.0%;padding:0px;text-align:left;word-break:break-word;"><span style="font-weight:700;"><b>Unified Diffusion Architecture</b></span><br>The MMaDA treats text and images as sequences of discrete tokens processed through a shared diffusion pipeline. Unlike traditional hybrid models that use separate components for different modalities, MMaDA uses a single masked token predictor. This predictor learns to reconstruct corrupted inputs (whether text snippets or image patches) using a unified cross-entropy loss. By aligning the noise-corruption process across modalities during pretraining, the model builds an intrinsic understanding of how textual logic and visual semantics interconnect. </p></li><li class="listItem ultext"><p style="mso-line-height-alt:150.0%;padding:0px;text-align:left;word-break:break-word;"><span style="font-weight:700;"><b>Mixed Long Chain-of-Thought Fine-Tuning</b></span><br>To enhance reasoning, MMaDA introduces <span style=""><i>mixed long chain-of-thought (CoT) fine-tuning</i></span>. Here, the model generates step-by-step explanations (e.g., solving a geometry problem) or factual rationales (e.g., describing why a landmark is culturally significant) before producing a final answer or image. These CoT traces follow a standardized format, allowing the model to generalize across tasks. For instance, when generating an image of the Statue of Liberty, MMaDA first infers its historical context (“gifted by France”) and then synthesizes visuals aligned with that knowledge. </p></li><li class="listItem ultext"><p style="mso-line-height-alt:150.0%;padding:0px;text-align:left;word-break:break-word;"><span style="font-weight:700;"><b>Unified Reinforcement Learning with UniGRPO</b></span><br>The final piece is <span style="font-weight:700;"><b>UniGRPO</b></span>, which is a reinforcement learning algorithm tailored for diffusion models. 
Traditional RL methods falter with diffusion’s iterative denoising process, but UniGRPO cleverly samples diverse “masking ratios” during training. By partially masking outputs (like hiding 25% of tokens in a math solution or image captions) and rewarding accurate reconstructions, the model learns to balance correctness, formatting, and human preferences. This approach unifies rewards for text reasoning, multimodal understanding, and image generation, avoiding the need for task-specific tuning. </p></li></ol></div></td></tr><tr><td align="center" valign="top" style="padding-bottom:20px;padding-left:28px;padding-right:28px;padding-top:20px; " class="dd"><table role="none" border="0" cellspacing="0" cellpadding="0" style="margin:0 auto 0 auto;"><tr><td align="center" valign="top" style="width:600px;"><img src="https://media.beehiiv.com/cdn-cgi/image/fit=scale-down,format=auto,onerror=redirect,quality=80/uploads/asset/file/1e85ed36-e229-4f37-834d-4fc9053ea301/image.png?t=1748357641" alt="" height="auto" width="600" style="display:block;width:100%;" border="0"/></td></tr></table></td></tr><tr><td id="results-and-implications-of-m-ma-da" class="dd" align="left" valign="top" style="color:#2A2A2A;font-weight:normal;padding:0px 28px;text-align:left;"><h3 style="color:#2A2A2A;font-weight:normal;mso-line-height-alt:125.0%;"><span style="color:rgb(67, 67, 67);">Results and Implications of MMaDA</span></h3></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"> The MMaDA-8B model outperforms specialized models on three tests: </p></td></tr><tr><td style="padding-bottom:12px;padding-left:50px;padding-right:40px;padding-top:12px;" class="ee"><div style="margin-left:0px;" class="edm_outlooklist"><ul style="font-weight:normal;list-style-type:disc;margin-bottom:12px !important;margin-top:12px !important;padding:0px 0px 0px 0px;"><li class="listItem ultext"><p style="mso-line-height-alt:150.0%;padding:0px;text-align:left;word-break:break-word;"><span style="font-weight:700;"><b>Textual Reasoning</b></span>: Surpasses LLaMA-3-7B and Qwen2-7B on math benchmarks like GSM8K (73.4% accuracy vs. 53.1% for LLaMA-3). </p></li><li class="listItem ultext"><p style="mso-line-height-alt:150.0%;padding:0px;text-align:left;word-break:break-word;"><span style="font-weight:700;"><b>Multimodal Understanding</b></span>: Achieves 76.7% on VQAv2, eclipsing Show-o (69.4%) and SEED-X (47.9%). </p></li><li class="listItem ultext"><p style="mso-line-height-alt:150.0%;padding:0px;text-align:left;word-break:break-word;"><span style="font-weight:700;"><b>Image Generation</b></span>: Scores 32.46 CLIP points and 1.15 Image Reward, outperforming SDXL (32.12 CLIP) and Janus (1.03 Image Reward). 
<tr><td align="center" valign="top" style="padding-bottom:20px;padding-left:28px;padding-right:28px;padding-top:20px; " class="dd"><table role="none" border="0" cellspacing="0" cellpadding="0" style="margin:0 auto 0 auto;"><tr><td align="center" valign="top" style="width:600px;"><img src="https://media.beehiiv.com/cdn-cgi/image/fit=scale-down,format=auto,onerror=redirect,quality=80/uploads/asset/file/1e85ed36-e229-4f37-834d-4fc9053ea301/image.png?t=1748357641" alt="" height="auto" width="600" style="display:block;width:100%;" border="0"/></td></tr></table></td></tr><tr><td id="results-and-implications-of-m-ma-da" class="dd" align="left" valign="top" style="color:#2A2A2A;font-weight:normal;padding:0px 28px;text-align:left;"><h3 style="color:#2A2A2A;font-weight:normal;mso-line-height-alt:125.0%;"><span style="color:rgb(67, 67, 67);">Results and Implications of MMaDA</span></h3></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"> The MMaDA-8B model outperforms specialized models across three task categories: </p></td></tr><tr><td style="padding-bottom:12px;padding-left:50px;padding-right:40px;padding-top:12px;" class="ee"><div style="margin-left:0px;" class="edm_outlooklist"><ul style="font-weight:normal;list-style-type:disc;margin-bottom:12px !important;margin-top:12px !important;padding:0px 0px 0px 0px;"><li class="listItem ultext"><p style="mso-line-height-alt:150.0%;padding:0px;text-align:left;word-break:break-word;"><span style="font-weight:700;"><b>Textual Reasoning</b></span>: Surpasses LLaMA-3-7B and Qwen2-7B on math benchmarks such as GSM8K (73.4% accuracy vs. 53.1% for LLaMA-3). </p></li><li class="listItem ultext"><p style="mso-line-height-alt:150.0%;padding:0px;text-align:left;word-break:break-word;"><span style="font-weight:700;"><b>Multimodal Understanding</b></span>: Achieves 76.7% on VQAv2, eclipsing Show-o (69.4%) and SEED-X (47.9%). </p></li><li class="listItem ultext"><p style="mso-line-height-alt:150.0%;padding:0px;text-align:left;word-break:break-word;"><span style="font-weight:700;"><b>Image Generation</b></span>: Posts a 32.46 CLIP score and a 1.15 ImageReward, outperforming SDXL (32.12 CLIP) and Janus (1.03 ImageReward). </p></li></ul></div></td></tr><tr><td align="center" valign="top" style="padding-bottom:20px;padding-left:28px;padding-right:28px;padding-top:20px; " class="dd"><table role="none" border="0" cellspacing="0" cellpadding="0" style="margin:0 auto 0 auto;"><tr><td align="center" valign="top" style="width:600px;"><img src="https://media.beehiiv.com/cdn-cgi/image/fit=scale-down,format=auto,onerror=redirect,quality=80/uploads/asset/file/f021b0f9-fbe1-401c-82d2-459290eead3f/image.png?t=1748357752" alt="" height="auto" width="600" style="display:block;width:100%;" border="0"/></td></tr><tr><td align="center" valign="top" class="t" style="width:600px; padding: 4px 0px 4px 0px;"><p>Evaluation on Multimodal Understanding Benchmarks</p></td></tr></table></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"> Although these results are promising, MMaDA relies on discrete tokenization, which introduces computational overhead, and its CoT traces occasionally drift into verbose reasoning. </p></td></tr><tr><td align="center" valign="top" style="padding-bottom:14px;padding-left:28px;padding-right:28px;padding-top:14px;text-align:center;width:100%;word-break:break-word;" class="dd"><table role="none" border="0" cellspacing="0" cellpadding="0" align="center" style="margin:14px auto 14px auto;"><tr><td align="center" valign="middle" height="44.75" style="height:44.75px;background-color:#2C81E5;border-color:#DFD150;border-radius:10px 10px 10px 10px;border-style:solid;border-width:0px 0px 0px 0px;color:#FFFFFF;font-family:'Open Sans','Segoe UI','Apple SD Gothic Neo','Lucida Grande','Lucida Sans Unicode',sans-serif;"><a href="https://elink4f7.mail.bycloud.ai/ss/c/u001.fUNb4GdFo9D3F8WuLArtoV5sElgytBlvJRzI9WtI92aPDbEhzLZA9vkHBEjxy8fGHSkPIR6xjbYE6yi_BYVgsw2i7KLknED3vTRHKKMiH2H-5RTSLssP2VNJ5ki9GR-S/4gu/i3tAb-AfRv2ETmob543EHQ/h10/h001.DhoiwdD23FTUKobaOD5D8fE_hTSvpZJwbzpPG80YKw8" target="_blank" rel="noopener noreferrer nofollow" style="color:#FFFFFF;display:block;font-size:16px;font-size:16px;font-weight:normal;padding:0px 14px;padding:14px 14px 14px 14px;text-decoration:none;"> Read Full Paper </a></td></tr></table></td></tr><tr><td class="dd" align="center" valign="top" style="padding:20px;"><a href="https://elink4f7.mail.bycloud.ai/ss/c/u001.amatuKKICSickUKplYJXmG-FNB0qrv5ZIj7K3on2sYTk78uSDNPdDW7yxOVYmVDibTF40umV0F3VRoeKmvdCzQQexhx2ue1pGL88aGkXqHwA6CiSOo8TI5q8m1WY9XP6/4gu/i3tAb-AfRv2ETmob543EHQ/h11/h001._hv0YH2N3qA3HX8shc79RUOioSYFJvX3zmsatuiQxHM" style="text-decoration:none;"><table align="center" width="100%" cellpadding="0" cellspacing="0" border="0" role="none" style="max-width:520px;margin:0 auto;"><tr><td class="p" width="100%" style="padding:2px;border:none;"><table width="100%" cellpadding="0" cellspacing="0" border="0" role="none"><tr><td align="center" valign="top" style="width:100%;"><div style="max-height:0;position:relative;opacity:0.999;width:100%;mso-hide:all;"><div style="display:inline-block;width:100%;padding-top:25%;"><img width="20%" height="auto" loading="lazy" alt="" style="border:0;" src="https://media.beehiiv.com/cdn-cgi/image/fit=scale-down,format=auto,onerror=redirect,quality=80/static_assets/youtube_play_icon.png"/></div></div><a href="https://elink4f7.mail.bycloud.ai/ss/c/u001.amatuKKICSickUKplYJXmG-FNB0qrv5ZIj7K3on2sYS745wotuj3ji7U6NHzqKlFyfksPrbwmw2b77TyZTk7poBJzS0TFMolmJsVl6CqwDYB5l6F_um6cM-EoU0xEm0t/4gu/i3tAb-AfRv2ETmob543EHQ/h12/h001.SrtWFtFRwYOZVELxGag_10SnatmVpqfL6KhpdCQVwKo" 
style="text-decoration:none;"><img src="https://i.ytimg.com/vi/4BhZZYg2_J4/maxresdefault.jpg" width="480" height="auto" loading="lazy" alt="YouTube video by bycloud" style="display:block;height:auto;border:0;outline:none;text-decoration:none;background-color:#000000;width:100%;"/></a></td></tr><tr><td><p style="font-size:12px;font-weight:500;font-style:italic;font-family:Helvetica, Calibri, sans-serif;color: #686a6d; padding-top:0 !important;padding-bottom:6px !important; padding-left:4px !important;"> Multi-Token Prediction: The LLM Performance Boost No One Talks About </p></td></tr></table></td></tr></table></a></td></tr><tr><td align="center" valign="top"><table role="none" width="100%" border="0" cellspacing="0" cellpadding="0" align="center"><tr><td align="center" valign="top" style="font-size:0px;line-height:0px;padding:30px 0px 30px;" class="dd"><table class="j" role="none" width="50%" border="0" cellspacing="0" cellpadding="0" align="center"><tr><td> </td></tr></table></td></tr><tr><td class="dd" align="left" valign="top" style="color:#2A2A2A;font-weight:Bold;padding:0px 28px;text-align:left;"><h2 style="color:#2A2A2A;font-weight:Bold;mso-line-height-alt:150.0%;">Share The AI Timeline</h2></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"> You currently have <strong>0</strong> referrals. </p></td></tr><tr><td align="left" valign="top" style="padding-bottom:20px;padding-left:28px;padding-right:28px;padding-top:20px; display:none;width:0px;max-height:0px;overflow:hidden;mso-hide:all;height:0;font-size:0;max-height:0;line-height:0;margin:0 auto;" class="dd"><table role="none" border="0" cellspacing="0" cellpadding="0" style="margin:0 auto 0 0;"><tr><td align="center" valign="top" style="width:300px;"><a href="https://elink4f7.mail.bycloud.ai/ss/c/u001.c6q0w4g5sodbtO4I1B_pxWc4htTObwdorovK0nFHVH-4pUdVE0ELYH5DsNemk732SjNwhPNJ25r0O8B5vYifsGNUqyW5TiZkyMsF1yreu0byy2KW36J1wDdpoLuXg2TU1F1OW8OHoHaU4-ZmrZpPU4RN-crQCEimD190CSn9fPuxpIRojBJyu1VfV5KtQD3QMVdSg2JrjEj5-xm4r4E12Whf08itqPCb9Q5W0X4rt3ubYkqCmWnLeZpmb3_RZcbIk0UE5wZnFLCQJHLFs0qZ0OGpXp89o1HU4mWIBur5Or4tQGm5M_Y8m5PvTEfYfxLRyrcRv7GyVs5oLtFfiySZ2SqtZypLA-h50h61p0uPiA7iA_PiMqlVLtM-87XL33VZi05_O3UTpWE_0nAzFRJ4TW1ayz3_vn4Zlp9IERdbnnAd_1kPLD4lAQcR5PRXgtpC-rvjtsNZ-WBg8R14Wm3y1sMDJsdcKejA4ZASWzvIzIN9KS8uQVesa1hdGepUQ438kibseHi0xTTyqjkjdkbQCe5Ip0bj2a_J8_Ub5icrexjwgcP6fzB42LU0PFQzQJCmqUa5MYf7G1wfcDCway25faBZ5BIzQjD-wCHsemYDe4TuuyF5X5UKPNkUBW4_jHHG/4gu/i3tAb-AfRv2ETmob543EHQ/h13/h001.1c1rWbywmsRbWNOxr0_uMiLCsgPNGKLEBKA-BRCKhbk" rel="noopener noreferrer nofollow" style="text-decoration:none;" target="_blank"><img src="" alt="" height="auto" width="300" style="display:block;width:100%;" border="0"/></a></td></tr></table></td></tr><tr><td align="left" valign="top" style="padding-bottom:14px;padding-left:28px;padding-right:28px;padding-top:14px;text-align:left;width:100%;word-break:break-word;" class="dd"><table role="none" border="0" cellspacing="0" cellpadding="0" align="left" style="margin:14px auto 14px auto;"><tr><td align="center" valign="middle" height="44.75" style="height:44.75px;background-color:#2C81E5;border-color:#DFD150;border-radius:10px 10px 10px 10px;border-style:solid;border-width:0px 0px 0px 0px;color:#FFFFFF;font-family:'Open Sans','Segoe UI','Apple SD Gothic Neo','Lucida Grande','Lucida Sans Unicode',sans-serif;"><a 
href="https://elink4f7.mail.bycloud.ai/ss/c/u001.c6q0w4g5sodbtO4I1B_pxWc4htTObwdorovK0nFHVH-4pUdVE0ELYH5DsNemk732SjNwhPNJ25r0O8B5vYifsGNUqyW5TiZkyMsF1yreu0byy2KW36J1wDdpoLuXg2TU1F1OW8OHoHaU4-ZmrZpPU4RN-crQCEimD190CSn9fPuxpIRojBJyu1VfV5KtQD3QMVdSg2JrjEj5-xm4r4E12Whf08itqPCb9Q5W0X4rt3ubYkqCmWnLeZpmb3_RZcbIk0UE5wZnFLCQJHLFs0qZ0OGpXp89o1HU4mWIBur5Or4tQGm5M_Y8m5PvTEfYfxLRyrcRv7GyVs5oLtFfiySZ2SqtZypLA-h50h61p0uPiA7iA_PiMqlVLtM-87XL33VZi05_O3UTpWE_0nAzFRJ4TW1ayz3_vn4Zlp9IERdbnnAd_1kPLD4lAQcR5PRXgtpC-rvjtsNZ-WBg8R14Wm3y1sMDJsdcKejA4ZASWzvIzIN9KS8uQVesa1hdGepUQ438kibseHi0xTTyqjkjdkbQCe5Ip0bj2a_J8_Ub5icrexjwgcP6fzB42LU0PFQzQJCmqUa5MYf7G1wfcDCway25faBZ5BIzQjD-wCHsemYDe4TuuyF5X5UKPNkUBW4_jHHG/4gu/i3tAb-AfRv2ETmob543EHQ/h14/h001.lVsYSq3hQiH7jhSP-JAGnVHAaeBJVFhEp1secYRRjVg" target="_blank" rel="noopener noreferrer nofollow" style="color:#FFFFFF;display:block;font-size:16px;font-size:16px;font-weight:normal;padding:0px 14px;padding:14px 14px 14px 14px;text-decoration:none;"> Click to Share </a></td></tr></table></td></tr><tr><td class="dd" align="left" style="padding:0px 28px;text-align:left;word-break:break-word;"><p style="mso-line-height-alt:150.0%;"> Or copy and paste this link to others: <a class="link" href="https://mail.bycloud.ai/subscribe?ref=6SqUHb8KiF&_bhlid=bf7a73b936aab597b0df9777ef50b28c5a049d32" target="_blank" rel="noopener noreferrer nofollow" clicktracking="off"><span>https://mail.bycloud.ai/subscribe?ref=6SqUHb8KiF</span></a></p></td></tr><tr><td align="center" valign="top" style="font-size:0px;line-height:0px;padding:30px 0px 30px;" class="dd"><table class="j" role="none" width="50%" border="0" cellspacing="0" cellpadding="0" align="center"><tr><td> </td></tr></table></td></tr></table></td></tr><tr><td class="dd" style="padding: 20px;"><table width="100%" cellpadding="0" cellspacing="0" role="none" style="max-width:520px;margin:0 auto;"><tr><td class="q" style="padding:16px 16px 6px 16px;"><a href="https://elink4f7.mail.bycloud.ai/ss/c/u001.tLfGW26lAwaS9gFg17HSoDDFT6eh5Nsg0xYVQj-h6I3o9m2k79_qw4izMYhmcI36l_1Kwi6uTfNuZoKinF6nDOa4fNy3KSvh2D84gjeVz9IVA-B058vZLveNeTCIjF-TBPOy8FWpea-R5aYXqIew-uqLMX3NJ4MPrrGzHu6Buvw/4gu/i3tAb-AfRv2ETmob543EHQ/h15/h001.kBhaYuNpuXbKLklSRi0RBlnqs_anYGbQxbUiclTTQpY" style="text-decoration:none !important;"><table width="100%" cellpadding="0" cellspacing="0" border="0" role="none"><tr><td width="100%" style="padding: 0 0 14px 0;text-decoration:none;width:100%;"><table width="100%" cellpadding="0" cellspacing="0" border="0" role="none"><tr><td width="36" style="width:36px;"><img src="https://pbs.twimg.com/profile_images/1698572487909400576/BvncwnrP_normal.jpg" alt="tw profile: The AI Timeline" style="display:block;width:36px;height:36px;border-radius:50%;border:0;"/></td><td width="400" style="padding:0 0 0 8px;text-decoration:none;"><span style="display:block;font-size:14px;color:#1c2022;font-weight:700;"> The AI Timeline </span><span style="display:block;color:#697882;font-size:14px;"> @TheAITimeline </span></td><td width="24" align="right" style="vertical-align:text-top;"><img width="24" height="24" loading="lazy" alt="tw" style="border:0;" src="https://media.beehiiv.com/cdn-cgi/image/fit=scale-down,format=auto,onerror=redirect,quality=80/static_assets/x_logo.png"/></td></tr></table></td></tr><tr></tr><tr><td style="word-break:break-word;"><p>🚨This week's top AI/ML research papers:</p><p>- Claude 4 System Card <br>- Emerging Properties in Unified Multimodal Pretraining <br>- dKV-Cache <br>- Thinkless <br>- GUI-G1 <br>- Scaling Diffusion Transformers Efficiently 
via μP <br>- Faster Video Diffusion with Trainable Sparse Attention <br>- Model Merging</p></td></tr><tr><td style="padding:12px 0 0 0;"></td></tr><tr><td align="center" style="padding:8px 0 0 0;width:480px;"><img src="https://pbs.twimg.com/media/Gr14vCTXIAA3hO8.jpg" width="480" height="auto" style="display:block;border:1px solid #E1E8ED;border-radius:5px;width:100%;max-width:480px;height:auto;"/></td></tr><tr><td height="8" style="line-height:1px;font-size:1px;height:8px;"> </td></tr><tr><td align="left" valign="top" class="s"><p>2:37 AM • May 26, 2025</p></td></tr><tr><td height="10" style="line-height: 1px; font-size: 1px; height: 10px;"> </td></tr><tr><td height="1" bgcolor="#e1e8ed" style="line-height:0px;font-size:0px;height:1px;"></td></tr><tr><td height="10" style="line-height:1px;font-size:1px;height:10px;"> </td></tr><tr><td align="left" valign="top" class="s"><p><b style="color:#1C2022">736</b> Likes <b style="color:#1C2022">77</b> Retweets </p></td></tr><tr><td align="left" valign="top" class="s"><div align="center" style="text-align:center;margin-top:4px;margin-bottom:4px;padding:8px;border:1px solid #ccd6dd;border-radius:9999px;color:#1B95E0"><b>4 Replies</b></div></td></tr></table></a></td></tr></table></td></tr></table></td></tr></table></td></tr><tr><td align="center" valign="top"><table role="none" width="100%" border="0" cellspacing="0" cellpadding="0" align="center"><tr><td><tr><td class="b" align="center" valign="top" bgcolor="#2a2a2a" style="padding:0px 0px 0px 0px;border-style:solid;border-width: 0px 0px 0px 0px;border-color: #2a2a2a;border-bottom-left-radius:10px;border-bottom-right-radius:10px;"><table role="none" width="100%" border="0" cellspacing="0" cellpadding="0" align="center"><tr><td align="center" valign="top" bgcolor="#73ddff" style="padding:12px"><table role="none" width="100%" border="0" cellspacing="0" cellpadding="0" align="center"><tr><td><span style="padding-left:1px;"></span></td><td align="center" valign="middle" width="75" style="width:75px;"><a href="https://elink4f7.mail.bycloud.ai/ss/c/u001.1muhFWIqieRYpaJ-FbWSCQqcWoV4NNHHr5SkP9THApWuHAAlWLQxI3Q_IqFmt_DcyAxeC8jDApCnHmMSBGpBb5sgtimvBYgxRX-Rp7s0F3LjCHoSwdhr83OBqRFhJ1y_/4gu/i3tAb-AfRv2ETmob543EHQ/h16/h001.z6CQdjmbTXLKhtBwCHLhY41_RQi50Agb3IatcPVFDOc" style="text-decoration:none;"><img width="22" height="22" alt="tw" border="0" style="display:block;max-width:22px;color:Dark" src="https://media.beehiiv.com/cdn-cgi/image/fit=scale-down,format=auto,onerror=redirect,quality=80/static_assets/x_dark.png"/></a></td><td align="center" valign="middle" width="75" style="width:75px;"><a href="https://elink4f7.mail.bycloud.ai/ss/c/u001.amatuKKICSickUKplYJXmBoQnQ9VXnB2zTxBG4HeHBgjMqVxpoXRdj01cjwyoVlHgiebEOgBvwHtevoVpsSvpn3Q1di2ml6sb3cBM-X6IStQbj_zQSVGWJ8AAmPw2en2/4gu/i3tAb-AfRv2ETmob543EHQ/h17/h001.4q99t52Ba2D6fpgueGjLbZxANl4J8o2UV3WbjeID2f8" style="text-decoration:none;"><img width="22" height="16" alt="yt" border="0" style="display:block;max-width:22px;color:Dark" src="https://media.beehiiv.com/cdn-cgi/image/fit=scale-down,format=auto,onerror=redirect,quality=80/static_assets/youtube_dark.png"/></a></td><td><span style="padding-left:1px;"></span></td></tr></table></td></tr><tr><td height="10" style="line-height:1px;font-size:1px;height:10px;"> </td></tr><tr><td class="w" align="center" valign="top" style="padding:15px 15px 15px 15px;"><table role="none" width="100%" border="0" cellspacing="0" cellpadding="0" align="center"><tr><td align="center" valign="top"><p 
style="font-family:'Verdana',Geneva,sans-serif;color:#FFFFFF!important;"> Update your email preferences or unsubscribe <a class="link" href="https://elink4f7.mail.bycloud.ai/ss/c/u001.c6q0w4g5sodbtO4I1B_pxWc4htTObwdorovK0nFHVH-4pUdVE0ELYH5DsNemk732SjNwhPNJ25r0O8B5vYifsBhEpz-DJgyVFmavJPa0OyKRRnvw4o7XGyvIv7PRofnmauzxBf7yzvYmubpXmdUl-RONIgJiVJnOSKZGKG7BEeU8ZScjTOc5xgFaow-aT1y-e4Ew5HofuXeRuAF9dU_KCYbnzrZrJOqaWUvfHedLCa__yFCR-m1N7tk16R3UlnlP1CbAGIF83xJUBACE1gxCq5pBi9xDIKliHQwGpNsZU9O64s3iXGmNrfG5sNJzUDGyVhWRa7Rm00M9ZQuURtSPOO7uSQQX0M3D0v7ElLy2pjVJtOvjNSNDb28kF6bTrd94hjJpTTwNkmdVd0lQ7Oh1ybRUNBFVB4o574tfp7bntcDb8zAPmMv-ruPx69s-6uAhlX_jpMY3HxCtWdkNobhUTFCD5buPuyNkxWedSFefjse8RFNkeOG23aoe-8BSPkKSjqkwVBR4shJqItkiePSqQf0CruoUPfJ6-viBqqAxqg7cRdovnLKu4OSI9t37fCp8ICF-Zh7yCEi2eHvUXA2s_JdqWmpJQ5uGzQ7BeXJ08NZq7m5gSHQnrHfCSnBNFp6_lvKpganQqevvZtacSGpOJan_PoLpoeixtbRJ03j2CsD46dRItcL-QMsIu_6EpoNS4Lb651kqIFtYSXc3FNaFig5OcLZj80Y_hifXaPoaOOK0Wg34j-NoaYDNzHwH4FuIRnfVmgS4WrqN4R4IYXSV9kv1ooAnCWd0Q-0yBc01dVcLTeoiropZi1J2aYjyG9wUA8pMiKj2gqVPh99MGWrD9w/4gu/i3tAb-AfRv2ETmob543EHQ/h18/h001.BFFivJ99op7M0rMG3_NG5Nv51W_Th4KFwT8TeymVUGo" style="text-decoration:underline;text-decoration-color:#FFFFFF!important;color:#FFFFFF!important;"> here</a></p><p class="copyright" style="font-family:'Verdana',Geneva,sans-serif;color:#FFFFFF!important;"> © 2025 bycloudai </p><p style="font-family:'Verdana',Geneva,sans-serif;color:#FFFFFF!important;"> 228 Park Ave S, #29976, New York, New York 10003, United States </p></td></tr><tr style="display: table-row !important;"><td align="center" valign="top" style="padding-top:20px;" style="display:table-cell !important;"><table role="none" border="0" cellspacing="0" cellpadding="0" align="center" style="display:table !important;"><tr style="display:table-row !important;"><td class="u" align="center" valign="middle" height="32" style="height:32px;display:table-cell !important; max-height: 32px !important;margin:0px !important; background-color: #ffffff !important;"><a style="line-height:32px !important;text-decoration:none;display:block !important;" href="https://elink4f7.mail.bycloud.ai/ss/c/u001.DUiN96-Eq7pUHzwEhy5j28olDWFpV5DDKfdk_OdOKOgdhYE0mFaBGBa-KNb01GHFtLKhZmGg8MBkXrW3mvVKNfhYibuDWEkwZvHb6kf5paad5b2CAMWSSuzwrdycWW0Z0oUy_-dUfbEQLYPWEoGJFRXHwyFqaIyIyKsbOZGjLUiIsxIYFL_OeMN4nRP0vHaqpYYrRPlFrVeObNblKVYKKtXN8iH8aFn3_VLtaqOnbI8vtXdf9SYOPc75wOe0vIqa/4gu/i3tAb-AfRv2ETmob543EHQ/h19/h001.lBTkHTrzG8rnu1I0_9vUSgW-6B8OF-BLmNOruY3JHts"><img src="https://media.beehiiv.com/output-onlinepngtools.png" width="16" alt="beehiiv logo" style="display:inline-block !important;max-width:16px !important; vertical-align:-3px !important;width: 16px !important;" border="0"/><span style="padding-left:11px !important;display: inline-block !important;">Powered by beehiiv</span></a></td></tr></table></td></tr><tr><td align="left" valign="top" height="2" style="height:2px;"><a href='https://elink4f7.mail.bycloud.ai/ss/c/u001.CxDkkVpJsBdVoe83c_tBWsHIaP4XNp0WgUYqLvHcKk_3uqk_KIkz4ddLinhFbud6JuxLFdSUhYnR7b1NSsmbtzXNGNblnEEMKUtkCAjkn8Y/4gu/i3tAb-AfRv2ETmob543EHQ/h20/h001.k41Q5NG50RCgeeCSRT5McJjnoERdc_5Flss75zJaAxM' style="color: #2a2a2a !important; cursor: default; font-size: 1px; text-decoration: none;"> Terms of Service </a></td></tr></table></td></tr></table></td></tr></td></tr></table></td></tr></table></td></tr></table></td></tr></table></div></body></html>