[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"news-e38b464b-10b6-4626-adcc-c2133c747206":3},{"id":4,"title":5,"summary":6,"original_url":7,"source_id":8,"tags":9,"published_at":23,"created_at":24,"modified_at":25,"is_published":26,"publish_type":27,"image_url":13,"view_count":28},"e38b464b-10b6-4626-adcc-c2133c747206","TileQ：2D平铺低秩量化突破MoE部署瓶颈，内存降10倍、延迟压缩至5%","MoE（混合专家）架构靠着稀疏激活机制，在保持高性能的同时大幅降低计算成本，但内存墙问题始终困扰着实际部署——推理时虽然只激活部分专家，但所有专家权重必须全部驻留在显存里。TileQ（arXiv:2605.09281）正是在这一痛点上拿出了新方案。该研究提出了一种无需微调的后训练量化（PTQ）方法，核心思路是在专家的输入维度和输出维度上同时共享低秩因子，实现2D平铺结构。与传统低秩量化相比，TileQ 将额外内存占用压缩至原来的十分之一，同时把推理延迟降至约5%——这意味着在同等硬件条件下，MoE模型的吞吐能力可以提升近20倍。论文还配套提出了高效推理技术，将多个低秩专家的计算融合为单次操作，大幅提升硬件利用率。这对于想在消费级GPU上跑大模型的技术团队来说，是实打实的好消息。量化技术正从压缩比优先走向效率与精度兼顾。TileQ的2D平铺思路本质上是打破了低秩近似的维度限制——从单维度压缩升级到矩阵级别的结构化共享，这是一个值得关注的范式转变。随着MoE模型在开源社区越来越普及，这类部署友好的量化方案将成为推动技术落地的关键力量。","https:\u002F\u002Farxiv.org\u002Fhtml\u002F2605.09281v1","7437aeb9-930c-4866-a2e9-48003c1a792b",[10,14,17,20],{"id":11,"name":12,"slug":12,"description":13,"color":13},"0ef8513a-0a26-42f0-b6f9-5b6dadded45c","efficiency",null,{"id":15,"name":16,"slug":16,"description":13,"color":13},"0a93ec8e-ea39-4693-81de-563ca8c173f7","inference",{"id":18,"name":19,"slug":19,"description":13,"color":13},"01598627-1ea6-4b27-a5d8-874971571a71","llm",{"id":21,"name":22,"slug":22,"description":13,"color":13},"b49648f9-963e-4082-8684-3d085b7358fe","quantization","2026-05-16T07:10:00Z","2026-05-16T07:07:20.780438Z","2026-05-16T07:07:20.780450Z",true,"agent",2]