﻿{"id":584003,"date":"2026-03-22T16:47:41","date_gmt":"2026-03-22T08:47:41","guid":{"rendered":"http:\/\/www.nlpir.org\/wordpress\/?p=584003"},"modified":"2026-03-22T20:12:51","modified_gmt":"2026-03-22T12:12:51","slug":"%e6%9d%8e%e7%a3%8a-%e5%ae%9e%e9%aa%8c%e5%ae%a4%e5%af%bc%e5%b8%88%ef%bc%88%e6%8b%9b%e7%a0%94%e7%a9%b6%e7%94%9f%ef%bc%89-2","status":"publish","type":"post","link":"http:\/\/www.nlpir.org\/wordpress\/2026\/03\/22\/%e6%9d%8e%e7%a3%8a-%e5%ae%9e%e9%aa%8c%e5%ae%a4%e5%af%bc%e5%b8%88%ef%bc%88%e6%8b%9b%e7%a0%94%e7%a9%b6%e7%94%9f%ef%bc%89-2\/","title":{"rendered":"\u674e\u78ca-\u5b9e\u9a8c\u5ba4\u5bfc\u5e08\uff08\u62db\u7814\u7a76\u751f\uff09"},"content":{"rendered":"<div class=\"wp-block-image\">\n<figure class=\"aligncenter size-full is-resized\"><img loading=\"lazy\" decoding=\"async\" width=\"650\" height=\"720\" src=\"http:\/\/www.nlpir.org\/wordpress\/wp-content\/uploads\/2026\/03\/\u674e\u78ca.webp\" alt=\"\" class=\"wp-image-584005\" style=\"width:377px;height:auto\" srcset=\"http:\/\/www.nlpir.org\/wordpress\/wp-content\/uploads\/2026\/03\/\u674e\u78ca.webp 650w, http:\/\/www.nlpir.org\/wordpress\/wp-content\/uploads\/2026\/03\/\u674e\u78ca-271x300.webp 271w\" sizes=\"(max-width: 650px) 100vw, 650px\" 
\/><\/figure><\/div>\n\n\n<p>\u804c\u79f0\uff1a\u51c6\u8058\u6559\u6388<\/p>\n\n\n\n<p>E-mail\uff1alilei@ustc.edu<\/p>\n\n\n\n<p>\u901a\u4fe1\u5730\u5740\uff1a\u5317\u4eac\u5e02\u6d77\u6dc0\u533a\u4e2d\u5173\u6751\u5357\u5927\u88575\u53f7\u9662\u4eba\u5de5\u667a\u80fd\u5b66\u9662\u8ba4\u77e5\u667a\u80fd\u5b9e\u9a8c\u5ba4\uff08104\uff09<\/p>\n\n\n\n<p><strong>\u57fa\u672c\u60c5\u51b5<\/strong><\/p>\n\n\n\n<p><strong>\u674e\u78ca<\/strong>\uff0c\u535a\u58eb\uff0c\u5317\u4eac\u7406\u5de5\u5927\u5b66\u4eba\u5de5\u667a\u80fd\u5b66\u9662\u51c6\u8058\u6559\u6388\uff0c\u535a\u58eb\u751f\u5bfc\u5e08\uff0c\u7279\u7acb\u5b66\u8005\u30022026\u5e743\u6708\u52a0\u5165\u5317\u4eac\u7406\u5de5\u5927\u5b66\u4eba\u5de5\u667a\u80fd\u5b66\u9662\uff0c\u4e3b\u8981\u7814\u7a76\u65b9\u5411\u4e3a\u591a\u6a21\u6001,\u751f\u6210\u6a21\u578b,\u673a\u5668\u5b66\u4e60\u7b49\u3002\u591a\u5e74\u5728\u54e5\u672c\u54c8\u6839\u5927\u5b66\uff0c\u534e\u76db\u987f\u5927\u5b66\u7b49\u4ece\u4e8b\u4eba\u5de5\u667a\u80fd\u548c\u591a\u6a21\u6001\u9886\u57df\u7684\u7814\u7a76\uff0c\u4e0e\u65af\u5766\u798f\u5927\u5b66\uff0c\u5361\u8010\u57fa\u6885\u9686\u5927\u5b66\uff0c\u745e\u58eb\u8054\u90a6\u7406\u5de5\u5927\u5b66\uff0c\u725b\u6d25\u5927\u5b66\u6709\u6df1\u5165\u7684\u4ea4\u6d41\u548c\u5408\u4f5c\uff0c\u53d6\u5f97\u4e86\u4e00\u7cfb\u5217\u5177\u6709\u56fd\u9645\u5f71\u54cd\u529b\u7684\u6210\u679c\u3002\u4ee5\u7b2c\u4e00\u4f5c\u8005\u6216\u901a\u8baf\u4f5c\u8005\u8eab\u4efd\u53d1\u8868\u9876\u7ea7\u8bba\u6587 40\u4f59\u7bc7\uff08\u5982 CVPR, 
NeurIPS\uff0cACL\uff0cICLR\uff09\u3002\u5728CVPR\u6311\u6218\u8d5b\u8363\u83b7\u8363\u8a89\u63d0\u540d\u5956\uff0c\u5728MIPR\uff0cICCC\u7b49\u56fd\u9645\u4f1a\u8bae\u8363\u83b7\u6700\u4f73\u8bba\u6587\u5956\uff0c\u4e8eAAAI\uff0cCVPR\uff0cNeurIPS\u7b49\u4f1a\u8bae\u62c5\u4efb\u5206\u4f1a\u4e3b\u5e2d\u548c\u7a0b\u5e8f\u59d4\u5458\u4f1a\u59d4\u5458\u3002<\/p>\n\n\n\n<p>\u6bcf\u5e74\u8ba1\u5212\u62db\u751f\u535a\u58eb\u751f1\u4eba\uff0c\u7855\u58eb\u7814\u7a76\u751f3-4\u4eba\uff0c\u540c\u65f6\u6b22\u8fce\u9ad8\u5e74\u7ea7\u4f18\u79c0\u672c\u79d1\u751f\u52a0\u5165\u8bfe\u9898\u7ec4\u3002<\/p>\n\n\n\n<p>\u4e2a\u4eba\u4e3b\u9875\uff1a<a href=\"https:\/\/bitlilei.github.io\/Lilei\">https:\/\/bitlilei.github.io\/Lilei<\/a><\/p>\n\n\n\n<p><strong>\u7814\u7a76\u65b9\u5411<\/strong><\/p>\n\n\n\n<p>\u591a\u6a21\u6001\u6a21\u578b\uff0c\u751f\u6210\u6a21\u578b\uff0c\u5177\u8eab\u667a\u80fd\uff0c\u81ea\u7136\u8bed\u8a00\u5904\u7406\uff0c\u4fe1\u606f\u68c0\u7d22\uff0c\u4eba\u5de5\u667a\u80fd\uff0c\u5927\u8bed\u8a00\u6a21\u578b<\/p>\n\n\n\n<p><strong>\u4e3b\u8981\u83b7\u5956\u60c5\u51b5<\/strong><\/p>\n\n\n\n<p>(1)&nbsp;<strong>\u6700\u4f73\u8bba\u6587\u5956<\/strong>\uff0cUniHPR: Unified Human Pose Representation via Singular Value Contrastive Learning\u3002\u8be5\u8bba\u6587\u5728<strong>&nbsp;2025\u5e74\u7b2c\u516b\u5c4a<\/strong>&nbsp;<strong>IEEE \u591a\u5a92\u4f53\u4fe1\u606f\u5904\u7406\u4e0e\u68c0\u7d22\u56fd\u9645\u4f1a\u8bae<\/strong>&nbsp;(MIPR 2025)&nbsp;\u4e2d\u83b7\u5f97\u6700\u4f73\u8bba\u6587\u5956\u3002<\/p>\n\n\n\n<p>(2)&nbsp;<strong>\u8363\u8a89\u63d0\u540d\u5956<\/strong>&nbsp;(Honorable Mention)\uff0c<strong>2025\u5e74 CVPR<\/strong>&nbsp;(IEEE\/CVF \u8ba1\u7b97\u673a\u89c6\u89c9\u4e0e\u6a21\u5f0f\u8bc6\u522b\u4f1a\u8bae) challenge\u3002\u83b7\u5956\u4f5c\u54c1\u4e3a\uff1aMedal S: Spatio-Textual Prompt Model for Medical Segmentation\u3002<\/p>\n\n\n\n<p>(3)&nbsp;<strong>\u6700\u4f73\u8bba\u6587\u5956<\/strong>\uff0c<strong>2022\u5e74\u7b2c\u516b\u5c4a IEEE 
\u8ba1\u7b97\u673a\u4e0e\u901a\u4fe1\u56fd\u9645\u4f1a\u8bae<\/strong>&nbsp;(ICCC 2022)\u3002\u83b7\u5956\u8bba\u6587\uff1aLR-CSNet: Low-rank deep unfolding network for image compressive sensing\u3002<\/p>\n\n\n\n<p><strong>\u4ee3\u8868\u6027\u8bba\u6587<\/strong><\/p>\n\n\n\n<p>[1] Jingzhe Shi, Qinwei Ma, Hongyi Liu, Hang Zhao, Jenq-Neng Hwang,&nbsp;Lei Li*. \u201cIntrinsic Entropy of Context Length Scaling in LLMs\u201d. International Conference on Learning Representations (ICLR) 2026,(oral).<\/p>\n\n\n\n<p>[2] Sen Jia, Ning Zhu, Jinqin Zhong, Jiale Zhou, Zhang Huaping, Jenq-Neng Hwang,&nbsp;Lei Li*. \u201cRAM: Recover Any 3D Human Motion in-the-Wild\u201d. IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) 2026.<\/p>\n\n\n\n<p>[3] Sen Jia, Huayu Wang, Hsiang-Wei Huang, Zhaochong An, Jenq-Neng Hwang, Zhang Huaping,&nbsp;Lei Li*. \u201cCLEP: Contrastive Language-Pose Pretraining\u201d. IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) 2026.<\/p>\n\n\n\n<p>[4] Yunchuan Guan, Yu Liu, Ke Zhou, Zhiqi Shen, Jenq-Neng Hwang,&nbsp;Lei Li*. \u201cLearning to Learn Weight Generation via Local Consistency Diffusion\u201d. IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) 2026.<\/p>\n\n\n\n<p>[5] Xin Zhang, Shen Chen, Jiale Zhou,&nbsp;Lei Li*. \u201cPSGS: Text-driven Panorama Sliding Scene Generation via Gaussian Splatting\u201d. IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP) 2026.<\/p>\n\n\n\n<p>[6]&nbsp;Lei Li*, Sen Jia, Jenq-Neng Hwang. \u201cMultiple Human Motion Understanding\u201d. AAAI Conference on Artificial Intelligence (AAAI) 2026.<\/p>\n\n\n\n<p>[7] Qiaoyi Xu, Azizi Abdullah, Tao Chen, Xinglin Zhang, Adam Shephard, Patsy Ng Pei Sze, Noraidah Masir,&nbsp;Lei Li*, Reena Rahayu. \u201cRobust Multi-Domain Digital Pathology Image Segmentation via Joint Balancing Representation Learning\u201d. 
Expert Systems With Applications 2026.<\/p>\n\n\n\n<p>[8] Ziyang Yan, Yihua Shao, Minwen Liao, Siyu Chen, Nan Wang, Muyuan Lin, Jenq-Neng Hwang, Hao Zhao, Fabio Remondino,&nbsp;Lei Li*. \u201c3DSceneEditor: Controllable 3D Scene Editing with Gaussian Splatting\u201d. IEEE\/CVF Winter Conference on Applications of Computer Vision (WACV) 2026.<\/p>\n\n\n\n<p>[9] Yunchuan Guan, Yu Liu, Ke Zhou, Zhiqi Shen, Jenq-Neng Hwang, Serge Belongie,&nbsp;Lei Li*. \u201cIs Meta-Learning Out? Rethinking Unsupervised Few-Shot Classification with Limited Entropy\u201d. International Conference on Computer Vision (ICCV) 2025.<\/p>\n\n\n\n<p>[10] Haonan Tong, Ke Liu, Chuang Zhang, Xinglin Zhang, Tao Chen, Jenq-Neng Hwang,&nbsp;Lei Li*. \u201cPAMN: Multi-phase Correlation Modeling for Contrast-Enhanced 3D Medical Image Retrieval\u201d. Conference on Empirical Methods in Natural Language Processing (EMNLP) 2025.<\/p>\n\n\n\n<p>[11] Yunchuan Guan, Yu Liu, Ke Zhou, Hui Li, Sen Jia, Zhiqi Shen, Ziyang Wang, Xinglin Zhang, Tao Chen, Jenq-Neng Hwang,&nbsp;Lei Li*. \u201cLearning an Efficient Optimizer via Hybrid-Policy Sub-Trajectory Balance\u201d. European Conference on Artificial Intelligence (ECAI) 2025.<\/p>\n\n\n\n<p>[12] Tian Lan, Jinyuan Xu, Xue He, Jenq-Neng Hwang,&nbsp;Lei Li*. \u201cAttention Consistency for LLMs Explanation\u201d. Conference on Empirical Methods in Natural Language Processing (EMNLP) 2025.<\/p>\n\n\n\n<p>[13] Ruocheng Gu, Sen Jia, Yule Ma, Jinqin Zhong, Jenq-Neng Hwang,&nbsp;Lei Li*. \u201cMoCount: Motion-Based Repetitive Action Counting\u201d. ACM International Conference on Multimedia (ACMMM) 2025.<\/p>\n\n\n\n<p>[14] Libin Liu, Shen Chen, Sen Jia, Jingzhe Shi, Can Jin, Zongkai Wu, Jenq-Neng Hwang,&nbsp;Lei Li*. \u201cGraph Canvas for Controllable 3D Scene Generation\u201d. ACM International Conference on Multimedia (ACMMM) 2025.<\/p>\n\n\n\n<p>[15] Pengcheng Shi, Jiawei Chen, Jiaqi Liu, Xinglin Zhang, Tao Chen,&nbsp;Lei Li*. 
\u201cMedal S: Spatio-Textual Prompt Model for Medical Segmentation\u201d. IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) 2025.<\/p>\n\n\n\n<p>[16] Ziyu Yao, Xuxin Cheng, Zhiqi Huang,&nbsp;Lei Li*. \u201cCountLLM: Towards Generalizable Repetitive Action Counting via Large Language Model\u201d. IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) 2025.<\/p>\n\n\n\n<p>[17]&nbsp;Lei Li*, Sen Jia, Jianhao Wang, Zhongyu Jiang, Feng Zhou, Ju Dai, Tianfang Zhang, Zongkai Wu, Jenq-Neng Hwang. \u201cHuman Motion Instruction Tuning\u201d. IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) 2025.<\/p>\n\n\n\n<p>[18] Chengkun Cai, Haoliang Liu, Xu Zhao, Zhongyu Jiang, Tianfang Zhang, Zongkai Wu, John Lee, Jenq-Neng Hwang,&nbsp;Lei Li*. \u201cBayesian Optimization for Controlled Image Editing via LLMs\u201d. Annual Meeting of the Association for Computational Linguistics (ACL) 2025.<\/p>\n\n\n\n<p>[19] Chengkun Cai, Xu Zhao, Haoliang Liu, Zhongyu Jiang, Tianfang Zhang, Zongkai Wu, Jenq-Neng Hwang,&nbsp;Lei Li*. \u201cThe Role of Deductive and Inductive Reasoning in LLMs\u201d. Annual Meeting of the Association for Computational Linguistics (ACL) 2025.<\/p>\n\n\n\n<p>[20] Zhongyu Jiang, Wenhao Chai,&nbsp;Lei Li*, Zhuoran Zhou, Cheng-Yen Yang, Jenq-Neng Hwang. \u201cUniHPR: Unified Human Pose Representation via Singular Value Contrastive Learning\u201d. IEEE International Conference on Multimedia Information Processing and Retrieval (MIPR) 2025 (Best Paper).<\/p>\n\n\n\n<p>[21] Chengbo Sun, Hui Yi Leong,&nbsp;Lei Li*. \u201cCoarse-to-Fine Personalized LLM Impressions for Streamlined Radiology Reports\u201d. ICML 2025 Workshop.<\/p>\n\n\n\n<p>[22] Jinyuan Xu, Tian Lan, Mathieu Valette, Pierre Magistry,&nbsp;Lei Li*. \u201cTinyMentalLLMs Enable Depression Detection in Chinese Social Media Texts\u201d. 
Recent Advances in Natural Language Processing (RANLP) 2025.<\/p>\n\n\n\n<p>[23] Pengcheng Shi, Jiawei Chen, Jiaqi Liu, Xinglin Zhang, Tao Chen,&nbsp;Lei Li*. \u201cMedal S: Spatio-Textual Prompt Model for Medical Segmentation\u201d. CVPR 2025 Workshop (Honorable Mention).<\/p>\n\n\n\n<p>[24] Wenhao Yang, Jianguo Wei, Wenhuan Lu,&nbsp;Lei Li*. \u201cYou Only Speak Once to See\u201d. IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP) 2025.<\/p>\n\n\n\n<p>[25] Shan Chen, Jiale Zhou,&nbsp;Lei Li*. \u201cDense Point Clouds Matter: Dust-GS for Scene Reconstruction from Sparse Viewpoints\u201d. IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP) 2025.<\/p>\n\n\n\n<p>[26] Youjia Fu, Zihao Xu, Junsong Fu, Huixia Xue, Shuqiu Tan,&nbsp;Lei Li*, Shaoxun Qing. \u201cMonoMM: A Multi-scale Mamba-Enhanced Network for Real-time Monocular 3D Object Detection\u201d. The Journal of Supercomputing 2025.<\/p>\n\n\n\n<p>[27]&nbsp;Lei Li*, Xinglin Zhang, Jun Liang, Huang Mengqian, Tao Chen. \u201cAddressing Domain Shift via Imbalance-Aware Domain Adaptation in Embryo Development Assessment\u201d. Mathematical Biosciences and Engineering 2025.<\/p>\n\n\n\n<p>[28] Jingzhe Shi, Qinwei Ma, Huan Ma,&nbsp;Lei Li*. \u201cScaling Law for Time Series Forecasting\u201d. Neural Information Processing Systems (NeurIPS) 2024.<\/p>\n\n\n\n<p>[29] Jingzhe Shi, Jialuo Li, Qinwei Ma, Zaiwen Yang, Huan Ma,&nbsp;Lei Li*. \u201cCHOPS: CHat with custOmer Profile Systems for Customer Service with LLMs\u201d. Conference on Language Modeling (COLM) 2024.<\/p>\n\n\n\n<p>[30] Stefan Oehmcke,&nbsp;Lei Li$^#$, Katerina Trepekli, Jaime C. Revenga, Thomas Nord-Larsen, Fabian Gieseke, Christian Igel. \u201cDeep Point Cloud Regression for Above-ground Forest Biomass Estimation from Airborne LiDAR\u201d. Remote Sensing of Environment 2024.<\/p>\n\n\n\n<p>[31]&nbsp;Lei Li. \u201cImage Semantic Segmentation via Chain-of-Thought Prompts\u201d. 
IEEE\/CVF Winter Conference on Applications of Computer Vision (WACV) 2024.<\/p>\n\n\n\n<p>[32] Zhongyu Jiang, Zhuoran Zhou,&nbsp;Lei Li*, Wenhao Chai, Cheng-Yen Yang, Jenq-Neng Hwang. \u201cBack to Optimization: Diffusion-based Zero-shot 3D Human Pose Estimation\u201d. IEEE\/CVF Winter Conference on Applications of Computer Vision (WACV) 2024.<\/p>\n\n\n\n<p>[33] Zhuoran Zhou, Zhongyu Jiang, Wenhao Chai, Cheng-Yen Yang,&nbsp;Lei Li*, Jenq-Neng Hwang. \u201cEfficient Domain Adaptation via Generative Prior for 3D Infant Pose Estimation\u201d. IEEE\/CVF Winter Conference on Applications of Computer Vision (WACV) 2024.<\/p>\n\n\n\n<p>[34] Tianfang Zhang,&nbsp;Lei Li$^#$, Zhenming Peng. \u201cOptimization-inspired Cumulative Transmission Network for Image Compressive Sensing\u201d. Knowledge-Based Systems 2023.<\/p>\n\n\n\n<p>[35] Tianfang Zhang,&nbsp;Lei Li$^#$, Christian Igel, Stefan Oehmcke, Fabian Gieseke, Zhenming Peng. \u201cLR-CSNet: Low-rank Deep Unfolding Network for Image Compressive Sensing\u201d. IEEE International Conference on Computer and Communications (ICCC) 2022 (Best Paper).<\/p>\n\n\n\n<p>[36] Stefan Oehmcke,&nbsp;Lei Li*, Jaime C Revenga, Thomas Nord-Larsen, Katerina Trepekli, Fabian Gieseke, Christian Igel. \u201cDeep Learning Based 3D Point Cloud Regression for Estimating Forest Biomass\u201d. 
ACM SIGSPATIAL 2022.<\/p>\n\n\n\n<p><strong>\u4e3b\u6301\u53c2\u4e0e\u79d1\u7814\u9879\u76ee<\/strong><\/p>\n\n\n\n<p>(1)&nbsp;<strong>\u4e39\u9ea6\u5148\u950b\u4eba\u5de5\u667a\u80fd\u4e2d\u5fc3<\/strong>&nbsp;(Pioneer Centre for AI)\uff0c\u54e5\u672c\u54c8\u6839\u5927\u5b66 (UCPH) \u6218\u7565\u89c4\u5212\uff0c\u4e39\u9ea6\u56fd\u5bb6\u7814\u7a76\u57fa\u91d1\u4f1a (DNRF) \u62e8\u6b3e\u7f16\u53f7 P1\uff0c\u201c\u5148\u950b\u4eba\u5de5\u667a\u80fd\u4e2d\u5fc3\u201d 2021-01 \u81f3 2034-12<\/p>\n\n\n\n<p>(2)&nbsp;<strong>\u65b0\u52a0\u5761\u7814\u7a76\u9662\u4e0e\u4f01\u4e1a\u8054\u5408\u8d44\u52a9\u9879\u76ee<\/strong>\uff0c\u7814\u7a76\u8bfe\u9898\uff0c\u65e0\uff0c\u201c\u57fa\u4e8e\u4eba\u5de5\u667a\u80fd\u7684 3D \u8fd0\u52a8\u8303\u56f4\u4e0e\u529b\u91cf\u8bc4\u4f30\u201d 2025-10 \u81f3 2026-09\uff0c<\/p>\n\n\n\n<p>(3)<strong>&nbsp;SKAI Intelligence<\/strong>\uff0c\u201c3D \u573a\u666f\u751f\u6210\u4e0e\u4eff\u771f\u201d\uff0c\u65e0\uff0c\u201c3D \u573a\u666f\u751f\u6210\u4e0e\u4eff\u771f\u201d 2024-12 \u81f3 2025-06<\/p>\n\n\n\n<p>(4)&nbsp;<strong>Villum \u57fa\u91d1\u4f1a<\/strong>\uff0c\u201c\u6df1\u5ea6\u5b66\u4e60\u4e0e\u9065\u611f\u6280\u672f\uff1a\u89e3\u9501\u5168\u7403\u751f\u6001\u7cfb\u7edf\u8d44\u6e90\u201d\uff0c\u9879\u76ee\u7f16\u53f7 34306\uff0cDeReEco 2021-01 \u81f3 
2024-06\uff0c<\/p>\n\n\n\n<p><strong>\u83b7\u5f97\u7684\u4e13\u5229<\/strong><\/p>\n\n\n\n<p>[1]&nbsp;<strong>\u949f\u91d1\u7434\uff1b\u8f9c\u82e5\u57ce\uff1b\u6881\u653f\uff1b\u674e\u78ca\uff1b\u8d3e\u68ee\uff1b\u4e00\u79cd\u57fa\u4e8e\u8fd0\u52a8\u7684\u4eba\u4f53\u52a8\u4f5c\u91cd\u590d\u8ba1\u6570\u65b9\u6cd5\u53ca\u8bbe\u5907\u3002<\/strong><\/p>\n\n\n\n<p>[2]&nbsp;<strong>\u674e\u78ca<\/strong>\uff1b\u5218\u5ead\u7693\uff1b\u738b\u6743\uff1b\u94b1\u6668\uff1b\u5173\u952e\u70b9\u68c0\u6d4b\u65b9\u6cd5\u53ca\u88c5\u7f6e\u3001\u7535\u5b50\u8bbe\u5907\u548c\u5b58\u50a8\u4ecb\u8d28\u3002<\/p>\n\n\n\n<p><strong>\u6559\u5b66\u5de5\u4f5c<\/strong><\/p>\n\n\n\n<p>\u673a\u5668\u5b66\u4e60\u5bfc\u8bba<\/p>\n\n\n\n<p>\u4eba\u5de5\u667a\u80fd\u5bfc\u8bba<\/p>\n\n\n\n<p>\u5927\u6570\u636e\u6280\u672f<\/p>\n\n\n\n<div data-wp-interactive=\"core\/file\" class=\"wp-block-file\"><object data-wp-bind--hidden=\"!state.hasPdfPreview\"  class=\"wp-block-file__embed\" data=\"http:\/\/www.nlpir.org\/wordpress\/wp-content\/uploads\/2026\/03\/CV_Lei-1-2.pdf\" type=\"application\/pdf\" style=\"width:100%;height:600px\" aria-label=\"\u5d4c\u5165 CV_Lei-1-2\"><\/object><a id=\"wp-block-file--media-a8dc5141-e984-4c19-9fae-9ed8598c5f32\" href=\"http:\/\/www.nlpir.org\/wordpress\/wp-content\/uploads\/2026\/03\/CV_Lei-1-2.pdf\">CV_Lei-1-2<\/a><a href=\"http:\/\/www.nlpir.org\/wordpress\/wp-content\/uploads\/2026\/03\/CV_Lei-1-2.pdf\" class=\"wp-block-file__button wp-element-button\" download aria-describedby=\"wp-block-file--media-a8dc5141-e984-4c19-9fae-9ed8598c5f32\">\u4e0b\u8f7d<\/a><\/div>\n","protected":false},"excerpt":{"rendered":"<p>\u804c\u79f0\uff1a\u51c6\u8058\u6559\u6388 E-mail\uff1alilei@ustc.edu \u901a\u4fe1\u5730\u5740\uff1a\u5317\u4eac\u5e02\u6d77\u6dc0 &hellip; <a href=\"http:\/\/www.nlpir.org\/wordpress\/2026\/03\/22\/%e6%9d%8e%e7%a3%8a-%e5%ae%9e%e9%aa%8c%e5%ae%a4%e5%af%bc%e5%b8%88%ef%bc%88%e6%8b%9b%e7%a0%94%e7%a9%b6%e7%94%9f%ef%bc%89-2\/\">\u7ee7\u7eed\u9605\u8bfb <span 
class=\"meta-nav\">&rarr;<\/span><\/a><\/p>\n","protected":false},"author":1,"featured_media":0,"comment_status":"open","ping_status":"open","sticky":false,"template":"","format":"standard","meta":{"footnotes":""},"categories":[33,38],"tags":[],"_links":{"self":[{"href":"http:\/\/www.nlpir.org\/wordpress\/wp-json\/wp\/v2\/posts\/584003"}],"collection":[{"href":"http:\/\/www.nlpir.org\/wordpress\/wp-json\/wp\/v2\/posts"}],"about":[{"href":"http:\/\/www.nlpir.org\/wordpress\/wp-json\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"http:\/\/www.nlpir.org\/wordpress\/wp-json\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"http:\/\/www.nlpir.org\/wordpress\/wp-json\/wp\/v2\/comments?post=584003"}],"version-history":[{"count":3,"href":"http:\/\/www.nlpir.org\/wordpress\/wp-json\/wp\/v2\/posts\/584003\/revisions"}],"predecessor-version":[{"id":584120,"href":"http:\/\/www.nlpir.org\/wordpress\/wp-json\/wp\/v2\/posts\/584003\/revisions\/584120"}],"wp:attachment":[{"href":"http:\/\/www.nlpir.org\/wordpress\/wp-json\/wp\/v2\/media?parent=584003"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"http:\/\/www.nlpir.org\/wordpress\/wp-json\/wp\/v2\/categories?post=584003"},{"taxonomy":"post_tag","embeddable":true,"href":"http:\/\/www.nlpir.org\/wordpress\/wp-json\/wp\/v2\/tags?post=584003"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}