# ner_service.py
"""
NER 服务实现
支持多种模式:
1. rule - 基于规则的简单 NER(默认,用于开发测试)
2. spacy - 使用 spaCy 模型
3. transformers - 使用 Transformers 模型
4. api - 调用外部 API(如 DeepSeek/Qwen)
"""
import re
import uuid
from typing import List, Optional

from loguru import logger

from ..config import settings
from ..models import EntityInfo, PositionInfo
  15. class NerService:
  16. """NER 服务"""
  17. def __init__(self):
  18. self.model_type = settings.ner_model
  19. logger.info(f"初始化 NER 服务: model_type={self.model_type}")
  20. async def extract_entities(
  21. self,
  22. text: str,
  23. entity_types: Optional[List[str]] = None
  24. ) -> List[EntityInfo]:
  25. """
  26. 从文本中提取实体
  27. Args:
  28. text: 待提取的文本
  29. entity_types: 指定要提取的实体类型,为空则提取所有类型
  30. Returns:
  31. 实体列表
  32. """
  33. if not text or not text.strip():
  34. return []
  35. if self.model_type == "rule":
  36. return await self._extract_by_rules(text, entity_types)
  37. elif self.model_type == "spacy":
  38. return await self._extract_by_spacy(text, entity_types)
  39. elif self.model_type == "transformers":
  40. return await self._extract_by_transformers(text, entity_types)
  41. elif self.model_type == "api":
  42. return await self._extract_by_api(text, entity_types)
  43. else:
  44. logger.warning(f"未知的模型类型: {self.model_type},使用规则模式")
  45. return await self._extract_by_rules(text, entity_types)
  46. async def _extract_by_rules(
  47. self,
  48. text: str,
  49. entity_types: Optional[List[str]] = None
  50. ) -> List[EntityInfo]:
  51. """
  52. 基于规则的 NER 提取
  53. 用于开发测试阶段,后续可替换为更高级的模型
  54. """
  55. entities = []
  56. # 规则定义
  57. rules = {
  58. "DATE": [
  59. # 中文日期格式
  60. r'(\d{4}年\d{1,2}月\d{1,2}日)',
  61. r'(\d{4}年\d{1,2}月)',
  62. r'(\d{4}-\d{1,2}-\d{1,2})',
  63. r'(\d{4}/\d{1,2}/\d{1,2})',
  64. ],
  65. "NUMBER": [
  66. # 带单位的数值
  67. r'(\d+\.?\d*\s*(?:万元|元|米|公里|千米|平方米|㎡|吨|kg|g|个|台|套|件|次|人|天|小时|分钟|秒|%|百分比))',
  68. # 百分比
  69. r'(\d+\.?\d*%)',
  70. # 纯数值(较大的数)
  71. r'(?<![a-zA-Z])(\d{4,}(?:\.\d+)?)(?![a-zA-Z])',
  72. ],
  73. "ORG": [
  74. # 机构/公司名称
  75. r'([\u4e00-\u9fa5]{2,10}(?:公司|集团|院|所|局|部|厅|委|会|中心|协会|学会|银行|医院|学校|大学|学院))',
  76. # xx省/市/县/区
  77. r'([\u4e00-\u9fa5]{2,6}(?:省|市|县|区|镇|乡|村)(?:人民)?(?:政府|委员会)?)',
  78. ],
  79. "LOC": [
  80. # 地点
  81. r'([\u4e00-\u9fa5]{2,6}(?:省|市|县|区|镇|乡|村|路|街|巷|号|楼|栋|单元|室))',
  82. # 常见地名后缀
  83. r'([\u4e00-\u9fa5]{2,8}(?:工业园|开发区|高新区|科技园|产业园))',
  84. ],
  85. "PERSON": [
  86. # 人名(简单规则:姓+名)
  87. r'(?:(?:张|王|李|赵|刘|陈|杨|黄|周|吴|徐|孙|马|朱|胡|郭|何|林|罗|高|郑|梁|谢|唐|许|邓|冯|韩|曹|曾|彭|萧|蔡|潘|田|董|袁|于|余|叶|蒋|杜|苏|魏|程|吕|丁|沈|任|姚|卢|傅|钟|姜|崔|谭|廖|范|汪|陆|金|石|戴|贾|韦|夏|邱|方|侯|邹|熊|孟|秦|白|江|阎|薛|尹|段|雷|黎|史|龙|陶|贺|顾|毛|郝|龚|邵|万|钱|严|赖|覃|洪|武|莫|孔)[\u4e00-\u9fa5]{1,2})(?:总|经理|主任|工程师|教授|博士|先生|女士|同志)?',
  88. ],
  89. "DEVICE": [
  90. # 设备名称
  91. r'([\u4e00-\u9fa5]{2,10}(?:设备|仪器|仪表|机器|装置|系统|探测器|传感器|检测仪|分析仪|监测仪))',
  92. ],
  93. "PROJECT": [
  94. # 项目名称 - 更严格的规则
  95. # 要求:项目名应该是完整的名词短语,通常有特定前缀
  96. # 带书名号的项目名
  97. r'《([\u4e00-\u9fa5a-zA-Z0-9]{2,30}(?:项目|工程|计划|方案|课题))》',
  98. # 明确的项目编号/名称格式
  99. r'([A-Z0-9\-]+(?:项目|工程))',
  100. # 地名/机构名 + 项目类型(更严格)
  101. r'((?:[\u4e00-\u9fa5]{2,6}(?:省|市|县|区|镇))?[\u4e00-\u9fa5]{2,15}(?:建设|改造|修复|治理|开发|研究|试点|示范)(?:项目|工程))',
  102. # xx项目部/项目组
  103. r'([\u4e00-\u9fa5]{2,15}项目(?:部|组|办))',
  104. ],
  105. }
  106. # 过滤实体类型
  107. if entity_types:
  108. rules = {k: v for k, v in rules.items() if k in entity_types}
  109. # 停用词/无效实体过滤(这些词虽然匹配规则但不是有效实体)
  110. stopwords = {
  111. # 常见无意义匹配
  112. "该项目", "本项目", "此项目", "各项目", "子公司和项目", "认真落实项目",
  113. "开展的培训项目", "年已经开展的培训项目",
  114. "该工程", "本工程", "此工程", "各工程",
  115. "该计划", "本计划", "此计划", "各计划",
  116. "该方案", "本方案", "此方案", "各方案",
  117. # 动词开头的无效匹配
  118. "落实项目", "开展项目", "推进项目", "完成项目", "实施项目",
  119. # 太短的无意义实体
  120. "项目", "工程", "计划", "方案", "课题",
  121. }
  122. # 执行规则匹配
  123. seen_entities = set() # 用于去重
  124. for entity_type, patterns in rules.items():
  125. for pattern in patterns:
  126. for match in re.finditer(pattern, text):
  127. entity_text = match.group(1) if match.groups() else match.group(0)
  128. entity_text = entity_text.strip()
  129. # 跳过停用词
  130. if entity_text in stopwords:
  131. continue
  132. # 跳过太短的实体(少于3个字符)
  133. if len(entity_text) < 3:
  134. continue
  135. # 去重
  136. entity_key = f"{entity_type}:{entity_text}"
  137. if entity_key in seen_entities:
  138. continue
  139. seen_entities.add(entity_key)
  140. # 计算行号
  141. line_num = text[:match.start()].count('\n') + 1
  142. # 获取上下文
  143. context_start = max(0, match.start() - 20)
  144. context_end = min(len(text), match.end() + 20)
  145. context = text[context_start:context_end]
  146. if context_start > 0:
  147. context = "..." + context
  148. if context_end < len(text):
  149. context = context + "..."
  150. entity = EntityInfo(
  151. name=entity_text,
  152. type=entity_type,
  153. value=entity_text,
  154. position=PositionInfo(
  155. char_start=match.start(),
  156. char_end=match.end(),
  157. line=line_num
  158. ),
  159. context=context,
  160. confidence=0.8, # 规则匹配默认置信度
  161. temp_id=str(uuid.uuid4())[:8]
  162. )
  163. entities.append(entity)
  164. logger.info(f"规则 NER 提取完成: entity_count={len(entities)}")
  165. return entities
  166. async def _extract_by_spacy(
  167. self,
  168. text: str,
  169. entity_types: Optional[List[str]] = None
  170. ) -> List[EntityInfo]:
  171. """
  172. 使用 spaCy 进行 NER 提取
  173. """
  174. # TODO: 实现 spaCy NER
  175. logger.warning("spaCy NER 尚未实现,回退到规则模式")
  176. return await self._extract_by_rules(text, entity_types)
  177. async def _extract_by_transformers(
  178. self,
  179. text: str,
  180. entity_types: Optional[List[str]] = None
  181. ) -> List[EntityInfo]:
  182. """
  183. 使用 Transformers 模型进行 NER 提取
  184. """
  185. # TODO: 实现 Transformers NER
  186. logger.warning("Transformers NER 尚未实现,回退到规则模式")
  187. return await self._extract_by_rules(text, entity_types)
  188. async def _extract_by_api(
  189. self,
  190. text: str,
  191. entity_types: Optional[List[str]] = None
  192. ) -> List[EntityInfo]:
  193. """
  194. 调用外部 API 进行 NER 提取
  195. """
  196. # TODO: 实现 API NER(调用 DeepSeek/Qwen)
  197. logger.warning("API NER 尚未实现,回退到规则模式")
  198. return await self._extract_by_rules(text, entity_types)
  199. # 创建单例
  200. ner_service = NerService()