# ner_service.py
"""
NER service implementation.

Supported modes:
1. rule - simple rule-based NER (default, for development/testing)
2. spacy - spaCy model
3. transformers - Transformers model
4. api - external API (e.g. DeepSeek/Qwen)
"""
import re
import uuid
from typing import List, Optional
from loguru import logger
from ..config import settings
from ..models import EntityInfo, PositionInfo
  15. class NerService:
  16. """NER 服务"""
  17. def __init__(self):
  18. self.model_type = settings.ner_model
  19. logger.info(f"初始化 NER 服务: model_type={self.model_type}")
  20. async def extract_entities(
  21. self,
  22. text: str,
  23. entity_types: Optional[List[str]] = None
  24. ) -> List[EntityInfo]:
  25. """
  26. 从文本中提取实体
  27. Args:
  28. text: 待提取的文本
  29. entity_types: 指定要提取的实体类型,为空则提取所有类型
  30. Returns:
  31. 实体列表
  32. """
  33. if not text or not text.strip():
  34. return []
  35. if self.model_type == "rule":
  36. return await self._extract_by_rules(text, entity_types)
  37. elif self.model_type == "spacy":
  38. return await self._extract_by_spacy(text, entity_types)
  39. elif self.model_type == "transformers":
  40. return await self._extract_by_transformers(text, entity_types)
  41. elif self.model_type == "api":
  42. return await self._extract_by_api(text, entity_types)
  43. else:
  44. logger.warning(f"未知的模型类型: {self.model_type},使用规则模式")
  45. return await self._extract_by_rules(text, entity_types)
  46. async def _extract_by_rules(
  47. self,
  48. text: str,
  49. entity_types: Optional[List[str]] = None
  50. ) -> List[EntityInfo]:
  51. """
  52. 基于规则的 NER 提取
  53. 用于开发测试阶段,后续可替换为更高级的模型
  54. """
  55. entities = []
  56. # 规则定义
  57. rules = {
  58. "DATE": [
  59. # 中文日期格式
  60. r'(\d{4}年\d{1,2}月\d{1,2}日)',
  61. r'(\d{4}年\d{1,2}月)',
  62. r'(\d{4}-\d{1,2}-\d{1,2})',
  63. r'(\d{4}/\d{1,2}/\d{1,2})',
  64. ],
  65. "NUMBER": [
  66. # 带单位的数值
  67. r'(\d+\.?\d*\s*(?:万元|元|米|公里|千米|平方米|㎡|吨|kg|g|个|台|套|件|次|人|天|小时|分钟|秒|%|百分比))',
  68. # 百分比
  69. r'(\d+\.?\d*%)',
  70. # 纯数值(较大的数)
  71. r'(?<![a-zA-Z])(\d{4,}(?:\.\d+)?)(?![a-zA-Z])',
  72. ],
  73. "ORG": [
  74. # 机构/公司名称
  75. r'([\u4e00-\u9fa5]{2,10}(?:公司|集团|院|所|局|部|厅|委|会|中心|协会|学会|银行|医院|学校|大学|学院))',
  76. # xx省/市/县/区
  77. r'([\u4e00-\u9fa5]{2,6}(?:省|市|县|区|镇|乡|村)(?:人民)?(?:政府|委员会)?)',
  78. ],
  79. "LOC": [
  80. # 地点
  81. r'([\u4e00-\u9fa5]{2,6}(?:省|市|县|区|镇|乡|村|路|街|巷|号|楼|栋|单元|室))',
  82. # 常见地名后缀
  83. r'([\u4e00-\u9fa5]{2,8}(?:工业园|开发区|高新区|科技园|产业园))',
  84. ],
  85. "PERSON": [
  86. # 人名(简单规则:姓+名)
  87. r'(?:(?:张|王|李|赵|刘|陈|杨|黄|周|吴|徐|孙|马|朱|胡|郭|何|林|罗|高|郑|梁|谢|唐|许|邓|冯|韩|曹|曾|彭|萧|蔡|潘|田|董|袁|于|余|叶|蒋|杜|苏|魏|程|吕|丁|沈|任|姚|卢|傅|钟|姜|崔|谭|廖|范|汪|陆|金|石|戴|贾|韦|夏|邱|方|侯|邹|熊|孟|秦|白|江|阎|薛|尹|段|雷|黎|史|龙|陶|贺|顾|毛|郝|龚|邵|万|钱|严|赖|覃|洪|武|莫|孔)[\u4e00-\u9fa5]{1,2})(?:总|经理|主任|工程师|教授|博士|先生|女士|同志)?',
  88. ],
  89. "DEVICE": [
  90. # 设备名称
  91. r'([\u4e00-\u9fa5]{2,10}(?:设备|仪器|仪表|机器|装置|系统|探测器|传感器|检测仪|分析仪|监测仪))',
  92. ],
  93. "PROJECT": [
  94. # 项目名称
  95. r'([\u4e00-\u9fa5]{2,20}(?:项目|工程|计划|方案|课题))',
  96. ],
  97. }
  98. # 过滤实体类型
  99. if entity_types:
  100. rules = {k: v for k, v in rules.items() if k in entity_types}
  101. # 执行规则匹配
  102. seen_entities = set() # 用于去重
  103. for entity_type, patterns in rules.items():
  104. for pattern in patterns:
  105. for match in re.finditer(pattern, text):
  106. entity_text = match.group(1) if match.groups() else match.group(0)
  107. entity_text = entity_text.strip()
  108. # 去重
  109. entity_key = f"{entity_type}:{entity_text}"
  110. if entity_key in seen_entities:
  111. continue
  112. seen_entities.add(entity_key)
  113. # 计算行号
  114. line_num = text[:match.start()].count('\n') + 1
  115. # 获取上下文
  116. context_start = max(0, match.start() - 20)
  117. context_end = min(len(text), match.end() + 20)
  118. context = text[context_start:context_end]
  119. if context_start > 0:
  120. context = "..." + context
  121. if context_end < len(text):
  122. context = context + "..."
  123. entity = EntityInfo(
  124. name=entity_text,
  125. type=entity_type,
  126. value=entity_text,
  127. position=PositionInfo(
  128. char_start=match.start(),
  129. char_end=match.end(),
  130. line=line_num
  131. ),
  132. context=context,
  133. confidence=0.8, # 规则匹配默认置信度
  134. temp_id=str(uuid.uuid4())[:8]
  135. )
  136. entities.append(entity)
  137. logger.info(f"规则 NER 提取完成: entity_count={len(entities)}")
  138. return entities
  139. async def _extract_by_spacy(
  140. self,
  141. text: str,
  142. entity_types: Optional[List[str]] = None
  143. ) -> List[EntityInfo]:
  144. """
  145. 使用 spaCy 进行 NER 提取
  146. """
  147. # TODO: 实现 spaCy NER
  148. logger.warning("spaCy NER 尚未实现,回退到规则模式")
  149. return await self._extract_by_rules(text, entity_types)
  150. async def _extract_by_transformers(
  151. self,
  152. text: str,
  153. entity_types: Optional[List[str]] = None
  154. ) -> List[EntityInfo]:
  155. """
  156. 使用 Transformers 模型进行 NER 提取
  157. """
  158. # TODO: 实现 Transformers NER
  159. logger.warning("Transformers NER 尚未实现,回退到规则模式")
  160. return await self._extract_by_rules(text, entity_types)
  161. async def _extract_by_api(
  162. self,
  163. text: str,
  164. entity_types: Optional[List[str]] = None
  165. ) -> List[EntityInfo]:
  166. """
  167. 调用外部 API 进行 NER 提取
  168. """
  169. # TODO: 实现 API NER(调用 DeepSeek/Qwen)
  170. logger.warning("API NER 尚未实现,回退到规则模式")
  171. return await self._extract_by_rules(text, entity_types)
# Create the module-level singleton instance.
ner_service = NerService()