| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227 |
- """
- NER 服务实现
- 支持多种模式:
- 1. rule - 基于规则的简单 NER(默认,用于开发测试)
- 2. spacy - 使用 spaCy 模型
- 3. transformers - 使用 Transformers 模型
- 4. api - 调用外部 API(如 DeepSeek/Qwen)
- """
- import re
- import uuid
- from typing import List, Optional
- from loguru import logger
- from ..config import settings
- from ..models import EntityInfo, PositionInfo
class NerService:
    """Named-entity recognition (NER) service.

    The extraction backend is selected via ``settings.ner_model``:

    * ``rule``         -- regex-based extraction (default; development/testing)
    * ``spacy``        -- spaCy model (TODO: not implemented, falls back to rules)
    * ``transformers`` -- Transformers model (TODO: not implemented, falls back to rules)
    * ``api``          -- external API such as DeepSeek/Qwen (TODO: not implemented, falls back to rules)
    """

    # Raw regex rules per entity type. Kept as raw strings for readability;
    # compiled once below (previously the dict was rebuilt and every pattern
    # recompiled on each call to _extract_by_rules).
    _RAW_RULES = {
        "DATE": [
            # Chinese date formats
            r'(\d{4}年\d{1,2}月\d{1,2}日)',
            r'(\d{4}年\d{1,2}月)',
            r'(\d{4}-\d{1,2}-\d{1,2})',
            r'(\d{4}/\d{1,2}/\d{1,2})',
        ],
        "NUMBER": [
            # Numeric value with a unit suffix
            r'(\d+\.?\d*\s*(?:万元|元|米|公里|千米|平方米|㎡|吨|kg|g|个|台|套|件|次|人|天|小时|分钟|秒|%|百分比))',
            # Percentages
            r'(\d+\.?\d*%)',
            # Bare numbers (4+ digits, not embedded in an ASCII identifier)
            r'(?<![a-zA-Z])(\d{4,}(?:\.\d+)?)(?![a-zA-Z])',
        ],
        "ORG": [
            # Organization / company names by suffix
            r'([\u4e00-\u9fa5]{2,10}(?:公司|集团|院|所|局|部|厅|委|会|中心|协会|学会|银行|医院|学校|大学|学院))',
            # Province/city/county/district government bodies
            r'([\u4e00-\u9fa5]{2,6}(?:省|市|县|区|镇|乡|村)(?:人民)?(?:政府|委员会)?)',
        ],
        "LOC": [
            # Locations by administrative / address suffix
            r'([\u4e00-\u9fa5]{2,6}(?:省|市|县|区|镇|乡|村|路|街|巷|号|楼|栋|单元|室))',
            # Common park / zone name suffixes
            r'([\u4e00-\u9fa5]{2,8}(?:工业园|开发区|高新区|科技园|产业园))',
        ],
        "PERSON": [
            # Person names (simple rule: common surname + 1-2 chars), with an
            # optional title suffix. NOTE: no capture group, so the matched
            # text includes the title when present.
            r'(?:(?:张|王|李|赵|刘|陈|杨|黄|周|吴|徐|孙|马|朱|胡|郭|何|林|罗|高|郑|梁|谢|唐|许|邓|冯|韩|曹|曾|彭|萧|蔡|潘|田|董|袁|于|余|叶|蒋|杜|苏|魏|程|吕|丁|沈|任|姚|卢|傅|钟|姜|崔|谭|廖|范|汪|陆|金|石|戴|贾|韦|夏|邱|方|侯|邹|熊|孟|秦|白|江|阎|薛|尹|段|雷|黎|史|龙|陶|贺|顾|毛|郝|龚|邵|万|钱|严|赖|覃|洪|武|莫|孔)[\u4e00-\u9fa5]{1,2})(?:总|经理|主任|工程师|教授|博士|先生|女士|同志)?',
        ],
        "DEVICE": [
            # Equipment / instrument names by suffix
            r'([\u4e00-\u9fa5]{2,10}(?:设备|仪器|仪表|机器|装置|系统|探测器|传感器|检测仪|分析仪|监测仪))',
        ],
        "PROJECT": [
            # Project names -- stricter rules: a project name should be a
            # complete noun phrase, usually with a distinctive prefix.
            # Project name inside book-title marks 《…》
            r'《([\u4e00-\u9fa5a-zA-Z0-9]{2,30}(?:项目|工程|计划|方案|课题))》',
            # Explicit project code / name format
            r'([A-Z0-9\-]+(?:项目|工程))',
            # Place/organization prefix + project-type suffix
            r'((?:[\u4e00-\u9fa5]{2,6}(?:省|市|县|区|镇))?[\u4e00-\u9fa5]{2,15}(?:建设|改造|修复|治理|开发|研究|试点|示范)(?:项目|工程))',
            # "xx项目部/项目组/项目办"
            r'([\u4e00-\u9fa5]{2,15}项目(?:部|组|办))',
        ],
    }

    # Patterns compiled exactly once at class-definition time.
    _RULES = {etype: [re.compile(p) for p in patterns]
              for etype, patterns in _RAW_RULES.items()}

    # Strings that match the rules above but are not meaningful entities.
    _STOPWORDS = frozenset({
        # Common meaningless matches
        "该项目", "本项目", "此项目", "各项目", "子公司和项目", "认真落实项目",
        "开展的培训项目", "年已经开展的培训项目",
        "该工程", "本工程", "此工程", "各工程",
        "该计划", "本计划", "此计划", "各计划",
        "该方案", "本方案", "此方案", "各方案",
        # Invalid verb-led matches
        "落实项目", "开展项目", "推进项目", "完成项目", "实施项目",
        # Too-short, meaningless entities
        "项目", "工程", "计划", "方案", "课题",
    })

    def __init__(self):
        # Backend selection is read once from settings at construction time.
        self.model_type = settings.ner_model
        logger.info(f"初始化 NER 服务: model_type={self.model_type}")

    async def extract_entities(
        self,
        text: str,
        entity_types: Optional[List[str]] = None
    ) -> List[EntityInfo]:
        """Extract entities from *text*.

        Args:
            text: Text to extract entities from.
            entity_types: Entity types to extract; ``None``/empty extracts all.

        Returns:
            List of extracted :class:`EntityInfo` objects.
        """
        if not text or not text.strip():
            return []

        if self.model_type == "rule":
            return await self._extract_by_rules(text, entity_types)
        elif self.model_type == "spacy":
            return await self._extract_by_spacy(text, entity_types)
        elif self.model_type == "transformers":
            return await self._extract_by_transformers(text, entity_types)
        elif self.model_type == "api":
            return await self._extract_by_api(text, entity_types)
        else:
            logger.warning(f"未知的模型类型: {self.model_type},使用规则模式")
            return await self._extract_by_rules(text, entity_types)

    async def _extract_by_rules(
        self,
        text: str,
        entity_types: Optional[List[str]] = None
    ) -> List[EntityInfo]:
        """Rule-based NER extraction.

        Intended for the development/testing phase; can later be replaced by
        a more capable model backend.
        """
        rules = self._RULES
        if entity_types:
            wanted = set(entity_types)
            rules = {k: v for k, v in rules.items() if k in wanted}

        entities: List[EntityInfo] = []
        seen_entities = set()  # dedup on (type, text)

        for entity_type, patterns in rules.items():
            for pattern in patterns:
                for match in pattern.finditer(text):
                    # When the pattern has a capture group, both the text and
                    # the reported span come from that group. Previously the
                    # span always covered the whole match, so e.g. PROJECT
                    # names captured inside 《…》 got positions (and context)
                    # that included the brackets while the returned text did
                    # not -- char_start/char_end disagreed with `name`.
                    if match.groups():
                        raw_text = match.group(1)
                        start, end = match.start(1), match.end(1)
                    else:
                        raw_text = match.group(0)
                        start, end = match.start(), match.end()
                    entity_text = raw_text.strip()

                    # Skip stopwords.
                    if entity_text in self._STOPWORDS:
                        continue

                    # Skip entities shorter than 3 characters.
                    # NOTE(review): this also drops two-character person
                    # names (surname + one char) -- confirm intended.
                    if len(entity_text) < 3:
                        continue

                    # Deduplicate on type + text; first occurrence wins.
                    entity_key = f"{entity_type}:{entity_text}"
                    if entity_key in seen_entities:
                        continue
                    seen_entities.add(entity_key)

                    # 1-based line number of the entity start.
                    line_num = text[:start].count('\n') + 1

                    # Surrounding context: up to 20 chars on each side, with
                    # ellipses marking truncation.
                    context_start = max(0, start - 20)
                    context_end = min(len(text), end + 20)
                    context = text[context_start:context_end]
                    if context_start > 0:
                        context = "..." + context
                    if context_end < len(text):
                        context = context + "..."

                    entity = EntityInfo(
                        name=entity_text,
                        type=entity_type,
                        value=entity_text,
                        position=PositionInfo(
                            char_start=start,
                            char_end=end,
                            line=line_num
                        ),
                        context=context,
                        confidence=0.8,  # default confidence for rule matches
                        temp_id=str(uuid.uuid4())[:8]
                    )
                    entities.append(entity)

        logger.info(f"规则 NER 提取完成: entity_count={len(entities)}")
        return entities

    async def _extract_by_spacy(
        self,
        text: str,
        entity_types: Optional[List[str]] = None
    ) -> List[EntityInfo]:
        """NER extraction via spaCy (not yet implemented; falls back to rules)."""
        # TODO: implement spaCy NER
        logger.warning("spaCy NER 尚未实现,回退到规则模式")
        return await self._extract_by_rules(text, entity_types)

    async def _extract_by_transformers(
        self,
        text: str,
        entity_types: Optional[List[str]] = None
    ) -> List[EntityInfo]:
        """NER extraction via a Transformers model (not yet implemented; falls back to rules)."""
        # TODO: implement Transformers NER
        logger.warning("Transformers NER 尚未实现,回退到规则模式")
        return await self._extract_by_rules(text, entity_types)

    async def _extract_by_api(
        self,
        text: str,
        entity_types: Optional[List[str]] = None
    ) -> List[EntityInfo]:
        """NER extraction via an external API (not yet implemented; falls back to rules)."""
        # TODO: implement API NER (DeepSeek/Qwen)
        logger.warning("API NER 尚未实现,回退到规则模式")
        return await self._extract_by_rules(text, entity_types)
# Module-level singleton instance shared by all importers of this module.
ner_service = NerService()
|