# ollama_service.py

  1. """
  2. Ollama LLM 服务
  3. 用于调用本地 Ollama 模型进行 NER 提取
  4. """
  5. import json
  6. import re
  7. import uuid
  8. import httpx
  9. from typing import List, Optional, Dict, Any
  10. from loguru import logger
  11. from ..config import settings
  12. from ..models import EntityInfo, PositionInfo
  13. class OllamaService:
  14. """Ollama LLM 服务"""
  15. def __init__(self):
  16. self.base_url = settings.ollama_url
  17. self.model = settings.ollama_model
  18. self.timeout = settings.ollama_timeout
  19. self.chunk_size = settings.chunk_size
  20. self.chunk_overlap = settings.chunk_overlap
  21. # 检测是否使用 UniversalNER
  22. self.is_universal_ner = "universal-ner" in self.model.lower()
  23. logger.info(f"初始化 Ollama 服务: url={self.base_url}, model={self.model}, universal_ner={self.is_universal_ner}")

    def _split_text(self, text: str) -> List[Dict[str, Any]]:
        """
        Split a long text into multiple chunks.

        Args:
            text: original text

        Returns:
            List of chunks, each containing text, start_pos and end_pos.
        """
        if len(text) <= self.chunk_size:
            return [{"text": text, "start_pos": 0, "end_pos": len(text)}]

        chunks = []
        start = 0
        while start < len(text):
            end = min(start + self.chunk_size, len(text))
            # Try to split at a sentence boundary (newline, period, ...) to avoid
            # cutting a sentence in half
            if end < len(text):
                # Search backwards for the nearest separator
                for sep in ['\n\n', '\n', '。', ';', '!', '?', '.']:
                    sep_pos = text.rfind(sep, start + self.chunk_size // 2, end)
                    if sep_pos > start:
                        end = sep_pos + len(sep)
                        break
            chunk_text = text[start:end]
            chunks.append({
                "text": chunk_text,
                "start_pos": start,
                "end_pos": end
            })
            # Start of the next chunk (accounting for the overlap)
            start = end - self.chunk_overlap if end < len(text) else end

        logger.info(f"Text split complete: total length={len(text)}, chunks={len(chunks)}")
        return chunks
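
    # Illustrative note (not in the original code): assuming chunk_size=1000 and
    # chunk_overlap=100 in settings, an 1,800-character text would come back as two
    # chunks shaped roughly like
    #     [{"text": "...", "start_pos": 0,    "end_pos": ~1000},
    #      {"text": "...", "start_pos": ~900, "end_pos": 1800}]
    # with the first boundary shifted to the nearest separator found in the second
    # half of the chunk window.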

    def _build_ner_prompt(self, text: str, entity_types: Optional[List[str]] = None) -> str:
        """
        Build the prompt for NER extraction.
        """
        types = entity_types or settings.entity_types
        types_desc = ", ".join(types)
        # Example that shows the model the expected output format
        example = '{"entities": [{"name": "成都市", "type": "LOC", "charStart": 10, "charEnd": 13}, {"name": "2024年5月", "type": "DATE", "charStart": 0, "charEnd": 7}]}'
        # Concise, direct prompt asking the model to output JSON only
        # (the prompt itself is sent to the model in Chinese)
        prompt = f"""请从以下文本中提取命名实体,直接输出JSON,不要解释。
实体类型: {types_desc}
输出格式示例:
{example}
文本:
{text}
请直接输出JSON:"""
        return prompt

    async def _call_ollama(self, prompt: str, disable_thinking: bool = True) -> Optional[str]:
        """
        Call the Ollama chat API.

        Args:
            prompt: input prompt
            disable_thinking: whether to disable "thinking" mode (for models such as
                Qwen3 that support it)
        """
        # Use the /api/chat endpoint; the "think" parameter only takes effect there
        url = f"{self.base_url}/api/chat"
        payload = {
            "model": self.model,
            "messages": [
                {
                    "role": "user",
                    "content": prompt
                }
            ],
            "stream": False,
            "options": {
                "temperature": 0.1,  # low temperature for more deterministic output
            }
        }
        # Qwen3 thinking mode: disable thinking so the model emits the JSON result directly.
        # The "think" parameter is only honored by the /api/chat endpoint.
        if disable_thinking:
            payload["think"] = False
        try:
            async with httpx.AsyncClient(timeout=self.timeout) as client:
                response = await client.post(url, json=payload)
                response.raise_for_status()
                result = response.json()
                # The chat endpoint returns: {"message": {"role": "assistant", "content": "..."}}
                message = result.get("message", {})
                return message.get("content", "")
        except httpx.TimeoutException:
            logger.error(f"Ollama request timed out: timeout={self.timeout}s")
            return None
        except Exception as e:
            logger.error(f"Ollama request failed: {e}")
            return None

    def _parse_llm_response(self, response: str, chunk_start_pos: int = 0) -> List[EntityInfo]:
        """
        Parse the JSON result returned by the LLM.

        Args:
            response: raw text returned by the LLM
            chunk_start_pos: offset of the current chunk in the original text
                (used to adjust entity positions)
        """
        entities = []
        try:
            # Qwen3 thinking mode: keep only the content after </think>
            think_end = response.find('</think>')
            if think_end != -1:
                response = response[think_end + len('</think>'):]
                logger.debug(f"Content after thinking block: {response[:200]}...")
            else:
                # Check for a <think> without a closing </think> (thinking unfinished or truncated)
                think_start = response.find('<think>')
                if think_start != -1:
                    # Sometimes the JSON appears before the thinking block,
                    # so try the content preceding <think> first.
                    pre_think = response[:think_start].strip()
                    if pre_think:
                        response = pre_think
                        logger.debug(f"Using content before thinking block: {response[:200]}...")
                    else:
                        # The thinking block itself may contain the JSON; try to extract it directly
                        logger.debug("Incomplete thinking block detected, extracting JSON directly")

            # Strip markdown code block markers
            response = re.sub(r'```json\s*', '', response)
            response = re.sub(r'```\s*', '', response)
            response = response.strip()

            # Attempt 1: parse the whole response (in case it is pure JSON)
            data = None
            try:
                data = json.loads(response)
            except json.JSONDecodeError:
                pass

            # Attempt 2: look for JSON objects containing "entities" (lenient matching)
            if not data or "entities" not in data:
                # Collect candidate {"entities": [...]} objects; the greedy character
                # classes also capture entity objects nested inside the array
                json_matches = re.findall(r'\{[^{}]*"entities"\s*:\s*\[[^\]]*\][^{}]*\}', response)
                for json_str in json_matches:
                    try:
                        data = json.loads(json_str)
                        if "entities" in data:
                            break
                    except json.JSONDecodeError:
                        continue

            # Attempt 3: an even looser regex (handles newlines and nesting)
            if not data or "entities" not in data:
                # Match from {"entities" up to the last ]}
                json_match = re.search(r'\{\s*"entities"\s*:\s*\[[\s\S]*\]\s*\}', response)
                if json_match:
                    try:
                        data = json.loads(json_match.group())
                    except json.JSONDecodeError:
                        pass

            if not data or "entities" not in data:
                logger.warning(f"No valid entities JSON found, response={response[:300]}...")
                return entities

            entity_list = data.get("entities", [])
            for item in entity_list:
                name = item.get("name", "").strip()
                entity_type = item.get("type", "").upper()
                char_start = item.get("charStart", 0)
                char_end = item.get("charEnd", 0)
                if not name or len(name) < 2:
                    continue
                # Adjust positions by the chunk's offset in the original text
                adjusted_start = char_start + chunk_start_pos
                adjusted_end = char_end + chunk_start_pos
                entity = EntityInfo(
                    name=name,
                    type=entity_type,
                    value=name,
                    position=PositionInfo(
                        char_start=adjusted_start,
                        char_end=adjusted_end,
                        line=1  # line numbers are not tracked in LLM mode
                    ),
                    confidence=0.9,  # LLM mode gets a relatively high default confidence
                    temp_id=str(uuid.uuid4())[:8]
                )
                entities.append(entity)
        except json.JSONDecodeError as e:
            logger.warning(f"JSON parsing failed: {e}, response={response[:200]}...")
        except Exception as e:
            logger.error(f"Failed to parse LLM response: {e}")
        return entities

    async def extract_entities(
        self,
        text: str,
        entity_types: Optional[List[str]] = None
    ) -> List[EntityInfo]:
        """
        Extract entities using the Ollama LLM.

        Long texts are split into chunks automatically, and the extraction strategy
        switches automatically when a UniversalNER model is configured.
        """
        if not text or not text.strip():
            return []
        # Choose the extraction strategy based on the configured model
        if self.is_universal_ner:
            return await self._extract_with_universal_ner(text, entity_types)
        else:
            return await self._extract_with_general_llm(text, entity_types)

    async def _extract_with_general_llm(
        self,
        text: str,
        entity_types: Optional[List[str]] = None
    ) -> List[EntityInfo]:
        """
        Extract entities with a general-purpose LLM (e.g. Qwen).
        """
        # Split long text into chunks
        chunks = self._split_text(text)
        all_entities = []
        seen_entities = set()  # used for de-duplication

        for i, chunk in enumerate(chunks):
            logger.info(f"Processing chunk {i+1}/{len(chunks)}: length={len(chunk['text'])}")
            # Build the prompt
            prompt = self._build_ner_prompt(chunk["text"], entity_types)
            # Call Ollama
            response = await self._call_ollama(prompt)
            if not response:
                logger.warning(f"Chunk {i+1}: empty response from Ollama")
                continue
            # Log the full response for debugging
            logger.debug(f"Chunk {i+1} full LLM response:\n{response}\n{'='*50}")
            # Parse the result
            entities = self._parse_llm_response(response, chunk["start_pos"])
            # De-duplicate across chunks
            for entity in entities:
                entity_key = f"{entity.type}:{entity.name}"
                if entity_key not in seen_entities:
                    seen_entities.add(entity_key)
                    all_entities.append(entity)
            logger.info(f"Chunk {i+1}: extracted {len(entities)} entities")

        logger.info(f"General LLM NER extraction complete: total entities={len(all_entities)}")
        return all_entities

    async def _extract_with_universal_ner(
        self,
        text: str,
        entity_types: Optional[List[str]] = None
    ) -> List[EntityInfo]:
        """
        Extract entities with a UniversalNER model.

        Prompt format used here: "<chunk text> <entity type in English>"
        Expected response format: ["entity1", "entity2", ...]
        """
        # Entity type mapping (internal type -> UniversalNER English type names)
        type_mapping = {
            "PERSON": ["person", "people", "human"],
            "ORG": ["organization", "company", "institution"],
            "LOC": ["location", "place", "address"],
            "DATE": ["date", "time"],
            "NUMBER": ["number", "quantity", "measurement"],
            "DEVICE": ["device", "equipment", "instrument"],
            "PROJECT": ["project", "program"],
            "METHOD": ["method", "standard", "specification"],
        }
        types_to_extract = entity_types or list(type_mapping.keys())

        # Split long text into chunks
        chunks = self._split_text(text)
        all_entities = []
        seen_entities = set()  # used for de-duplication

        for i, chunk in enumerate(chunks):
            chunk_text = chunk["text"]
            chunk_start = chunk["start_pos"]
            logger.info(f"UniversalNER processing chunk {i+1}/{len(chunks)}: length={len(chunk_text)}")
            chunk_new_count = 0
            # Extract each entity type separately
            for entity_type in types_to_extract:
                if entity_type not in type_mapping:
                    continue
                # Use the first English type name
                english_type = type_mapping[entity_type][0]
                # UniversalNER prompt format
                prompt = f"{chunk_text} {english_type}"
                # Call Ollama
                response = await self._call_ollama(prompt)
                if not response:
                    continue
                # Parse the UniversalNER response (e.g. ["entity1", "entity2"])
                entities = self._parse_universal_ner_response(
                    response, entity_type, chunk_text, chunk_start
                )
                # De-duplicate across types and chunks
                for entity in entities:
                    entity_key = f"{entity.type}:{entity.name}"
                    if entity_key not in seen_entities:
                        seen_entities.add(entity_key)
                        all_entities.append(entity)
                        chunk_new_count += 1
            logger.info(f"Chunk {i+1}: UniversalNER extracted {chunk_new_count} new entities")

        logger.info(f"UniversalNER extraction complete: total entities={len(all_entities)}")
        return all_entities

    def _parse_universal_ner_response(
        self,
        response: str,
        entity_type: str,
        original_text: str,
        chunk_start_pos: int = 0
    ) -> List[EntityInfo]:
        """
        Parse a UniversalNER response.

        UniversalNER returns a JSON array such as: ["entity1", "entity2", ...]
        """
        entities = []
        try:
            # Clean the response and extract the JSON array
            response = response.strip()
            # Try to locate a JSON array
            json_match = re.search(r'\[[\s\S]*?\]', response)
            if not json_match:
                logger.debug(f"No array found in UniversalNER response: {response[:100]}")
                return entities
            json_str = json_match.group()
            entity_names = json.loads(json_str)
            if not isinstance(entity_names, list):
                return entities
            for name in entity_names:
                if not isinstance(name, str) or len(name) < 2:
                    continue
                name = name.strip()
                # Locate the entity in the original chunk text
                pos = original_text.find(name)
                char_start = pos + chunk_start_pos if pos >= 0 else 0
                char_end = char_start + len(name) if pos >= 0 else 0
                entity = EntityInfo(
                    name=name,
                    type=entity_type,
                    value=name,
                    position=PositionInfo(
                        char_start=char_start,
                        char_end=char_end,
                        line=1
                    ),
                    confidence=0.85,  # default confidence for UniversalNER results
                    temp_id=str(uuid.uuid4())[:8]
                )
                entities.append(entity)
        except json.JSONDecodeError as e:
            logger.debug(f"UniversalNER JSON parsing failed: {e}, response={response[:100]}")
        except Exception as e:
            logger.error(f"Failed to parse UniversalNER response: {e}")
        return entities

    async def check_health(self) -> bool:
        """
        Check whether the Ollama service is reachable.
        """
        try:
            async with httpx.AsyncClient(timeout=5) as client:
                response = await client.get(f"{self.base_url}/api/tags")
                return response.status_code == 200
        except Exception:
            return False


# Module-level singleton instance
ollama_service = OllamaService()
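

# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal, hedged example of driving the singleton from async code. The sample
# text and the entity types passed in are made-up assumptions; because this module
# uses relative imports, it must be run via `python -m <package>.ollama_service`
# (the exact package path depends on the project layout).
if __name__ == "__main__":  # pragma: no cover
    import asyncio

    async def _demo() -> None:
        # Confirm the Ollama server is reachable before issuing extraction requests
        if not await ollama_service.check_health():
            print("Ollama service is not reachable")
            return
        sample = "2024年5月,成都市气象局发布了强对流天气预警。"
        entities = await ollama_service.extract_entities(sample, entity_types=["LOC", "DATE", "ORG"])
        for e in entities:
            print(f"{e.type:8} {e.name} [{e.position.char_start}:{e.position.char_end}]")

    asyncio.run(_demo())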